repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
StuntsPT/pyRona | pyRona/plotters/map_plotter.py | 1 | 4373 | #!/usr/bin/python3
# Copyright 2018 Francisco Pina Martins <f.pinamartins@gmail.com>
# This file is part of pyRona.
# pyRona is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# pyRona is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with pyRona. If not, see <http://www.gnu.org/licenses/>.
try:
import cartopy.crs as ccrs
import cartopy.feature as cfeature
from cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER
except ImportError:
print("\nError importing 'Cartopy'. Please look at pyRona's manual"
"(http://pyrona.readthedocs.io/en/latest/install/#installing-cartopy)"
" for information on how to install it on your system. Map plotting"
" was **not** performed.")
quit()
import matplotlib.pyplot as plt
import numpy as np
def map_plotter(ronas, latitudes, longitudes, out_filename):
"""
Plots a map with each sampling site coloured per its average RONA value
"""
def _define_map_edges(latitudes, longitudes, padding=0.10):
"""
Defines the edges of the map to be drawn.
Takes a list of latitudes and longitudes as input and returns the map
edges
"""
# Define padding for the map edges
hpad = padding
vpad = padding
# Get map edges
max_lon = np.max(longitudes)
max_lon = max_lon + abs(max_lon * vpad)
min_lon = np.min(longitudes)
min_lon = min_lon - abs(min_lon * vpad)
max_lat = np.max(latitudes)
max_lat = max_lat + abs(max_lat * hpad)
min_lat = np.min(latitudes)
min_lat = min_lat - abs(min_lat * hpad)
return([min_lat, max_lat, min_lon, max_lon])
max_ronas = []
pop_names = ronas[0].pop_names
for i, _ in enumerate(pop_names):
max_ronas += [max([x.avg_ronas[i] for x in ronas])]
fig = plt.figure(figsize=(22, 12), facecolor="none")
map_area = plt.axes(projection=ccrs.PlateCarree())
map_edges = _define_map_edges(latitudes, longitudes)
map_area.set_extent(map_edges)
map_area.coastlines(resolution='50m')
cfeature.BORDERS.scale = "50m"
map_area.add_feature(cfeature.BORDERS)
# Draw sampling sites
dotplot = map_area.scatter(latitudes, longitudes, c=max_ronas, s=700,
vmin=0, vmax=max(max_ronas),
transform=ccrs.PlateCarree(),
cmap='autumn_r', zorder=2)
# Label the locations
for label, x, y in zip(pop_names, latitudes, longitudes):
map_area.annotate(label.strip().replace("_", " "), xy=(x, y),
xytext=(0, -28), textcoords='offset points',
ha='center', va='bottom', fontsize=13,
bbox=dict(boxstyle='round,pad=0.1',
fc='lightgrey', edgecolor="none"))
# Control x and y ticks
gridlines = map_area.gridlines(draw_labels=True)
gridlines.xlines = False
gridlines.ylines = False
gridlines.ylabels_right = False
gridlines.xlabels_top = False
gridlines.xformatter = LONGITUDE_FORMATTER
gridlines.yformatter = LATITUDE_FORMATTER
gridlines.xlabel_style = {'size': 22}
gridlines.ylabel_style = {'size': 22}
# Control x and y labels
map_area.text(-0.10, 0.55, 'Latitude', va='bottom', ha='center',
rotation='vertical', rotation_mode='anchor',
transform=map_area.transAxes, fontsize=28)
map_area.text(0.5, -0.12, 'Longitude', va='bottom', ha='center',
rotation='horizontal', rotation_mode='anchor',
transform=map_area.transAxes, fontsize=28)
# Sidebar settings
sidebar = fig.colorbar(dotplot)
sidebar.ax.tick_params(labelsize=20)
sidebar.set_label(label='RONA', size=30, weight='bold')
# Save the map
fig.savefig(out_filename)
# TODO: Eventually make an interpolation
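# Example usage (illustrative sketch): map_plotter expects "ronas" to be a list
# of result objects exposing `pop_names` and `avg_ronas`, plus matching latitude
# and longitude arrays. The coordinates and filename below are placeholders,
# not real sampling data.
#
# >>> import numpy as np
# >>> latitudes = np.array([38.7, 41.1, 40.2])
# >>> longitudes = np.array([-9.1, -8.6, -7.5])
# >>> map_plotter(ronas, latitudes, longitudes, "rona_map.pdf")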
| gpl-3.0 |
gpetretto/pymatgen | pymatgen/analysis/phase_diagram.py | 1 | 84767 | # coding: utf-8
# Copyright (c) Pymatgen Development Team.
# Distributed under the terms of the MIT License.
from __future__ import division, unicode_literals
import re
import csv
import collections
import itertools
from io import open
import math
from six.moves import zip
import logging
from monty.json import MSONable, MontyDecoder
from monty.string import unicode2str
from monty.functools import lru_cache
import numpy as np
from scipy.spatial import ConvexHull
from pymatgen.core.composition import Composition
from pymatgen.core.periodic_table import Element, DummySpecie, get_el_sp
from pymatgen.util.coord import Simplex, in_coord_list
from pymatgen.util.string import latexify
from pymatgen.util.plotting import pretty_plot
from pymatgen.analysis.reaction_calculator import Reaction, \
ReactionError
"""
This module defines tools to generate and analyze phase diagrams.
"""
__author__ = "Shyue Ping Ong"
__copyright__ = "Copyright 2011, The Materials Project"
__version__ = "1.0"
__maintainer__ = "Shyue Ping Ong"
__email__ = "shyuep@gmail.com"
__status__ = "Production"
__date__ = "May 16, 2011"
logger = logging.getLogger(__name__)
class PDEntry(MSONable):
"""
An object encompassing all relevant data for phase diagrams.
.. attribute:: composition
The composition associated with the PDEntry.
.. attribute:: energy
The energy associated with the entry.
.. attribute:: name
A name for the entry. This is the string shown in the phase diagrams.
By default, this is the reduced formula for the composition, but can be
set to some other string for display purposes.
.. attribute:: attribute
An arbitrary attribute.
Args:
composition (Composition): Composition
energy (float): Energy for composition.
name (str): Optional parameter to name the entry. Defaults to the
reduced chemical formula.
attribute: Optional attribute of the entry. This can be used to
specify that the entry is a newly found compound, or to specify a
particular label for the entry, or else ... Used for further
analysis and plotting purposes. An attribute can be anything
but must be MSONable.
"""
def __init__(self, composition, energy, name=None, attribute=None):
self.energy = energy
self.composition = Composition(composition)
self.name = name if name else self.composition.reduced_formula
self.attribute = attribute
@property
def energy_per_atom(self):
"""
Returns the final energy per atom.
"""
return self.energy / self.composition.num_atoms
@property
def is_element(self):
"""
True if the entry is an element.
"""
return self.composition.is_element
def __repr__(self):
return "PDEntry : {} with energy = {:.4f}".format(self.composition,
self.energy)
def __str__(self):
return self.__repr__()
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"composition": self.composition.as_dict(),
"energy": self.energy,
"name": self.name,
"attribute": self.attribute}
def __eq__(self, other):
if isinstance(other, self.__class__):
return self.as_dict() == other.as_dict()
else:
return False
def __hash__(self):
return id(self)
@classmethod
def from_dict(cls, d):
return cls(Composition(d["composition"]), d["energy"],
d["name"] if "name" in d else None,
d["attribute"] if "attribute" in d else None)
@staticmethod
def to_csv(filename, entries, latexify_names=False):
"""
Exports PDEntries to a csv
Args:
filename: Filename to write to.
entries: PDEntries to export.
latexify_names: Format entry names to be LaTeX compatible,
e.g., Li_{2}O
"""
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
elements = sorted(list(elements), key=lambda a: a.X)
writer = csv.writer(open(filename, "w"), delimiter=unicode2str(","),
quotechar=unicode2str("\""),
quoting=csv.QUOTE_MINIMAL)
writer.writerow(["Name"] + elements + ["Energy"])
for entry in entries:
row = [entry.name if not latexify_names
else re.sub(r"([0-9]+)", r"_{\1}", entry.name)]
row.extend([entry.composition[el] for el in elements])
row.append(entry.energy)
writer.writerow(row)
@staticmethod
def from_csv(filename):
"""
Imports PDEntries from a csv.
Args:
filename: Filename to import from.
Returns:
List of Elements, List of PDEntries
"""
with open(filename, "r", encoding="utf-8") as f:
reader = csv.reader(f, delimiter=unicode2str(","),
quotechar=unicode2str("\""),
quoting=csv.QUOTE_MINIMAL)
entries = list()
header_read = False
elements = None
for row in reader:
if not header_read:
elements = row[1:(len(row) - 1)]
header_read = True
else:
name = row[0]
energy = float(row[-1])
comp = dict()
for ind in range(1, len(row) - 1):
if float(row[ind]) > 0:
comp[Element(elements[ind - 1])] = float(row[ind])
entries.append(PDEntry(Composition(comp), energy, name))
elements = [Element(el) for el in elements]
return elements, entries
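# Example usage (illustrative sketch; the composition and energy below are
# made-up values, not real data):
#
# >>> entry = PDEntry(Composition("Li2O"), -14.2)
# >>> entry.name             # defaults to the reduced formula, "Li2O"
# >>> entry.energy_per_atom  # -14.2 / 3 atoms
# >>> entry.is_element       # False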
class GrandPotPDEntry(PDEntry):
"""
A grand potential phase diagram entry object encompassing all relevant data
for phase diagrams. Chemical potentials are given as an
{element: chemical potential} dict.
Args:
entry: A PDEntry-like object.
chempots: Chemical potential specification as {Element: float}.
name: Optional parameter to name the entry. Defaults to the reduced
chemical formula of the original entry.
"""
def __init__(self, entry, chempots, name=None):
comp = entry.composition
self.original_entry = entry
self.original_comp = comp
grandpot = entry.energy - sum([comp[el] * pot
for el, pot in chempots.items()])
self.chempots = chempots
new_comp_map = {el: comp[el] for el in comp.elements
if el not in chempots}
super(GrandPotPDEntry, self).__init__(new_comp_map, grandpot,
entry.name)
self.name = name if name else entry.name
@property
def is_element(self):
"""
True if the entry is an element.
"""
return self.original_comp.is_element
def __repr__(self):
chempot_str = " ".join(["mu_%s = %.4f" % (el, mu)
for el, mu in self.chempots.items()])
return "GrandPotPDEntry with original composition " + \
"{}, energy = {:.4f}, {}".format(self.original_entry.composition,
self.original_entry.energy,
chempot_str)
def __str__(self):
return self.__repr__()
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry": self.original_entry.as_dict(),
"chempots": {el.symbol: u for el, u in self.chempots.items()},
"name": self.name}
@classmethod
def from_dict(cls, d):
chempots = {Element(symbol): u for symbol, u in d["chempots"].items()}
entry = MontyDecoder().process_decoded(d["entry"])
return cls(entry, chempots, d["name"])
def __getattr__(self, a):
"""
Delegate attribute to original entry if available.
"""
if hasattr(self.original_entry, a):
return getattr(self.original_entry, a)
raise AttributeError(a)
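# Example usage (illustrative sketch; the energy and chemical potential are
# arbitrary made-up values). Opening the system to O removes O from the
# composition coordinate and shifts the energy to the grand potential
# E - mu_O * n_O:
#
# >>> li2o = PDEntry(Composition("Li2O"), -14.2)
# >>> open_entry = GrandPotPDEntry(li2o, {Element("O"): -5.0})
# >>> open_entry.energy       # -14.2 - (-5.0) * 1 = -9.2
# >>> open_entry.composition  # Li only; O has been projected out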
class TransformedPDEntry(PDEntry):
"""
This class represents a TransformedPDEntry, which allows for a PDEntry to be
transformed to a different composition coordinate space. It is used in the
construction of phase diagrams that do not have elements as the terminal
compositions.
Args:
comp (Composition): Transformed composition as a Composition.
original_entry (PDEntry): Original entry that this entry arose from.
"""
def __init__(self, comp, original_entry):
super(TransformedPDEntry, self).__init__(comp, original_entry.energy)
self.original_entry = original_entry
self.name = original_entry.name
def __getattr__(self, a):
"""
Delegate attribute to original entry if available.
"""
if hasattr(self.original_entry, a):
return getattr(self.original_entry, a)
raise AttributeError(a)
def __repr__(self):
output = ["TransformedPDEntry {}".format(self.composition),
" with original composition {}"
.format(self.original_entry.composition),
", E = {:.4f}".format(self.original_entry.energy)]
return "".join(output)
def __str__(self):
return self.__repr__()
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"entry": self.original_entry.as_dict(),
"composition": self.composition}
@classmethod
def from_dict(cls, d):
entry = MontyDecoder().process_decoded(d["entry"])
return cls(d["composition"], entry)
class PhaseDiagram(MSONable):
"""
Simple phase diagram class taking in elements and entries as inputs.
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
.. attribute: elements:
Elements in the phase diagram.
.. attribute: all_entries
All entries provided for Phase Diagram construction. Note that this
does not mean that all these entries are actually used in the phase
diagram. For example, this includes the positive formation energy
entries that are filtered out before Phase Diagram construction.
.. attribute: qhull_data
Data used in the convex hull operation. This is essentially a matrix of
composition data and energy per atom values created from qhull_entries.
.. attribute: qhull_entries:
Actual entries used in convex hull. Excludes all positive formation
energy entries.
.. attribute: dim
The dimensionality of the phase diagram.
.. attribute: facets
Facets of the phase diagram in the form of [[1,2,3],[4,5,6]...].
For a ternary, it is the indices (references to qhull_entries and
qhull_data) for the vertices of the phase triangles. Similarly
extended to higher D simplices for higher dimensions.
.. attribute: el_refs:
List of elemental references for the phase diagrams. These are
entries corresponding to the lowest energy element entries for simple
compositional phase diagrams.
.. attribute: simplices:
The simplices of the phase diagram as a list of np.ndarray, i.e.,
the list of stable compositional coordinates in the phase diagram.
"""
# Tolerance for determining if formation energy is positive.
formation_energy_tol = 1e-11
numerical_tol = 1e-8
def __init__(self, entries, elements=None):
"""
Standard constructor for phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
the entries themselves.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
elements = list(elements)
dim = len(elements)
get_reduced_comp = lambda e: e.composition.reduced_composition
entries = sorted(entries, key=get_reduced_comp)
el_refs = {}
min_entries = []
all_entries = []
for c, g in itertools.groupby(entries, key=get_reduced_comp):
g = list(g)
min_entry = min(g, key=lambda e: e.energy_per_atom)
if c.is_element:
el_refs[c.elements[0]] = min_entry
min_entries.append(min_entry)
all_entries.extend(g)
if len(el_refs) != dim:
raise PhaseDiagramError(
"There are no entries associated with a terminal element!.")
data = np.array([
[e.composition.get_atomic_fraction(el) for el in elements] + [
e.energy_per_atom]
for e in min_entries
])
# Use only entries with negative formation energy
vec = [el_refs[el].energy_per_atom for el in elements] + [-1]
form_e = -np.dot(data, vec)
inds = np.where(form_e < -self.formation_energy_tol)[0].tolist()
# Add the elemental references
inds.extend([min_entries.index(el) for el in el_refs.values()])
qhull_entries = [min_entries[i] for i in inds]
qhull_data = data[inds][:, 1:]
# Add an extra point to enforce full dimensionality.
# This point will be present in all upper hull facets.
extra_point = np.zeros(dim) + 1 / dim
extra_point[-1] = np.max(qhull_data) + 1
qhull_data = np.concatenate([qhull_data, [extra_point]], axis=0)
if dim == 1:
self.facets = [qhull_data.argmin(axis=0)]
else:
facets = get_facets(qhull_data)
finalfacets = []
for facet in facets:
# Skip facets that include the extra point
if max(facet) == len(qhull_data) - 1:
continue
m = qhull_data[facet]
m[:, -1] = 1
if abs(np.linalg.det(m)) > 1e-14:
finalfacets.append(facet)
self.facets = finalfacets
self.simplexes = [Simplex(qhull_data[f, :-1]) for f in self.facets]
self.all_entries = all_entries
self.qhull_data = qhull_data
self.dim = dim
self.el_refs = el_refs
self.elements = elements
self.qhull_entries = qhull_entries
self._stable_entries = set(self.qhull_entries[i] for i in
set(itertools.chain(*self.facets)))
def pd_coords(self, comp):
"""
The phase diagram is generated in a reduced dimensional space
(n_elements - 1). This function returns the coordinates in that space.
These coordinates are compatible with the stored simplex objects.
"""
if set(comp.elements).difference(self.elements):
raise ValueError('{} has elements not in the phase diagram {}'
''.format(comp, self.elements))
return np.array(
[comp.get_atomic_fraction(el) for el in self.elements[1:]])
@property
def all_entries_hulldata(self):
data = []
for entry in self.all_entries:
comp = entry.composition
row = [comp.get_atomic_fraction(el) for el in self.elements]
row.append(entry.energy_per_atom)
data.append(row)
return np.array(data)[:, 1:]
@property
def unstable_entries(self):
"""
Entries that are unstable in the phase diagram. Includes positive
formation energy entries.
"""
return [e for e in self.all_entries if e not in self.stable_entries]
@property
def stable_entries(self):
"""
Returns the stable entries in the phase diagram.
"""
return self._stable_entries
def get_form_energy(self, entry):
"""
Returns the formation energy for an entry (NOT normalized) from the
elemental references.
Args:
entry: A PDEntry-like object.
Returns:
Formation energy from the elemental references.
"""
c = entry.composition
return entry.energy - sum([c[el] * self.el_refs[el].energy_per_atom
for el in c.elements])
def get_form_energy_per_atom(self, entry):
"""
Returns the formation energy per atom for an entry from the
elemental references.
Args:
entry: A PDEntry-like object
Returns:
Formation energy **per atom** from the elemental references.
"""
return self.get_form_energy(entry) / entry.composition.num_atoms
def __repr__(self):
return self.__str__()
def __str__(self):
symbols = [el.symbol for el in self.elements]
output = ["{} phase diagram".format("-".join(symbols)),
"{} stable phases: ".format(len(self.stable_entries)),
", ".join([entry.name
for entry in self.stable_entries])]
return "\n".join(output)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
entries = [PDEntry.from_dict(dd) for dd in d["all_entries"]]
elements = [Element.from_dict(dd) for dd in d["elements"]]
return cls(entries, elements)
@lru_cache(1)
def _get_facet_and_simplex(self, comp):
"""
Get any facet that a composition falls into. Cached so successive
calls at same composition are fast.
"""
c = self.pd_coords(comp)
for f, s in zip(self.facets, self.simplexes):
if s.in_simplex(c, PhaseDiagram.numerical_tol / 10):
return f, s
raise RuntimeError("No facet found for comp = {}".format(comp))
def _get_facet_chempots(self, facet):
"""
Calculates the chemical potentials for each element within a facet.
Args:
facet: Facet of the phase diagram.
Returns:
{ element: chempot } for all elements in the phase diagram.
"""
complist = [self.qhull_entries[i].composition for i in facet]
energylist = [self.qhull_entries[i].energy_per_atom for i in facet]
m = [[c.get_atomic_fraction(e) for e in self.elements] for c in
complist]
chempots = np.linalg.solve(m, energylist)
return dict(zip(self.elements, chempots))
def get_decomposition(self, comp):
"""
Provides the decomposition at a particular composition.
Args:
comp: A composition
Returns:
Decomposition as a dict of {Entry: amount}
"""
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
return {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
def get_hull_energy(self, comp):
"""
Args:
comp (Composition): Input composition
Returns:
Energy of lowest energy equilibrium at desired composition. Not
normalized by atoms, i.e. E(Li4O2) = 2 * E(Li2O)
"""
e = 0
for k, v in self.get_decomposition(comp).items():
e += k.energy_per_atom * v
return e * comp.num_atoms
def get_decomp_and_e_above_hull(self, entry, allow_negative=False):
"""
Provides the decomposition and energy above convex hull for an entry.
Due to caching, can be much faster if entries with the same composition
are processed together.
Args:
entry: A PDEntry like object
allow_negative: Whether to allow negative e_above_hulls. Used to
calculate equilibrium reaction energies. Defaults to False.
Returns:
(decomp, energy above convex hull) Stable entries should have
energy above hull of 0. The decomposition is provided as a dict of
{Entry: amount}.
"""
if entry in self.stable_entries:
return {entry: 1}, 0
comp = entry.composition
facet, simplex = self._get_facet_and_simplex(comp)
decomp_amts = simplex.bary_coords(self.pd_coords(comp))
decomp = {self.qhull_entries[f]: amt
for f, amt in zip(facet, decomp_amts)
if abs(amt) > PhaseDiagram.numerical_tol}
energies = [self.qhull_entries[i].energy_per_atom for i in facet]
ehull = entry.energy_per_atom - np.dot(decomp_amts, energies)
if allow_negative or ehull >= -PhaseDiagram.numerical_tol:
return decomp, ehull
raise ValueError("No valid decomp found!")
def get_e_above_hull(self, entry):
"""
Provides the energy above convex hull for an entry
Args:
entry: A PDEntry like object
Returns:
Energy above convex hull of entry. Stable entries should have
energy above hull of 0.
"""
return self.get_decomp_and_e_above_hull(entry)[1]
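# Example usage of the hull queries above (illustrative sketch; the Li-O
# energies are made-up values). With elemental Li and O references at 0 eV
# and a stable Li2O entry, a hypothetical Li2O2 entry decomposes into
# Li2O + O2 and has a positive energy above the hull:
#
# >>> entries = [PDEntry(Composition("Li"), 0.0),
# ...            PDEntry(Composition("O2"), 0.0),
# ...            PDEntry(Composition("Li2O"), -14.2),
# ...            PDEntry(Composition("Li2O2"), -13.0)]
# >>> pd = PhaseDiagram(entries)
# >>> pd.get_decomposition(Composition("Li2O2"))  # {Li2O entry: ..., O2 entry: ...}
# >>> pd.get_e_above_hull(entries[-1])            # > 0, i.e. Li2O2 is unstable here
# >>> pd.get_hull_energy(Composition("Li4O2"))    # 2 * E(Li2O) = -28.4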
def get_equilibrium_reaction_energy(self, entry):
"""
Provides the reaction energy of a stable entry from the neighboring
equilibrium stable entries (also known as the inverse distance to
hull).
Args:
entry: A PDEntry like object
Returns:
Equilibrium reaction energy of entry. Stable entries should have
equilibrium reaction energy <= 0.
"""
if entry not in self.stable_entries:
raise ValueError("Equilibrium reaction energy is available only "
"for stable entries.")
if entry.is_element:
return 0
entries = [e for e in self.stable_entries if e != entry]
modpd = PhaseDiagram(entries, self.elements)
return modpd.get_decomp_and_e_above_hull(entry,
allow_negative=True)[1]
def get_composition_chempots(self, comp):
facet = self._get_facet_and_simplex(comp)[0]
return self._get_facet_chempots(facet)
def get_transition_chempots(self, element):
"""
Get the critical chemical potentials for an element in the Phase
Diagram.
Args:
element: An element. Has to be in the PD in the first place.
Returns:
A sorted sequence of critical chemical potentials, from less
negative to more negative.
"""
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with "
"elements in the phase diagram.")
critical_chempots = []
for facet in self.facets:
chempots = self._get_facet_chempots(facet)
critical_chempots.append(chempots[element])
clean_pots = []
for c in sorted(critical_chempots):
if len(clean_pots) == 0:
clean_pots.append(c)
else:
if abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol:
clean_pots.append(c)
clean_pots.reverse()
return tuple(clean_pots)
def get_critical_compositions(self, comp1, comp2):
"""
Get the critical compositions along the tieline between two
compositions. I.e. where the decomposition products change.
The endpoints are also returned.
Args:
comp1, comp2 (Composition): compositions that define the tieline
Returns:
[(Composition)]: list of critical compositions. All are of
the form x * comp1 + (1-x) * comp2
"""
n1 = comp1.num_atoms
n2 = comp2.num_atoms
pd_els = self.elements
# the reduced dimensionality Simplexes don't use the
# first element in the PD
c1 = self.pd_coords(comp1)
c2 = self.pd_coords(comp2)
# none of the projections work if c1 == c2, so just return *copies*
# of the inputs
if np.all(c1 == c2):
return [comp1.copy(), comp2.copy()]
intersections = [c1, c2]
for sc in self.simplexes:
intersections.extend(sc.line_intersection(c1, c2))
intersections = np.array(intersections)
# find position along line
l = (c2 - c1)
l /= np.sum(l ** 2) ** 0.5
proj = np.dot(intersections - c1, l)
# only take compositions between endpoints
proj = proj[np.logical_and(proj > -self.numerical_tol,
proj < proj[1] + self.numerical_tol)]
proj.sort()
# only unique compositions
valid = np.ones(len(proj), dtype=np.bool)
valid[1:] = proj[1:] > proj[:-1] + self.numerical_tol
proj = proj[valid]
ints = c1 + l * proj[:, None]
# reconstruct full-dimensional composition array
cs = np.concatenate([np.array([1 - np.sum(ints, axis=-1)]).T,
ints], axis=-1)
# mixing fraction when compositions are normalized
x = proj / np.dot(c2 - c1, l)
# mixing fraction when compositions are not normalized
x_unnormalized = x * n1 / (n2 + x * (n1 - n2))
num_atoms = n1 + (n2 - n1) * x_unnormalized
cs *= num_atoms[:, None]
return [Composition((c, v) for c, v in zip(pd_els, m)) for m in cs]
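# Example usage (illustrative sketch; "pd" is a PhaseDiagram such as the
# made-up Li-O one sketched above):
#
# >>> pd.get_transition_chempots(Element("O"))  # critical mu_O values, least to most negative
# >>> pd.get_critical_compositions(Composition("Li"), Composition("O2"))
# # compositions along the Li-O2 tieline where the decomposition products change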
def get_element_profile(self, element, comp, comp_tol=1e-5):
"""
Provides the element evolution data for a composition.
For example, can be used to analyze Li conversion voltages by varying
uLi and looking at the phases formed. Also can be used to analyze O2
evolution by varying uO2.
Args:
element: An element. Must be in the phase diagram.
comp: A Composition
comp_tol: The tolerance to use when calculating decompositions.
Phases with amounts less than this tolerance are excluded.
Defaults to 1e-5.
Returns:
Evolution data as a list of dictionaries of the following format:
[{'chempot': -10.487582010000001, 'evolution': -2.0,
'reaction': Reaction Object}, ...]
"""
element = get_el_sp(element)
element = Element(element.symbol)
if element not in self.elements:
raise ValueError("get_transition_chempots can only be called with"
" elements in the phase diagram.")
gccomp = Composition({el: amt for el, amt in comp.items()
if el != element})
elref = self.el_refs[element]
elcomp = Composition(element.symbol)
evolution = []
for cc in self.get_critical_compositions(elcomp, gccomp)[1:]:
decomp_entries = self.get_decomposition(cc).keys()
decomp = [k.composition for k in decomp_entries]
rxn = Reaction([comp], decomp + [elcomp])
rxn.normalize_to(comp)
c = self.get_composition_chempots(cc + elcomp * 1e-5)[element]
amt = -rxn.coeffs[rxn.all_comp.index(elcomp)]
evolution.append({'chempot': c,
'evolution': amt,
'element_reference': elref,
'reaction': rxn, 'entries': decomp_entries})
return evolution
def get_chempot_range_map(self, elements, referenced=True, joggle=True):
"""
Returns a chemical potential range map for each stable entry.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges
of all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: If True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
Returns a dict of the form {entry: [simplices]}. The list of
simplices are the sides of the N-1 dim polytope bounding the
allowable chemical potential range of each entry.
"""
all_chempots = []
pd = self
facets = pd.facets
for facet in facets:
chempots = self._get_facet_chempots(facet)
all_chempots.append([chempots[el] for el in pd.elements])
inds = [pd.elements.index(el) for el in elements]
el_energies = {el: 0.0 for el in elements}
if referenced:
el_energies = {el: pd.el_refs[el].energy_per_atom
for el in elements}
chempot_ranges = collections.defaultdict(list)
vertices = [list(range(len(self.elements)))]
if len(all_chempots) > len(self.elements):
vertices = get_facets(all_chempots, joggle=joggle)
for ufacet in vertices:
for combi in itertools.combinations(ufacet, 2):
data1 = facets[combi[0]]
data2 = facets[combi[1]]
common_ent_ind = set(data1).intersection(set(data2))
if len(common_ent_ind) == len(elements):
common_entries = [pd.qhull_entries[i]
for i in common_ent_ind]
data = np.array([[all_chempots[i][j]
- el_energies[pd.elements[j]]
for j in inds] for i in combi])
sim = Simplex(data)
for entry in common_entries:
chempot_ranges[entry].append(sim)
return chempot_ranges
def getmu_vertices_stability_phase(self, target_comp, dep_elt, tol_en=1e-2):
"""
Returns a set of chemical potentials corresponding to the vertices of the
simplex in the chemical potential phase diagram.
The simplex is built using all elements in target_comp except dep_elt.
The chemical potential of dep_elt is computed from the target composition
energy. This method is useful, for instance, to get the limiting conditions
for defect computations.
Args:
target_comp: A Composition object
dep_elt: the element for which the chemical potential is computed from the energy of
the stable phase at the target composition
tol_en: energy tolerance used to identify duplicate vertices
Returns:
[{Element:mu}]: An array of conditions on simplex vertices for
which each element has a chemical potential set to a given
value. "absolute" values (i.e., not referenced to element energies)
"""
muref = np.array([self.el_refs[e].energy_per_atom
for e in self.elements if e != dep_elt])
chempot_ranges = self.get_chempot_range_map(
[e for e in self.elements if e != dep_elt])
for e in self.elements:
if e not in target_comp.elements:
target_comp = target_comp + Composition({e: 0.0})
coeff = [-target_comp[e] for e in self.elements if e != dep_elt]
for e in chempot_ranges.keys():
if e.composition.reduced_composition == \
target_comp.reduced_composition:
multiplicator = e.composition[dep_elt] / target_comp[dep_elt]
ef = e.energy / multiplicator
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
elts = [e for e in self.elements if e != dep_elt]
res = {}
for i in range(len(elts)):
res[elts[i]] = v[i] + muref[i]
res[dep_elt] = (np.dot(v + muref, coeff) + ef) / \
target_comp[dep_elt]
already_in = False
for di in all_coords:
dict_equals = True
for k in di:
if abs(di[k] - res[k]) > tol_en:
dict_equals = False
break
if dict_equals:
already_in = True
break
if not already_in:
all_coords.append(res)
return all_coords
def get_chempot_range_stability_phase(self, target_comp, open_elt):
"""
Returns a set of chemical potentials corresponding to the maximum and
minimum chemical potential of the open element for a given composition. It
is quite common to have, for instance, a ternary oxide (e.g., ABO3) for
which you want to know which A and B chemical potentials lead
to the highest and lowest oxygen chemical potential (reducing and
oxidizing conditions). This is useful for defect computations.
Args:
target_comp: A Composition object
open_elt: Element that you want to constrain to be max or min
Returns:
{Element:(mu_min,mu_max)}: Chemical potentials are given in
"absolute" values (i.e., not referenced to 0)
"""
muref = np.array([self.el_refs[e].energy_per_atom
for e in self.elements if e != open_elt])
chempot_ranges = self.get_chempot_range_map(
[e for e in self.elements if e != open_elt])
for e in self.elements:
if e not in target_comp.elements:
target_comp = target_comp + Composition({e: 0.0})
coeff = [-target_comp[e] for e in self.elements if e != open_elt]
max_open = -float('inf')
min_open = float('inf')
max_mus = None
min_mus = None
for e in chempot_ranges.keys():
if e.composition.reduced_composition == \
target_comp.reduced_composition:
multiplicator = e.composition[open_elt] / target_comp[open_elt]
ef = e.energy / multiplicator
all_coords = []
for s in chempot_ranges[e]:
for v in s._coords:
all_coords.append(v)
if (np.dot(v + muref, coeff) + ef) / target_comp[
open_elt] > max_open:
max_open = (np.dot(v + muref, coeff) + ef) / \
target_comp[open_elt]
max_mus = v
if (np.dot(v + muref, coeff) + ef) / target_comp[
open_elt] < min_open:
min_open = (np.dot(v + muref, coeff) + ef) / \
target_comp[open_elt]
min_mus = v
elts = [e for e in self.elements if e != open_elt]
res = {}
for i in range(len(elts)):
res[elts[i]] = (min_mus[i] + muref[i], max_mus[i] + muref[i])
res[open_elt] = (min_open, max_open)
return res
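# Example usage of the chemical potential maps above (illustrative sketch;
# "pd" stands for a ternary PhaseDiagram, e.g. a hypothetical Li-Fe-O one
# built from assumed entries):
#
# >>> ranges = pd.get_chempot_range_map([Element("Li"), Element("O")])
# >>> for entry, simplices in ranges.items():
# ...     print(entry.name, len(simplices))  # sides of each stability polytope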
class GrandPotentialPhaseDiagram(PhaseDiagram):
"""
A class representing a Grand potential phase diagram. Grand potential phase
diagrams are essentially phase diagrams that are open to one or more
components. To construct such phase diagrams, the relevant free energy is
the grand potential, which can be written as the Legendre transform of the
Gibbs free energy as follows:
Grand potential = G - u_X N_X
The algorithm is based on the work in the following papers:
1. S. P. Ong, L. Wang, B. Kang, and G. Ceder, Li-Fe-P-O2 Phase Diagram from
First Principles Calculations. Chem. Mater., 2008, 20(5), 1798-1807.
doi:10.1021/cm702327g
2. S. P. Ong, A. Jain, G. Hautier, B. Kang, G. Ceder, Thermal stabilities
of delithiated olivine MPO4 (M=Fe, Mn) cathodes investigated using first
principles calculations. Electrochem. Comm., 2010, 12(3), 427-430.
doi:10.1016/j.elecom.2010.01.010
"""
def __init__(self, entries, chempots, elements=None):
"""
Standard constructor for grand potential phase diagram.
Args:
entries ([PDEntry]): A list of PDEntry-like objects having an
energy, energy_per_atom and composition.
chempots {Element: float}: Specify the chemical potentials
of the open elements.
elements ([Element]): Optional list of elements in the phase
diagram. If set to None, the elements are determined from
the entries themselves.
"""
if elements is None:
elements = set()
for entry in entries:
elements.update(entry.composition.elements)
self.chempots = {get_el_sp(el): u for el, u in chempots.items()}
elements = set(elements).difference(self.chempots.keys())
all_entries = []
for e in entries:
if len(set(e.composition.elements).intersection(set(elements))) > 0:
all_entries.append(GrandPotPDEntry(e, self.chempots))
super(GrandPotentialPhaseDiagram, self).__init__(all_entries, elements)
def __str__(self):
output = []
chemsys = "-".join([el.symbol for el in self.elements])
output.append("{} grand potential phase diagram with ".format(chemsys))
output[-1] += ", ".join(["u{}={}".format(el, v)
for el, v in self.chempots.items()])
output.append("{} stable phases: ".format(len(self.stable_entries)))
output.append(", ".join([entry.name
for entry in self.stable_entries]))
return "\n".join(output)
def as_dict(self):
return {"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"all_entries": [e.as_dict() for e in self.all_entries],
"chempots": self.chempots,
"elements": [e.as_dict() for e in self.elements]}
@classmethod
def from_dict(cls, d):
entries = MontyDecoder().process_decoded(d["all_entries"])
elements = MontyDecoder().process_decoded(d["elements"])
return cls(entries, d["chempots"], elements)
class CompoundPhaseDiagram(PhaseDiagram):
"""
Generates phase diagrams from compounds as terminations instead of
elements.
"""
# Tolerance for determining if amount of a composition is positive.
amount_tol = 1e-5
def __init__(self, entries, terminal_compositions,
normalize_terminal_compositions=True):
"""
Initializes a CompoundPhaseDiagram.
Args:
entries ([PDEntry]): Sequence of input entries. For example,
if you want a Li2O-P2O5 phase diagram, you might have all
Li-P-O entries as an input.
terminal_compositions ([Composition]): Terminal compositions of
phase space. In the Li2O-P2O5 example, these will be the
Li2O and P2O5 compositions.
normalize_terminal_compositions (bool): Whether to normalize the
terminal compositions to a per atom basis. If normalized,
the energy above hulls will be consistent
for comparison across systems. Non-normalized terminals are
more intuitive in terms of compositional breakdowns.
"""
self.original_entries = entries
self.terminal_compositions = terminal_compositions
self.normalize_terminals = normalize_terminal_compositions
(pentries, species_mapping) = \
self.transform_entries(entries, terminal_compositions)
self.species_mapping = species_mapping
super(CompoundPhaseDiagram, self).__init__(
pentries, elements=species_mapping.values())
def transform_entries(self, entries, terminal_compositions):
"""
Method to transform all entries to the composition coordinate in the
terminal compositions. If the entry does not fall within the space
defined by the terminal compositions, they are excluded. For example,
Li3PO4 is mapped into a Li2O:1.5, P2O5:0.5 composition. The terminal
compositions are represented by DummySpecies.
Args:
entries: Sequence of all input entries
terminal_compositions: Terminal compositions of phase space.
Returns:
Sequence of TransformedPDEntries falling within the phase space.
"""
new_entries = []
if self.normalize_terminals:
fractional_comp = [c.fractional_composition
for c in terminal_compositions]
else:
fractional_comp = terminal_compositions
# Map terminal compositions to unique dummy species.
sp_mapping = collections.OrderedDict()
for i, comp in enumerate(fractional_comp):
sp_mapping[comp] = DummySpecie("X" + chr(102 + i))
for entry in entries:
try:
rxn = Reaction(fractional_comp, [entry.composition])
rxn.normalize_to(entry.composition)
# We only allow reactions that have positive amounts of
# reactants.
if all([rxn.get_coeff(comp) <= CompoundPhaseDiagram.amount_tol
for comp in fractional_comp]):
newcomp = {sp_mapping[comp]: -rxn.get_coeff(comp)
for comp in fractional_comp}
newcomp = {k: v for k, v in newcomp.items()
if v > CompoundPhaseDiagram.amount_tol}
transformed_entry = \
TransformedPDEntry(Composition(newcomp), entry)
new_entries.append(transformed_entry)
except ReactionError:
# If the reaction can't be balanced, the entry does not fall
# into the phase space. We ignore them.
pass
return new_entries, sp_mapping
def as_dict(self):
return {
"@module": self.__class__.__module__,
"@class": self.__class__.__name__,
"original_entries": [e.as_dict() for e in self.original_entries],
"terminal_compositions": [c.as_dict()
for c in self.terminal_compositions],
"normalize_terminal_compositions":
self.normalize_terminals}
@classmethod
def from_dict(cls, d):
dec = MontyDecoder()
entries = dec.process_decoded(d["original_entries"])
terminal_compositions = dec.process_decoded(d["terminal_compositions"])
return cls(entries, terminal_compositions,
d["normalize_terminal_compositions"])
class ReactionDiagram(object):
def __init__(self, entry1, entry2, all_entries, tol=1e-4,
float_fmt="%.4f"):
"""
Analyzes the possible reactions between a pair of compounds, e.g.,
an electrolyte and an electrode.
Args:
entry1 (ComputedEntry): Entry for 1st component. Note that
corrections, if any, must already be pre-applied. This is to
give flexibility for different kinds of corrections, e.g.,
if a particular entry is fitted to experimental data (such
as the EC molecule).
entry2 (ComputedEntry): Entry for 2nd component. Note that
corrections must already be pre-applied. This is to
give flexibility for different kinds of corrections, e.g.,
if a particular entry is fitted to experimental data (such
as the EC molecule).
all_entries ([ComputedEntry]): All other entries to be
considered in the analysis. Note that corrections, if any,
must already be pre-applied.
tol (float): Tolerance to be used to determine validity of reaction.
float_fmt (str): Formatting string to be applied to all floats.
Determines number of decimal places in reaction string.
"""
elements = set()
for e in [entry1, entry2]:
elements.update([el.symbol for el in e.composition.elements])
elements = tuple(elements) # Fix elements to ensure order.
comp_vec1 = np.array([entry1.composition.get_atomic_fraction(el)
for el in elements])
comp_vec2 = np.array([entry2.composition.get_atomic_fraction(el)
for el in elements])
r1 = entry1.composition.reduced_composition
r2 = entry2.composition.reduced_composition
logger.debug("%d total entries." % len(all_entries))
pd = PhaseDiagram(all_entries + [entry1, entry2])
terminal_formulas = [entry1.composition.reduced_formula,
entry2.composition.reduced_formula]
logger.debug("%d stable entries" % len(pd.stable_entries))
logger.debug("%d facets" % len(pd.facets))
logger.debug("%d qhull_entries" % len(pd.qhull_entries))
rxn_entries = []
done = []
fmt = lambda fl: float_fmt % fl
for facet in pd.facets:
for face in itertools.combinations(facet, len(facet) - 1):
face_entries = [pd.qhull_entries[i] for i in face]
if any([e.composition.reduced_formula in terminal_formulas
for e in face_entries]):
continue
try:
m = []
for e in face_entries:
m.append([e.composition.get_atomic_fraction(el)
for el in elements])
m.append(comp_vec2 - comp_vec1)
m = np.array(m).T
coeffs = np.linalg.solve(m, comp_vec2)
x = coeffs[-1]
if all([c >= -tol for c in coeffs]) and \
(abs(sum(coeffs[:-1]) - 1) < tol) and \
(tol < x < 1 - tol):
c1 = x / r1.num_atoms
c2 = (1 - x) / r2.num_atoms
factor = 1 / (c1 + c2)
c1 *= factor
c2 *= factor
# Avoid duplicate reactions.
if any([np.allclose([c1, c2], cc) for cc in done]):
continue
done.append((c1, c2))
rxn_str = "%s %s + %s %s -> " % (
fmt(c1), r1.reduced_formula,
fmt(c2), r2.reduced_formula)
products = []
energy = - (x * entry1.energy_per_atom +
(1 - x) * entry2.energy_per_atom)
for c, e in zip(coeffs[:-1], face_entries):
if c > tol:
r = e.composition.reduced_composition
products.append("%s %s" % (
fmt(c / r.num_atoms * factor),
r.reduced_formula))
energy += c * e.energy_per_atom
rxn_str += " + ".join(products)
comp = x * comp_vec1 + (1 - x) * comp_vec2
entry = PDEntry(
Composition(dict(zip(elements, comp))),
energy=energy, attribute=rxn_str)
rxn_entries.append(entry)
except np.linalg.LinAlgError as ex:
logger.debug("Reactants = %s" % (", ".join([
entry1.composition.reduced_formula,
entry2.composition.reduced_formula])))
logger.debug("Products = %s" % (
", ".join([e.composition.reduced_formula
for e in face_entries])))
rxn_entries = sorted(rxn_entries, key=lambda e: e.name, reverse=True)
self.entry1 = entry1
self.entry2 = entry2
self.rxn_entries = rxn_entries
self.labels = collections.OrderedDict()
for i, e in enumerate(rxn_entries):
self.labels[str(i + 1)] = e.attribute
e.name = str(i + 1)
self.all_entries = all_entries
self.pd = pd
def get_compound_pd(self):
"""
Get the CompoundPhaseDiagram object, which can then be used for
plotting.
Returns:
(CompoundPhaseDiagram)
"""
# For this plot, since the reactions are reported in formation
# energies, we need to set the energies of the terminal compositions
# to 0. So we create copies with 0 energy.
entry1 = PDEntry(self.entry1.composition, 0)
entry2 = PDEntry(self.entry2.composition, 0)
cpd = CompoundPhaseDiagram(
self.rxn_entries + [entry1, entry2],
[Composition(entry1.composition.reduced_formula),
Composition(entry2.composition.reduced_formula)],
normalize_terminal_compositions=False)
return cpd
class PhaseDiagramError(Exception):
"""
An exception class for Phase Diagram generation.
"""
pass
def get_facets(qhull_data, joggle=False):
"""
Get the simplex facets for the Convex hull.
Args:
qhull_data (np.ndarray): The data from which to construct the convex
hull as a Nxd array (N being number of data points and d being the
dimension)
joggle (boolean): Whether to joggle the input to avoid precision
errors.
Returns:
List of simplices of the Convex Hull.
"""
if joggle:
return ConvexHull(qhull_data, qhull_options="QJ i").simplices
else:
return ConvexHull(qhull_data, qhull_options="Qt i").simplices
class PDPlotter(object):
"""
A plotter class for phase diagrams.
Args:
phasediagram: PhaseDiagram object.
show_unstable (float): Whether unstable phases will be plotted as
well (as red crosses). If a number > 0 is entered, all phases with
ehull < show_unstable will be shown.
\\*\\*plotkwargs: Keyword args passed to matplotlib.pyplot.plot. Can
be used to customize markers etc. If not set, the default is
{
"markerfacecolor": (0.2157, 0.4941, 0.7216),
"markersize": 10,
"linewidth": 3
}
"""
def __init__(self, phasediagram, show_unstable=0, **plotkwargs):
# note: palettable imports matplotlib
from palettable.colorbrewer.qualitative import Set1_3
self._pd = phasediagram
self._dim = len(self._pd.elements)
if self._dim > 4:
raise ValueError("Only 1-4 components supported!")
self.lines = uniquelines(self._pd.facets) if self._dim > 1 else \
[[self._pd.facets[0][0], self._pd.facets[0][0]]]
self.show_unstable = show_unstable
colors = Set1_3.mpl_colors
self.plotkwargs = plotkwargs or {
"markerfacecolor": colors[2],
"markersize": 10,
"linewidth": 3
}
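# Example usage (illustrative sketch; "pd" is any 1-4 component PhaseDiagram,
# e.g. the made-up Li-O diagram sketched earlier):
#
# >>> plotter = PDPlotter(pd, show_unstable=0.2)  # also mark phases < 0.2 eV/atom above hull
# >>> plotter.show()                              # or plotter.write_image("pd.svg")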
@property
def pd_plot_data(self):
"""
Plot data for phase diagram.
2-comp - Full hull with energies
3/4-comp - Projection into 2D or 3D Gibbs triangle.
Returns:
(lines, stable_entries, unstable_entries):
- lines is a list of list of coordinates for lines in the PD.
- stable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- unstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
lines = []
stable_entries = {}
for line in self.lines:
entry1 = entries[line[0]]
entry2 = entries[line[1]]
if self._dim < 3:
x = [data[line[0]][0], data[line[1]][0]]
y = [pd.get_form_energy_per_atom(entry1),
pd.get_form_energy_per_atom(entry2)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord(data[line, 0:2])
else:
coord = tet_coord(data[line, 0:3])
lines.append(coord)
labelcoord = list(zip(*coord))
stable_entries[labelcoord[0]] = entry1
stable_entries[labelcoord[1]] = entry2
all_entries = pd.all_entries
all_data = np.array(pd.all_entries_hulldata)
unstable_entries = dict()
stable = pd.stable_entries
for i in range(0, len(all_entries)):
entry = all_entries[i]
if entry not in stable:
if self._dim < 3:
x = [all_data[i][0], all_data[i][0]]
y = [pd.get_form_energy_per_atom(entry),
pd.get_form_energy_per_atom(entry)]
coord = [x, y]
elif self._dim == 3:
coord = triangular_coord([all_data[i, 0:2],
all_data[i, 0:2]])
else:
coord = tet_coord([all_data[i, 0:3], all_data[i, 0:3],
all_data[i, 0:3]])
labelcoord = list(zip(*coord))
unstable_entries[entry] = labelcoord[0]
return lines, stable_entries, unstable_entries
def get_plot(self, label_stable=True, label_unstable=True, ordering=None,
energy_colormap=None, process_attributes=False, plt=None):
if self._dim < 4:
plt = self._get_2d_plot(label_stable, label_unstable, ordering,
energy_colormap, plt=plt,
process_attributes=process_attributes)
elif self._dim == 4:
plt = self._get_3d_plot(label_stable)
return plt
def plot_element_profile(self, element, comp, show_label_index=None,
xlim=5):
"""
Draw the element profile plot for a composition varying different
chemical potential of an element.
The x value is the negative of the chemical potential referenced to the
elemental chemical potential. For example, if Element("Li") is chosen,
x = -(µLi - µLi0), which corresponds to the voltage versus a metal anode.
The y values represent the element uptake in this composition
(unit: per atom). All reactions are printed to help choose the
profile steps you want to label in the plot.
Args:
element (Element): An element of which the chemical potential is
considered. It also must be in the phase diagram.
comp (Composition): A composition.
show_label_index (list of integers): The profile steps whose reaction
products you want to label in the plot. Defaults to None (no
annotation for reaction products). To label a profile step, add its
index to show_label_index; profile steps are counted from zero. For
example, show_label_index=[0, 2, 5] labels profile steps 0, 2 and 5.
xlim (float): The max x value. x values range from 0 to xlim. Defaults to
5 eV.
Returns:
Plot of element profile evolution by varying the chemical potential
of an element.
"""
plt = pretty_plot(12, 8)
pd = self._pd
evolution = pd.get_element_profile(element, comp)
num_atoms = evolution[0]["reaction"].reactants[0].num_atoms
element_energy = evolution[0]['chempot']
for i, d in enumerate(evolution):
v = -(d["chempot"] - element_energy)
print ("index= %s, -\u0394\u03BC=%.4f(eV)," % (i, v), d["reaction"])
if i != 0:
plt.plot([x2, x2], [y1, d["evolution"] / num_atoms],
'k', linewidth=2.5)
x1 = v
y1 = d["evolution"] / num_atoms
if i != len(evolution) - 1:
x2 = - (evolution[i + 1]["chempot"] - element_energy)
else:
x2 = 5.0
if show_label_index is not None and i in show_label_index:
products = [re.sub(r"(\d+)", r"$_{\1}$", p.reduced_formula)
for p in d["reaction"].products
if p.reduced_formula != element.symbol]
plt.annotate(", ".join(products), xy=(v + 0.05, y1 + 0.05),
fontsize=24, color='r')
plt.plot([x1, x2], [y1, y1], 'r', linewidth=3)
else:
plt.plot([x1, x2], [y1, y1], 'k', linewidth=2.5)
plt.xlim((0, xlim))
plt.xlabel("-$\\Delta{\\mu}$ (eV)")
plt.ylabel("Uptake per atom")
return plt
def show(self, *args, **kwargs):
"""
Draws the phase diagram using Matplotlib and show it.
Args:
\\*args: Passed to get_plot.
\\*\\*kwargs: Passed to get_plot.
"""
self.get_plot(*args, **kwargs).show()
def _get_2d_plot(self, label_stable=True, label_unstable=True,
ordering=None, energy_colormap=None, vmin_mev=-60.0,
vmax_mev=60.0, show_colorbar=True,
process_attributes=False, plt=None):
"""
Shows the plot using pylab. Usually I won't do imports in methods,
but since plotting is a fairly expensive library to load and not all
machines have matplotlib installed, I have done it this way.
"""
if plt is None:
plt = pretty_plot(8, 6)
from matplotlib.font_manager import FontProperties
if ordering is None:
(lines, labels, unstable) = self.pd_plot_data
else:
(_lines, _labels, _unstable) = self.pd_plot_data
(lines, labels, unstable) = order_phase_diagram(
_lines, _labels, _unstable, ordering)
if energy_colormap is None:
if process_attributes:
for x, y in lines:
plt.plot(x, y, "k-", linewidth=3, markeredgecolor="k")
# One should think about a clever way to have "complex"
# attributes with complex processing options but with a clear
# logic. At this moment, I just use the attributes to know
# whether an entry is a new compound or an existing (from the
# ICSD or from the MP) one.
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "ko", **self.plotkwargs)
else:
plt.plot(x, y, "k*", **self.plotkwargs)
else:
for x, y in lines:
plt.plot(x, y, "ko-", **self.plotkwargs)
else:
from matplotlib.colors import Normalize, LinearSegmentedColormap
from matplotlib.cm import ScalarMappable
for x, y in lines:
plt.plot(x, y, "k-", markeredgecolor="k")
vmin = vmin_mev / 1000.0
vmax = vmax_mev / 1000.0
if energy_colormap == 'default':
mid = - vmin / (vmax - vmin)
cmap = LinearSegmentedColormap.from_list(
'my_colormap', [(0.0, '#005500'), (mid, '#55FF55'),
(mid, '#FFAAAA'), (1.0, '#FF0000')])
else:
cmap = energy_colormap
norm = Normalize(vmin=vmin, vmax=vmax)
_map = ScalarMappable(norm=norm, cmap=cmap)
_energies = [self._pd.get_equilibrium_reaction_energy(entry)
for coord, entry in labels.items()]
energies = [en if en < 0.0 else -0.00000001 for en in _energies]
vals_stable = _map.to_rgba(energies)
ii = 0
if process_attributes:
for x, y in labels.keys():
if labels[(x, y)].attribute is None or \
labels[(x, y)].attribute == "existing":
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=12)
else:
plt.plot(x, y, "*", markerfacecolor=vals_stable[ii],
markersize=18)
ii += 1
else:
for x, y in labels.keys():
plt.plot(x, y, "o", markerfacecolor=vals_stable[ii],
markersize=15)
ii += 1
font = FontProperties()
font.set_weight("bold")
font.set_size(24)
# Sets a nice layout depending on the type of PD. Also defines a
# "center" for the PD, which then allows the annotations to be spread
# out in a nice manner.
if len(self._pd.elements) == 3:
plt.axis("equal")
plt.xlim((-0.1, 1.2))
plt.ylim((-0.1, 1.0))
plt.axis("off")
center = (0.5, math.sqrt(3) / 6)
else:
all_coords = labels.keys()
miny = min([c[1] for c in all_coords])
ybuffer = max(abs(miny) * 0.1, 0.1)
plt.xlim((-0.1, 1.1))
plt.ylim((miny - ybuffer, ybuffer))
center = (0.5, miny / 2)
plt.xlabel("Fraction", fontsize=28, fontweight='bold')
plt.ylabel("Formation energy (eV/fu)", fontsize=28,
fontweight='bold')
for coords in sorted(labels.keys(), key=lambda x: -x[1]):
entry = labels[coords]
label = entry.name
# The following defines an offset for the annotation text emanating
# from the center of the PD. Results in fairly nice layouts for the
# most part.
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 if np.linalg.norm(vec) != 0 \
else vec
valign = "bottom" if vec[1] > 0 else "top"
if vec[0] < -0.01:
halign = "right"
elif vec[0] > 0.01:
halign = "left"
else:
halign = "center"
if label_stable:
if process_attributes and entry.attribute == 'new':
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font,
color='g')
else:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign,
verticalalignment=valign,
fontproperties=font)
if self.show_unstable:
font = FontProperties()
font.set_size(16)
energies_unstable = [self._pd.get_e_above_hull(entry)
for entry, coord in unstable.items()]
if energy_colormap is not None:
energies.extend(energies_unstable)
vals_unstable = _map.to_rgba(energies_unstable)
ii = 0
for entry, coords in unstable.items():
ehull = self._pd.get_e_above_hull(entry)
if ehull < self.show_unstable:
vec = (np.array(coords) - center)
vec = vec / np.linalg.norm(vec) * 10 \
if np.linalg.norm(vec) != 0 else vec
label = entry.name
if energy_colormap is None:
plt.plot(coords[0], coords[1], "ks", linewidth=3,
markeredgecolor="k", markerfacecolor="r",
markersize=8)
else:
plt.plot(coords[0], coords[1], "s", linewidth=3,
markeredgecolor="k",
markerfacecolor=vals_unstable[ii],
markersize=8)
if label_unstable:
plt.annotate(latexify(label), coords, xytext=vec,
textcoords="offset points",
horizontalalignment=halign, color="b",
verticalalignment=valign,
fontproperties=font)
ii += 1
if energy_colormap is not None and show_colorbar:
_map.set_array(energies)
cbar = plt.colorbar(_map)
cbar.set_label(
'Energy [meV/at] above hull (in red)\nInverse energy ['
'meV/at] above hull (in green)',
rotation=-90, ha='left', va='center')
ticks = cbar.ax.get_yticklabels()
# cbar.ax.set_yticklabels(['${v}$'.format(
# v=float(t.get_text().strip('$'))*1000.0) for t in ticks])
f = plt.gcf()
f.set_size_inches((8, 6))
plt.subplots_adjust(left=0.09, right=0.98, top=0.98, bottom=0.07)
return plt
def _get_3d_plot(self, label_stable=True):
"""
Shows the plot using pylab. Usually I won't do imports in methods,
but since plotting is a fairly expensive library to load and not all
machines have matplotlib installed, I have done it this way.
"""
import matplotlib.pyplot as plt
import mpl_toolkits.mplot3d.axes3d as p3
from matplotlib.font_manager import FontProperties
fig = plt.figure()
ax = p3.Axes3D(fig)
font = FontProperties()
font.set_weight("bold")
font.set_size(20)
(lines, labels, unstable) = self.pd_plot_data
count = 1
newlabels = list()
for x, y, z in lines:
ax.plot(x, y, z, "bo-", linewidth=3, markeredgecolor="b",
markerfacecolor="r", markersize=10)
for coords in sorted(labels.keys()):
entry = labels[coords]
label = entry.name
if label_stable:
if len(entry.composition.elements) == 1:
ax.text(coords[0], coords[1], coords[2], label)
else:
ax.text(coords[0], coords[1], coords[2], str(count))
newlabels.append("{} : {}".format(count, latexify(label)))
count += 1
plt.figtext(0.01, 0.01, "\n".join(newlabels))
ax.axis("off")
return plt
def write_image(self, stream, image_format="svg", **kwargs):
"""
Writes the phase diagram to an image in a stream.
Args:
stream:
stream to write to. Can be a file stream or a StringIO stream.
image_format:
format for image. Can be any of matplotlib supported formats.
Defaults to svg for best results for vector graphics.
\\*\\*kwargs: Pass through to get_plot function.
"""
plt = self.get_plot(**kwargs)
f = plt.gcf()
f.set_size_inches((12, 10))
plt.savefig(stream, format=image_format)
def plot_chempot_range_map(self, elements, referenced=True):
"""
Plot the chemical potential range map. Currently works only for
3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
"""
self.get_chempot_range_map_plot(elements, referenced=referenced).show()
def get_chempot_range_map_plot(self, elements, referenced=True):
"""
Returns a plot of the chemical potential range map. Currently works
only for 3-component PDs.
Args:
elements: Sequence of elements to be considered as independent
variables. E.g., if you want to show the stability ranges of
all Li-Co-O phases wrt to uLi and uO, you will supply
[Element("Li"), Element("O")]
referenced: if True, gives the results with a reference being the
energy of the elemental phase. If False, gives absolute values.
Returns:
A matplotlib plot object.
"""
plt = pretty_plot(12, 8)
chempot_ranges = self._pd.get_chempot_range_map(
elements, referenced=referenced)
missing_lines = {}
excluded_region = []
for entry, lines in chempot_ranges.items():
comp = entry.composition
center_x = 0
center_y = 0
coords = []
contain_zero = any([comp.get_atomic_fraction(el) == 0
for el in elements])
is_boundary = (not contain_zero) and \
sum([comp.get_atomic_fraction(el) for el in
elements]) == 1
for line in lines:
(x, y) = line.coords.transpose()
plt.plot(x, y, "k-")
for coord in line.coords:
if not in_coord_list(coords, coord):
coords.append(coord.tolist())
center_x += coord[0]
center_y += coord[1]
if is_boundary:
excluded_region.extend(line.coords)
if coords and contain_zero:
missing_lines[entry] = coords
else:
xy = (center_x / len(coords), center_y / len(coords))
plt.annotate(latexify(entry.name), xy, fontsize=22)
ax = plt.gca()
xlim = ax.get_xlim()
ylim = ax.get_ylim()
# Shade the forbidden chemical potential regions.
excluded_region.append([xlim[1], ylim[1]])
excluded_region = sorted(excluded_region, key=lambda c: c[0])
(x, y) = np.transpose(excluded_region)
plt.fill(x, y, "0.80")
# The hull does not generate the missing horizontal and vertical lines.
# The following code fixes this.
el0 = elements[0]
el1 = elements[1]
for entry, coords in missing_lines.items():
center_x = sum([c[0] for c in coords])
center_y = sum([c[1] for c in coords])
comp = entry.composition
is_x = comp.get_atomic_fraction(el0) < 0.01
is_y = comp.get_atomic_fraction(el1) < 0.01
n = len(coords)
if not (is_x and is_y):
if is_x:
coords = sorted(coords, key=lambda c: c[1])
for i in [0, -1]:
x = [min(xlim), coords[i][0]]
y = [coords[i][1], coords[i][1]]
plt.plot(x, y, "k")
center_x += min(xlim)
center_y += coords[i][1]
elif is_y:
coords = sorted(coords, key=lambda c: c[0])
for i in [0, -1]:
x = [coords[i][0], coords[i][0]]
y = [coords[i][1], min(ylim)]
plt.plot(x, y, "k")
center_x += coords[i][0]
center_y += min(ylim)
xy = (center_x / (n + 2), center_y / (n + 2))
else:
center_x = sum(coord[0] for coord in coords) + xlim[0]
center_y = sum(coord[1] for coord in coords) + ylim[0]
xy = (center_x / (n + 1), center_y / (n + 1))
plt.annotate(latexify(entry.name), xy,
horizontalalignment="center",
verticalalignment="center", fontsize=22)
plt.xlabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
.format(el0.symbol))
plt.ylabel("$\\mu_{{{0}}} - \\mu_{{{0}}}^0$ (eV)"
.format(el1.symbol))
plt.tight_layout()
return plt
def get_contour_pd_plot(self):
"""
Plot a contour phase diagram plot, where phase triangles are colored
according to degree of instability by interpolation. Currently only
works for 3-component phase diagrams.
Returns:
A matplotlib plot object.
"""
from scipy import interpolate
from matplotlib import cm
pd = self._pd
entries = pd.qhull_entries
data = np.array(pd.qhull_data)
plt = self._get_2d_plot()
data[:, 0:2] = triangular_coord(data[:, 0:2]).transpose()
for i, e in enumerate(entries):
data[i, 2] = self._pd.get_e_above_hull(e)
gridsize = 0.005
xnew = np.arange(0, 1., gridsize)
ynew = np.arange(0, 1, gridsize)
f = interpolate.LinearNDInterpolator(data[:, 0:2], data[:, 2])
znew = np.zeros((len(ynew), len(xnew)))
for (i, xval) in enumerate(xnew):
for (j, yval) in enumerate(ynew):
znew[j, i] = f(xval, yval)
plt.contourf(xnew, ynew, znew, 1000, cmap=cm.autumn_r)
plt.colorbar()
return plt
def uniquelines(q):
"""
    Given all the facets, convert them into a set of unique lines. Specifically
used for converting convex hull facets into line pairs of coordinates.
Args:
q: A 2-dim sequence, where each row represents a facet. E.g.,
[[1,2,3],[3,6,7],...]
Returns:
setoflines:
A set of tuple of lines. E.g., ((1,2), (1,3), (2,3), ....)
"""
setoflines = set()
for facets in q:
for line in itertools.combinations(facets, 2):
setoflines.add(tuple(sorted(line)))
return setoflines
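# Worked example (shown as a comment):
#     uniquelines([[1, 2, 3], [3, 6, 7]])
#     -> {(1, 2), (1, 3), (2, 3), (3, 6), (3, 7), (6, 7)}
# Each facet contributes all pairwise combinations of its vertex indices,
# and edges shared between facets are de-duplicated by the set.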
def triangular_coord(coord):
"""
Convert a 2D coordinate into a triangle-based coordinate system for a
prettier phase diagram.
Args:
coordinate: coordinate used in the convex hull computation.
Returns:
coordinates in a triangular-based coordinate system.
"""
unitvec = np.array([[1, 0], [0.5, math.sqrt(3) / 2]])
result = np.dot(np.array(coord), unitvec)
return result.transpose()
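# Example: the composition axes map onto an equilateral triangle, e.g.
#     triangular_coord([1, 0]) -> [1.0, 0.0]
#     triangular_coord([0, 1]) -> [0.5, sqrt(3)/2] ~ [0.5, 0.866]
# A 2-D input of several points returns, after the transpose, one row of
# x values and one row of y values.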
def tet_coord(coord):
"""
Convert a 3D coordinate into a tetrahedron based coordinate system for a
prettier phase diagram.
Args:
        coord: coordinate used in the convex hull computation.
Returns:
coordinates in a tetrahedron-based coordinate system.
"""
unitvec = np.array([[1, 0, 0], [0.5, math.sqrt(3) / 2, 0],
[0.5, 1.0 / 3.0 * math.sqrt(3) / 2, math.sqrt(6) / 3]])
result = np.dot(np.array(coord), unitvec)
return result.transpose()
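# Example: the three composition axes map onto tetrahedron vertices, e.g.
#     tet_coord([1, 0, 0]) -> [1.0, 0.0, 0.0]
#     tet_coord([0, 0, 1]) -> [0.5, sqrt(3)/6, sqrt(6)/3] ~ [0.5, 0.289, 0.816]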
def order_phase_diagram(lines, stable_entries, unstable_entries, ordering):
"""
Orders the entries (their coordinates) in a phase diagram plot according
to the user specified ordering.
Ordering should be given as ['Up', 'Left', 'Right'], where Up,
Left and Right are the names of the entries in the upper, left and right
corners of the triangle respectively.
Args:
lines: list of list of coordinates for lines in the PD.
stable_entries: {coordinate : entry} for each stable node in the
phase diagram. (Each coordinate can only have one stable phase)
unstable_entries: {entry: coordinates} for all unstable nodes in the
phase diagram.
ordering: Ordering of the phase diagram, given as a list ['Up',
'Left','Right']
Returns:
(newlines, newstable_entries, newunstable_entries):
- newlines is a list of list of coordinates for lines in the PD.
- newstable_entries is a {coordinate : entry} for each stable node
in the phase diagram. (Each coordinate can only have one
stable phase)
- newunstable_entries is a {entry: coordinates} for all unstable
nodes in the phase diagram.
"""
yup = -1000.0
xleft = 1000.0
xright = -1000.0
for coord in stable_entries:
if coord[0] > xright:
xright = coord[0]
nameright = stable_entries[coord].name
if coord[0] < xleft:
xleft = coord[0]
nameleft = stable_entries[coord].name
if coord[1] > yup:
yup = coord[1]
nameup = stable_entries[coord].name
if (not nameup in ordering) or (not nameright in ordering) or \
(not nameleft in ordering):
raise ValueError(
'Error in ordering_phase_diagram : \n"{up}", "{left}" and "{'
'right}"'
' should be in ordering : {ord}'.format(up=nameup, left=nameleft,
right=nameright,
ord=ordering))
cc = np.array([0.5, np.sqrt(3.0) / 6.0], np.float)
if nameup == ordering[0]:
if nameleft == ordering[1]:
# The coordinates were already in the user ordering
return lines, stable_entries, unstable_entries
else:
newlines = [[np.array(1.0 - x), y] for x, y in lines]
newstable_entries = {(1.0 - c[0], c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {entry: (1.0 - c[0], c[1])
for entry, c in
unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
elif nameup == ordering[1]:
if nameleft == ordering[2]:
c120 = np.cos(2.0 * np.pi / 3.0)
s120 = np.sin(2.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = c120 * (xx - cc[0]) - s120 * (y[ii] - cc[1]) + \
cc[0]
newy[ii] = s120 * (xx - cc[0]) + c120 * (y[ii] - cc[1]) + \
cc[1]
newlines.append([newx, newy])
newstable_entries = {
(c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (c120 * (c[0] - cc[0]) - s120 * (c[1] - cc[1]) + cc[0],
s120 * (c[0] - cc[0]) + c120 * (c[1] - cc[1]) + cc[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
else:
c120 = np.cos(2.0 * np.pi / 3.0)
s120 = np.sin(2.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = -c120 * (xx - 1.0) - s120 * y[ii] + 1.0
newy[ii] = -s120 * (xx - 1.0) + c120 * y[ii]
newlines.append([newx, newy])
newstable_entries = {(-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
-s120 * (c[0] - 1.0) + c120 * c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (-c120 * (c[0] - 1.0) - s120 * c[1] + 1.0,
-s120 * (c[0] - 1.0) + c120 * c[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
elif nameup == ordering[2]:
if nameleft == ordering[0]:
c240 = np.cos(4.0 * np.pi / 3.0)
s240 = np.sin(4.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = c240 * (xx - cc[0]) - s240 * (y[ii] - cc[1]) + \
cc[0]
newy[ii] = s240 * (xx - cc[0]) + c240 * (y[ii] - cc[1]) + \
cc[1]
newlines.append([newx, newy])
newstable_entries = {
(c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {
entry: (c240 * (c[0] - cc[0]) - s240 * (c[1] - cc[1]) + cc[0],
s240 * (c[0] - cc[0]) + c240 * (c[1] - cc[1]) + cc[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
else:
c240 = np.cos(4.0 * np.pi / 3.0)
s240 = np.sin(4.0 * np.pi / 3.0)
newlines = []
for x, y in lines:
newx = np.zeros_like(x)
newy = np.zeros_like(y)
for ii, xx in enumerate(x):
newx[ii] = -c240 * xx - s240 * y[ii]
newy[ii] = -s240 * xx + c240 * y[ii]
newlines.append([newx, newy])
newstable_entries = {(-c240 * c[0] - s240 * c[1],
-s240 * c[0] + c240 * c[1]): entry
for c, entry in stable_entries.items()}
newunstable_entries = {entry: (-c240 * c[0] - s240 * c[1],
-s240 * c[0] + c240 * c[1])
for entry, c in unstable_entries.items()}
return newlines, newstable_entries, newunstable_entries
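# Geometric summary: the six possible corner orderings correspond to the
# symmetries of an equilateral triangle (identity, +/-120 degree rotations
# about the centroid cc = (0.5, sqrt(3)/6), and mirror reflections). The
# branches above apply the matching symmetry to the lines, the stable entries
# and the unstable entries alike, so only the orientation of the plot changes,
# never which entries are connected.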
| mit |
Ambrosys/glyph | examples/control/lorenz.py | 1 | 5449 | """gp application."""
import argparse
import logging
import numpy as np
import sympy
import sympy.utilities
from toolz import cons
import deap
import glyph.application as application
import glyph.assessment as assessment
import glyph.gp as gp
import glyph.utils as utils
import sys
import os
sys.path.append(os.path.dirname(__file__))
import control_problem
# Setup of the control problem and gp algorithm.
class Individual(gp.individual.AExpressionTree):
"""The gp representation (genotype) of the actuator for the control problem."""
pset = gp.sympy_primitive_set(
categories=["algebraic", "trigonometric", "exponential", "neg"],
arguments=["y_0", "y_1", "y_2"],
constants=["c"],
)
def __str__(self):
"""Human readable representation of the individual."""
return str(sympy.sympify(deap.gp.compile(repr(self), self.pset)))
class AssessmentRunner(assessment.AAssessmentRunner):
"""Define a measure for the fitness assessment."""
def setup(self):
"""Setup dynamic system."""
self.x = np.linspace(0.0, 100.0, 5000, dtype=np.float64)
self.yinit = np.array([10.0, 1.0, 5.0])
self.params = dict(s=10.0, r=28.0, b=8.0 / 3.0)
self.target = np.zeros_like(self.x)
def measure(self, individual):
popt, rmse_opt = assessment.const_opt(self.rmse, individual, lsq=True)
return rmse_opt[0], rmse_opt[1], rmse_opt[2], len(individual), popt
def assign_fitness(self, individual, fitness):
individual.fitness.values = fitness[:-1]
individual.popt = fitness[-1]
def rmse(self, individual, *f_args):
y = self.trajectory(individual, *f_args)
rmse_y_0 = utils.numeric.rmse(self.target, y[0, :])
rmse_y_1 = utils.numeric.rmse(self.target, y[1, :])
rmse_y_2 = utils.numeric.rmse(self.target, y[2, :])
return assessment.replace_nan((rmse_y_0, rmse_y_1, rmse_y_2))
def trajectory(self, individual, *f_args):
dy = control_problem.lorenz_in_2(gp.sympy_phenotype(individual), **self.params)
return control_problem.integrate(dy, yinit=self.yinit, x=self.x, f_args=f_args)
def main():
"""Entry point of application."""
program_description = "Lorenz system"
parser = argparse.ArgumentParser(program_description)
parser.add_argument(
"--params",
type=utils.argparse.ntuple(3, float),
default=(10, 28, 8 / 3),
help="parameters σ,r,b for the lorenz system (default: 10,28,8/3)",
)
parser.add_argument("--plot", help="plot best results", action="store_true")
app, args = application.default_console_app(Individual, AssessmentRunner, parser)
app.assessment_runner.params["s"] = args.params[0]
app.assessment_runner.params["r"] = args.params[1]
app.assessment_runner.params["b"] = args.params[2]
app.run()
logger = logging.getLogger(__name__)
logger.info("\n")
logger.info("Hall of Fame:")
for individual in app.gp_runner.pareto_front:
logger.info("{} {}".format(individual.fitness.values, str(individual)))
if not args.plot:
return
# Plot n best results.
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import seaborn
n = 2
seaborn.set_palette("husl", n + 2)
alpha = 0.7
label_size = 16
title_size = 20
params, yinit = app.assessment_runner.params, app.assessment_runner.yinit
x, target = app.assessment_runner.x, app.assessment_runner.target
title = program_description + "\nparams={}, yinit={}".format(params, yinit)
ax0 = plt.subplot2grid((3, 2), (0, 0))
ax1 = plt.subplot2grid((3, 2), (1, 0))
ax2 = plt.subplot2grid((3, 2), (2, 0))
ax3 = plt.subplot2grid((3, 2), (1, 1), projection="3d", rowspan=2)
lines, labels = [], []
(l,) = ax0.plot(x, target, linestyle="dotted")
ax1.plot(x, target, linestyle="dotted")
ax2.plot(x, target, linestyle="dotted")
labels.append("target")
lines.append(l)
uncontrolled = Individual.from_string("Add(y_0, Neg(y_0))")
for ind in cons(uncontrolled, app.gp_runner.pareto_front[:n]):
popt = getattr(ind, "popt", np.zeros(len(ind.pset.constants)))
label = "with $a({}) = {}$, $c={}$".format(",".join(ind.pset.args), str(ind), popt)
        label = label.replace("**", "^").replace("*", "\\cdot ")
y = app.assessment_runner.trajectory(ind, *popt)
(l,) = ax0.plot(x, y[0, :], alpha=alpha)
ax1.plot(x, y[1, :], alpha=alpha, color=l.get_color())
ax2.plot(x, y[2, :], alpha=alpha, color=l.get_color())
ax3.plot(y[0, :], y[1, :], y[2, :], alpha=alpha, color=l.get_color())
labels.append(label)
lines.append(l)
ax0.set_ylabel("$y_0$", fontsize=label_size)
ax0.set_xlabel("time", fontsize=label_size)
ax1.set_ylabel("$y_1$", fontsize=label_size)
ax1.set_xlabel("time", fontsize=label_size)
ax2.set_ylabel("$y_2$", fontsize=label_size)
ax2.set_xlabel("time", fontsize=label_size)
ax3.set_xlabel("$y_0$", fontsize=label_size)
ax3.set_ylabel("$y_1$", fontsize=label_size)
ax3.set_title("Phase Portrait", fontsize=label_size)
plt.figlegend(lines, labels, loc="upper right", bbox_to_anchor=(0.9, 0.9), fontsize=label_size)
plt.suptitle(title, fontsize=title_size)
plt.subplots_adjust(left=0.05, right=0.95, top=0.95, bottom=0.05)
plt.show()
if __name__ == "__main__":
main()
| lgpl-3.0 |
liebannam/pipes | steady_states.py | 1 | 1726 | import numpy as np
from scipy.integrate import odeint
from allthethings import PyPipe_ps
import matplotlib.pyplot as plt
def F(y,t,Q,L,N,D,M,a,S0,Mr):
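    # The integrand below appears to be the steady-state profile equation
    # dA/dx = S / (c**2 - u**2): y plays the role of the cross-sectional area
    # A (see p0.AofH in main()), u = Q/A is the mean velocity, and c is the
    # gravity wavespeed from PyPipe_ps.Cgrav. PyPipe_ps, getSourceTerms and
    # Cgrav are provided by the compiled `allthethings` module imported above.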
#(Q,L,N,D,M,a,S0,Mr) = args
print "y = %f" %y
p0 = PyPipe_ps(N,D,L,M,a)
p0.Mr = Mr
p0.S0 = S0
S = p0.getSourceTerms(y,Q)
u = Q/y
c =p0.Cgrav(y,True)
return S/(c**2-u**2)
def main():
L = 10
N = 100
M = 10
a = 100
D = .1
Q = -0.04890698
Mr = 0.007
S0 = 0.042731691
S0 = 0.116914981
S0s = [0.042731691,0.174868713,0.116914981,0.065484615,-0.178530533,-0.017923349,0.042784691,-0.246330809]
t0 = 0
p0 = PyPipe_ps(N,D,L,M,a)
y0 = p0.AofH(10,False)
    print(F(y0,0,Q,L,N,D,M,a,S0,Mr))
ts = np.linspace(0,L,100)
'''for i in range(2):
S0 = i*.05+.01
for k in range(4):
Mr = 0.002*k+.01
Qt = (Q,L,N,D,M,a,S0,Mr)
y1 = odeint(F,y0,ts,args =Qt)
H = [p0.HofA(y,False) for y in y1]
plt.plot(ts,H,label ="Mr = %.3f, S0=%.2f"%(Mr,S0))
'''
dys =[]
for k in range(len(S0s)):
S0 = S0s[k]
Qt = (Q,L,N,D,M,a,S0,Mr)
y1 = odeint(F,y0,ts,args =Qt)
dys.append((y1[-1]-y1[0])/L)
for k in range(len(S0s)):
print "%f %e"%(S0s[k],dys[k])
#plt.plot(ts,H,label ="Mr = %.3f, S0=%.2f"%(Mr,S0))
#plt.legend(loc='lower left')
#plt.show()
#r = ode(F).set_integrator('zvode', method= 'bdf', with_jacobian =False)
#r.set_initial_value(y0,t0).set_f_params(Q)
#dt = 0.1
#t1 = L
#print r.y
#while r.successful() and r.t<t1:
# r.integrate(r.t+dt)
#print("%g %g" % (r.t, r.y))
if __name__ == "__main__":
main()
| gpl-3.0 |
walterreade/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
henrykironde/scikit-learn | sklearn/utils/estimator_checks.py | 7 | 47975 | from __future__ import print_function
import types
import warnings
import sys
import traceback
import inspect
import pickle
from copy import deepcopy
import numpy as np
from scipy import sparse
import struct
from sklearn.externals.six.moves import zip
from sklearn.externals.joblib import hash, Memory
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regex
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import META_ESTIMATORS
from sklearn.utils.testing import set_random_state
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import ignore_warnings
from sklearn.base import (clone, ClassifierMixin, RegressorMixin,
TransformerMixin, ClusterMixin, BaseEstimator)
from sklearn.metrics import accuracy_score, adjusted_rand_score, f1_score
from sklearn.lda import LDA
from sklearn.random_projection import BaseRandomProjection
from sklearn.feature_selection import SelectKBest
from sklearn.svm.base import BaseLibSVM
from sklearn.pipeline import make_pipeline
from sklearn.utils.validation import DataConversionWarning
from sklearn.cross_validation import train_test_split
from sklearn.utils import shuffle
from sklearn.preprocessing import StandardScaler
from sklearn.datasets import load_iris, load_boston, make_blobs
BOSTON = None
CROSS_DECOMPOSITION = ['PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']
MULTI_OUTPUT = ['CCA', 'DecisionTreeRegressor', 'ElasticNet',
'ExtraTreeRegressor', 'ExtraTreesRegressor', 'GaussianProcess',
'KNeighborsRegressor', 'KernelRidge', 'Lars', 'Lasso',
'LassoLars', 'LinearRegression', 'MultiTaskElasticNet',
'MultiTaskElasticNetCV', 'MultiTaskLasso', 'MultiTaskLassoCV',
'OrthogonalMatchingPursuit', 'PLSCanonical', 'PLSRegression',
'RANSACRegressor', 'RadiusNeighborsRegressor',
'RandomForestRegressor', 'Ridge', 'RidgeCV']
def _yield_non_meta_checks(name, Estimator):
yield check_estimators_dtypes
yield check_fit_score_takes_y
yield check_dtype_object
yield check_estimators_fit_returns_self
# Check that all estimator yield informative messages when
# trained on empty datasets
yield check_estimators_empty_data_messages
if name not in CROSS_DECOMPOSITION + ['SpectralEmbedding']:
# SpectralEmbedding is non-deterministic,
# see issue #4236
# cross-decomposition's "transform" returns X and Y
yield check_pipeline_consistency
if name not in ['Imputer']:
# Test that all estimators check their input for NaN's and infs
yield check_estimators_nan_inf
if name not in ['GaussianProcess']:
# FIXME!
# in particular GaussianProcess!
yield check_estimators_overwrite_params
if hasattr(Estimator, 'sparsify'):
yield check_sparsify_coefficients
yield check_estimator_sparse_data
# Test that estimators can be pickled, and once pickled
# give the same answer as before.
yield check_estimators_pickle
def _yield_classifier_checks(name, Classifier):
    # test classifiers can handle non-array data
yield check_classifier_data_not_an_array
# test classifiers trained on a single label always return this label
yield check_classifiers_one_label
yield check_classifiers_classes
yield check_estimators_partial_fit_n_features
# basic consistency testing
yield check_classifiers_train
if (name not in ["MultinomialNB", "LabelPropagation", "LabelSpreading"]
# TODO some complication with -1 label
and name not in ["DecisionTreeClassifier",
"ExtraTreeClassifier"]):
# We don't raise a warning in these classifiers, as
# the column y interface is used by the forests.
yield check_supervised_y_2d
# test if NotFittedError is raised
yield check_estimators_unfitted
if 'class_weight' in Classifier().get_params().keys():
yield check_class_weight_classifiers
def _yield_regressor_checks(name, Regressor):
# TODO: test with intercept
# TODO: test with multiple responses
# basic testing
yield check_regressors_train
yield check_regressor_data_not_an_array
yield check_estimators_partial_fit_n_features
yield check_regressors_no_decision_function
yield check_supervised_y_2d
if name != 'CCA':
# check that the regressor handles int input
yield check_regressors_int
# Test if NotFittedError is raised
yield check_estimators_unfitted
def _yield_transformer_checks(name, Transformer):
# All transformers should either deal with sparse data or raise an
# exception with type TypeError and an intelligible error message
if name not in ['AdditiveChi2Sampler', 'Binarizer', 'Normalizer',
'PLSCanonical', 'PLSRegression', 'CCA', 'PLSSVD']:
yield check_transformer_data_not_an_array
# these don't actually fit the data, so don't raise errors
if name not in ['AdditiveChi2Sampler', 'Binarizer',
'FunctionTransformer', 'Normalizer']:
# basic tests
yield check_transformer_general
yield check_transformers_unfitted
def _yield_clustering_checks(name, Clusterer):
yield check_clusterer_compute_labels_predict
if name not in ('WardAgglomeration', "FeatureAgglomeration"):
# this is clustering on the features
# let's not test that here.
yield check_clustering
yield check_estimators_partial_fit_n_features
def _yield_all_checks(name, Estimator):
for check in _yield_non_meta_checks(name, Estimator):
yield check
if issubclass(Estimator, ClassifierMixin):
for check in _yield_classifier_checks(name, Estimator):
yield check
if issubclass(Estimator, RegressorMixin):
for check in _yield_regressor_checks(name, Estimator):
yield check
if issubclass(Estimator, TransformerMixin):
for check in _yield_transformer_checks(name, Estimator):
yield check
if issubclass(Estimator, ClusterMixin):
for check in _yield_clustering_checks(name, Estimator):
yield check
def check_estimator(Estimator):
"""Check if estimator adheres to sklearn conventions.
    This function will run an extensive test-suite for input validation,
shapes, etc.
Additional tests for classifiers, regressors, clustering or transformers
will be run if the Estimator class inherits from the corresponding mixin
from sklearn.base.
Parameters
----------
Estimator : class
Class to check.
"""
name = Estimator.__class__.__name__
check_parameters_default_constructible(name, Estimator)
for check in _yield_all_checks(name, Estimator):
check(name, Estimator)
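# Minimal usage sketch (hedged; `MyEstimator` is a placeholder for any
# scikit-learn compatible estimator class you want to validate):
#
#     from sklearn.utils.estimator_checks import check_estimator
#     check_estimator(MyEstimator)   # raises on sklearn API violations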
def _boston_subset(n_samples=200):
global BOSTON
if BOSTON is None:
boston = load_boston()
X, y = boston.data, boston.target
X, y = shuffle(X, y, random_state=0)
X, y = X[:n_samples], y[:n_samples]
X = StandardScaler().fit_transform(X)
BOSTON = X, y
return BOSTON
def set_fast_parameters(estimator):
# speed up some estimators
params = estimator.get_params()
if ("n_iter" in params
and estimator.__class__.__name__ != "TSNE"):
estimator.set_params(n_iter=5)
if "max_iter" in params:
# NMF
if estimator.max_iter is not None:
estimator.set_params(max_iter=min(5, estimator.max_iter))
# LinearSVR
if estimator.__class__.__name__ == 'LinearSVR':
estimator.set_params(max_iter=20)
if "n_resampling" in params:
# randomized lasso
estimator.set_params(n_resampling=5)
if "n_estimators" in params:
# especially gradient boosting with default 100
estimator.set_params(n_estimators=min(5, estimator.n_estimators))
if "max_trials" in params:
# RANSAC
estimator.set_params(max_trials=10)
if "n_init" in params:
# K-Means
estimator.set_params(n_init=2)
if estimator.__class__.__name__ == "SelectFdr":
# be tolerant of noisy datasets (not actually speed)
estimator.set_params(alpha=.5)
if estimator.__class__.__name__ == "TheilSenRegressor":
estimator.max_subpopulation = 100
if isinstance(estimator, BaseRandomProjection):
# Due to the jl lemma and often very few samples, the number
# of components of the random matrix projection will be probably
# greater than the number of features.
# So we impose a smaller number (avoid "auto" mode)
estimator.set_params(n_components=1)
if isinstance(estimator, SelectKBest):
# SelectKBest has a default of k=10
# which is more feature than we have in most case.
estimator.set_params(k=1)
class NotAnArray(object):
" An object that is convertable to an array"
def __init__(self, data):
self.data = data
def __array__(self, dtype=None):
return self.data
def _is_32bit():
"""Detect if process is 32bit Python."""
return struct.calcsize('P') * 8 == 32
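# struct.calcsize('P') is the size of a C pointer in bytes, so this returns
# True on a 32-bit interpreter (4 * 8 == 32) and False on a 64-bit one.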
def check_estimator_sparse_data(name, Estimator):
rng = np.random.RandomState(0)
X = rng.rand(40, 10)
X[X < .8] = 0
X = sparse.csr_matrix(X)
y = (4 * rng.rand(40)).astype(np.int)
# catch deprecation warnings
with warnings.catch_warnings():
if name in ['Scaler', 'StandardScaler']:
estimator = Estimator(with_mean=False)
else:
estimator = Estimator()
set_fast_parameters(estimator)
# fit and predict
try:
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, 'predict_proba'):
estimator.predict_proba(X)
except TypeError as e:
if 'sparse' not in repr(e):
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: error message state explicitly that "
"sparse input is not supported if this is not the case."
% name)
raise
except Exception:
print("Estimator %s doesn't seem to fail gracefully on "
"sparse data: it should raise a TypeError if sparse input "
"is explicitly not supported." % name)
raise
def check_dtype_object(name, Estimator):
# check that estimators treat dtype object as numeric if possible
rng = np.random.RandomState(0)
X = rng.rand(40, 10).astype(object)
y = (X[:, 0] * 4).astype(np.int)
y = multioutput_estimator_convert_y_2d(name, y)
with warnings.catch_warnings():
estimator = Estimator()
set_fast_parameters(estimator)
estimator.fit(X, y)
if hasattr(estimator, "predict"):
estimator.predict(X)
if hasattr(estimator, "transform"):
estimator.transform(X)
try:
estimator.fit(X, y.astype(object))
except Exception as e:
if "Unknown label type" not in str(e):
raise
X[0, 0] = {'foo': 'bar'}
msg = "argument must be a string or a number"
assert_raises_regex(TypeError, msg, estimator.fit, X, y)
def check_transformer_general(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
X -= X.min()
_check_transformer(name, Transformer, X, y)
_check_transformer(name, Transformer, X.tolist(), y.tolist())
def check_transformer_data_not_an_array(name, Transformer):
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
this_X = NotAnArray(X)
this_y = NotAnArray(np.asarray(y))
_check_transformer(name, Transformer, this_X, this_y)
def check_transformers_unfitted(name, Transformer):
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
transformer = Transformer()
assert_raises((AttributeError, ValueError), transformer.transform, X)
def _check_transformer(name, Transformer, X, y):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# on numpy & scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
n_samples, n_features = np.asarray(X).shape
# catch deprecation warnings
with warnings.catch_warnings(record=True):
transformer = Transformer()
set_random_state(transformer)
set_fast_parameters(transformer)
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.c_[y, y]
y_[::2, 1] *= 2
else:
y_ = y
transformer.fit(X, y_)
X_pred = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple):
for x_pred in X_pred:
assert_equal(x_pred.shape[0], n_samples)
else:
# check for consistent n_samples
assert_equal(X_pred.shape[0], n_samples)
if hasattr(transformer, 'transform'):
if name in CROSS_DECOMPOSITION:
X_pred2 = transformer.transform(X, y_)
X_pred3 = transformer.fit_transform(X, y=y_)
else:
X_pred2 = transformer.transform(X)
X_pred3 = transformer.fit_transform(X, y=y_)
if isinstance(X_pred, tuple) and isinstance(X_pred2, tuple):
for x_pred, x_pred2, x_pred3 in zip(X_pred, X_pred2, X_pred3):
assert_array_almost_equal(
x_pred, x_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
x_pred, x_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
else:
assert_array_almost_equal(
X_pred, X_pred2, 2,
"fit_transform and transform outcomes not consistent in %s"
% Transformer)
assert_array_almost_equal(
X_pred, X_pred3, 2,
"consecutive fit_transform outcomes not consistent in %s"
% Transformer)
assert_equal(len(X_pred2), n_samples)
assert_equal(len(X_pred3), n_samples)
# raises error on malformed input for transform
if hasattr(X, 'T'):
# If it's not an array, it does not have a 'T' property
assert_raises(ValueError, transformer.transform, X.T)
@ignore_warnings
def check_pipeline_consistency(name, Estimator):
if name in ('CCA', 'LocallyLinearEmbedding', 'KernelPCA') and _is_32bit():
# Those transformers yield non-deterministic output when executed on
# a 32bit Python. The same transformers are stable on 64bit Python.
# FIXME: try to isolate a minimalistic reproduction case only depending
# scipy and/or maybe generate a test dataset that does not
# cause such unstable behaviors.
msg = name + ' is non deterministic on 32bit Python'
raise SkipTest(msg)
# check that make_pipeline(est) gives same score as est
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min()
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
pipeline = make_pipeline(estimator)
estimator.fit(X, y)
pipeline.fit(X, y)
funcs = ["score", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func_pipeline = getattr(pipeline, func_name)
result = func(X, y)
result_pipe = func_pipeline(X, y)
assert_array_almost_equal(result, result_pipe)
@ignore_warnings
def check_fit_score_takes_y(name, Estimator):
# check that all estimators accept an optional y
# in fit and score so they can be used in pipelines
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
y = multioutput_estimator_convert_y_2d(name, y)
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
funcs = ["fit", "score", "partial_fit", "fit_predict", "fit_transform"]
for func_name in funcs:
func = getattr(estimator, func_name, None)
if func is not None:
func(X, y)
args = inspect.getargspec(func).args
assert_true(args[2] in ["y", "Y"])
@ignore_warnings
def check_estimators_dtypes(name, Estimator):
rnd = np.random.RandomState(0)
X_train_32 = 3 * rnd.uniform(size=(20, 5)).astype(np.float32)
X_train_64 = X_train_32.astype(np.float64)
X_train_int_64 = X_train_32.astype(np.int64)
X_train_int_32 = X_train_32.astype(np.int32)
y = X_train_int_64[:, 0]
y = multioutput_estimator_convert_y_2d(name, y)
for X_train in [X_train_32, X_train_64, X_train_int_64, X_train_int_32]:
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
estimator.fit(X_train, y)
for method in ["predict", "transform", "decision_function",
"predict_proba"]:
if hasattr(estimator, method):
getattr(estimator, method)(X_train)
def check_estimators_empty_data_messages(name, Estimator):
e = Estimator()
set_fast_parameters(e)
set_random_state(e, 1)
X_zero_samples = np.empty(0).reshape(0, 3)
# The precise message can change depending on whether X or y is
# validated first. Let us test the type of exception only:
assert_raises(ValueError, e.fit, X_zero_samples, [])
X_zero_features = np.empty(0).reshape(3, 0)
# the following y should be accepted by both classifiers and regressors
# and ignored by unsupervised models
y = multioutput_estimator_convert_y_2d(name, np.array([1, 0, 1]))
msg = "0 feature(s) (shape=(3, 0)) while a minimum of 1 is required."
assert_raise_message(ValueError, msg, e.fit, X_zero_features, y)
def check_estimators_nan_inf(name, Estimator):
rnd = np.random.RandomState(0)
X_train_finite = rnd.uniform(size=(10, 3))
X_train_nan = rnd.uniform(size=(10, 3))
X_train_nan[0, 0] = np.nan
X_train_inf = rnd.uniform(size=(10, 3))
X_train_inf[0, 0] = np.inf
y = np.ones(10)
y[:5] = 0
y = multioutput_estimator_convert_y_2d(name, y)
error_string_fit = "Estimator doesn't check for NaN and inf in fit."
error_string_predict = ("Estimator doesn't check for NaN and inf in"
" predict.")
error_string_transform = ("Estimator doesn't check for NaN and inf in"
" transform.")
for X_train in [X_train_nan, X_train_inf]:
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator, 1)
# try to fit
try:
estimator.fit(X_train, y)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_fit, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_fit, Estimator, exc)
traceback.print_exc(file=sys.stdout)
raise exc
else:
raise AssertionError(error_string_fit, Estimator)
# actually fit
estimator.fit(X_train_finite, y)
# predict
if hasattr(estimator, "predict"):
try:
estimator.predict(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_predict, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_predict, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_predict, Estimator)
# transform
if hasattr(estimator, "transform"):
try:
estimator.transform(X_train)
except ValueError as e:
if 'inf' not in repr(e) and 'NaN' not in repr(e):
print(error_string_transform, Estimator, e)
traceback.print_exc(file=sys.stdout)
raise e
except Exception as exc:
print(error_string_transform, Estimator, exc)
traceback.print_exc(file=sys.stdout)
else:
raise AssertionError(error_string_transform, Estimator)
def check_estimators_pickle(name, Estimator):
"""Test that we can pickle all estimators"""
check_methods = ["predict", "transform", "decision_function",
"predict_proba"]
X, y = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
# some estimators can't do features less than 0
X -= X.min()
# some estimators only take multioutputs
y = multioutput_estimator_convert_y_2d(name, y)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_random_state(estimator)
set_fast_parameters(estimator)
estimator.fit(X, y)
result = dict()
for method in check_methods:
if hasattr(estimator, method):
result[method] = getattr(estimator, method)(X)
# pickle and unpickle!
pickled_estimator = pickle.dumps(estimator)
unpickled_estimator = pickle.loads(pickled_estimator)
for method in result:
unpickled_result = getattr(unpickled_estimator, method)(X)
assert_array_almost_equal(result[method], unpickled_result)
def check_estimators_partial_fit_n_features(name, Alg):
# check if number of features changes between calls to partial_fit.
if not hasattr(Alg, 'partial_fit'):
return
X, y = make_blobs(n_samples=50, random_state=1)
X -= X.min()
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if isinstance(alg, ClassifierMixin):
classes = np.unique(y)
alg.partial_fit(X, y, classes=classes)
else:
alg.partial_fit(X, y)
assert_raises(ValueError, alg.partial_fit, X[:, :-1], y)
def check_clustering(name, Alg):
X, y = make_blobs(n_samples=50, random_state=1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
n_samples, n_features = X.shape
# catch deprecation and neighbors warnings
with warnings.catch_warnings(record=True):
alg = Alg()
set_fast_parameters(alg)
if hasattr(alg, "n_clusters"):
alg.set_params(n_clusters=3)
set_random_state(alg)
if name == 'AffinityPropagation':
alg.set_params(preference=-100)
alg.set_params(max_iter=100)
# fit
alg.fit(X)
# with lists
alg.fit(X.tolist())
assert_equal(alg.labels_.shape, (n_samples,))
pred = alg.labels_
assert_greater(adjusted_rand_score(pred, y), 0.4)
# fit another time with ``fit_predict`` and compare results
    if name == 'SpectralClustering':
# there is no way to make Spectral clustering deterministic :(
return
set_random_state(alg)
with warnings.catch_warnings(record=True):
pred2 = alg.fit_predict(X)
assert_array_equal(pred, pred2)
def check_clusterer_compute_labels_predict(name, Clusterer):
"""Check that predict is invariant of compute_labels"""
X, y = make_blobs(n_samples=20, random_state=0)
clusterer = Clusterer()
if hasattr(clusterer, "compute_labels"):
# MiniBatchKMeans
if hasattr(clusterer, "random_state"):
clusterer.set_params(random_state=0)
X_pred1 = clusterer.fit(X).predict(X)
clusterer.set_params(compute_labels=False)
X_pred2 = clusterer.fit(X).predict(X)
assert_array_equal(X_pred1, X_pred2)
def check_classifiers_one_label(name, Classifier):
error_string_fit = "Classifier can't train when only one class is present."
error_string_predict = ("Classifier can't predict when only one class is "
"present.")
rnd = np.random.RandomState(0)
X_train = rnd.uniform(size=(10, 3))
X_test = rnd.uniform(size=(10, 3))
y = np.ones(10)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
set_fast_parameters(classifier)
# try to fit
try:
classifier.fit(X_train, y)
except ValueError as e:
if 'class' not in repr(e):
print(error_string_fit, Classifier, e)
traceback.print_exc(file=sys.stdout)
raise e
else:
return
except Exception as exc:
print(error_string_fit, Classifier, exc)
traceback.print_exc(file=sys.stdout)
raise exc
# predict
try:
assert_array_equal(classifier.predict(X_test), y)
except Exception as exc:
print(error_string_predict, Classifier, exc)
raise exc
def check_classifiers_train(name, Classifier):
X_m, y_m = make_blobs(n_samples=300, random_state=0)
X_m, y_m = shuffle(X_m, y_m, random_state=7)
X_m = StandardScaler().fit_transform(X_m)
# generate binary problem from multi-class one
y_b = y_m[y_m != 2]
X_b = X_m[y_m != 2]
for (X, y) in [(X_m, y_m), (X_b, y_b)]:
# catch deprecation warnings
classes = np.unique(y)
n_classes = len(classes)
n_samples, n_features = X.shape
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name in ['BernoulliNB', 'MultinomialNB']:
X -= X.min()
set_fast_parameters(classifier)
set_random_state(classifier)
# raises error on malformed input for fit
assert_raises(ValueError, classifier.fit, X, y[:-1])
# fit
classifier.fit(X, y)
# with lists
classifier.fit(X.tolist(), y.tolist())
assert_true(hasattr(classifier, "classes_"))
y_pred = classifier.predict(X)
assert_equal(y_pred.shape, (n_samples,))
# training set performance
if name not in ['BernoulliNB', 'MultinomialNB']:
assert_greater(accuracy_score(y, y_pred), 0.83)
# raises error on malformed input for predict
assert_raises(ValueError, classifier.predict, X.T)
if hasattr(classifier, "decision_function"):
try:
# decision_function agrees with predict
decision = classifier.decision_function(X)
                if n_classes == 2:
assert_equal(decision.shape, (n_samples,))
dec_pred = (decision.ravel() > 0).astype(np.int)
assert_array_equal(dec_pred, y_pred)
                if (n_classes == 3
                        and not isinstance(classifier, BaseLibSVM)):
# 1on1 of LibSVM works differently
assert_equal(decision.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(decision, axis=1), y_pred)
# raises error on malformed input
assert_raises(ValueError,
classifier.decision_function, X.T)
# raises error on malformed input for decision_function
assert_raises(ValueError,
classifier.decision_function, X.T)
except NotImplementedError:
pass
if hasattr(classifier, "predict_proba"):
# predict_proba agrees with predict
y_prob = classifier.predict_proba(X)
assert_equal(y_prob.shape, (n_samples, n_classes))
assert_array_equal(np.argmax(y_prob, axis=1), y_pred)
# check that probas for all classes sum to one
assert_array_almost_equal(np.sum(y_prob, axis=1),
np.ones(n_samples))
# raises error on malformed input
assert_raises(ValueError, classifier.predict_proba, X.T)
# raises error on malformed input for predict_proba
assert_raises(ValueError, classifier.predict_proba, X.T)
def check_estimators_fit_returns_self(name, Estimator):
"""Check if self is returned when calling fit"""
X, y = make_blobs(random_state=0, n_samples=9, n_features=4)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
assert_true(estimator.fit(X, y) is estimator)
@ignore_warnings
def check_estimators_unfitted(name, Estimator):
"""Check that predict raises an exception in an unfitted estimator.
Unfitted estimators should raise either AttributeError or ValueError.
The specific exception type NotFittedError inherits from both and can
therefore be adequately raised for that purpose.
"""
# Common test for Regressors as well as Classifiers
X, y = _boston_subset()
with warnings.catch_warnings(record=True):
est = Estimator()
msg = "fit"
if hasattr(est, 'predict'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict, X)
if hasattr(est, 'decision_function'):
assert_raise_message((AttributeError, ValueError), msg,
est.decision_function, X)
if hasattr(est, 'predict_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_proba, X)
if hasattr(est, 'predict_log_proba'):
assert_raise_message((AttributeError, ValueError), msg,
est.predict_log_proba, X)
def check_supervised_y_2d(name, Estimator):
if "MultiTask" in name:
# These only work on 2d, so this test makes no sense
return
rnd = np.random.RandomState(0)
X = rnd.uniform(size=(10, 3))
y = np.arange(10) % 3
# catch deprecation warnings
with warnings.catch_warnings(record=True):
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
# fit
estimator.fit(X, y)
y_pred = estimator.predict(X)
set_random_state(estimator)
# Check that when a 2D y is given, a DataConversionWarning is
# raised
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always", DataConversionWarning)
warnings.simplefilter("ignore", RuntimeWarning)
estimator.fit(X, y[:, np.newaxis])
y_pred_2d = estimator.predict(X)
msg = "expected 1 DataConversionWarning, got: %s" % (
", ".join([str(w_x) for w_x in w]))
if name not in MULTI_OUTPUT:
# check that we warned if we don't support multi-output
assert_greater(len(w), 0, msg)
assert_true("DataConversionWarning('A column-vector y"
" was passed when a 1d array was expected" in msg)
assert_array_almost_equal(y_pred.ravel(), y_pred_2d.ravel())
def check_classifiers_classes(name, Classifier):
X, y = make_blobs(n_samples=30, random_state=0, cluster_std=0.1)
X, y = shuffle(X, y, random_state=7)
X = StandardScaler().fit_transform(X)
# We need to make sure that we have non negative data, for things
# like NMF
X -= X.min() - .1
y_names = np.array(["one", "two", "three"])[y]
for y_names in [y_names, y_names.astype('O')]:
if name in ["LabelPropagation", "LabelSpreading"]:
# TODO some complication with -1 label
y_ = y
else:
y_ = y_names
classes = np.unique(y_)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
classifier = Classifier()
if name == 'BernoulliNB':
classifier.set_params(binarize=X.mean())
set_fast_parameters(classifier)
set_random_state(classifier)
# fit
classifier.fit(X, y_)
y_pred = classifier.predict(X)
# training set performance
assert_array_equal(np.unique(y_), np.unique(y_pred))
if np.any(classifier.classes_ != classes):
print("Unexpected classes_ attribute for %r: "
"expected %s, got %s" %
(classifier, classes, classifier.classes_))
def check_regressors_int(name, Regressor):
X, _ = _boston_subset()
X = X[:50]
rnd = np.random.RandomState(0)
y = rnd.randint(3, size=X.shape[0])
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
regressor_1 = Regressor()
regressor_2 = Regressor()
set_fast_parameters(regressor_1)
set_fast_parameters(regressor_2)
set_random_state(regressor_1)
set_random_state(regressor_2)
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
# fit
regressor_1.fit(X, y_)
pred1 = regressor_1.predict(X)
regressor_2.fit(X, y_.astype(np.float))
pred2 = regressor_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_regressors_train(name, Regressor):
X, y = _boston_subset()
y = StandardScaler().fit_transform(y) # X is already scaled
y = multioutput_estimator_convert_y_2d(name, y)
rnd = np.random.RandomState(0)
# catch deprecation warnings
with warnings.catch_warnings(record=True):
regressor = Regressor()
set_fast_parameters(regressor)
if not hasattr(regressor, 'alphas') and hasattr(regressor, 'alpha'):
# linear regressors need to set alpha, but not generalized CV ones
regressor.alpha = 0.01
if name == 'PassiveAggressiveRegressor':
regressor.C = 0.01
# raises error on malformed input for fit
assert_raises(ValueError, regressor.fit, X, y[:-1])
# fit
if name in CROSS_DECOMPOSITION:
y_ = np.vstack([y, 2 * y + rnd.randint(2, size=len(y))])
y_ = y_.T
else:
y_ = y
set_random_state(regressor)
regressor.fit(X, y_)
regressor.fit(X.tolist(), y_.tolist())
y_pred = regressor.predict(X)
assert_equal(y_pred.shape, y_.shape)
# TODO: find out why PLS and CCA fail. RANSAC is random
# and furthermore assumes the presence of outliers, hence
# skipped
if name not in ('PLSCanonical', 'CCA', 'RANSACRegressor'):
print(regressor)
assert_greater(regressor.score(X, y_), 0.5)
@ignore_warnings
def check_regressors_no_decision_function(name, Regressor):
# checks whether regressors have decision_function or predict_proba
rng = np.random.RandomState(0)
X = rng.normal(size=(10, 4))
y = multioutput_estimator_convert_y_2d(name, X[:, 0])
regressor = Regressor()
set_fast_parameters(regressor)
if hasattr(regressor, "n_components"):
# FIXME CCA, PLS is not robust to rank 1 effects
regressor.n_components = 1
regressor.fit(X, y)
funcs = ["decision_function", "predict_proba", "predict_log_proba"]
for func_name in funcs:
func = getattr(regressor, func_name, None)
if func is None:
# doesn't have function
continue
# has function. Should raise deprecation warning
msg = func_name
assert_warns_message(DeprecationWarning, msg, func, X)
def check_class_weight_classifiers(name, Classifier):
if name == "NuSVC":
# the sparse version has a parameter that doesn't do anything
raise SkipTest
if name.endswith("NB"):
# NaiveBayes classifiers have a somewhat different interface.
# FIXME SOON!
raise SkipTest
for n_centers in [2, 3]:
# create a very noisy dataset
X, y = make_blobs(centers=n_centers, random_state=0, cluster_std=20)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=.5,
random_state=0)
n_centers = len(np.unique(y_train))
if n_centers == 2:
class_weight = {0: 1000, 1: 0.0001}
else:
class_weight = {0: 1000, 1: 0.0001, 2: 0.0001}
with warnings.catch_warnings(record=True):
classifier = Classifier(class_weight=class_weight)
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
if hasattr(classifier, "min_weight_fraction_leaf"):
classifier.set_params(min_weight_fraction_leaf=0.01)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
assert_greater(np.mean(y_pred == 0), 0.89)
def check_class_weight_balanced_classifiers(name, Classifier, X_train, y_train,
X_test, y_test, weights):
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
classifier.set_params(n_iter=100)
set_random_state(classifier)
classifier.fit(X_train, y_train)
y_pred = classifier.predict(X_test)
classifier.set_params(class_weight='balanced')
classifier.fit(X_train, y_train)
y_pred_balanced = classifier.predict(X_test)
assert_greater(f1_score(y_test, y_pred_balanced, average='weighted'),
f1_score(y_test, y_pred, average='weighted'))
def check_class_weight_balanced_linear_classifier(name, Classifier):
"""Test class weights with non-contiguous class labels."""
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = np.array([1, 1, 1, -1, -1])
with warnings.catch_warnings(record=True):
classifier = Classifier()
if hasattr(classifier, "n_iter"):
        # This is a very small dataset; the default n_iter is likely too
        # small to reach convergence
classifier.set_params(n_iter=1000)
set_random_state(classifier)
# Let the model compute the class frequencies
classifier.set_params(class_weight='balanced')
coef_balanced = classifier.fit(X, y).coef_.copy()
# Count each label occurrence to reweight manually
n_samples = len(y)
n_classes = float(len(np.unique(y)))
class_weight = {1: n_samples / (np.sum(y == 1) * n_classes),
-1: n_samples / (np.sum(y == -1) * n_classes)}
classifier.set_params(class_weight=class_weight)
coef_manual = classifier.fit(X, y).coef_.copy()
assert_array_almost_equal(coef_balanced, coef_manual)
def check_estimators_overwrite_params(name, Estimator):
X, y = make_blobs(random_state=0, n_samples=9)
y = multioutput_estimator_convert_y_2d(name, y)
# some want non-negative input
X -= X.min()
with warnings.catch_warnings(record=True):
# catch deprecation warnings
estimator = Estimator()
set_fast_parameters(estimator)
set_random_state(estimator)
    # Make a physical copy of the original estimator parameters before fitting.
params = estimator.get_params()
original_params = deepcopy(params)
# Fit the model
estimator.fit(X, y)
# Compare the state of the model parameters with the original parameters
new_params = estimator.get_params()
for param_name, original_value in original_params.items():
new_value = new_params[param_name]
# We should never change or mutate the internal state of input
# parameters by default. To check this we use the joblib.hash function
# that introspects recursively any subobjects to compute a checksum.
# The only exception to this rule of immutable constructor parameters
# is possible RandomState instance but in this check we explicitly
# fixed the random_state params recursively to be integer seeds.
assert_equal(hash(new_value), hash(original_value),
"Estimator %s should not change or mutate "
" the parameter %s from %s to %s during fit."
% (name, param_name, original_value, new_value))
def check_sparsify_coefficients(name, Estimator):
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1],
[-1, -2], [2, 2], [-2, -2]])
y = [1, 1, 1, 2, 2, 2, 3, 3, 3]
est = Estimator()
est.fit(X, y)
pred_orig = est.predict(X)
# test sparsify with dense inputs
est.sparsify()
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
# pickle and unpickle with sparse coef_
est = pickle.loads(pickle.dumps(est))
assert_true(sparse.issparse(est.coef_))
pred = est.predict(X)
assert_array_equal(pred, pred_orig)
def check_classifier_data_not_an_array(name, Estimator):
X = np.array([[3, 0], [0, 1], [0, 2], [1, 1], [1, 2], [2, 1]])
y = [1, 1, 1, 2, 2, 2]
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_regressor_data_not_an_array(name, Estimator):
X, y = _boston_subset(n_samples=50)
y = multioutput_estimator_convert_y_2d(name, y)
check_estimators_data_not_an_array(name, Estimator, X, y)
def check_estimators_data_not_an_array(name, Estimator, X, y):
if name in CROSS_DECOMPOSITION:
raise SkipTest
# catch deprecation warnings
with warnings.catch_warnings(record=True):
# separate estimators to control random seeds
estimator_1 = Estimator()
estimator_2 = Estimator()
set_fast_parameters(estimator_1)
set_fast_parameters(estimator_2)
set_random_state(estimator_1)
set_random_state(estimator_2)
y_ = NotAnArray(np.asarray(y))
X_ = NotAnArray(np.asarray(X))
# fit
estimator_1.fit(X_, y_)
pred1 = estimator_1.predict(X_)
estimator_2.fit(X, y)
pred2 = estimator_2.predict(X)
assert_array_almost_equal(pred1, pred2, 2, name)
def check_parameters_default_constructible(name, Estimator):
classifier = LDA()
# test default-constructibility
# get rid of deprecation warnings
with warnings.catch_warnings(record=True):
if name in META_ESTIMATORS:
estimator = Estimator(classifier)
else:
estimator = Estimator()
# test cloning
clone(estimator)
# test __repr__
repr(estimator)
# test that set_params returns self
assert_true(estimator.set_params() is estimator)
# test if init does nothing but set parameters
# this is important for grid_search etc.
# We get the default parameters from init and then
# compare these against the actual values of the attributes.
# this comes from getattr. Gets rid of deprecation decorator.
init = getattr(estimator.__init__, 'deprecated_original',
estimator.__init__)
try:
args, varargs, kws, defaults = inspect.getargspec(init)
except TypeError:
# init is not a python function.
# true for mixins
return
params = estimator.get_params()
if name in META_ESTIMATORS:
# they need a non-default argument
args = args[2:]
else:
args = args[1:]
if args:
# non-empty list
assert_equal(len(args), len(defaults))
else:
return
for arg, default in zip(args, defaults):
assert_in(type(default), [str, int, float, bool, tuple, type(None),
np.float64, types.FunctionType, Memory])
if arg not in params.keys():
# deprecated parameter, not in get_params
assert_true(default is None)
continue
if isinstance(params[arg], np.ndarray):
assert_array_equal(params[arg], default)
else:
assert_equal(params[arg], default)
def multioutput_estimator_convert_y_2d(name, y):
    # Estimators in mono_output_task_error raise ValueError if y is 1-D.
    # Convert y into a 2-D array for those estimators.
if name in (['MultiTaskElasticNetCV', 'MultiTaskLassoCV',
'MultiTaskLasso', 'MultiTaskElasticNet']):
return y[:, np.newaxis]
return y
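# Example: for name='MultiTaskLasso', y = np.array([1, 0, 1]) is reshaped to
# [[1], [0], [1]] (shape (3, 1)); for any other estimator name, y is returned
# unchanged.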
def check_non_transformer_estimators_n_iter(name, estimator,
multi_output=False):
    # Check that all iterative solvers run for more than one iteration
iris = load_iris()
X, y_ = iris.data, iris.target
if multi_output:
y_ = y_[:, np.newaxis]
set_random_state(estimator, 0)
if name == 'AffinityPropagation':
estimator.fit(X)
else:
estimator.fit(X, y_)
assert_greater(estimator.n_iter_, 0)
def check_transformer_n_iter(name, estimator):
if name in CROSS_DECOMPOSITION:
# Check using default data
X = [[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [2., 5., 4.]]
y_ = [[0.1, -0.2], [0.9, 1.1], [0.1, -0.5], [0.3, -0.2]]
else:
X, y_ = make_blobs(n_samples=30, centers=[[0, 0, 0], [1, 1, 1]],
random_state=0, n_features=2, cluster_std=0.1)
X -= X.min() - 0.1
set_random_state(estimator, 0)
estimator.fit(X, y_)
    # These return an n_iter per component.
if name in CROSS_DECOMPOSITION:
for iter_ in estimator.n_iter_:
assert_greater(iter_, 1)
else:
assert_greater(estimator.n_iter_, 1)
def check_get_params_invariance(name, estimator):
class T(BaseEstimator):
"""Mock classifier
"""
def __init__(self):
pass
def fit(self, X, y):
return self
if name in ('FeatureUnion', 'Pipeline'):
e = estimator([('clf', T())])
    elif name in ('GridSearchCV', 'RandomizedSearchCV'):
return
else:
e = estimator()
shallow_params = e.get_params(deep=False)
deep_params = e.get_params(deep=True)
assert_true(all(item in deep_params.items() for item in
shallow_params.items()))
| bsd-3-clause |
shunw/pythonML_code | ch_7_example.py | 1 | 15803 | from itertools import product
import matplotlib.pyplot as plt
import math
import numpy as np
import operator
import pandas as pd
from scipy.misc import comb
from sklearn import datasets
from sklearn.base import BaseEstimator
from sklearn.base import ClassifierMixin
from sklearn.base import clone
from sklearn.cross_validation import cross_val_score
# from sklearn.cross_validation import train_test_split
from sklearn.model_selection import train_test_split
from sklearn.ensemble import BaggingClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.externals import six
from sklearn.grid_search import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import auc
from sklearn.metrics import roc_curve
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import _name_estimators
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.tree import DecisionTreeClassifier
def ensemble_error(n_classifier, error):
k_start = math.ceil(n_classifier / 2.0)
probs = [comb(n_classifier, k) * error ** k * (1 - error) ** (n_classifier - k) for k in range(int(k_start), n_classifier + 1)]
return sum(probs)
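# Hedged usage sketch (the helper name below is an assumption, not original
# code): with 11 base classifiers that each err 25% of the time, the binomial
# sum above yields an ensemble error of roughly 0.034, i.e. well below the
# individual error rate.
def _demo_ensemble_error():
    return ensemble_error(n_classifier = 11, error = 0.25)  # ~0.034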
def ensembles_errors():
'''learning with ensembles'''
error_range = np.arange(0.0, 1.01, 0.01)
ens_errors = [ensemble_error(n_classifier = 11, error = error) for error in error_range]
plt.plot(error_range, ens_errors, label = 'Ensemble error', linewidth = 2)
plt.plot(error_range, error_range, linestyle = '--', label = 'Base error', linewidth = 2)
plt.xlabel('Base error')
plt.ylabel('Base/Ensemble error')
plt.legend(loc = 'upper left')
plt.grid()
plt.show()
class MajorityVoteClassifier(BaseEstimator, ClassifierMixin):
''' A majority vote ensemble classifier
Parameters
---------------------
classifiers: array-like, shape = [n_classifiers]
Different classifiers for the ensemble
vote: str, {'classlabel', 'probability'}
Default: 'classlabel'
If 'classlabel' the prediction is based on the argmax of class labels. Else if 'probability', the argmax of the sum of probabilities is used to predict the class label (recommended for calibrated classifiers).
weights: array-like, shape = [n_classifiers]
Optional, default: None
If a list of 'int' or 'float' values are provided, the classifiers are weighted by importance; Uses uniform weights if 'weights = None'.
'''
def __init__(self, classifiers, vote = 'classlabel', weights = None):
self.classifiers = classifiers
self.named_classifiers = {key: value for key, value in _name_estimators(classifiers)}
self.vote = vote
self.weights = weights
def fit(self, X, y):
'''Fit classifiers.
Parameters
--------------
X: {array-like, sparse matrix},
shape = [n_samples, n_features]
Matrix of training samples
y: array-like, shape = [n_samples]
Vector of target class labels.
Returns
--------------
self: object
'''
# Use LabelEncoder to ensure class labels start with 0, which is important for np.argmax call in self.predict
self.lablenc_ = LabelEncoder()
self.lablenc_.fit(y)
self.classes_ = self.lablenc_.classes_
self.classifiers_ = []
for clf in self.classifiers:
fitted_clf = clone(clf).fit(X, self.lablenc_.transform(y))
self.classifiers_.append(fitted_clf)
return self
def predict(self, X):
'''Predict class labels for X
Parameters
----------
X: {array-like, sparse matrix},
Shape = [n_samples, n_features]
Matrix of training samples
Returns
----------
maj_vote: array-like, shape = [n_samples]
            Predicted class labels
'''
if self.vote == 'probability':
            maj_vote = np.argmax(self.predict_proba(X), axis = 1)
else: #'classlabel' vote
#collect results from clf.predict calls
predictions = np.asarray([clf.predict(X) for clf in self.classifiers_]).T
maj_vote = np.apply_along_axis(lambda x: np.argmax(np.bincount(x, weights = self.weights)), axis = 1, arr = predictions)
maj_vote = self.lablenc_.inverse_transform(maj_vote)
return maj_vote
def predict_proba(self, X):
'''Predict class probabilities for X
        Parameters
-----------
X: {array-like, sparse matrix},
            shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and n_features is the number of features.
Returns
-----------
avg_proba: array-like
shape = [n_samples, n_classes]
            Weighted average probability for each class per sample.
'''
probas = np.asarray([clf.predict_proba(X) for clf in self.classifiers_])
avg_proba = np.average(probas, axis = 0, weights = self.weights)
return avg_proba
def get_params(self, deep = True):
        '''Get classifier parameter names for GridSearch'''
if not deep:
return super(MajorityVoteClassifier, self).get_params(deep = False)
else:
out = self.named_classifiers.copy()
for name, step in six.iteritems(self.named_classifiers):
for key, value in six.iteritems(step.get_params(deep = True)):
out['{name}__{key}'.format(name = name, key = key)] = value
return out
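# Hedged usage sketch (helper name and arguments are assumptions; the real
# driver code lives in `simple_majority_vote` further below): the ensemble is
# built by handing already-configured estimators to `MajorityVoteClassifier`.
def _demo_majority_vote(X_train, y_train):
    lr = LogisticRegression(penalty = 'l2', C = .001, random_state = 0)
    dt = DecisionTreeClassifier(max_depth = 1, criterion = 'entropy', random_state = 0)
    mv = MajorityVoteClassifier(classifiers = [lr, dt], vote = 'classlabel')
    return mv.fit(X_train, y_train)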
def iris_data():
iris = datasets.load_iris()
X, y = iris.data[50:, [1, 2]], iris.target[50:]
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .5, random_state = 1)
return X_train, X_test, y_train, y_test
def simple_majority_vote():
# weighted multi vote
# print np.argmax(np.bincount([0, 0, 1], weights = [.2, .2, .6]))
ex = np.array([[.9, .1], [.8, .2], [.4, .6]])
p = np.average(ex, axis = 0, weights = [.2, .2, .6])
# print p
# print np.argmax(p)
X_train, X_test, y_train, y_test = iris_data()
clf1 = LogisticRegression(penalty = 'l2', C = .001, random_state = 0)
clf2 = DecisionTreeClassifier(max_depth = 1, criterion = 'entropy', random_state = 0)
clf3 = KNeighborsClassifier(n_neighbors = 1, p = 2, metric = 'minkowski')
pipe1 = Pipeline([['sc', StandardScaler()], ['clf', clf1]])
pipe3 = Pipeline([['sc', StandardScaler()], ['clf', clf3]])
clf_labels = ['Logistic Regression', 'Decision Tree', 'KNN']
# print ('10-fold cross validation: \n')
for clf, label in zip([pipe1, clf2, pipe3], clf_labels):
scores = cross_val_score(estimator = clf, X = X_train, y = y_train, cv = 10, scoring = 'roc_auc')
# print ('ROC AUC: {score_mean:.2f} (+/- {score_std:.2f}) [{label}]'.format(score_mean = scores.mean(), score_std = scores.std(), label = label))
mv_clf = MajorityVoteClassifier(classifiers = [pipe1, clf2, pipe3])
clf_labels += ['Majority Voting']
all_clf = [pipe1, clf2, pipe3, mv_clf]
for clf, label in zip(all_clf, clf_labels):
scores = cross_val_score(estimator = clf, X = X_train, y = y_train, cv = 10, scoring = 'roc_auc')
# print ('Accuracy: {score_mean:.2f} (+/- {score_std:.2f}) [{label}]'.format(score_mean = scores.mean(), score_std = scores.std(), label = label))
# # plot the ROC comparision plot
# colors = ['black', 'orange', 'blue', 'green']
# linestyles = [':', '--', '-.', '-']
# for clf, label, clr, ls in zip(all_clf, clf_labels, colors, linestyles):
# # assume the label of the positive class is 1
# y_pred = clf.fit(X_train, y_train).predict_proba(X_test)[:, 1]
# fpr, tpr, thresholds = roc_curve(y_true = y_test, y_score = y_pred)
# roc_auc = auc(x = fpr, y = tpr)
# plt.plot(fpr, tpr, color = clr, linestyle = ls, label = '{label} (auc = {roc_auc:.2f})'.format(label = label, roc_auc = roc_auc))
# plt.legend(loc = 'lower right')
# plt.plot([0, 1], [0, 1], linestyle = '--', color = 'gray', linewidth = 2)
# plt.xlim([-.1, 1.1])
# plt.ylim([-.1, 1.1])
# plt.grid()
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.show()
# # plot the decision region
# sc = StandardScaler()
# X_train_std = sc.fit_transform(X_train)
# x_min = X_train_std[:, 0].min() - 1
# x_max = X_train_std[:, 0].max() + 1
# y_min = X_train_std[: ,1].min() - 1
# y_max = X_train_std[: ,1].max() + 1
# xx, yy = np.meshgrid(np.arange(x_min, x_max, .1), np.arange(y_min, y_max, .1))
# f, axarr = plt.subplots(nrows = 2, ncols = 2, sharex = 'col', sharey = 'row', figsize = (7, 5))
# for idx, clf, tt in zip(product([0, 1], [0, 1]), all_clf, clf_labels):
# clf.fit(X_train_std, y_train)
# Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Z = Z.reshape(xx.shape)
# axarr[idx[0], idx[1]].contourf(xx, yy, Z, alpha = .3)
# axarr[idx[0], idx[1]].scatter(X_train_std[y_train == 0, 0], X_train_std[y_train == 0, 1], c = 'blue', marker = '^', s = 50)
# axarr[idx[0], idx[1]].scatter(X_train_std[y_train == 1, 0], X_train_std[y_train == 1, 1], c = 'red', marker = 'o', s = 50)
# axarr[idx[0], idx[1]].set_title(tt)
# plt.text(-3.5, -4.5, s = 'Sepal width [standardized]', ha = 'center', va = 'center', fontsize = 12)
# plt.text(-10.5, 4.5, s = 'Petal length [standardized]', ha = 'center', va = 'center', fontsize = 12, rotation = 90)
# plt.show()
# page 216/241
# print mv_clf.get_params()
params = {'decisiontreeclassifier__max_depth': [1, 2], 'pipeline-1__clf__C': [.001, .1, 100.0]}
grid = GridSearchCV(estimator = mv_clf, param_grid = params, cv = 10, scoring = 'roc_auc')
grid.fit(X_train, y_train)
for params, mean_score, scores in grid.grid_scores_:
        print ('{mean_score:.3f} +/- {scores_std:.2f} {params}'.format(mean_score = mean_score, scores_std = scores.std()/2, params = params))
def bagging_sample():
df_wine = pd.read_csv('wine.data', header = None)
    df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline']
df_wine = df_wine[df_wine['Class label'] != 1]
y = df_wine['Class label'].values
X = df_wine[['Alcohol', 'Hue']].values
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .4, random_state = 1)
tree = DecisionTreeClassifier(criterion = 'entropy', max_depth = None, random_state = 1)
bag = BaggingClassifier(base_estimator = tree, n_estimators = 500, max_samples = 1.0, max_features = 1.0, bootstrap = True, bootstrap_features = False, n_jobs = 1, random_state = 1)
tree = tree.fit(X_train, y_train)
y_train_pred = tree.predict(X_train)
y_test_pred = tree.predict(X_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
    print ('Decision tree train/test accuracies {tree_train:.3f}/{tree_test:.3f}'.format(tree_train = tree_train, tree_test = tree_test))
bag = bag.fit(X_train, y_train)
y_train_pred = bag.predict(X_train)
y_test_pred = bag.predict(X_test)
bag_train = accuracy_score(y_train, y_train_pred)
bag_test = accuracy_score(y_test, y_test_pred)
    print ('Bagging train/test accuracies {bag_train:.3f}/{bag_test:.3f}'.format(bag_train = bag_train, bag_test = bag_test))
x_min = X_train[:, 0].min() - 1
x_max = X_train[:, 0].max() + 1
y_min = X_train[:, 1].min() - 1
y_max = X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .1), np.arange(y_min, y_max, .1))
f, axarr = plt.subplots(nrows = 1, ncols = 2, sharex = 'col', sharey = 'row', figsize = (8, 3))
# print ('X_train is {x_train}'.format(x_train = X_train[y_train == 0]))
# print ('y_train is {y_train}'.format(y_train = y_train))
for idx, clf, tt in zip([0, 1], [tree, bag], ['Decision Tree', 'Bagging']):
clf.fit(X_train, y_train)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
# print ('idx: {idx}: X.shape is {x_shape}'.format(idx = idx, x_shape = X_train[y_train == 1].shape))
# print (X_train[y_train == 0])
axarr[idx].contourf(xx, yy, Z, alpha = .3)
axarr[idx].scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1], c = 'blue', marker = '^', alpha = .9)
axarr[idx].scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1], c = 'red', marker = 'o', alpha = .9)
axarr[idx].set_title(tt)
axarr[0].set_ylabel('Alcohol', fontsize = 12)
plt.text(10.2, -0.75, s = 'Hue', ha = 'center', va = 'center', fontsize = 12)
plt.show()
def ada_boost():
df_wine = pd.read_csv('wine.data', header = None)
    df_wine.columns = ['Class label', 'Alcohol', 'Malic acid', 'Ash', 'Alcalinity of ash', 'Magnesium', 'Total phenols', 'Flavanoids', 'Nonflavanoid phenols', 'Proanthocyanins', 'Color intensity', 'Hue', 'OD280/OD315 of diluted wines', 'Proline']
df_wine = df_wine[df_wine['Class label'] != 1]
y = df_wine['Class label'].values
X = df_wine[['Alcohol', 'Hue']].values
# X = df_wine.iloc[:, 1:].values
le = LabelEncoder()
y = le.fit_transform(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .4, random_state = 1)
tree = DecisionTreeClassifier(criterion = "entropy", max_depth = 1, random_state = 0)
ada = AdaBoostClassifier(base_estimator = tree, n_estimators = 500, learning_rate = .1, random_state = 0)
tree = tree.fit(X_train, y_train)
y_train_pred = tree.predict(X_train)
y_test_pred = tree.predict(X_test)
tree_train = accuracy_score(y_train, y_train_pred)
tree_test = accuracy_score(y_test, y_test_pred)
print ('Decision tree train/test accuracies {tree_train:.3f} / {tree_test:.3f}'.format(tree_train = tree_train, tree_test = tree_test))
ada = ada.fit(X_train, y_train)
y_train_pred = ada.predict(X_train)
y_test_pred = ada.predict(X_test)
ada_train = accuracy_score(y_train, y_train_pred)
ada_test = accuracy_score(y_test, y_test_pred)
print ('AdaBoost train/test accuracies {ada_train:.3f} / {ada_test:.3f}'.format(ada_train = ada_train, ada_test = ada_test))
x_min = X_train[:, 0].min() - 1
x_max = X_train[:, 0].max() + 1
y_min = X_train[:, 1].min() - 1
y_max = X_train[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, .1), np.arange(y_min, y_max, .1))
f, axarr = plt.subplots(1, 2, sharex = 'col', sharey = 'row', figsize = (8, 3))
for idx, clf, tt in zip([0, 1], [tree, ada], ['Decision Tree', 'AdaBoost']):
clf.fit(X_train, y_train)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
axarr[idx].contourf(xx, yy, Z, alpha = .3)
        axarr[idx].scatter(X_train[y_train == 0, 0], X_train[y_train == 0, 1], c = 'blue', marker = '^')
        axarr[idx].scatter(X_train[y_train == 1, 0], X_train[y_train == 1, 1], c = 'red', marker = 'o')
axarr[idx].set_title(tt)
axarr[0].set_ylabel('Alcohol', fontsize = 12)
plt.text(10.2, -.75, s = 'Hue', ha = 'center', va = 'center', fontsize = 12)
plt.show()
if __name__ == '__main__':
# print ensemble_error(n_classifier = 11, error = .25)
# ensembles_errors()
# simple_majority_vote()
# bagging_sample()
ada_boost()
| mit |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/tests/window/test_apply.py | 1 | 4869 | import numpy as np
import pytest
from pandas import DataFrame, Index, MultiIndex, Series, Timestamp, date_range
import pandas._testing as tm
@pytest.mark.parametrize("bad_raw", [None, 1, 0])
def test_rolling_apply_invalid_raw(bad_raw):
with pytest.raises(ValueError, match="raw parameter must be `True` or `False`"):
Series(range(3)).rolling(1).apply(len, raw=bad_raw)
def test_rolling_apply_out_of_bounds(engine_and_raw):
# gh-1850
engine, raw = engine_and_raw
vals = Series([1, 2, 3, 4])
result = vals.rolling(10).apply(np.sum, engine=engine, raw=raw)
assert result.isna().all()
result = vals.rolling(10, min_periods=1).apply(np.sum, engine=engine, raw=raw)
expected = Series([1, 3, 6, 10], dtype=float)
tm.assert_almost_equal(result, expected)
@pytest.mark.parametrize("window", [2, "2s"])
def test_rolling_apply_with_pandas_objects(window):
# 5071
df = DataFrame(
{"A": np.random.randn(5), "B": np.random.randint(0, 10, size=5)},
index=date_range("20130101", periods=5, freq="s"),
)
# we have an equal spaced timeseries index
# so simulate removing the first period
def f(x):
if x.index[0] == df.index[0]:
return np.nan
return x.iloc[-1]
result = df.rolling(window).apply(f, raw=False)
expected = df.iloc[2:].reindex_like(df)
tm.assert_frame_equal(result, expected)
with pytest.raises(AttributeError):
df.rolling(window).apply(f, raw=True)
def test_rolling_apply(engine_and_raw):
engine, raw = engine_and_raw
expected = Series([], dtype="float64")
result = expected.rolling(10).apply(lambda x: x.mean(), engine=engine, raw=raw)
tm.assert_series_equal(result, expected)
# gh-8080
s = Series([None, None, None])
result = s.rolling(2, min_periods=0).apply(lambda x: len(x), engine=engine, raw=raw)
expected = Series([1.0, 2.0, 2.0])
tm.assert_series_equal(result, expected)
result = s.rolling(2, min_periods=0).apply(len, engine=engine, raw=raw)
tm.assert_series_equal(result, expected)
def test_all_apply(engine_and_raw):
engine, raw = engine_and_raw
df = (
DataFrame(
{"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
).set_index("A")
* 2
)
er = df.rolling(window=1)
r = df.rolling(window="1s")
result = r.apply(lambda x: 1, engine=engine, raw=raw)
expected = er.apply(lambda x: 1, engine=engine, raw=raw)
tm.assert_frame_equal(result, expected)
def test_ragged_apply(engine_and_raw):
engine, raw = engine_and_raw
df = DataFrame({"B": range(5)})
df.index = [
Timestamp("20130101 09:00:00"),
Timestamp("20130101 09:00:02"),
Timestamp("20130101 09:00:03"),
Timestamp("20130101 09:00:05"),
Timestamp("20130101 09:00:06"),
]
f = lambda x: 1
result = df.rolling(window="1s", min_periods=1).apply(f, engine=engine, raw=raw)
expected = df.copy()
expected["B"] = 1.0
tm.assert_frame_equal(result, expected)
result = df.rolling(window="2s", min_periods=1).apply(f, engine=engine, raw=raw)
expected = df.copy()
expected["B"] = 1.0
tm.assert_frame_equal(result, expected)
result = df.rolling(window="5s", min_periods=1).apply(f, engine=engine, raw=raw)
expected = df.copy()
expected["B"] = 1.0
tm.assert_frame_equal(result, expected)
def test_invalid_engine():
with pytest.raises(ValueError, match="engine must be either 'numba' or 'cython'"):
Series(range(1)).rolling(1).apply(lambda x: x, engine="foo")
def test_invalid_engine_kwargs_cython():
with pytest.raises(ValueError, match="cython engine does not accept engine_kwargs"):
Series(range(1)).rolling(1).apply(
lambda x: x, engine="cython", engine_kwargs={"nopython": False}
)
def test_invalid_raw_numba():
with pytest.raises(
ValueError, match="raw must be `True` when using the numba engine"
):
Series(range(1)).rolling(1).apply(lambda x: x, raw=False, engine="numba")
@pytest.mark.parametrize("args_kwargs", [[None, {"par": 10}], [(10,), None]])
def test_rolling_apply_args_kwargs(args_kwargs):
# GH 33433
def foo(x, par):
return np.sum(x + par)
df = DataFrame({"gr": [1, 1], "a": [1, 2]})
idx = Index(["gr", "a"])
expected = DataFrame([[11.0, 11.0], [11.0, 12.0]], columns=idx)
result = df.rolling(1).apply(foo, args=args_kwargs[0], kwargs=args_kwargs[1])
tm.assert_frame_equal(result, expected)
midx = MultiIndex.from_tuples([(1, 0), (1, 1)], names=["gr", None])
expected = Series([11.0, 12.0], index=midx, name="a")
gb_rolling = df.groupby("gr")["a"].rolling(1)
result = gb_rolling.apply(foo, args=args_kwargs[0], kwargs=args_kwargs[1])
tm.assert_series_equal(result, expected)
| gpl-2.0 |
jkarnows/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
    # check the negative gradient against the alternative definition (from ESLII)
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
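# Added note (hedged): the "alternative definition" used above is the ESLII form
#     L(y, f) = mean( log(1 + exp(-2 * (2*y - 1) * f)) )
# which `np.logaddexp(0.0, -2.0 * (2.0*y - 1) * pred)` computes elementwise in a
# numerically stable way.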
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
        sw_out = sw_init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
mackelab/theano_shim | theano_shim/core.py | 1 | 70735 | """
A unified interface for Numpy and Theano, so we don't need
conditionals just to select between e.g. T.sum and np.sum.
More specific calls can be dealt with in the related code by
conditioning on this module's `use_theano` flag
This module provides an interchangeable interface to common operations,
such as type casting and checking, assertions and rounding, as well
as 'shim' datatypes for random number streams and shared variables.
Usage
-----
At the top of your code, include the line
``import theano_shim as shim``
By default this will not even try to load Theano, so you can use it on
a machine where Theano is not installed.
To 'switch on' Theano, add the following below the import:
``shim.load('theano')``
You can switch it back to its default state with ``shim.load('numpy')``.
Pointers for writing theano switches
------------------------------------
- Type checking
+ isinstance(x, theano.tensor.TensorVariable) will be True when
x is a theano variable, but False for wrappers around Python
objects such as shared variables.
+ isinstance(x, theano.graph.basic.Variable) is more inclusive, returning
True for shared variables as well.
+ These two tests are provided by the `is_theano_variable` and
`is_theano_object` convenience methods.
"""
import logging
import builtins
from collections import OrderedDict
from collections.abc import Sequence, Iterable, Callable
from numbers import Number
import inspect
import sys
import copy as copymodule
import numpy as np
import scipy as sp
import scipy.signal
import scipy.special
from .config import config
from .config import _gettheano, _getT
cf = config
logger = logging.getLogger('theano_shim')
#logger.setLevel(logging.INFO)
######################
def load_theano():
load('theano')
class DummyT:
def __getattr__(self, attr):
global T
if not cf.use_theano:
raise RuntimeError("Tried to access theano.tensor, but Theano has not been loaded.")
else:
assert not isinstance(T, DummyT)
return getattr(T, attr)
T = DummyT()
def load(library='theano', reraise=False):
"""Reset the module to use or not use Theano.
This should be called once at the top of your code.
Parameters
----------
library: 'numpy' | 'theano'
- 'numpy' : Module will simulate Theano using pure Numpy.
This is the state before calling `load()`.
- 'theano' : Module will act as an interface to Theano
reraise: Boolean
If true, import errors will be reraised to allow them to propagate to the parent.
"""
#TODO: Move as much as possible to config.Config
# And/or move the function to config, and import config.load
global theano, T
if library == 'theano':
try:
import theano
except ImportError:
logger.error("The theano library was not found.")
cf.library = 'numpy'
if reraise:
raise
else:
cf.library = 'theano'
else:
cf.library = library # Raises error if `library` is invalid
if cf.floatX == 'float32':
config.make_constants_32bit()
if cf.use_theano:
import theano.ifelse
import theano.tensor as T
import theano.tensor.signal.conv
import theano.sparse
import theano.sandbox
#import theano.tensor.shared_RandomStream # CPU only
# from theano.tensor.random.utils import RandomStream
# import theano.sandbox.rng_mrg
from theano.sandbox.rng_mrg import MRG_RandomStream as RandomStream # CPU & GPU
# The theano-pymc docs now recommend MRG_RandomStream, and that seems
# to be where the development effort is. For example,
        # `RandomStream().binomial(n=1, p=0.9)` fails with “unknown keyword argument 'n'”
# but `MRG_RandomStream().binomial(n=1, p=0.9)` works fine
from . import theano_types
cf.add_terminating_types((T.TensorType, T.TensorVariable))
cf.inf = 1e12
# cf.RandomStream = \
# make_TheanoRNG(theano.tensor.shared_RandomStream.RandomStream)
cf.RandomStream = theano_types.MRG_RNG
# if cf.sys.version_info.minor >= 5:
# cf.Numeric = cf.Union[np.ndarray, T.TensorVariable]
else:
cf.inf = np.inf
cf.RandomStream = NumpyRNG
# if cf.sys.version_info.minor >= 5:
# cf.Numeric = cf.Union[np.ndarray]
# Call the `reload()` methods in the submodules
from . import graph
graph.reload()
def gettheano():
if not cf.use_theano:
raise RuntimeError("Tried to access theano, but it has not been loaded.")
else:
return theano
def getT():
if not cf.use_theano:
raise RuntimeError("Tried to access theano.tensor, but Theano has not been loaded.")
else:
return T
class LazyEval:
"""
Small wrapper to permit lazy evaluation of arguments.
Python by default evaluates every argument being passed to a function,
which can lead to problems e.g. when using ifelse as a guard:
a = ifelse( neq(x, 0),
y/x,
y )
In this case we can rewrite the above as
a = ifelse( neq(x, 0),
LazyEval(lambda x,y: y/x, x, y),
y )
Current functions compatible with LazyEval:
- ifelse
"""
def __init__(self, f, *args, **kwargs):
"""
Parameters
----------
f: callable
An expression which returns the desired value
*args:
The variables appearing in the function f.
"""
self.f = f
self.args = args
self.kwargs = kwargs
def eval(self):
return self.f(*self.args, **self.kwargs)
##########################
# Managing theano updates
def add_update(variable, value=None):
"""
Parameters
----------
variable: shared variable | dict | iterable
Shared variable to update.
Can also be a dictionary of `variable:value` pairs, or an iterable of
`(variable, value)` tuples.
value: symbolic expression
Value to assign to variable. Ignored if `variable` is a dict or iterable
Returns
-------
None
"""
if isinstance(variable, dict):
for key, val in variable.items():
add_update(key, val)
elif isinstance(variable, Sequence):
for key, val in variable:
add_update(key, val)
else:
logger.debug(f"Adding Theano update: {variable.name} -> {str(value)}")
if not isshared(variable):
raise ValueError("The updates mechanism only applies to shared variables.")
cf.symbolic_updates[variable] = value
add_updates = add_update
def remove_update(variable):
"""
Parameters
----------
variable: shared variable | dict | iterable
Shared variable to update.
Can also be a dictionary of `variable:value` pairs, or an iterable of
`(variable, value)` tuples.
Returns
-------
None
"""
    if isinstance(variable, dict):
        for key, val in variable.items():
            remove_update(key)
    elif isinstance(variable, Sequence):
        for key, val in variable:
            remove_update(key)
else:
logger.debug(f"Removing Theano update: {variable.name}")
if not isshared(variable):
raise ValueError("The updates mechanism only applies to shared variables.")
del cf.symbolic_updates[variable]
remove_updates = remove_update
def get_updates():
return cf.symbolic_updates
def reset_updates():
logger.debug("Clearing Theano updates")
cf.symbolic_updates = OrderedDict()
def pending_update(*args):
"""
Return True if there is a pending symbolic updates for any one of the
variables in `args`.
If called with no arguments, return True if the update dictionary is nonempty.
"""
if len(args) == 0:
return len(cf.symbolic_updates) > 0
else:
for x in _expand_args(args):
if is_graph_object(x) and x in cf.symbolic_updates:
return True
return False
pending_updates = pending_update
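# Illustrative usage note (added sketch; variable names are assumed, not from
# the original module): with Theano loaded, a typical pattern is
#     add_update(shared_var, shared_var + 1)
# after which `get_updates()` returns {shared_var: shared_var + 1}; that
# dictionary is what one would pass as `updates=` when compiling the graph
# with `theano.function`.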
#######################
# Print statement
def _get_print_fn(file=sys.stdout):
"""Return the same function as theano.printing._print_fn, with
the difference that 'file' is passed as a keyword argument to print().
"""
def _print_fn(op, xin,):
for attr in op.attrs:
temp = getattr(xin, attr)
if callable(temp):
pmsg = temp()
else:
pmsg = temp
print(op.message, attr, '=', pmsg, file=file)
return _print_fn
def print(x, message=None, printfn='print', message_prefix="SHIM - ",
file=sys.stdout):
"""
Non-Theano version outputs to the logger at the debug level.
Parameters
----------
x:
The value of this graph will be output
message: string
Will be prepended to the output. If unspecified, the function
checks if `x` has a `name` attribute and uses it if present.
printfn: string
Determines the function used to print the variable; only
has an effect on Theano variables. Possible values are:
- 'print' (default): use theano.printing.Print
- 'debugprint': use theano.printing.debugprint
- 'eval' : try to call x's `eval` method. If successful,
print the output, otherwise fall back on theano.printing.Print
message_prefix: string
String to prepend to the message. Can be used to distinguish
different types of outputs. Defaults to "SHIM - ".
file: file handle
Where to print the value; default is 'sys.stdout'.
Same argument as used in print() or theano.printing.debugprint.
"""
if message is None:
message = getattr(x, 'name', "")
if message is None: message = "" # x.name might be None
if is_theano_object(x):
# EARLY EXIT: - slice
# TODO?: list, tuple
if isinstance(x, slice):
kw = dict(printfn=printfn, message_prefix=message_prefix, file=file)
start = print(x.start, message=message+" (start)", **kw)
stop = print(x.stop, message=message+" (stop)", **kw)
step = x.step and print(x.step, message=message+" (step)", **kw)
return slice(start, stop, step)
msg = message_prefix + message
if printfn == 'print':
return _gettheano().printing.Print(msg, global_fn=_get_print_fn(file))(x)
elif printfn == 'debugprint':
builtins.print(msg)
_gettheano().printing.debugprint(x, file=file)
return x
elif printfn == 'eval':
try:
val = x.eval()
except _gettheano().graph.fg.MissingInputError:
return _gettheano().printing.Print(msg, global_fn=_get_print_fn(file))(x)
else:
builtins.print(msg + " Value of {}: {}".format(str(x), val))
return x
else:
raise ValueError("Unrecognized print flag '{}'."
.format(printfn))
else:
if len(message) > 0 and message[-1] != " ":
msg = message + " "
else:
msg = message
#logger.debug(msg + str(x))
builtins.print(message_prefix + msg + str(x), file=file)
return x
def print_array(x, idx=slice(None), message=None, message_prefix="SHIM - ",
file=sys.stdout):
"""
Helper function for printing just one element in an array.
All parameters except `idx` are the same as for `print`.
Returns an identity operation on `x`, so that it can be used as follows
>>> x = shim.tensor(np.arange(100, 0.1))
>>> x = shim.print_array(x, idx=3)
>>> for i in range(2):
>>> x *= shim.print_array(x, idx=np.s_[2:5])
0.3__str__ DEBUG -
[0.4, 0.6, 0.8]__str__ DEBUG -
[0.8, 1.2, 1.6]__str__ DEBUG -
"""
return set_subtensor(x[idx],
print(x[idx],
message=message,
message_prefix=message_prefix,
file=file
)
)
def pprint(x):
"""
Call pretty printer (`pprint`) on Theano objects, otherwise standard `print`
"""
if is_theano_object(x):
return _gettheano().printing.pprint(x)
else:
return str(x)
def debugprint(x, file=sys.stdout):
return print(x, printfn='debugprint', message="", message_prefix="", file=file)
#######################
# Assert equivalent
def check(stmt):
"""
FIXME: In its current state this function is essentially broken, because
even for statements like `x == y`, with x,y Theano variables, the
    *statement* is still a pure Python object, and thus a plain assert
gets used (and then usually fails).
Check is a library-aware wrapper for assert.
If stmt is a Theano variable, the behaviour depends on whether
theano.config.compute_test_value:
- If it is 'off', `check` is a no-op
- Otherwise, use the test values to evaluate the assert
"""
if not cf.use_theano or not isinstance(stmt, theano.graph.basic.Variable):
assert(stmt)
else:
if theano.config.compute_test_value == 'off':
return None
else:
assert(stmt.tag.test_value)
######################
# Retrieving test values
def get_test_value(var, nofail=False):
"""
    If `var` is a Theano variable, return its test value if it is defined.
    Otherwise just return `var` unchanged.
If `nofail` is False (default), will raise an error if no test value is found.
Otherwise returns None
"""
if 'theano' in sys.modules and isinstance(var, _getT().sharedvar.SharedVariable):
retval = var.get_value()
elif 'theano' in sys.modules and isinstance(var, _gettheano().graph.basic.Variable):
try:
retval = var.tag.test_value
except AttributeError:
if nofail:
return None
else:
raise AttributeError("You've attempted to execute a function that "
"requires a test_value for the variable {} to "
"be set, and this value is not set.".format(var))
else:
retval = var
return retval
# TODO: Using the .get_test_value() method is likely better, when possible::
# def get_test_value(expr):
# if cf.library == 'theano':
# return expr.get_test_value()
# elif cf.library == 'numpy':
# return expr
# else:
# raise NotImplementedError
# # if hasattr(x, 'tag') and hasattr(x.tag, 'test_value'):
# # return x.tag.test_value
# # elif shim.isshared(x):
# # return x.get_value()
# # elif shim.is_constant(x):
# # return x.data
# # else:
# # logger.warning(f"Unable to compute test value for {x}.")
# # return None
######################
# Type checking
def istype(obj, type_str):
"""
Parameters
----------
obj: object
The object of which we want to check the type.
type_str: string or dtype or iterable
If `obj` is of this type, the function returns True,
otherwise it returns False. Valid values of `type_str`
are those expected for a dtype. Examples are:
- 'int', 'int32', 'uint', 'uint32', etc.
- 'float', 'float32', etc.
- any NumPy dtype
`type_str` can also be an iterable of aforementioned
strings. Function will return True if `obj` is of any
of the specified types
Returns
-------
bool
"""
# Wrap type_str if it was not passed as an iterable
if isinstance(type_str, str) or not isinstance(type_str, Iterable):
type_str = [type_str]
# Ensure we only have strings (not dtypes)
type_str = [str(ts) for ts in type_str]
# Check type
if ('theano' not in sys.modules
or not isinstance(obj, _gettheano().graph.basic.Variable)):
return builtins.any(ts in str(np.asarray(obj).dtype) for ts in type_str)
# We cast to string to be consistent with Theano, which uses
        # strings for its dtypes
else:
return builtins.any(ts in obj.dtype for ts in type_str)
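# Illustrative examples for `istype` (added sketch; values chosen for illustration):
#     istype(np.float32(1.0), 'float')        -> True   ('float' in 'float32')
#     istype(np.arange(3), ('int', 'uint'))   -> True
#     istype(1.5, 'int')                      -> False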
def _expand_args(arglst):
"""
Recursively expand slices, iterables, dictionaries into a list of scalar data type.
Scalars are returned as a 1 element list.
"""
if not isinstance(arglst, Iterable):
arglst = [arglst]
elif isinstance(arglst, dict):
arglst = [arglst]
elif ('theano' in sys.modules
and isinstance(arglst, _gettheano().graph.basic.Variable)):
arglst = [arglst]
elif isinstance(arglst, cf.TerminatingTypes):
arglst = [arglst]
for arg in arglst:
if 'theano' in sys.modules and isinstance(arg, _gettheano().graph.basic.Variable):
# Theano variables aren't iterable
yield arg
elif isinstance(arg, cf.TerminatingTypes):
yield arg
elif isinstance(arg, slice):
yield arg.start
yield arg.stop
yield arg.step
elif isinstance(arg, dict):
for key in arg.keys():
yield key
for val in arg.values():
#yield from nwlst.extend(_expand_args(val))
yield from _expand_args(val)
elif isinstance(arg, np.ndarray):
if arg.ndim == 0:
yield arg # can't iterate over a 0-dim array
else:
yield from _expand_args(arg)
elif isinstance(arg, Iterable):
try:
yield from _expand_args(arg)
except TypeError:
# Pint objects with scalars report 'iterable' but then fail
# on __iter__. Might be the case with other objects as well.
# For Pint, see https://github.com/hgrecco/pint-pandas/issues/33#issuecomment-647198749
# Should be fixed by this currently open PR https://github.com/hgrecco/pint/pull/1125
yield arg
else:
yield arg
def is_graph_object(*obj):
# return 'theano' in sys.modules and builtins.any(isinstance(o, _gettheano().graph.basic.Variable)
return 'theano' in sys.modules and builtins.any(isinstance(o, cf.GraphTypes)
for o in _expand_args(obj))
is_theano_object = is_graph_object
def is_constant(*obj):
# Both symbolic and shared objects return False
return 'theano' not in sys.modules or builtins.all(
isinstance(c, cf.ConstantTypes)
for c in _expand_args(obj))
def is_pure_symbolic(*var):
"""
Todo
----
There seems to be some redundancy between ``is_pure_symbolic(x)``
and ``not graph.is_computable(x)``.
"""
# return 'theano' in sys.modules and builtins.any(isinstance(v, _gettheano().tensor.TensorVariable)
return 'theano' in sys.modules and builtins.any(isinstance(v, cf.PureSymbolicTypes)
for v in _expand_args(var))
is_theano_variable = is_pure_symbolic
def is_symbolic(*var):
return 'theano' in sys.modules and builtins.any(
isinstance(v, cf.GraphTypes)
and not isinstance(v, cf.ConstantTypes)
for v in _expand_args(var))
issymbolic = is_symbolic # With NumPy having no consistent convention, it's nigh to impossible to memorize, so just accept both
def is_shimmed_or_symbolic(*var):
return builtins.any(isinstance(v, cf.ShimmedAndGraphTypes) for v in _expand_args(var))
def isshared(var):
return isinstance(var, cf.SharedTypes)
# def isshared(*var):
# if 'theano' in sys.modules:
# return builtins.any(isinstance(v, (cf.SymbolicSharedType, ShimmedTensorShared))
# for v in _expand_args(var))
# else:
# return builtins.any(isinstance(v, ShimmedTensorShared)
# for v in _expand_args(var))
#######################
# Casting functions
def can_cast(from_, dtype, casting='safe'):
# As far as I can tell, `np.can_cast` also works on Theano types.
return np.can_cast(from_, dtype, casting=casting)
def cast(x, dtype, same_kind=True):
"""
Parameters
----------
x: scalar, array or Theano variable
The variable to cast
dtype: str
The type to which cast the variable. One of
- 'int8'
- 'int16'
- 'int32'
- 'int64'
- 'uint8'
- 'uint16'
- 'uint32'
- 'uint64'
- 'float16'
- 'float32'
- 'float64'
Can optionally be an equivalent numpy dtype, as returned by
<numpy var>.dtype.
same_kind: bool
When `same_kind` is `True`, only casts e.g. between 'float32' and 'float64'
are permitted; others raise `TypeError`.
"""
if isinstance(dtype, np.dtype):
dtype = str(dtype)
elif isinstance(dtype, type) and issubclass(dtype, np.generic):
dtype = str(np.dtype(dtype))
elif dtype == 'floatX':
dtype = cf.floatX
if same_kind:
# Test that arguments are of the same kind
# We get the 'kind' by stripping the number from dtype's string
dtype_x = x.dtype if hasattr(x, 'dtype') else asarray(x).dtype
kind_x = ''.join(c for c in str(dtype_x) if c.isalpha())
kind_dtype = ''.join(c for c in str(dtype) if c.isalpha())
if kind_x != kind_dtype:
raise TypeError("Unsafe cast: trying to convert a {} to a {}. "
"If you want to disable this check, pass "
"`same_kind=False` to `cast()`"
.format(asarray(x).dtype, dtype))
# FIXME: Why did I need this test ? If I have a plain Python variable,
# I *should* cast to a numpy dtype.
# if str(asarray(x).dtype) == dtype:
# # Already the right dtype; no conversion to make
# return x
if is_theano_object(x):
return T.cast(x, dtype)
elif hasattr(x, 'astype'):
return x.astype(dtype)
else:
if np.__version__ >= '1.19':
return np.dtype(dtype).type(x)
else:
# I don't remember which NumPy version I was using when I added the
# keepdims arg, but I'm pretty sure it was required then
return np.dtype(dtype).type(x, keepdims=True)
def cast_floatX(x, same_kind=True):
return cast(x, dtype=cf.floatX, same_kind=same_kind)
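# Illustrative examples for `cast` (added sketch; values chosen for illustration):
#     cast(np.float64(3.7), 'float32')                  -> np.float32(3.7)
#     cast(np.float64(3.7), 'int32')                    -> raises TypeError (cross-kind)
#     cast(np.float64(3.7), 'int32', same_kind=False)   -> np.int32(3)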
#####################
# Simple convenience functions
def round(x):
try:
res = x.round() # Theano variables have a round method
except AttributeError:
res = round(x)
return res
def asvariable(x, dtype=None, name=None):
if 'theano' in sys.modules:
# No `isinstance` here: the point is to cast to a Theano variable
if dtype is not None:
return cast(T.as_tensor_variable(x, name=name), dtype)
else:
return T.as_tensor_variable(x, name=name)
else:
return np.asarray(x, dtype=dtype)
def asarray(x, dtype=None, broadcastable=None, symbolic=None):
"""Make x array-like.
Note that if broadcastable is not None, and that Theano is loaded,
the return value will always be a Theano variable, even if x is
pure Python or Numpy. This is because `broadcastable` is a Theano-only
property.
Parameters
----------
x: (scalar | array) | (numeric | symbolic)
The value we want to ensure is array-like.
dtype: str | dtype (optional)
If ≠ None, ensure the result is of this type
broadcastable: Tuple[bool] (optional)
Broadcast pattern
This is a Theano-only argument, and will force the result to be symbolic.
symbolic: bool
Override automatic selection of numeric vs symbolic. Useful to force
symbolic output when the inputs are all numeric.
Setting ``symbolic=False`` with symbolic arguments or `broadcastable`
≠ ``None`` will raise an error.
Raises
------
ValueError:
If `x` is symbolic or `broadcastable` ≠ ``None``, but `symbolic` is
``False``.
TypeError:
If `x` is symbolic or `broadcastable` ≠ ``None`` but
`config.use_theano` is False.
"""
_symbolic = 'theano' in sys.modules and isinstance(x, _gettheano().graph.basic.Variable)
if symbolic is None:
symbolic = _symbolic
elif symbolic is False and _symbolic is True:
raise ValueError("Cannot force a symbolic variable to be numeric.")
if (symbolic or broadcastable is not None) and not cf.use_theano:
raise TypeError("Attempting to create a symbolic array while "
"`shim.config.use_theano` is False.")
if symbolic:
T = _getT()
if dtype is not None:
retval = T.cast(T.as_tensor_variable(x), dtype)
else:
retval = T.as_tensor_variable(x)
else:
retval = np.asarray(x, dtype=dtype)
if cf.use_theano and broadcastable is not None:
# Only Theano variables carry broadcasting information
retval = T.as_tensor_variable(retval)
if broadcastable is not None:
for i, (vc, vn) in enumerate(zip(retval.broadcastable,
broadcastable)):
if vc != vn and vn:
retval = T.addbroadcast(retval, i)
elif vc != vn and not vn:
retval = T.unbroadcast(retval, i)
return retval
def asscalar(x):
if isscalar(x) and not hasattr(x, 'ndim'):
# shim.isscalar(x) returns True for 0-dim arrays
return x
elif is_theano_object(x):
if builtins.all(x.broadcastable):
return T.flatten(x)[0]
else:
raise ValueError("To cast a Theano tensor as a scalar, "
"all its dimensions must be broadcastable.")
else:
return np.asscalar(x)
def isscalar(x):
"""
Return True if `x` is a scalar.
Note that in contrast to Numpy's isscalar, this returns True for 0-dim arrays.
"""
arrayed_x = asarray(x)
return asarray(x).ndim == 0 and arrayed_x.dtype != 'object'
def isarray(x):
# Some scalar numpy types (e.g. np.int64) have the 'ndim' attribute
return (not np.isscalar(x)) and hasattr(x, 'ndim')
def issparse(var):
"""Return True if `var` is any recognized sparse format."""
if 'theano.sparse' in sys.modules:
return (sp.sparse.issparse(var)
or isinstance(var, sys.modules['theano.sparse'].basic.SparseVariable))
else:
return sp.sparse.issparse(var)
def isspsparse(var):
"""Return True if `var` is sparse with `scipy.sparse` interface.
True for scipy.sparse, theano.sparse."""
if 'theano.sparse' in sys.modules:
return (sp.sparse.issparse(var)
or isinstance(var, sys.modules['theano.sparse'].basic.SparseVariable))
else:
return sp.sparse.issparse(var)
def flatten(x, outdim=1):
if 'theano' in sys.modules and isinstance(x, theano.graph.basic.Variable):
return T.flatten(x, outdim)
else:
outshape = x.shape[:outdim-1] + (np.prod(x.shape[outdim-1:]), )
return x.reshape(outshape)
def addbroadcast(x, *axes):
"""
Equivalent to theano.tensor.addbroadcast.
For NumPy objects, checks that broadcasted dimensions have length 1,
but otherwise does nothing.
Compared to the Theano version, negative values for axes are supported:
-1 refers to the last axis, -2 to the second last, etc.
"""
if is_theano_object(x):
# T.addbroadcast only works with positive axes
axes = [ ax if ax >= 0 else x.ndim + ax for ax in axes ]
return T.addbroadcast(x, *axes)
else:
for ax in axes:
if x.shape[ax] != 1:
raise ValueError("Tried to make axis {} of a variable with shape {} broadcastable. "
"Only dimensions with length 1 can be broadcasted."
.format(ax, x.shape))
return x
# def eval(x, *args, **kwargs):
# """
# If `x` is has an 'eval' method, return `x.eval(*args, **kwargs)`. Otherwise just
# return `x`. In the latter case, `*args` and `**kwargs` are ignored, and a
# warning is printed if they are not empty.
# """
# if hasattr(x, 'eval'):
# return x.eval(*args, **kwargs)
# else:
# if len(args) + len(kwargs) > 0:
# logger.warning("Ignoring arguments to `eval`: object does not have "
# "an `eval` method.")
# return x
#####################
# Convenience function for max / min
def largest(*args):
"""Element-wise max operation."""
if len(args) == 0:
raise TypeError("`largest` expects at least one argument.")
elif len(args) == 1:
return args[0]
if 'theano' in sys.modules and builtins.any(isinstance(arg, _gettheano().graph.basic.Variable) for arg in args):
return _getT().largest(*args)
else:
retval = np.maximum(args[0], args[1])
for arg in args[2:]:
retval = np.maximum(retval, arg)
return retval
def smallest(*args):
"""Element-wise min operation."""
if len(args) == 0:
raise TypeError("`smallest` expects at least one argument.")
if len(args) == 1:
return args[0]
if 'theano' in sys.modules and builtins.any(isinstance(arg, _gettheano().graph.basic.Variable) for arg in args):
return _getT().smallest(*args)
else:
retval = np.minimum(args[0], args[1])
for arg in args[2:]:
retval = np.minimum(retval, arg)
return retval
def abs(x):
if 'theano' in sys.modules and isinstance(x, _gettheano().graph.basic.Variable):
if x.ndim == 2:
return __builtins__['abs'](x)
else:
# Theano requires 2D objects for abs
shape = x.shape
return __builtins__['abs'](add_axes(x.flatten())).reshape(shape)
else:
return __builtins__['abs'](x)
######################
# Logical and comparison operators
def lt(a, b):
if (cf.use_theano and (isinstance(a, theano.graph.basic.Variable)
or isinstance(b, theano.graph.basic.Variable))):
return T.lt(a, b)
else:
return a < b
def le(a, b):
if (cf.use_theano and (isinstance(a, theano.graph.basic.Variable)
or isinstance(b, theano.graph.basic.Variable))):
return T.le(a, b)
else:
return a <= b
def gt(a, b):
if (cf.use_theano and (isinstance(a, theano.graph.basic.Variable)
or isinstance(b, theano.graph.basic.Variable))):
return T.gt(a, b)
else:
return a > b
def ge(a, b):
if (cf.use_theano and (isinstance(a, theano.graph.basic.Variable)
or isinstance(b, theano.graph.basic.Variable))):
return T.ge(a, b)
else:
return a >= b
def eq(a, b):
if (cf.use_theano and (isinstance(a, theano.graph.basic.Variable)
or isinstance(b, theano.graph.basic.Variable))):
return T.eq(a, b)
else:
return a == b
def bool(a):
"""
Call this function on any expression that might
appear in a Theano graph as a boolean (Theano expects
integers rather than booleans.)
"""
# Booleans need to be converted to integers for Theano
if cf.use_theano and isinstance(a, (builtins.bool, np.bool_)):
return np.int8(a)
elif cf.use_theano or is_theano_object(a):
return a
else:
return builtins.bool(a)
def and_(a, b):
# Special case scalars so they don't return length 1 arrays
if isscalar(a) and isscalar(b):
return bool(bool(a) * bool(b))
# matrix function
if ('theano' in sys.modules and (isinstance(a, _gettheano().graph.basic.Variable)
or isinstance(b, _gettheano().graph.basic.Variable))):
return _getT().and_(a, b)
else:
return np.logical_and(a, b)
def or_(a, b):
# Special case scalars so they don't return length 1 arrays
if isscalar(a) and isscalar(b):
return bool(bool(a) + bool(b))
# matrix function
if ('theano' in sys.modules and (isinstance(a, _gettheano().graph.basic.Variable)
or isinstance(b, _gettheano().graph.basic.Variable))):
return _getT().or_(a, b)
else:
return np.logical_or(a, b)
######################
# Conditionals
def ifelse(condition, then_branch, else_branch, name=None, outshape=None):
"""
All parameters except `outshape` are the same as for theano.ifelse.ifelse
`outshape` is an extra parameter to allow the then_branch and else_branch
to have a different shape: the output will be reshaped into this form, but
only if Theano is used. The reason we need this is as follows:
Suppose we have a vector x which should be reshaped to (2,2). We might write
(in pseudocode)::
ifelse(x.shape == (2,),
concatenate((x, x)),
x.reshape((2,2)))
The Python version of this code has no trouble, because the correct branch
will always reshape to (2,2). However, the Theano version wants a result with
a well defined shape. Here the branch with `concatenate((x,x))` won't in
general have the same shape as `x.reshape((2,2))`.
We can get around this by defining `outshape=(2,2)` and writing instead::
ifelse(x.shape == (2,),
concatenate((x, x)).reshape(outshape),
x.reshape((2,2)).reshape(outshape))
Now this makes Theano happy, but Python with its greedy evaluation
evaluates both arguments before calling ifelse. So if x.shape=(2,2), the
call will fail on `concatenate((x,x)).reshape(outshape)`. The solution
is to only apply the reshape when using Theano, which is what specifying
`outshape` as an argument does.
"""
# First check if we can replace an Theano conditional by a Python one
if is_theano_object(condition) and is_constant(condition):
condition = bool(condition.data)
# Now the actual function
if (cf.use_theano
and not isinstance(condition, builtins.bool)
and (isinstance(condition, theano.graph.basic.Variable)
or isinstance(then_branch, theano.graph.basic.Variable)
or isinstance(else_branch, theano.graph.basic.Variable))):
# Theano function
if isinstance(then_branch, LazyEval):
then_branch = then_branch.eval()
if isinstance(else_branch, LazyEval):
else_branch = else_branch.eval()
if outshape is None:
# We call `bool` on the condition, in case it's a Python boolean
# (even shim.ge & friends can return bools)
return theano.ifelse.ifelse(bool(condition), then_branch,
else_branch, name)
else:
return theano.ifelse.ifelse(bool(condition), then_branch.reshape(outshape),
else_branch.reshape(outshape), name)
else:
# Python function
if condition:
if isinstance(then_branch, LazyEval):
then_branch = then_branch.eval()
return then_branch
else:
if isinstance(else_branch, LazyEval):
else_branch = else_branch.eval()
return else_branch
def switch(cond, ift, iff):
"""
For the equivalent to the single-argument version of `np.where`,
see `nonzero`.
"""
if (cf.use_theano and (isinstance(cond, theano.graph.basic.Variable)
or isinstance(ift, theano.graph.basic.Variable)
or isinstance(iff, theano.graph.basic.Variable))):
return T.switch(cond, ift, iff)
else:
return np.where(cond, ift, iff)
where = switch
where.__doc__ = """Alias for `switch`."""
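# Illustrative example (added sketch): like `np.where`, `switch` acts elementwise, e.g.
#     switch(np.array([True, False]), 1, -1)  -> array([ 1, -1])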
#####################
# Loop constructs
def scan(fn, sequences=None, outputs_info=None, non_sequences=None, n_steps=None,
truncate_gradient=-1, go_backwards=False, mode=None, name=None, profile=False,
allow_gc=None, strict=False, return_list=False):
"""
WIP: Does not support taps. When using NumPy, every argument after `n_steps`
except `return_list` is ignored.
"""
if is_theano_object(sequences, outputs_info, non_sequences, n_steps):
return gettheano().scan(
fn, sequences, outputs_info, non_sequences, n_steps,
truncate_gradient, go_backwards, mode, name, profile, allow_gc,
strict, return_list)
else:
if not isinstance(sequences, (tuple, list)):
sequences = (sequences,)
if non_sequences is None:
non_sequences = ()
if isinstance(outputs_info, dict):
raise TypeError("Taps not yet supported.")
if n_steps is None:
n_steps = len(sequences[0])
accumulator = [np.zeros((n_steps,) + o.shape) for o in outputs_info]
cur_val = outputs_info
for t, i in zip(zip(*sequences), range(n_steps)):
cur_val, updates = fn(*t, *cur_val, *non_sequences)
for a, v in zip(accumulator, cur_val):
a[i] = v
if len(accumulator) == 1 and not return_list:
accumulator = accumulator[0]
if len(updates) > 0:
logger.warning("NumPy `scan` produced updates for: {}, which were "
"ignored.".format(updates.keys()))
return accumulator, updates
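# Hedged sketch of the NumPy code path of `scan` (the helper below is an
# assumption, not part of the original module): `fn` must return a tuple of
# `(outputs, updates)`, matching the unpacking done in the loop above.
def _demo_numpy_scan():
    seq = np.arange(5.)
    def step(x_t, acc):
        return (acc + x_t,), {}
    out, _ = scan(step, sequences=seq, outputs_info=(np.array(0.),))
    return out  # running sums: [0., 1., 3., 6., 10.]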
#####################
# Random number generation
class NumpyRNG(np.random.RandomState):
"""
Note: For compatibility with Theano random streams, `size=None` is
replaced with `size=()`, which returns a scalar array instead of
a plain float.
"""
# We inherit from the legacy RNG because that's what Theano uses.
# def __init__(self, seed=None):
# self.seed(seed)
#
# def seed(self, seed=None):
# np.random.seed(seed)
#
def normal(self, size=(), avg=0.0, std=1.0, ndim=None, name=None):
return super().normal(loc=avg, scale=std, size=size)
def uniform(self, size=(), low=0.0, high=1.0, ndim=None, name=None):
return super().uniform(low, high, size)
def binomial(self, size=(), n=1, p=0.5, ndim=None, name=None):
return super().binomial(n, p, size)
@property
def gen_seedgen(self):
return self
def make_TheanoRNG(rng_class):
"""
This function is deprecated if you can import `RandomStream` from
`theano.tensor.random.utils`.
"""
def add_kwarg_name(f):
def wrapper(self, *args, **kwargs):
name = kwargs.pop('name', None)
sf = getattr(super(type(self), self), f.__name__)
rndstream = sf(*args, **kwargs)
if name is not None: rndstream.name = name
return rndstream
return wrapper
class TheanoRNG(rng_class):
"""
Wraps Theano RNG to allow for passing `name` as keyword argument when
instantiating a random stream.
"""
@add_kwarg_name
def normal(self, size=(), avg=0.0, std=1.0, ndim=None, name=None):
pass
@add_kwarg_name
def uniform(self, size=(), low=0.0, high=1.0, ndim=None, name=None):
pass
@add_kwarg_name
def binomial(self, size=(), n=1, p=0.5, ndim=None, name=None):
pass
return TheanoRNG
def copy_random_state(from_rng, to_rng):
"""
Set the state of the random number generator (RNG) :param:to so that it
matches that of :param:from.
Parameters
----------
from: theano RandomStream | MRG_RandomStream
to: theano RandomStream | MRG_RandomStream
"""
# Based on a function defined in the Theano docs: http://deeplearning.net/software/theano/tutorial/examples.html#copying-random-state-between-theano-graphs
# Ensure the two RNGs are of the same type
assert type(from_rng) is type(to_rng)
# Ensure that their state updates are consistent
# `str(su1[1])` retrieves something like `RandomFunction{uniform}.1`
assert len(from_rng.state_updates) == len(to_rng.state_updates)
assert builtins.all(str(su1[1]) == str(su2[1])
for su1, su2 in zip(from_rng.state_updates,
to_rng.state_updates))
if isinstance(from_rng, _get_rng_mrg().MRG_RandomStream):
to_rng.rstate = from_rng.rstate
for (su1, su2) in zip(from_rng.state_updates, to_rng.state_updates):
su2[0].set_value(su1[0].get_value())
def reseed_rng(rng, new_seed):
"""
For Numpy legacy RandomState, just calls `rng.seed`.
For Numpy Generator, sets the state of the underlying `BitGenerator` as
though it had just been created with `BitGenerator(new_seed)`.
For Theano, reseeds both the seeds of the current random streams, and
the seed generator for future ones.
"""
if isinstance(rng, np.random.RandomState):
rng.seed(new_seed)
elif isinstance(rng, np.random.Generator):
rng.bit_generator.state = type(rng.bit_generator)(new_seed).state
#elif is_symbolic(rng):
elif isinstance(rng, cf.SymbolicNumpyRNGType):
# I don't know why Theano chose to create a throwaway seedgen inside `seed`,
# but it means that to set reliable seeds for both current and new RNG streams,
# we need to emulate `gen_seedgen` being used to reseed the RNGs.
# `rng.seed` reseeds existing RNG streams, calling `seedgen.randint(2**30)`
# as many times as there are RNG streams
rng.seed(new_seed)
# Reseed the gen_seedgen for new RNGs, and advance it as though it was
# used in `seed`.
rng.gen_seedgen.seed(new_seed)
for i in range(len(rng.state_updates)):
rng.randint(2**30)
elif isinstance(rng, cf.SymbolicMRGRNGType):
from .theano_types import MRG_RNG
# Reset the rstate
rng.seed(new_seed)
# rng.rstate = MRG_RNG(new_seed).rstate
# TODO: Check whether we need to advance the state as we do with
# with SymbolicNumpyRNGType (code below can't work: MRG_RNG does not
# define randint)
# for i in range(len(rng.state_updates)):
# rng.randint(2**30)
else:
raise RuntimeError(f"Unrecognized RNG type; received {rng} (type: {type(rng)}).")
######################
# Tensor constructors
def shape_to_broadcast(shape):
"""
Returns the default broadcastable pattern for a shape, replacing
1s with `True`.
"""
return tuple(n==1 for n in shape)
def constant(x, name=None, ndim=None, dtype=None):
if cf.use_theano:
return _getT().constant(x, name=name, ndim=ndim, dtype=dtype)
else:
x_ = np.dtype(dtype).type(x)
if ndim is not None:
# Copied from theano.tensor.constant
if x_.ndim < ndim:
x_ = np.expand_dims(x_, axis=tuple(range(ndim - x_.ndim)))
elif x_.ndim > ndim:
try:
x_ = np.squeeze(x_, axis=tuple(range(x_.ndim - ndim)))
except np.AxisError:
raise ValueError(
f"ndarray could not be cast to constant with {int(ndim)} dimensions"
)
assert x_.ndim == ndim
return x_
def tensor(object, name=None, dtype=None):
"""
Make an object into a tensor. If `object` is a numpy array, a new tensor
matching its shape and dtype is returned. The array values are used to set
the test value.
Not implemented: creating tensors from scalar objects.
Examples:
>>> import numpy as np
>>> import theano_shim as shim
>>> a = np.arange(5)
>>> x = shim.tensor(a)
>>> x2 = shim.tensor(a, name='a')
>>> y = shim.tensor((5,), dtype='float64')
>>> z = shim.tensor((5,3), name='z', dtype='int32')
"""
# Try to infer the tensor shape, test_value, dtype and broadcast pattern
broadcastable = None
shape = None
if isinstance(object, np.ndarray):
# Numpy arrays become the symbolic's test value
shape = object.shape
test_value = object
if dtype is None: dtype = object.dtype
broadcastable = shape_to_broadcast(shape)
elif isinstance(object, Number):
# Scalar inputs become 0-dim arrays
shape = ()
test_value = object
if dtype is None: dtype = str(np.dtype(type(object)))
broadcastable = ()
elif hasattr(object, 'broadcastable'):
# Theano symbolics end up here
# shape = object.shape # This is going to be a symbolic expression
if dtype is None: dtype = object.dtype
broadcastable = object.broadcastable
if name is None:
name = f"{object.name} (tensor)"
if hasattr(object.tag, 'test_value'):
test_value = object.tag.test_value
elif isshared(object):
test_value = object.get_value()
else:
# Not possible to set test_value
test_value = None
if not cf.use_theano:
raise TypeError("Somehow you specified what looks like a symbolic "
"object, yet Theano is not loaded.\n"
f"object: {object}\ntype: {type(object)}")
elif isinstance(object, tuple):
        # All we have is a shape – we use an array of ones as the test_value
shape = object
if dtype is None:
raise TypeError(
"You must specify `dtype` if `object` does not provide one.")
test_value = np.ones(shape, dtype=dtype)
broadcastable = shape_to_broadcast(shape)
else:
raise TypeError("Unrecognized input type for `theano_shim.tensor`: "
f"{object} (type: {type(object)}.")
if not cf.use_theano:
# `test_value` should be defined at this point
return np.array(test_value, dtype=dtype)
else:
if broadcastable is None: broadcastable = shape_to_broadcast(shape)
tensor = getT().tensor(dtype, broadcastable, name=name)
if test_value is not None:
tensor.tag.test_value = test_value
return tensor
######################
# Shared variable constructor
class ShimmedTensorShared(np.ndarray):
# See https://docs.scipy.org/doc/numpy/user/basics.subclassing.html
# for indications on subclassing ndarray
def __new__(cls, value, name=None, strict=False, allow_downcast=None, **kwargs):
if not isinstance(value, np.ndarray):
value = np.asarray(value)
if hasattr(value, 'shim_class'):
cls = value.shim_class
obj = value.view(cls).copy()
obj.name = name
return obj
def __array_finalize__(self, obj):
if obj is None: return
self.name = getattr(obj, 'name', None)
# We are emulating theano.shared, where different instances
# are considered distinct
def __hash__(self):
return id(self)
def __eq__(self, other):
return id(self) == id(other)
def _as_TensorVariable(self):
# Allow mixing of ShimmedTensorShared and Theano variables
# Theano looks for this function first when multiplying with non-Theano types
if cf.use_theano:
return T.constant(self.get_value())
else:
return self.get_value()
# Usual theano.shared interface
def get_value(self, borrow=False, return_internal_type=False):
return self.view(np.ndarray)
    # On values obtained by get_value, equality testing should
# follow the usual rules for arrays, hence the view(np.ndarray)
def set_value(self, new_value, borrow=False):
"""
        If `new_value` has a different shape than the stored variable, the
        underlying array is resized in place to match it.
"""
new_value = np.asarray(new_value)
try:
if self.shape != new_value.shape:
self.resize(new_value.shape, refcheck=False)
# refcheck is necessary to get this to work, but bypasses
# the reference checks. Reference errors might occur if
# a reference to this ShimmedTensorShared variable exists elsewhere,
# and we try to access it after the resize. This is the kind
# of thing you shouldn't do anyway with Theano variables.
self[:] = new_value
except IndexError:
# Scalars will fail on the above
assert(isscalar(new_value))
# np.isscalar will fail on 0-dim arrays; isscalar works
self = super(ShimmedTensorShared, self).__setitem__(None, new_value)
def eval(self, inputs_to_values=None):
return self.get_value()
@property
def broadcastable(self):
"""For Numpy arrays, an axis is broadcastable iff it has length one."""
return tuple(s==1 for s in self.shape)
cf.add_terminating_types([ShimmedTensorShared])
cf._shared_types += (ShimmedTensorShared,)
def shared(value, name=None, strict=False, allow_downcast=None, symbolic=True,
**kwargs):
"""
In contrast to Theano's `shared()`, the broadcast pattern is set to be
compatible with NumPy's behaviour; i.e., any axis in `value` with dimension
1 is considered broadcastable by default.
    As with Theano's `shared()`, the broadcast pattern can be changed by passing
the :param:broadcastable keyword argument.
"""
if not isinstance(value, np.ndarray):
value = np.asarray(value)
if 'dtype' in kwargs:
logger.warning("You passed the keyword 'dtype' to the shared constructor. "
"Theano doesn't support this keyword for shared variables.")
if symbolic and cf.use_theano:
# Unless a broadcast pattern is specified, we create one to match
# the NumPy behaviour (broadcastable on all axes of dimension 1).
broadcast_pattern = kwargs.pop('broadcastable', None)
if broadcast_pattern is None:
broadcast_pattern = tuple(True if s==1 else False for s in value.shape)
return theano.shared(value, name, strict, allow_downcast,
broadcastable=broadcast_pattern, **kwargs)
else:
return ShimmedTensorShared(value, name, strict, allow_downcast, **kwargs)
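# Illustrative usage of the non-symbolic path (not part of the original source):
#   >>> s = shared(np.zeros((2, 1)), name='s', symbolic=False)
#   >>> s.broadcastable
#   (False, True)
#   >>> s.set_value(np.ones((2, 1)))   # in-place update of the stored array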
######################
# Interchangeable set_subtensor
def set_subtensor(x, y, inplace=False, tolerate_aliasing=False):
if 'theano' in sys.modules and (isinstance(x, _gettheano().graph.basic.Variable)
or isinstance(y, _gettheano().graph.basic.Variable)):
return _getT().set_subtensor(x, y, inplace, tolerate_aliasing)
else:
assert x.base is not None
# Ensure that x is a view of another ndarray
assert x.shape == y.shape
x[:] = y
return x.base
def inc_subtensor(x, y, inplace=False, tolerate_aliasing=False):
if 'theano' in sys.modules and (isinstance(x, _gettheano().graph.basic.Variable)
or isinstance(y, _gettheano().graph.basic.Variable)):
return T.inc_subtensor(x, y, inplace, tolerate_aliasing)
else:
assert x.base is not None
# Ensure that x is a view of another ndarray
# assert x.shape == y.shape
x[:] += y
return x.base
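# Illustrative NumPy-path usage for the two helpers above (not part of the
# original source). Both require `x` to be a view into a base array, which
# receives the update:
#   >>> base = np.zeros(4); view = base[1:3]
#   >>> set_subtensor(view, np.array([1., 2.]))   # base is now [0., 1., 2., 0.]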
# TODO: Deprecate: numpy arrays have ndim
def get_ndims(x):
if cf.use_theano and isinstance(x, theano.graph.basic.Variable):
return x.ndim
else:
return len(x.shape)
######################
# Axis manipulation functions
# E.g. to treat a scalar as a 1x1 matrix
def reshape(array, newshape, ndim=None):
if is_theano_object(array):
return array.reshape(newshape, ndim)
else:
return array.reshape(newshape)
def atleast_1d(*arrays):
"""
In contrast to `numpy.atleast_1d`, will not cast lists or tuples to arrays.
This is to allow lists of symbolic variables.
"""
if len(arrays) == 1:
a = arrays[0]
if isscalar(a):
a = add_axes(a, 1)
return a
else:
assert len(arrays) > 1
return [atleast_1d(a) for a in arrays]
def add_axes(x, num=1, pos='left'):
"""
Add an axis to `x`, e.g. to treat a scalar as a 1x1 matrix.
String arguments for `pos` should cover most typical use cases;
for more complex operations, like adding axes to the middle,
specify the insertion position for the axes directly.
Parameters
----------
num: int
Number of axes to add. Default: 1.
pos: 'before' | 'left' | 'after' | 'right' | 'before last' | int
    - 'before', 'left', 'begin', 'first' turns a 1D vector into a row vector. (Default)
    - 'after', 'right', 'end', 'last' turns a 1D vector into a column vector.
    - 'before last' adds axes to the second-last position.
      Equivalent to 'left' on 1D vectors.
- An integer adds the axes before this position
+ 0 : equivalent to 'before'
+ -1 : equivalent to 'before last'
+ `x.ndim` : equivalent to 'after'
"""
if is_theano_object(x):
if pos in ['left', 'before', 'begin', 'first']:
shuffle_pattern = ['x']*num
shuffle_pattern.extend(range(x.ndim))
elif pos in ['right', 'after', 'end', 'last']:
shuffle_pattern = list(range(x.ndim))
shuffle_pattern.extend( ['x']*num )
elif pos == 'before last':
shuffle_pattern = list(range(x.ndim))
shuffle_pattern = shuffle_pattern[:-1] + ['x']*num + shuffle_pattern[-1:]
else:
try:
shuffle_pattern = list(range(x.ndim))
shuffle_pattern = shuffle_pattern[:pos] + ['x']*num + shuffle_pattern[pos:]
except TypeError:
raise ValueError("Unrecognized argument `{}` for pos.".format(pos))
return x.dimshuffle(shuffle_pattern)
else:
x = np.asarray(x)
if pos in ['left', 'before', 'begin', 'first']:
return x.reshape( (1,)*num + x.shape )
elif pos in ['right', 'after', 'end', 'last']:
return x.reshape( x.shape + (1,)*num )
elif pos == 'before last':
return x.reshape( x.shape[:-1] + (1,)*num + x.shape[-1:] )
else:
try:
return x.reshape( x.shape[:pos] + (1,)*num + x.shape[pos:] )
except TypeError:
raise ValueError("Unrecognized argument {} for pos.".format(pos))
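# Illustrative examples for the NumPy code path (not part of the original source):
#   >>> add_axes(np.ones(3), 1, 'left').shape
#   (1, 3)
#   >>> add_axes(np.ones(3), 1, 'right').shape
#   (3, 1)
#   >>> add_axes(np.ones((2, 3)), 1, 'before last').shape
#   (2, 1, 3)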
def moveaxis(a, source, destination):
    if is_theano_object(a):
        axes_lst = list(range(a.ndim))
        axes_lst.pop(source)
        axes_lst = axes_lst[:destination] + [source] + axes_lst[destination:]
        return a.dimshuffle(axes_lst)
    else:
        return np.moveaxis(a, source, destination)
def pad(array, array_shape, pad_width, mode='constant', **kwargs):
"""
All parameters except `array_shape` are the same as for np.pad.
`array_shape` is necessary because while we can deal with a Theano array,
we need to know its shape.
"""
if mode not in ['constant']:
raise ValueError("theano_shim does not support mode '{}'".format(mode))
if not is_theano_object(array):
assert(array.shape == array_shape)
# If this fails, than the Theano code will also fail
# (perhaps cryptically).
return np.pad(array, pad_width, mode, **kwargs)
elif isshared(array):
assert(array.get_value(borrow=True).shape == array_shape)
return np.pad(array.get_value(borrow=True), pad_width, mode, **kwargs)
else:
def expand_arg(arg):
if isscalar(arg):
arg = (arg, arg) # before, after
if isscalar(arg[0]):
if len(arg) == 1:
arg = (arg[0], arg[0])
arg = (arg,)
if len(arg) == 1:
assert(isinstance(arg, (tuple, list)))
arg = arg * array.ndim
assert(len(arg) == array.ndim)
assert(builtins.all(len(tup) == 2 for tup in arg))
return arg
pad_width = expand_arg(pad_width)
if mode == 'constant':
vals = kwargs.pop('constant_values', None)
if vals is None:
vals = 0
vals = expand_arg(vals)
res = array
new_shape = tuple( w[0] + shape + w[1]
for w, shape in zip(pad_width, array_shape) )
for i, (w, v) in enumerate(zip(pad_width, vals)):
if (w[0] != 0 or w[1] != 0):
shape1 = new_shape[:i] + (w[0],) + array_shape[i+1:]
shape2 = new_shape[:i] + (w[1],) + array_shape[i+1:]
res = T.concatenate( ( np.ones(shape1)*v[0],
res,
np.ones(shape2)*v[1]),
axis=i)
return res
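# Illustrative NumPy-path usage (not part of the original source): once the
# shape check passes, the call defers to np.pad.
#   >>> pad(np.arange(3.), (3,), 1)
#   array([0., 0., 1., 2., 0.])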
########################
# Functions from scipy.misc
def factorial(n, exact=False):
"""Note: the Theano version uses `gamma` regardless of `exact`"""
assert(istype(n, 'int'))
check(np.all(n >= 0))
if is_theano_object(n):
return T.gamma(n+1)
else:
        return sp.special.factorial(n, exact)
########################
# Wrapper for discrete 1D convolutions
# TODO: Use fftconvolve if ~500 time bins or more
def conv1d(data_arr, kernel_arr, tarr_len, discrete_kernel_shape, mode='valid'):
"""
Convolve each component of data_arr with kernel_arr and stack the result
into an array. data_arr is an NxM array, where N is the number of time bins
    and M the number of components. kernel_arr is an MxM array, for which the
element with index (i,j) represents the contribution of component j to
component i. (Consistent with a dot product where the kernel is on the left.)
In other words, each row j of kernel_arr is convolved with the row j of data_arr.
Parameters
----------
data_arr : 2D ndarray or theano.tensor
NxM array
kernel_arr : 2D ndarray | theano.tensor
MxM array
    tarr_len : int
        The length of the history's time array. Theano can't determine the
        shape from a tensor, so it is specified separately. When computing
        using NumPy, it is validated against data_arr.shape[0].
discrete_kernel_shape : tuple
Shape of the discrete kernel array. Theano can't determine the shape
from a tensor, so it is specified separately. When computing using
NumPy, this is checked for consistency.
Returns
-------
ndarray:
        Result has shape (tarr_len - discrete_kernel_shape[0] + 1,) + discrete_kernel_shape[1:].
"""
assert(data_arr.ndim == 2)
output_shape = discrete_kernel_shape[1:]
if (kernel_arr.ndim == 2):
# Algorithm assumes a "to" axis on the kernel. Add it.
kernel_arr = add_axes(kernel_arr, 1, 'before last')
discrete_kernel_shape = discrete_kernel_shape[0:1] + (1,) + discrete_kernel_shape[1:2]
else:
check(kernel_arr.ndim == 3)
# Convolutions leave the time component on the inside, but we want it on the outside
# So we do the iterations in reverse order, and flip the result with transpose()
# The result is indexed as [tidx][to idx][from idx]
if cf.use_theano:
# We use slices from_idx:from_idx+1 because conv2d expects 2D objects
# We then index [:,0] to remove the spurious dimension
result = T.stack(
[ T.stack(
[ T.signal.conv.conv2d(data_arr[:, from_idx:from_idx+1 ],
kernel_arr[:, to_idx, from_idx:from_idx+1 ],
image_shape = (tarr_len, 1),
filter_shape = (discrete_kernel_shape[0], 1),
border_mode = mode)[:,0]
for to_idx in np.arange(discrete_kernel_shape[1]) ] )
for from_idx in np.arange(discrete_kernel_shape[2]) ] ).T
else:
assert(discrete_kernel_shape == kernel_arr.shape)
assert(tarr_len == data_arr.shape[0])
result = np.stack(
[ np.stack(
[ scipy.signal.convolve(data_arr[:, from_idx ],
kernel_arr[:, to_idx, from_idx ],
mode=mode)
for to_idx in np.arange(kernel_arr.shape[1]) ] )
for from_idx in np.arange(kernel_arr.shape[2]) ] ).T
return result.reshape((tarr_len - discrete_kernel_shape[0] + 1,) + output_shape)
def lfilter(size, b, a, x, *args, **kwargs):
"""
Wrapper for the linear filter operator implemented by scipy.signal.lfilter
At the moment, the implementation is restricted to the case a = 1.
:param b: array of size M. The moving average coefficients.
:param a: array of size N. The autoregressive coefficients.
:param x: array.
:param size: tuple (M, N)
:return:
"""
sym_a = is_theano_object(a)
sym_b = is_theano_object(b)
sym_x = is_theano_object(x)
M, N = size
if sym_b or sym_x:
s = x * b[0]
for tau in range(1, M):
u = x[:-tau] * b[tau]
s = T.inc_subtensor(s[tau:], u)
else:
s = scipy.signal.lfilter(b, a, x, *args, **kwargs)
return s
################################
# Module initialization
load('numpy')
# By default, don't load Theano
#####################
# Gradients
def grad(expr, wrt, *args, **kwargs):
if not isinstance(wrt, (list, tuple)):
wrt = [wrt]
if not builtins.all(is_symbolic(w) for w in wrt):
raise TypeError("Gradient must be with respect to symbolic variables.")
if not is_symbolic(expr):
raise TypeError("Expression must be symbolic.")
# elif not set(wrt).issubset(shim.symbolic_inputs(expr)):
# raise TypeError("Attempted to take gradient with respect to the "
# "following values, which are not part of the "
# "computational graph: {}"
# .format(', '.join(v.name for v in set(wrt).difference(
# shim.symbolic_inputs(expr)))))
return getT().grad(expr, wrt, *args, **kwargs)
#######################
# NumPy functions
def all(x):
if is_theano_object(x):
return T.all(x)
else:
return np.all(x)
def any(x):
if is_theano_object(x):
return T.any(x)
else:
return np.any(x)
def arange(start, stop=None, step=1, dtype=None, symbolic=None):
_symb = is_theano_object(start, stop, step, dtype)
if symbolic is None:
symbolic = _symb
elif _symb and not symbolic:
raise TypeError("Attempting to create a symbolic array while "
"`shim.config.use_theano` is False.")
if symbolic:
dtype = str(np.dtype(dtype)) # Convert nptype and dtype to string
return T.arange(start, stop, step, dtype)
else:
return np.arange(start, stop, step, dtype)
def bincount(x, weights=None, minlength=None, assert_nonneg=False):
if is_theano_object(x, weights):
return T.bincount(x, weights, minlength, assert_nonneg)
else:
return np.bincount(x, weights, minlength)
def broadcast_to(array, shape, subok=False):
if is_theano_object(array, shape):
return T.ones(shape) * array
else:
return np.broadcast_to(array, shape, subok)
def choose(a, choices, out=None, mode='raise'):
if is_theano_object(a, choices):
return T.choose(a, choices, out, mode)
else:
return np.choose(a, choices, out, mode)
def copy(array, symbolic=True, name=None):
"""
NumPy `array`:
Calls ``array.copy()``.
Symbolic `array` & `symbolic` == True:
Make a symbolic copy of the `array` by calling ``array.copy(name=name)``.
`array` appears in the computational graph of the copy.
Symbolic `array` & `symbolic` == False:
Make a copy of the Python object by calling ``copy.copy(array)``.
``array`` does not appear in the computational graph of the copy, but
inputs to ``array`` do.
If `name` != `None` and `array` is symbolic, it is renamed accordingly.
>>> import theano_shim as shim
>>> shim.load('theano')
>>> y = shim.tensor(np.array(3.), 'y')
>>> z = shim.copy(y)
>>> z2 = shim.copy(y**2)
>>> zsymb = shim.copy(y, symbolic=True)
>>> y in shim.graph.inputs(z) # False
>>> y in shim.graph.inputs(z2) # True
>>> y in shim.graph.inputs(zsymb) # True
"""
if not is_theano_object(array):
if hasattr(array, 'copy') and isinstance(array.copy, Callable):
return array.copy()
else:
return copymodule.copy(array)
elif symbolic:
# Theano's copy() doesn't copy tags, and therefore fails
# immediately if compute_test_value == 'raise'
# So we turn it off during the copy and add the test value ourselves
compute_test_value = cf.compute_test_value
cf.compute_test_value = 'off'
array_copied = array.copy(name=name)
if compute_test_value != 'off':
array_copied.tag.test_value = get_test_value(array)
cf.compute_test_value = compute_test_value
return array_copied
else:
c = copymodule.copy(array)
if name is not None:
c.name = name
return c
def concatenate(tensor_list, axis=0):
if builtins.any(is_theano_object(x) for x in tensor_list):
return T.concatenate(tensor_list, axis)
else:
return np.concatenate(tensor_list, axis)
def cos(x):
if is_theano_object(x):
return T.cos(x)
else:
return np.cos(x)
def cosh(x):
if is_theano_object(x):
return T.cosh(x)
else:
return np.cosh(x)
def clip(a, a_min, a_max):
if is_theano_object(a, a_min, a_max):
return T.clip(a, a_min, a_max)
else:
return np.clip(a, a_min, a_max)
def cumsum(x, axis=None, dtype=None):
if is_theano_object(x):
return T.cumsum(x, axis)
else:
return np.cumsum(x, axis, dtype)
def diag(x, k=0):
if is_theano_object(x, k):
return T.diag(x, k=k)
else:
return np.diag(x, k=k)
def dot(x, y):
if is_theano_object(x) or is_theano_object(y):
return T.dot(x, y)
else:
return np.dot(x, y)
def exp(x):
if is_theano_object(x):
return T.exp(x)
else:
# if isinstance(x, ShimmedTensorShared):
# x = x.get_value()
return np.exp(x)
def gammaln(x):
if is_theano_object(x):
return T.gammaln(x)
else:
return sp.special.gammaln(x)
def isfinite(x, *args, **kwargs):
"""Always returns `True` on symbolic inputs."""
if is_theano_object(x):
return True
else:
assert not is_theano_object(kwargs.values())
return np.isfinite(x, **kwargs)
def log(x):
if is_theano_object(x):
return T.log(x)
else:
return np.log(x)
def log10(x):
if is_theano_object(x):
return T.log10(x)
else:
return np.log10(x)
def max(x):
if is_theano_object(x):
return T.max(x)
else:
return np.max(x)
def mean(x):
if is_theano_object(x):
return T.mean(x)
else:
return np.mean(x)
def min(x):
if is_theano_object(x):
return T.min(x)
else:
return np.min(x)
def multiply(x, y):
if is_theano_object(x, y):
return x*y
else:
return np.multiply(x, y)
def nonzero(x):
"""
Returns:
(numeric x) tuple of Array[int], one array per dimension.
(symbolic x) Python tuple of symbolic Subtensor, one Subtensor per dimension.
"""
if isscalar(x):
raise ValueError("Nonzero only supports non-scalar arrays")
if is_theano_object(x):
return T.nonzero(x)
else:
return np.nonzero(x)
def ones(shape, dtype=None, symbolic=None):
":param:symbolic: Always return symbolic tensor, if symbolic lib is loaded."
if config.library == 'numpy':
symbolic = False
if is_theano_object(shape) or symbolic:
return T.ones(shape, dtype)
else:
return np.ones(shape, dtype)
def prod(x, *args):
if is_theano_object(x):
return T.prod(x, *args)
else:
return np.prod(x, *args)
def sin(x):
if is_theano_object(x):
return T.sin(x)
else:
return np.sin(x)
def sinh(x):
if is_theano_object(x):
return T.sinh(x)
else:
return np.sinh(x)
def sqrt(x):
if is_theano_object(x):
return T.sqrt(x)
else:
return np.sqrt(x)
def stack(tensors, axis=0):
if is_theano_object(*tensors):
return T.stack(tensors, axis)
else:
return np.stack(tensors, axis)
def sum(x, axis=None, dtype=None, acc_dtype=None, keepdims=np._NoValue):
if is_theano_object(x):
result = T.sum(x, axis, dtype, acc_dtype)
if keepdims and keepdims is not np._NoValue:
if not isinstance(axis, Iterable):
axes = [axis]
else:
axes = sorted(axis)
for axis in axes:
result = add_axes(result, pos=axis)
return result
else:
return np.sum(x, axis=axis, dtype=dtype, keepdims=keepdims)
def tan(x):
if is_theano_object(x):
return T.tan(x)
else:
return np.tan(x)
def tanh(x):
if is_theano_object(x):
return T.tanh(x)
else:
return np.tanh(x)
def tile(x, reps, ndim=None):
if is_theano_object(x):
return T.tile(x, reps, ndim)
else:
return np.tile(x, reps)
def zeros(shape, dtype=None, symbolic=None):
":param:symbolic: Always return symbolic tensor, if symbolic lib is loaded."
if config.library == 'numpy':
symbolic = False
if is_theano_object(shape) or symbolic:
return T.zeros(shape, dtype)
else:
return np.zeros(shape, dtype)
def zeros_like(x, dtype=None):
if is_theano_object(x):
return T.zeros_like(x, dtype)
else:
return np.zeros_like(x, dtype)
| mit |
ananth95/ananth95.github.io-simQuad | ground_station/kalman_binary.py | 1 | 7102 | '''
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
This file visualises the attitude of the MPU6050 chip.
COMPATIBLE WITH:
+ ~/arduino/saber.cpp
+ ~/arduino/mpu_reflowed.cpp {CAL_DEBUG shoud NOT be defined}
YOU MUST ENSURE:
	* In .cpp files, timer_init() must be called with the same TIME_INTERVAL as specified below!
	* Each serial packet is 6 bytes of binary data
* You need to specify correct Serial port
* You need to set the Y-limits of the plot axis.
* You need to set the correct conversion factor for Gyro readings.
Mode 0 1 2 3
Range +-250 +-500 +-1000 +-2000
Conv. 131 65.5 32.75 16.375
AND it DELIVERS:
* 3 axis loss-less Gyro readings plot (almost real time).
* 3D visualisation of current orientation based on gyro vals
.
/ \ You might have to launch this script repeatedly if no data is transmitted by the arduino
/ ! \ If the problem persists, flash the .cpp once more and then repeat.
-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=
'''
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import serial, time, at_talk
TIME_INTERVAL = 0.005
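# `rotate` below applies Rodrigues' rotation formula: it builds the 3x3 matrix
# for a rotation of `theta` radians about the unit vector `axis` and applies it
# to `v`.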
def rotate(v, axis, theta):
c = np.cos(theta)
s = np.sin(theta)
t = 1-c
mat = np.array([ [c+axis[0]*axis[0]*t, axis[0]*axis[1]*t-axis[2]*s, axis[0]*axis[2]*t+axis[1]*s],
[axis[0]*axis[1]*t+axis[2]*s, c+axis[1]*axis[1]*t, axis[1]*axis[2]*t-axis[0]*s],
[axis[0]*axis[2]*t-axis[1]*s, axis[1]*axis[2]*t+axis[0]*s, c+axis[2]*axis[2]*t] ])
return mat.dot(v.T)
def calcPose(omega):
dt = TIME_INTERVAL*np.pi/180.0
dtheta = omega*dt
rpy[1] = rotate(rpy[1], rpy[0], dtheta[0])
rpy[0] = rotate(rpy[0], rpy[1], dtheta[1])
rpy[2] = np.cross(rpy[0], rpy[1])
rpy[1] = rotate(rpy[1], rpy[2], dtheta[2])
rpy[0] = rotate(rpy[0], rpy[2], dtheta[2])
def calcAngle(accel):
'''
angle = np.zeros(3)
angle[0] = np.arctan(accel[1]/np.sqrt((accel[0]*accel[0])+(accel[2]*accel[2])))+1.5707963267948966
angle[1] = np.arctan(accel[0]/np.sqrt((accel[1]*accel[1])+(accel[2]*accel[2])))+1.5707963267948966
angle[2] = np.arctan(accel[2]/np.sqrt((accel[1]*accel[1])+(accel[0]*accel[0])))+1.5707963267948966
'''
angle = np.array([
np.arctan(accel[0]/np.sqrt(accel[1]**2+accel[2]**2))+1.5707963267948966,
np.arctan(accel[1]/np.sqrt(accel[0]**2+accel[2]**2))+1.5707963267948966,
np.arctan(accel[2]/np.sqrt(accel[1]**2+accel[0]**2))+1.5707963267948966,
])
return angle
def variance(readings):
sum1 = np.zeros(3)
sum2 = np.zeros(3)
i=0
while i<10:
sum1 += readings[i]
sum2 += readings[i]*readings[i]
i+=1
num = (sum1*sum1)/10
sd = (sum2-num)/9
var = np.sqrt(sd)
return var
plt.ion()
arduino = at_talk.radio('/dev/ttyACM0', 57600)
rpy = np.eye(3)
fig = plt.figure(figsize=(16,6))
axes = fig.add_subplot(121)
#axesacc = fig.add_subplot(122)
a3d = fig.add_subplot(122, projection='3d')
#a3dacc = fig.add_subplot(224, projection='3d')
a3d.set_xlim(-1.2,1.2)
a3d.set_ylim(-1.2,1.2)
a3d.set_zlim(-1.2,1.2)
a3d.scatter([0], [0], [0], s=40)
r, = a3d.plot([0,1], [0,0], [0,0], lw=2)
p, = a3d.plot([0,0], [0,1], [0,0], lw=2)
y, = a3d.plot([0,0], [0,0], [0,1], lw=2)
gyaw, = a3d.plot([0,0], [0,0], [0,1], lw=2)
a3d.plot([0,2], [0,0], [0,0])
a3d.plot([0,0], [0,2], [0,0])
a3d.plot([0,0], [0,0], [0,2])
'''a3dacc.set_xlim(-1.2,1.2)
a3dacc.set_ylim(-1.2,1.2)
a3dacc.set_zlim(-1.2,1.2)
a3dacc.scatter([0], [0], [0], s=40)
r, = a3dacc.plot([0,1], [0,0], [0,0], lw=2)
p, = a3dacc.plot([0,0], [0,1], [0,0], lw=2)
a3dacc.plot([0,2], [0,0], [0,0])
a3dacc.plot([0,0], [0,2], [0,0])
a3dacc.plot([0,0], [0,0], [0,2])
'''
num_samples = 0
buff = 0
kyaw_alpha = [0]
kyaw_beta = [0]
kyaw_gamma = [0]
t = [0]
acc_x=[0]
acc_y=[0]
acc_z=[0]
scopes = [axes.plot(t, kyaw_alpha, label=r'$\theta_x$')[0], axes.plot(t, kyaw_beta, label=r'$\theta_y$')[0], axes.plot(t, kyaw_gamma, label=r'$\theta_z$')[0]]
axes.legend(prop=dict(size=14))
plt.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3,
ncol=3, mode="expand", borderaxespad=0.)
axes.set_ylim(-55,195)
g_scale = 65.5 #Gyro 500
a_scale = 16384.0 #Accel 2g
accel_history = np.zeros((10,3))
ktheta_history = np.zeros((10,3))
bias_history = np.zeros((10,3))
bias = np.zeros(3)
ktheta = np.array([1.5707963267948966,1.5707963267948966,0])
P = np.array([np.zeros(3),np.zeros(3),np.zeros(3),np.zeros(3)])
K = np.array([np.zeros(3),np.zeros(3)])
print 'Me Ready'
time.sleep(2.5)
#Handshake MAY BE REDUNDANT
arduino.notify()
data = [0]*6
while True:
try:
num = arduino.readn(12)
num = [ord(x) for x in num]
except:
print 'Serial error!'
raise RuntimeError
_ind=0 #this var is connected to for loop below!!
for i in range(0,12, 2):
data[_ind] = (num[i]<<8)|num[i+1]
if data[_ind] & 0x8000:
data[_ind] = data[_ind] - 0x10000
_ind += 1
accel3= np.array([float(data[0])/a_scale, float(data[1])/a_scale, float(data[2])/a_scale])
gyro3 = np.array([float(data[3])/g_scale, float(data[4])/g_scale, float(data[5])/g_scale])
#variance work
accel_history = np.delete(accel_history,9,0)
accel_history = np.insert(accel_history,0,accel3,0)
R_measure = variance(accel_history)
bias_history = np.delete(bias_history,9,0)
bias_history = np.insert(bias_history,0,bias,0)
Q_bias = variance(bias_history)
ktheta_history = np.delete(ktheta_history,9,0)
ktheta_history = np.insert(ktheta_history,0,ktheta,0)
Q_angle = variance(ktheta_history)
#ktheta = kalman(accel3,bias,gyro3,ktheta*180/np.pi,Q_angle,Q_bias)
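	# The block below is a per-axis 1D Kalman filter. Predict: integrate the
	# bias-corrected gyro rate and propagate the 2x2 error covariance stored as
	# P = [P00, P01, P10, P11]. Update: fuse the accelerometer-derived angle
	# `kangle` through the gain K, correcting both the angle estimate `ktheta`
	# and the gyro bias.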
omega = gyro3 - bias #dps - dps
ktheta = ktheta + omega*TIME_INTERVAL #d - d
P[0] += TIME_INTERVAL * (TIME_INTERVAL*P[3] - P[1] - P[2] + Q_angle)
P[1] -= TIME_INTERVAL * P[3]
P[2] -= TIME_INTERVAL * P[3]
P[3] += Q_bias * TIME_INTERVAL
S = P[0] + R_measure
K[0] = P[0] / S
K[1] = P[2] / S
kangle = calcAngle(accel3)*180/np.pi #degree
ky = kangle - ktheta
ktheta += K[0] * ky
bias += K[1] * ky #dps
P00_temp = P[0]
P01_temp = P[1]
P[0] -= K[0] * P00_temp
P[1] -= K[0] * P01_temp
P[2] -= K[1] * P00_temp
P[3] -= K[1] * P01_temp
#print "%.3f %.3f %.3f" %(ktheta[0], ktheta[1], ktheta[2])
kyaw_alpha.append(ktheta[0])
kyaw_beta.append(ktheta[1])
kyaw_gamma.append(ktheta[2])
num_samples += 1
t.append(num_samples)
if num_samples>200:
del t[0]
del kyaw_alpha[0]
del kyaw_beta[0]
del kyaw_gamma[0]
calcPose(omega) #--- has to be changed
pose = np.array([np.array([np.zeros(3), rpy[0]]).T, np.array([np.zeros(3), rpy[1]]).T, np.array([np.zeros(3), rpy[2]]).T])
axes.set_xlim(t[0], num_samples)
scopes[0].set_data(t, kyaw_alpha)
scopes[1].set_data(t, kyaw_beta)
scopes[2].set_data(t, kyaw_gamma)
r.set_data(pose[0][:2])
r.set_3d_properties(pose[0][2])
p.set_data(pose[1][:2])
p.set_3d_properties(pose[1][2])
y.set_data(pose[2][:2])
y.set_3d_properties(pose[2][2])
# Kalman G-Yaw
gyaw.set_data([0, -np.cos(ktheta[0]*np.pi/180)], [0, -np.cos(ktheta[1]*np.pi/180)])
gyaw.set_3d_properties([0, np.cos(ktheta[2]*np.pi/180)])
if buff>15:
buff=0
plt.draw()
buff += 1
plt.ioff()
plt.show()
arduino.notify()
arduino.powerdown() | gpl-2.0 |
leesavide/pythonista-docs | Documentation/matplotlib/mpl_toolkits/axes_grid/figures/demo_axis_direction.py | 6 | 2933 |
import numpy as np
import mpl_toolkits.axisartist.angle_helper as angle_helper
import mpl_toolkits.axisartist.grid_finder as grid_finder
from matplotlib.projections import PolarAxes
from matplotlib.transforms import Affine2D
import mpl_toolkits.axisartist as axisartist
from mpl_toolkits.axisartist.grid_helper_curvelinear import GridHelperCurveLinear
def setup_axes(fig, rect):
"""
polar projection, but in a rectangular box.
"""
# see demo_curvelinear_grid.py for details
tr = Affine2D().scale(np.pi/180., 1.) + PolarAxes.PolarTransform()
extreme_finder = angle_helper.ExtremeFinderCycle(20, 20,
lon_cycle = 360,
lat_cycle = None,
lon_minmax = None,
lat_minmax = (0, np.inf),
)
grid_locator1 = angle_helper.LocatorDMS(12)
grid_locator2 = grid_finder.MaxNLocator(5)
tick_formatter1 = angle_helper.FormatterDMS()
grid_helper = GridHelperCurveLinear(tr,
extreme_finder=extreme_finder,
grid_locator1=grid_locator1,
grid_locator2=grid_locator2,
tick_formatter1=tick_formatter1
)
ax1 = axisartist.Subplot(fig, rect, grid_helper=grid_helper)
ax1.axis[:].toggle(ticklabels=False)
fig.add_subplot(ax1)
ax1.set_aspect(1.)
ax1.set_xlim(-5, 12)
ax1.set_ylim(-5, 10)
#ax1.grid(True)
return ax1
def add_floating_axis1(ax1):
ax1.axis["lat"] = axis = ax1.new_floating_axis(0, 30)
axis.label.set_text(r"$\theta = 30^{\circ}$")
axis.label.set_visible(True)
return axis
def add_floating_axis2(ax1):
ax1.axis["lon"] = axis = ax1.new_floating_axis(1, 6)
axis.label.set_text(r"$r = 6$")
axis.label.set_visible(True)
return axis
import matplotlib.pyplot as plt
fig = plt.figure(1, figsize=(8, 4.))
fig.clf()
fig.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.99,
wspace=0.01, hspace=0.01)
for i, d in enumerate(["bottom", "left", "top", "right"]):
    ax1 = setup_axes(fig, rect=241+i)
axis = add_floating_axis1(ax1)
axis.set_axis_direction(d)
ax1.annotate(d, (0, 1), (5, -5),
xycoords="axes fraction", textcoords="offset points",
va="top", ha="left")
for i, d in enumerate(["bottom", "left", "top", "right"]):
    ax1 = setup_axes(fig, rect=245+i)
axis = add_floating_axis2(ax1)
axis.set_axis_direction(d)
ax1.annotate(d, (0, 1), (5, -5),
xycoords="axes fraction", textcoords="offset points",
va="top", ha="left")
plt.show()
| apache-2.0 |
OshynSong/scikit-learn | sklearn/tests/test_pipeline.py | 162 | 14875 | """
Test the pipeline module.
"""
import numpy as np
from scipy import sparse
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_raises, assert_raises_regex, assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.base import clone
from sklearn.pipeline import Pipeline, FeatureUnion, make_pipeline, make_union
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.linear_model import LinearRegression
from sklearn.cluster import KMeans
from sklearn.feature_selection import SelectKBest, f_classif
from sklearn.decomposition import PCA, RandomizedPCA, TruncatedSVD
from sklearn.datasets import load_iris
from sklearn.preprocessing import StandardScaler
from sklearn.feature_extraction.text import CountVectorizer
JUNK_FOOD_DOCS = (
"the pizza pizza beer copyright",
"the pizza burger beer copyright",
"the the pizza beer beer copyright",
"the burger beer beer copyright",
"the coke burger coke copyright",
"the coke burger burger",
)
class IncorrectT(object):
"""Small class to test parameter dispatching.
"""
def __init__(self, a=None, b=None):
self.a = a
self.b = b
class T(IncorrectT):
def fit(self, X, y):
return self
def get_params(self, deep=False):
return {'a': self.a, 'b': self.b}
def set_params(self, **params):
self.a = params['a']
return self
class TransfT(T):
def transform(self, X, y=None):
return X
class FitParamT(object):
"""Mock classifier
"""
def __init__(self):
self.successful = False
pass
def fit(self, X, y, should_succeed=False):
self.successful = should_succeed
def predict(self, X):
return self.successful
def test_pipeline_init():
# Test the various init parameters of the pipeline.
assert_raises(TypeError, Pipeline)
# Check that we can't instantiate pipelines with objects without fit
# method
pipe = assert_raises(TypeError, Pipeline, [('svc', IncorrectT)])
# Smoke test with only an estimator
clf = T()
pipe = Pipeline([('svc', clf)])
assert_equal(pipe.get_params(deep=True),
dict(svc__a=None, svc__b=None, svc=clf,
**pipe.get_params(deep=False)
))
# Check that params are set
pipe.set_params(svc__a=0.1)
assert_equal(clf.a, 0.1)
assert_equal(clf.b, None)
# Smoke test the repr:
repr(pipe)
# Test with two objects
clf = SVC()
filter1 = SelectKBest(f_classif)
pipe = Pipeline([('anova', filter1), ('svc', clf)])
# Check that we can't use the same stage name twice
assert_raises(ValueError, Pipeline, [('svc', SVC()), ('svc', SVC())])
# Check that params are set
pipe.set_params(svc__C=0.1)
assert_equal(clf.C, 0.1)
# Smoke test the repr:
repr(pipe)
# Check that params are not set when naming them wrong
assert_raises(ValueError, pipe.set_params, anova__C=0.1)
# Test clone
pipe2 = clone(pipe)
assert_false(pipe.named_steps['svc'] is pipe2.named_steps['svc'])
# Check that apart from estimators, the parameters are the same
params = pipe.get_params(deep=True)
params2 = pipe2.get_params(deep=True)
for x in pipe.get_params(deep=False):
params.pop(x)
for x in pipe2.get_params(deep=False):
params2.pop(x)
# Remove estimators that where copied
params.pop('svc')
params.pop('anova')
params2.pop('svc')
params2.pop('anova')
assert_equal(params, params2)
def test_pipeline_methods_anova():
# Test the various methods of the pipeline (anova).
iris = load_iris()
X = iris.data
y = iris.target
# Test with Anova + LogisticRegression
clf = LogisticRegression()
filter1 = SelectKBest(f_classif, k=2)
pipe = Pipeline([('anova', filter1), ('logistic', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_fit_params():
# Test that the pipeline can take fit parameters
pipe = Pipeline([('transf', TransfT()), ('clf', FitParamT())])
pipe.fit(X=None, y=None, clf__should_succeed=True)
# classifier should return True
assert_true(pipe.predict(None))
# and transformer params should not be changed
assert_true(pipe.named_steps['transf'].a is None)
assert_true(pipe.named_steps['transf'].b is None)
def test_pipeline_raise_set_params_error():
# Test pipeline raises set params error message for nested models.
pipe = Pipeline([('cls', LinearRegression())])
# expected error message
error_msg = ('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.')
assert_raise_message(ValueError,
error_msg % ('fake', 'Pipeline'),
pipe.set_params,
fake='nope')
# nested model check
assert_raise_message(ValueError,
error_msg % ("fake", pipe),
pipe.set_params,
fake__estimator='nope')
def test_pipeline_methods_pca_svm():
# Test the various methods of the pipeline (pca + svm).
iris = load_iris()
X = iris.data
y = iris.target
# Test with PCA + SVC
clf = SVC(probability=True, random_state=0)
pca = PCA(n_components='mle', whiten=True)
pipe = Pipeline([('pca', pca), ('svc', clf)])
pipe.fit(X, y)
pipe.predict(X)
pipe.predict_proba(X)
pipe.predict_log_proba(X)
pipe.score(X, y)
def test_pipeline_methods_preprocessing_svm():
# Test the various methods of the pipeline (preprocessing + svm).
iris = load_iris()
X = iris.data
y = iris.target
n_samples = X.shape[0]
n_classes = len(np.unique(y))
scaler = StandardScaler()
pca = RandomizedPCA(n_components=2, whiten=True)
clf = SVC(probability=True, random_state=0)
for preprocessing in [scaler, pca]:
pipe = Pipeline([('preprocess', preprocessing), ('svc', clf)])
pipe.fit(X, y)
# check shapes of various prediction functions
predict = pipe.predict(X)
assert_equal(predict.shape, (n_samples,))
proba = pipe.predict_proba(X)
assert_equal(proba.shape, (n_samples, n_classes))
log_proba = pipe.predict_log_proba(X)
assert_equal(log_proba.shape, (n_samples, n_classes))
decision_function = pipe.decision_function(X)
assert_equal(decision_function.shape, (n_samples, n_classes))
pipe.score(X, y)
def test_fit_predict_on_pipeline():
# test that the fit_predict method is implemented on a pipeline
# test that the fit_predict on pipeline yields same results as applying
# transform and clustering steps separately
iris = load_iris()
scaler = StandardScaler()
km = KMeans(random_state=0)
# first compute the transform and clustering step separately
scaled = scaler.fit_transform(iris.data)
separate_pred = km.fit_predict(scaled)
# use a pipeline to do the transform and clustering in one step
pipe = Pipeline([('scaler', scaler), ('Kmeans', km)])
pipeline_pred = pipe.fit_predict(iris.data)
assert_array_almost_equal(pipeline_pred, separate_pred)
def test_fit_predict_on_pipeline_without_fit_predict():
# tests that a pipeline does not have fit_predict method when final
# step of pipeline does not have fit_predict defined
scaler = StandardScaler()
pca = PCA()
pipe = Pipeline([('scaler', scaler), ('pca', pca)])
assert_raises_regex(AttributeError,
"'PCA' object has no attribute 'fit_predict'",
getattr, pipe, 'fit_predict')
def test_feature_union():
# basic sanity check for feature union
iris = load_iris()
X = iris.data
X -= X.mean(axis=0)
y = iris.target
svd = TruncatedSVD(n_components=2, random_state=0)
select = SelectKBest(k=1)
fs = FeatureUnion([("svd", svd), ("select", select)])
fs.fit(X, y)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape, (X.shape[0], 3))
# check if it does the expected thing
assert_array_almost_equal(X_transformed[:, :-1], svd.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
# test if it also works for sparse input
# We use a different svd object to control the random_state stream
fs = FeatureUnion([("svd", svd), ("select", select)])
X_sp = sparse.csr_matrix(X)
X_sp_transformed = fs.fit_transform(X_sp, y)
assert_array_almost_equal(X_transformed, X_sp_transformed.toarray())
# test setting parameters
fs.set_params(select__k=2)
assert_equal(fs.fit_transform(X, y).shape, (X.shape[0], 4))
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("svd", svd), ("select", select)])
X_transformed = fs.fit_transform(X, y)
assert_equal(X_transformed.shape, (X.shape[0], 8))
def test_make_union():
pca = PCA()
mock = TransfT()
fu = make_union(pca, mock)
names, transformers = zip(*fu.transformer_list)
assert_equal(names, ("pca", "transft"))
assert_equal(transformers, (pca, mock))
def test_pipeline_transform():
# Test whether pipeline works with a transformer at the end.
# Also test pipeline.transform and pipeline.inverse_transform
iris = load_iris()
X = iris.data
pca = PCA(n_components=2)
pipeline = Pipeline([('pca', pca)])
# test transform and fit_transform:
X_trans = pipeline.fit(X).transform(X)
X_trans2 = pipeline.fit_transform(X)
X_trans3 = pca.fit_transform(X)
assert_array_almost_equal(X_trans, X_trans2)
assert_array_almost_equal(X_trans, X_trans3)
X_back = pipeline.inverse_transform(X_trans)
X_back2 = pca.inverse_transform(X_trans)
assert_array_almost_equal(X_back, X_back2)
def test_pipeline_fit_transform():
# Test whether pipeline works with a transformer missing fit_transform
iris = load_iris()
X = iris.data
y = iris.target
transft = TransfT()
pipeline = Pipeline([('mock', transft)])
# test fit_transform:
X_trans = pipeline.fit_transform(X, y)
X_trans2 = transft.fit(X, y).transform(X)
assert_array_almost_equal(X_trans, X_trans2)
def test_make_pipeline():
t1 = TransfT()
t2 = TransfT()
pipe = make_pipeline(t1, t2)
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
pipe = make_pipeline(t1, t2, FitParamT())
assert_true(isinstance(pipe, Pipeline))
assert_equal(pipe.steps[0][0], "transft-1")
assert_equal(pipe.steps[1][0], "transft-2")
assert_equal(pipe.steps[2][0], "fitparamt")
def test_feature_union_weights():
# test feature union with transformer weights
iris = load_iris()
X = iris.data
y = iris.target
pca = RandomizedPCA(n_components=2, random_state=0)
select = SelectKBest(k=1)
# test using fit followed by transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
fs.fit(X, y)
X_transformed = fs.transform(X)
# test using fit_transform
fs = FeatureUnion([("pca", pca), ("select", select)],
transformer_weights={"pca": 10})
X_fit_transformed = fs.fit_transform(X, y)
# test it works with transformers missing fit_transform
fs = FeatureUnion([("mock", TransfT()), ("pca", pca), ("select", select)],
transformer_weights={"mock": 10})
X_fit_transformed_wo_method = fs.fit_transform(X, y)
# check against expected result
# We use a different pca object to control the random_state stream
assert_array_almost_equal(X_transformed[:, :-1], 10 * pca.fit_transform(X))
assert_array_equal(X_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_array_almost_equal(X_fit_transformed[:, :-1],
10 * pca.fit_transform(X))
assert_array_equal(X_fit_transformed[:, -1],
select.fit_transform(X, y).ravel())
assert_equal(X_fit_transformed_wo_method.shape, (X.shape[0], 7))
def test_feature_union_parallel():
# test that n_jobs work for FeatureUnion
X = JUNK_FOOD_DOCS
fs = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
])
fs_parallel = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs_parallel2 = FeatureUnion([
("words", CountVectorizer(analyzer='word')),
("chars", CountVectorizer(analyzer='char')),
], n_jobs=2)
fs.fit(X)
X_transformed = fs.transform(X)
assert_equal(X_transformed.shape[0], len(X))
fs_parallel.fit(X)
X_transformed_parallel = fs_parallel.transform(X)
assert_equal(X_transformed.shape, X_transformed_parallel.shape)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel.toarray()
)
# fit_transform should behave the same
X_transformed_parallel2 = fs_parallel2.fit_transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
# transformers should stay fit after fit_transform
X_transformed_parallel2 = fs_parallel2.transform(X)
assert_array_equal(
X_transformed.toarray(),
X_transformed_parallel2.toarray()
)
def test_feature_union_feature_names():
word_vect = CountVectorizer(analyzer="word")
char_vect = CountVectorizer(analyzer="char_wb", ngram_range=(3, 3))
ft = FeatureUnion([("chars", char_vect), ("words", word_vect)])
ft.fit(JUNK_FOOD_DOCS)
feature_names = ft.get_feature_names()
for feat in feature_names:
assert_true("chars__" in feat or "words__" in feat)
assert_equal(len(feature_names), 35)
def test_classes_property():
iris = load_iris()
X = iris.data
y = iris.target
reg = make_pipeline(SelectKBest(k=1), LinearRegression())
reg.fit(X, y)
assert_raises(AttributeError, getattr, reg, "classes_")
clf = make_pipeline(SelectKBest(k=1), LogisticRegression(random_state=0))
assert_raises(AttributeError, getattr, clf, "classes_")
clf.fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
| bsd-3-clause |
wehr-lab/RPilot | autopilot/core/utils.py | 1 | 7010 | from autopilot import prefs
# if prefs.get('AGENT') in ("TERMINAL", "DOCS"):
HAVE_PYSIDE = False
try:
from PySide2 import QtCore
HAVE_PYSIDE = True
except ImportError:
pass
import json
import pandas as pd
from scipy.stats import linregress
# from subprocess import call
from threading import Thread
import os
import numpy as np
class Param(object):
"""
In the future, we will implement a coherent Parameter management system
Warning:
Not Implemented.
"""
# Class to hold and verify task and gui parameters
tag = None # human-readable description of parameter
type = None # string that describes the type of input or param
# possible types
types = ['int', 'bool', 'list']
def __init__(self, **kwargs):
"""
Args:
**kwargs:
"""
for k, v in kwargs.items():
setattr(self, k, v)
# enable dictionary-like behavior
def __getitem__(self, key):
"""
Args:
key:
"""
return self.__dict__[key]
def __setitem__(self, key, value):
"""
Args:
key:
value:
"""
self.__dict__[key] = value
def __delitem__(self, key):
"""
Args:
key:
"""
del self.__dict__[key]
def __contains__(self, key):
"""
Args:
key:
"""
return key in self.__dict__
def __len__(self):
return len(self.__dict__)
# def validate(self):
# if all([self.id, self.to, self.sender, self.key]):
# return True
# else:
# return False
_INVOKER = None
if HAVE_PYSIDE:
class InvokeEvent(QtCore.QEvent):
"""
Sends signals to the main QT thread from spawned message threads
See `stackoverflow <https://stackoverflow.com/a/12127115>`_
"""
EVENT_TYPE = QtCore.QEvent.Type(QtCore.QEvent.registerEventType())
def __init__(self, fn, *args, **kwargs):
# type: (function, object, object) -> None
"""
Accepts a function, its args and kwargs and wraps them as a
:class:`QtCore.QEvent`
"""
QtCore.QEvent.__init__(self, InvokeEvent.EVENT_TYPE)
self.fn = fn
self.args = args
self.kwargs = kwargs
class Invoker(QtCore.QObject):
"""
Wrapper that calls an evoked event made by :class:`.InvokeEvent`
"""
def event(self, event):
"""
Args:
event:
"""
event.fn(*event.args, **event.kwargs)
return True
def get_invoker():
if globals()['_INVOKER'] is None:
globals()['_INVOKER'] = Invoker()
return globals()['_INVOKER']
class ReturnThread(Thread):
"""
Thread whose .join() method returns the value from the function
thx to https://stackoverflow.com/a/6894023
"""
def __init__(self, group=None, target=None, name=None,
args=(), kwargs={}, Verbose=None):
        Thread.__init__(self, group, target, name, args, kwargs)
self._return = None
def run(self):
        if self._target is not None:
            self._return = self._target(*self._args, **self._kwargs)
def join(self, timeout=None):
Thread.join(self, timeout)
return self._return
def list_subjects(pilot_db=None):
"""
Given a dictionary of a pilot_db, return the subjects that are in it.
Args:
pilot_db (dict): a pilot_db. if None tried to load pilot_db with :method:`.load_pilotdb`
Returns:
subjects (list): a list of currently active subjects
"""
if pilot_db is None:
pilot_db = load_pilotdb()
subjects = []
for pilot, values in pilot_db.items():
if 'subjects' in values.keys():
subjects.extend(values['subjects'])
return subjects
def load_pilotdb(file_name=None, reverse=False):
"""
Try to load the file_db
Args:
reverse:
file_name:
Returns:
"""
if file_name is None:
file_name = '/usr/autopilot/pilot_db.json'
with open(file_name) as pilot_file:
pilot_db = json.load(pilot_file)
if reverse:
# simplify pilot db
pilot_db = {k: v['subjects'] for k, v in pilot_db.items()}
pilot_dict = {}
for pilot, subjectlist in pilot_db.items():
for ms in subjectlist:
pilot_dict[ms] = pilot
pilot_db = pilot_dict
return pilot_db
def coerce_discrete(df, col, mapping={'L':0, 'R':1}):
"""
Coerce a discrete/string column of a pandas dataframe into numeric values
Default is to map 'L' to 0 and 'R' to 1 as in the case of Left/Right 2AFC tasks
Args:
df (:class:`pandas.DataFrame`) : dataframe with the column to transform
col (str): name of column
mapping (dict): mapping of strings to numbers
Returns:
df (:class:`pandas.DataFrame`) : transformed dataframe
"""
for key, val in mapping.items():
df.loc[df[col]==key,col] = val
# if blanks, warn and remove
if '' in df[col].unique():
n_blanks = sum(df[col]=='')
Warning('{} blank rows detected, removing.'.format(n_blanks))
df.drop(df.index[df[col]==''], axis=0, inplace=True)
df = df.astype({col:float})
return df
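# Illustrative usage (not part of the original source):
#   >>> df = pd.DataFrame({'response': ['L', 'R', 'R']})
#   >>> coerce_discrete(df, 'response')['response'].tolist()
#   [0.0, 1.0, 1.0]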
def find_recursive(key, dictionary):
"""
Find all instances of a key in a dictionary, recursively.
Args:
key:
dictionary:
Returns:
list
"""
for k, v in dictionary.items():
if k == key:
yield v
elif isinstance(v, dict):
for result in find_recursive(key, v):
yield result
elif isinstance(v, list):
for d in v:
for result in find_recursive(key, d):
yield result
#
# def update_pis(github=True, apt=False, pilot_select = None, prefs_fn = None):
# """
# Args:
# github:
# apt:
# pilot_select:
# prefs_fn:
# """
# # update github, or apt?
# # should limit pilots or use all?
# # load prefs from default location or use different?
# if prefs_fn is None:
# prefs = get_prefs()
# else:
# prefs = get_prefs(prefs_fn)
#
# # get ips from pilot db
# with open(prefs['PILOT_DB'], 'r') as pilot_db:
# pilots = json.load(pilot_db)
#
# # if we were passed a list of pilots to subset then do it
# if pilot_select is not None:
# pilots = {k: v for k, v in pilots.items() if k in pilot_select }
#
# if github is True:
# ips = ['pi@'+v['ip'] for k,v in pilots.items()]
# ip_string = " ".join(ips)
# call('parallel-ssh', '-H', ip_string, 'git --git-dir=/home/pi/git/autopilot/.git pull')
| gpl-3.0 |
KeplerGO/kpub | scripts/kepler-vs-k2-publication-rate/plot-first-quarters.py | 1 | 5283 | """Creates beautiful visualizations of the publication database."""
import datetime
import sqlite3 as sql
import numpy as np
from astropy import log
from matplotlib import pyplot as plt
import matplotlib.patheffects as path_effects
import matplotlib as mpl
import kpub
# Configure the aesthetics
mpl.rcParams["figure.figsize"] = (8.485, 6)
mpl.rcParams["interactive"] = False
mpl.rcParams["lines.antialiased"] = True
# Patches
mpl.rcParams["patch.linewidth"] = 0.5
mpl.rcParams["patch.facecolor"] = "348ABD"
mpl.rcParams["patch.edgecolor"] = "eeeeee"
mpl.rcParams["patch.antialiased"] = True
# Font
mpl.rcParams["font.family"] = "sans-serif"
mpl.rcParams["font.size"] = 16
mpl.rcParams["font.sans-serif"] = "Open Sans"
mpl.rcParams["text.color"] = "333333"
# Axes
mpl.rcParams["axes.facecolor"] = "ecf0f1"
mpl.rcParams["axes.edgecolor"] = "bdc3c7"
mpl.rcParams["axes.linewidth"] = 1.0
mpl.rcParams["axes.grid"] = False
mpl.rcParams["axes.titlesize"] = "x-large"
mpl.rcParams["axes.labelsize"] = "x-large"
mpl.rcParams["axes.labelweight"] = "normal"
mpl.rcParams["axes.labelcolor"] = "333333"
mpl.rcParams["axes.axisbelow"] = True
mpl.rcParams["axes.unicode_minus"] = True
# Ticks
mpl.rcParams["xtick.color"] = "333333"
mpl.rcParams["ytick.color"] = "333333"
mpl.rcParams["xtick.major.size"] = 0
mpl.rcParams["ytick.major.size"] = 0
# Grid
mpl.rcParams["grid.color"] = "bdc3c7"
mpl.rcParams["grid.linestyle"] = "-"
mpl.rcParams["grid.linewidth"] = 1
MISSIONS = ['kepler', 'k2']
SCIENCES = ['exoplanets', 'astrophysics']
def quarterly_count(db, mission="k2", start="2015-01", stop="2015-04"):
cur = db.con.execute("""SELECT COUNT(*) FROM pubs
WHERE mission = ?
AND month >= ?
AND month < ?;""",
[mission, start, stop])
rows = list(cur.fetchall())
return rows[0][0]
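# Illustrative usage (not part of the original source):
#   quarterly_count(db, mission="kepler", start="2009-01", stop="2009-04")
# counts publications whose month satisfies "2009-01" <= month < "2009-04".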
if __name__ == "__main__":
barwidth = 0.75
output_fn = "kpub-first-quarters.pdf"
dpi = 200
ymax = 35
yticks = [0, 10, 20, 30]
db = kpub.PublicationDB()
# First collect the data
k1_labels = ["2009/1", "2009/2", "2009/3", "2009/4", "2010/1", "2010/2", "2010/3", "2010/4"]
k1_start = ["2009-01", "2009-04", "2009-07", "2009-10", "2010-01", "2010-04", "2010-07", "2010-10"]
k1_stop = ["2009-04", "2009-07", "2009-10", "2010-01", "2010-04", "2010-07", "2010-10", "2011-01"]
k1_counts = []
for idx, label in enumerate(k1_labels):
count = quarterly_count(db, mission="kepler", start=k1_start[idx], stop=k1_stop[idx])
k1_counts.append(count)
k2_labels = ["2014/1", "2014/2", "2014/3", "2014/4", "2015/1", "2015/2", "2015/3", "2015/4"]
k2_start = ["2014-01", "2014-04", "2014-07", "2014-10", "2015-01", "2015-04", "2015-07", "2015-10"]
k2_stop = ["2014-04", "2014-07", "2014-10", "2015-01", "2015-04", "2015-07", "2015-10", "2016-01"]
k2_counts = []
for idx, label in enumerate(k2_labels):
count = quarterly_count(db, mission="k2", start=k2_start[idx], stop=k2_stop[idx])
k2_counts.append(count)
# Now make the actual plot
fig = plt.figure()
plt.subplots_adjust(left=0.12, right=0.98,
bottom=0.15, top=0.98,
hspace=0.2)
ax = fig.add_subplot(211)
plt.bar(np.arange(len(k1_counts)) - 0.5 * barwidth,
k1_counts,
facecolor="#3498db",
width=barwidth,
label="First Kepler publications")
# Aesthetics
ax.get_xaxis().get_major_formatter().set_useOffset(False)
plt.xticks(range(len(k1_counts)), k1_labels)
plt.xlim([0 - 0.75*barwidth, len(k1_counts) - 1 + 0.75*barwidth])
plt.legend(loc="upper left", frameon=True)
plt.yticks(yticks)
plt.ylim([0, ymax])
# Disable spines
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
# Only show bottom and left ticks
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Only show horizontal grid lines
ax.grid(axis='y')
ax.set_ylabel("Number of publications")
ax.yaxis.set_label_coords(-0.06, -.15)
ax = fig.add_subplot(212)
plt.bar(np.arange(len(k2_counts)) - 0.5 * barwidth,
k2_counts,
facecolor="#e74c3c",
width=barwidth,
label="First K2 publications")
# Aesthetics
#plt.ylabel("Number of publications")
plt.xlabel("Quarter")
ax.get_xaxis().get_major_formatter().set_useOffset(False)
plt.xticks(range(len(k2_counts)), k2_labels)
plt.xlim([0 - 0.75*barwidth, len(k2_counts) - 1 + 0.75*barwidth])
plt.legend(loc="upper left", frameon=True)
plt.yticks(yticks)
plt.ylim([0, ymax])
# Disable spines
ax.spines["left"].set_visible(False)
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
# Only show bottom and left ticks
ax.get_xaxis().tick_bottom()
ax.get_yaxis().tick_left()
# Only show horizontal grid lines
ax.grid(axis='y')
#plt.tight_layout(h_pad=1.5)
log.info("Writing {}".format(output_fn))
plt.savefig(output_fn, dpi=dpi)
plt.close()
| mit |
bcharlas/mytrunk | examples/adaptiveintegrator/simple-scene-plot-RungeKuttaCashKarp54.py | 6 | 2549 | #!/usr/bin/python
# Burak ER
# burak.er@btu.edu.tr
# github.com/burak-er
# Mechanical Engineering Department
# Bursa Technical University
#
# -*- coding: utf-8 -*-
import matplotlib
matplotlib.use('TkAgg')
import os  # needed for the os.environ lookup at the end of the script
# Use an integrator engine that is derived from the interface Integrator.
#The RungeKuttaCashKarp54Integrator performs one step of the simulation within the given tolerances. If a time step is given, it completes that step and then stops.
integrator=RungeKuttaCashKarp54Integrator([
ForceResetter(),
GeneralIntegratorInsertionSortCollider([Bo1_Sphere_Aabb(),Bo1_Box_Aabb()]),
InteractionLoop(
[Ig2_Sphere_Sphere_ScGeom(),Ig2_Box_Sphere_ScGeom()],
[Ip2_FrictMat_FrictMat_FrictPhys()],
[Law2_ScGeom_FrictPhys_CundallStrack()]
),
GravityEngine(gravity=Vector3(0,0,-9.81)),
PyRunner(virtPeriod=1e-99,command='myAddPlotData()')#use virtPeriod on this integrator.
]);
#Tolerances can be set for the optimum accuracy
integrator.rel_err=1e-6;
integrator.abs_err=1e-6;
O.engines=[integrator,
]
O.bodies.append(box(center=[0,0,0],extents=[.5,.5,.5],fixed=True,color=[1,0,0]))
O.bodies.append(sphere([0,0,2],1,color=[0,1,0]))
O.dt=1e-2# this signifies the endpoint. It matters little for the accuracy of the integration, which is governed by the integrator's rel_err and abs_err.
############################################
##### now the part pertaining to plots #####
############################################
from yade import plot
## we will have 2 plots:
## 1. t as function of i (joke test function)
## 2. i as function of t on left y-axis ('|||' makes the separation) and z_sph, v_sph (as green circles connected with line) and z_sph_half again as function of t
plot.plots={'i':('t'),'t':('z_sph',None,('v_sph','go-'),'z_sph_half')}
## this function is called by plotDataCollector
## it should add data with the labels that we will plot
## if a datum is not specified (but exists), it will be NaN and will not be plotted
def myAddPlotData():
sph=O.bodies[1]
## store some numbers under some labels
plot.addData(t=O.time,i=O.iter,z_sph=sph.state.pos[2],z_sph_half=.5*sph.state.pos[2],v_sph=sph.state.vel.norm())
print "Now calling plot.plot() to show the figures. The timestep is artificially low so that you can watch graphs being updated live."
plot.liveInterval=.2
plot.plot(subPlots=False)
print "Number of threads ", os.environ['OMP_NUM_THREADS']
O.run(int(5./O.dt));
#plot.saveGnuplot('/tmp/a')
## you can also access the data in plot.data['i'], plot.data['t'] etc, under the labels they were saved.
| gpl-2.0 |
cauchycui/scikit-learn | benchmarks/bench_covertype.py | 154 | 7296 | """
===========================
Covertype dataset benchmark
===========================
Benchmark stochastic gradient descent (SGD), Liblinear, and Naive Bayes, CART
(decision tree), RandomForest and Extra-Trees on the forest covertype dataset
of Blackard, Jock, and Dean [1]. The dataset comprises 581,012 samples. It is
low dimensional with 54 features and a sparsity of approx. 23%. Here, we
consider the task of predicting class 1 (spruce/fir). The classification
performance of SGD is competitive with Liblinear while being two orders of
magnitude faster to train::
[..]
Classification performance:
===========================
Classifier train-time test-time error-rate
--------------------------------------------
liblinear 15.9744s 0.0705s 0.2305
GaussianNB 3.0666s 0.3884s 0.4841
SGD 1.0558s 0.1152s 0.2300
CART 79.4296s 0.0523s 0.0469
RandomForest 1190.1620s 0.5881s 0.0243
ExtraTrees 640.3194s 0.6495s 0.0198
The same task has been used in a number of papers including:
* `"SVM Optimization: Inverse Dependence on Training Set Size"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.139.2112>`_
S. Shalev-Shwartz, N. Srebro - In Proceedings of ICML '08.
* `"Pegasos: Primal estimated sub-gradient solver for svm"
<http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.74.8513>`_
S. Shalev-Shwartz, Y. Singer, N. Srebro - In Proceedings of ICML '07.
* `"Training Linear SVMs in Linear Time"
<www.cs.cornell.edu/People/tj/publications/joachims_06a.pdf>`_
T. Joachims - In SIGKDD '06
[1] http://archive.ics.uci.edu/ml/datasets/Covertype
"""
from __future__ import division, print_function
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Arnaud Joly <arnaud.v.joly@gmail.com>
# License: BSD 3 clause
import os
from time import time
import argparse
import numpy as np
from sklearn.datasets import fetch_covtype, get_data_home
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier, ExtraTreesClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.metrics import zero_one_loss
from sklearn.externals.joblib import Memory
from sklearn.utils import check_array
# Memoize the data extraction and memory map the resulting
# train / test splits in readonly mode
memory = Memory(os.path.join(get_data_home(), 'covertype_benchmark_data'),
mmap_mode='r')
@memory.cache
def load_data(dtype=np.float32, order='C', random_state=13):
"""Load the data, then cache and memmap the train/test split"""
######################################################################
## Load dataset
print("Loading dataset...")
data = fetch_covtype(download_if_missing=True, shuffle=True,
random_state=random_state)
X = check_array(data['data'], dtype=dtype, order=order)
y = (data['target'] != 1).astype(np.int)
## Create train-test split (as [Joachims, 2006])
print("Creating train-test split...")
n_train = 522911
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
## Standardize first 10 features (the numerical ones)
mean = X_train.mean(axis=0)
std = X_train.std(axis=0)
mean[10:] = 0.0
std[10:] = 1.0
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
return X_train, X_test, y_train, y_test
ESTIMATORS = {
'GBRT': GradientBoostingClassifier(n_estimators=250),
'ExtraTrees': ExtraTreesClassifier(n_estimators=20),
'RandomForest': RandomForestClassifier(n_estimators=20),
'CART': DecisionTreeClassifier(min_samples_split=5),
'SGD': SGDClassifier(alpha=0.001, n_iter=2),
'GaussianNB': GaussianNB(),
'liblinear': LinearSVC(loss="l2", penalty="l2", C=1000, dual=False,
tol=1e-3)
}
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--classifiers', nargs="+",
choices=ESTIMATORS, type=str,
default=['liblinear', 'GaussianNB', 'SGD', 'CART'],
help="list of classifiers to benchmark.")
parser.add_argument('--n-jobs', nargs="?", default=1, type=int,
help="Number of concurrently running workers for "
"models that support parallelism.")
parser.add_argument('--order', nargs="?", default="C", type=str,
choices=["F", "C"],
help="Allow to choose between fortran and C ordered "
"data")
parser.add_argument('--random-seed', nargs="?", default=13, type=int,
help="Common seed used by random number generator.")
args = vars(parser.parse_args())
print(__doc__)
X_train, X_test, y_train, y_test = load_data(
order=args["order"], random_state=args["random_seed"])
print("")
print("Dataset statistics:")
print("===================")
print("%s %d" % ("number of features:".ljust(25), X_train.shape[1]))
print("%s %d" % ("number of classes:".ljust(25), np.unique(y_train).size))
print("%s %s" % ("data type:".ljust(25), X_train.dtype))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of train samples:".ljust(25),
X_train.shape[0], np.sum(y_train == 1),
np.sum(y_train == 0), int(X_train.nbytes / 1e6)))
print("%s %d (pos=%d, neg=%d, size=%dMB)"
% ("number of test samples:".ljust(25),
X_test.shape[0], np.sum(y_test == 1),
np.sum(y_test == 0), int(X_test.nbytes / 1e6)))
print()
print("Training Classifiers")
print("====================")
error, train_time, test_time = {}, {}, {}
for name in sorted(args["classifiers"]):
print("Training %s ... " % name, end="")
estimator = ESTIMATORS[name]
estimator_params = estimator.get_params()
estimator.set_params(**{p: args["random_seed"]
for p in estimator_params
if p.endswith("random_state")})
if "n_jobs" in estimator_params:
estimator.set_params(n_jobs=args["n_jobs"])
time_start = time()
estimator.fit(X_train, y_train)
train_time[name] = time() - time_start
time_start = time()
y_pred = estimator.predict(X_test)
test_time[name] = time() - time_start
error[name] = zero_one_loss(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print("%s %s %s %s"
% ("Classifier ", "train-time", "test-time", "error-rate"))
print("-" * 44)
for name in sorted(args["classifiers"], key=error.get):
print("%s %s %s %s" % (name.ljust(12),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % error[name]).center(10)))
print()
| bsd-3-clause |
HyperloopTeam/FullOpenMDAO | lib/python2.7/site-packages/matplotlib/tests/test_pickle.py | 9 | 6967 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import cPickle as pickle
from six.moves import xrange
from io import BytesIO
from nose.tools import assert_equal, assert_not_equal
import numpy as np
from matplotlib.testing.decorators import cleanup, image_comparison
import matplotlib.pyplot as plt
def depth_getter(obj,
current_depth=0,
depth_stack=None,
nest_info='top level object'):
"""
Returns a dictionary mapping:
id(obj): (shallowest_depth, obj, nest_info)
for the given object (and its subordinates).
This, in conjunction with recursive_pickle, can be used to debug
pickling issues, although finding others is sometimes a case of
trial and error.
"""
if depth_stack is None:
depth_stack = {}
if id(obj) in depth_stack:
stack = depth_stack[id(obj)]
if stack[0] > current_depth:
del depth_stack[id(obj)]
else:
return depth_stack
depth_stack[id(obj)] = (current_depth, obj, nest_info)
if isinstance(obj, (list, tuple)):
for i, item in enumerate(obj):
depth_getter(item, current_depth=current_depth + 1,
depth_stack=depth_stack,
nest_info=('list/tuple item #%s in '
'(%s)' % (i, nest_info)))
else:
if isinstance(obj, dict):
state = obj
elif hasattr(obj, '__getstate__'):
state = obj.__getstate__()
if not isinstance(state, dict):
state = {}
elif hasattr(obj, '__dict__'):
state = obj.__dict__
else:
state = {}
for key, value in six.iteritems(state):
depth_getter(value, current_depth=current_depth + 1,
depth_stack=depth_stack,
nest_info=('attribute "%s" in '
'(%s)' % (key, nest_info)))
return depth_stack
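# Small, self-contained illustration of the mapping built above (the sample
# object is invented for this sketch): every reachable sub-object is recorded
# at the shallowest depth it is found, together with a human-readable
# breadcrumb describing where it sits.
def _depth_getter_example():
    info = depth_getter({'a': [1, 2]})
    # each value is a (depth, obj, nest_info) triple; the outer dict sits at
    # depth 0, the list at depth 1 and its two items at depth 2
    return sorted(depth for depth, obj, nest in info.values())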
def recursive_pickle(top_obj):
"""
Recursively pickle all of the given objects subordinates, starting with
the deepest first. **Very** handy for debugging pickling issues, but
also very slow (as it literally pickles each object in turn).
Handles circular object references gracefully.
"""
objs = depth_getter(top_obj)
# sort by depth then by nest_info
objs = sorted(six.itervalues(objs), key=lambda val: (-val[0], val[2]))
for _, obj, location in objs:
# print('trying %s' % location)
try:
pickle.dump(obj, BytesIO(), pickle.HIGHEST_PROTOCOL)
except Exception as err:
print(obj)
print('Failed to pickle %s. \n Type: %s. Traceback '
'follows:' % (location, type(obj)))
raise
@cleanup
def test_simple():
fig = plt.figure()
# un-comment to debug
# recursive_pickle(fig)
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
ax = plt.subplot(121)
pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
ax = plt.axes(projection='polar')
plt.plot(list(xrange(10)), label='foobar')
plt.legend()
# Uncomment to debug any unpicklable objects. This is slow so is not
# uncommented by default.
# recursive_pickle(fig)
pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
# ax = plt.subplot(121, projection='hammer')
# recursive_pickle(ax, 'figure')
# pickle.dump(ax, BytesIO(), pickle.HIGHEST_PROTOCOL)
plt.figure()
plt.bar(left=list(xrange(10)), height=list(xrange(10)))
pickle.dump(plt.gca(), BytesIO(), pickle.HIGHEST_PROTOCOL)
fig = plt.figure()
ax = plt.axes()
plt.plot(list(xrange(10)))
ax.set_yscale('log')
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
@image_comparison(baseline_images=['multi_pickle'],
extensions=['png'], remove_text=True)
def test_complete():
fig = plt.figure('Figure with a label?', figsize=(10, 6))
plt.suptitle('Can you fit any more in a figure?')
# make some arbitrary data
x, y = np.arange(8), np.arange(10)
data = u = v = np.linspace(0, 10, 80).reshape(10, 8)
v = np.sin(v * -0.6)
plt.subplot(3, 3, 1)
plt.plot(list(xrange(10)))
plt.subplot(3, 3, 2)
plt.contourf(data, hatches=['//', 'ooo'])
plt.colorbar()
plt.subplot(3, 3, 3)
plt.pcolormesh(data)
plt.subplot(3, 3, 4)
plt.imshow(data)
plt.subplot(3, 3, 5)
plt.pcolor(data)
ax = plt.subplot(3, 3, 6)
ax.set_xlim(0, 7)
ax.set_ylim(0, 9)
plt.streamplot(x, y, u, v)
ax = plt.subplot(3, 3, 7)
ax.set_xlim(0, 7)
ax.set_ylim(0, 9)
plt.quiver(x, y, u, v)
plt.subplot(3, 3, 8)
plt.scatter(x, x**2, label='$x^2$')
plt.legend(loc='upper left')
plt.subplot(3, 3, 9)
plt.errorbar(x, x * -0.5, xerr=0.2, yerr=0.4)
###### plotting is done, now test its pickle-ability #########
# Uncomment to debug any unpicklable objects. This is slow (~200 seconds).
# recursive_pickle(fig)
result_fh = BytesIO()
pickle.dump(fig, result_fh, pickle.HIGHEST_PROTOCOL)
plt.close('all')
# make doubly sure that there are no figures left
assert_equal(plt._pylab_helpers.Gcf.figs, {})
# wind back the fh and load in the figure
result_fh.seek(0)
fig = pickle.load(result_fh)
# make sure there is now a figure manager
assert_not_equal(plt._pylab_helpers.Gcf.figs, {})
assert_equal(fig.get_label(), 'Figure with a label?')
def test_no_pyplot():
# tests pickle-ability of a figure not created with pyplot
from matplotlib.backends.backend_pdf import FigureCanvasPdf as fc
from matplotlib.figure import Figure
fig = Figure()
_ = fc(fig)
ax = fig.add_subplot(1, 1, 1)
ax.plot([1, 2, 3], [1, 2, 3])
pickle.dump(fig, BytesIO(), pickle.HIGHEST_PROTOCOL)
def test_renderer():
from matplotlib.backends.backend_agg import RendererAgg
renderer = RendererAgg(10, 20, 30)
pickle.dump(renderer, BytesIO())
def test_image():
# Prior to v1.4.0 the Image would cache data which was not picklable
# once it had been drawn.
from matplotlib.backends.backend_agg import new_figure_manager
manager = new_figure_manager(1000)
fig = manager.canvas.figure
ax = fig.add_subplot(1, 1, 1)
ax.imshow(np.arange(12).reshape(3, 4))
manager.canvas.draw()
pickle.dump(fig, BytesIO())
def test_grid():
from matplotlib.backends.backend_agg import new_figure_manager
manager = new_figure_manager(1000)
fig = manager.canvas.figure
ax = fig.add_subplot(1, 1, 1)
ax.grid()
# Drawing the grid triggers instance methods to be attached
# to the Line2D object (_lineFunc).
manager.canvas.draw()
pickle.dump(ax, BytesIO())
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s'])
| gpl-2.0 |
chilleo/ALPHA | module/informativeSites.py | 1 | 9154 | from collections import defaultdict
from natsort import natsorted
import os
import math
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LinearSegmentedColormap
from PyQt4 import QtCore
"""
Functions:
__init__(self, parent=None)
is_site_informative(self, site)
calculate_informativeness(self, window_directory, window_offset)
line_graph_generator(self, dictionary, xlabel, ylabel, name)
heat_map_generator(self, dictionary, name)
~
Chabrielle Allen
Travis Benedict
Peter Dulworth
"""
class InformativeSites(QtCore.QThread):
def __init__(self, parent=None):
super(InformativeSites, self).__init__(parent)
def is_site_informative(self, site):
"""
Determines if a site is informative or not
Input:
site --- a list of bases located at a site in the alignment
Output:
1 if a site is informative 0 if a site is uninformative
"""
# Create a mapping of bases to the number of times they occur
base_to_counts = defaultdict(int)
# Iterate over each base in the list site
for base in site:
# Add one each time a base occurs
base_to_counts[base] += 1
# Create a list of counts in descending order
base_counts = sorted(base_to_counts.values(), reverse=True)
if len(base_counts) >= 2:
# If two different bases occur at least twice the site is informative
if (base_counts[0] >= 2) and (base_counts[1] >= 2):
return 1
else:
return 0
else:
return 0
def calculate_informativeness(self, window_directory, window_offset, percentage, alignment=False):
"""
Calculates information about informative sites in an alignment
Input:
window_directory --- the location of the folder containing the phylip window files
window_offset --- the offset that was used to create the windows
        percentage --- the percent of the total alignment to look at; the site increment is scaled accordingly
alignment --- an alignment file
Output:
sites_to_informative --- a mapping of each site in the alignment to 1 if informative 0 if not
windows_to_informative_count --- a mapping of each window number to the number of informative sites it has
windows_to_informative_pct --- a mapping of each window number to the percentage of informative sites it has
pct_informative --- the percentage of informative sites over the entire alignment
"""
# Initialize the site index to 0
site_idx = 0
# Represent percentage as a decimal
pct = float(percentage) / 100
sites_to_informative = defaultdict(int)
windows_to_informative_count = defaultdict(int)
windows_to_informative_pct = {}
total_window_size = 0
if alignment:
with open(alignment) as f:
# Create a list of each line in the file
lines = f.readlines()
# First line contains the number and length of the sequences
first_line = lines[0].split()
number_of_sequences = int(first_line[0])
length_of_sequences = int(first_line[1])
sequence_list = []
for line in lines[1:]:
# Add each sequence to a list
sequence = line.split()[1]
sequence_list.append(sequence)
# Increment based on the percentage of the alignment desired
increment = int(math.ceil((1 / pct)))
# Iterate over the indices in each window
for window_idx in range(0, length_of_sequences, increment):
site = []
# Iterate over each sequence in the alignment
for sequence in sequence_list:
# Add each base in a site to a list
site.append(sequence[window_idx])
# Determine if a site is informative
informative = self.is_site_informative(site)
# If the site has not been visited before add to mappings (deals with overlapping windows)
if site_idx not in sites_to_informative:
# If the site is informative add 1 to the mappings otherwise add 0
sites_to_informative[site_idx] += informative
# Increment the site index
site_idx += increment
total_window_size += length_of_sequences
total_num_informative = sum(windows_to_informative_count.values())
if total_window_size == 0:
pct_informative = 0
else:
pct_informative = float(total_num_informative * 100) / total_window_size
else:
# Iterate over each folder in the given directory in numerical order
for filename in natsorted(os.listdir(window_directory)):
# If file is a phylip file get the number of the window
if filename.endswith(".phylip"):
file_number = filename.replace("window", "")
file_number = int(file_number.replace(".phylip", ""))
input_file = os.path.join(window_directory, filename)
sequence_list = []
with open(input_file) as f:
# Create a list of each line in the file
lines = f.readlines()
# First line contains the number and length of the sequences
first_line = lines[0].split()
number_of_sequences = int(first_line[0])
length_of_sequences = int(first_line[1])
for line in lines[1:]:
# Add each sequence to a list
sequence = line.split()[1]
sequence_list.append(sequence)
# Increment based on the percentage of the alignment desired
increment = int(math.ceil((1 / pct)))
# Iterate over the indices in each window
for window_idx in range(0, length_of_sequences, increment):
site = []
# Iterate over each sequence in the alignment
for sequence in sequence_list:
# Add each base in a site to a list
site.append(sequence[window_idx])
# Determine if a site is informative
informative = self.is_site_informative(site)
# If the site has not been visited before add to mappings (deals with overlapping windows)
if site_idx not in sites_to_informative:
# If the site is informative add 1 to the mappings otherwise add 0
sites_to_informative[site_idx] += informative
windows_to_informative_count[file_number] += informative
# Increment the site index
site_idx += increment
# Account for overlapping windows
site_idx += (window_offset - length_of_sequences)
# Map windows_to_informative_count to a percentage
windows_to_informative_pct[file_number] = windows_to_informative_count[file_number] *\
(100 / float(length_of_sequences))
total_window_size += length_of_sequences
total_num_informative = sum(windows_to_informative_count.values())
# Add in the last site index if it is not already in the informative mapping
# This is useful for plotting reasons
if site_idx not in sites_to_informative:
sites_to_informative[site_idx] = 0
if total_window_size == 0:
pct_informative = 0
else:
pct_informative = float(total_num_informative * 100) / total_window_size
return sites_to_informative, windows_to_informative_count, windows_to_informative_pct, pct_informative
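# Minimal sketch of the informativeness rule used above, re-implemented inline
# so it can be exercised without constructing the Qt thread (the example
# columns are made up): a site is parsimony informative only when at least two
# different bases each occur at least twice.
def _informative_site_sketch():
    from collections import Counter
    def informative(site):
        counts = sorted(Counter(site).values(), reverse=True)
        return 1 if len(counts) >= 2 and counts[0] >= 2 and counts[1] >= 2 else 0
    assert informative(["A", "A", "T", "T", "G"]) == 1  # A and T both occur twice
    assert informative(["A", "A", "A", "A", "T"]) == 0  # only A repeats
    return True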
if __name__ == '__main__': # if we're running file directly and not importing it
# travys window dir
# window_dir = "C:\\Users\\travi\\Documents\\Evolutionary-Diversity-Visualization-Python\\windows"
# peters window dir
# window_dir = '/Users/Peter/PycharmProjects/Evolutionary-Diversity-Visualization-Python/windows'
# chabs window dir ?
# window_dir = ''
infs = InformativeSites()
    window_dir = "windows"  # placeholder path: point this at a folder of phylip window files
    sites_to_informative, windows_to_informative_count, windows_to_informative_pct, pct_informative = infs.calculate_informativeness(window_dir, 50000, 100)  # 100 = consider every site; the percentage argument is required
# print str(pct_informative) + "%"
infs.line_graph_generator(windows_to_informative_pct, "Windows", "Percentage of Informative Sites", "pctInformative.png")
infs.heat_map_generator(sites_to_informative, "HeatMapInfSites.png")
| mit |
CVML/scikit-learn | benchmarks/bench_sgd_regression.py | 283 | 5569 | """
Benchmark for SGD regression
Compares SGD regression against coordinate descent and Ridge
on synthetic data.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# License: BSD 3 clause
import numpy as np
import pylab as pl
import gc
from time import time
from sklearn.linear_model import Ridge, SGDRegressor, ElasticNet
from sklearn.metrics import mean_squared_error
from sklearn.datasets.samples_generator import make_regression
if __name__ == "__main__":
list_n_samples = np.linspace(100, 10000, 5).astype(np.int)
list_n_features = [10, 100, 1000]
n_test = 1000
noise = 0.1
alpha = 0.01
sgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
elnet_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
ridge_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
asgd_results = np.zeros((len(list_n_samples), len(list_n_features), 2))
for i, n_train in enumerate(list_n_samples):
for j, n_features in enumerate(list_n_features):
X, y, coef = make_regression(
n_samples=n_train + n_test, n_features=n_features,
noise=noise, coef=True)
X_train = X[:n_train]
y_train = y[:n_train]
X_test = X[n_train:]
y_test = y[n_train:]
print("=======================")
print("Round %d %d" % (i, j))
print("n_features:", n_features)
print("n_samples:", n_train)
# Shuffle data
idx = np.arange(n_train)
np.random.seed(13)
np.random.shuffle(idx)
X_train = X_train[idx]
y_train = y_train[idx]
std = X_train.std(axis=0)
mean = X_train.mean(axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std
std = y_train.std(axis=0)
mean = y_train.mean(axis=0)
y_train = (y_train - mean) / std
y_test = (y_test - mean) / std
gc.collect()
print("- benchmarking ElasticNet")
clf = ElasticNet(alpha=alpha, l1_ratio=0.5, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
elnet_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
elnet_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.01, power_t=0.25)
tstart = time()
clf.fit(X_train, y_train)
sgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
sgd_results[i, j, 1] = time() - tstart
gc.collect()
print("n_iter", n_iter)
print("- benchmarking A-SGD")
n_iter = np.ceil(10 ** 4.0 / n_train)
clf = SGDRegressor(alpha=alpha / n_train, fit_intercept=False,
n_iter=n_iter, learning_rate="invscaling",
eta0=.002, power_t=0.05,
average=(n_iter * n_train // 2))
tstart = time()
clf.fit(X_train, y_train)
asgd_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
asgd_results[i, j, 1] = time() - tstart
gc.collect()
print("- benchmarking RidgeRegression")
clf = Ridge(alpha=alpha, fit_intercept=False)
tstart = time()
clf.fit(X_train, y_train)
ridge_results[i, j, 0] = mean_squared_error(clf.predict(X_test),
y_test)
ridge_results[i, j, 1] = time() - tstart
# Plot results
i = 0
m = len(list_n_features)
pl.figure('scikit-learn SGD regression benchmark results',
figsize=(5 * 2, 4 * m))
for j in range(m):
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 0]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 0]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 0]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 0]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("RMSE")
pl.title("Test error - %d features" % list_n_features[j])
i += 1
pl.subplot(m, 2, i + 1)
pl.plot(list_n_samples, np.sqrt(elnet_results[:, j, 1]),
label="ElasticNet")
pl.plot(list_n_samples, np.sqrt(sgd_results[:, j, 1]),
label="SGDRegressor")
pl.plot(list_n_samples, np.sqrt(asgd_results[:, j, 1]),
label="A-SGDRegressor")
pl.plot(list_n_samples, np.sqrt(ridge_results[:, j, 1]),
label="Ridge")
pl.legend(prop={"size": 10})
pl.xlabel("n_train")
pl.ylabel("Time [sec]")
pl.title("Training time - %d features" % list_n_features[j])
i += 1
pl.subplots_adjust(hspace=.30)
pl.show()
| bsd-3-clause |
rbharath/deepchem | examples/pcba/pcba_sklearn.py | 3 | 1806 | """
Script that trains Sklearn multitask models on PCBA dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import numpy as np
import shutil
from pcba_datasets import load_pcba
from sklearn.ensemble import RandomForestClassifier
from deepchem.models.multitask import SingletaskToMultitask
from deepchem import metrics
from deepchem.metrics import Metric
from deepchem.models.sklearn_models import SklearnModel
from deepchem.utils.evaluate import Evaluator
np.random.seed(123)
# Set some global variables up top
reload = True
verbosity = "high"
base_dir = "/tmp/pcba_sklearn"
model_dir = os.path.join(base_dir, "model")
if os.path.exists(base_dir):
shutil.rmtree(base_dir)
os.makedirs(base_dir)
pcba_tasks, pcba_datasets, transformers = load_pcba(
base_dir, reload=reload)
(train_dataset, valid_dataset) = pcba_datasets
classification_metric = Metric(metrics.roc_auc_score, np.mean,
verbosity=verbosity,
mode="classification")
def model_builder(model_dir):
sklearn_model = RandomForestClassifier(
class_weight="balanced", n_estimators=500)
return SklearnModel(sklearn_model, model_dir)
model = SingletaskToMultitask(pcba_tasks, model_builder, model_dir)
# Fit trained model
model.fit(train_dataset)
model.save()
train_evaluator = Evaluator(model, train_dataset, transformers, verbosity=verbosity)
train_scores = train_evaluator.compute_model_performance([classification_metric])
print("Train scores")
print(train_scores)
valid_evaluator = Evaluator(model, valid_dataset, transformers, verbosity=verbosity)
valid_scores = valid_evaluator.compute_model_performance([classification_metric])
print("Validation scores")
print(valid_scores)
| mit |
kubeflow/kfp-tekton-backend | samples/core/ai_platform/training/trainer/util.py | 2 | 2547 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities to download and pre-process the data.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import pandas as pd
from pandas.compat import StringIO
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.python.lib.io import file_io
def create_dataset(dataset, window_size = 1):
data_X, data_y = [], []
df = pd.DataFrame(dataset)
columns = [df.shift(i) for i in reversed(range(1, window_size+1))]
data_X = pd.concat(columns, axis=1).dropna().values
data_y = df.shift(-window_size).dropna().values
return data_X, data_y
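# Illustrative check of the windowing above (the numbers are made up): with
# window_size=2 the series [1, 2, 3, 4] yields X rows holding the previous two
# observations and y rows holding the value that follows each window, i.e.
# X == [[1, 2], [2, 3]] and y == [[3], [4]] (as floats, because of the shifts).
def _create_dataset_example():
  X, y = create_dataset([1, 2, 3, 4], window_size=2)
  return X.tolist(), y.tolist()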
def load_data(data_file_url, window_size):
"""Loads data into preprocessed (train_X, train_y, eval_X, eval_y) dataframes.
Returns:
A tuple (train_X, train_y, eval_X, eval_y), where train_X and eval_X are
Pandas dataframes with features for training and train_y and eval_y are
numpy arrays with the corresponding labels.
"""
# The % of data we should use for training
TRAINING_SPLIT = 0.8
# Download CSV and import into Pandas DataFrame
file_stream = file_io.FileIO(data_file_url, mode='r')
df = pd.read_csv(StringIO(file_stream.read()))
df.index = df[df.columns[0]]
df = df[['count']]
scaler = StandardScaler()
# Time series: split latest data into test set
train = df.values[:int(TRAINING_SPLIT * len(df)), :]
print(train)
train = scaler.fit_transform(train)
test = df.values[int(TRAINING_SPLIT * len(df)):, :]
test = scaler.transform(test)
# Create test and training sets
train_X, train_y = create_dataset(train, window_size)
test_X, test_y = create_dataset(test, window_size)
# Reshape input data
train_X = np.reshape(train_X, (train_X.shape[0], 1, train_X.shape[1]))
test_X = np.reshape(test_X, (test_X.shape[0], 1, test_X.shape[1]))
return train_X, train_y, test_X, test_y
| apache-2.0 |
CVML/scikit-learn | examples/neighbors/plot_regression.py | 349 | 1402 | """
============================
Nearest Neighbors regression
============================
Demonstrate the resolution of a regression problem
using a k-Nearest Neighbor and the interpolation of the
target using both barycenter and constant weights.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause (C) INRIA
###############################################################################
# Generate sample data
import numpy as np
import matplotlib.pyplot as plt
from sklearn import neighbors
np.random.seed(0)
X = np.sort(5 * np.random.rand(40, 1), axis=0)
T = np.linspace(0, 5, 500)[:, np.newaxis]
y = np.sin(X).ravel()
# Add noise to targets
y[::5] += 1 * (0.5 - np.random.rand(8))
###############################################################################
# Fit regression model
n_neighbors = 5
for i, weights in enumerate(['uniform', 'distance']):
knn = neighbors.KNeighborsRegressor(n_neighbors, weights=weights)
y_ = knn.fit(X, y).predict(T)
plt.subplot(2, 1, i + 1)
plt.scatter(X, y, c='k', label='data')
plt.plot(T, y_, c='g', label='prediction')
plt.axis('tight')
plt.legend()
plt.title("KNeighborsRegressor (k = %i, weights = '%s')" % (n_neighbors,
weights))
plt.show()
| bsd-3-clause |
vybstat/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting_loss_functions.py | 221 | 5517 | """
Testing for the gradient boosting loss functions and initial estimators.
"""
import numpy as np
from numpy.testing import assert_array_equal
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.utils import check_random_state
from sklearn.ensemble.gradient_boosting import BinomialDeviance
from sklearn.ensemble.gradient_boosting import LogOddsEstimator
from sklearn.ensemble.gradient_boosting import LeastSquaresError
from sklearn.ensemble.gradient_boosting import RegressionLossFunction
from sklearn.ensemble.gradient_boosting import LOSS_FUNCTIONS
from sklearn.ensemble.gradient_boosting import _weighted_percentile
def test_binomial_deviance():
# Check binomial deviance loss.
# Check against alternative definitions in ESLII.
bd = BinomialDeviance(2)
# pred has the same BD for y in {0, 1}
assert_equal(bd(np.array([0.0]), np.array([0.0])),
bd(np.array([1.0]), np.array([0.0])))
assert_almost_equal(bd(np.array([1.0, 1.0, 1.0]),
np.array([100.0, 100.0, 100.0])),
0.0)
assert_almost_equal(bd(np.array([1.0, 0.0, 0.0]),
np.array([100.0, -100.0, -100.0])), 0)
# check if same results as alternative definition of deviance (from ESLII)
alt_dev = lambda y, pred: np.mean(np.logaddexp(0.0, -2.0 *
(2.0 * y - 1) * pred))
test_data = [(np.array([1.0, 1.0, 1.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]), np.array([100.0, 100.0, 100.0])),
(np.array([0.0, 0.0, 0.0]),
np.array([-100.0, -100.0, -100.0])),
(np.array([1.0, 1.0, 1.0]),
np.array([-100.0, -100.0, -100.0]))]
for datum in test_data:
assert_almost_equal(bd(*datum), alt_dev(*datum))
# check the gradient against the
alt_ng = lambda y, pred: (2 * y - 1) / (1 + np.exp(2 * (2 * y - 1) * pred))
for datum in test_data:
assert_almost_equal(bd.negative_gradient(*datum), alt_ng(*datum))
def test_log_odds_estimator():
# Check log odds estimator.
est = LogOddsEstimator()
assert_raises(ValueError, est.fit, None, np.array([1]))
est.fit(None, np.array([1.0, 0.0]))
assert_equal(est.prior, 0.0)
assert_array_equal(est.predict(np.array([[1.0], [1.0]])),
np.array([[0.0], [0.0]]))
def test_sample_weight_smoke():
rng = check_random_state(13)
y = rng.rand(100)
pred = rng.rand(100)
# least squares
loss = LeastSquaresError(1)
loss_wo_sw = loss(y, pred)
loss_w_sw = loss(y, pred, np.ones(pred.shape[0], dtype=np.float32))
assert_almost_equal(loss_wo_sw, loss_w_sw)
def test_sample_weight_init_estimators():
# Smoke test for init estimators with sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
else:
k = 2
y = clf_y
if Loss.is_multi_class:
# skip multiclass
continue
loss = Loss(k)
init_est = loss.init_estimator()
init_est.fit(X, y)
out = init_est.predict(X)
assert_equal(out.shape, (y.shape[0], 1))
sw_init_est = loss.init_estimator()
sw_init_est.fit(X, y, sample_weight=sample_weight)
sw_out = init_est.predict(X)
assert_equal(sw_out.shape, (y.shape[0], 1))
# check if predictions match
assert_array_equal(out, sw_out)
def test_weighted_percentile():
y = np.empty(102, dtype=np.float)
y[:50] = 0
y[-51:] = 2
y[-1] = 100000
y[50] = 1
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 1
def test_weighted_percentile_equal():
y = np.empty(102, dtype=np.float)
y.fill(0.0)
sw = np.ones(102, dtype=np.float)
sw[-1] = 0.0
score = _weighted_percentile(y, sw, 50)
assert score == 0
def test_weighted_percentile_zero_weight():
y = np.empty(102, dtype=np.float)
y.fill(1.0)
sw = np.ones(102, dtype=np.float)
sw.fill(0.0)
score = _weighted_percentile(y, sw, 50)
assert score == 1.0
def test_sample_weight_deviance():
# Test if deviance supports sample weights.
rng = check_random_state(13)
X = rng.rand(100, 2)
sample_weight = np.ones(100)
reg_y = rng.rand(100)
clf_y = rng.randint(0, 2, size=100)
mclf_y = rng.randint(0, 3, size=100)
for Loss in LOSS_FUNCTIONS.values():
if Loss is None:
continue
if issubclass(Loss, RegressionLossFunction):
k = 1
y = reg_y
p = reg_y
else:
k = 2
y = clf_y
p = clf_y
if Loss.is_multi_class:
k = 3
y = mclf_y
# one-hot encoding
p = np.zeros((y.shape[0], k), dtype=np.float64)
for i in range(k):
p[:, i] = y == i
loss = Loss(k)
deviance_w_w = loss(y, p, sample_weight)
deviance_wo_w = loss(y, p)
assert deviance_wo_w == deviance_w_w
| bsd-3-clause |
tapomayukh/projects_in_python | classification/Classification_with_kNN/Single_Contact_Classification/Feature_Comparison/single_feature/results/test10_cross_validate_categories_1200ms_scaled_method_v_force.py | 1 | 4672 |
# Principal Component Analysis Code :
from numpy import mean,cov,double,cumsum,dot,linalg,array,rank,size,flipud
from pylab import *
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
from mvpa.clfs.knn import kNN
from mvpa.datasets import Dataset
from mvpa.clfs.transerror import TransferError
from mvpa.misc.data_generators import normalFeatureDataset
from mvpa.algorithms.cvtranserror import CrossValidatedTransferError
from mvpa.datasets.splitters import NFoldSplitter
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_kNN/Scaled')
from data_method_V import Fmat_original
def pca(X):
#get dimensions
num_data,dim = X.shape
#center data
mean_X = X.mean(axis=1)
M = (X-mean_X) # subtract the mean (along columns)
Mcov = cov(M)
###### Sanity Check ######
i=0
n=0
while i < 41:
j=0
while j < 140:
if X[i,j] != X[i,j]:
print X[i,j]
print i,j
n=n+1
j = j+1
i=i+1
print n
##########################
print 'PCA - COV-Method used'
val,vec = linalg.eig(Mcov)
#return the projection matrix, the variance and the mean
return vec,val,mean_X, M, Mcov
if __name__ == '__main__':
Fmat = Fmat_original[0:41,:]
# Checking the Data-Matrix
m_tot, n_tot = np.shape(Fmat)
print 'Total_Matrix_Shape:',m_tot,n_tot
eigvec_total, eigval_total, mean_data_total, B, C = pca(Fmat)
#print eigvec_total
#print eigval_total
#print mean_data_total
m_eigval_total, n_eigval_total = np.shape(np.matrix(eigval_total))
m_eigvec_total, n_eigvec_total = np.shape(eigvec_total)
m_mean_data_total, n_mean_data_total = np.shape(np.matrix(mean_data_total))
print 'Eigenvalue Shape:',m_eigval_total, n_eigval_total
print 'Eigenvector Shape:',m_eigvec_total, n_eigvec_total
print 'Mean-Data Shape:',m_mean_data_total, n_mean_data_total
#Recall that the cumulative sum of the eigenvalues shows the level of variance accounted by each of the corresponding eigenvectors. On the x axis there is the number of eigenvalues used.
perc_total = cumsum(eigval_total)/sum(eigval_total)
# Reduced Eigen-Vector Matrix according to highest Eigenvalues..(Considering First 20 based on above figure)
W = eigvec_total[:,0:2]
m_W, n_W = np.shape(W)
print 'Reduced Dimension Eigenvector Shape:',m_W, n_W
# Normalizes the data set with respect to its variance (Not an Integral part of PCA, but sometimes useful)
length = len(eigval_total)
s = np.matrix(np.zeros(length)).T
i = 0
while i < length:
s[i] = sqrt(C[i,i])
i = i+1
Z = np.divide(B,s)
m_Z, n_Z = np.shape(Z)
print 'Z-Score Shape:', m_Z, n_Z
#Projected Data:
Y = (W.T)*B # 'B' for my Laptop: otherwise 'Z' instead of 'B'
m_Y, n_Y = np.shape(Y.T)
print 'Transposed Projected Data Shape:', m_Y, n_Y
#Using PYMVPA
PCA_data = np.array(Y.T)
PCA_label_1 = ['Rigid-Fixed']*35 + ['Rigid-Movable']*35 + ['Soft-Fixed']*35 + ['Soft-Movable']*35
PCA_chunk_1 = ['Styrofoam-Fixed']*5 + ['Books-Fixed']*5 + ['Bucket-Fixed']*5 + ['Bowl-Fixed']*5 + ['Can-Fixed']*5 + ['Box-Fixed']*5 + ['Pipe-Fixed']*5 + ['Styrofoam-Movable']*5 + ['Container-Movable']*5 + ['Books-Movable']*5 + ['Cloth-Roll-Movable']*5 + ['Black-Rubber-Movable']*5 + ['Can-Movable']*5 + ['Box-Movable']*5 + ['Rug-Fixed']*5 + ['Bubble-Wrap-1-Fixed']*5 + ['Pillow-1-Fixed']*5 + ['Bubble-Wrap-2-Fixed']*5 + ['Sponge-Fixed']*5 + ['Foliage-Fixed']*5 + ['Pillow-2-Fixed']*5 + ['Rug-Movable']*5 + ['Bubble-Wrap-1-Movable']*5 + ['Pillow-1-Movable']*5 + ['Bubble-Wrap-2-Movable']*5 + ['Pillow-2-Movable']*5 + ['Cushion-Movable']*5 + ['Sponge-Movable']*5
clf = kNN(k=3)
terr = TransferError(clf)
ds1 = Dataset(samples=PCA_data,labels=PCA_label_1,chunks=PCA_chunk_1)
print ds1.samples.shape
cvterr = CrossValidatedTransferError(terr,NFoldSplitter(cvtype=1),enable_states=['confusion'])
error = cvterr(ds1)
print error
print cvterr.confusion.asstring(description=False)
figure(1)
cvterr.confusion.plot(numbers='True')
#show()
# Variances
figure(2)
title('Variances of PCs')
stem(range(len(perc_total)),perc_total,'--b')
axis([-0.3,30.3,0,1.2])
grid('True')
show()
| mit |
padipadou/CADL | session-5/libs/i2v.py | 14 | 5180 | """
Creative Applications of Deep Learning w/ Tensorflow.
Kadenze, Inc.
Copyright Parag K. Mital, June 2016.
"""
import json
import numpy as np
from tensorflow.python.platform import gfile
import tensorflow as tf
import matplotlib.pyplot as plt
from skimage.transform import resize as imresize
from .utils import download
def i2v_download():
"""Download a pretrained i2v network."""
model = download('https://s3.amazonaws.com/cadl/models/illust2vec.tfmodel')
return model
def i2v_tag_download():
"""Download a pretrained i2v network."""
model = download('https://s3.amazonaws.com/cadl/models/illust2vec_tag.tfmodel')
tags = download('https://s3.amazonaws.com/cadl/models/tag_list.json')
return model, tags
def get_i2v_model():
"""Get a pretrained i2v network.
Returns
-------
net : dict
        {'graph_def': graph_def}
        where graph_def is a tf.GraphDef holding the pretrained
        illustration2vec feature network
"""
# Download the trained net
model = i2v_download()
# Load the saved graph
with gfile.GFile(model, 'rb') as f:
graph_def = tf.GraphDef()
try:
graph_def.ParseFromString(f.read())
except:
print('try adding PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python' +
'to environment. e.g.:\n' +
'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ipython\n' +
'See here for info: ' +
'https://github.com/tensorflow/tensorflow/issues/582')
return {'graph_def': graph_def}
def get_i2v_tag_model():
"""Get a pretrained i2v tag network.
Returns
-------
net : dict
        {'graph_def': graph_def, 'labels': tags,
         'preprocess': preprocess, 'deprocess': deprocess}
        where graph_def is a tf.GraphDef and tags is the list of
        illustration2vec tag names indexed by the network outputs
"""
# Download the trained net
model, tags = i2v_tag_download()
tags = json.load(open(tags, 'r'))
# Load the saved graph
with gfile.GFile(model, 'rb') as f:
graph_def = tf.GraphDef()
try:
graph_def.ParseFromString(f.read())
except:
print('try adding PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python' +
'to environment. e.g.:\n' +
'PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION=python ipython\n' +
'See here for info: ' +
'https://github.com/tensorflow/tensorflow/issues/582')
return {
'graph_def': graph_def,
'labels': tags,
'preprocess': preprocess,
'deprocess': deprocess
}
def preprocess(img, crop=True, resize=True, dsize=(224, 224)):
mean_img = np.array([164.76139251, 167.47864617, 181.13838569])
if img.dtype == np.uint8:
img = (img[..., ::-1] - mean_img).astype(np.float32)
else:
img = img[..., ::-1] * 255.0 - mean_img
if crop:
short_edge = min(img.shape[:2])
yy = int((img.shape[0] - short_edge) / 2)
xx = int((img.shape[1] - short_edge) / 2)
crop_img = img[yy: yy + short_edge, xx: xx + short_edge]
else:
crop_img = img
if resize:
norm_img = imresize(crop_img, dsize, preserve_range=True)
else:
norm_img = crop_img
return (norm_img).astype(np.float32)
def deprocess(img):
mean_img = np.array([164.76139251, 167.47864617, 181.13838569])
processed = (img + mean_img)[..., ::-1]
return np.clip(processed, 0, 255).astype(np.uint8)
# return ((img / np.max(np.abs(img))) * 127.5 +
# 127.5).astype(np.uint8)
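# Hedged sketch of the intended round trip for the two helpers above (the
# input below is a synthetic placeholder, not an illustration2vec asset):
# preprocess center-crops, resizes to 224x224, flips RGB to BGR and subtracts
# the channel means; deprocess undoes that to give a displayable uint8 image.
def _preprocess_roundtrip_example():
    rgb = np.zeros((300, 400, 3), dtype=np.uint8)  # any H x W x 3 uint8 image
    net_input = preprocess(rgb)                    # float32, shape (224, 224, 3)
    restored = deprocess(net_input)                # uint8, same shape, RGB again
    return net_input.shape, restored.dtype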
def test_i2v():
"""Loads the i2v network and applies it to a test image.
"""
with tf.Session() as sess:
net = get_i2v_model()
tf.import_graph_def(net['graph_def'], name='i2v')
g = tf.get_default_graph()
names = [op.name for op in g.get_operations()]
x = g.get_tensor_by_name(names[0] + ':0')
softmax = g.get_tensor_by_name(names[-3] + ':0')
from skimage import data
img = preprocess(data.coffee())[np.newaxis]
res = np.squeeze(softmax.eval(feed_dict={x: img}))
print([(res[idx], net['labels'][idx])
for idx in res.argsort()[-5:][::-1]])
"""Let's visualize the network's gradient activation
when backpropagated to the original input image. This
is effectively telling us which pixels contribute to the
predicted class or given neuron"""
pools = [name for name in names if 'pool' in name.split('/')[-1]]
fig, axs = plt.subplots(1, len(pools))
for pool_i, poolname in enumerate(pools):
pool = g.get_tensor_by_name(poolname + ':0')
pool.get_shape()
neuron = tf.reduce_max(pool, 1)
saliency = tf.gradients(neuron, x)
neuron_idx = tf.arg_max(pool, 1)
this_res = sess.run([saliency[0], neuron_idx],
feed_dict={x: img})
grad = this_res[0][0] / np.max(np.abs(this_res[0]))
axs[pool_i].imshow((grad * 128 + 128).astype(np.uint8))
axs[pool_i].set_title(poolname)
| apache-2.0 |
sarahgrogan/scikit-learn | sklearn/tree/tests/test_export.py | 130 | 9950 | """
Testing for export functions of decision trees (sklearn.tree.export).
"""
from re import finditer
from numpy.testing import assert_equal
from nose.tools import assert_raises
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.tree import export_graphviz
from sklearn.externals.six import StringIO
from sklearn.utils.testing import assert_in
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
y2 = [[-1, 1], [-1, 2], [-1, 3], [1, 1], [1, 2], [1, 3]]
w = [1, 1, 1, .5, .5, .5]
def test_graphviz_toy():
# Check correctness of export_graphviz
clf = DecisionTreeClassifier(max_depth=3,
min_samples_split=1,
criterion="gini",
random_state=2)
clf.fit(X, y)
# Test export code
out = StringIO()
export_graphviz(clf, out_file=out)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with feature_names
out = StringIO()
export_graphviz(clf, out_file=out, feature_names=["feature0", "feature1"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="feature0 <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test with class_names
out = StringIO()
export_graphviz(clf, out_file=out, class_names=["yes", "no"])
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = yes"] ;\n' \
'1 [label="gini = 0.0\\nsamples = 3\\nvalue = [3, 0]\\n' \
'class = yes"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="gini = 0.0\\nsamples = 3\\nvalue = [0, 3]\\n' \
'class = no"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test plot_options
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False,
proportion=True, special_characters=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'edge [fontname=helvetica] ;\n' \
'0 [label=<X<SUB>0</SUB> ≤ 0.0<br/>samples = 100.0%<br/>' \
'value = [0.5, 0.5]>, fillcolor="#e5813900"] ;\n' \
'1 [label=<samples = 50.0%<br/>value = [1.0, 0.0]>, ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label=<samples = 50.0%<br/>value = [0.0, 1.0]>, ' \
'fillcolor="#399de5ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, class_names=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box] ;\n' \
'0 [label="X[0] <= 0.0\\ngini = 0.5\\nsamples = 6\\n' \
'value = [3, 3]\\nclass = y[0]"] ;\n' \
'1 [label="(...)"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test max_depth with plot_options
out = StringIO()
export_graphviz(clf, out_file=out, max_depth=0, filled=True,
node_ids=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="node #0\\nX[0] <= 0.0\\ngini = 0.5\\n' \
'samples = 6\\nvalue = [3, 3]", fillcolor="#e5813900"] ;\n' \
'1 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 1 ;\n' \
'2 [label="(...)", fillcolor="#C0C0C0"] ;\n' \
'0 -> 2 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test multi-output with weighted samples
clf = DecisionTreeClassifier(max_depth=2,
min_samples_split=1,
criterion="gini",
random_state=2)
clf = clf.fit(X, y2, sample_weight=w)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, impurity=False)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled", color="black"] ;\n' \
'0 [label="X[0] <= 0.0\\nsamples = 6\\n' \
'value = [[3.0, 1.5, 0.0]\\n' \
'[1.5, 1.5, 1.5]]", fillcolor="#e5813900"] ;\n' \
'1 [label="X[1] <= -1.5\\nsamples = 3\\n' \
'value = [[3, 0, 0]\\n[1, 1, 1]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=45, ' \
'headlabel="True"] ;\n' \
'2 [label="samples = 1\\nvalue = [[1, 0, 0]\\n' \
'[0, 0, 1]]", fillcolor="#e58139ff"] ;\n' \
'1 -> 2 ;\n' \
'3 [label="samples = 2\\nvalue = [[2, 0, 0]\\n' \
'[1, 1, 0]]", fillcolor="#e581398c"] ;\n' \
'1 -> 3 ;\n' \
'4 [label="X[0] <= 1.5\\nsamples = 3\\n' \
'value = [[0.0, 1.5, 0.0]\\n[0.5, 0.5, 0.5]]", ' \
'fillcolor="#e5813965"] ;\n' \
'0 -> 4 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="False"] ;\n' \
'5 [label="samples = 2\\nvalue = [[0.0, 1.0, 0.0]\\n' \
'[0.5, 0.5, 0.0]]", fillcolor="#e581398c"] ;\n' \
'4 -> 5 ;\n' \
'6 [label="samples = 1\\nvalue = [[0.0, 0.5, 0.0]\\n' \
'[0.0, 0.0, 0.5]]", fillcolor="#e58139ff"] ;\n' \
'4 -> 6 ;\n' \
'}'
assert_equal(contents1, contents2)
# Test regression output with plot_options
clf = DecisionTreeRegressor(max_depth=3,
min_samples_split=1,
criterion="mse",
random_state=2)
clf.fit(X, y)
out = StringIO()
export_graphviz(clf, out_file=out, filled=True, leaves_parallel=True,
rotate=True, rounded=True)
contents1 = out.getvalue()
contents2 = 'digraph Tree {\n' \
'node [shape=box, style="filled, rounded", color="black", ' \
'fontname=helvetica] ;\n' \
'graph [ranksep=equally, splines=polyline] ;\n' \
'edge [fontname=helvetica] ;\n' \
'rankdir=LR ;\n' \
'0 [label="X[0] <= 0.0\\nmse = 1.0\\nsamples = 6\\n' \
'value = 0.0", fillcolor="#e581397f"] ;\n' \
'1 [label="mse = 0.0\\nsamples = 3\\nvalue = -1.0", ' \
'fillcolor="#e5813900"] ;\n' \
'0 -> 1 [labeldistance=2.5, labelangle=-45, ' \
'headlabel="True"] ;\n' \
'2 [label="mse = 0.0\\nsamples = 3\\nvalue = 1.0", ' \
'fillcolor="#e58139ff"] ;\n' \
'0 -> 2 [labeldistance=2.5, labelangle=45, ' \
'headlabel="False"] ;\n' \
'{rank=same ; 0} ;\n' \
'{rank=same ; 1; 2} ;\n' \
'}'
assert_equal(contents1, contents2)
def test_graphviz_errors():
# Check for errors of export_graphviz
clf = DecisionTreeClassifier(max_depth=3, min_samples_split=1)
clf.fit(X, y)
# Check feature_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, feature_names=[])
# Check class_names error
out = StringIO()
assert_raises(IndexError, export_graphviz, clf, out, class_names=[])
def test_friedman_mse_in_graphviz():
clf = DecisionTreeRegressor(criterion="friedman_mse", random_state=0)
clf.fit(X, y)
dot_data = StringIO()
export_graphviz(clf, out_file=dot_data)
clf = GradientBoostingClassifier(n_estimators=2, random_state=0)
clf.fit(X, y)
for estimator in clf.estimators_:
export_graphviz(estimator[0], out_file=dot_data)
for finding in finditer("\[.*?samples.*?\]", dot_data.getvalue()):
assert_in("friedman_mse", finding.group())
| bsd-3-clause |
lcharleux/numerical_analysis | doc/Traitement_images/Example_code/europe_hist.py | 1 | 1408 | #----------------------------------------------------
# Altitude histogram of Europe
#----------------------------------------------------
# PACKAGES
from PIL import Image # Load the Python Image Library
import numpy as np # Load Numpy
from matplotlib import pyplot as plt # Load pyplot (a Matplotlib submodule) and rename it plt
# IMAGE PROCESSING
im = Image.open('europe.tif') # PIL can read all common image formats
Nx, Ny = im.size # Reduce the resolution of the image
im = im.resize((Nx/5, Ny/5), Image.ANTIALIAS)
Z = np.array(im).astype(np.float64) # Convert the image to an array
max_altitude = 1000. # Maximum altitude in metres; this value is somewhat questionable (to be confirmed).
Z = Z / Z.max() * max_altitude # Rescale the altitudes
# DISPLAY
n_classes = 10 # Number of classes
fig = plt.figure()
fig.add_subplot(121)
plt.title('Histogram')
plt.ylabel('Surface area $km^2$') # Set the y label
plt.hist(Z.flatten(), bins=n_classes) # Histogram
fig.add_subplot(122)
plt.title('Cumulative histogram')
plt.hist(Z.flatten(), bins=n_classes, cumulative=True) # Cumulative histogram
plt.xlabel('Altitude $m$') # Set the x label
plt.show() # Show the figure
| gpl-2.0 |
zhenv5/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This examples shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
| bsd-3-clause |
Jimmy-Morzaria/scikit-learn | sklearn/preprocessing/tests/test_label.py | 2 | 18162 | import numpy as np
from scipy.sparse import issparse
from scipy.sparse import coo_matrix
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import dok_matrix
from scipy.sparse import lil_matrix
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.preprocessing.label import LabelBinarizer
from sklearn.preprocessing.label import MultiLabelBinarizer
from sklearn.preprocessing.label import LabelEncoder
from sklearn.preprocessing.label import label_binarize
from sklearn.preprocessing.label import _inverse_binarize_thresholding
from sklearn.preprocessing.label import _inverse_binarize_multiclass
from sklearn import datasets
iris = datasets.load_iris()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def test_label_binarizer():
lb = LabelBinarizer()
# one-class case defaults to negative label
inp = ["pos", "pos", "pos", "pos"]
expected = np.array([[0, 0, 0, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["pos"])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
# two-class case
inp = ["neg", "pos", "pos", "neg"]
expected = np.array([[0, 1, 1, 0]]).T
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ["neg", "pos"])
assert_array_equal(expected, got)
to_invert = np.array([[1, 0],
[0, 1],
[0, 1],
[1, 0]])
assert_array_equal(lb.inverse_transform(to_invert), inp)
# multi-class case
inp = ["spam", "ham", "eggs", "ham", "0"]
expected = np.array([[0, 0, 0, 1],
[0, 0, 1, 0],
[0, 1, 0, 0],
[0, 0, 1, 0],
[1, 0, 0, 0]])
got = lb.fit_transform(inp)
assert_array_equal(lb.classes_, ['0', 'eggs', 'ham', 'spam'])
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
def test_label_binarizer_unseen_labels():
lb = LabelBinarizer()
expected = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
got = lb.fit_transform(['b', 'd', 'e'])
assert_array_equal(expected, got)
expected = np.array([[0, 0, 0],
[1, 0, 0],
[0, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 0, 0]])
got = lb.transform(['a', 'b', 'c', 'd', 'e', 'f'])
assert_array_equal(expected, got)
@ignore_warnings
def test_label_binarizer_column_y():
# first for binary classification vs multi-label with 1 possible class
# lists are multi-label, array is multi-class :-/
inp_list = [[1], [2], [1]]
inp_array = np.array(inp_list)
multilabel_indicator = np.array([[1, 0], [0, 1], [1, 0]])
binaryclass_array = np.array([[0], [1], [0]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, multilabel_indicator)
assert_array_equal(out_2, binaryclass_array)
# second for multiclass classification vs multi-label with multiple
# classes
inp_list = [[1], [2], [1], [3]]
inp_array = np.array(inp_list)
# the indicator matrix output is the same in this case
indicator = np.array([[1, 0, 0], [0, 1, 0], [1, 0, 0], [0, 0, 1]])
lb_1 = LabelBinarizer()
out_1 = lb_1.fit_transform(inp_list)
lb_2 = LabelBinarizer()
out_2 = lb_2.fit_transform(inp_array)
assert_array_equal(out_1, out_2)
assert_array_equal(out_2, indicator)
def test_label_binarizer_set_label_encoding():
lb = LabelBinarizer(neg_label=-2, pos_label=0)
# two-class case with pos_label=0
inp = np.array([0, 1, 1, 0])
expected = np.array([[-2, 0, 0, -2]]).T
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
lb = LabelBinarizer(neg_label=-2, pos_label=2)
# multi-class case
inp = np.array([3, 2, 1, 2, 0])
expected = np.array([[-2, -2, -2, +2],
[-2, -2, +2, -2],
[-2, +2, -2, -2],
[-2, -2, +2, -2],
[+2, -2, -2, -2]])
got = lb.fit_transform(inp)
assert_array_equal(expected, got)
assert_array_equal(lb.inverse_transform(got), inp)
@ignore_warnings
def test_label_binarizer_errors():
# Check that invalid arguments yield ValueError
one_class = np.array([0, 0, 0, 0])
lb = LabelBinarizer().fit(one_class)
multi_label = [(2, 3), (0,), (0, 2)]
assert_raises(ValueError, lb.transform, multi_label)
lb = LabelBinarizer()
assert_raises(ValueError, lb.transform, [])
assert_raises(ValueError, lb.inverse_transform, [])
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=1)
assert_raises(ValueError, LabelBinarizer, neg_label=2, pos_label=2)
assert_raises(ValueError, LabelBinarizer, neg_label=1, pos_label=2,
sparse_output=True)
# Fail on y_type
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2], threshold=0)
# Fail on the number of classes
assert_raises(ValueError, _inverse_binarize_thresholding,
y=csr_matrix([[1, 2], [2, 1]]), output_type="foo",
classes=[1, 2, 3], threshold=0)
# Fail on the dimension of 'binary'
assert_raises(ValueError, _inverse_binarize_thresholding,
y=np.array([[1, 2, 3], [2, 1, 3]]), output_type="binary",
classes=[1, 2, 3], threshold=0)
# Fail on multioutput data
assert_raises(ValueError, LabelBinarizer().fit, np.array([[1, 3], [2, 1]]))
assert_raises(ValueError, label_binarize, np.array([[1, 3], [2, 1]]),
[1, 2, 3])
def test_label_encoder():
# Test LabelEncoder's transform and inverse_transform methods
le = LabelEncoder()
le.fit([1, 1, 4, 5, -1, 0])
assert_array_equal(le.classes_, [-1, 0, 1, 4, 5])
assert_array_equal(le.transform([0, 1, 4, 4, 5, -1, -1]),
[1, 2, 3, 3, 4, 0, 0])
assert_array_equal(le.inverse_transform([1, 2, 3, 3, 4, 0, 0]),
[0, 1, 4, 4, 5, -1, -1])
assert_raises(ValueError, le.transform, [0, 6])
def test_label_encoder_fit_transform():
# Test fit_transform
le = LabelEncoder()
ret = le.fit_transform([1, 1, 4, 5, -1, 0])
assert_array_equal(ret, [2, 2, 3, 4, 0, 1])
le = LabelEncoder()
ret = le.fit_transform(["paris", "paris", "tokyo", "amsterdam"])
assert_array_equal(ret, [1, 1, 2, 0])
def test_label_encoder_errors():
# Check that invalid arguments yield ValueError
le = LabelEncoder()
assert_raises(ValueError, le.transform, [])
assert_raises(ValueError, le.inverse_transform, [])
def test_sparse_output_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for sparse_output in [True, False]:
for inp in inputs:
            # With fit_transform
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit_transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer(sparse_output=sparse_output)
got = mlb.fit(inp()).transform(inp())
assert_equal(issparse(got), sparse_output)
if sparse_output:
got = got.toarray()
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
assert_raises(ValueError, mlb.inverse_transform,
csr_matrix(np.array([[0, 1, 1],
[2, 0, 0],
[1, 1, 0]])))
def test_multilabel_binarizer():
# test input as iterable of iterables
inputs = [
lambda: [(2, 3), (1,), (1, 2)],
lambda: (set([2, 3]), set([1]), set([1, 2])),
lambda: iter([iter((2, 3)), iter((1,)), set([1, 2])]),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
inverse = inputs[0]()
for inp in inputs:
        # With fit_transform
mlb = MultiLabelBinarizer()
got = mlb.fit_transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
# With fit
mlb = MultiLabelBinarizer()
got = mlb.fit(inp()).transform(inp())
assert_array_equal(indicator_mat, got)
assert_array_equal([1, 2, 3], mlb.classes_)
assert_equal(mlb.inverse_transform(got), inverse)
def test_multilabel_binarizer_empty_sample():
mlb = MultiLabelBinarizer()
y = [[1, 2], [1], []]
Y = np.array([[1, 1],
[1, 0],
[0, 0]])
assert_array_equal(mlb.fit_transform(y), Y)
def test_multilabel_binarizer_unknown_class():
mlb = MultiLabelBinarizer()
y = [[1, 2]]
assert_raises(KeyError, mlb.fit(y).transform, [[0]])
mlb = MultiLabelBinarizer(classes=[1, 2])
assert_raises(KeyError, mlb.fit_transform, [[0]])
def test_multilabel_binarizer_given_classes():
inp = [(2, 3), (1,), (1, 2)]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# fit().transform()
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, [1, 3, 2])
# ensure works with extra class
mlb = MultiLabelBinarizer(classes=[4, 1, 3, 2])
assert_array_equal(mlb.fit_transform(inp),
np.hstack(([[0], [0], [0]], indicator_mat)))
assert_array_equal(mlb.classes_, [4, 1, 3, 2])
# ensure fit is no-op as iterable is not consumed
inp = iter(inp)
mlb = MultiLabelBinarizer(classes=[1, 3, 2])
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
def test_multilabel_binarizer_same_length_sequence():
# Ensure sequences of the same length are not interpreted as a 2-d array
inp = [[1], [0], [2]]
indicator_mat = np.array([[0, 1, 0],
[1, 0, 0],
[0, 0, 1]])
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
def test_multilabel_binarizer_non_integer_labels():
tuple_classes = np.empty(3, dtype=object)
tuple_classes[:] = [(1,), (2,), (3,)]
inputs = [
([('2', '3'), ('1',), ('1', '2')], ['1', '2', '3']),
([('b', 'c'), ('a',), ('a', 'b')], ['a', 'b', 'c']),
([((2,), (3,)), ((1,),), ((1,), (2,))], tuple_classes),
]
indicator_mat = np.array([[0, 1, 1],
[1, 0, 0],
[1, 1, 0]])
for inp, classes in inputs:
# fit_transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
# fit().transform()
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit(inp).transform(inp), indicator_mat)
assert_array_equal(mlb.classes_, classes)
assert_array_equal(mlb.inverse_transform(indicator_mat), inp)
mlb = MultiLabelBinarizer()
assert_raises(TypeError, mlb.fit_transform, [({}), ({}, {'a': 'b'})])
def test_multilabel_binarizer_non_unique():
inp = [(1, 1, 1, 0)]
indicator_mat = np.array([[1, 1]])
mlb = MultiLabelBinarizer()
assert_array_equal(mlb.fit_transform(inp), indicator_mat)
def test_multilabel_binarizer_inverse_validation():
inp = [(1, 1, 1, 0)]
mlb = MultiLabelBinarizer()
mlb.fit_transform(inp)
# Not binary
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 3]]))
# The following binary cases are fine, however
mlb.inverse_transform(np.array([[0, 0]]))
mlb.inverse_transform(np.array([[1, 1]]))
mlb.inverse_transform(np.array([[1, 0]]))
# Wrong shape
assert_raises(ValueError, mlb.inverse_transform, np.array([[1]]))
assert_raises(ValueError, mlb.inverse_transform, np.array([[1, 1, 1]]))
def test_label_binarize_with_class_order():
out = label_binarize([1, 6], classes=[1, 2, 4, 6])
expected = np.array([[1, 0, 0, 0], [0, 0, 0, 1]])
assert_array_equal(out, expected)
# Modified class order
out = label_binarize([1, 6], classes=[1, 6, 4, 2])
expected = np.array([[1, 0, 0, 0], [0, 1, 0, 0]])
assert_array_equal(out, expected)
def check_binarized_results(y, classes, pos_label, neg_label, expected):
for sparse_output in [True, False]:
if ((pos_label == 0 or neg_label != 0) and sparse_output):
assert_raises(ValueError, label_binarize, y, classes,
neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
continue
# check label_binarize
binarized = label_binarize(y, classes, neg_label=neg_label,
pos_label=pos_label,
sparse_output=sparse_output)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
# check inverse
y_type = type_of_target(y)
if y_type == "multiclass":
inversed = _inverse_binarize_multiclass(binarized, classes=classes)
else:
inversed = _inverse_binarize_thresholding(binarized,
output_type=y_type,
classes=classes,
threshold=((neg_label +
pos_label) /
2.))
assert_array_equal(toarray(inversed), toarray(y))
# Check label binarizer
lb = LabelBinarizer(neg_label=neg_label, pos_label=pos_label,
sparse_output=sparse_output)
binarized = lb.fit_transform(y)
assert_array_equal(toarray(binarized), expected)
assert_equal(issparse(binarized), sparse_output)
inverse_output = lb.inverse_transform(binarized)
assert_array_equal(toarray(inverse_output), toarray(y))
assert_equal(issparse(inverse_output), issparse(y))
def test_label_binarize_binary():
y = [0, 1, 0]
classes = [0, 1]
pos_label = 2
neg_label = -1
expected = np.array([[2, -1], [-1, 2], [2, -1]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
# Binary case where sparse_output = True will not result in a ValueError
y = [0, 1, 0]
classes = [0, 1]
pos_label = 3
neg_label = 0
expected = np.array([[3, 0], [0, 3], [3, 0]])[:, 1].reshape((-1, 1))
yield check_binarized_results, y, classes, pos_label, neg_label, expected
def test_label_binarize_multiclass():
y = [0, 1, 2]
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = 2 * np.eye(3)
yield check_binarized_results, y, classes, pos_label, neg_label, expected
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_label_binarize_multilabel():
y_ind = np.array([[0, 1, 0], [1, 1, 1], [0, 0, 0]])
classes = [0, 1, 2]
pos_label = 2
neg_label = 0
expected = pos_label * y_ind
y_sparse = [sparse_matrix(y_ind)
for sparse_matrix in [coo_matrix, csc_matrix, csr_matrix,
dok_matrix, lil_matrix]]
for y in [y_ind] + y_sparse:
yield (check_binarized_results, y, classes, pos_label, neg_label,
expected)
assert_raises(ValueError, label_binarize, y, classes, neg_label=-1,
pos_label=pos_label, sparse_output=True)
def test_invalid_input_label_binarize():
assert_raises(ValueError, label_binarize, [0, 2], classes=[0, 2],
pos_label=0, neg_label=1)
def test_inverse_binarize_multiclass():
got = _inverse_binarize_multiclass(csr_matrix([[0, 1, 0],
[-1, 0, -1],
[0, 0, 0]]),
np.arange(3))
assert_array_equal(got, np.array([1, 1, 0]))
| bsd-3-clause |
tdhopper/scikit-learn | examples/semi_supervised/plot_label_propagation_structure.py | 247 | 2432 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
plt.figure(figsize=(8.5, 4))
plt.subplot(1, 2, 1)
plot_outer_labeled, = plt.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = plt.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = plt.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
plt.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
plt.title("Raw data (2 classes=red and blue)")
plt.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = plt.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = plt.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
plt.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
plt.title("Labels learned with Label Spreading (KNN)")
plt.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
plt.show()
| bsd-3-clause |
pombo-lab/gamtools | lib/gamtools/qc/segregation.py | 1 | 3007 | """
=========================
The qc.segregation module
=========================
The qc.segregation module contains functions for calculating quality control
statistics from segregation tables.
"""
import os
import itertools
import pandas as pd
from .. import segregation
def proportion_with_neighbours(block_list):
"""
    Calculate the proportion of positive windows that have a positive neighbour.
    :param list block_list: list of 0/1 values, one per window, where 1 marks a positive window.
    :returns: Proportion of positive windows with positive neighbours.
"""
no_with_neighbours = 0
for key, group in itertools.groupby(block_list):
group = list(group)
if key and len(group) > 1:
no_with_neighbours += len(group)
try:
return float(no_with_neighbours) / sum(block_list)
except ZeroDivisionError:
return 0.
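# Worked example for proportion_with_neighbours (illustrative values only):
# for block_list = [1, 1, 0, 1], the run (1, 1) contributes two positive windows
# with a positive neighbour out of three positive windows in total, so the
# function returns 2 / 3.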
def extract_sample_name(path):
"""
Get the sample name from the path
"""
return os.path.basename(path).split('.')[0]
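# Illustrative call (hypothetical path): extract_sample_name('/data/F123_A5.segregation.bed')
# returns 'F123_A5', i.e. the basename truncated at the first dot.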
def get_df_stats(segregation_df):
"""
Generate a table of summary statistics for each NP in a segregation table.
Statistics calculated are as follows:
    - *Proportion_with_neighbours*: Proportion of positive windows with a
      positive neighbour
    - *Genome_coverage*: Proportion of all windows which are positive
- *Positive_chromosomes*: Number of chromosomes with greater than
mean percentage of positive windows.
:param segregation_df: Input :ref:`~segregation table`
:returns: :class:`~pandas.DataFrame` of statistics for each NP.
"""
prop_neighb = segregation_df.apply(proportion_with_neighbours, axis=0)
prop_neighb.name = 'Proportion_with_neighbours'
genome_cover = segregation_df.mean()
genome_cover.name = 'Genome_coverage'
positive_chroms = (segregation_df.groupby(level=0).mean() >
segregation_df.groupby(level=0).mean().mean()).sum()
positive_chroms.name = 'Positive_chromosomes'
stats_df = pd.concat([genome_cover, positive_chroms, prop_neighb], axis=1).reset_index()
stats_df['Sample'] = stats_df['index'].apply(extract_sample_name)
return stats_df[['Sample', 'Genome_coverage', 'Positive_chromosomes',
'Proportion_with_neighbours']]
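# Minimal usage sketch (assumes segregation_df is a 0/1 table with one column per NP,
# as returned by segregation.open_segregation; the call below is illustrative only):
#
#     stats = get_df_stats(segregation_df)
#     print(stats[['Sample', 'Genome_coverage']].head())
#
# The column names follow the return statement of get_df_stats above.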
def get_segregation_stats(input_segregation, output_file):
"""
Given the path to a segregation table file, open the file and save the
summary statistics to output_file.
:param str input_segregation: Path to input segregation file.
:param str output_file: Path to save the output file.
"""
segregation_df = segregation.open_segregation(input_segregation)
stats_df = get_df_stats(segregation_df)
stats_df.to_csv(output_file, sep='\t', index=False)
def get_segregation_stats_doit(dependencies, targets):
"""Wrapper function to call get_segregation_stats from argparse"""
assert len(dependencies) == 1
assert len(targets) == 1
get_segregation_stats(list(dependencies)[0], list(targets)[0])
| apache-2.0 |
jlenain/flaapluc | flaapluc/automaticLightCurve.py | 1 | 65121 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Time-stamp: "2017-10-24 12:33:35 jlenain"
"""
FLaapLUC (Fermi/LAT automatic aperture photometry Light C<->Urve)
Automatic generation of aperture photometric light curves of
high energy sources, for a given source.
No likelihood fit is performed, the results solely rely on the
3FGL spectral fits, if available.
More information are available at:
http://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/aperture_photometry.html
@author Jean-Philippe Lenain <mailto:jlenain@in2p3.fr>
"""
import datetime
import glob
import logging
import matplotlib
matplotlib.use('Agg')
import os
import sys
import time
import numpy as np
from ConfigParser import ConfigParser
from matplotlib import pyplot as plt
from matplotlib.ticker import FuncFormatter
import ephem
from astropy.io import ascii
from astropy.io import fits
from astropy.coordinates import Angle
from astropy.coordinates import SkyCoord as Coords
from astropy import units as u
import gt_apps as fermi
from flaapluc import extras
# Flags
BATCH = True
# Flag to know whether Gamma is assumed to be ASSUMEDGAMMA
# or taken from the 3FGL.
FLAGASSUMEDGAMMA = False
# Global variables
TOFFSET = 54000. # offset in MJD for plot creation
# assumed photon index for a source not belonging to the 3FGL
ASSUMEDGAMMA = -2.5
def getConfigList(option, sep=','):
return [stuff for stuff in option.split(sep)]
def processSrc(mysrc=None, useThresh=False, daily=False, mail=True, longTerm=False, test=False, yearmonth=None,
mergelongterm=False, withhistory=False, update=False, configfile='default.cfg', force_daily=False,
stopmonth=None, stopday=None, forcealert=False, log=logging.INFO):
"""
Process a given source.
"""
logging.basicConfig(format='[%(levelname)s] %(message)s', level=log)
if mysrc is None:
logging.error('Missing input source !')
sys.exit(1)
# If we asked for a daily light curve, first make sure that the long time-binned data already exists, otherwise this script will crash, since the daily-binned PNG needs the long time-binned data to be created. No mail alert is sent at this step.
# We automatically recreate here any missing long time-binned data.
if daily and not longTerm and not force_daily:
logging.info('[%s] Daily light curve asked for, I will first process the long time-binned one', mysrc)
longtermactive, visible = processSrc(mysrc=mysrc,
useThresh=useThresh,
daily=False,
mail=False,
longTerm=longTerm,
yearmonth=yearmonth,
mergelongterm=mergelongterm,
withhistory=withhistory,
update=update,
configfile=configfile,
stopmonth=stopmonth,
stopday=stopday,
forcealert=forcealert,
log=log)
if longtermactive and visible:
logging.info('[%s] Source %s is active and visible in long time-binned data, processing daily-binned light curve...',
mysrc, mysrc)
elif longtermactive and not visible:
logging.info('[%s] \033[91mSource %s is active but not visible. Daily-binned light curve aborted...\033[0m',
mysrc, mysrc)
return False
elif not longtermactive and visible:
logging.info('[%s] \033[91mSource %s is visible but not active. Daily-binned light curve aborted...\033[0m',
mysrc, mysrc)
return False
elif not longtermactive and not visible:
logging.info('[%s] \033[91mSource %s is neither active nor visible. Daily-binned light curve aborted...\033[0m',
mysrc, mysrc)
return False
else:
logging.info('[%s] \033[91mDaily-binned light curve aborted, for unknown reason...\033[0m', mysrc)
return False
elif force_daily:
logging.info('[%s] Forcing daily light curve, I will first process the long time-binned one', mysrc)
longtermactive, visible = processSrc(mysrc=mysrc,
useThresh=useThresh,
daily=False,
mail=False,
longTerm=longTerm,
yearmonth=yearmonth,
mergelongterm=mergelongterm,
withhistory=withhistory,
update=update,
configfile=configfile,
stopmonth=stopmonth,
stopday=stopday,
forcealert=forcealert,
log=log)
else:
logging.info('[%s] Processing long time-binned light curve...', mysrc)
auto = automaticLightCurve(customThreshold=useThresh, daily=daily, longTerm=longTerm, yearmonth=yearmonth,
mergelongterm=mergelongterm, withhistory=withhistory, configfile=configfile, stopmonth=stopmonth,
stopday=stopday, forcealert=forcealert, log=log)
auto.readSourceList(mysrc)
if longTerm is True and mergelongterm is True:
# Remove all the old merged file for this source, before reprocessing the merged data
if not auto.daily:
for file in glob.glob(auto.workDir + '/' + auto.src + '*'):
os.remove(file)
if auto.daily:
for file in glob.glob(auto.workDir + '/' + auto.src + '*daily*'):
os.remove(file)
# TO BE CHANGED !!!
startyearmonth = '200808'
        # Hardcoded !!! Yuck, not good, ugly, bad !!!
if auto.stopmonth is not None:
thisyearmonth = auto.stopmonth
else:
thisyearmonth = datetime.date.today().strftime('%Y%m')
thisyear = thisyearmonth[:-2]
thismonth = thisyearmonth[-2:]
startyear = startyearmonth[:-2]
startmonth = startyearmonth[-2:]
# First make sure that all the month-by-month long-term data have been processed
#
# Loop on month from 2008/08 to this month
for year in range(int(startyear), int(thisyear) + 1):
for month in range(1, 12 + 1):
# To retrieve the correct results directories, 'month' should be made of 2 digits
month = '%02d' % month
tmpyearmonth = str(year) + str(month)
if (year == int(startyear) and int(month) < int(startmonth)) or (
year == int(thisyear) and int(month) > int(thismonth)):
continue
# If year=thisyear and month=thismonth, we should remove all data for this source and reprocess everything again with fresh, brand new data !
# BUT only if update=True
if year == int(thisyear) and int(month) == int(thismonth) and update is True:
tmpworkdir = auto.baseOutDir + "/longTerm/" + str(year) + str(month)
if not auto.daily:
for file in glob.glob(tmpworkdir + '/' + auto.src + '*'):
os.remove(file)
if auto.daily:
for file in glob.glob(tmpworkdir + '/' + auto.src + '*daily*'):
os.remove(file)
processSrc(mysrc=auto.src, useThresh=useThresh, daily=auto.daily, mail=False, longTerm=True, test=False,
yearmonth=tmpyearmonth, mergelongterm=False, update=update, configfile=configfile,
stopmonth=stopmonth, forcealert=forcealert)
# Then merge the GTI files together, and run createXML, photoLC, exposure, createDAT, createLCfig, createEnergyTimeFig. No mail is sent here.
auto.mergeGTIfiles()
if auto.fglName is not None:
auto.createXML()
mygamma = None
else:
mygamma = ASSUMEDGAMMA
logging.info('[%s] \033[93mNo 3FGL counterpart given in the list of sources, assuming photon index of %.2f for the light curve generation.\033[0m',
auto.src, mygamma)
auto.photoLC()
auto.exposure(gamma=mygamma)
auto.createDAT()
auto.createLCfig()
auto.createEnergyTimeFig()
# Exit here
return False
# End mergelongterm
# When mergelongterm is False, we do the following:
auto.selectSrc()
auto.makeTime()
# If we are in --long-term mode, but not in --merge-long-term mode, we can stop here, since the --merge-long-term mode then starts at the mergeGTIfiles level
if longTerm:
return False
global FLAGASSUMEDGAMMA
if auto.fglName is not None:
auto.createXML()
mygamma = None
FLAGASSUMEDGAMMA = False
else:
mygamma = ASSUMEDGAMMA
logging.info('[%s] \033[93mNo 3FGL counterpart given in the list of sources, assuming photon index of %.2f for the light curve generation.\033[0m',
auto.src, mygamma)
FLAGASSUMEDGAMMA = True
auto.photoLC()
auto.exposure(gamma=mygamma)
auto.createDAT()
auto.createLCfig()
auto.createEnergyTimeFig()
auto.sendAlert(nomailall=test, sendmail=mail)
return auto.active, auto.visible
class automaticLightCurve:
"""
FLaapLUC
Automatic aperture photometry light curve generation.
Main class, for a given of source.
"""
def __init__(self, file=None, customThreshold=False, daily=False,
longTerm=False, yearmonth=None, mergelongterm=False,
withhistory=False, stopmonth=None, stopday=None,
configfile='default.cfg', forcealert=False,
log=logging.INFO):
self.config = self.getConfig(configfile=configfile)
self.allskyDir = self.config.get('InputDirs', 'AllskyDir')
self.archiveDir = self.config.get('InputDirs', 'ArchiveDir')
self.templatesDir = self.config.get('InputDirs', 'TemplatesDir')
self.catalogFile = self.config.get('InputFiles', 'CatalogFile')
if file is None:
self.file = self.config.get('InputFiles', 'SourceList')
else:
self.file = file
self.baseOutDir = self.config.get('OutputDirs', 'OutputResultsDir')
self.allskyFile = self.allskyDir + "/" + self.config.get('InputFiles', 'WholeAllskyFile')
self.lastAllskyFile = self.allskyDir + "/" + self.config.get('InputFiles', 'LastAllskyFile')
self.spacecraftFile = self.allskyDir + "/" + self.config.get('InputFiles', 'SpacecraftFile')
self.forcealert = forcealert
try:
self.longtimebin = float(self.config.get('AlertTrigger', 'LongTimeBin'))
except:
# Take 7 days by default
self.longtimebin = 7.
logging.warning('\033[93mCan not read LongTimeBin in config file, taking %.1f as default.\033[0m', self.longtimebin)
try:
self.sigma = float(self.config.get('AlertTrigger', 'Sigma'))
except:
            # Take 3 sigma by default
self.sigma = 3.
logging.warning('\033[93mCan not read Sigma in config file, taking %.1f as default.\033[0m', self.sigma)
try:
self.sigmaLT = float(self.config.get('AlertTrigger', 'SigmaLT'))
except:
            # Take 1.5 sigma by default
self.sigmaLT = 1.5
logging.warning('\033[93mCan not read SigmaLT in config file, taking %.1f as default.\033[0m', self.sigmaLT)
# Read maxz and maxZA as lists, not as single floats
self.maxz = [float(i) for i in getConfigList(self.config.get('AlertTrigger', 'MaxZ'))]
self.maxZA = [float(i) for i in getConfigList(self.config.get('AlertTrigger', 'MaxZA'))]
try:
self.checkVisibility = self.config.get('Site', 'CheckVisibility')
self.siteLon = float(self.config.get('Site', 'SiteLongitude'))
self.siteLat = float(self.config.get('Site', 'SiteLatitude'))
self.siteAlt = float(self.config.get('Site', 'SiteAltitude'))
except:
# Don't check the source visibility, by default
self.checkVisibility = False
self.daily = daily
self.withhistory = withhistory
# Mail sender and recipients
self.usualRecipients = getConfigList(self.config.get('MailConfig', 'UsualRecipients'))
self.testRecipients = getConfigList(self.config.get('MailConfig', 'TestRecipients'))
self.mailSender = self.config.get('MailConfig', 'MailSender')
today = datetime.date.today().strftime('%Y%m%d')
self.stopday = stopday
if self.stopday is not None:
today = self.stopday.replace('-', '')
self.lastAllskyFile = self.allskyFile
# Setting file names and directories
if longTerm:
self.allsky = self.allskyFile
if not mergelongterm:
self.workDir = self.baseOutDir + "/longTerm/" + yearmonth
else:
self.workDir = self.baseOutDir + "/longTerm/merged"
else:
self.allsky = self.lastAllskyFile
self.workDir = self.baseOutDir + "/" + today
self.spacecraft = self.spacecraftFile
if not os.path.isdir(self.workDir):
try:
os.makedirs(self.workDir)
except OSError:
pass
self.fermiDir = os.getenv('FERMI_DIR')
# Setting default parameters
self.roi = 1. # degrees (http://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/aperture_photometry.html: "For aperture photometry we select a very small aperture (rad=1 degree), because we are not fitting the background.")
try:
self.emin = float(self.config.get('Erange', 'Emin'))
except:
# Take 100 MeV by default
self.emin = 1.e2 # E min
logging.warning('\033[93mCan not read Emin in config file, taking %.1g as default.\033[0m', self.emin)
try:
self.emax = float(self.config.get('Erange', 'Emax'))
except:
# Take 500 GeV by default
self.emax = 5.e5 # E max
logging.warning('\033[93mCan not read Emax in config file, taking %.1g as default.\033[0m', self.emax)
self.zmax = 90. # degrees
self.rockangle = 52. # maximal allowed rocking angle
if self.daily:
self.tbin = 24. * 60. * 60. # seconds, daily bins
else:
self.tbin = self.longtimebin * 24. * 60. * 60. # seconds, longtimebin by defaults
self.threshold = 1.e-6 # ph cm^-2 s^-1
self.customThreshold = customThreshold
self.stopmonth = stopmonth
# Open allsky file to get the start and stop dates
try:
hdu = fits.open(self.allsky)
except IOError as e:
logging.error("""I/O error ({0}): can not open file {1}: {2}
I will create the allsky file on the fly for you, for the last month of available data, using enrico.
First, retrieving the last photon files...
""".format(e.errno, self.allsky, e.strerror))
cmd = 'enrico_download --download_data'
r = os.system(cmd)
assert (r == 0), "Could not properly download the last data."
logging.error('Second, retrieving the last spacecraft file...')
cmd = 'enrico_download --download_spacecraft'
r = os.system(cmd)
assert (r == 0), "Could not properly download the last spacecraft file."
logging.error('Third, creating the allsky file with enrico...')
cmd = 'enrico_download --preprocess_data --steps=gtselect --event_classes=source --selections=all --emins=100'
r = os.system(cmd)
assert (r == 0), "Could not properly generate the allsky file."
import enrico.data
self.allsky = enrico.data.PREPROCESSED_DIR + '/source/all/emin_000100/gtselect.fits'
hdu = fits.open(self.allsky)
header = hdu[0].header
self.filtermoon = self.hasmoon()
if not longTerm:
self.tstart = header['TSTART']
self.tstop = header['TSTOP']
if self.stopday is not None:
from astropy.time import Time
self.tstop = extras.mjd2met(Time('%s 00:00:00' % self.stopday, format='iso', scale='utc').mjd)
self.tstart = self.tstop - 30 * 24 * 3600 # stop - 30 days
else:
missionStart = header['TSTART'] # in MET
missionStop = header['TSTOP'] # in MET
if not mergelongterm:
# Need to convert 'yearmonth' in MET
# self.tstart is the first day of yearmonth at 00:00:00, or missionStart
# self.tstop is the first day of next month at 00:00:00, or missionStop
year = yearmonth[:-2]
month = yearmonth[-2:]
                # Get date of first day of yearmonth at 00:00:00, in UNIX time (timetuple transforms a datetime object into a time object ???)
# year month day hour minute second microsecond
yearmonthStart = time.mktime(datetime.datetime(int(year), int(month), 1, 0, 0, 0, 0).timetuple())
if int(month) < 12:
yearmonthStop = time.mktime(datetime.datetime(int(year), int(month) + 1, 1, 0, 0, 0, 0).timetuple())
else:
yearmonthStop = time.mktime(datetime.datetime(int(year) + 1, 1, 1, 0, 0, 0, 0).timetuple())
# Convert these from UNIX time to MET
tmptstart = extras.mjd2met(extras.unixtime2mjd(yearmonthStart))
tmptstop = extras.mjd2met(extras.unixtime2mjd(yearmonthStop))
                logging.debug('INIT yearmonthStart=%s', yearmonthStart)
                logging.debug('INIT yearmonthStop=%s', yearmonthStop)
# Make sure that start of yearmonth is after the launch of Fermi, and that stop of yearmonth is before the very last data we have from NASA servers !
if tmptstart > missionStart:
self.tstart = tmptstart
else:
self.tstart = missionStart
if tmptstop < missionStop:
self.tstop = tmptstop
else:
self.tstop = missionStop
if mergelongterm is True:
self.tstart = missionStart
self.tstop = missionStop
def getConfig(self, configfile='./default.cfg'):
"""Get configuration from a configuration file."""
self.config = ConfigParser()
self.config.readfp(open(configfile))
return self.config
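    # Illustrative configuration sketch (section and option names follow the
    # self.config.get calls throughout this class; all values shown are placeholders only):
    #
    #     [InputDirs]
    #     AllskyDir=/data/allsky
    #     ArchiveDir=/data/archive
    #     TemplatesDir=/data/templates
    #     [InputFiles]
    #     SourceList=listSources.txt
    #     CatalogFile=/data/catalog/3FGL.fits
    #     WholeAllskyFile=allsky.fits
    #     LastAllskyFile=lastmonth.fits
    #     SpacecraftFile=spacecraft.fits
    #     [OutputDirs]
    #     OutputResultsDir=/data/results
    #     [Erange]
    #     Emin=100.
    #     Emax=500000.
    #     [AlertTrigger]
    #     LongTimeBin=7.
    #     Sigma=3.
    #     SigmaLT=1.5
    #     MaxZ=0.1,0.25,2.
    #     MaxZA=60.,45.,30.
    #     [Site]
    #     CheckVisibility=True
    #     SiteLongitude=16.5
    #     SiteLatitude=-23.27
    #     SiteAltitude=1800.
    #     [MailConfig]
    #     UsualRecipients=someone@example.com
    #     TestRecipients=someone@example.com
    #     MailSender=flaapluc@example.com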
def readSourceList(self, mysrc=None):
"""
Read the list of sources.
@todo Use a mySQL database instead of an ASCII file for the list of sources ?
"""
try:
srcList = ascii.read(self.file)
except IOError:
logging.error('Can not open %s', self.file)
sys.exit(1)
src = srcList['Name']
ra = srcList['RA']
dec = srcList['Dec']
z = srcList['z']
fglName = srcList['3FGLname']
# Read the threshold for the source from the source list, if we asked to process with custom thresholds when instanciating the class
if self.customThreshold:
myThreshold = srcList['Threshold']
# If we ask for a particular source, return the parameters for that source
        if mysrc is not None:
# Find our input src in the list of sources
for i in range(len(src)):
if src[i] == mysrc:
# Redefine the threshold if we provided a custom threshold
if self.customThreshold and myThreshold[i] != 0.:
try:
float(myThreshold[i])
self.threshold = myThreshold[i]
except ValueError:
logging.warning('The threshold of the source %s is not a float. Please, check the list of sources !', mysrc)
sys.exit(2)
self.src = src[i]
self.ra = ra[i]
self.dec = dec[i]
self.z = z[i]
self.fglName = fglName[i]
if self.fglName == 'None':
self.fglName = None
return
# If we end up without any found source, print out a WARNING
logging.warning('Can\'t find your source %s in the list of sources !', str(mysrc))
self.src = None
self.ra = None
self.dec = None
self.z = None
self.fglName = None
return
# Otherwise, return the whole list of parameters for all the sources
else:
return src, ra, dec, z, fglName
def selectSrc(self):
"""
Filter a given source, running gtselect
"""
# Do we have to deal with a FITS file or an ASCII list of FITS file ?
allskyext = os.path.splitext(self.allsky)[1]
if allskyext in [".fit", ".fits"]:
fermi.filter['infile'] = self.allsky
else:
fermi.filter['infile'] = '@%s' % self.allsky
if self.daily:
outfile = self.workDir + '/' + str(self.src) + '_daily.fits'
else:
outfile = self.workDir + '/' + str(self.src) + '.fits'
fermi.filter['outfile'] = outfile
# If outfile already exists, we don't do anything
if os.path.isfile(outfile):
return True
fermi.filter['ra'] = self.ra
fermi.filter['dec'] = self.dec
fermi.filter['rad'] = self.roi
fermi.filter['emin'] = self.emin
fermi.filter['emax'] = self.emax
fermi.filter['tmin'] = self.tstart
fermi.filter['tmax'] = self.tstop
fermi.filter['zmax'] = self.zmax
fermi.filter['evclass'] = 128
logging.info('Running gtselect')
fermi.filter.run()
def hasmoon(self):
"""
Return True if the FT2 spacecraft file has the Moon coordinates (using the moonpos user contributed script), False otherwise
"""
d = fits.open(self.spacecraft)[1].data
try:
m = d.field('RA_MOON')
r = True
except KeyError:
r = False
return r
def makeTime(self):
"""
Filter the GTI for a given source
"""
fermi.maketime['scfile'] = self.spacecraft
if self.daily:
fermi.maketime['evfile'] = self.workDir + '/' + str(self.src) + '_daily.fits'
outfile = self.workDir + '/' + str(self.src) + '_daily_gti.fits'
else:
fermi.maketime['evfile'] = self.workDir + '/' + str(self.src) + '.fits'
outfile = self.workDir + '/' + str(self.src) + '_gti.fits'
fermi.maketime['outfile'] = outfile
# If outfile already exists, we don't do anything
if os.path.isfile(outfile):
return True
# cf. http://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/aperture_photometry.html
gtifilter = "LAT_CONFIG==1 && DATA_QUAL>0 && (angsep(" + str(self.ra) + "," + str(
self.dec) + ",RA_SUN,DEC_SUN)>5.)"
if self.filtermoon:
gtifilter += " && (angsep(" + str(self.ra) + "," + str(
self.dec) + ",RA_MOON,DEC_MOON)>5.)"
fermi.maketime['filter'] = gtifilter
fermi.maketime['roicut'] = 'no'
fermi.maketime['tstart'] = self.tstart
fermi.maketime['tstop'] = self.tstop
logging.info('Running gtmktime')
fermi.maketime.run()
def mergeGTIfiles(self):
"""
Merge multiple GTI files when mergelongterm is True.
Use gtselect.
Assume the current workDir is longTerm/merged.
"""
# Create list of GTI files
if not self.daily:
listname = self.workDir + '/' + self.src + '_gti.list'
else:
listname = self.workDir + '/' + self.src + '_daily_gti.list'
filelist = open(listname, 'w')
list = []
if not self.daily:
for file in glob.glob(self.workDir + '/../20????/' + self.src + '_gti.fits'):
list.append(file)
else:
for file in glob.glob(self.workDir + '/../20????/' + self.src + '_daily_gti.fits'):
list.append(file)
# Sort the list of GTI files
list = sorted(list)
for item in list:
filelist.write(item + '\n')
filelist.close()
fermi.filter['infile'] = '@' + listname
if not self.daily:
outfile = self.workDir + '/' + str(self.src) + '_gti.fits'
else:
outfile = self.workDir + '/' + str(self.src) + '_daily_gti.fits'
fermi.filter['outfile'] = outfile
# If outfile already exists, we re-create it
if os.path.isfile(outfile):
os.remove(outfile)
fermi.filter['ra'] = self.ra
fermi.filter['dec'] = self.dec
fermi.filter['rad'] = self.roi
fermi.filter['emin'] = self.emin
fermi.filter['emax'] = self.emax
fermi.filter['tmin'] = self.tstart
fermi.filter['tmax'] = self.tstop
fermi.filter['zmax'] = self.zmax
fermi.filter['evclass'] = 128
logging.info('Running gtselect')
fermi.filter.run()
def createXML(self):
"""
Create an XML model file based on the 3FGL catalogue
"""
if self.daily:
evfile = self.workDir + '/' + str(self.src) + '_daily_gti.fits'
modelfile = self.workDir + '/' + str(self.src) + '_daily.xml'
else:
evfile = self.workDir + '/' + str(self.src) + '_gti.fits'
modelfile = self.workDir + '/' + str(self.src) + '.xml'
# If modelfile already exists, we don't do anything
if os.path.isfile(modelfile):
return True
import make3FGLxml
mymodel = make3FGLxml.srcList(self.catalogFile, evfile, modelfile)
logging.info('Running makeModel')
mymodel.makeModel(GDfile=self.fermiDir + '/refdata/fermi/galdiffuse/gll_iem_v06.fits', GDname='GalDiffuse',
ISOfile=self.fermiDir + '/refdata/fermi/galdiffuse/iso_P8R2_SOURCE_V6_v06.txt',
ISOname='IsotropicDiffuse', extDir=self.templatesDir, makeRegion=False)
def photoLC(self):
"""
Compute the photometric light curve for a given source
"""
if self.daily:
fermi.evtbin['evfile'] = self.workDir + '/' + str(self.src) + '_daily_gti.fits'
outfile = self.workDir + '/' + str(self.src) + '_daily_lc.fits'
else:
fermi.evtbin['evfile'] = self.workDir + '/' + str(self.src) + '_gti.fits'
outfile = self.workDir + '/' + str(self.src) + '_lc.fits'
# If outfile already exists, we don't do anything
if os.path.isfile(outfile):
return True
fermi.evtbin['outfile'] = outfile
fermi.evtbin['scfile'] = self.spacecraft
fermi.evtbin['algorithm'] = 'LC'
fermi.evtbin['tbinalg'] = 'LIN'
fermi.evtbin['tstart'] = self.tstart
fermi.evtbin['tstop'] = self.tstop
fermi.evtbin['dtime'] = self.tbin
logging.info('Running gtbin')
fermi.evtbin.run()
def exposure(self, gamma=None):
"""
Compute exposure on source src, to add a flux column for the photometric light curve.
Warning: the input file is modified in place, with an additional exposure column added to the file !
"""
if self.daily:
infile = self.workDir + '/' + str(self.src) + '_daily_lc.fits'
srcmdl = self.workDir + '/' + str(self.src) + '_daily.xml'
else:
infile = self.workDir + '/' + str(self.src) + '_lc.fits'
srcmdl = self.workDir + '/' + str(self.src) + '.xml'
# If infile already contains an EXPOSURE column, we don't do anything
hdu = fits.open(infile)
if hdu[1].header.get('TTYPE5') == 'EXPOSURE':
return True
scfile = self.spacecraft
irfs = 'P8R2_SOURCE_V6'
rad = str(self.roi)
options = 'infile=' + infile + ' scfile=' + scfile + ' irfs=' + irfs + ' rad=' + rad
if self.fglName is not None:
target = self.fglName.replace('3FGLJ', '3FGL J')
logging.debug('exposure: target=%s', target)
options += ' srcmdl=' + srcmdl + ' target="' + target + '"'
else:
options += ' srcmdl="none" specin=' + str(gamma)
cmd = 'time -p ' + self.fermiDir + '/bin/gtexposure ' + options
logging.info('Running gtexposure')
os.system(cmd)
def createDAT(self):
"""
Create a data file with the light curve of a given source.
"""
# Read LC file
if self.daily:
infile = self.workDir + '/' + str(self.src) + '_daily_lc.fits'
outfile = self.workDir + '/' + str(self.src) + '_daily_lc.dat'
else:
infile = self.workDir + '/' + str(self.src) + '_lc.fits'
outfile = self.workDir + '/' + str(self.src) + '_lc.dat'
# If outfile already exists, we don't do anything
if os.path.isfile(outfile):
return True
try:
hdu = fits.open(infile)
except:
logging.critical('Exception: can not open file %s', infile)
raise
data = hdu[1].data
file = open(outfile, 'w')
file.write("MET\tMJD\tFlux\tFluxError\n")
file.write("#[MET]\t[MJD]\t[ph cm^-2 s^-1]\t[ph cm^-2 s^-1]\n")
time = data.field('TIME') # MET
counts = data.field('COUNTS')
countsErr = data.field('ERROR') # error on counts
exposure = data.field('EXPOSURE') # cm^2 s^1
flux = counts / exposure # approximate flux in ph cm^-2 s^-1
fluxErr = countsErr / exposure # approximate flux error in ph cm^-2 s^-1
timeMjd = extras.met2mjd(time)
# We can do this because time is NOT a list, but a numpy.array
for i in range(len(time)):
# Exposure can be 0 if longTerm=True and TSTOP in photon file > TSTOP in spacecraft file, or if Fermi operated in pointed mode for a while.
if exposure[i] != 0.:
file.write(str(time[i]) + "\t" + str(timeMjd[i]) + "\t" + str(flux[i]) + "\t" + str(fluxErr[i]) + "\n")
file.close()
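        # Sketch of the aperture-photometry estimate written above (illustrative numbers,
        # not from real data): a bin with counts = 12 and exposure = 4.0e7 cm^2 s yields
        # flux = 12 / 4.0e7 = 3.0e-7 ph cm^-2 s^-1, and the error scales the same way.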
def getBAT(self):
import urllib2
# daily fits example url:
# http://swift.gsfc.nasa.gov/docs/swift/results/transients/CygX-3.lc.fits
# Some sources need replacement names to match the BAT names
urls = {
'4U1907+09': 'H1907+097',
'1FGLJ1018.6-5856': '0FGLJ1018.2-5858',
'H1743-322': 'IGRJ17464-3213',
'V4641Sgr': 'SAXJ1819.3-2525',
'1E1841-04.5': 'Kes73',
}
# Remove '+', add file ending
        if self.src in urls:
file = urls[self.src].replace('+', 'p') + ".lc.fits"
else:
file = self.src.replace('+', 'p') + ".lc.fits"
urlprefix = "http://swift.gsfc.nasa.gov/docs/swift/results/transients/"
# lc files can be in a weak/ subdir for weak sources, we try both
try:
baturl = urlprefix + file
webfile = urllib2.urlopen(baturl)
except (urllib2.HTTPError, urllib2.URLError) as e:
try:
baturl = urlprefix + 'weak/' + file
webfile = urllib2.urlopen(baturl)
except (urllib2.HTTPError, urllib2.URLError) as e:
return False, None
# save lc to local file
localfile = open(file, 'w')
localfile.write(webfile.read())
webfile.close()
localfile.close()
# read local file with fits into batlc
batfits = fits.open(file)
batlc = np.array(batfits[1].data)
batfits.close()
# delete local file
os.unlink(file)
return True, batlc
def createLCfig(self):
"""
Create a PNG figure with the light curve of a given source. Any existing PNG file is overwritten !
"""
# Read the .dat LC file
if self.daily:
infile = self.workDir + '/' + str(self.src) + '_daily_lc.dat'
outfig = self.workDir + '/' + str(self.src) + '_daily_lc.png'
infileLongTimeBin = self.workDir + '/' + str(self.src) + '_lc.dat'
duration = 1. # duration of a time bin, in days
else:
infile = self.workDir + '/' + str(self.src) + '_lc.dat'
outfig = self.workDir + '/' + str(self.src) + '_lc.png'
duration = self.longtimebin # duration of a time bin, in days
data = ascii.read(infile)
# the times are already read as MJD, cf createDAT function.
timelc = data['MJD']
flux = data['Flux']
fluxErr = data['FluxError']
if self.daily:
dataLongTimeBin = ascii.read(infileLongTimeBin)
# the times are already read as MJD, cf createDAT function.
timeLongTimeBin = dataLongTimeBin['MJD']
fluxLongTimeBin = dataLongTimeBin['Flux']
fluxErrLongTimeBin = dataLongTimeBin['FluxError']
durationLongTimeBin = self.longtimebin # duration of a time bin, in days
# Download Swift/BAT data if available
# xray is boolean flag indicating that X-ray BAT data is available
xray, batlc = self.getBAT()
# Redefine the trigger threshold if withhistory=True
if self.withhistory:
(fluxAverage, fluxRMS) = self.dynamicalTrigger()
fig = plt.figure()
if xray:
ax = fig.add_subplot(211)
axbat = fig.add_subplot(212, sharex=ax)
else:
ax = fig.add_subplot(111)
if self.fglName is not None:
title = str(self.src) + ', ' + str(self.fglName).replace('_2FGLJ', '2FGL J').replace('3FGLJ', '3FGL J')
else:
title = str(self.src) + ', no known 3FGL counterpart'
if self.z == 'None':
title = title + ' (z unknown)'
else:
title = title + ' (z=' + str(self.z) + ')'
ax.set_title(title)
# Force the y-axis ticks to use 1e-6 as a base exponent
ax.yaxis.set_major_formatter(FuncFormatter(lambda x, pos: ('%.2f') % (x * 1e6)))
ax.set_ylabel('F (%.0f MeV-%.0f GeV) (%s 10$^{-6}$ ph cm$^{-2}$ s$^{-1}$)' % (
self.emin, self.emax / 1000., r'$\times$')) # , size='x-small')
## Make the x-axis ticks shifted by some value
ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: '%.0f' % (x - TOFFSET)))
ax.set_xlabel('MJD-' + str(TOFFSET))
# ax.set_xlabel('MJD')
# Plot the Fermi/LAT light curve
if self.daily:
# Also plot the long time-binned light curve
ax.errorbar(x=timelc, xerr=duration / 2., y=flux, yerr=fluxErr, fmt='ro')
ax.errorbar(x=timeLongTimeBin, xerr=durationLongTimeBin / 2., y=fluxLongTimeBin, yerr=fluxErrLongTimeBin,
fmt='bo')
# The last plot called is on top of the others in matplotlib (are you sure ???). Here, we want the long time-binned LC on top, for visibility.
else:
ax.errorbar(x=timelc, xerr=duration / 2., y=flux, yerr=fluxErr, fmt='bo')
# Plot a line at the threshold value
ax.axhline(y=self.threshold, linewidth=3, linestyle='--', color='r')
if self.withhistory:
ax.axhline(y=fluxAverage, linewidth=1, linestyle='-', color='b')
ax.axhline(y=fluxAverage + fluxRMS, linewidth=1, linestyle='--', color='b')
ax.axhline(y=fluxAverage - fluxRMS, linewidth=1, linestyle='--', color='b')
        # Plot a line at flux=0, for visibility/readability
ax.axhline(y=0., color='k')
# Add a label for the creation date of this figure
# x,y in relative 0-1 coords in figure
plt.figtext(0.98, 0.95,
'plot creation date: %s (UTC)' % (time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime())),
horizontalalignment="right",
rotation='vertical',
size='xx-small'
)
# Plot Swift/BAT lightcurve
if xray:
axbat.errorbar(batlc['TIME'] + 0.5, batlc['RATE'], batlc['ERROR'], fmt="none", capsize=0, elinewidth=1,
ecolor='b', color='b')
axbat.set_xlabel('MJD-' + str(TOFFSET))
# axbat.set_xlabel('MJD')
axbat.set_ylabel('F (15-50 keV) (count cm^-2 s^-1)', size='x-small')
try:
axbat.set_xlim(xmin=timelc[0] - duration / 2. - 1., xmax=timelc[-1:] + duration / 2. + 1.)
axbat.set_ylim(ymin=0.)
except:
pass
# Need to zoom in or not, at the very end, after any call to other matplotlib functions
NEEDTOZOOMIN = False
for i in range(len(flux)):
if fluxErr[i] > 5. * flux[i]:
NEEDTOZOOMIN = True
if NEEDTOZOOMIN:
maxy = 1.5 * max(flux)
if maxy > self.threshold:
ax.set_ylim(ymin=-1.e-7, ymax=maxy)
else:
ax.set_ylim(ymin=-1.e-7, ymax=self.threshold)
# Don't show the figure in batch mode
if not BATCH:
plt.show()
# Save the figure
fig.savefig(outfig)
def createEnergyTimeFig(self, eThresh=1.e2):
"""
Create a PNG figure with the energy vs time of a given source, above eThresh MeV. Any existing PNG file is overwritten !
"""
# Read the GTI FITS file
infile = self.workDir + '/' + str(self.src) + '_gti.fits'
outfig = self.workDir + '/' + str(self.src) + '_energyTime.png'
hdu = fits.open(infile)
data = hdu[1].data
mask = data.field('ENERGY') > eThresh
datac = data[mask]
if not datac.size:
logging.warning('[%s] \033[92mEmpty energy vs time plot above %0.f GeV\033[0m', self.src, eThresh / 1.e3)
return
t = extras.met2mjd(datac['TIME'])
e = datac['ENERGY']
fig = plt.figure()
ax = fig.add_subplot(111)
if self.fglName is not None:
title = str(self.src) + ', ' + str(self.fglName).replace('_2FGLJ', '2FGL J').replace('3FGLJ', '3FGL J')
else:
title = str(self.src) + ', no known 3FGL counterpart'
if self.z == 'None':
title = title + ' (z unknown)'
else:
title = title + ' (z=' + str(self.z) + ')'
ax.set_title(title)
ylabel = 'Energy (MeV)'
if eThresh > self.emin:
ylabel += ' -- only data above %.1f GeV are shown' % (eThresh / 1.e3)
# ax.set_ylabel(ylabel, size='x-small')
ax.set_ylabel(ylabel)
## Make the x-axis ticks shifted by some value
ax.xaxis.set_major_formatter(FuncFormatter(lambda x, pos: '%.0f' % (x - TOFFSET)))
ax.set_xlabel('MJD-' + str(TOFFSET))
try:
ax.set_xlim(xmin=t[0] - 1., xmax=t[-1:] + 1.)
except:
pass
# Plot the energy vs time distribution
try:
# cf. http://stackoverflow.com/questions/20105364/how-can-i-make-a-scatter-plot-colored-by-density-in-matplotlib
from scipy.stats import gaussian_kde
xy = np.vstack([t, e])
z = gaussian_kde(xy)(xy)
# Re-normalize the density
z = z / max(z)
idx = z.argsort()
t, e, z = t[idx], e[idx], z[idx]
pcm = ax.scatter(t, e, c=z, s=100, edgecolor='')
cbar = plt.colorbar(pcm, ax=ax)
cbar.set_label('Kernel-density estimates (arb. unit)', rotation=90)
except ImportError:
ax.plot(t, e, 'bo')
ax.set_yscale('log')
# Add a label for the creation date of this figure
# x,y in relative 0-1 coords in figure
plt.figtext(0.98, 0.95,
'plot creation date: %s (UTC)' % (time.strftime("%a, %d %b %Y %H:%M:%S", time.gmtime())),
horizontalalignment="right",
rotation='vertical',
size='xx-small'
)
# Don't show the figure in batch mode
if not BATCH:
plt.show()
# Save the figure
fig.savefig(outfig)
def zaAtCulmination(self):
"""
Returns the zenith angle of a source at culmination, for the provided site.
"""
za = np.abs(self.dec - self.siteLat)
return za
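        # Worked example (assumed coordinates): a source at Dec = +22.0 deg observed from
        # a site at latitude -23.3 deg culminates at za = |22.0 - (-23.3)| = 45.3 deg.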
def is_visible(self):
'''
Check whether the current source is visible at the site provided.
'''
# Define site for pyephem
site = ephem.Observer()
astroHorizon = ephem.degrees('-18:00') # astronomical twilight
civilHorizon = ephem.degrees('-0:34')
site.horizon = astroHorizon
site.lon = ephem.degrees(str(self.siteLon)) # ephem needs this as string
site.lat = ephem.degrees(str(self.siteLat)) # ephem needs this as string
site.elev = self.siteAlt # meters
site.compute_pressure()
srcCoords = Coords(ra=self.ra * u.degree, dec=self.dec * u.degree, frame='icrs')
# If input z is None, make it believe it is 0, otherwise msk crashes:
if self.z == 'None':
z = 0.
else:
z = float(self.z)
# We also want the max allowed ZA for the given z of the source
maxz = np.array(self.maxz)
maxZA = np.array(self.maxZA)
if z > np.max(maxz):
thismaxZA = np.min(maxZA)
logging.warning('z is greater than maxz !')
else:
msk = np.where(z < maxz)
# Get the first item in the mask, to get the corresponding ZA:
thismaxZA = maxZA[msk[0][0]]
# Convert ZA to Alt
thisminAlt = np.abs(90. - thismaxZA)
ephemSrc = ephem.FixedBody()
ephemSrc._ra = ephem.hours(str(srcCoords.ra.to_string(unit=u.hourangle,
sep=':'))) # Careful: ephem should be given hours here, but only for RA !
ephemSrc._dec = ephem.degrees(str(srcCoords.dec.to_string(unit=u.degree, sep=':')))
visibleFlag = False
zaAtCulmin = self.zaAtCulmination()
if zaAtCulmin > 90.:
# the source is basically NEVER visible at the site
logging.info('[%s] \033[91mNEVER above horizon at the site, consider discarding this source from your source list...\033[0m', self.src)
return False
if thismaxZA < zaAtCulmin:
# the source is never above maxZA set by 2D mask on Dec/z
logging.info('[%s]\033[91m Never above allowed max ZA, consider relaxing the Dec/z cuts or discarding this source from your source list...\033[0m', self.src)
logging.debug('[%s] thismaxZA=%f, zaAtCulmin=%f', self.src, thismaxZA, zaAtCulmin)
return False
# All times are handled here in UTC (pyEphem only uses UTC)
now = datetime.datetime.utcnow()
# tomorrow = now + datetime.timedelta(days=1)
site.date = now
sun = ephem.Sun()
nextSunset = site.next_setting(sun)
nextSunrise = site.next_rising(sun)
# The Moon just needs to be below the horizon, not below astronomical twilight angle
site.horizon = civilHorizon
moon = ephem.Moon()
nextMoonset = site.next_setting(moon)
nextMoonrise = site.next_rising(moon)
site.horizon = astroHorizon
# so far, so good. All of this is OK if we execute the program during day time.
# However, if the program is run during dark time, we should look at the ephemerids of next night (not current night):
if nextSunrise < nextSunset:
logging.info('looking at visibility for tomorrow')
# we just put the current time at next sunrise + 10 min., to be sure to fall on tomorrow's morning day time
site.date = nextSunrise.datetime() + datetime.timedelta(minutes=10)
nextSunset = site.next_setting(sun)
nextSunrise = site.next_rising(sun)
site.horizon = civilHorizon
nextMoonset = site.next_setting(moon)
nextMoonrise = site.next_rising(moon)
site.horizon = astroHorizon
ephemSrc.compute(site)
srcTransitTime = site.next_transit(ephemSrc)
site.date = srcTransitTime
ephemSrc.compute(site)
srcAltAtTransit = Angle(ephemSrc.alt, unit=u.rad).degree
# If srcAltAtTransit is below thisminAlt, the source is just not optimally visible and we stop here
logging.debug('thisminAlt = {0}'.format(thisminAlt))
if srcAltAtTransit < thisminAlt:
return False
# Compute start and end of darkness time
if nextMoonset > nextSunset and nextMoonset < nextSunrise:
beginDarkness = nextMoonset
else:
beginDarkness = nextSunset
if nextMoonrise < nextSunrise and nextMoonrise > nextSunset:
endDarkness = nextMoonrise
else:
endDarkness = nextSunrise
site.date = beginDarkness
ephemSrc.compute(site)
srcAltAtStartDarkTime = Angle(ephemSrc.alt, unit=u.rad).degree
site.date = endDarkness
ephemSrc.compute(site)
srcAltAtEndDarkTime = Angle(ephemSrc.alt, unit=u.rad).degree
darknessDuration = (endDarkness - beginDarkness) * 24. * 60. # day to minutes
logging.debug('darkness begin={0}'.format(beginDarkness))
logging.debug('srcAltAtStartDarkTime={0}'.format(srcAltAtStartDarkTime))
logging.debug('srcTransitTime={0}'.format(srcTransitTime))
logging.debug('srcAltAtTransit={0}'.format(srcAltAtTransit))
logging.debug('darkness ends={0}'.format(endDarkness))
logging.debug('srcAltAtEndDarkTime={0}'.format(srcAltAtEndDarkTime))
logging.debug('darkness duration={0} minutes'.format(darknessDuration))
# check if source is visible, above minAlt, during this night
for step in range(0, np.int(darknessDuration)):
site.date = beginDarkness.datetime() + datetime.timedelta(minutes=step)
ephemSrc.compute(site)
srcAlt = Angle(ephemSrc.alt, unit=u.rad).degree
logging.debug('LOOPING: it is {0} and {1} is at alt. of {2}'.format(site.date, self.src, srcAlt))
if srcAlt > thisminAlt:
visibleFlag = True
logging.info('{0} starts to be optimally visible, above {1}°, at {2}'.format(self.src, thisminAlt,
site.date))
break
logging.debug('is_visible: %s', str(visibleFlag))
return visibleFlag
def killTrigger(self):
"""
Defines cuts on (RA, Dec, z) before assessing whether a mail alert should be sent for a source whose flux is above the trigger threshold.
We cut on a combination (z, ZenithAngle), using a bit mask.
@rtype bool
@todo Introduce an additional cut on Gal latitude ?
The return value is a bit counter-intuitive. It answers the question 'Should we kill an imminent mail alert?', i.e. if a source has its last flux point above the flux threshold, does it also fulfill the requirements on both z (not too far away) and zenith angle (not too low in the sky)? So if an alert should definitely be sent, this function returns 'False'!
"""
# Numpy array
# combination of acceptable
# z ZA@culmination
grid = np.array(zip(self.maxz, self.maxZA))
zaAtCulmin = self.zaAtCulmination()
# If input z is None, make it believe it is 0, otherwise msk crashes:
if self.z == 'None':
z = 0.
else:
z = float(self.z)
# Mask on both (z, ZA at culmin)
# z column ZA column
msk = (z <= grid[:, 0]) & (zaAtCulmin <= grid[:, 1])
# Assess whether the source is currently visible at the provided site
if self.checkVisibility == 'True':
self.visible = self.is_visible()
else:
# The source is assumed to be visible in any case, i.e. we don't care about its visibility status at the provided site to send a potential alert
self.visible = True
# if the mask has at least one 'True' element, we should send an alert
if True in msk and self.visible:
# An alert should be triggered !
return False
else:
# No alert triggered
return True
def dynamicalTrigger(self):
'''
If long-term data are available for a source, dynamically computes a flux trigger threshold based on the flux history of the source. Otherwise, falls back on the default fixed trigger threshold.
@return (fluxAverage,fluxRMS)
@rtype tuple
'''
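# Illustrative numbers (hypothetical): with fluxAverage=2e-7 ph cm^-2 s^-1,
# fluxRMS=5e-8 and sigma=3, the daily trigger threshold becomes
# 2e-7 + 3*5e-8 = 3.5e-7 ph cm^-2 s^-1.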
# Read the longterm .dat LC file
infile = self.baseOutDir + '/longTerm/merged/' + str(self.src) + '_lc.dat'
try:
data = ascii.read(infile)
except IOError:
logging.error('[%s] \033[95m* Long term data file unavailable for source %s\033[0m', self.src, self.src)
# Falling back to default fixed trigger threshold
self.withhistory = False
return (False, False)
flux = data['Flux']
fluxErr = data['FluxError']
try:
from uncertainties import unumpy as unp
logging.info('[%s] The long-term flux average is %.2g ph cm^-2 s^-1', self.src, unp.uarray(flux, fluxErr).mean())
except:
pass
# weighted average of the historical fluxes, weighted by the inverse of their errors
fluxAverage = np.average(flux, weights=1. / fluxErr)
fluxRMS = np.std(flux, dtype=np.float64)
# Dynamically redefine the flux trigger threshold, using a 2-level criteria depending on whether we are currently looking at short- or long-term data
if self.daily:
self.threshold = fluxAverage + self.sigma * fluxRMS
else:
self.threshold = fluxAverage + self.sigmaLT * fluxRMS
return (fluxAverage, fluxRMS)
def Triggered(self):
'''
Has the source fulfilled the trigger conditions?
@return True if the trigger conditions are fulfilled, False otherwise
@rtype bool
'''
# Read the light curve file
if self.daily:
infile = self.workDir + '/' + str(self.src) + '_daily_lc.dat'
self.pngFig = self.workDir + '/' + str(self.src) + '_daily_lc.png'
# Also take a look in the long time-binned data
infileLongTimeBin = self.workDir + '/' + str(self.src) + '_lc.dat'
dataLongTimeBin = ascii.read(infileLongTimeBin)
timeLongTimeBin = dataLongTimeBin['MET']
fluxLongTimeBin = dataLongTimeBin['Flux']
fluxErrLongTimeBin = dataLongTimeBin['FluxError']
# Catch the last flux point
self.lastTimeLongTimeBin = timeLongTimeBin[-1:]
self.lastFluxLongTimeBin = fluxLongTimeBin[-1:]
self.lastFluxErrLongTimeBin = fluxErrLongTimeBin[-1:]
# Get the arrival time of the last photon analysed
photonfileLongTimeBin = self.workDir + '/' + str(self.src) + '_gti.fits'
photonsLongTimeBin = fits.open(photonfileLongTimeBin)
photonsLongTimeBinTime = photonsLongTimeBin[1].data.field('TIME')
self.arrivalTimeLastPhotonLongTimeBin = photonsLongTimeBinTime[-1:]
photonfile = self.workDir + '/' + str(self.src) + '_daily_gti.fits'
photons = fits.open(photonfile)
photonsTime = photons[1].data.field('TIME')
self.arrivalTimeLastPhoton = photonsTime[-1:]
else:
infile = self.workDir + '/' + str(self.src) + '_lc.dat'
self.pngFig = self.workDir + '/' + str(self.src) + '_lc.png'
photonfile = self.workDir + '/' + str(self.src) + '_gti.fits'
photons = fits.open(photonfile)
photonsTime = photons[1].data.field('TIME')
self.arrivalTimeLastPhoton = photonsTime[-1:]
data = ascii.read(infile)
time = data['MET']
flux = data['Flux']
fluxErr = data['FluxError']
# Catch the last flux point
self.lastTime = time[-1:]
self.lastFlux = flux[-1:]
self.lastFluxErr = fluxErr[-1:]
self.energyTimeFig = self.workDir + '/' + str(self.src) + '_energyTime.png'
logging.debug('%s, threshold=%g, lastFlux=%g, lastFluxErr=%g',
self.src, self.threshold, self.lastFlux, self.lastFluxErr)
# Do we kill potential trigger due to (ra, dec, z) cut ?
self.triggerkilled = self.killTrigger()
# Assess whether flux is above threshold, looking at the last flux point
if (self.lastFlux - self.lastFluxErr) >= self.threshold:
self.active = True
else:
self.active = False
# Combine killTrigger and flux above threshold criteria
if (not self.triggerkilled and self.active) or self.forcealert:
SENDALERT = True
else:
SENDALERT = False
logging.debug('triggerkilled=%s', str(self.triggerkilled))
logging.debug('active=%s', str(self.active))
logging.debug('visible=%s', str(self.visible))
logging.debug('SENDALERT=%s', str(SENDALERT))
logging.debug('DEBUG {0}, dec={1}, z={2}, maxZA={3}, maxz={4}, triggerkilled={5}, sendalert={6}'.format(self.src,
self.dec,
self.z,
self.maxZA,
self.maxz,
self.triggerkilled,
SENDALERT))
return SENDALERT
def sendAlert(self, nomailall=False, sendmail=False):
'''
Send a mail alert in case a source fulfills the trigger conditions.
@param nomailall Boolean, should the mail be sent to a restricted list of recipients ?
@return True if an alert was sent, False otherwise
@rtype bool
'''
# Import modules
try:
# Import smtplib to send mails
import smtplib
# Here are the email package modules we'll need
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEBase import MIMEBase
from email import Encoders
except:
logging.error('sendAlert: Can\'t import mail modules.')
sys.exit(1)
SENDALERT = self.Triggered()
# If trigger condition is met, we send a mail
if SENDALERT and sendmail:
# Create the container email message.
msg = MIMEMultipart()
sender = self.mailSender
fhlName = self.search2FHLcounterpart()
if fhlName is not None:
fhlmessage = "2FHL counterpart is %s" % fhlName
else:
fhlmessage = "No 2FHL counterpart found"
fglName = self.search3FGLcounterpart()
if fglName is not None:
fglmessage = "3FGL counterpart is %s" % fglName
else:
fglmessage = "No 3FGL counterpart found"
# To whom the mail should be sent (cf. __init__ function of the class)
if not nomailall:
recipient = self.usualRecipients
msg['Subject'] = '[FLaapLUC] Fermi/LAT flare alert on %s [2FHL counterpart: %s]' % (self.src, fhlName)
else:
recipient = self.testRecipients
msg['Subject'] = '[FLaapLUC TEST MAIL] Fermi/LAT flare alert on %s [2FHL counterpart: %s]' % (
self.src, fhlName)
msg['From'] = sender
COMMASPACE = ', '
msg['To'] = COMMASPACE.join(recipient)
msg.preamble = 'You will not see this in a MIME-aware mail reader.\n'
# Guarantees the message ends in a newline
msg.epilogue = ''
mailtext = """
FLaapLUC (Fermi/LAT automatic aperture photometry Light C<->Urve) report
*** The Fermi/LAT flux (%.0f MeV-%.0f GeV) of %s (%s, %s) exceeds the trigger threshold of %.2g ph cm^-2 s^-1 ***
""" % (self.emin, self.emax / 1000., self.src, fhlmessage, fglmessage, self.threshold)
if self.daily:
mailtext = mailtext + """
The last daily-binned flux is: %.2g +/- %.2g ph cm^-2 s^-1, centred on MET %.0f (MJD %.5f, i.e. %s) (arrival time of last photon analysed: MET %.0f, MJD %.5f, %s)
and the last %.0f-day binned flux is: %.2g +/- %.2g ph cm^-2 s^-1, centred on MET %.0f (MJD %.5f, i.e. %s) (arrival time of last photon analysed: MET %.0f, MJD %.5f, %s)
""" % (self.lastFlux,
self.lastFluxErr,
self.lastTime, extras.met2mjd(self.lastTime), str(extras.mjd2gd(extras.met2mjd(self.lastTime))),
self.arrivalTimeLastPhoton, extras.met2mjd(self.arrivalTimeLastPhoton),
str(extras.mjd2gd(extras.met2mjd(self.arrivalTimeLastPhoton))),
self.longtimebin,
self.lastFluxLongTimeBin,
self.lastFluxErrLongTimeBin,
self.lastTimeLongTimeBin, extras.met2mjd(self.lastTimeLongTimeBin),
str(extras.mjd2gd(extras.met2mjd(self.lastTimeLongTimeBin))),
self.arrivalTimeLastPhotonLongTimeBin, extras.met2mjd(self.arrivalTimeLastPhotonLongTimeBin),
str(extras.mjd2gd(extras.met2mjd(self.arrivalTimeLastPhotonLongTimeBin))))
mailtext = mailtext + "The most recent lightcurve (%.0f-day binned in red, and %.0f-day binned in blue) is attached." % (
self.tbin / 24. / 60. / 60., self.longtimebin)
else:
mailtext = mailtext + """
The last %.0f-day binned flux is: %.2g +/- %.2g ph cm^-2 s^-1, centred on MET %.0f (MJD %.5f, i.e. %s) (arrival time of last photon analysed: MET %.0f, MJD %.5f, %s)
""" % (self.longtimebin,
self.lastFlux,
self.lastFluxErr,
self.lastTime, extras.met2mjd(self.lastTime), str(extras.mjd2gd(extras.met2mjd(self.lastTime))),
self.arrivalTimeLastPhoton, extras.met2mjd(self.arrivalTimeLastPhoton),
str(extras.mjd2gd(extras.met2mjd(self.arrivalTimeLastPhoton))))
mailtext = mailtext + "The most recent lightcurve (%.0f-day binned) is attached." % (
self.tbin / 24. / 60. / 60.)
if FLAGASSUMEDGAMMA is True:
mailtext = mailtext + """
*WARNING*: The source %s is not found in the 3FGL catalogue, its photon index is thus assumed to be %.2f for the light curve computation.
""" % (self.src, ASSUMEDGAMMA)
mailtext = mailtext + """
*Disclaimer*: Be careful, though, that these light curves are not computed using the usual, clean, standard (un)binned likelihood procedure one should normally use for a good quality, publication-ready result. Those reported here only rely on a "quick & dirty" aperture photometric analysis (cf. e.g. http://fermi.gsfc.nasa.gov/ssc/data/analysis/scitools/aperture_photometry.html), which basically assumes that the data set, within 1 degree around the source, is background-free.
Cheers,
FLaapLUC.
"""
txt = MIMEText(mailtext)
msg.attach(txt)
# Attach the figures
for fig in [self.pngFig, self.energyTimeFig]:
try:
# Open the files in binary mode. Let the MIMEImage class automatically guess the specific image type.
fp = open(fig, 'rb')
img = MIMEBase('application', 'octet-stream')
img.set_payload(fp.read())
Encoders.encode_base64(img)
img.add_header('Content-Disposition',
'attachment; filename="%s"' % os.path.basename(fig))
fp.close()
msg.attach(img)
except:
pass
# Send the email via our own SMTP server.
s = smtplib.SMTP()
s.set_debuglevel(0)
s.connect()
s.sendmail(sender, recipient, msg.as_string())
s.quit()
logging.info('\033[94m*** Alert sent for %s\033[0m', self.src)
return True
else:
return False
def search3FGLcounterpart(self):
"""
Search the 3FGL name of a 2FGL source name
"""
if self.fglName is not None:
if "3FGL" in self.fglName:
return self.fglName.replace('_3FGLJ', '3FGL J').replace('3FGLJ', '3FGL J')
cat3FGLfile = self.catalogFile.replace('gll_psc_v08', 'gll_psc_v16')
hdulist = fits.open(cat3FGLfile)
cat = hdulist[1].data
logging.debug('2FGL name is %s', self.fglName.replace('_2FGLJ', '2FGL J').replace('2FGLJ', '2FGL J'))
found = False
for stuff in cat:
if stuff.field('2FGL_Name') == self.fglName.replace('_2FGLJ', '2FGL J').replace('2FGLJ', '2FGL J'):
threefglName = stuff.field('Source_Name')
logging.info('Found the 3FGL counterpart of %s: %s', self.fglName, threefglName)
found = True
break
if not found:
threefglName = None
logging.info('No 3FGL counterpart found for %s', self.fglName)
hdulist.close()
return threefglName
else:
return None
def search2FHLcounterpart(self):
"""
Search the 2FHL name of a 2FGL or a 3FGL source name
"""
if self.fglName is not None:
if "2FHL" in self.fglName:
return self.fglName.replace('_2FHLJ', '2FHL J').replace('2FHLJ', '2FHL J')
cat2FHLfile = self.catalogFile.replace('/3FGL/', '/2FHL/').replace('psc_v08', 'psch_v08').replace('psc_v16',
'psch_v08')
try:
hdulist = fits.open(cat2FHLfile)
except IOError:
logging.info('2FHL catalog file not found')
return None
cat = hdulist[1].data
found = False
threefglName = self.search3FGLcounterpart()
for stuff in cat:
if stuff.field('3FGL_Name') == self.fglName.replace('_3FGLJ', '3FGL J').replace('3FGLJ',
'3FGL J') or stuff.field(
'3FGL_Name') == str(threefglName).replace('3FGLJ', '3FGL J'):
fhlName = stuff.field('Source_Name')
logging.info('Found the 2FHL counterpart of %s: %s', self.fglName, fhlName)
found = True
break
if not found:
fhlName = None
logging.info('No 2FHL counterpart found for %s', self.fglName)
hdulist.close()
return fhlName
else:
return None
| bsd-3-clause |
eusi/MissionPlanerHM | Lib/site-packages/scipy/signal/fir_filter_design.py | 53 | 18572 | """Functions for FIR filter design."""
from math import ceil, log
import numpy as np
from numpy.fft import irfft
from scipy.special import sinc
import sigtools
# Some notes on function parameters:
#
# `cutoff` and `width` are given as a numbers between 0 and 1. These
# are relative frequencies, expressed as a fraction of the Nyquist rate.
# For example, if the Nyquist rate is 2KHz, then width=0.15 is a width
# of 300 Hz.
#
# The `order` of a FIR filter is one less than the number of taps.
# This is a potential source of confusion, so in the following code,
# we will always use the number of taps as the parameterization of
# the 'size' of the filter. The "number of taps" means the number
# of coefficients, which is the same as the length of the impulse
# response of the filter.
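# A minimal numerical illustration of the convention above (assumed values:
# a 2 kHz Nyquist rate and a 300 Hz transition width):
#
#     nyquist_hz = 2000.0
#     width_hz = 300.0
#     width_rel = width_hz / nyquist_hz   # 0.15, the value passed to these functions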
def kaiser_beta(a):
"""Compute the Kaiser parameter `beta`, given the attenuation `a`.
Parameters
----------
a : float
The desired attenuation in the stopband and maximum ripple in
the passband, in dB. This should be a *positive* number.
Returns
-------
beta : float
The `beta` parameter to be used in the formula for a Kaiser window.
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
if a > 50:
beta = 0.1102 * (a - 8.7)
elif a > 21:
beta = 0.5842 * (a - 21)**0.4 + 0.07886 * (a - 21)
else:
beta = 0.0
return beta
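# Illustrative values from the piecewise formula above (approximate):
#     kaiser_beta(65) -> 0.1102 * (65 - 8.7) ~= 6.204   (a > 50 branch)
#     kaiser_beta(30) -> ~= 2.117                        (21 < a <= 50 branch)
#     kaiser_beta(10) -> 0.0                             (a <= 21: beta 0, i.e. a rectangular window)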
def kaiser_atten(numtaps, width):
"""Compute the attenuation of a Kaiser FIR filter.
Given the number of taps `N` and the transition width `width`, compute the
attenuation `a` in dB, given by Kaiser's formula:
a = 2.285 * (N - 1) * pi * width + 7.95
Parameters
----------
N : int
The number of taps in the FIR filter.
width : float
The desired width of the transition region between passband and stopband
(or, in general, at any discontinuity) for the filter.
Returns
-------
a : float
The attenuation of the ripple, in dB.
See Also
--------
kaiserord, kaiser_beta
"""
a = 2.285 * (numtaps - 1) * np.pi * width + 7.95
return a
def kaiserord(ripple, width):
"""Design a Kaiser window to limit ripple and width of transition region.
Parameters
----------
ripple : float
Positive number specifying maximum ripple in passband (dB) and minimum
ripple in stopband.
width : float
Width of transition region (normalized so that 1 corresponds to pi
radians / sample).
Returns
-------
numtaps : int
The length of the kaiser window.
beta :
The beta parameter for the kaiser window.
Notes
-----
There are several ways to obtain the Kaiser window:
signal.kaiser(numtaps, beta, sym=0)
signal.get_window(beta, numtaps)
signal.get_window(('kaiser', beta), numtaps)
The empirical equations discovered by Kaiser are used.
See Also
--------
kaiser_beta, kaiser_atten
References
----------
Oppenheim, Schafer, "Discrete-Time Signal Processing", p.475-476.
"""
A = abs(ripple) # in case somebody is confused as to what's meant
if A < 8:
# Formula for N is not valid in this range.
raise ValueError("Requested maximum ripple attentuation %f is too "
"small for the Kaiser formula." % A)
beta = kaiser_beta(A)
# Kaiser's formula (as given in Oppenheim and Schafer) is for the filter
# order, so we have to add 1 to get the number of taps.
numtaps = (A - 7.95) / 2.285 / (np.pi * width) + 1
return int(ceil(numtaps)), beta
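# Worked example (approximate, following the formula above): for 65 dB of
# ripple attenuation and a transition width of 0.05 (as a fraction of the
# Nyquist rate), numtaps = ceil((65 - 7.95) / 2.285 / (pi * 0.05) + 1) = 160
# and beta = kaiser_beta(65) ~= 6.204.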
def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True,
scale=True, nyq=1.0):
"""
FIR filter design using the window method.
This function computes the coefficients of a finite impulse response filter.
The filter will have linear phase; it will be Type I if `numtaps` is odd and
Type II if `numtaps` is even.
Type II filters always have zero response at the Nyquist rate, so a
ValueError exception is raised if firwin is called with `numtaps` even and
having a passband whose right end is at the Nyquist rate.
Parameters
----------
numtaps : int
Length of the filter (number of coefficients, i.e. the filter
order + 1). `numtaps` must be even if a passband includes the
Nyquist frequency.
cutoff : float or 1D array_like
Cutoff frequency of filter (expressed in the same units as `nyq`)
OR an array of cutoff frequencies (that is, band edges). In the
latter case, the frequencies in `cutoff` should be positive and
monotonically increasing between 0 and `nyq`. The values 0 and
`nyq` must not be included in `cutoff`.
width : float or None
If `width` is not None, then assume it is the approximate width
of the transition region (expressed in the same units as `nyq`)
for use in Kaiser FIR filter design. In this case, the `window`
argument is ignored.
window : string or tuple of string and parameter values
Desired window to use. See `scipy.signal.get_window` for a list
of windows and required parameters.
pass_zero : bool
If True, the gain at the frequency 0 (i.e. the "DC gain") is 1.
Otherwise the DC gain is 0.
scale : bool
Set to True to scale the coefficients so that the frequency
response is exactly unity at a certain frequency.
That frequency is either:
0 (DC) if the first passband starts at 0 (i.e. pass_zero
is True);
`nyq` (the Nyquist rate) if the first passband ends at
`nyq` (i.e the filter is a single band highpass filter);
center of first passband otherwise.
nyq : float
Nyquist frequency. Each frequency in `cutoff` must be between 0
and `nyq`.
Returns
-------
h : 1D ndarray
Coefficients of length `numtaps` FIR filter.
Raises
------
ValueError
If any value in `cutoff` is less than or equal to 0 or greater
than or equal to `nyq`, if the values in `cutoff` are not strictly
monotonically increasing, or if `numtaps` is even but a passband
includes the Nyquist frequency.
Examples
--------
Low-pass from 0 to f::
>>> firwin(numtaps, f)
Use a specific window function::
>>> firwin(numtaps, f, window='nuttall')
High-pass ('stop' from 0 to f)::
>>> firwin(numtaps, f, pass_zero=False)
Band-pass::
>>> firwin(numtaps, [f1, f2], pass_zero=False)
Band-stop::
>>> firwin(numtaps, [f1, f2])
Multi-band (passbands are [0, f1], [f2, f3] and [f4, 1])::
    >>> firwin(numtaps, [f1, f2, f3, f4])
Multi-band (passbands are [f1, f2] and [f3,f4])::
>>> firwin(numtaps, [f1, f2, f3, f4], pass_zero=False)
See also
--------
scipy.signal.firwin2
"""
# The major enhancements to this function added in November 2010 were
# developed by Tom Krauss (see ticket #902).
cutoff = np.atleast_1d(cutoff) / float(nyq)
# Check for invalid input.
if cutoff.ndim > 1:
raise ValueError("The cutoff argument must be at most one-dimensional.")
if cutoff.size == 0:
raise ValueError("At least one cutoff frequency must be given.")
if cutoff.min() <= 0 or cutoff.max() >= 1:
raise ValueError("Invalid cutoff frequency: frequencies must be greater than 0 and less than nyq.")
if np.any(np.diff(cutoff) <= 0):
raise ValueError("Invalid cutoff frequencies: the frequencies must be strictly increasing.")
if width is not None:
# A width was given. Find the beta parameter of the Kaiser window
# and set `window`. This overrides the value of `window` passed in.
atten = kaiser_atten(numtaps, float(width)/nyq)
beta = kaiser_beta(atten)
window = ('kaiser', beta)
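    # `pass_nyquist` is True when the last band extends up to the Nyquist rate:
    # the XOR below works because an odd number of cutoffs with pass_zero=False
    # (e.g. a highpass) or an even number with pass_zero=True (e.g. a bandstop)
    # leaves the final passband open-ended at the top.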
pass_nyquist = bool(cutoff.size & 1) ^ pass_zero
if pass_nyquist and numtaps % 2 == 0:
raise ValueError("A filter with an even number of coefficients must "
"have zero response at the Nyquist rate.")
# Insert 0 and/or 1 at the ends of cutoff so that the length of cutoff is even,
# and each pair in cutoff corresponds to passband.
cutoff = np.hstack(([0.0]*pass_zero, cutoff, [1.0]*pass_nyquist))
# `bands` is a 2D array; each row gives the left and right edges of a passband.
bands = cutoff.reshape(-1,2)
# Build up the coefficients.
alpha = 0.5 * (numtaps-1)
m = np.arange(0, numtaps) - alpha
h = 0
for left, right in bands:
h += right * sinc(right * m)
h -= left * sinc(left * m)
# Get and apply the window function.
from signaltools import get_window
win = get_window(window, numtaps, fftbins=False)
h *= win
# Now handle scaling if desired.
if scale:
# Get the first passband.
left, right = bands[0]
if left == 0:
scale_frequency = 0.0
elif right == 1:
scale_frequency = 1.0
else:
scale_frequency = 0.5 * (left + right)
c = np.cos(np.pi * m * scale_frequency)
s = np.sum(h * c)
h /= s
return h
# Original version of firwin2 from scipy ticket #457, submitted by "tash".
#
# Rewritten by Warren Weckesser, 2010.
def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0):
"""FIR filter design using the window method.
From the given frequencies `freq` and corresponding gains `gain`,
this function constructs an FIR filter with linear phase and
(approximately) the given frequency response.
Parameters
----------
numtaps : int
The number of taps in the FIR filter. `numtaps` must be less than
`nfreqs`. If the gain at the Nyquist rate, `gain[-1]`, is not 0,
then `numtaps` must be odd.
freq : array-like, 1D
The frequency sampling points. Typically 0.0 to 1.0 with 1.0 being
Nyquist. The Nyquist frequency can be redefined with the argument
`nyq`.
The values in `freq` must be nondecreasing. A value can be repeated
once to implement a discontinuity. The first value in `freq` must
be 0, and the last value must be `nyq`.
gain : array-like
The filter gains at the frequency sampling points.
nfreqs : int, optional
The size of the interpolation mesh used to construct the filter.
For most efficient behavior, this should be a power of 2 plus 1
(e.g, 129, 257, etc). The default is one more than the smallest
power of 2 that is not less than `numtaps`. `nfreqs` must be greater
than `numtaps`.
window : string or (string, float) or float, or None, optional
Window function to use. Default is "hamming". See
`scipy.signal.get_window` for the complete list of possible values.
If None, no window function is applied.
nyq : float
Nyquist frequency. Each frequency in `freq` must be between 0 and
`nyq` (inclusive).
Returns
-------
taps : numpy 1D array of length `numtaps`
The filter coefficients of the FIR filter.
Example
-------
A lowpass FIR filter with a response that is 1 on [0.0, 0.5], and
that decreases linearly on [0.5, 1.0] from 1 to 0:
>>> taps = firwin2(150, [0.0, 0.5, 1.0], [1.0, 1.0, 0.0])
>>> print(taps[72:78])
[-0.02286961 -0.06362756 0.57310236 0.57310236 -0.06362756 -0.02286961]
See also
--------
scipy.signal.firwin
Notes
-----
From the given set of frequencies and gains, the desired response is
constructed in the frequency domain. The inverse FFT is applied to the
desired response to create the associated convolution kernel, and the
first `numtaps` coefficients of this kernel, scaled by `window`, are
returned.
The FIR filter will have linear phase. The filter is Type I if `numtaps`
is odd and Type II if `numtaps` is even. Because Type II filters always
have a zero at the Nyquist frequency, `numtaps` must be odd if `gain[-1]`
is not zero.
.. versionadded:: 0.9.0
References
----------
.. [1] Oppenheim, A. V. and Schafer, R. W., "Discrete-Time Signal
Processing", Prentice-Hall, Englewood Cliffs, New Jersey (1989).
(See, for example, Section 7.4.)
.. [2] Smith, Steven W., "The Scientist and Engineer's Guide to Digital
Signal Processing", Ch. 17. http://www.dspguide.com/ch17/1.htm
"""
if len(freq) != len(gain):
raise ValueError('freq and gain must be of same length.')
if nfreqs is not None and numtaps >= nfreqs:
raise ValueError('ntaps must be less than nfreqs, but firwin2 was '
'called with ntaps=%d and nfreqs=%s' % (numtaps, nfreqs))
if freq[0] != 0 or freq[-1] != nyq:
raise ValueError('freq must start with 0 and end with `nyq`.')
d = np.diff(freq)
if (d < 0).any():
raise ValueError('The values in freq must be nondecreasing.')
d2 = d[:-1] + d[1:]
if (d2 == 0).any():
raise ValueError('A value in freq must not occur more than twice.')
if numtaps % 2 == 0 and gain[-1] != 0.0:
raise ValueError("A filter with an even number of coefficients must "
"have zero gain at the Nyquist rate.")
if nfreqs is None:
nfreqs = 1 + 2 ** int(ceil(log(numtaps,2)))
# Tweak any repeated values in freq so that interp works.
eps = np.finfo(float).eps
for k in range(len(freq)):
if k < len(freq)-1 and freq[k] == freq[k+1]:
freq[k] = freq[k] - eps
freq[k+1] = freq[k+1] + eps
# Linearly interpolate the desired response on a uniform mesh `x`.
x = np.linspace(0.0, nyq, nfreqs)
fx = np.interp(x, freq, gain)
# Adjust the phases of the coefficients so that the first `ntaps` of the
# inverse FFT are the desired filter coefficients.
shift = np.exp(-(numtaps-1)/2. * 1.j * np.pi * x / nyq)
fx2 = fx * shift
# Use irfft to compute the inverse FFT.
out_full = irfft(fx2)
if window is not None:
# Create the window to apply to the filter coefficients.
from signaltools import get_window
wind = get_window(window, numtaps, fftbins=False)
else:
wind = 1
# Keep only the first `numtaps` coefficients in `out`, and multiply by
# the window.
out = out_full[:numtaps] * wind
return out
def remez(numtaps, bands, desired, weight=None, Hz=1, type='bandpass',
maxiter=25, grid_density=16):
"""
Calculate the minimax optimal filter using the Remez exchange algorithm.
Calculate the filter-coefficients for the finite impulse response
(FIR) filter whose transfer function minimizes the maximum error
between the desired gain and the realized gain in the specified
frequency bands using the Remez exchange algorithm.
Parameters
----------
numtaps : int
The desired number of taps in the filter. The number of taps is
the number of terms in the filter, or the filter order plus one.
bands : array_like
A monotonic sequence containing the band edges in Hz.
All elements must be non-negative and less than half the sampling
frequency as given by `Hz`.
desired : array_like
A sequence half the size of bands containing the desired gain
in each of the specified bands.
weight : array_like, optional
A relative weighting to give to each band region. The length of
`weight` has to be half the length of `bands`.
Hz : scalar, optional
The sampling frequency in Hz. Default is 1.
type : {'bandpass', 'differentiator', 'hilbert'}, optional
The type of filter:
'bandpass' : flat response in bands. This is the default.
'differentiator' : frequency proportional response in bands.
'hilbert' : filter with odd symmetry, that is, type III
(for even order) or type IV (for odd order)
linear phase filters.
maxiter : int, optional
Maximum number of iterations of the algorithm. Default is 25.
grid_density : int, optional
Grid density. The dense grid used in `remez` is of size
``(numtaps + 1) * grid_density``. Default is 16.
Returns
-------
out : ndarray
A rank-1 array containing the coefficients of the optimal
(in a minimax sense) filter.
See Also
--------
freqz : Compute the frequency response of a digital filter.
References
----------
.. [1] J. H. McClellan and T. W. Parks, "A unified approach to the
design of optimum FIR linear phase digital filters",
IEEE Trans. Circuit Theory, vol. CT-20, pp. 697-701, 1973.
.. [2] J. H. McClellan, T. W. Parks and L. R. Rabiner, "A Computer
Program for Designing Optimum FIR Linear Phase Digital
Filters", IEEE Trans. Audio Electroacoust., vol. AU-21,
pp. 506-525, 1973.
Examples
--------
We want to construct a filter with a passband at 0.2-0.4 Hz, and
stop bands at 0-0.1 Hz and 0.45-0.5 Hz. Note that this means that the
behavior in the frequency ranges between those bands is unspecified and
may overshoot.
>>> bpass = sp.signal.remez(72, [0, 0.1, 0.2, 0.4, 0.45, 0.5], [0, 1, 0])
>>> freq, response = sp.signal.freqz(bpass)
>>> ampl = np.abs(response)
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(111)
>>> ax1.semilogy(freq/(2*np.pi), ampl, 'b-') # freq in Hz
[<matplotlib.lines.Line2D object at 0xf486790>]
>>> plt.show()
"""
# Convert type
try:
tnum = {'bandpass':1, 'differentiator':2, 'hilbert':3}[type]
except KeyError:
raise ValueError("Type must be 'bandpass', 'differentiator', or 'hilbert'")
# Convert weight
if weight is None:
weight = [1] * len(desired)
bands = np.asarray(bands).copy()
return sigtools._remez(numtaps, bands, desired, weight, tnum, Hz,
maxiter, grid_density)
| gpl-3.0 |
ericagol/GenRP | examples/benchmark/plot.py | 3 | 2745 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division, print_function
import os
import argparse
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from celerite.plot_setup import setup, get_figsize, COLOR_CYCLE
setup(auto=True)
parser = argparse.ArgumentParser()
parser.add_argument("platform")
parser.add_argument("--suffix", default=None)
parser.add_argument("--directory",
default=os.path.dirname(os.path.abspath(__file__)))
args = parser.parse_args()
# Compile into a matrix
suffix = ""
if args.suffix is not None:
suffix = "_" + args.suffix
fn = "benchmark_{0}{1}.csv".format(args.platform, suffix)
fn = os.path.join(args.directory, fn)
data = pd.read_csv(fn, comment="#")
data_matrix = np.empty((data.xi.max() + 1, data.yi.max() + 1))
data_matrix[:] = np.nan
data_matrix[data.xi, data.yi] = data.comp_time + data.ll_time
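# data_matrix is indexed as [J index, N index]: each cell holds the total
# (compute + log-likelihood) time for that (J, N) combination, and cells that
# were not benchmarked stay NaN so they can be masked out before plotting.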
try:
np_time = np.array(data.numpy_comp_time + data.numpy_ll_time)
except AttributeError:
np_time = np.nan + np.zeros(len(data))
np_m = np.isfinite(np_time)
np_time = np_time[np_m]
np_n = np.array(data.n)[np_m]
np_j = np.array(data.j)[np_m]
J = np.sort(np.array(data.j.unique()))
N = np.sort(np.array(data.n.unique()))
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=get_figsize(1, 2), sharey=True)
for i, j in enumerate(J):
x = N
y = data_matrix[i]
m = np.isfinite(y)
ax1.plot(x[m], y[m], ".-", color=COLOR_CYCLE[i],
label="{0:.0f}".format(j))
if len(np_time):
ax1.plot(np_n, np_time, ":k", label="direct")
if suffix == "_george":
f = N * np.log(N)**2
ax1.plot(N, 4.0 * f / f[-1], ":k", label=r"$\mathcal{O}(N\,\log^2N)$")
ax1.plot(N, 3e-2 * N / N[-1], "k", label=r"$\mathcal{O}(N)$")
ax1.legend(loc="lower right", bbox_to_anchor=(1.05, 0), fontsize=8)
for i, n in enumerate(N[::2]):
x = J
y = data_matrix[:, 2*i]
m = np.isfinite(y)
ax2.plot(x[m], y[m], ".-", color=COLOR_CYCLE[i % len(COLOR_CYCLE)],
label="{0:.0f}".format(n))
if suffix == "_george":
f = J
ax2.plot(J, 0.1 * f / f[-1], ":k", label=r"$\mathcal{O}(J)$")
ax2.plot(J, 1e-2 * J**2 / J[-1]**2, "k",
label=r"$\mathcal{O}(J^2)$")
ax2.legend(loc="lower right", bbox_to_anchor=(1.05, 0), fontsize=8)
ax1.set_xscale("log")
ax2.set_xscale("log")
ax1.set_yscale("log")
ax1.set_xlim(N.min(), N.max())
ax2.set_xlim(1, J.max())
ax2.set_ylim(6e-6, 6.0)
ax1.set_ylabel("computational cost [seconds]")
ax1.set_xlabel("number of data points [$N$]")
ax2.set_xlabel("number of terms [$J$]")
fn = "benchmark_{0}{1}".format(args.platform, suffix)
fn = os.path.join(args.directory, fn)
fig.savefig(fn + ".png", bbox_inches="tight", dpi=300)
fig.savefig(fn + ".pdf", bbox_inches="tight", dpi=300)
| mit |
IssamLaradji/scikit-learn | sklearn/svm/setup.py | 321 | 3157 | import os
from os.path import join
import numpy
from sklearn._build_utils import get_blas_info
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration('svm', parent_package, top_path)
config.add_subpackage('tests')
# Section LibSVM
# we compile both libsvm and libsvm_sparse
config.add_library('libsvm-skl',
sources=[join('src', 'libsvm', 'libsvm_template.cpp')],
depends=[join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')],
# Force C++ linking in case gcc is picked up instead
# of g++ under windows with some versions of MinGW
extra_link_args=['-lstdc++'],
)
libsvm_sources = ['libsvm.c']
libsvm_depends = [join('src', 'libsvm', 'libsvm_helper.c'),
join('src', 'libsvm', 'libsvm_template.cpp'),
join('src', 'libsvm', 'svm.cpp'),
join('src', 'libsvm', 'svm.h')]
config.add_extension('libsvm',
sources=libsvm_sources,
include_dirs=[numpy.get_include(),
join('src', 'libsvm')],
libraries=['libsvm-skl'],
depends=libsvm_depends,
)
### liblinear module
cblas_libs, blas_info = get_blas_info()
if os.name == 'posix':
cblas_libs.append('m')
liblinear_sources = ['liblinear.c',
join('src', 'liblinear', '*.cpp')]
liblinear_depends = [join('src', 'liblinear', '*.h'),
join('src', 'liblinear', 'liblinear_helper.c')]
config.add_extension('liblinear',
sources=liblinear_sources,
libraries=cblas_libs,
include_dirs=[join('..', 'src', 'cblas'),
numpy.get_include(),
blas_info.pop('include_dirs', [])],
extra_compile_args=blas_info.pop('extra_compile_args',
[]),
depends=liblinear_depends,
# extra_compile_args=['-O0 -fno-inline'],
** blas_info)
## end liblinear module
# this should go *after* libsvm-skl
libsvm_sparse_sources = ['libsvm_sparse.c']
config.add_extension('libsvm_sparse', libraries=['libsvm-skl'],
sources=libsvm_sparse_sources,
include_dirs=[numpy.get_include(),
join("src", "libsvm")],
depends=[join("src", "libsvm", "svm.h"),
join("src", "libsvm",
"libsvm_sparse_helper.c")])
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(**configuration(top_path='').todict())
| bsd-3-clause |
evanbiederstedt/CMBintheLikeHoodz | fake_data_patches/Patch3_nside512_vary_CDM_smica_Dec_7_2015_lmax1600_fakedatasigma100main.py | 1 | 13645 |
# coding: utf-8
# December 5, 2015
# In[6]:
from __future__ import (division, print_function, absolute_import)
# In[7]:
import math
import matplotlib.pyplot as plt
import numpy as np
import healpy as hp
import pyfits as pf
import astropy as ap
import os
from scipy.special import eval_legendre ##special scipy function
# In[8]:
# http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.io.readsav.html
# http://www.astrobetter.com/blog/2009/11/24/read-idl-save-files-into-python/
# In[4]:
import scipy
import scipy.io
# In[6]:
import os
os.getcwd()
os.chdir("/Users/evanbiederstedt/downloads")
# In[11]:
# In[8]:
patch_file = scipy.io.readsav('listpix_patch3.sav')
# In[ ]:
# In[9]:
type(patch_file)
# In[10]:
arr3 = patch_file['listpix_patch3']
#print(arr3)
# In[11]:
type(arr3)
# In[12]:
print(len(arr3)) # pixels total 12476
# In[13]:
smica_map = "COM_CompMap_CMB-smica_2048_R1.20.fits"
# In[ ]:
# In[14]:
nside=512
npix = 12*(nside**2) #total number of pixels, npix
LMAX = ((2*nside)) #maximum l of the power spectrum C_l
heal_npix = hp.nside2npix(nside) # Healpix calculated npix
print("The total number of pixels is " + str(npix))
print("The maximum ell of the power spectrum C_l set to lmax = 2*nside " +str(LMAX))
print("Healpix tells me total number of pixels npix is equal to " + str(heal_npix))
# In[15]:
mapread_smica = hp.read_map(smica_map, field=0)
#hp.mollview(mapread_camb512)
#hp.mollview(mapread_smica)
print("CMB map, Noise map")
smica_noise = hp.read_map(smica_map, field=1)
#hp.mollview(smica_noise)
# In[16]:
print(mapread_smica[:20])
print(smica_noise[:20])
# In[17]:
smica512 = hp.pixelfunc.ud_grade(mapread_smica, 512)
noise512 = hp.pixelfunc.ud_grade(smica_noise, 512)
print(smica512[:20])
print(noise512[:20])
# In[18]:
print(len(smica512))
print(len(noise512))
# In[ ]:
# In[19]:
# rename array for convenience
tempval = smica512
# Data:
# tempval # the array of pixel values, (3145728,)
# In[20]:
print(len(tempval))
print(tempval.shape)
tempval[:10]
# In[21]:
#
# We only wish to use the pixels defined in our patch
# These pixel indices are listed in arr3 such that total number pixels total 12476
#
# arr3: this defines pixel indices within patch
#
# To access pixel indices within array of CMB pixels, just use tempval[arr3]
#
patch=smica512[arr3]
noisepatch = noise512[arr3]
# In[22]:
print(len(patch))
print(len(noisepatch))
# In[23]:
print(patch[:30])
print(noisepatch[:30])
# In[ ]:
# In[12]:
# For lmax = 1600, we must create an array of ell values, i.e. [0 1 2 3....1599 1600]
ell = np.arange(1601)
#print(ell)
#
# Subtract the monopole and dipole, l=0, l=1
ellval = ell[2:]
#print(ellval)
# In[ ]:
# In[13]:
PlM_50 = "cl_varyCDMlmax1600ptPlMat50fakesigma100.npy"
PlM_100 = "cl_varyCDMlmax1600ptPlMat100fakesigma100.npy"
PlM_150 = "cl_varyCDMlmax1600ptPlMat150fakesigma100.npy"
# In[14]:
data1 = np.load(PlM_50)
data2 = np.load(PlM_100)
data3 = np.load(PlM_150)
# In[15]:
print(data1.shape)
print(data2.shape)
print(data3.shape)
# In[16]:
type(data1)
# In[ ]:
ff = "CAMB_cl_varyCDMlmax1600.npy"
cell_array = np.load(ff)
# In[ ]:
# In[ ]:
# In[17]:
PlMat_total = np.concatenate((data1, data2, data3))
# In[18]:
PlMat_total.shape
# In[ ]:
# In[19]:
PlMat = PlMat_total
# In[20]:
PlMat[2]
# In[ ]:
# Step 3: (2*l +1)/4pi from l=2 to l=lmax
# [5/4pi 7/4pi 9/4pi 11/4pi .... 3201/4pi ]
norm = ((2*ellval + 1))/(4*math.pi)
print(len(ellval))
print(norm.shape)
print(norm[2])
# In[ ]:
# Step 4: multiply
# [5/4pi*P_2(M) + 7/4pi*P_3(M) + ...... + 3201/4pi*P_1600(M)]
#
# multiply PlMat by (2*l+1)/4pi, i.e. norm
norm_matrix = norm[:, None, None] * PlMat
# [5/4pi * P_2(M) 7/4pi * P_3(M) .... 3201/4pi * P_1600(M)]
# In[ ]:
print(norm_matrix.shape)
# In[ ]:
print(PlMat.shape)
# In[ ]:
# Step 5: multiply by theoretical CAMB values, [C_2 C_3 ... C_1599 C_1600]
# [5/4pi*C_2*P_2(M) + 7/4pi*C_3*P_3(M) + ...... + 3201/4pi*C_1600*P_1600(M)]
# In[ ]:
# define pixel-value arrays
mT = np.matrix(patch)    # mT.shape = (1, 12476)
m = np.matrix(patch).T   # m.shape = (12476, 1)
Npix2pi = (len(patch))*2*math.pi # LF constant
print(mT.shape)
print(m.shape)
print(Npix2pi)
# In[ ]:
tempp = patch
noise = noisepatch
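#
# The LogLF-style functions below evaluate a pixel-space Gaussian likelihood,
#     -2*ln(L) = m^T C^{-1} m + ln(det C) + constant,
# with covariance C = S + N, where the signal part is
#     S_ij = sum_l (2l+1)/(4*pi) * C_l * P_l(cos(theta_ij))
# norm_matrix already holds (2l+1)/(4*pi) * P_l(M) for each ell, so each
# function only multiplies by the theory C_l values, sums over ell, adds the
# noise map on the diagonal (plain, squared, or omitted, depending on the
# variant), and returns the model-fit term and/or the log-determinant;
# Npix2pi is used as the constant offset.
#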
def LogLF(cell):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = noise * id_matrix
Cij = Sij + Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return model_fit_terms + logdetC[1] + Npix2pi
def modelfit(cell):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = noise * id_matrix
Cij = Sij + Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return model_fit_terms
def logdet(cell):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = noise * id_matrix
Cij = Sij + Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return logdetC[1]
def squaredLogLF(cell):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = (noise**2) * id_matrix
Cij = Sij + Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return model_fit_terms + logdetC[1] + Npix2pi
def squared_modelfit(cell):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = (noise**2) * id_matrix
Cij = Sij + Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return model_fit_terms
def squared_logdet(cell):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = (noise**2) * id_matrix
Cij = Sij + Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return logdetC[1]
def noiselessLogLF(cell):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = noise * id_matrix
Cij = Sij #+ Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return model_fit_terms + logdetC[1] + Npix2pi
def noiselessmodelfit(cell):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = noise * id_matrix
Cij = Sij #+ Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return model_fit_terms
def noiselesslogdet(cell):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = noise * id_matrix
Cij = Sij #+ Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return logdetC[1]
# In[ ]:
def noiseonlyLogLF(cell):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = noise * id_matrix
Cij = Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return model_fit_terms + logdetC[1] + Npix2pi
def noiseonlymodelfit(cell):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = noise * id_matrix
Cij = Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return model_fit_terms
def noiseonlylogdet(cell):
# norm_matrix is (2*l+1)/4pi * P_ell(Mat)
CellPellM = cell[:, None, None] * norm_matrix # elementwise (2*l+1)/4pi * C^th_ell * P_ell(Mat)
Sij = np.sum(CellPellM, axis=0) # now one matrix
id_matrix = np.identity(len(tempp))
Nij = noise * id_matrix
Cij = Nij
model_fit_terms = np.array([np.dot(tempp.T , (np.linalg.solve(Cij, tempp)) )])
logdetC = np.linalg.slogdet(Cij)
return logdetC[1]
# In[ ]:
forty_samples = np.linspace(0.075, 0.1655, num=40)
# In[ ]:
logLF_40 = [LogLF(cell_array[i]) for i in range(40)]
# In[ ]:
modelfit_terms = [modelfit(cell_array[i]) for i in range(40)]
# In[ ]:
logdet_terms = [logdet(cell_array[i]) for i in range(40)]
# In[ ]:
sqlogLF_40 = [squaredLogLF(cell_array[i]) for i in range(40)]
# In[ ]:
sqmodelfit_terms = [squared_modelfit(cell_array[i]) for i in range(40)]
# In[ ]:
sqlogdet_terms = [squared_logdet(cell_array[i]) for i in range(40)]
# In[ ]:
noise_logLF = [noiselessLogLF(cell_array[i]) for i in range(40)]
# In[ ]:
noise_modelfits = [noiselessmodelfit(cell_array[i]) for i in range(40)]
# In[ ]:
noise_logdet = [noiselesslogdet(cell_array[i]) for i in range(40)]
# In[ ]:
onlynoise_logLF = [noiseonlyLogLF(cell_array[i]) for i in range(40)]
# In[ ]:
onlynoise_modelfits = [noiseonlymodelfit(cell_array[i]) for i in range(40)]
# In[ ]:
onlynoise_logdet = [noiseonlylogdet(cell_array[i]) for i in range(40)]
# In[ ]:
# In[ ]:
modelfit_terms
# In[ ]:
plt.plot(forty_samples, logLF_40)
plt.title("-2loglF ouput, SMICA Planck map")
plt.ylabel("-2logLF")
plt.xlabel("Omega_CDM")
plt.axvline(x = 0.12029, color = 'r')
plt.savefig("fake100_1.png")
# In[ ]:
plt.plot(forty_samples, modelfit_terms)
plt.title("only model fit terms, SMICA Planck map")
plt.ylabel("-2logLF")
plt.xlabel("Omega_CDM")
plt.axvline(x = 0.12029, color = 'r')
plt.savefig("fake100_2.png")
# In[ ]:
plt.plot(forty_samples, logdet_terms)
plt.title("only logdetC, SMICA Planck map")
plt.ylabel("-2logLF")
plt.xlabel("Omega_CDM")
plt.axvline(x = 0.12029, color = 'r')
plt.savefig("fake100_3.png")
# In[ ]:
plt.plot(forty_samples, sqlogLF_40)
plt.title("squared noise, -2logLF, SMICA Planck map")
plt.ylabel("-2logLF")
plt.xlabel("Omega_CDM")
plt.axvline(x = 0.12029, color = 'r')
plt.savefig("fake100_4.png")
# In[ ]:
plt.plot(forty_samples, sqmodelfit_terms)
plt.title("squared noise, model fit terms, SMICA Planck map")
plt.ylabel("-2logLF")
plt.xlabel("Omega_CDM")
plt.axvline(x = 0.12029, color = 'r')
plt.savefig("fake100_5.png")
# In[ ]:
plt.plot(forty_samples, sqlogdet_terms)
plt.title("squared noise, logdet C terms, SMICA Planck map")
plt.ylabel("-2logLF")
plt.xlabel("Omega_CDM")
plt.axvline(x = 0.12029, color = 'r')
plt.savefig("fake100_6.png")
# In[ ]:
plt.plot(forty_samples, onlynoise_logLF)
plt.title("Sij=0, -2logLF, SMICA Planck map")
plt.ylabel("-2logLF")
plt.xlabel("Omega_CDM")
plt.axvline(x = 0.12029, color = 'r')
plt.savefig("fake100_7.png")
# In[ ]:
plt.plot(forty_samples, onlynoise_modelfits)
plt.title("Sij=0, model fit terms, SMICA Planck map")
plt.ylabel("-2logLF")
plt.xlabel("Omega_CDM")
plt.axvline(x = 0.12029, color = 'r')
plt.savefig("fake100_8.png")
# In[ ]:
plt.plot(forty_samples, onlynoise_logdet)
plt.title("Sij=0, logdet C, SMICA Planck map")
plt.ylabel("-2logLF")
plt.xlabel("Omega_CDM")
plt.axvline(x = 0.12029, color = 'r')
plt.savefig("fake100_9.png")
# In[ ]:
# In[ ]:
plt.plot(forty_samples, noise_logLF)
plt.title("Nij=0, -2logLF, SMICA Planck map")
plt.ylabel("-2logLF")
plt.xlabel("Omega_CDM")
plt.axvline(x = 0.12029, color = 'r')
plt.savefig("fake100_10.png")
# In[ ]:
plt.plot(forty_samples, noise_modelfits)
plt.title("Nij=0, model fit terms, SMICA Planck map")
plt.ylabel("-2logLF")
plt.xlabel("Omega_CDM")
plt.axvline(x = 0.12029, color = 'r')
plt.savefig("fake100_11.png")
# In[ ]:
plt.plot(forty_samples, noise_logdet)
plt.title("Nij=0, logdet C, SMICA Planck map")
plt.ylabel("-2logLF")
plt.xlabel("Omega_CDM")
plt.axvline(x = 0.12029, color = 'r')
plt.savefig("fake100_12.png")
# In[ ]:
# In[ ]:
# In[ ]:
# In[ ]:
| mit |
abhisg/scikit-learn | examples/mixture/plot_gmm_classifier.py | 250 | 3918 | """
==================
GMM classification
==================
Demonstration of Gaussian mixture models for classification.
See :ref:`gmm` for more information on the estimator.
Plots predicted labels on both training and held out test data using a
variety of GMM classifiers on the iris dataset.
Compares GMMs with spherical, diagonal, full, and tied covariance
matrices in increasing order of performance. Although one would
expect full covariance to perform best in general, it is prone to
overfitting on small datasets and does not generalize well to held out
test data.
On the plots, train data is shown as dots, while test data is shown as
crosses. The iris dataset is four-dimensional. Only the first two
dimensions are shown here, and thus some points are separated in other
dimensions.
"""
print(__doc__)
# Author: Ron Weiss <ronweiss@gmail.com>, Gael Varoquaux
# License: BSD 3 clause
# $Id$
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
from sklearn import datasets
from sklearn.cross_validation import StratifiedKFold
from sklearn.externals.six.moves import xrange
from sklearn.mixture import GMM
def make_ellipses(gmm, ax):
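    # For each of the three mixture components, take the 2x2 covariance of the
    # first two features, eigendecompose it to get the orientation of the
    # principal axis, and overlay a semi-transparent ellipse of the matching
    # colour on the axes.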
for n, color in enumerate('rgb'):
v, w = np.linalg.eigh(gmm._get_covars()[n][:2, :2])
u = w[0] / np.linalg.norm(w[0])
angle = np.arctan2(u[1], u[0])
angle = 180 * angle / np.pi # convert to degrees
v *= 9
ell = mpl.patches.Ellipse(gmm.means_[n, :2], v[0], v[1],
180 + angle, color=color)
ell.set_clip_box(ax.bbox)
ell.set_alpha(0.5)
ax.add_artist(ell)
iris = datasets.load_iris()
# Break up the dataset into non-overlapping training (75%) and testing
# (25%) sets.
skf = StratifiedKFold(iris.target, n_folds=4)
# Only take the first fold.
train_index, test_index = next(iter(skf))
X_train = iris.data[train_index]
y_train = iris.target[train_index]
X_test = iris.data[test_index]
y_test = iris.target[test_index]
n_classes = len(np.unique(y_train))
# Try GMMs using different types of covariances.
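# init_params='wc' makes fit() re-initialize only the weights and covariances,
# so the class-wise means assigned below are kept as the EM starting point;
# n_iter=20 caps the number of EM iterations.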
classifiers = dict((covar_type, GMM(n_components=n_classes,
covariance_type=covar_type, init_params='wc', n_iter=20))
for covar_type in ['spherical', 'diag', 'tied', 'full'])
n_classifiers = len(classifiers)
plt.figure(figsize=(3 * n_classifiers / 2, 6))
plt.subplots_adjust(bottom=.01, top=0.95, hspace=.15, wspace=.05,
left=.01, right=.99)
for index, (name, classifier) in enumerate(classifiers.items()):
# Since we have class labels for the training data, we can
# initialize the GMM parameters in a supervised manner.
classifier.means_ = np.array([X_train[y_train == i].mean(axis=0)
for i in xrange(n_classes)])
# Train the other parameters using the EM algorithm.
classifier.fit(X_train)
    h = plt.subplot(2, n_classifiers // 2, index + 1)
make_ellipses(classifier, h)
for n, color in enumerate('rgb'):
data = iris.data[iris.target == n]
plt.scatter(data[:, 0], data[:, 1], 0.8, color=color,
label=iris.target_names[n])
# Plot the test data with crosses
for n, color in enumerate('rgb'):
data = X_test[y_test == n]
plt.plot(data[:, 0], data[:, 1], 'x', color=color)
y_train_pred = classifier.predict(X_train)
train_accuracy = np.mean(y_train_pred.ravel() == y_train.ravel()) * 100
plt.text(0.05, 0.9, 'Train accuracy: %.1f' % train_accuracy,
transform=h.transAxes)
y_test_pred = classifier.predict(X_test)
test_accuracy = np.mean(y_test_pred.ravel() == y_test.ravel()) * 100
plt.text(0.05, 0.8, 'Test accuracy: %.1f' % test_accuracy,
transform=h.transAxes)
plt.xticks(())
plt.yticks(())
plt.title(name)
plt.legend(loc='lower right', prop=dict(size=12))
plt.show()
| bsd-3-clause |
IssamLaradji/scikit-learn | doc/sphinxext/gen_rst.py | 4 | 39150 | """
Example generation for the scikit learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
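# Illustrative use (hypothetical handles): mirror everything written to stdout
# into a log file while an example script runs, e.g.
#     sys.stdout = Tee(sys.stdout, open('example.log', 'w'))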
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
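# joblib memoizes _get_data on disk (under _build), so repeated documentation
# builds reuse previously downloaded pages and search indexes instead of
# fetching them again.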
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
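# try the fully-qualified name first, then fall back to the per-module entry keyed by the object name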
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_001.png': (1, 600),
'plot_outlier_detection_001.png': (3, 372),
'plot_gp_regression_001.png': (2, 250),
'plot_adaboost_twoclass_001.png': (1, 372),
'plot_compare_methods_001.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
lines = open(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
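# the first STRING token at module level is taken to be the docstring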
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery.\n"
"Please check the layout of your"
" example file:\n {}\n and make sure"
" it's correct".format(filename))
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
width: 0px;
overflow: hidden;
}
</style>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for directory in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, directory)):
generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = open(example_file).readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
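# record where the module docstring ends; erow keeps advancing to the last token's row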
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif (tok_type == 'STRING') and check_docstring:
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
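# column 0 holds the filename, column 1 the number of code lines (total minus docstring)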
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer">
<div class="docstringWrapper">
""")
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
<p>%s
</p></div>
</div>
""" % (ref_name, snippet))
return ''.join(out)
def generate_dir_rst(directory, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not directory == '.':
target_dir = os.path.join(root_dir, directory)
src_dir = os.path.join(example_dir, directory)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(directory, 'images', 'thumb')):
os.makedirs(os.path.join(directory, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(directory, directory, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (directory, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', directory)
ex_file.write(_thumbnail_div(directory, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
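# pick the scale factor that keeps the resized image inside the width x height box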
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
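# drop trailing submodules for as long as obj_name can still be imported from the shorter prefix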
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
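# '%%03d' leaves a literal %03d placeholder so each saved figure gets a zero-padded index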
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip().expandtabs()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_mngr.num)
plt.savefig(image_path % fig_mngr.num)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
# Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
# thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
# have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
example_code_obj = identify_names(open(example_file).read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
resolver_urls = {
'matplotlib': 'http://matplotlib.org',
'numpy': 'http://docs.scipy.org/doc/numpy-1.6.0',
'scipy': 'http://docs.scipy.org/doc/scipy-0.11.0/reference',
}
for this_module, url in resolver_urls.items():
try:
doc_resolvers[this_module] = SphinxDocLinkResolver(url)
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding `{0}` links due to a URL "
"Error:\n".format(this_module))
print(e.args)
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
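# Pygments renders dotted names as separate <span> tokens, so rebuild them token by token before matching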
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
try:
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
except (HTTPError, URLError) as e:
print("The following error has occurred:\n")
print(repr(e))
continue
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
profxj/old_xastropy | xastropy/xguis/spec_guis.py | 1 | 22801 | """
#;+
#; NAME:
#; spec_guis
#; Version 1.0
#;
#; PURPOSE:
#; Module for Spectroscopy Guis with QT
#; These call pieces from spec_widgets
#; 12-Dec-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
# Import libraries
import numpy as np
import os, sys
import matplotlib.pyplot as plt
import glob
from PyQt4 import QtGui
from PyQt4 import QtCore
import matplotlib as mpl
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
# Matplotlib Figure object
from matplotlib.figure import Figure
from astropy.units import Quantity
from astropy import units as u
from linetools.lists.linelist import LineList
from xastropy.xutils import xdebug as xdb
from xastropy.xguis import spec_widgets as xspw
#class XSpecGui(QtGui.QMainWindow):
#class XAbsIDGui(QtGui.QMainWindow):
#class XVelPltGui(QtGui.QDialog):
# x_specplot replacement
class XSpecGui(QtGui.QMainWindow):
''' GUI to replace XIDL x_specplot
12-Dec-2014 by JXP v1.0
27-Mar-2015 by JXP v2.0 :: EW, column, better zooming + panning
'''
def __init__(self, spec, parent=None, zsys=None, norm=None):
QtGui.QMainWindow.__init__(self, parent)
'''
spec = Spectrum1D
'''
mpl.rcParams['agg.path.chunksize'] = 20000 # Needed to avoid crash in large spectral files
# Build a widget combining several others
self.main_widget = QtGui.QWidget()
# Status bar
self.create_status_bar()
# Grab the pieces and tie together
self.pltline_widg = xspw.PlotLinesWidget(status=self.statusBar, init_z=zsys)
self.pltline_widg.setMaximumWidth(300)
# Hook the spec widget to Plot Line
self.spec_widg = xspw.ExamineSpecWidget(spec,status=self.statusBar,
llist=self.pltline_widg.llist,
zsys=zsys, norm=norm)
self.pltline_widg.spec_widg = self.spec_widg
self.spec_widg.canvas.mpl_connect('button_press_event', self.on_click)
extras = QtGui.QWidget()
extras.setMaximumWidth(130)
vbox = QtGui.QVBoxLayout()
qbtn = QtGui.QPushButton('Quit', self)
qbtn.clicked.connect(self.quit)
vbox.addWidget(self.pltline_widg)
vbox.addWidget(qbtn)
extras.setLayout(vbox)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.spec_widg)
hbox.addWidget(extras)
self.main_widget.setLayout(hbox)
# Point MainWindow
self.setCentralWidget(self.main_widget)
def create_status_bar(self):
self.status_text = QtGui.QLabel("XSpec")
self.statusBar().addWidget(self.status_text, 1)
def on_click(self,event):
if event.button == 3: # Set redshift
if self.pltline_widg.llist['List'] is None:
return
self.select_line_widg = xspw.SelectLineWidget(
self.pltline_widg.llist[self.pltline_widg.llist['List']]._data)
self.select_line_widg.exec_()
line = self.select_line_widg.line
if line.strip() == 'None':
return
#
quant = line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
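# redshift implied by matching the clicked observed wavelength to the chosen rest wavelength: z = lambda_obs / lambda_rest - 1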
z = event.xdata/wrest.value - 1.
self.pltline_widg.llist['z'] = z
self.statusBar().showMessage('z = {:f}'.format(z))
self.pltline_widg.zbox.setText('{:.5f}'.format(self.pltline_widg.llist['z']))
# Draw
self.spec_widg.on_draw()
# Quit
def quit(self):
self.close()
# GUI for Identifying many (all) Abs Systems in a Spectrum
class XAbsIDGui(QtGui.QMainWindow):
''' GUI to analyze absorption systems in a spectrum
16-Dec-2014 by JXP
'''
def __init__(self, spec, parent=None, abssys_dir=None, absid_list=None, norm=True,
srch_id=True, id_dir='ID_LINES/', second_file=None):
QtGui.QMainWindow.__init__(self, parent)
'''
spec = Spectrum1D
second_file = Second spectrum file (e.g. COS + STIS)
'''
# Build a widget combining several others
self.main_widget = QtGui.QWidget()
# Status bar
self.create_status_bar()
# Initialize
if absid_list is None:
# Automatically search for ID files
if srch_id:
absid_list = glob.glob(id_dir+'*id.fits')
else:
absid_list = []
# Grab the pieces and tie together
self.abssys_widg = xspw.AbsSysWidget(absid_list)
self.pltline_widg = xspw.PlotLinesWidget(status=self.statusBar)
self.spec_widg = xspw.ExamineSpecWidget(spec,status=self.statusBar,
llist=self.pltline_widg.llist, norm=norm,
second_file=second_file,
abs_sys=self.abssys_widg.abs_sys)
self.pltline_widg.spec_widg = self.spec_widg
# Connections
self.spec_widg.canvas.mpl_connect('button_press_event', self.on_click)
self.spec_widg.canvas.mpl_connect('key_press_event', self.on_key)
self.abssys_widg.refine_button.clicked.connect(self.refine_abssys)
# Layout
anly_widg = QtGui.QWidget()
anly_widg.setMaximumWidth(300)
anly_widg.setMinimumWidth(150)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.pltline_widg)
vbox.addWidget(self.abssys_widg)
anly_widg.setLayout(vbox)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.spec_widg)
hbox.addWidget(anly_widg)
self.main_widget.setLayout(hbox)
# Point MainWindow
self.setCentralWidget(self.main_widget)
def create_status_bar(self):
self.status_text = QtGui.QLabel("XAbsID")
self.statusBar().addWidget(self.status_text, 1)
def on_key(self,event):
if event.key == 'v': # Stack plot
if self.spec_widg.vplt_flg == 1:
self.abssys_widg.add_fil(self.spec_widg.outfil)
self.abssys_widg.reload()
# Update line list
idx = self.pltline_widg.lists.index(self.spec_widg.llist['List'])
self.pltline_widg.llist_widget.setCurrentRow(idx)
elif event.key == '?': # Check for a match with known systems
wv_chosen = event.xdata
# Load grb
llist = xspw.set_llist('grb.lst')
# Loop through systems
for iabs_sys in self.abssys_widg.all_abssys:
z = iabs_sys.zabs
wvobs = np.array((1+z) * llist['grb.lst']['wrest'])
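# flag lines of this system whose redshifted wavelength falls within 0.2 Angstrom of the chosen point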
mtwv = np.where( np.abs( wvobs-wv_chosen ) < 0.2 )[0]
for imt in mtwv:
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
print('z={:g}, {:s}, f={:g}'.format(z,
llist['grb.lst']['name'][imt],
llist['grb.lst']['fval'][imt]))
if len(mtwv) == 0:
print('No match. wrest={:g} for z={:g}'.format(wv_chosen/(1+z), z))
def on_click(self,event):
if event.button == 3: # Set redshift
# Line list?
try:
self.pltline_widg.llist['List']
except KeyError:
print('Set a line list first!!')
return
#
if self.pltline_widg.llist[self.pltline_widg.llist['List']] == 'None':
return
self.select_line_widg = xspw.SelectLineWidget(
self.pltline_widg.llist[self.pltline_widg.llist['List']]._data)
self.select_line_widg.exec_()
line = self.select_line_widg.line
if line.strip() == 'None':
return
#
quant = line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
z = event.xdata/wrest.value - 1.
self.pltline_widg.llist['z'] = z
self.statusBar().showMessage('z = {:f}'.format(z))
self.pltline_widg.zbox.setText(self.pltline_widg.zbox.z_frmt.format(
self.pltline_widg.llist['z']))
# Draw
self.spec_widg.on_draw()
def refine_abssys(self):
item = self.abssys_widg.abslist_widget.selectedItems()
if len(item) != 1:
self.statusBar().showMessage('AbsSys: Must select only 1 system!')
print('AbsSys: Must select only 1 system!')
return
txt = item[0].text()
ii = self.abssys_widg.all_items.index(txt)
iabs_sys = self.abssys_widg.all_abssys[ii]
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Launch
gui = XVelPltGui(self.spec_widg.spec, outfil=iabs_sys.absid_file,
abs_sys=iabs_sys, norm=self.spec_widg.norm)
gui.exec_()
# ##################################
# GUI for velocity plot
class XVelPltGui(QtGui.QDialog):
''' GUI to analyze absorption systems in a spectrum
24-Dec-2014 by JXP
'''
def __init__(self, ispec, z=None, parent=None, llist=None, norm=True,
vmnx=[-300., 300.]*u.km/u.s, abs_sys=None, outfil='dum_ID.fits',
sel_wv=None):
'''
spec = Filename or Spectrum1D
Norm: Bool (False)
Normalized spectrum?
abs_sys: AbsSystem
Absorption system class
sel_wv: Selected wavelength. Used to inspect a single, unknown line
'''
super(XVelPltGui, self).__init__(parent)
# Initialize
self.abs_sys = abs_sys
if self.abs_sys is not None:
self.z = self.abs_sys.zabs
else:
if z is None:
raise ValueError('XVelPlt: Need to set abs_sys or z!')
self.z = z
self.vmnx = vmnx
self.outfil = outfil
self.norm = norm
self.sel_wv = sel_wv
# Grab the pieces and tie together
self.vplt_widg = xspw.VelPlotWidget(ispec, abs_sys=self.abs_sys, llist=llist,
vmnx=self.vmnx, z=self.z, norm=self.norm)
self.pltline_widg = xspw.PlotLinesWidget(init_llist=self.vplt_widg.llist,
init_z=self.z)
#self.pltline_widg.spec_widg = self.vplt_widg
self.slines = xspw.SelectedLinesWidget(self.vplt_widg.llist[self.vplt_widg.llist['List']],
init_select=self.vplt_widg.llist['show_line'],
plot_widget=self.vplt_widg)
# Connections
self.pltline_widg.llist_widget.currentItemChanged.connect(self.on_llist_change)
self.connect(self.pltline_widg.zbox, QtCore.SIGNAL('editingFinished ()'), self.setz)
self.vplt_widg.canvas.mpl_connect('key_press_event', self.on_key)
# Outfil
wbtn = QtGui.QPushButton('Write', self)
wbtn.setAutoDefault(False)
wbtn.clicked.connect(self.write_out)
self.out_box = QtGui.QLineEdit()
self.out_box.setText(self.outfil)
self.connect(self.out_box, QtCore.SIGNAL('editingFinished ()'), self.set_outfil)
# Quit
buttons = QtGui.QWidget()
wqbtn = QtGui.QPushButton('Write+Quit', self)
wqbtn.setAutoDefault(False)
wqbtn.clicked.connect(self.write_quit)
qbtn = QtGui.QPushButton('Quit', self)
qbtn.setAutoDefault(False)
qbtn.clicked.connect(self.quit)
# Sizes
lines_widg = QtGui.QWidget()
lines_widg.setMaximumWidth(300)
lines_widg.setMinimumWidth(200)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.pltline_widg)
vbox.addWidget(self.slines)
vbox.addWidget(wbtn)
vbox.addWidget(self.out_box)
# Quit buttons
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(wqbtn)
hbox1.addWidget(qbtn)
buttons.setLayout(hbox1)
#
vbox.addWidget(buttons)
lines_widg.setLayout(vbox)
hbox = QtGui.QHBoxLayout()
hbox.addWidget(self.vplt_widg)
hbox.addWidget(lines_widg)
self.setLayout(hbox)
# Initial draw
self.vplt_widg.on_draw()
# Change z
def on_key(self,event):
if event.key == 'z':
self.z = self.vplt_widg.z
self.pltline_widg.llist['z'] = self.z
self.pltline_widg.zbox.setText(self.pltline_widg.zbox.z_frmt.format(self.z))
if event.key == 'T': # Try another rest wavelength for input line
# Get line from User
self.select_line_widg = xspw.SelectLineWidget(
self.pltline_widg.llist[self.pltline_widg.llist['List']]._data)
self.select_line_widg.exec_()
line = self.select_line_widg.line
quant = line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
# Set redshift
self.z = self.sel_wv / wrest - 1.
print('Setting z = {:g}'.format(self.z))
self.pltline_widg.llist['z'] = self.z
self.pltline_widg.zbox.setText(self.pltline_widg.zbox.z_frmt.format(self.z))
self.vplt_widg.z = self.pltline_widg.llist['z']
# Reset
self.vplt_widg.init_lines()
self.vplt_widg.on_draw()
# Set z from pltline_widg
def setz(self):
self.vplt_widg.abs_sys.zabs = self.pltline_widg.llist['z']
self.vplt_widg.z = self.pltline_widg.llist['z']
self.z = self.pltline_widg.llist['z']
self.vplt_widg.on_draw()
# Change list of lines to choose from
def on_llist_change(self):
llist = self.pltline_widg.llist
all_lines = list( llist[llist['List']]['wrest'] )
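# map each rest wavelength already in the absorption system onto its index in the newly selected line list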
# Set selected
abs_sys = self.vplt_widg.abs_sys
wrest = abs_sys.lines.keys()
wrest.sort()
select = []
for iwrest in wrest:
try:
select.append(all_lines.index(iwrest))
except ValueError:
pass
select.sort()
# GUIs
self.vplt_widg.llist['List'] = llist['List']
self.vplt_widg.llist['show_line'] = select
self.vplt_widg.idx_line = 0
self.slines.selected = select
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.slines.on_list_change(llist[llist['List']])
# Write
def set_outfil(self):
self.outfil = str(self.out_box.text())
print('XVelPlot: Will write to {:s}'.format(self.outfil))
# Write
def write_out(self):
self.vplt_widg.abs_sys.absid_file = self.outfil
self.vplt_widg.abs_sys.write_absid_file()
# Write + Quit
def write_quit(self):
self.write_out()
self.flg_quit = 1
self.abs_sys = self.vplt_widg.abs_sys
self.done(1)
# Write + Quit
def quit(self):
#self.abs_sys = self.vplt_widg.abs_sys # Have to write to pass back
self.flg_quit = 0
self.done(1)
# x_specplot replacement
class XAODMGui(QtGui.QDialog):
''' GUI to show AODM plots
28-Dec-2014 by JXP
'''
def __init__(self, spec, z, wrest, vmnx=[-300., 300.]*u.km/u.s, parent=None, norm=True):
super(XAODMGui, self).__init__(parent)
'''
spec = Spectrum1D
'''
# Grab the pieces and tie together
self.aodm_widg = xspw.AODMWidget(spec,z,wrest,vmnx=vmnx,norm=norm)
self.aodm_widg.canvas.mpl_connect('key_press_event', self.on_key)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.aodm_widg)
self.setLayout(vbox)
self.aodm_widg.on_draw()
def on_key(self,event):
if event.key == 'q': # Quit
self.done(1)
# Script to run XSpec from the command line
def run_xspec(*args, **kwargs):
'''
Runs the XSpecGui
Command line
or from Python
Examples:
1. python ~/xastropy/xastropy/xguis/spec_guis.py 1
2. spec_guis.run_xspec(filename)
3. spec_guis.run_xspec(spec1d)
'''
import argparse
from specutils import Spectrum1D
from xastropy.spec.utils import XSpectrum1D
parser = argparse.ArgumentParser(description='Parse for XSpec')
parser.add_argument("flag", type=int, help="GUI flag (ignored)")
parser.add_argument("file", type=str, help="Spectral file")
parser.add_argument("-zsys", type=float, help="System Redshift")
parser.add_argument("--un_norm", help="Spectrum is NOT normalized",
action="store_true")
if len(args) == 0:
pargs = parser.parse_args()
else: # better know what you are doing!
#xdb.set_trace()
if type(args[0]) in [XSpectrum1D, Spectrum1D]:
app = QtGui.QApplication(sys.argv)
gui = XSpecGui(args[0], **kwargs)
gui.show()
app.exec_()
return
else: # String parsing
largs = ['1'] + [iargs for iargs in args]
pargs = parser.parse_args(largs)
# Normalized?
norm=True
if pargs.un_norm:
norm=False
# System redshift?
try:
zsys = pargs.zsys
except AttributeError:
zsys=None
app = QtGui.QApplication(sys.argv)
gui = XSpecGui(pargs.file, zsys=zsys, norm=norm)
gui.show()
app.exec_()
# Script to run XAbsID from the command line
def run_xabsid():
import argparse
parser = argparse.ArgumentParser(description='Script for XSpec')
parser.add_argument("flag", type=int, help="GUI flag (ignored)")
parser.add_argument("file", type=str, help="Spectral file")
parser.add_argument("--un_norm", help="Spectrum is NOT normalized",
action="store_true")
parser.add_argument("-id_dir", type=str,
help="Directory for ID files (ID_LINES is default)")
parser.add_argument("-secondfile", type=str, help="Second spectral file")
args = parser.parse_args()
# Normalized?
norm=True
if args.un_norm:
norm=False
# Second spectral file?
second_file=None
if args.secondfile:
second_file=args.secondfile
# Launch
app = QtGui.QApplication(sys.argv)
gui = XAbsIDGui(args.file, norm=norm, second_file=second_file)
gui.show()
app.exec_()
# ################
if __name__ == "__main__":
import sys
from linetools.spectra import io as lsi
from xastropy.igm import abs_sys as xiabs
if len(sys.argv) == 1: # TESTING
flg_fig = 0
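# bit flags select which GUI test to run; (flg_fig % 2**(n+1)) >= 2**n checks whether bit n is set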
#flg_fig += 2**0 # XSpec
#flg_fig += 2**1 # XAbsID
#flg_fig += 2**2 # XVelPlt Gui
flg_fig += 2**3 # XVelPlt Gui without ID list; Also tests select wave
#flg_fig += 2**4 # XAODM Gui
# Read spectrum
spec_fil = '/u/xavier/Keck/HIRES/RedData/PH957/PH957_f.fits'
spec = lsi.readspec(spec_fil)
# XSpec
if (flg_fig % 2) == 1:
app = QtGui.QApplication(sys.argv)
gui = XSpecGui(spec)
gui.show()
app.exec_()
# XAbsID
if (flg_fig % 2**2) >= 2**1:
#spec_fil = '/u/xavier/PROGETTI/LLSZ3/data/normalize/SDSSJ1004+0018_nF.fits'
#spec = xspec.readwrite.readspec(spec_fil)
#norm = True
spec_fil = '/Users/xavier/Dropbox/CASBAH/jxp_analysis/FBQS0751+2919/fbqs0751_nov2014bin.fits'
norm = False
absid_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ1004+0018_z2.746_id.fits'
absid_fil2 = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ2348-1041_z2.997_id.fits'
app = QtGui.QApplication(sys.argv)
gui = XAbsIDGui(spec_fil,norm=norm) #,absid_list=[absid_fil, absid_fil2])
gui.show()
app.exec_()
# XVelPlt with existing AbsID file
if (flg_fig % 2**3) >= 2**2:
spec_fil = '/u/xavier/PROGETTI/LLSZ3/data/normalize/SDSSJ1004+0018_nF.fits'
#spec = xspec.readwrite.readspec(spec_fil)
absid_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ1004+0018_z2.746_id.fits'
abs_sys = xiabs.abssys_utils.Generic_System(None)
abs_sys.parse_absid_file(absid_fil)
#
app = QtGui.QApplication(sys.argv)
app.setApplicationName('XVelPlt')
gui = XVelPltGui(spec_fil,abs_sys=abs_sys,
outfil='/Users/xavier/Desktop/tmp.fits')
gui.show()
sys.exit(app.exec_())
# XVelPlt without existing AbsID file
if (flg_fig % 2**4) >= 2**3:
#spec_fil = '/u/xavier/PROGETTI/LLSZ3/data/normalize/SDSSJ1004+0018_nF.fits'
#z=2.746
#outfil='/Users/xavier/Desktop/J1004+0018_z2.746_id.fits'
spec_fil = '/Users/xavier/Dropbox/CASBAH/jxp_analysis/FBQS0751+2919/fbqs0751_nov2014bin.fits'
z=0.
outfil='/Users/xavier/Desktop/tmp.fits'
#
app = QtGui.QApplication(sys.argv)
app.setApplicationName('XVelPlt')
gui = XVelPltGui(spec_fil, z=z, outfil=outfil,norm=False, sel_wv=1526.80)
gui.show()
sys.exit(app.exec_())
# AODM GUI
if (flg_fig % 2**5) >= 2**4:
#spec_fil = '/Users/xavier/PROGETTI/LLSZ3/data/normalize/UM184_nF.fits'
#z=2.96916
#lines = [1548.195, 1550.770]
norm = True
spec_fil = '/Users/xavier/Dropbox/CASBAH/jxp_analysis/FBQS0751+2919/fbqs0751_nov2014bin.fits'
z=0.4391
lines = [1215.6701, 1025.7223] * u.AA
norm = False
# Launch
spec = lsi.readspec(spec_fil)
app = QtGui.QApplication(sys.argv)
app.setApplicationName('AODM')
main = XAODMGui(spec, z, lines, norm=norm)
main.show()
sys.exit(app.exec_())
else: # RUN A GUI
id_gui = int(sys.argv[1]) # 1 = XSpec, 2=XAbsId
if id_gui == 1:
run_xspec()
elif id_gui == 2:
run_xabsid()
| bsd-3-clause |
cmap/cmapPy | cmapPy/pandasGEXpress/mini_gctoo_for_testing.py | 1 | 4266 | """
Creates a small GCToo instance (with representative examples of typically found fields); can use for testing.
ex:
import mini_gctoo_for testing
my_mini_gctoo = mini_gctoo_for_testing.make()
"""
import logging
import pandas
import numpy
import cmapPy.pandasGEXpress.GCToo as GCToo
import cmapPy.pandasGEXpress.setup_GCToo_logger as setup_logger
__author__ = 'Oana Enache'
__email__ = 'oana@broadinstitute.org'
logger = logging.getLogger(setup_logger.LOGGER_NAME)
def make(convert_neg_666=True):
"""
Creates a small GCToo instance (with representative examples of typically found fields); can use for testing.
"""
# metadata examples; should be one of each type reasonable to find
id_vals = ["LJP007_MCF10A_24H:TRT_CP:BRD-K93918653:3.33", "MISC003_A375_24H:TRT_CP:BRD-K93918653:3.33",
"LJP007_MCF7_24H:TRT_POSCON:BRD-K81418486:10", "LJP007_MCF7_24H:TRT_POSCON:BRD-A61304759:10",
"LJP007_MCF7_24H:CTL_VEHICLE:DMSO:-666", "LJP007_MCF7_24H:TRT_CP:BRD-K64857848:10"]
count_cv = ["14|15|14", "13|14|13",
"13|15|14|14|15|14|14|13|14|15|15|14|14|15|14|15|14|14|15|14|15|14|14|14|14|14|14|15|14|14|15|14|14|14|14|13|14|14|14|14|14|14|15|14|13|13|15|14|14|15|14|14|14|15|13|13|15|13|14|13|13|14|14|14|14|13",
"13", "13", "14"]
distil_ss = [9.822065353, 6.8915205, 1.35840559, 5.548898697, 3.355231762, 4.837643147]
zmad_ref = ["population", "population", "population", "population", "population", "population"]
distil_nsample = [3, 3, 66, 2, 9, 111111]
mfc_plate_id = ["-666", "-666", "-666", "-666", "-666", "-666"]
# build metadata dataframe
mini_meta_dict = {}
mini_meta_dict["id"] = id_vals
mini_meta_dict["count_cv"] = count_cv
mini_meta_dict["distil_ss"] = distil_ss
mini_meta_dict["zmad_ref"] = zmad_ref
mini_meta_dict["distil_nsample"] = distil_nsample
mini_meta_dict["mfc_plate_id"] = mfc_plate_id
mini_row_metadata = pandas.DataFrame(mini_meta_dict,
columns=['id', 'count_cv', 'distil_nsample', 'distil_ss', 'mfc_plate_id', 'zmad_ref'])
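# -666 is the GCT(X) convention for missing metadata; optionally convert it to NaN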
if convert_neg_666:
mini_row_metadata = mini_row_metadata.replace([-666, "-666", -666.0], [numpy.nan, numpy.nan, numpy.nan])
# if all values in a column are NaN, convert the dtype of that column to float
all_nan_columns = (mini_row_metadata.isnull().sum() == numpy.array(mini_row_metadata.shape[0])).nonzero()[0]
mini_row_metadata = mini_row_metadata.astype({d: 'float' for d in mini_row_metadata.columns[all_nan_columns.tolist()]})
else:
mini_row_metadata = mini_row_metadata.replace([-666, -666.0], ["-666", "-666"])
# for now (at least) col and row metadata are the same
mini_col_metadata = mini_row_metadata.copy()
# data example values
r1 = [1, 2, 3, 4, 5, 6]
r2 = [4.3, 4.5, 4.3, 4.3, 4.3, 4.3]
r3 = [7, 8, 9, 0, 1.23476, 9.758320]
r4 = [0.11, 3.3456356, 2.345667, 9.822065353, 4.78865099, 4.7886]
r5 = [-0.11, -3.3456356, -2.345667, -9.822065353, -4.78865099, -4.7886]
r6 = [1, -2, 3, -4, 5, -6]
# build data dataframe
mini_data_mat = pandas.DataFrame([r1, r2, r3, r4, r5, r6], dtype=numpy.float32)
mini_data_mat.index = id_vals
mini_data_mat.columns = id_vals
# instantiate & assign attributes of GCToo instance
mini_version = "GCTX1.0"
mini_src = "mini_gctoo.gctx"
mini_row_metadata_df = mini_row_metadata
mini_row_metadata_df.set_index("id", inplace=True, drop=True)
mini_row_metadata.index.name = "rid"
mini_row_metadata_df.columns.name = "rhd"
mini_col_metadata_df = mini_col_metadata
mini_col_metadata_df.set_index("id", inplace=True, drop=True)
mini_col_metadata.index.name = "cid"
mini_col_metadata_df.columns.name = "chd"
mini_data_df = mini_data_mat
mini_data_df.index.name = "rid"
mini_data_df.columns.name = "cid"
logger.debug("Making mini_gctoo instance...")
mini_gctoo = GCToo.GCToo(data_df=mini_data_df, row_metadata_df=mini_row_metadata_df,
col_metadata_df=mini_col_metadata_df, src=mini_src, version=mini_version)
return mini_gctoo
| bsd-3-clause |
mac389/at-risk-agents | analyze-abm.py | 1 | 8030 | import os, json,re
import numpy as np
import Graphics as artist
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
from matplotlib import rcParams
from optparse import OptionParser
from scipy.stats import percentileofscore,scoreatpercentile
rcParams['text.usetex'] = True
parser = OptionParser(usage="usage: %prog [options] filename",
version="%prog 1.0")
parser.add_option("-s", "--source",
action="store",
dest="source",
default=False,
help="Folder with data to analyze")
(options, args) = parser.parse_args()
READ = 'rb'
DELIMITER = '\t'
basepath = os.path.join(os.getcwd(),options.source)
hist_props={"range":[-1,1],"histtype":"stepfilled"}
make_filename = lambda filename: os.path.join(os.getcwd(),basepath,filename)
USERS = 0
TIME = 1
'''
Questions to ask:
1. Do those with the worst drinking behavior have a different alphas (susceptibilities) than those with the best
2. Is targeted intervention (using our method of identification) effective
'''
verbose = False
data = {}
print 'ABM BASEPATH IS ',basepath
directory = json.load(open(os.path.join(os.getcwd(),basepath,'directory.json'),READ))
for variable in directory:
if verbose:
print 'Analyzing %s'%variable
data[variable] = np.loadtxt(directory[variable],delimiter = DELIMITER)
if variable == 'complete record':
shape = tuple(map(int,re.findall('\d+',directory[variable]))[-3:])
tmp = np.reshape(data[variable],shape) #-- Figure out shape from filename
data[variable] = tmp
#Bounds for drinking behavior
upper_quartile_cutoff = scoreatpercentile(data['past month drinking'],75)
lower_quartile_cutoff = scoreatpercentile(data['past month drinking'],25)
light_users_idx = np.unique(np.where(data['past month drinking']<lower_quartile_cutoff)[USERS])
heavy_users_idx = np.where(data['past month drinking']>upper_quartile_cutoff)[USERS]
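# np.bincount over the agent indices counts how many sampled periods each agent spent above the heavy-drinking cutoff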
periods_of_intense_drinking = {agent:count for agent,count in enumerate(np.bincount(heavy_users_idx)) if count>0}
temporal_threshold = scoreatpercentile(periods_of_intense_drinking.values(),75)
heavy_frequent_users_idx = [agent for agent in periods_of_intense_drinking if periods_of_intense_drinking[agent] > temporal_threshold]
heavy_not_frequent_users_idx = np.array(list(set(periods_of_intense_drinking.keys()) - set(heavy_frequent_users_idx)))
heavy_users_idx = np.unique(heavy_users_idx)
#Identify baseline characteristics of each quartile
variable_filenames = [filename for filename in os.listdir(basepath) if 'initial-distribution' in filename]
demographics = {filename:np.loadtxt(make_filename(filename),delimiter=DELIMITER) for filename in variable_filenames}
nrows = 2
ncols = 3
normalize = lambda data: (data-data.min())/float(data.max()-data.min())
fig,axs = plt.subplots(nrows=nrows,ncols=ncols,sharex=True,sharey=True)
yvars = open('./agent-variables',READ).read().splitlines()
characteristics = ['initial-distribution-%s.txt'%('-'.join(yvar.split())) for yvar in yvars]
#Compare heavy users vs light users
for i,col in enumerate(axs):
for j,row in enumerate(col):
characteristic = characteristics[i*ncols+j]
uq = demographics[characteristic][heavy_not_frequent_users_idx]
lq = demographics[characteristic][heavy_frequent_users_idx]
_,_,patches1=row.hist(uq,color='k',label=artist.format('Heavy Users'),range=(-1,1))
plt.hold(True)
_,_,patches2=row.hist(lq,color='r',alpha=0.5,label=artist.format('Heavy Frequent Users'),range=(-1,1))
fig.canvas.mpl_connect('draw_event', artist.on_draw)
artist.adjust_spines(row)
if 'attitude' not in yvars[i*ncols+j]:
row.set_xlabel(artist.format(yvars[i*ncols+j]))
elif 'psychological' in yvars[i*ncols+j]:
label = '\n'.join(map(artist.format,['Attitude to','psychological','consequences']))
row.set_xlabel(label)
elif 'medical' in yvars[i*ncols+j]:
label = '\n'.join(map(artist.format,['Attitude','to medical','consequences']))
row.set_xlabel(label)
plt.tight_layout()
fig.legend((patches1[0], patches2[0]), (artist.format('Heavy Users'),artist.format('Heavy Frequent Users')),
loc='lower right', frameon=False, ncol=2)
#filename = os.path.join(os.getcwd(),basepath,'compare-quartile-demographics-no-temporal-threshold.png')
filename = os.path.join(os.getcwd(),basepath,'compare-quartile-demographics-frequent-vs-not-heavy.png')
plt.savefig(filename,dpi=300)
del fig,axs,i,j
fig,axs = plt.subplots(nrows=nrows,ncols=ncols,sharex=True,sharey=True)
#Compare heavy users vs frequent users
for i,col in enumerate(axs):
for j,row in enumerate(col):
characteristic = characteristics[i*ncols+j]
uq = demographics[characteristic][heavy_users_idx]
lq = demographics[characteristic][light_users_idx]
_,_,patches1=row.hist(uq,color='k',label=artist.format('Heavy Users'),range=(-1,1))
plt.hold(True)
_,_,patches2=row.hist(lq,color='r',alpha=0.5,label=artist.format('Light Users'),range=(-1,1))
fig.canvas.mpl_connect('draw_event', artist.on_draw)
artist.adjust_spines(row)
if 'attitude' not in yvars[i*ncols+j]:
row.set_xlabel(artist.format(yvars[i*ncols+j]))
elif 'psychological' in yvars[i*ncols+j]:
label = '\n'.join(map(artist.format,['Attitude to','psychological','consequences']))
row.set_xlabel(label)
elif 'medical' in yvars[i*ncols+j]:
label = '\n'.join(map(artist.format,['Attitude','to medical','consequences']))
row.set_xlabel(label)
plt.tight_layout()
fig.legend((patches1[0], patches2[0]), (artist.format('Heavy Users'),artist.format('Light Users')),
loc='lower right', frameon=False, ncol=2)
#filename = os.path.join(os.getcwd(),basepath,'compare-quartile-demographics-no-temporal-threshold.png')
filename = os.path.join(os.getcwd(),basepath,'compare-quartile-demographics-light-vs-heavy.png')
plt.savefig(filename,dpi=300)
del fig,axs
data = np.loadtxt(make_filename('alpha.txt'),delimiter=DELIMITER)
uq =data[heavy_users_idx]
lq = data[heavy_frequent_users_idx]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(uq,color='k',label=artist.format('Heavy Users'),range=(0,1),bins=20)
plt.hold(True)
ax.hist(lq,color='r',alpha=0.5,label=artist.format('Heavy Frequent Users'),range=(0,1),bins=20)
fig.canvas.mpl_connect('draw_event', artist.on_draw)
artist.adjust_spines(ax)
ax.set_ylabel(artist.format('Prevalence'))
ax.set_xlabel(artist.format('Social Susceptibility'))
plt.legend(frameon=False,ncol=2,loc='upper center',bbox_to_anchor=(.5,1.05))
plt.tight_layout()
#plt.savefig(make_filename('susceptibility-no-temporal-threshold.png'),dpi=300)
plt.savefig(make_filename('susceptibility-frequent-vs-frequent-heavy.png'),dpi=300)
del fig,ax
data = np.loadtxt(make_filename('alpha.txt'),delimiter=DELIMITER)
uq =data[heavy_not_frequent_users_idx]
lq = data[light_users_idx]
fig = plt.figure()
ax = fig.add_subplot(111)
ax.hist(uq,color='k',label=artist.format('Heavy Users'),range=(0,1),bins=20)
plt.hold(True)
ax.hist(lq,color='r',alpha=0.5,label=artist.format('Light Users'),range=(0,1),bins=20)
fig.canvas.mpl_connect('draw_event', artist.on_draw)
artist.adjust_spines(ax)
ax.set_ylabel(artist.format('Prevalence'))
ax.set_xlabel(artist.format('Social Susceptibility'))
plt.legend(frameon=False,ncol=2,loc='upper center',bbox_to_anchor=(.5,1.05))
plt.tight_layout()
#plt.savefig(make_filename('susceptibility-no-temporal-threshold.png'),dpi=300)
plt.savefig(make_filename('susceptibility-heavy-light.png'),dpi=300)
#--- Create graphical output
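# NOTE: the calls below assume a `visualization` module and variables (options.target, target_idx, drinking_behavior, start, stop, beta, logfilename) defined elsewhere; they are not set up in this script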
visualization.graph_everything(basepath=basepath,moniker=options.target,verbose=False,logfilename=logfilename)
#Need a measure to show their behaviors are different
visualization.population_summary(moniker=options.target+'-at-risk',basepath=basepath,criterion=list(target_idx), criterionname='at risk')
visualization.time_series(moniker=options.target, basepath=basepath,criterion = list(target_idx),
criterionname='at risk')
visualization.snapshots(drinking_behavior[:,start],drinking_behavior[:,stop-1],moniker='beta-%.02f'%beta,basepath=basepath)
| mit |
ProkopHapala/SimpleSimulationEngine | python/pyMolecular/test_CLCFGO.py | 1 | 26748 | import os
import sys
import numpy as np
#sys.path.append("../")
sys.path.append('../')
from pyMeta import cpp_utils
cpp_utils.clean_build = False # Recompile only if changed
#import eFF
import CLCFGO as effmc
import eFF_terms as effpy
# ========== Globals
iNorm = -1
bDebug = 0
natom=0; norb=2; perOrb=1; nqOrb=1
bPrintInfo = False
label=""
plt =None
ss_glob=None; xs_glob=None; cs_glob=None
rnd_pos = 0
rnd_size = 0
rnd_coef = 0
# ========= Functions
def init_effmc_2x2( sz=0.5 ):
global natom,norb,perOrb,nqOrb
natom=0; norb=2; perOrb=2; nqOrb=perOrb*(perOrb+1)/2
effmc.init(natom,norb,perOrb,1) # natom, nOrb, perOrb, natypes
ecoef = effmc.getBuff( "ecoef",(norb,perOrb) )
esize = effmc.getBuff( "esize",(norb,perOrb) )
epos = effmc.getBuff( "epos" ,(norb,perOrb,3) )
epos [:,:,:]= 0
#epos [0,0,0]= 0.0; epos [0,1,0]= -0.25;
#epos [1,0,1]= -0.5; epos [1,1,1]= -0.25
epos [0,0,0]= 0.0; epos [0,1,0]= 1.0;
epos [1,0,1]= 1.0; epos [1,1,1]= 2.0
#esize[:,:]= 1
#esize[:,:]= 0.5
esize[0,0]= 1.8; esize[0,1]= 1.1;
esize[1,0]= 1.5; esize[1,1]= 0.3;
ecoef[:,:]= 1
ecoef[0,0 ]= 0.4; ecoef[0,1]= 0.3;
epos [:,:,:] += (np.random.rand(norb,perOrb,3)-0.5)*0.2
esize[:,: ] += (np.random.rand(norb,perOrb )-0.5)*0.2
ecoef[:,: ] += (np.random.rand(norb,perOrb )-0.5)*0.2
#ecoef[1,0 ]= 0.7; ecoef[1,1]= 1.6
if(bPrintInfo): effmc.printAtomsAndElectrons()
def init_effmc_H2mol_1g( adist=1.0, edist=0.1, sz=0.9, aQ=-1,aQsz=0.0, aP=0.0,aPsz=0.1 ):
global natom,norb,perOrb,nqOrb
natom=2; norb=2; perOrb=1; nqOrb=perOrb*(perOrb+1)/2
effmc.init(natom,norb,perOrb,1) # natom, nOrb, perOrb, natypes
ecoef = effmc.getBuff( "ecoef",(norb,perOrb) )
esize = effmc.getBuff( "esize",(norb,perOrb) )
epos = effmc.getBuff( "epos" ,(norb,perOrb,3) )
ospin = effmc.getIBuff( "ospin",(norb) )
apos = effmc.getBuff( "apos" ,(natom,3) )
aPars = effmc.getBuff( "aPars",(natom,4) )
ospin[0]=1; ospin[1]=-1
epos [:,:,:]= 0; epos[0,:,0]=-edist/2; epos[1,:,0]=+edist/2;
esize[:,:]= sz
ecoef[:,:]= 1
#epos [:,:,:] += (np.random.rand(norb,perOrb,3)-0.5)*0.2
#esize[:,: ] += (np.random.rand(norb,perOrb )-0.5)*0.2
#ecoef[:,: ] += (np.random.rand(norb,perOrb )-0.5)*0.2
apos [:,:] = 0; apos[0,0] = -adist/2; apos[1,0] = +adist/2
aPars [:,0] = aQ
aPars [:,1] = aQsz
aPars [:,2] = aPsz
aPars [:,3] = aP
if(bPrintInfo): effmc.printAtomsAndElectrons()
def init_effmc( natom_=0, norb_=1, perOrb_=1, sz=0.5, dist=1.0, aQ=-1,aQsz=0.0, aP=0.0,aPsz=0.1 ):
global natom,norb,perOrb,nqOrb
natom=natom_; norb=norb_; perOrb=perOrb_; nqOrb=perOrb*(perOrb+1)/2
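# nqOrb = perOrb*(perOrb+1)/2 : presumably one auxiliary density term per unique pair of basis functions within an orbital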
effmc.init(natom,norb,perOrb,1) # natom, nOrb, perOrb, natypes
ecoef = effmc.getBuff( "ecoef",(norb,perOrb) )
esize = effmc.getBuff( "esize",(norb,perOrb) )
epos = effmc.getBuff( "epos" ,(norb,perOrb,3) )
rhoQ = effmc.getBuff( "rhoQ" ,(norb,nqOrb) )
rhoS = effmc.getBuff( "rhoS" ,(norb,nqOrb) )
rhoP = effmc.getBuff( "rhoP" ,(norb,nqOrb,3) )
epos [:,:,:]= 0 + (np.random.rand(norb,perOrb,3)-0.5)*rnd_pos
esize[:,: ]=sz + (np.random.rand(norb,perOrb )-0.5)*rnd_size
ecoef[:,: ]=1 + (np.random.rand(norb,perOrb )-0.5)*rnd_coef
rhoP [:,:,:]=0 + (np.random.rand(norb,nqOrb,3 )-0.5)*rnd_pos
rhoS [:,: ]=sz*np.sqrt(0.5) + (np.random.rand(norb,nqOrb )-0.5)*rnd_size
rhoQ [:,: ]=1 + (np.random.rand(norb,nqOrb )-0.5)*rnd_coef
if norb_>1:
epos [1,0,0] += dist
#ecoef[1,1]=0 # psi2=(1,0)
#ecoef[1,0]=1 # psi2=(1,0)
if perOrb_>1:
epos [:,1,0] = epos [:,0,0] + dist
if natom_>0:
apos = effmc.getBuff( "apos" ,(natom,3) )
aPars = effmc.getBuff( "aPars",(natom,4) )
#aQs = effmc.getBuff( "aQs" ,(natom,) )
#aQsize = effmc.getBuff( "aQsize",(natom,) )
#aPcoef = effmc.getBuff( "aPcoef",(natom,) )
#aPsize = effmc.getBuff( "aPsize",(natom,) )
apos[:,:] = 0
apos[:,0] = np.arange(0,dist*natom_,dist)
#aQs [:] = aQ
#aQsize[:] = aQsz
#aPcoef[:] = aP
#aPsize[:] = aPsz
aPars [:,0] = aQ
aPars [:,1] = aQsz
aPars [:,2] = aPsz
aPars [:,3] = aP
if(bPrintInfo): effmc.printAtomsAndElectrons()
def test_ProjectWf( Etoll=1e-5 ):
print "\n ============ test_ProjectWf ( rho = |wf|^2 )"
init_effmc( norb_=2, perOrb_=2, sz=0.5, dist=1.0 )
# ---- test
nps = 400+1
ps = np.zeros((nps,3))
ps[:,0] = np.linspace( -5.0, 5.0, nps )
effmc.eval() # we have to run it to project wavefuction to aux density
wf = effmc.orbAtPoints(ps)
rho = effmc.rhoAtPoints(ps)
wf2 = wf**2
err = rho - wf2
Err = np.sqrt( (err**2).sum()/len(err) )
print " Error ", Err
if plt is not None:
plt.figure(figsize=(5,5))
plt.plot( ps[:,0], wf, label='wf' )
plt.plot( ps[:,0], rho, label='rho' )
plt.plot( ps[:,0], wf2,':', label='wf^2')
plt.legend(); plt.grid()
plt.title( " test_ProjectWf( rho = |wf|^2 )" )
#print "DEBUG 4 "
return Err
def test_Poisson( Etoll=1e-5 ):
print " ===== test_Poisson ( rho = dd_xyz V )"
init_effmc( norb_=2, perOrb_=2, sz=0.5, dist=1.0 )
effmc.eval() # we have to run it to project wavefuction to aux density
dx=0.03; R=3.0
err2, rho, rho_ = effmc.test_Poisson( dx=dx, Rmax=R )
Err = np.sqrt( err2/len(rho) )
if(plt):
xs=np.arange(0,R*2,dx)
plt.figure(figsize=(5,5))
plt.plot( xs, rho , label=('rho ' ) );
plt.plot( xs, rho_, ":", label=('rho_' ) );
plt.title( "test_Poisson( rho = dd_xyz V )" )
plt.legend(); plt.grid()
print " Error ", Err
return Err
def test_OrbInteraction( Etoll=1e-5, iMODE=1 ):
labels=[ "NONE", "Overlap Sij", "Kinetic Tij", "Coulomb Kij", ]
label=labels[iMODE]
print " ===== test_OrbInteraction "+label
init_effmc( norb_=2, perOrb_=1, sz=0.5, dist=1.0 )
effmc.eval() # we have to run it to project wavefuction to aux density
dx=0.2;
#nint=30;
nint=50
err2, Ek, Ek_, f1, f2 = effmc.test_OrbInteraction( iMODE=iMODE, io=0,jo=1, nint=nint, dx=dx, Rmax=5.0, bPrint=0, bSave=0 )
#err2, rho, rho_ = effmc.test_Poisson( dx=dx, Rmax=R, useWf=False )
Err = np.sqrt( err2/len(Ek) )
print "Error ", Err
if(plt):
xs=np.arange(0,dx*nint,dx)
plt.figure(figsize=(10,5))
plt.subplot(1,2,1)
plt.plot( f1, label=('f1' ) );
plt.plot( f2, ":", label=('f2' ) );
plt.title( "test_OrbInteraction funcs "+ label )
plt.legend(); plt.grid()
plt.subplot(1,2,2)
plt.plot( xs, Ek , label=('Ek_ana' ) );
plt.plot( xs, Ek_, ":", label=('Ek_num' ) );
plt.title( "test_OrbInteraction "+label )
#plt.ylim( 0, 20.0 )
plt.legend(); plt.grid()
return Err
def test_Overlap_Sij():
return test_OrbInteraction(iMODE=1)
def test_Kinetic_Tij():
return test_OrbInteraction(iMODE=2)
def test_Coulomb_Kij():
effmc.setSwitches_( normalize=1, kinetic=-1, coulomb=1, exchange=-1, pauli=-1, AA=-1, AE=-1, AECoulomb=-1, AEPauli=-1 )
return test_OrbInteraction(iMODE=3)
def plot_Terms( xs=None, xname="epos", inds=(0,0) ):
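    # Scan one entry (inds) of buffer `xname` over the values xs, evaluate the
    # total energy at each point and record the individual energy terms
    # exposed by the C++ core; optionally plot them.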
nind = len(inds)
szs = (norb,perOrb,3)[:nind]
xbuf = effmc.getBuff( xname, szs )
Ebuf = effmc.getEnergyTerms( )
nterm = len(Ebuf)
Etot = np.zeros(len(xs))
Es = np.zeros((len(xs),nterm))
#fs = np.zeros(len(xs))
for i in range(len(xs)):
if(nind>2):
xbuf[inds[0],inds[1],inds[2]] = xs[i]
else:
xbuf[inds[0],inds[1]] = xs[i]
Etot[i] = effmc.eval()
Es[i,:] = Ebuf[:]
if(plt):
#print "test_ETerms PLOT"
term_names = ["Ek","Eee","EeePaul","EeeExch","Eae","EaePaul","Eaa" ]
plt.figure(figsize=(5,5))
for i in range(nterm):
plt.plot( xs, Es[:,i], label=term_names[i] )
plt.plot ( xs, Etot, "k", lw=4, label="Etot" )
plt.grid();plt.legend();
plt.title(label)
return xs, Etot, Es
def test_ETerms( xname="epos", inds=(0,0), xs=None ):
    if xs is None: xs = xs_glob  # default argument would be captured as None at import time
init_effmc( natom_=1, norb_=2, perOrb_=2, sz=0.25, dist=0.2 )
effmc.setSwitches_( normalize=-1, normForce=-1, kinetic=1, coulomb=1, pauli=-1, AA=1, AE=1, AECoulomb=1, AEPauli=1 )
return plot_Terms( xname=xname, inds=inds, xs=xs )
# ========= Check Forces
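# The checkForces_* / check_dS_* / check_Coulomb_* routines below share one
# pattern: scan a single degree of freedom (position, size or coefficient of
# one Gaussian) over a grid xs, record the energy (or norm) together with the
# analytic derivative reported by the C++ core, and let processForces()
# compare that derivative against the central-difference estimate
# fs_num[i] = (E[i+1]-E[i-1]) / (2*dx).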
def processForces( xs,Es,fs ):
n=len(xs)
dx=xs[1]-xs[0]
fs_num=(Es[2:]-Es[:-2])/(2*dx)
normF2 = (fs**2).sum()
Err = np.sqrt( ( (fs_num-fs[1:-1])**2/normF2 ).sum()/(n-1) )
print label, "Error ", Err
if(plt):
plt.figure(figsize=(5,5))
plt.plot( xs, Es , label="E" )
plt.plot( xs, -fs , label="f_ana" )
plt.plot( xs[1:-1],fs_num, ":", label="f_num" )
#plt.plot( xs[1:-1],(fs_num-fs[1:-1])*10.0, label="(f_ana-f_num)*10.0" )
plt.grid();plt.legend();
plt.title(label)
return Err
def checkForces( xname="ecoef", fname="efcoef", inds=(0,0), xs=None ):
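    # Scan buffer `xname` entry `inds` over xs, recording the total energy and
    # the corresponding analytic force component read back from `fname`.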
nind = len(inds)
if(nind==1):
szs = (natom,)
else:
szs = (norb,perOrb,3)[:nind]
xbuf = effmc.getBuff( xname,szs )
fbuf = effmc.getBuff( fname,szs )
#if xs is None:
# x0 += x0_glob
# xs = np.arange(x0,x0+nx*dx,dx)
Es = np.zeros(len(xs))
fs = np.zeros(len(xs))
#effmc.eval() # ---- This is to make sure initial normalization is not a problem
for i in range(len(xs)):
xbuf[inds]= xs[i]
Es[i] = effmc.eval()
fs[i] = fbuf[inds]
#print "Es ", Es
#print "fs ", fs
return processForces( xs,Es,fs )
# ========= Kinetic
def checkForces_Kinetic_epos( ):
init_effmc( norb_=1, perOrb_=2, sz=0.75, dist=-0.1 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, kinetic=1 )
return checkForces( xname="epos",fname="efpos",inds=(0,0,0), xs=xs_glob )
def checkForces_Kinetic_esize( ):
init_effmc( norb_=2, perOrb_=2, sz=0.5, dist=10.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, kinetic=1 )
return checkForces( xname="esize",fname="efsize",inds=(0,0), xs=ss_glob )
def checkForces_Kinetic_ecoef( ):
init_effmc( norb_=1, perOrb_=2, sz=0.75, dist=-0.1 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, kinetic=1 )
return checkForces( xname="ecoef",fname="efcoef",inds=(0,0), xs=cs_glob )
# ========= Hartree
def checkForces_Hartree_epos( ):
#init_effmc( norb_=2, perOrb_=2, sz=0.2, dist=1.0 )
init_effmc_2x2( sz=0.5 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, coulomb=1 )
return checkForces( xname="epos",fname="efpos",inds=(0,0,0), xs=xs_glob )
def checkForces_Hartree_esize( ):
#init_effmc( norb_=2, perOrb_=2, sz=0.75, dist=0.25 )
init_effmc_2x2( sz=0.5 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, coulomb=1 )
return checkForces( xname="esize",fname="efsize",inds=(0,0), xs=ss_glob )
def checkForces_Hartree_ecoef( ):
#init_effmc( norb_=2, perOrb_=2, sz=0.75, dist=0.25 )
init_effmc_2x2( sz=0.5 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, coulomb=1 )
return checkForces( xname="ecoef",fname="efcoef",inds=(0,0), xs=cs_glob )
# ========= Pauli
def checkForces_Pauli_epos( ):
init_effmc( norb_=2, perOrb_=2, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, pauli=1 )
return checkForces( xname="epos",fname="efpos",inds=(0,0,0), xs=xs_glob )
def checkForces_Pauli_esize( ):
init_effmc( norb_=2, perOrb_=2, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, pauli=1 )
return checkForces( xname="esize",fname="efsize",inds=(0,0), xs=ss_glob )
def checkForces_Pauli_ecoef( ):
init_effmc( norb_=2, perOrb_=2, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, pauli=1 )
return checkForces( xname="ecoef",fname="efcoef",inds=(0,0), xs=cs_glob )
# ========= Atom-Electron Coulomb
def checkForces_AQ_epos( ):
init_effmc( natom_=1, norb_=1, perOrb_=2, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, AE=1, AECoulomb=1, AEPauli=-1 )
return checkForces( xname="epos",fname="efpos",inds=(0,0,0), xs=xs_glob )
def checkForces_AQ_esize( ):
init_effmc( natom_=1, norb_=1, perOrb_=2, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, AE=1, AECoulomb=1, AEPauli=-1 )
return checkForces( xname="esize",fname="efsize",inds=(0,0), xs=ss_glob )
def checkForces_AQ_ecoef( ):
init_effmc( natom_=1, norb_=1, perOrb_=2, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, AE=1, AECoulomb=1, AEPauli=-1 )
return checkForces( xname="ecoef",fname="efcoef",inds=(0,0), xs=cs_glob )
# ========= Atom-Electron Pauli
def checkForces_AP_epos( ):
init_effmc( natom_=1, norb_=1, perOrb_=2, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, AE=1, AECoulomb=-1, AEPauli=+1 )
return checkForces( xname="epos",fname="efpos",inds=(0,0,0), xs=xs_glob )
def checkForces_AP_esize( ):
init_effmc( natom_=1, norb_=1, perOrb_=2, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, AE=1, AECoulomb=-1, AEPauli=+1 )
return checkForces( xname="esize",fname="efsize",inds=(0,0), xs=ss_glob )
def checkForces_AP_ecoef( ):
init_effmc( natom_=1, norb_=1, perOrb_=2, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, AE=1, AECoulomb=-1, AEPauli=+1 )
return checkForces( xname="ecoef",fname="efcoef",inds=(0,0), xs=cs_glob )
# ========= Atom-Atom Electrostatics
def checkForces_AA_pos( ):
init_effmc( natom_=1, norb_=0, perOrb_=0, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=-1, AE=1, AECoulomb=+1, AEPauli=+1 )
return checkForces( xname="apos",fname="aforce",inds=(0,), xs=xs_glob )
# ========= Total
def checkForces_Tot_epos( ):
init_effmc( natom_=1, norb_=2, perOrb_=2, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, kinetic=1, coulomb=1, pauli=-1, AA=1, AE=1, AECoulomb=1, AEPauli=1 )
return checkForces( xname="epos",fname="efpos",inds=(0,0,0), xs=xs_glob )
def checkForces_Tot_esize( ):
init_effmc( natom_=1, norb_=2, perOrb_=2, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, kinetic=1, coulomb=1, pauli=-1, AA=1, AE=1, AECoulomb=1, AEPauli=1 )
return checkForces( xname="esize",fname="efsize",inds=(0,0), xs=ss_glob )
def checkForces_Tot_ecoef( ):
init_effmc( natom_=1, norb_=2, perOrb_=2, sz=0.5, dist=1.0 )
effmc.setSwitches_( normalize=iNorm, normForce=iNorm, kinetic=1, coulomb=1, pauli=-1, AA=1, AE=1, AECoulomb=1, AEPauli=1 )
return checkForces( xname="ecoef",fname="efcoef",inds=(0,0), xs=cs_glob )
# ========= Check Normalization derivatives
def check_dS( xname="ecoef", fname="enfcoef", inds=(0,0), xs=None ):
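    # Same scan as checkForces(), but the recorded quantity is the orbital
    # charge/norm oQs[0], so it is the normalization derivatives in `fname`
    # that get checked.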
nind = len(inds)
szs = (norb,perOrb,3)[:nind]
xbuf = effmc.getBuff( xname,szs )
fbuf = effmc.getBuff( fname,szs )
oQs = effmc.getBuff( "oQs",(norb) )
#x0 += x0_glob
#xs = np.arange(x0,x0+nx*dx,dx)
#xs=xs_glob
Es = np.zeros(len(xs))
fs = np.zeros(len(xs))
effmc.eval() # ---- This is to make sure initial normalization is not a problem
    for i in range(len(xs)):
xbuf[inds] = xs[i]
E = effmc.eval()
Es[i] = oQs[0]
fs[i] = fbuf[inds]
return processForces( xs,Es,fs )
def check_dS_epos( ):
init_effmc( norb_=1, perOrb_=2, sz=1.0, dist=0.5 )
effmc.setSwitches_( normalize=-1, normForce=1 )
return check_dS( xname="epos", fname="enfpos", inds=(0,0,0), xs=xs_glob )
def check_dS_esize( ):
init_effmc( norb_=1, perOrb_=2, sz=1.0, dist=0.5 )
effmc.setSwitches_( normalize=-1, normForce=1 )
return check_dS( xname="esize", fname="enfsize", inds=(0,0), xs=ss_glob )
def check_dS_ecoef( ):
init_effmc( norb_=1, perOrb_=2, sz=1.0, dist=0.5 )
effmc.setSwitches_( normalize=-1, normForce=1 )
return check_dS( xname="ecoef", fname="enfcoef", inds=(0,0), xs=cs_glob )
# ========= Check Normalization derivatives
def check_Coulomb( xname="rhoQ", fname="rhofQ", inds=(0,0), xs=None ):
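    # Scan one auxiliary-density buffer entry and check the analytic
    # derivative of the orbital-pair Coulomb energy coulombOrbPair(0,1).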
nind = len(inds)
szs = (norb,perOrb,3)[:nind]
xbuf = effmc.getBuff( xname,szs )
fbuf = effmc.getBuff( fname,szs )
#x0 += x0_glob
#xs = np.arange(x0,x0+nx*dx,dx)
#print " x0 ", x0
Es = np.zeros(len(xs))
fs = np.zeros(len(xs))
    for i in range(len(xs)):
xbuf[inds] = xs[i]
Es[i] = effmc.coulombOrbPair( 0, 1 )
fs[i] = fbuf[inds]
return processForces( xs,Es,fs )
def check_Coulomb_rhoP_( ):
init_effmc( norb_=2, perOrb_=1, sz=0.8, dist=-0.0 )
return check_Coulomb( xname="rhoP", fname="rhofP", inds=(0,0,0), xs=xs_glob )
def check_Coulomb_rhoS_( ):
init_effmc( norb_=2, perOrb_=1, sz=0.8, dist=-0.5 )
return check_Coulomb( xname="rhoS", fname="rhofS", inds=(0,0), xs=ss_glob )
def check_Coulomb_rhoQ_( ):
init_effmc( norb_=2, perOrb_=1, sz=0.8, dist=-0.1 )
return check_Coulomb( xname="rhoQ", fname="rhofQ", inds=(0,0), xs=cs_glob )
def test_EvalFuncDerivs( r0=None, s0=None, label="test_EvalFuncDerivs"):
    # - Note: check first what is currently in evalFuncDerivs()
# ====== test Gauss::Coulomb()
if r0 is None:
xs = np.arange( 0.001, 5.0, 0.05 )
Es,Fs = effmc.evalFuncDerivs(xs,s0)
else:
xs = np.arange( 0.3, 3.0, 0.005 )
Es,Fs = effmc.evalFuncDerivs(r0,xs)
plt.plot(xs,Es,label="E")
plt.plot(xs,Fs,label="F")
plt.plot(xs[1:-1],(Es[2:]-Es[:-2])/(-2*(xs[1]-xs[0])),':',label="F_num")
plt.legend(); plt.grid();
plt.title(label)
#plt.show();
def test_Hatom():
# ====== test H-atom
# - Note check first what is currently in evalFuncDerivs()
init_effmc( natom_=1, norb_=1, perOrb_=1, sz=0.5, dist=1.0, aQ=-1,aQsz=0.0, aP=0.0,aPsz=0.0 )
effmc.setSwitches_( normalize=-1, normForce=-1, kinetic=1, coulomb=1, pauli=1, AA=1, AE=1, AECoulomb=1, AEPauli=-1 )
xs = np.arange(0.3,3.0,0.05)
plot_Terms( xs=xs, xname="esize" )
import eFF_terms as eff
Ek,Eae = eff.Hatom(xs)
plt.plot(xs, Ek, ':', label="Ek_eFF")
plt.plot(xs, Eae,':', label="Eae_eFF")
plt.plot(xs, Ek+Eae, ':', label="Etot_eFF")
plt.legend()
def checkForces_H_2g( inds=(0,0) ):
global label
#effmc.setSwitches_( normalize=-1, normForce=-1, kinetic=1, AE=1, AECoulomb=1 )
#effmc.setSwitches_( normalize=1, normForce=-1, kinetic=1, AE=-1, AECoulomb=-1 )
#effmc.setSwitches_( normalize=1, normForce=1, kinetic=1, AE=1, AECoulomb=1 )
effmc.setSwitches_( normalize=1, normForce=+1, kinetic=1, AE=-1, AECoulomb=-1 )
effmc.loadFromFile( "../../cpp/sketches_SDL/Molecular/data/H_2g_problem_sym.fgo" )
effmc.loadFromFile( "../../cpp/sketches_SDL/Molecular/data/H_2g_compare.fgo" )
label="epos"; checkForces( xname="epos", fname="efpos", inds=inds, xs=np.arange(-1.0,1.0,0.01) )
#label="epos"; checkForces( xname="epos", fname="efpos", inds=inds, xs=np.arange( 0.1,1.0,0.01) )
label="esize"; checkForces( xname="esize", fname="efsize", inds=inds, xs=np.arange(1.0,2.0,0.01) )
label="ecoef"; checkForces( xname="ecoef", fname="efcoef", inds=inds, xs=np.arange(5.0,7.0,0.01) )
def compareForces_H_2g( inds=(0,0), bNormalize=True ):
import CLCFGO_normalization_derivs_2 as effpy
global label
if bNormalize:
effmc.setSwitches_( normalize=1, normForce=1, kinetic=-1, AE=1, AECoulomb=1, AEPauli=-1 )
else:
effmc.setSwitches_( normalize=-1, normForce=-1, kinetic=-1, AE=1, AECoulomb=1, AEPauli=-1 )
#effmc.setSwitches_( normalize=1, normForce=-1, kinetic=1, AE=-1, AECoulomb=-1 )
#effmc.setSwitches_( normalize=1, normForce=1, kinetic=1, AE=1, AECoulomb=1 )
#effmc.setSwitches_( normalize=1, normForce=+1, kinetic=1, AE=-1, AECoulomb=-1 )
effmc.loadFromFile( "../../cpp/sketches_SDL/Molecular/data/H_2g_compare.fgo" )
effmc.printSetup()
effmc.printAtomsAndElectrons()
Q,E, (dEdxa,dEdsa,dEdca),(dQdxa,dQdsa,dQdca),xs = effpy.evalTest( bNormalize=bNormalize, xa=-0.4,sa=0.35,ca=1.6, xb=+0.5,sb=0.55,cb=-0.4 )
label="epos"; checkForces( xname="epos", fname="efpos", inds=inds, xs=np.arange(-2.0,3.0,0.1) )
effpy.plotNumDeriv( xs, E, dEdxa, F_=None, title="", bNewFig=False )
plt.grid()
#label="esize"; checkForces( xname="esize", fname="efsize", inds=inds, xs=np.arange(1.0,2.0,0.01) )
#label="ecoef"; checkForces( xname="ecoef", fname="efcoef", inds=inds, xs=np.arange(5.0,7.0,0.01) )
def test_H2molecule():
'''
according to http://aip.scitation.org/doi/10.1063/1.3272671
s_opt = 0.94 A (1.77 bohr)
lHHopt = 1.47 A ()
E_opt = -2.91 [eV] (67kcal/mol)
'''
# ====== test H-atom
init_effmc_H2mol_1g()
#init_effmc( natom_=1, norb_=1, perOrb_=1, sz=0.5, dist=1.0, aQ=-1,aQsz=0.0, aP=0.0,aPsz=0.0 )
#effmc.setSwitches_( normalize=-1, normForce=-1, kinetic=1, coulomb=1, pauli=1, AA=1, AE=1, AECoulomb=1, AEPauli=-1 )
#effmc.setSwitches_( normalize=-1, normForce=-1, kinetic=1, coulomb=1, pauli=1, AA=1, AE=1, AECoulomb=1, AEPauli=-1 )
xs = np.arange(-3.0,3.0,0.05)
plot_Terms( xs=xs, xname="epos", inds=(0,0) )
def opt_He_Triplet( xs=None, s0=0.5, s1=0.5 ):
#effmc.init(1,2,2,1)
#effmc.loadFromFile( "../../cpp/sketches_SDL/Molecular/data/He_2g_triplet_asym.fgo" )
effmc.init(1,2,2,1)
effmc.getBuffs( 1, 2, 2 )
effmc.setSwitches_( normalize=1, normForce=1, kinetic=1, coulomb=1, pauli=1, AA=-1, AE=1, AECoulomb=1, AEPauli=1 )
#epos = effmc.epos
#esize = effmc.esize
#ecoef = effmc.ecoef
#Ebuf = effmc.Ebuf
#apos = effmc.apos
#aPars = effmc.aPars
ff = effmc
ff.apos [:,:]=0.0
ff.aPars[:,0]=-2;ff.aPars[:,1]=0.2;ff.aPars[:,2]=0.2;ff.aPars[:,3]=0.0;
nterm = len(ff.Ebuf)
Es = np.zeros((len(xs),nterm+1))
Es_ = np.zeros((len(xs),nterm+1))
# -- Orb1
ff.epos [0,:,:] = 0.0
ff.esize[0,:] = s0
ff.ecoef[0,:] = 1.0
# -- Orb2
ff.esize[1,:] = s1
ff.ecoef[1,0] = 1.0
ff.ecoef[1,1] = -1.0
for i in range(len(xs)):
ff.epos[1,0,0] = +xs[i]
ff.epos[1,1,0] = -xs[i]
Es[i,0 ] = ff.eval()
Es[i,1:] = ff.Ebuf[:]
ff.ecoef[1,0] = 1.0
ff.ecoef[1,1] = 1.0
for i in range(len(xs)):
ff.epos[1,0,0] = +xs[i]
ff.epos[1,1,0] = -xs[i]
Es_[i,0 ] = ff.eval()
Es_[i,1:] = ff.Ebuf[:]
if(plt):
#print "test_ETerms PLOT"
term_clr = ['k', 'r', 'b', 'm', '', 'g', '', '' ]
term_mask = [1, 1, 1, 1, 0, 1, 0, 0 ]
term_names = ["Etot","Ek","Eee","EeePaul","EeeExch","Eae","EaePaul","Eaa" ]
plt.figure(figsize=(5,5))
for i in range(nterm):
if term_mask[i]==1: plt.plot( xs, Es [:,i],'-', c=term_clr[i], label=term_names[i] )
for i in range(nterm):
if term_mask[i]==1: plt.plot( xs, Es_[:,i],':', c=term_clr[i] )
plt.grid();plt.legend();
plt.title(label)
return xs, Es, Es_
if __name__ == "__main__":
#np.random.seed( 451)
#np.random.seed( 1) # good
np.random.seed( 3)
#np.random.seed( 5) # exact
#np.random.seed( 8)
import matplotlib.pyplot as plt_
global plt,label
#global dx,nx,x0_glob
global ss_glob,xs_glob,cs_glob
global rnd_pos, rnd_size, rnd_coef
global iNorm
x0_glob = 0.00001
dx=0.05
nx=100
#nx=50
#nx=10
#nx=2
plt=plt_
bPrintInfo = True
rnd_pos = 0.0; rnd_size = 0.0; rnd_coef = 0.0
#rnd_pos = 0.2; rnd_size = 0.2; rnd_coef = 0.2
#rnd_pos = 0.2; rnd_size = 0.2; rnd_coef = 0.5
#rnd_pos = 1.0; rnd_size = 0.2; rnd_coef = 0.2
ss_glob = np.arange( 0.1,3.0,0.01)
xs_glob = np.arange(-2.0,3.0,0.01)
cs_glob = np.arange(-1.0,2.0,0.01)
#iNorm = +1
#test_Hatom() #; plt.show(); exit(0)
compareForces_H_2g( inds=(0,0), bNormalize=True ); plt.show(); exit(0)
#compareForces_H_2g( inds=(0,0), bNormalize=False ); plt.show(); exit(0)
#checkForces_H_2g( inds=(0,0) ); plt.show(); exit(0)
#dx=0.02
#opt_He_Triplet( np.arange(x0_glob,x0_glob+dx*100,dx), s0=0.5, s1=0.7 )
#test_EvalFuncDerivs() # plt.show; exit()
#test_EvalFuncDerivs( s0=0.8 )
effmc.setPauliMode(0) # E = K*S^2
#effmc.setPauliMode(2) # E = Sij^2/(1-Sij^2) * ( Tii + Tjj - 2Tij/Sij )
#effmc.setPauliMode(3) # E=T
#effmc.setPauliMode(4) # E=S
#effmc.setPauliMode(5) # Ep = ( Sij/(1-Sij^2) )* Tij
#effmc.setPauliMode(6) # Ep = Sij*Tij
#test_ETerms( xname="epos", inds=(0,0), x0=0 ); # plt.show(); exit(0)
#test_H2molecule()
tests_results = []
tests_funcs = []
tests_funcs += [ test_ProjectWf, test_Poisson ]
#tests_funcs += [ check_dS_epos , check_dS_esize , check_dS_ecoef ]
#tests_funcs += [ checkForces_Kinetic_epos, checkForces_Kinetic_esize, checkForces_Kinetic_ecoef ]
#tests_funcs += [ checkForces_Pauli_epos , checkForces_Pauli_esize , checkForces_Pauli_ecoef ]
tests_funcs += [ checkForces_Hartree_epos, checkForces_Hartree_esize, checkForces_Hartree_ecoef ]
#tests_funcs += [ checkForces_AQ_epos , checkForces_AQ_esize , checkForces_AQ_ecoef ]
#tests_funcs += [ checkForces_AP_epos , checkForces_AP_esize , checkForces_AP_ecoef ]
#tests_funcs += [ checkForces_AA_pos ]
#tests_funcs += [ checkForces_Tot_epos, checkForces_Tot_esize , checkForces_Tot_ecoef ]
#tests_funcs += [ check_Coulomb_rhoP_, check_Coulomb_rhoS_, check_Coulomb_rhoQ_ ]
#tests_funcs += [ check_Coulomb_rhoP_ ]
#tests_funcs += [ check_Coulomb_rhoP_, check_Coulomb_rhoS_ ]
#tests_funcs += [ test_Overlap_Sij, test_Kinetic_Tij, test_Coulomb_Kij ]
for test_func in tests_funcs:
label = test_func.__name__
effmc.setSwitches_( normalize=1, normForce=-1, kinetic=-1, coulomb=-1, exchange=-1, pauli=-1, AA=-1, AE=-1, AECoulomb=-1, AEPauli=-1 )
tests_results.append( test_func() )
print ""
print " ##### Test Result Summary ##### "
for i,test_func in enumerate(tests_funcs):
print test_func.__name__," Error: ", tests_results[i]
plt.show()
print( " ===== ALL DONE !!! \n" )
| mit |
bundgus/python-playground | matplotlib-playground/examples/pylab_examples/contourf_demo.py | 4 | 3246 | #!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
origin = 'lower'
#origin = 'upper'
delta = 0.025
x = y = np.arange(-3.0, 3.01, delta)
X, Y = np.meshgrid(x, y)
Z1 = plt.mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0)
Z2 = plt.mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1)
Z = 10 * (Z1 - Z2)
nr, nc = Z.shape
# put NaNs in one corner:
Z[-nr//6:, -nc//6:] = np.nan
# contourf will convert these to masked
Z = np.ma.array(Z)
# mask another corner:
Z[:nr//6, :nc//6] = np.ma.masked
# mask a circle in the middle:
interior = np.sqrt((X**2) + (Y**2)) < 0.5
Z[interior] = np.ma.masked
# We are using automatic selection of contour levels;
# this is usually not such a good idea, because they don't
# occur on nice boundaries, but we do it here for purposes
# of illustration.
CS = plt.contourf(X, Y, Z, 10,
#[-1, -0.1, 0, 0.1],
#alpha=0.5,
cmap=plt.cm.bone,
origin=origin)
# Note that in the following, we explicitly pass in a subset of
# the contour levels used for the filled contours. Alternatively,
# we could pass in additional levels to provide extra resolution,
# or leave out the levels kwarg to use all of the original levels.
CS2 = plt.contour(CS, levels=CS.levels[::2],
colors='r',
origin=origin,
hold='on')
plt.title('Nonsense (3 masked regions)')
plt.xlabel('word length anomaly')
plt.ylabel('sentence length anomaly')
# Make a colorbar for the ContourSet returned by the contourf call.
cbar = plt.colorbar(CS)
cbar.ax.set_ylabel('verbosity coefficient')
# Add the contour line levels to the colorbar
cbar.add_lines(CS2)
plt.figure()
# Now make a contour plot with the levels specified,
# and with the colormap generated automatically from a list
# of colors.
levels = [-1.5, -1, -0.5, 0, 0.5, 1]
CS3 = plt.contourf(X, Y, Z, levels,
colors=('r', 'g', 'b'),
origin=origin,
extend='both')
# Our data range extends outside the range of levels; make
# data below the lowest contour level yellow, and above the
# highest level cyan:
CS3.cmap.set_under('yellow')
CS3.cmap.set_over('cyan')
CS4 = plt.contour(X, Y, Z, levels,
colors=('k',),
linewidths=(3,),
origin=origin)
plt.title('Listed colors (3 masked regions)')
plt.clabel(CS4, fmt='%2.1f', colors='w', fontsize=14)
# Notice that the colorbar command gets all the information it
# needs from the ContourSet object, CS3.
plt.colorbar(CS3)
# Illustrate all 4 possible "extend" settings:
extends = ["neither", "both", "min", "max"]
cmap = plt.cm.get_cmap("winter")
cmap.set_under("magenta")
cmap.set_over("yellow")
# Note: contouring simply excludes masked or nan regions, so
# instead of using the "bad" colormap value for them, it draws
# nothing at all in them. Therefore the following would have
# no effect:
# cmap.set_bad("red")
fig, axs = plt.subplots(2, 2)
for ax, extend in zip(axs.ravel(), extends):
cs = ax.contourf(X, Y, Z, levels, cmap=cmap, extend=extend, origin=origin)
fig.colorbar(cs, ax=ax, shrink=0.9)
ax.set_title("extend = %s" % extend)
ax.locator_params(nbins=4)
plt.show()
| mit |
bjanesh/odi-tools | odi_phot_process.py | 1 | 2508 | #!/usr/bin/env python
import sys, os, glob, string
import numpy as np
import astropy as ast
import matplotlib.pyplot as plt
from pyraf import iraf
from tqdm import tqdm
import odi_config as odi
import glob
import shutil
import pandas as pd
try:
    object_str, filters, instrument, images, new_extension, remove_tpv_flag, trim_image_flag, wcs_flag, trim_section, airmasses = odi.photcfgparse('phot_config.yaml')
except IOError:
print('phot_config.yaml does not exist, quitting...')
exit()
images_ = []
new_images_=[]
for filt in images:
for key in images[filt]:
images_.append(images[filt][key])
new_images_.append(images[filt][key].strip('.fits') + new_extension)
nopv_images = new_images_
for i,img in enumerate(images_):
if trim_image_flag == True:
x1, x2, y1, y2 = trim_section[0], trim_section[1], trim_section[2], trim_section[3]
odi.trim_img(img,x1,x2,y1,y2)
if remove_tpv_flag == True:
        img_nopv = odi.tpv_remove(img.strip('.fits')+'.trim.fits')
nopv_images[i] = img_nopv
if wcs_flag == True:
if not os.path.isfile('full_wcs_fix.done'):
odi.full_sdssmatch(nopv_images[0],nopv_images[1],instrument,gmaglim=23.0)
odi.fix_wcs_full(nopv_images[0],coords=nopv_images[0][:-5]+'.wcs.coo')
odi.fix_wcs_full(nopv_images[1],coords=nopv_images[0][:-5]+'.wcs.coo')
with open('full_wcs_fix.done','w+') as f:
print('', file=f)
apcor_values = {}
apcor_stds = {}
apcor_sems = {}
fwhm_values = {}
for i,img in enumerate(images_):
median_fwhm,median_bg_mean,median_bg_median,median_bg_std = odi.read_proc('derived_props.txt',filters[i])
img = nopv_images[i]
peaks,gfwhms = odi.getfwhm_full_sdss(img)
median_gfwhm = np.median(gfwhms[np.where(gfwhms < 20.0)])
print(median_gfwhm)
fwhm_values[i] = median_gfwhm
odi.sdss_phot_full(img,median_gfwhm,airmasses[i])
apcor, apcor_std, apcor_sem = odi.apcor_sdss(img, median_gfwhm, inspect=False)
apcor_values[i] = apcor
apcor_stds[i] = apcor_std
apcor_sems[i] = apcor_sem
odi.find_sources_full(img,median_gfwhm,median_bg_std,threshold=3.5)
odi.phot_sources_full(img,median_gfwhm,airmasses[i],1.0)
odi.phot_sources_xy2sky(img,instrument)
photcalFile = odi.calibrate_match(nopv_images[0],nopv_images[1],fwhm_values[0],fwhm_values[1],airmasses[0],airmasses[1])
odi.match_phot_srcs(nopv_images[0],nopv_images[1])
odi.calc_calibrated_mags(apcor_values[0], 0, apcor_values[1], 0, photcalFile, object_str)
| bsd-3-clause |
jlegendary/scikit-learn | examples/model_selection/plot_train_error_vs_test_error.py | 349 | 2577 | """
=========================
Train error vs Test error
=========================
Illustration of how the performance of an estimator on unseen data (test data)
is not the same as the performance on training data. As the regularization
increases the performance on train decreases while the performance on test
is optimal within a range of values of the regularization parameter.
The example with an Elastic-Net regression model and the performance is
measured using the explained variance a.k.a. R^2.
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# License: BSD 3 clause
import numpy as np
from sklearn import linear_model
###############################################################################
# Generate sample data
n_samples_train, n_samples_test, n_features = 75, 150, 500
np.random.seed(0)
coef = np.random.randn(n_features)
coef[50:] = 0.0  # only the top 50 features (10%) are impacting the model
X = np.random.randn(n_samples_train + n_samples_test, n_features)
y = np.dot(X, coef)
# Split train and test data
X_train, X_test = X[:n_samples_train], X[n_samples_train:]
y_train, y_test = y[:n_samples_train], y[n_samples_train:]
###############################################################################
# Compute train and test errors
alphas = np.logspace(-5, 1, 60)
enet = linear_model.ElasticNet(l1_ratio=0.7)
train_errors = list()
test_errors = list()
for alpha in alphas:
enet.set_params(alpha=alpha)
enet.fit(X_train, y_train)
train_errors.append(enet.score(X_train, y_train))
test_errors.append(enet.score(X_test, y_test))
i_alpha_optim = np.argmax(test_errors)
alpha_optim = alphas[i_alpha_optim]
print("Optimal regularization parameter : %s" % alpha_optim)
# Estimate the coef_ on full data with optimal regularization parameter
enet.set_params(alpha=alpha_optim)
coef_ = enet.fit(X, y).coef_
###############################################################################
# Plot results functions
import matplotlib.pyplot as plt
plt.subplot(2, 1, 1)
plt.semilogx(alphas, train_errors, label='Train')
plt.semilogx(alphas, test_errors, label='Test')
plt.vlines(alpha_optim, plt.ylim()[0], np.max(test_errors), color='k',
linewidth=3, label='Optimum on test')
plt.legend(loc='lower left')
plt.ylim([0, 1.2])
plt.xlabel('Regularization parameter')
plt.ylabel('Performance')
# Show estimated coef_ vs true coef
plt.subplot(2, 1, 2)
plt.plot(coef, label='True coef')
plt.plot(coef_, label='Estimated coef')
plt.legend()
plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.26)
plt.show()
| bsd-3-clause |
c-north/hdbscan | hdbscan/tests/test_hdbscan.py | 1 | 5899 | """
Tests for HDBSCAN clustering algorithm
Shamelessly based on (i.e. ripped off from) the DBSCAN test code
"""
#import pickle
from nose.tools import assert_less
import numpy as np
from scipy.spatial import distance
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_not_in
from hdbscan import HDBSCAN
from hdbscan import hdbscan
from sklearn.cluster.tests.common import generate_clustered_data
from scipy.stats import mode
from sklearn import datasets
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters, n_samples_per_cluster=50)
def relabel(labels):
result = np.zeros(labels.shape[0])
labels_to_go = set(labels)
i = 0
new_l = 0
while len(labels_to_go) > 0:
l = labels[i]
if l in labels_to_go:
result[labels == l] = new_l
new_l += 1
labels_to_go.remove(l)
i += 1
return result
def generate_noisy_data():
blobs, _ = datasets.make_blobs(n_samples=200,
centers=[(-0.75,2.25), (1.0, 2.0)],
cluster_std=0.25)
moons, _ = datasets.make_moons(n_samples=200, noise=0.05)
noise = np.random.uniform(-1.0, 3.0, (50, 2))
return np.vstack([blobs, moons, noise])
def homogeneity(labels1, labels2):
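    # Rough symmetric mismatch count between two labellings: for each cluster
    # in one labelling, count the points whose label in the other labelling
    # differs from the majority (mode) label; the double-counted total is
    # halved before being returned.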
num_missed = 0.0
for label in set(labels1):
matches = labels2[labels1 == label]
match_mode = mode(matches).mode[0]
num_missed += np.sum(matches != match_mode)
for label in set(labels2):
matches = labels1[labels2 == label]
match_mode = mode(matches).mode[0]
num_missed += np.sum(matches != match_mode)
return num_missed / 2.0
def test_hdbscan_distance_matrix():
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
labels, p, ctree, ltree, mtree = hdbscan(D, metric='precomputed')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels) # ignore noise
assert_equal(n_clusters_1, n_clusters)
labels = HDBSCAN(metric="precomputed").fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_hdbscan_feature_vector():
labels, p, ctree, ltree, mtree = hdbscan(X)
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
labels = HDBSCAN().fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_hdbscan_no_clusters():
labels, p, ctree, ltree, mtree = hdbscan(X, min_cluster_size=len(X)+1)
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, 0)
labels = HDBSCAN(min_cluster_size=len(X)+1).fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, 0)
def test_hdbscan_callable_metric():
# metric is the function reference, not the string key.
metric = distance.euclidean
labels, p, ctree, ltree, mtree = hdbscan(X, metric=metric)
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
labels = HDBSCAN(metric=metric).fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_hdbscan_input_lists():
X = [[1., 2.], [3., 4.]]
HDBSCAN().fit(X) # must not raise exception
def test_hdbscan_boruvka_kdtree_matches():
data = generate_noisy_data()
labels_prims, p, ctree, ltree, mtree = hdbscan(data, algorithm='generic')
labels_boruvka, p, ctree, ltree, mtree = hdbscan(data, algorithm='boruvka_kdtree')
num_mismatches = homogeneity(labels_prims, labels_boruvka)
assert_less(num_mismatches / float(data.shape[0]), 0.015)
labels_prims = HDBSCAN(algorithm='generic').fit_predict(data)
labels_boruvka = HDBSCAN(algorithm='boruvka_kdtree').fit_predict(data)
num_mismatches = homogeneity(labels_prims, labels_boruvka)
assert_less(num_mismatches / float(data.shape[0]), 0.015)
def test_hdbscan_boruvka_balltree_matches():
data = generate_noisy_data()
labels_prims, p, ctree, ltree, mtree = hdbscan(data, algorithm='generic')
labels_boruvka, p, ctree, ltree, mtree = hdbscan(data, algorithm='boruvka_balltree')
num_mismatches = homogeneity(labels_prims, labels_boruvka)
assert_less(num_mismatches / float(data.shape[0]), 0.015)
labels_prims = HDBSCAN(algorithm='generic').fit_predict(data)
labels_boruvka = HDBSCAN(algorithm='boruvka_balltree').fit_predict(data)
num_mismatches = homogeneity(labels_prims, labels_boruvka)
assert_less(num_mismatches / float(data.shape[0]), 0.015)
def test_hdbscan_badargs():
assert_raises(ValueError,
hdbscan,
X='fail')
assert_raises(ValueError,
hdbscan,
X=None)
assert_raises(ValueError,
hdbscan,
X, min_cluster_size='fail')
assert_raises(ValueError,
hdbscan,
X, min_samples='fail')
assert_raises(ValueError,
hdbscan,
X, min_samples=-1)
assert_raises(ValueError,
hdbscan,
X, metric='imperial')
assert_raises(ValueError,
hdbscan,
X, metric=None)
assert_raises(ValueError,
hdbscan,
X, metric='minkowski', p=-1)
### Probably not applicable now #########################
#def test_dbscan_sparse():
#def test_dbscan_balltree():
#def test_pickle():
#def test_dbscan_core_samples_toy():
#def test_boundaries():
| bsd-3-clause |
tomevans/limbdark | atlas.py | 1 | 7392 | from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import os, sys, pdb
#import pidly
# Overview:
#
# TME 26 Aug 2016
# These limb darkening routines should be made into proper modular objects.
# They're already set up in a way that should make this fairly straightforward.
# This is on the to-do list.
#
# TME 13 Jan 2014
#
# Before using the routines in this module, you need to make sure that you
# have the appropriate input files downloaded from Robert Kurucz's website.
# To do this, go to:
#
# http://kurucz.harvard.edu
#
# and navigate to the appropriate file. For the angular intensities, first
# on "Grids of model atmospheres" and then select the appropriate category
# of model. These have names of the form "GRIDxyyz" where x is either "P"
# for + or "M" for -, yy is the metallicity [M/H] (or [Fe/H]), and z is any
# additional suffix for further subdivisions. For example, "GRIDP03" will take
# you to the models with [M/H]=+0.3, whereas GRIDM30 take you to the models
# with [M/H]=-3.0. Note that between the two yy digits, it is assumed there is
# a decimal point. Once you're onto the next page, there will be a list of
# files, many of which are quite large. The ones that give emergent intensity
# as a function of mu=cos(theta), where theta is the angle between the line of
# sight and the emergent intensity, have names of the form i*.pck, i.e. an "i"
# prefix - see previous page you were just on for further details. Once you've
# decided which file you'd like to download, you can right click on it and
# choose "Save link as...", but I prefer curl (or wget), using something like:
#
# >> curl -O http://kurucz.harvard.edu/grids/gridP03/ip03k2.pck
#
# Then you have all you need to start estimating limb darkening coefficients
# with the routines below.
#
# A basic calling sequence for the routines in this module would be:
#
# 1>> mu, wav, intens = atlas.read_grid( model_filepath='im01k2new.pck', \
# teff=6000, logg=4., new_grid=False )
# 2>> tr_curve = np.loadtxt( tr_filename )
# 3>> tr_wavs = tr_curve[:,0] # convert these wavelengths to nm if necessary
# 4>> tr_vals = tr_curve[:,1]
# 5>> ld_coeffs = ld.fit_law( mu, wav, intens, tr_wavs, \
# cuton_wav_nm=800, cutoff_wav_nm=1000, \
# passband_sensitivity=tr_vals, plot_fits=True )
#
# Stepping through each of the above commands:
# 1>> Reads in the model grid. Note that teff and logg have to correspond
# to actual points on the grid - no interpolation is performed. The
# 'new_grid' flag refers to whether or not the grid is one of the new
# ones, which would be indicated in the filename. There seems to have
# been a few little hiccups with the formatted of these new grids, so
# the routine has to account for these when it reads them in. The output
# is then an array for the mu values, an array for the wavelength values
# in units of nm, and an array for the intensities at each point spanned
# by the mu-wavelength grid.
# 2>> Reads in an external file containing the passband transmission function
# as a function of wavelength.
# 3>> Unpacks the wavelengths of the passband, which should be in nm.
# 4>> Unpacks the relative transmission of the passband at the different
# wavelengths.
# 5>> Evaluates the limb darkening parameters for four separate laws -
# linear, quadratic, three-parameter nonlinear and four-parameter nonlinear.
# Note that the keyword argument 'passband_sensitivty' can be set to None
# if you want a simple boxcar transmission function.
#
def read_grid( model_filepath=None, teff=None, logg=None, new_grid=False ):
"""
Given the full path to an ATLAS model grid, along with values for
Teff and logg, this routine extracts the values for the specific
intensity as a function of mu=cos(theta), where theta is the angle
    between the line of sight and the emergent radiation. Calling is:
      mu, wav, intensity = atlas.read_grid( model_filepath='filename.pck', \
                                            teff=6000, logg=4.5 )
Note that the input grids correspond to a given metallicity and
vturb parameter. So those parameters are controlled by defining
the model_filepath input appropriately.
The units of the output variables are:
mu - unitless
wav - nm
intensity - erg/cm**2/s/nm/ster
Another point to make is that there are some minor issues with the
formatting of 'new' ATLAS grids on the Kurucz website. This
routine will fail on those if you simply download them and feed
them as input, unchanged. This is because:
- They have an extra blank line at the start of the file.
- More troublesome, the last four wavelengths of each grid
are printed on a single line, which screws up the expected
structure that this routine requires to read in the file.
    This is why the 'new_grid' flag is needed: when it is set, the routine
    adjusts the expected header/wavelength row counts and discards the
    malformed final row.
"""
# Row dimensions of the input file:
if new_grid==False:
nskip = 0 # number of lines to skip at start of file
nhead = 3 # number of header lines for each grid point
nwav = 1221 # number of wavelengths for each grid point
else:
nskip = 0 # number of lines to skip at start of file
nhead = 4 # number of header lines for each grid point
nwav = 1216 # number of wavelengths for each grid point
nang = 17 # number of angles for each grid point
# Note: The 'new' model grids don't quite have the
# same format, so they won't work for this code.
print( '\nReading in the model grid...' )
ifile = open( model_filepath, 'rU' )
ifile.seek( 0 )
rows = ifile.readlines()
ifile.close()
rows = rows[nskip:]
nrows = len( rows )
print( 'Done.' )
# The angles, where mu=cos(theta):
mus = np.array( rows[nskip+nhead-1].split(), dtype=float )
# Read in the teff, logg and vturb values
# for each of the grid points:
row_ixs = np.arange( nrows )
header_ixs = row_ixs[ row_ixs%( nhead + nwav )==0 ]
if new_grid==True:
header_ixs += 1
header_ixs = header_ixs[:-1]
ngrid = len( header_ixs )
teff_grid = np.zeros( ngrid )
logg_grid = np.zeros( ngrid )
for i in range( ngrid ):
header = rows[header_ixs[i]].split()
teff_grid[i] = float( header[1] )
logg_grid[i] = header[3]
# Identify the grid point of interest:
logg_ixs = ( logg_grid==logg )
teff_ixs = ( teff_grid==teff )
# Extract the intensities at each of the wavelengths
# as a function of wavelength:
grid_ix = ( logg_ixs*teff_ixs )
row_ix = int( header_ixs[grid_ix] )
grid_lines = rows[row_ix+nhead:row_ix+nhead+nwav]
grid = []
for i in range( nwav ):
grid += [ grid_lines[i].split() ]
if new_grid==True:
grid=grid[:-1]
grid = np.array( np.vstack( grid ), dtype=float )
wavs_nm = grid[:,0]
intensities = grid[:,1:]
nmus = len( mus )
for i in range( 1, nmus ):
intensities[:,i] = intensities[:,i]*intensities[:,0]/100000.
# Convert the intensities from per unit frequency to
# per nm in wavelength:
for i in range( nmus ):
intensities[:,i] /= ( wavs_nm**2. )
return mus, wavs_nm, intensities
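# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only): it assumes a grid file such as
# 'ip03k2.pck' (see the download notes at the top of this module) sits in the
# working directory, and that teff/logg match actual grid points - no
# interpolation is done. Adjust the filename and parameters to your grid.
if __name__=='__main__':
    mus, wavs_nm, intensities = read_grid( model_filepath='ip03k2.pck', \
                                           teff=6000, logg=4.5, new_grid=False )
    # Plot the emergent intensity versus mu at a few sample wavelengths:
    for ix in [ 100, 400, 800 ]:
        plt.plot( mus, intensities[ix,:], label='%.0f nm'%( wavs_nm[ix] ) )
    plt.xlabel( 'mu = cos(theta)' )
    plt.ylabel( 'intensity (erg/cm**2/s/nm/ster)' )
    plt.legend()
    plt.show()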
| gpl-2.0 |
aetilley/scikit-learn | benchmarks/bench_lasso.py | 297 | 3305 | """
Benchmarks of Lasso vs LassoLars
First, we fix a training set and increase the number of
samples. Then we plot the computation time as function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import gc
from time import time
import numpy as np
from sklearn.datasets.samples_generator import make_regression
def compute_bench(alpha, n_samples, n_features, precompute):
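    # Fit Lasso (coordinate descent) and LassoLars on every combination of
    # n_samples x n_features and return the two lists of wall-clock fit
    # times, in the order the combinations are visited.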
lasso_results = []
lars_lasso_results = []
it = 0
for ns in n_samples:
for nf in n_features:
it += 1
print('==================')
print('Iteration %s of %s' % (it, max(len(n_samples),
len(n_features))))
print('==================')
n_informative = nf // 10
X, Y, coef_ = make_regression(n_samples=ns, n_features=nf,
n_informative=n_informative,
noise=0.1, coef=True)
X /= np.sqrt(np.sum(X ** 2, axis=0)) # Normalize data
gc.collect()
print("- benchmarking Lasso")
clf = Lasso(alpha=alpha, fit_intercept=False,
precompute=precompute)
tstart = time()
clf.fit(X, Y)
lasso_results.append(time() - tstart)
gc.collect()
print("- benchmarking LassoLars")
clf = LassoLars(alpha=alpha, fit_intercept=False,
normalize=False, precompute=precompute)
tstart = time()
clf.fit(X, Y)
lars_lasso_results.append(time() - tstart)
return lasso_results, lars_lasso_results
if __name__ == '__main__':
from sklearn.linear_model import Lasso, LassoLars
import pylab as pl
alpha = 0.01 # regularization parameter
n_features = 10
list_n_samples = np.linspace(100, 1000000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, list_n_samples,
[n_features], precompute=True)
pl.figure('scikit-learn LASSO benchmark results')
pl.subplot(211)
pl.plot(list_n_samples, lasso_results, 'b-',
label='Lasso')
pl.plot(list_n_samples, lars_lasso_results, 'r-',
label='LassoLars')
pl.title('precomputed Gram matrix, %d features, alpha=%s' % (n_features, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of samples')
pl.ylabel('Time (s)')
pl.axis('tight')
n_samples = 2000
list_n_features = np.linspace(500, 3000, 5).astype(np.int)
lasso_results, lars_lasso_results = compute_bench(alpha, [n_samples],
list_n_features, precompute=False)
pl.subplot(212)
pl.plot(list_n_features, lasso_results, 'b-', label='Lasso')
pl.plot(list_n_features, lars_lasso_results, 'r-', label='LassoLars')
pl.title('%d samples, alpha=%s' % (n_samples, alpha))
pl.legend(loc='upper left')
pl.xlabel('number of features')
pl.ylabel('Time (s)')
pl.axis('tight')
pl.show()
| bsd-3-clause |
decebel/librosa | tests/test_onset.py | 2 | 5297 | #!/usr/bin/env python
# CREATED:2013-03-11 18:14:30 by Brian McFee <brm2132@columbia.edu>
# unit tests for librosa.beat
from __future__ import print_function
from nose.tools import raises, eq_
# Disable cache
import os
try:
os.environ.pop('LIBROSA_CACHE_DIR')
except:
pass
import matplotlib
matplotlib.use('Agg')
import numpy as np
import librosa
__EXAMPLE_FILE = 'data/test1_22050.wav'
def test_onset_strength_audio():
def __test(y, sr, feature, n_fft, hop_length, detrend, centering):
oenv = librosa.onset.onset_strength(y=y, sr=sr,
S=None,
detrend=detrend,
centering=centering,
aggregate=aggregate,
feature=feature,
n_fft=n_fft,
hop_length=hop_length)
assert oenv.ndim == 1
S = librosa.feature.melspectrogram(y=y,
n_fft=n_fft,
hop_length=hop_length)
target_shape = S.shape[-1]
if centering:
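            # centering is expected to lengthen the onset envelope by
            # n_fft // (2 * hop_length) frames (half-window padding).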
target_shape += n_fft // (2 * hop_length)
if not detrend:
assert np.all(oenv >= 0)
eq_(oenv.shape[-1], target_shape)
y, sr = librosa.load(__EXAMPLE_FILE)
for feature in [None,
librosa.feature.melspectrogram,
librosa.feature.chroma_stft]:
for n_fft in [512, 2048]:
for hop_length in [n_fft // 2, n_fft // 4]:
for detrend in [False, True]:
for centering in [False, True]:
for aggregate in [None, np.mean, np.max]:
yield (__test, y, sr, feature, n_fft,
hop_length, detrend, centering)
tf = raises(librosa.ParameterError)(__test)
yield (tf, None, sr, feature, n_fft,
hop_length, detrend, centering)
def test_onset_strength_spectrogram():
def __test(S, sr, feature, n_fft, hop_length, detrend, centering):
oenv = librosa.onset.onset_strength(y=None, sr=sr,
S=S,
detrend=detrend,
centering=centering,
aggregate=aggregate,
feature=feature,
n_fft=n_fft,
hop_length=hop_length)
assert oenv.ndim == 1
target_shape = S.shape[-1]
if centering:
target_shape += n_fft // (2 * hop_length)
if not detrend:
assert np.all(oenv >= 0)
eq_(oenv.shape[-1], target_shape)
y, sr = librosa.load(__EXAMPLE_FILE)
S = librosa.feature.melspectrogram(y=y, sr=sr)
for feature in [None,
librosa.feature.melspectrogram,
librosa.feature.chroma_stft]:
for n_fft in [512, 2048]:
for hop_length in [n_fft // 2, n_fft // 4]:
for detrend in [False, True]:
for centering in [False, True]:
for aggregate in [None, np.mean, np.max]:
yield (__test, S, sr, feature, n_fft,
hop_length, detrend, centering)
tf = raises(librosa.ParameterError)(__test)
yield (tf, None, sr, feature, n_fft,
hop_length, detrend, centering)
def test_onset_detect_real():
def __test(y, sr, oenv, hop_length):
onsets = librosa.onset.onset_detect(y=y, sr=sr, onset_envelope=oenv,
hop_length=hop_length)
assert np.all(onsets > 0)
assert np.all(onsets < len(y) * sr // hop_length)
if oenv is not None:
assert np.all(onsets < len(oenv))
y, sr = librosa.load(__EXAMPLE_FILE)
# Test with no signal
yield raises(librosa.ParameterError)(__test), None, sr, None, 512
for hop_length in [64, 512, 2048]:
yield __test, y, sr, None, hop_length
oenv = librosa.onset.onset_strength(y=y, sr=sr, hop_length=hop_length)
yield __test, y, sr, oenv, hop_length
def test_onset_detect_const():
def __test(y, sr, oenv, hop_length):
onsets = librosa.onset.onset_detect(y=y, sr=sr, onset_envelope=oenv,
hop_length=hop_length)
eq_(len(onsets), 0)
sr = 22050
duration = 3.0
for f in [np.zeros, np.ones]:
y = f(int(duration * sr))
for hop_length in [64, 512, 2048]:
yield __test, y, sr, None, hop_length
yield __test, -y, sr, None, hop_length
oenv = librosa.onset.onset_strength(y=y,
sr=sr,
hop_length=hop_length)
yield __test, y, sr, oenv, hop_length
| isc |
GehenHe/Recognize-Face-on-Android | tensorflow/contrib/learn/python/learn/estimators/_sklearn.py | 153 | 6723 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""sklearn cross-support."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
import six
def _pprint(d):
return ', '.join(['%s=%s' % (key, str(value)) for key, value in d.items()])
class _BaseEstimator(object):
"""This is a cross-import when sklearn is not available.
Adopted from sklearn.BaseEstimator implementation.
https://github.com/scikit-learn/scikit-learn/blob/master/sklearn/base.py
"""
def get_params(self, deep=True):
"""Get parameters for this estimator.
Args:
deep: boolean, optional
If `True`, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns:
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
param_names = [name for name in self.__dict__ if not name.startswith('_')]
for key in param_names:
value = getattr(self, key, None)
if isinstance(value, collections.Callable):
continue
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The former have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Args:
**params: Parameters.
Returns:
self
Raises:
ValueError: If params contain invalid names.
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in six.iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
def __repr__(self):
class_name = self.__class__.__name__
return '%s(%s)' % (class_name,
_pprint(self.get_params(deep=False)),)
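# Illustrative note: nested parameters follow the ``<component>__<parameter>``
# convention used by set_params above. For a hypothetical estimator that
# stores a sub-estimator in ``self.base``, ``est.set_params(base__alpha=0.1)``
# forwards ``alpha=0.1`` to ``est.base.set_params``.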
# pylint: disable=old-style-class
class _ClassifierMixin():
"""Mixin class for all classifiers."""
pass
class _RegressorMixin():
"""Mixin class for all regression estimators."""
pass
class _TransformerMixin():
"""Mixin class for all transformer estimators."""
class NotFittedError(ValueError, AttributeError):
"""Exception class to raise if estimator is used before fitting.
This class inherits from both ValueError and AttributeError to help with
exception handling and backward compatibility.
Examples:
>>> from sklearn.svm import LinearSVC
>>> from sklearn.exceptions import NotFittedError
>>> try:
... LinearSVC().predict([[1, 2], [2, 3], [3, 4]])
... except NotFittedError as e:
... print(repr(e))
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
NotFittedError('This LinearSVC instance is not fitted yet',)
Copied from
https://github.com/scikit-learn/scikit-learn/master/sklearn/exceptions.py
"""
# pylint: enable=old-style-class
def _accuracy_score(y_true, y_pred):
score = y_true == y_pred
return np.average(score)
def _mean_squared_error(y_true, y_pred):
if len(y_true.shape) > 1:
y_true = np.squeeze(y_true)
if len(y_pred.shape) > 1:
y_pred = np.squeeze(y_pred)
return np.average((y_true - y_pred)**2)
def _train_test_split(*args, **options):
# pylint: disable=missing-docstring
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
if test_size is None and train_size is None:
train_size = 0.75
elif train_size is None:
train_size = 1 - test_size
train_size = int(train_size * args[0].shape[0])
np.random.seed(random_state)
indices = np.random.permutation(args[0].shape[0])
train_idx, test_idx = indices[:train_size], indices[train_size:]
result = []
for x in args:
result += [x.take(train_idx, axis=0), x.take(test_idx, axis=0)]
return tuple(result)
# If "TENSORFLOW_SKLEARN" flag is defined then try to import from sklearn.
TRY_IMPORT_SKLEARN = os.environ.get('TENSORFLOW_SKLEARN', False)
if TRY_IMPORT_SKLEARN:
# pylint: disable=g-import-not-at-top,g-multiple-import,unused-import
from sklearn.base import BaseEstimator, ClassifierMixin, RegressorMixin, TransformerMixin
from sklearn.metrics import accuracy_score, log_loss, mean_squared_error
from sklearn.cross_validation import train_test_split
try:
from sklearn.exceptions import NotFittedError
except ImportError:
try:
from sklearn.utils.validation import NotFittedError
except ImportError:
pass
else:
# Naive implementations of sklearn classes and functions.
BaseEstimator = _BaseEstimator
ClassifierMixin = _ClassifierMixin
RegressorMixin = _RegressorMixin
TransformerMixin = _TransformerMixin
accuracy_score = _accuracy_score
log_loss = None
mean_squared_error = _mean_squared_error
train_test_split = _train_test_split
| apache-2.0 |
ningchi/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
detrout/debian-statsmodels | statsmodels/sandbox/examples/ex_mixed_lls_re.py | 34 | 5393 | # -*- coding: utf-8 -*-
"""Example using OneWayMixed
Created on Sat Dec 03 10:15:55 2011
Author: Josef Perktold
This example constructs a linear model with individual specific random
effects, and uses OneWayMixed to estimate it.
This is a variation on ex_mixed_lls_0.py. Here we only have a single
individual specific constant, that is just a random effect without exogenous
regressors.
"""
import numpy as np
from statsmodels.sandbox.panel.mixed import OneWayMixed, Unit
examples = ['ex1']
if 'ex1' in examples:
#np.random.seed(54321)
np.random.seed(978326)
nsubj = 2000
units = []
nobs_i = 4 #number of observations per unit, changed below
nx = 0 #number fixed effects
nz = 1 ##number random effects
beta = np.ones(nx)
gamma = 0.5 * np.ones(nz) #mean of random effect
gamma[0] = 0
gamma_re_true = []
for i in range(nsubj):
#create data for one unit
#random effect/coefficient
gamma_re = gamma + 0.2 * np.random.standard_normal(nz)
#store true parameter for checking
gamma_re_true.append(gamma_re)
#for testing unbalanced case, let's change nobs per unit
if i > nsubj//4:
nobs_i = 6
#generate exogenous variables
X = np.random.standard_normal((nobs_i, nx))
Z = np.random.standard_normal((nobs_i, nz-1))
Z = np.column_stack((np.ones(nobs_i), Z))
noise = 0.1 * np.random.randn(nobs_i) #sig_e = 0.1
#generate endogenous variable
Y = np.dot(X, beta) + np.dot(Z, gamma_re) + noise
#add random effect design matrix also to fixed effects to
#capture the mean
#this seems to be necessary to force mean of RE to zero !?
#(It's not required for estimation but interpretation of random
        #effects covariance matrix changes - still need to check details.)
X = np.hstack((X,Z))
#create units and append to list
unit = Unit(Y, X, Z)
units.append(unit)
m = OneWayMixed(units)
import time
t0 = time.time()
m.initialize()
res = m.fit(maxiter=100, rtol=1.0e-5, params_rtol=1e-6, params_atol=1e-6)
t1 = time.time()
print('time for initialize and fit', t1-t0)
print('number of iterations', m.iterations)
#print dir(m)
#print vars(m)
print('\nestimates for fixed effects')
print(m.a)
print(m.params)
bfixed_cov = m.cov_fixed()
print('beta fixed standard errors')
print(np.sqrt(np.diag(bfixed_cov)))
print(m.bse)
b_re = m.params_random_units
print('RE mean:', b_re.mean(0))
print('RE columns std', b_re.std(0))
print('np.cov(b_re, rowvar=0), sample statistic')
print(np.cov(b_re, rowvar=0))
print('std of above')
#need atleast_1d or diag raises exception
print(np.sqrt(np.diag(np.atleast_1d(np.cov(b_re, rowvar=0)))))
print('m.cov_random()')
print(m.cov_random())
print('std of above')
print(res.std_random())
print(np.sqrt(np.diag(m.cov_random())))
print('\n(non)convergence of llf')
print(m.history['llf'][-4:])
print('convergence of parameters')
#print np.diff(np.vstack(m.history[-4:])[:,1:],axis=0)
print(np.diff(np.vstack(m.history['params'][-4:]),axis=0))
print('convergence of D')
print(np.diff(np.array(m.history['D'][-4:]), axis=0))
#zdotb = np.array([np.dot(unit.Z, unit.b) for unit in m.units])
zb = np.array([(unit.Z * unit.b[None,:]).sum(0) for unit in m.units])
'''if Z is not included in X:
>>> np.dot(b_re.T, b_re)/100
array([[ 0.03270611, -0.00916051],
[-0.00916051, 0.26432783]])
>>> m.cov_random()
array([[ 0.0348722 , -0.00909159],
[-0.00909159, 0.26846254]])
>>> #note cov_random doesn't subtract mean!
'''
print('\nchecking the random effects distribution and prediction')
gamma_re_true = np.array(gamma_re_true)
print('mean of random effect true', gamma_re_true.mean(0))
print('mean from fixed effects ', m.params[-2:])
print('mean of estimated RE ', b_re.mean(0))
print()
absmean_true = np.abs(gamma_re_true).mean(0)
mape = ((m.params[-2:] + b_re) / gamma_re_true - 1).mean(0)*100
mean_abs_perc = np.abs((m.params[-2:] + b_re) - gamma_re_true).mean(0) \
/ absmean_true*100
median_abs_perc = np.median(np.abs((m.params[-2:] + b_re) - gamma_re_true), 0) \
/ absmean_true*100
rmse_perc = ((m.params[-2:] + b_re) - gamma_re_true).std(0) \
/ absmean_true*100
print('mape ', mape)
print('mean_abs_perc ', mean_abs_perc)
print('median_abs_perc', median_abs_perc)
print('rmse_perc (std)', rmse_perc)
from numpy.testing import assert_almost_equal
#assert is for n_units=100 in original example
#I changed random number generation, so this won't work anymore
#assert_almost_equal(rmse_perc, [ 34.14783884, 11.6031684 ], decimal=8)
#now returns res
print('llf', res.llf) #based on MLE, does not include constant
print('tvalues', res.tvalues)
print('pvalues', res.pvalues)
print(res.t_test([1]))
print('test mean of both random effects variables is zero')
print(res.f_test([[1]]))
plots = res.plot_random_univariate(bins=50)
#fig = res.plot_scatter_pairs(0, 1) #no pairs
import matplotlib.pyplot as plt
plt.show()
| bsd-3-clause |
liuguoyaolgy/Stock | m_load_update_data.py | 1 | 7478 | import matplotlib.pyplot as plt
import tushare as ts
from sqlalchemy import create_engine
import pymysql
pymysql.install_as_MySQLdb()
import time
import datetime
from m_db import m_db2
#from ConfigParser import SafeConfigParser
import configparser
class load:
engine =''
sqlconn =''
cur =''
db = ''
def __init__(self):
cf = configparser.ConfigParser()
cf.read('/home/lgy/PycharmProjects/Stock/stock.init')
#cf.read('stock.init')
usr = cf.get('db','db_user')
pwd = cf.get('db', 'db_pass')
# self.engine = create_engine('mysql://root:root@127.0.0.1/gupiao?charset=utf8')
# self.sqlconn = pymysql.connect(host='localhost', port=3306,user='root',passwd='root',db='mysql',charset='UTF8')
self.engine = create_engine('mysql://'+usr+':'+pwd+'@127.0.0.1/gupiao?charset=utf8')
self.sqlconn = pymysql.connect(host='localhost', port=3306,user=usr,passwd=pwd,db='mysql',charset='UTF8')
self.cur = self.sqlconn.cursor()
self.cur.execute("use gupiao;")
self.db = m_db2()
def __del__(self):
self.sqlconn.close()
def get_all_stick_inf(self):
try:
df = ts.get_stock_basics()
except Exception as e:
print('Err:',e)
return
cnt = 0;
        print('begin====== full refresh of all stock fundamentals when the stock count differs ===============')
while cnt < 3:
try:
self.cur.execute("select count(*) from t_all_stickcode; ")
rs = self.cur.fetchall()
print('rs:',rs[0][0],len(df))
if rs[0][0] == len(df) :
break
print('delete')
self.cur.execute("delete from t_all_stickcode; ")
self.sqlconn.commit()
print('insert')
df.to_sql('t_all_stickcode',self.engine,if_exists='append')
except:
print('err')
df.to_sql('t_all_stickcode',self.engine,if_exists='append')
cnt += 1;
#df = ts.get_
self.sqlconn.commit()
return
def get_stick_hisdata_w(self,begin_date,end_date,all='YES'):
if all == 'YES':
print('all stock ...........')
self.cur.execute(
"select code,outstanding from t_all_stickcode ;")
else:
print('part stock ........................')
self.cur.execute("select code,outstanding from t_all_stickcode where outstanding < 50000 and (substr(code,1,1)='0' or substr(code,1,1)='6'); ;")
#self.cur.execute("select code,outstanding from t_all_stickcode a where not EXISTS (select * from t_stick_data_w where code = a.code);")
sqlrs = self.cur.fetchall();
# self.cur.execute("delete from t_stick_data_w")
#self.sqlconn.commit()
for code in sqlrs:
print(code[0]+'_w' ,begin_date,end_date)
try:
rs = ts.get_k_data(code=code[0],start=begin_date,end=end_date,ktype='W')
except Exception as e:
print('ERR:',e)
self.db.delete_date('t_stick_data_w',begin_date,code[0])
self.db.insert_data('t_stick_data_w',rs.as_matrix())
self.db.commit()
return
def get_stick_hisdata_d(self,begin_date,end_date):
# cur.execute("select code,outstanding from t_all_stickcode where outstanding < 50000 and (substr(code,1,1)='0' or substr(code,1,1)='6'); ;")
self.cur.execute("select code,outstanding from t_all_stickcode ;")
sqlrs = self.cur.fetchall();
# self.cur.execute("delete from t_stick_data_d")
self.sqlconn.commit()
for code in sqlrs:
print(code[0]+'_d' )
try:
rs = ts.get_k_data(code=code[0],start=begin_date,end=end_date,ktype='D')
except Exception as e:
print('ERR:',e)
continue
# rs.to_sql('t_stick_data_d',self.engine,if_exists='replace',index=False);
self.db.delete_date('t_stick_data_d',end_date,code[0])
self.db.insert_data('t_stick_data_d',rs.as_matrix())
self.sqlconn.commit()
return
def get_stick_hisdata_m(self,begin_date,end_date):
# cur.execute("select code,outstanding from t_all_stickcode where outstanding < 50000 and (substr(code,1,1)='0' or substr(code,1,1)='6'); ;")
self.cur.execute("select code,outstanding from t_all_stickcode a where not EXISTS (select * from t_stick_data_d where code = a.code);")
sqlrs = self.cur.fetchall();
# self.cur.execute("delete from t_stick_data_m")
self.sqlconn.commit()
for code in sqlrs:
print(code[0] )
rs = ts.get_k_data(code=code[0],start=begin_date,end=end_date,ktype='M')
self.db.delete_date('t_stick_data_m',end_date,code[0])
self.db.insert_data('t_stick_data_m',rs.as_matrix())
self.sqlconn.commit()
return
def get_stick_hisdata_add_m(self):
# cur.execute("select code,outstanding from t_all_stickcode where outstanding < 50000 and (substr(code,1,1)='0' or substr(code,1,1)='6'); ;")
# cur.execute("select code,outstanding from t_all_stickcode ;")
# sqlrs = cur.fetchall();
# cur.execute("delete from t_stick_data_m")
# sqlconn.commit()
# for code in sqlrs:
# print(code[0]+'_m' )
# rs = ts.get_k_data(code=code[0],start='2001-01-01',end='2016-12-31',ktype='M')
# rs.to_sql('t_stick_data_m_test',engine,if_exists='append');
rs = ts.get_k_data(code='600848',start='2013-01-01',end='2014-01-01',ktype='M')
rs.to_sql('t_stick_data_m_test',self.engine,if_exists='replace',index=False);
self.sqlconn.commit()
return
def data_complete(self,beginday='',endday='',ktype='D'):
if 'D' == ktype:
days = 2
if 'W' == ktype:
days = 13
if 'M' == ktype:
days = 31
if '' == beginday:
begindaytmp = datetime.date.today() - datetime.timedelta(days=days)
beginday = begindaytmp.strftime('%Y-%m-%d')
if '' == endday:
endday = datetime.date.today().strftime('%Y-%m-%d')
if 'D' == ktype:
self.get_stick_hisdata_d(begin_date=beginday, end_date=endday)
if 'W' == ktype:
self.get_stick_hisdata_w(begin_date=beginday, end_date=endday)
if 'M' == ktype:
self.get_stick_hisdata_m(begin_date=beginday, end_date=endday)
return
def get_little_stock(self):
self.cur.execute("select code from t_all_stickcode where liquidassets < 70000 and liquidassets > 60000 and (substr(code,1,1)='0' or substr(code,1,1)='6') ;")
rs = self.cur.fetchall();
return rs
# enddate = datetime.date.today()
# begindate = datetime.date.today() - datetime.timedelta(days=336)
#
# ld = load()
# ld.get_all_stick_inf()
# ld.get_stick_hisdata_w(begindate.strftime('%Y-%m-%d'),enddate.strftime('%Y-%m-%d'),all='YES')
# ld.get_stick_hisdata_w()
#get_stick_hisdata_d()
#get_stick_hisdata_w()
#get_stick_hisdata_m()
#get_stick_hisdata_add_m()
# rs = ts.get_k_data(code='600848', start='2016-06-01', end='2017-01-01', ktype='w')
# print(rs)
# cf = configparser.ConfigParser()
# cf.read('stock.init')
# usr = cf.get('db','db_user')
# pwd = cf.get('db', 'db_pass')
# print(usr,pwd)
| gpl-2.0 |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/pandas/io/packers.py | 6 | 27445 | """
Msgpack serializer support for reading and writing pandas data structures
to disk
portions of msgpack_numpy package, by Lev Givon were incorporated
into this module (and tests_packers.py)
License
=======
Copyright (c) 2013, Lev Givon.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Lev Givon nor the names of any
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from datetime import datetime, date, timedelta
from dateutil.parser import parse
import os
from textwrap import dedent
import warnings
import numpy as np
from pandas import compat
from pandas.compat import u, u_safe
from pandas.core.dtypes.common import (
is_categorical_dtype, is_object_dtype,
needs_i8_conversion, pandas_dtype)
from pandas import (Timestamp, Period, Series, DataFrame, # noqa
Index, MultiIndex, Float64Index, Int64Index,
Panel, RangeIndex, PeriodIndex, DatetimeIndex, NaT,
Categorical, CategoricalIndex)
from pandas._libs.tslib import NaTType
from pandas.core.sparse.api import SparseSeries, SparseDataFrame
from pandas.core.sparse.array import BlockIndex, IntIndex
from pandas.core.generic import NDFrame
from pandas.errors import PerformanceWarning
from pandas.io.common import get_filepath_or_buffer
from pandas.core.internals import BlockManager, make_block, _safe_reshape
import pandas.core.internals as internals
from pandas.io.msgpack import Unpacker as _Unpacker, Packer as _Packer, ExtType
from pandas.util._move import (
BadMove as _BadMove,
move_into_mutable_buffer as _move_into_mutable_buffer,
)
# check which compression libs we have installed
try:
import zlib
def _check_zlib():
pass
except ImportError:
def _check_zlib():
raise ImportError('zlib is not installed')
_check_zlib.__doc__ = dedent(
"""\
Check if zlib is installed.
Raises
------
ImportError
Raised when zlib is not installed.
""",
)
try:
import blosc
def _check_blosc():
pass
except ImportError:
def _check_blosc():
raise ImportError('blosc is not installed')
_check_blosc.__doc__ = dedent(
"""\
Check if blosc is installed.
Raises
------
ImportError
Raised when blosc is not installed.
""",
)
# until we can pass this into our conversion functions,
# this is pretty hacky
compressor = None
def to_msgpack(path_or_buf, *args, **kwargs):
"""
msgpack (serialize) object to input file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, buffer-like, or None
if None, return generated string
args : an object or objects to serialize
encoding: encoding for unicode objects
append : boolean whether to append to an existing msgpack
(default is False)
compress : type of compressor (zlib or blosc), default to None (no
compression)
"""
global compressor
compressor = kwargs.pop('compress', None)
if compressor:
compressor = u(compressor)
append = kwargs.pop('append', None)
if append:
mode = 'a+b'
else:
mode = 'wb'
def writer(fh):
for a in args:
fh.write(pack(a, **kwargs))
if isinstance(path_or_buf, compat.string_types):
with open(path_or_buf, mode) as fh:
writer(fh)
elif path_or_buf is None:
buf = compat.BytesIO()
writer(buf)
return buf.getvalue()
else:
writer(path_or_buf)
def read_msgpack(path_or_buf, encoding='utf-8', iterator=False, **kwargs):
"""
Load msgpack pandas object from the specified
file path
THIS IS AN EXPERIMENTAL LIBRARY and the storage format
may not be stable until a future release.
Parameters
----------
path_or_buf : string File path, BytesIO like or string
encoding: Encoding for decoding msgpack str type
iterator : boolean, if True, return an iterator to the unpacker
(default is False)
Returns
-------
obj : type of object stored in file
"""
path_or_buf, _, _ = get_filepath_or_buffer(path_or_buf)
if iterator:
return Iterator(path_or_buf)
def read(fh):
l = list(unpack(fh, encoding=encoding, **kwargs))
if len(l) == 1:
return l[0]
return l
# see if we have an actual file
if isinstance(path_or_buf, compat.string_types):
try:
exists = os.path.exists(path_or_buf)
except (TypeError, ValueError):
exists = False
if exists:
with open(path_or_buf, 'rb') as fh:
return read(fh)
# treat as a binary-like
if isinstance(path_or_buf, compat.binary_type):
fh = None
try:
fh = compat.BytesIO(path_or_buf)
return read(fh)
finally:
if fh is not None:
fh.close()
# a buffer like
if hasattr(path_or_buf, 'read') and compat.callable(path_or_buf.read):
return read(path_or_buf)
raise ValueError('path_or_buf needs to be a string file path or file-like')
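# Example (a hedged usage sketch, not part of the original module): round-trip
# a DataFrame through msgpack bytes in memory. Passing None as the path makes
# to_msgpack return the packed bytes directly, which read_msgpack accepts.
#
#   >>> import pandas as pd
#   >>> df = pd.DataFrame({'a': [1, 2, 3]})
#   >>> packed = to_msgpack(None, df)                     # bytes
#   >>> df_roundtrip = read_msgpack(packed)               # DataFrame again
#   >>> packed_z = to_msgpack(None, df, compress='zlib')  # optional compression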
dtype_dict = {21: np.dtype('M8[ns]'),
u('datetime64[ns]'): np.dtype('M8[ns]'),
u('datetime64[us]'): np.dtype('M8[us]'),
22: np.dtype('m8[ns]'),
u('timedelta64[ns]'): np.dtype('m8[ns]'),
u('timedelta64[us]'): np.dtype('m8[us]'),
# this is platform int, which we need to remap to np.int64
# for compat on windows platforms
7: np.dtype('int64'),
'category': 'category'
}
def dtype_for(t):
""" return my dtype mapping, whether number or name """
if t in dtype_dict:
return dtype_dict[t]
return np.typeDict.get(t, t)
c2f_dict = {'complex': np.float64,
'complex128': np.float64,
'complex64': np.float32}
# numpy 1.6.1 compat
if hasattr(np, 'float128'):
c2f_dict['complex256'] = np.float128
def c2f(r, i, ctype_name):
"""
Convert strings to complex number instance with specified numpy type.
"""
ftype = c2f_dict[ctype_name]
return np.typeDict[ctype_name](ftype(r) + 1j * ftype(i))
def convert(values):
""" convert the numpy values to a list """
dtype = values.dtype
if is_categorical_dtype(values):
return values
elif is_object_dtype(dtype):
return values.ravel().tolist()
if needs_i8_conversion(dtype):
values = values.view('i8')
v = values.ravel()
if compressor == 'zlib':
_check_zlib()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, zlib.compress(v))
elif compressor == 'blosc':
_check_blosc()
# return string arrays like they are
if dtype == np.object_:
return v.tolist()
# convert to a bytes array
v = v.tostring()
return ExtType(0, blosc.compress(v, typesize=dtype.itemsize))
# ndarray (on original dtype)
return ExtType(0, v.tostring())
def unconvert(values, dtype, compress=None):
as_is_ext = isinstance(values, ExtType) and values.code == 0
if as_is_ext:
values = values.data
if is_categorical_dtype(dtype):
return values
elif is_object_dtype(dtype):
return np.array(values, dtype=object)
dtype = pandas_dtype(dtype).base
if not as_is_ext:
values = values.encode('latin1')
if compress:
if compress == u'zlib':
_check_zlib()
decompress = zlib.decompress
elif compress == u'blosc':
_check_blosc()
decompress = blosc.decompress
else:
raise ValueError("compress must be one of 'zlib' or 'blosc'")
try:
return np.frombuffer(
_move_into_mutable_buffer(decompress(values)),
dtype=dtype,
)
except _BadMove as e:
# Pull the decompressed data off of the `_BadMove` exception.
# We don't just store this in the locals because we want to
# minimize the risk of giving users access to a `bytes` object
# whose data is also given to a mutable buffer.
values = e.args[0]
if len(values) > 1:
# The empty string and single characters are memoized in many
# string creating functions in the capi. This case should not
# warn even though we need to make a copy because we are only
# copying at most 1 byte.
warnings.warn(
'copying data after decompressing; this may mean that'
' decompress is caching its result',
PerformanceWarning,
)
# fall through to copying `np.fromstring`
# Copy the string into a numpy array.
return np.fromstring(values, dtype=dtype)
def encode(obj):
"""
Data encoder
"""
tobj = type(obj)
if isinstance(obj, Index):
if isinstance(obj, RangeIndex):
return {u'typ': u'range_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'start': getattr(obj, '_start', None),
u'stop': getattr(obj, '_stop', None),
u'step': getattr(obj, '_step', None)}
elif isinstance(obj, PeriodIndex):
return {u'typ': u'period_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'freq': u_safe(getattr(obj, 'freqstr', None)),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.asi8),
u'compress': compressor}
elif isinstance(obj, DatetimeIndex):
tz = getattr(obj, 'tz', None)
# store tz info and data as UTC
if tz is not None:
tz = u(tz.zone)
obj = obj.tz_convert('UTC')
return {u'typ': u'datetime_index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.asi8),
u'freq': u_safe(getattr(obj, 'freqstr', None)),
u'tz': tz,
u'compress': compressor}
elif isinstance(obj, MultiIndex):
return {u'typ': u'multi_index',
u'klass': u(obj.__class__.__name__),
u'names': getattr(obj, 'names', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
else:
return {u'typ': u'index',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
elif isinstance(obj, Categorical):
return {u'typ': u'category',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'codes': obj.codes,
u'categories': obj.categories,
u'ordered': obj.ordered,
u'compress': compressor}
elif isinstance(obj, Series):
if isinstance(obj, SparseSeries):
raise NotImplementedError(
'msgpack sparse series is not implemented'
)
# d = {'typ': 'sparse_series',
# 'klass': obj.__class__.__name__,
# 'dtype': obj.dtype.name,
# 'index': obj.index,
# 'sp_index': obj.sp_index,
# 'sp_values': convert(obj.sp_values),
# 'compress': compressor}
# for f in ['name', 'fill_value', 'kind']:
# d[f] = getattr(obj, f, None)
# return d
else:
return {u'typ': u'series',
u'klass': u(obj.__class__.__name__),
u'name': getattr(obj, 'name', None),
u'index': obj.index,
u'dtype': u(obj.dtype.name),
u'data': convert(obj.values),
u'compress': compressor}
elif issubclass(tobj, NDFrame):
if isinstance(obj, SparseDataFrame):
raise NotImplementedError(
'msgpack sparse frame is not implemented'
)
# d = {'typ': 'sparse_dataframe',
# 'klass': obj.__class__.__name__,
# 'columns': obj.columns}
# for f in ['default_fill_value', 'default_kind']:
# d[f] = getattr(obj, f, None)
# d['data'] = dict([(name, ss)
# for name, ss in compat.iteritems(obj)])
# return d
else:
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
# the block manager
return {u'typ': u'block_manager',
u'klass': u(obj.__class__.__name__),
u'axes': data.axes,
u'blocks': [{u'locs': b.mgr_locs.as_array,
u'values': convert(b.values),
u'shape': b.values.shape,
u'dtype': u(b.dtype.name),
u'klass': u(b.__class__.__name__),
u'compress': compressor} for b in data.blocks]
}
elif isinstance(obj, (datetime, date, np.datetime64, timedelta,
np.timedelta64, NaTType)):
if isinstance(obj, Timestamp):
tz = obj.tzinfo
if tz is not None:
tz = u(tz.zone)
freq = obj.freq
if freq is not None:
freq = u(freq.freqstr)
return {u'typ': u'timestamp',
u'value': obj.value,
u'freq': freq,
u'tz': tz}
if isinstance(obj, NaTType):
return {u'typ': u'nat'}
elif isinstance(obj, np.timedelta64):
return {u'typ': u'timedelta64',
u'data': obj.view('i8')}
elif isinstance(obj, timedelta):
return {u'typ': u'timedelta',
u'data': (obj.days, obj.seconds, obj.microseconds)}
elif isinstance(obj, np.datetime64):
return {u'typ': u'datetime64',
u'data': u(str(obj))}
elif isinstance(obj, datetime):
return {u'typ': u'datetime',
u'data': u(obj.isoformat())}
elif isinstance(obj, date):
return {u'typ': u'date',
u'data': u(obj.isoformat())}
raise Exception("cannot encode this datetimelike object: %s" % obj)
elif isinstance(obj, Period):
return {u'typ': u'period',
u'ordinal': obj.ordinal,
u'freq': u(obj.freq)}
elif isinstance(obj, BlockIndex):
return {u'typ': u'block_index',
u'klass': u(obj.__class__.__name__),
u'blocs': obj.blocs,
u'blengths': obj.blengths,
u'length': obj.length}
elif isinstance(obj, IntIndex):
return {u'typ': u'int_index',
u'klass': u(obj.__class__.__name__),
u'indices': obj.indices,
u'length': obj.length}
elif isinstance(obj, np.ndarray):
return {u'typ': u'ndarray',
u'shape': obj.shape,
u'ndim': obj.ndim,
u'dtype': u(obj.dtype.name),
u'data': convert(obj),
u'compress': compressor}
elif isinstance(obj, np.number):
if np.iscomplexobj(obj):
return {u'typ': u'np_scalar',
u'sub_typ': u'np_complex',
u'dtype': u(obj.dtype.name),
u'real': u(obj.real.__repr__()),
u'imag': u(obj.imag.__repr__())}
else:
return {u'typ': u'np_scalar',
u'dtype': u(obj.dtype.name),
u'data': u(obj.__repr__())}
elif isinstance(obj, complex):
return {u'typ': u'np_complex',
u'real': u(obj.real.__repr__()),
u'imag': u(obj.imag.__repr__())}
return obj
def decode(obj):
"""
Decoder for deserializing numpy data types.
"""
typ = obj.get(u'typ')
if typ is None:
return obj
elif typ == u'timestamp':
freq = obj[u'freq'] if 'freq' in obj else obj[u'offset']
return Timestamp(obj[u'value'], tz=obj[u'tz'], freq=freq)
elif typ == u'nat':
return NaT
elif typ == u'period':
return Period(ordinal=obj[u'ordinal'], freq=obj[u'freq'])
elif typ == u'index':
dtype = dtype_for(obj[u'dtype'])
data = unconvert(obj[u'data'], dtype,
obj.get(u'compress'))
return globals()[obj[u'klass']](data, dtype=dtype, name=obj[u'name'])
elif typ == u'range_index':
return globals()[obj[u'klass']](obj[u'start'],
obj[u'stop'],
obj[u'step'],
name=obj[u'name'])
elif typ == u'multi_index':
dtype = dtype_for(obj[u'dtype'])
data = unconvert(obj[u'data'], dtype,
obj.get(u'compress'))
data = [tuple(x) for x in data]
return globals()[obj[u'klass']].from_tuples(data, names=obj[u'names'])
elif typ == u'period_index':
data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
d = dict(name=obj[u'name'], freq=obj[u'freq'])
return globals()[obj[u'klass']]._from_ordinals(data, **d)
elif typ == u'datetime_index':
data = unconvert(obj[u'data'], np.int64, obj.get(u'compress'))
d = dict(name=obj[u'name'], freq=obj[u'freq'], verify_integrity=False)
result = globals()[obj[u'klass']](data, **d)
tz = obj[u'tz']
# reverse tz conversion
if tz is not None:
result = result.tz_localize('UTC').tz_convert(tz)
return result
elif typ == u'category':
from_codes = globals()[obj[u'klass']].from_codes
return from_codes(codes=obj[u'codes'],
categories=obj[u'categories'],
ordered=obj[u'ordered'])
elif typ == u'series':
dtype = dtype_for(obj[u'dtype'])
pd_dtype = pandas_dtype(dtype)
index = obj[u'index']
result = globals()[obj[u'klass']](unconvert(obj[u'data'], dtype,
obj[u'compress']),
index=index,
dtype=pd_dtype,
name=obj[u'name'])
return result
elif typ == u'block_manager':
axes = obj[u'axes']
def create_block(b):
values = _safe_reshape(unconvert(
b[u'values'], dtype_for(b[u'dtype']),
b[u'compress']), b[u'shape'])
# locs handles duplicate column names, and should be used instead
# of items; see GH 9618
if u'locs' in b:
placement = b[u'locs']
else:
placement = axes[0].get_indexer(b[u'items'])
return make_block(values=values,
klass=getattr(internals, b[u'klass']),
placement=placement,
dtype=b[u'dtype'])
blocks = [create_block(b) for b in obj[u'blocks']]
return globals()[obj[u'klass']](BlockManager(blocks, axes))
elif typ == u'datetime':
return parse(obj[u'data'])
elif typ == u'datetime64':
return np.datetime64(parse(obj[u'data']))
elif typ == u'date':
return parse(obj[u'data']).date()
elif typ == u'timedelta':
return timedelta(*obj[u'data'])
elif typ == u'timedelta64':
return np.timedelta64(int(obj[u'data']))
# elif typ == 'sparse_series':
# dtype = dtype_for(obj['dtype'])
# return globals()[obj['klass']](
# unconvert(obj['sp_values'], dtype, obj['compress']),
# sparse_index=obj['sp_index'], index=obj['index'],
# fill_value=obj['fill_value'], kind=obj['kind'], name=obj['name'])
# elif typ == 'sparse_dataframe':
# return globals()[obj['klass']](
# obj['data'], columns=obj['columns'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind']
# )
# elif typ == 'sparse_panel':
# return globals()[obj['klass']](
# obj['data'], items=obj['items'],
# default_fill_value=obj['default_fill_value'],
# default_kind=obj['default_kind'])
elif typ == u'block_index':
return globals()[obj[u'klass']](obj[u'length'], obj[u'blocs'],
obj[u'blengths'])
elif typ == u'int_index':
return globals()[obj[u'klass']](obj[u'length'], obj[u'indices'])
elif typ == u'ndarray':
return unconvert(obj[u'data'], np.typeDict[obj[u'dtype']],
obj.get(u'compress')).reshape(obj[u'shape'])
elif typ == u'np_scalar':
if obj.get(u'sub_typ') == u'np_complex':
return c2f(obj[u'real'], obj[u'imag'], obj[u'dtype'])
else:
dtype = dtype_for(obj[u'dtype'])
try:
return dtype(obj[u'data'])
except:
return dtype.type(obj[u'data'])
elif typ == u'np_complex':
return complex(obj[u'real'] + u'+' + obj[u'imag'] + u'j')
elif isinstance(obj, (dict, list, set)):
return obj
else:
return obj
def pack(o, default=encode,
encoding='utf-8', unicode_errors='strict', use_single_float=False,
autoreset=1, use_bin_type=1):
"""
Pack an object and return the packed bytes.
"""
return Packer(default=default, encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type).pack(o)
def unpack(packed, object_hook=decode,
list_hook=None, use_list=False, encoding='utf-8',
unicode_errors='strict', object_pairs_hook=None,
max_buffer_size=0, ext_hook=ExtType):
"""
Unpack a packed object, return an iterator
Note: packed lists will be returned as tuples
"""
return Unpacker(packed, object_hook=object_hook,
list_hook=list_hook,
use_list=use_list, encoding=encoding,
unicode_errors=unicode_errors,
object_pairs_hook=object_pairs_hook,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
class Packer(_Packer):
def __init__(self, default=encode,
encoding='utf-8',
unicode_errors='strict',
use_single_float=False,
autoreset=1,
use_bin_type=1):
super(Packer, self).__init__(default=default,
encoding=encoding,
unicode_errors=unicode_errors,
use_single_float=use_single_float,
autoreset=autoreset,
use_bin_type=use_bin_type)
class Unpacker(_Unpacker):
def __init__(self, file_like=None, read_size=0, use_list=False,
object_hook=decode,
object_pairs_hook=None, list_hook=None, encoding='utf-8',
unicode_errors='strict', max_buffer_size=0, ext_hook=ExtType):
super(Unpacker, self).__init__(file_like=file_like,
read_size=read_size,
use_list=use_list,
object_hook=object_hook,
object_pairs_hook=object_pairs_hook,
list_hook=list_hook,
encoding=encoding,
unicode_errors=unicode_errors,
max_buffer_size=max_buffer_size,
ext_hook=ext_hook)
class Iterator(object):
""" manage the unpacking iteration,
close the file on completion """
def __init__(self, path, **kwargs):
self.path = path
self.kwargs = kwargs
def __iter__(self):
needs_closing = True
try:
# see if we have an actual file
if isinstance(self.path, compat.string_types):
try:
path_exists = os.path.exists(self.path)
except TypeError:
path_exists = False
if path_exists:
fh = open(self.path, 'rb')
else:
fh = compat.BytesIO(self.path)
else:
if not hasattr(self.path, 'read'):
fh = compat.BytesIO(self.path)
else:
# a file-like
needs_closing = False
fh = self.path
unpacker = unpack(fh)
for o in unpacker:
yield o
finally:
if needs_closing:
fh.close()
| mit |
grain2011/vislab | vislab/util.py | 4 | 5117 | import os
import pandas as pd
import pymongo
import redis
import socket
import tempfile
import cPickle
import subprocess
import shutil
import vislab
def zero_results(collection, query):
return collection.find(query).limit(1).count() == 0
def exclude_ids_in_collection(image_ids, collection):
"""
Exclude ids already stored in the collection.
Useful for submitting map jobs.
"""
computed_image_ids = [
x['image_id'] for x in collection.find(fields=['image_id'])
]
num_ids = len(image_ids)
image_ids = list(set(image_ids) - set(computed_image_ids))
print("Cut down on {} existing out of {} total image ids.".format(
num_ids - len(image_ids), num_ids))
return image_ids
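# Usage sketch (database and collection names are hypothetical): skip ids whose
# results are already stored before dispatching map jobs.
#
#   client = get_mongodb_client()
#   todo_ids = exclude_ids_in_collection(all_ids, client['vislab']['features'])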
def load_or_generate_df(filename, generator_fn, force=False, args=None):
"""
If filename does not already exist, gather data with generator_fn,
and write to filename.
If filename does exist, load from it.
"""
if not force and os.path.exists(filename):
df = pd.read_hdf(filename, 'df')
else:
df = generator_fn(args)
df.to_hdf(filename, 'df', mode='w')
return df
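# Usage sketch (hypothetical names, for illustration only): the generator is
# called with `args` and its result is cached to HDF5, so repeated calls with
# the same filename skip the expensive computation unless force=True.
#
#   df = load_or_generate_df('cache/features.h5', compute_features_df,
#                            force=False, args=my_args)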
def running_on_icsi():
"""
Return True if this script is running on the ICSI cluster.
"""
return socket.gethostname().endswith('ICSI.Berkeley.EDU')
def get_mongodb_client():
"""
Establish connection to MongoDB.
"""
try:
host, port = vislab.config['servers']['mongo']
connection = pymongo.MongoClient(host, port)
except pymongo.errors.ConnectionFailure:
raise Exception(
"Need a MongoDB server running on {}, port {}".format(host, port))
return connection
def print_collection_counts():
"""
Print all collections and their counts for all databases in MongoDB.
"""
client = get_mongodb_client()
for db_name in client.database_names():
for coll_name in client[db_name].collection_names():
print('{} |\t\t{}: {}'.format(
db_name, coll_name, client[db_name][coll_name].count()))
def get_redis_client():
host, port = vislab.config['servers']['redis']
try:
connection = redis.Redis(host, port)
connection.ping()
except redis.ConnectionError:
raise Exception(
"Need a Redis server running on {}, port {}".format(host, port))
return connection
def pickle_function_call(func_name, args):
f, temp_filename = tempfile.mkstemp()
with open(temp_filename, 'w') as f:
cPickle.dump((func_name, args), f)
c = "import os; import cPickle;"
c += "f = open('{0}'); func, args = cPickle.load(f); f.close();"
c += "os.remove('{0}'); func(*args)"
c = c.format(temp_filename)
return c
def run_through_bash_script(cmds, filename=None, verbose=False, num_workers=1):
"""
Write out given commands to a bash script file and execute it.
This is useful when the commands to run include pipes, or are chained.
subprocess is not too easy to use in those cases.
Parameters
----------
cmds: list of string
filename: string or None [None]
If None, a temporary file is used and deleted after.
verbose: bool [False]
If True, output the commands that will be run.
num_workers: int [1]
If > 1, commands are piped through parallel -j num_workers
"""
assert(num_workers > 0)
remove_file = False
if filename is None:
f, filename = tempfile.mkstemp()
remove_file = True
if num_workers > 1:
contents = "echo \"{}\" | parallel --env PATH -j {}".format(
'\n'.join(cmds), num_workers)
else:
contents = '\n'.join(cmds)
with open(filename, 'w') as f:
f.write(contents + '\n')
if verbose:
print("Contents of script file about to be run:")
print(contents)
p = subprocess.Popen(['bash', filename])
out, err = p.communicate()
if remove_file:
os.remove(filename)
if not p.returncode == 0:
print(out)
print(err)
raise Exception("Script exited with code {}".format(p.returncode))
def run_shell_cmd(cmd, echo=True):
"""
Run a command in a sub-shell, capturing stdout and stderr
to temporary files that are then read.
"""
_, stdout_f = tempfile.mkstemp()
_, stderr_f = tempfile.mkstemp()
print("Running command")
print(cmd)
p = subprocess.Popen(
'{} >{} 2>{}'.format(cmd, stdout_f, stderr_f), shell=True)
p.wait()
with open(stdout_f) as f:
stdout = f.read()
os.remove(stdout_f)
with open(stderr_f) as f:
stderr = f.read()
os.remove(stderr_f)
if echo:
print("stdout:")
print(stdout)
print("stderr:")
print(stderr)
return stdout, stderr
def makedirs(dirname):
if os.path.exists(dirname):
return dirname
try:
os.makedirs(dirname)
except OSError:
pass
except:
raise
return dirname
def cleardirs(dirname):
if os.path.exists(dirname):
shutil.rmtree(dirname)
return makedirs(dirname)
| bsd-2-clause |
DouglasLeeTucker/DECam_PGCM | bin/rawdata_clean_relevant_gaiadr2_data.py | 1 | 8174 | #!/usr/bin/env python
"""
rawdata_clean_relevant_gaiadr2_data.py
Example:
rawdata_clean_relevant_gaiadr2_data.py --help
rawdata_clean_relevant_gaiadr2_data.py --inputFile gaiadr2_new_rawdata_rawdata.csv --outputFile gaiadr2_new_y2a1_rawdata.u.csv.tmp --verbose 2
"""
##################################
def main():
import argparse
import time
"""Create command line arguments"""
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('--inputFile', help='name of the input CSV file', default='input.csv')
parser.add_argument('--outputFile', help='name of the output CSV file', default='output.csv')
parser.add_argument('--verbose', help='verbosity level of output to screen (0,1,2,...)', default=0, type=int)
args = parser.parse_args()
if args.verbose > 0: print args
status = clean_relevant_gaiadr2_data(args)
return status
##################################
# clean_relevant_gaiadr2_data
#
def clean_relevant_gaiadr2_data(args):
import numpy as np
import os
import sys
import datetime
import fitsio
import pandas as pd
if args.verbose>0:
print
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print 'clean_relevant_gaiadr2_data'
print '* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *'
print
inputFile = args.inputFile
outputFile = args.outputFile
# Read selected columns from inputFile...
columns = ['RA_WRAP','RA','DEC',
'PHOT_G_MEAN_MAG','PHOT_G_MEAN_FLUX_OVER_ERROR',
'PHOT_BP_MEAN_MAG','PHOT_BP_MEAN_FLUX_OVER_ERROR',
'PHOT_RP_MEAN_MAG','PHOT_RP_MEAN_FLUX_OVER_ERROR',
'BP_RP','BP_G','G_RP','PHOT_BP_RP_EXCESS_FACTOR']
print datetime.datetime.now()
print """Reading in selected columns from %s...""" % (inputFile)
df = pd.read_csv(inputFile, usecols=columns)
print datetime.datetime.now()
# Includes masks for mag, magerr, color, main stellar locus outliers,
# and BP_RP photometric excess...
mask = ( (1.086/df.PHOT_G_MEAN_FLUX_OVER_ERROR < 0.3) &
(1.086/df.PHOT_BP_MEAN_FLUX_OVER_ERROR < 0.3) &
(1.086/df.PHOT_RP_MEAN_FLUX_OVER_ERROR < 0.3) &
(df.PHOT_G_MEAN_MAG < 19.0) &
(df.BP_G > 0.2) & (df.BP_G < 1.6) &
(np.abs(df.G_RP - 0.45*(df.BP_RP + 0.2)) < 0.2) &
(df.PHOT_BP_RP_EXCESS_FACTOR > (1.0 + 0.015*df.BP_RP*df.BP_RP)) &
(df.PHOT_BP_RP_EXCESS_FACTOR < (1.3 + 0.060*df.BP_RP*df.BP_RP)) )
# Steve Kent's Gaia DR2 -> DES transformations, of the format:
# des_mag = Gaia_G + intercept + slope*( (Gaia_BP-Gaia_G) - color0 ),
# one relation for (Gaia_BP-Gaia_G) < color0 [blue],
# and another for (Gaia_BP-Gaia_G) > color0 [red].
#
# See S Kent's e-mail from 31 August 2018...
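    # Worked example (illustrative numbers only, using the skent1 g-band
    # coefficients defined just below): a star with Gaia G = 16.00 and
    # BP - G = 0.50 falls on the blue side (0.50 < color0 = 0.899), so
    #   g_DES = 16.00 + 1.339 + 1.682*(0.50 - 0.899) ~= 16.67.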
skent1 = {}
skent1['g.color0'] = 0.899
skent1['g.intercept'] = 1.339
skent1['g.blue.slope'] = 1.682
skent1['g.red.slope'] = 1.015
skent1['r.color0'] = 0.78
skent1['r.intercept'] = -0.124
skent1['r.blue.slope'] = -0.174
skent1['r.red.slope'] = 0.767
skent1['i.color0'] = 0.90
skent1['i.intercept'] = -0.674
skent1['i.blue.slope'] = -0.879
skent1['i.red.slope'] = -0.437
skent1['z.color0'] = 1.12
skent1['z.intercept'] = -1.216
skent1['z.blue.slope'] = -1.247
skent1['z.red.slope'] = -0.706
skent1['Y.color0'] = 0.91
skent1['Y.intercept'] = -1.052
skent1['Y.blue.slope'] = -1.441
skent1['Y.red.slope'] = -1.028
skent2 = {}
skent2['g.color0'] = 0.899
skent2['g.intercept'] = 1.349
skent2['g.blue.slope'] = 1.702
skent2['g.red.slope'] = 0.907
skent2['r.color0'] = 0.78
skent2['r.intercept'] = -0.116
skent2['r.blue.slope'] = -0.151
skent2['r.red.slope'] = 0.747
skent2['i.color0'] = 0.90
skent2['i.intercept'] = -0.691
skent2['i.blue.slope'] = -0.925
skent2['i.red.slope'] = -0.410
skent2['z.color0'] = 1.12
skent2['z.intercept'] = -1.217
skent2['z.blue.slope'] = -1.282
skent2['z.red.slope'] = -0.637
skent2['Y.color0'] = 0.91
skent2['Y.intercept'] = -1.055
skent2['Y.blue.slope'] = -1.514
skent2['Y.red.slope'] = -0.992
skent3 = {}
skent3['g.color0'] = 0.899
skent3['g.intercept'] = 1.306
skent3['g.blue.slope'] = 1.634
skent3['g.red.slope'] = 0.939
skent3['r.color0'] = 0.78
skent3['r.intercept'] = -0.136
skent3['r.blue.slope'] = -0.179
skent3['r.red.slope'] = 0.747
skent3['i.color0'] = 0.90
skent3['i.intercept'] = -0.678
skent3['i.blue.slope'] = -0.905
skent3['i.red.slope'] = -0.444
skent3['z.color0'] = 1.12
skent3['z.intercept'] = -1.193
skent3['z.blue.slope'] = -1.256
skent3['z.red.slope'] = -0.873
skent3['Y.color0'] = 0.91
skent3['Y.intercept'] = -1.034
skent3['Y.blue.slope'] = -1.464
skent3['Y.red.slope'] = -1.094
for band in ['g', 'r', 'i', 'z', 'Y']:
# S Kent #1:
desMagColName1 = """%sMAG_DES_1""" % (band.upper())
color0 = """%s.color0""" % (band)
intercept = """%s.intercept""" % (band)
blue_slope = """%s.blue.slope""" % (band)
red_slope = """%s.red.slope""" % (band)
df.loc[:,desMagColName1] = -9999.
blueMask = (mask & (df.BP_G <= skent1[color0]))
redMask = (mask & (df.BP_G > skent1[color0]))
df.loc[blueMask,desMagColName1] = df.loc[blueMask,'PHOT_G_MEAN_MAG'] + \
+ skent1[intercept] + skent1[blue_slope]*(df.loc[blueMask,'BP_G'] - skent1[color0])
df.loc[redMask,desMagColName1] = df.loc[redMask,'PHOT_G_MEAN_MAG'] + \
+ skent1[intercept] + skent1[red_slope]*(df.loc[redMask,'BP_G'] - skent1[color0])
# S Kent #2:
desMagColName2 = """%sMAG_DES_2""" % (band.upper())
color0 = """%s.color0""" % (band)
intercept = """%s.intercept""" % (band)
blue_slope = """%s.blue.slope""" % (band)
red_slope = """%s.red.slope""" % (band)
df.loc[:,desMagColName2] = -9999.
blueMask = (mask & (df.BP_G <= skent2[color0]))
redMask = (mask & (df.BP_G > skent2[color0]))
df.loc[blueMask,desMagColName2] = df.loc[blueMask,'PHOT_G_MEAN_MAG'] + \
+ skent2[intercept] + skent2[blue_slope]*(df.loc[blueMask,'BP_G'] - skent1[color0])
df.loc[redMask,desMagColName2] = df.loc[redMask,'PHOT_G_MEAN_MAG'] + \
+ skent2[intercept] + skent2[red_slope]*(df.loc[redMask,'BP_G'] - skent1[color0])
# S Kent #3:
desMagColName3 = """%sMAG_DES_3""" % (band.upper())
color0 = """%s.color0""" % (band)
intercept = """%s.intercept""" % (band)
blue_slope = """%s.blue.slope""" % (band)
red_slope = """%s.red.slope""" % (band)
df.loc[:,desMagColName3] = -9999.
blueMask = (mask & (df.BP_G <= skent3[color0]))
redMask = (mask & (df.BP_G > skent3[color0]))
df.loc[blueMask,desMagColName3] = df.loc[blueMask,'PHOT_G_MEAN_MAG'] + \
+ skent3[intercept] + skent3[blue_slope]*(df.loc[blueMask,'BP_G'] - skent1[color0])
df.loc[redMask,desMagColName3] = df.loc[redMask,'PHOT_G_MEAN_MAG'] + \
+ skent3[intercept] + skent3[red_slope]*(df.loc[redMask,'BP_G'] - skent1[color0])
# S Kent average...
desMagColName = """%sMAG_DES""" % (band.upper())
df.loc[:,desMagColName] = ( df.loc[:,desMagColName1] + \
df.loc[:,desMagColName2] + \
df.loc[:,desMagColName3] ) / 3.
# Output results...
outcolumns = columns.extend(['GMAG_DES','RMAG_DES','IMAG_DES','ZMAG_DES','YMAG_DES'])
df.to_csv(outputFile, columns=outcolumns, index=False, float_format='%.6f')
return 0
##################################
if __name__ == "__main__":
main()
##################################
| gpl-3.0 |
DailyActie/Surrogate-Model | 01-codes/scikit-learn-master/examples/datasets/plot_iris_dataset.py | 1 | 1927 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
The Iris Dataset
=========================================================
This data sets consists of 3 different types of irises'
(Setosa, Versicolour, and Virginica) petal and sepal
length, stored in a 150x4 numpy.ndarray
The rows being the samples and the columns being:
Sepal Length, Sepal Width, Petal Length and Petal Width.
The below plot uses the first two features.
See `here <http://en.wikipedia.org/wiki/Iris_flower_data_set>`_ for more
information on this dataset.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets
from sklearn.decomposition import PCA
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
Y = iris.target
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
plt.figure(2, figsize=(8, 6))
plt.clf()
# Plot the training points
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
# To get a better understanding of the interaction of the dimensions
# plot the first three PCA dimensions
fig = plt.figure(1, figsize=(8, 6))
ax = Axes3D(fig, elev=-150, azim=110)
X_reduced = PCA(n_components=3).fit_transform(iris.data)
ax.scatter(X_reduced[:, 0], X_reduced[:, 1], X_reduced[:, 2], c=Y,
cmap=plt.cm.Paired)
ax.set_title("First three PCA directions")
ax.set_xlabel("1st eigenvector")
ax.w_xaxis.set_ticklabels([])
ax.set_ylabel("2nd eigenvector")
ax.w_yaxis.set_ticklabels([])
ax.set_zlabel("3rd eigenvector")
ax.w_zaxis.set_ticklabels([])
plt.show()
| mit |
murali-munna/scikit-learn | examples/ensemble/plot_forest_importances.py | 241 | 1761 | """
=========================================
Feature importances with forests of trees
=========================================
This examples shows the use of forests of trees to evaluate the importance of
features on an artificial classification task. The red bars are the feature
importances of the forest, along with their inter-trees variability.
As expected, the plot suggests that 3 features are informative, while the
remaining are not.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_classification
from sklearn.ensemble import ExtraTreesClassifier
# Build a classification task using 3 informative features
X, y = make_classification(n_samples=1000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
n_classes=2,
random_state=0,
shuffle=False)
# Build a forest and compute the feature importances
forest = ExtraTreesClassifier(n_estimators=250,
random_state=0)
forest.fit(X, y)
importances = forest.feature_importances_
std = np.std([tree.feature_importances_ for tree in forest.estimators_],
axis=0)
indices = np.argsort(importances)[::-1]
# Print the feature ranking
print("Feature ranking:")
for f in range(10):
print("%d. feature %d (%f)" % (f + 1, indices[f], importances[indices[f]]))
# Plot the feature importances of the forest
plt.figure()
plt.title("Feature importances")
plt.bar(range(10), importances[indices],
color="r", yerr=std[indices], align="center")
plt.xticks(range(10), indices)
plt.xlim([-1, 10])
plt.show()
| bsd-3-clause |
trankmichael/scikit-learn | sklearn/svm/tests/test_bounds.py | 280 | 2541 | import nose
from nose.tools import assert_equal, assert_true
from sklearn.utils.testing import clean_warning_registry
import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
def test_l2_deprecation():
clean_warning_registry()
with warnings.catch_warnings(record=True) as w:
assert_equal(l1_min_c(dense_X, Y1, "l2"),
l1_min_c(dense_X, Y1, "squared_hinge"))
assert_equal(w[0].category, DeprecationWarning)
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
@nose.tools.raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@nose.tools.raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| bsd-3-clause |
ClinicalGraphics/scikit-image | doc/examples/segmentation/plot_threshold_adaptive.py | 22 | 1307 | """
=====================
Adaptive Thresholding
=====================
Thresholding is the simplest way to segment objects from a background. If that
background is relatively uniform, then you can use a global threshold value to
binarize the image by pixel-intensity. If there's large variation in the
background intensity, however, adaptive thresholding (a.k.a. local or dynamic
thresholding) may produce better results.
Here, we binarize an image using the `threshold_adaptive` function, which
calculates thresholds in regions of size `block_size` surrounding each pixel
(i.e. local neighborhoods). Each threshold value is the weighted mean of the
local neighborhood minus an offset value.
"""
import matplotlib.pyplot as plt
from skimage import data
from skimage.filters import threshold_otsu, threshold_adaptive
image = data.page()
global_thresh = threshold_otsu(image)
binary_global = image > global_thresh
block_size = 40
binary_adaptive = threshold_adaptive(image, block_size, offset=10)
fig, axes = plt.subplots(nrows=3, figsize=(7, 8))
ax0, ax1, ax2 = axes
plt.gray()
ax0.imshow(image)
ax0.set_title('Image')
ax1.imshow(binary_global)
ax1.set_title('Global thresholding')
ax2.imshow(binary_adaptive)
ax2.set_title('Adaptive thresholding')
for ax in axes:
ax.axis('off')
plt.show()
| bsd-3-clause |
WafaaT/spark-tk | regression-tests/sparktkregtests/testcases/frames/extreme_value_test.py | 11 | 8496 | # vim: set encoding=utf-8
# Copyright (c) 2016 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
""" Exercise NaN and Inf values in various contexts. """
import unittest
import numpy as np
import math
import sys
import os
from sparktkregtests.lib import sparktk_test
class ExtremeValueTest(sparktk_test.SparkTKTestCase):
def setUp(self):
self.data_proj = self.get_file("extreme_value.csv")
self.schema_proj = [("col_A", int),
("col_B", int),
("col_C", float),
("Double", float),
("Text", str)]
def test_extreme_projection(self):
""" Test projection including Inf / NaN data """
master = self.context.frame.import_csv(
self.data_proj, schema=self.schema_proj)
self.assertEqual(master.count(), 7)
# Add a new column; replace some values with +/-Inf or NaN
def add_extremes(row):
new_val = {123456: np.inf,
777: -np.inf,
4321: np.nan}
return new_val.get(row["col_A"], row["col_A"])
master.add_columns(add_extremes, ("col_D", float))
proj_3col = master.copy(['col_D', 'Double', 'Text'])
self.assertEqual(proj_3col.count(), master.count())
self.assertEqual(len(proj_3col.column_names), 3)
proj_1col = master.copy({'col_A': 'extremes'})
self.assertEqual(proj_1col.count(), master.count())
self.assertEqual(len(proj_1col.column_names), 1)
#check if NaN/inf values are present
test_extreme = master.to_pandas()
for index, row in test_extreme.iterrows():
if(row['col_A'] == 123456 or row['col_A'] == 777):
self.assertTrue(math.isinf(row['col_D']))
if(row['col_A'] == 4321):
self.assertTrue(math.isnan(row['col_D']))
def test_extreme_copy(self):
""" Test copy with Inf / NaN data """
# Add a new column; replace some values with +/-Inf or NaN
def add_extremes(row):
new_val = {700: np.inf,
701: -np.inf,
702: np.nan}
return new_val.get(row["col_A"], row["col_A"])
frame = self.context.frame.import_csv(
self.data_proj, schema=self.schema_proj)
frame.add_columns(add_extremes, ("col_D", float))
frame_copy = frame.copy()
self.assertEqual(frame.column_names, frame_copy.column_names)
self.assertFramesEqual(frame_copy, frame)
def test_extreme_maxmin32(self):
""" Test extremal 32 bit float values"""
schema_maxmin32 = [("col_A", float),
("col_B", float)]
extreme32 = self.context.frame.import_csv(
self.get_file("BigAndTinyFloat32s.csv"),
schema=schema_maxmin32)
self.assertEqual(extreme32.count(), 16)
extreme32.add_columns(lambda row: [np.sqrt(-9)],
[('neg_root', float)])
extake = extreme32.to_pandas(extreme32.count())
for index, row in extake.iterrows():
self.assertTrue(math.isnan(row['neg_root']))
def test_extreme_maxmin64(self):
""" Test extreme large and small magnitudes on 64-bit floats."""
data_maxmin64 = self.get_file('BigAndTinyFloat64s.csv')
schema_maxmin64 = [("col_A", float),
("col_B", float)]
extreme64 = self.context.frame.import_csv(
data_maxmin64, schema=schema_maxmin64)
self.assertEqual(extreme64.count(), 16)
extreme64.add_columns(lambda row:
[row.col_A*2, np.sqrt(-9)],
[("twice", float),
('neg_root', float)])
extake = extreme64.to_pandas(extreme64.count())
#check for inf when values exceed 64-bit range;
#double the value if outside the range [0,1)
for index, row in extake.iterrows():
if row['col_A'] >= 1 or row['col_A'] < 0:
self.assertTrue(math.isinf(row['twice']))
else:
self.assertEqual(row['twice'], row['col_A'] * 2)
self.assertTrue(math.isnan(row['neg_root']))
def test_extreme_colmode(self):
""" Insert NaN and +/-Inf for weights"""
def add_extremes(row):
new_val = {"Charizard": np.inf,
"Squirtle": -np.inf,
"Wartortle": np.nan}
return new_val.get(row["item"], row['weight'])
data_stat = self.get_file("mode_stats.tsv")
schema_stat = [("weight", float), ("item", int)]
stat_frame = self.context.frame.import_csv(
data_stat, schema=schema_stat, delimiter="\t")
stat_frame.add_columns(add_extremes, ("weight2", float))
stats = stat_frame.column_mode(
"item", "weight2", max_modes_returned=50)
self.assertEqual(stats.mode_count, 2)
self.assertEqual(stats.total_weight, 1749)
        self.assertIn(60, stats.modes)
def test_extreme_col_summary(self):
""" Test column_summary_stats with Inf / NaN data """
# Add a new column; replace some values with +/-Inf or NaN
def add_extremes(row):
new_val = {20: np.inf,
30: -np.inf,
40: np.nan}
return new_val.get(row.Item, row.Item)
data_mode = self.get_file("mode_stats2.csv")
schema_mode = [("Item", int), ("Weight", int)]
stat_frame = self.context.frame.import_csv(
data_mode, schema=schema_mode)
# Create new column where only values 10 and 50 are valid;
stat_frame.add_columns(add_extremes, ("Item2", float))
stat_frame.drop_columns("Item")
stat_frame.rename_columns({"Item2": "Item"})
stats = stat_frame.column_summary_statistics(
"Item",
weights_column="Weight",
use_popultion_variance=True)
self.assertEqual(stats.mean, 30)
self.assertEqual(stats.good_row_count, 20)
self.assertAlmostEqual(stats.mean_confidence_upper, 34.3826932359)
def test_extreme_col_summary_corner(self):
""" Test column_summary_stats with very large values and odd cases."""
def add_extremes(row):
bf = float(2) ** 1021
new_val = {600: 30,
1: 0,
2: bf * 2,
3: float(2) ** 1023,
3.1: 5.4}
return new_val.get(row.Factors, row.Factors)
expected_stats = [1624.6, 56, 563700.64]
data_corner = self.get_file("SummaryStats2.csv")
schema_corner = [("Item", float), ("Factors", float)]
stat_frame = self.context.frame.import_csv(
data_corner, schema=schema_corner)
stat_frame.add_columns(add_extremes, ("Weight", float))
stat_frame.drop_columns("Factors")
stats = stat_frame.column_summary_statistics(
"Item", weights_column="Weight")
self.assertAlmostEqual([stats.mean,
stats.good_row_count,
stats.variance], expected_stats)
def test_extreme_col_summary_empty_frame(self):
expected_stats = [float('nan'), 0, 1.0]
schema = [("Item", float), ("Weight", float)]
stat_frame = self.context.frame.create([], schema=schema)
stats = stat_frame.column_summary_statistics(
"Item", weights_column="Weight")
self.assertTrue([math.isnan(stats.maximum),
stats.good_row_count,
stats.geometric_mean], expected_stats)
if __name__ == "__main__":
unittest.main()
| apache-2.0 |
opikalo/pyfire | control/robot_control.py | 1 | 15320 | """ Makes a robot implementation from the class roam around the path
within the game """
from __future__ import division
from math import *
import random
import time
import matplotlib.pyplot as plt
import networkx as nx
from planning.astar.global_map import (plot_map, GlobalMap,
MIN_UNCONTRAINED_PENALTY)
from copy import deepcopy
from collections import OrderedDict
from planning.astar.local_graph import plan_path
from smoothing.gd import smooth_graph, graph_to_path
from steering.steering import Steering
# don't change the noise parameters
#steering_noise = 0.1
#distance_noise = 0.03
#measurement_noise = 0.05
# ------------------------------------------------
#
# this is the robot class
#
class robot:
# --------
# init:
# creates robot and initializes location/orientation to 0, 0, 0
#
def __init__(self, length = 40):
self.x = 0.0
self.y = 0.0
self.orientation = 0.0
self.length = length
self.steering_noise = 0.0
self.distance_noise = 0.0
self.measurement_noise = 0.0
self.penalty = 0
self.num_steps = 0
self.steering = Steering()
# --------
# set:
# sets a robot coordinate
#
def set(self, new_x, new_y, new_orientation):
self.x = float(new_x)
self.y = float(new_y)
self.orientation = float(new_orientation) % (2.0 * pi)
# --------
# set_noise:
# sets the noise parameters
#
def set_noise(self, new_s_noise, new_d_noise, new_m_noise):
# makes it possible to change the noise parameters
# this is often useful in particle filters
self.steering_noise = float(new_s_noise)
self.distance_noise = float(new_d_noise)
self.measurement_noise = float(new_m_noise)
# --------
# check:
    # checks if the robot pose collides with an obstacle, or
# is too far outside the plane
def check_collision(self, global_map):
penalty = global_map.check_collision([self.x, self.y])
if penalty == MIN_UNCONTRAINED_PENALTY:
return False
self.penalty += penalty
return True
def check_goal(self, goal, threshold = 20.0):
dist = sqrt((float(goal[0]) - self.x) ** 2 + (float(goal[1]) - self.y) ** 2)
return dist < threshold
# --------
# move:
# steering = front wheel steering angle, limited by max_steering_angle
    # distance = total distance driven, must be non-negative
def move(self, steering, distance,
tolerance = 0.0001, max_steering_angle = pi / 6.0, real=False, fps=25):
if steering > max_steering_angle:
steering = max_steering_angle
if steering < -max_steering_angle:
steering = -max_steering_angle
if distance < 0.0:
distance = 0.0
cmds = []
if real:
if distance:
cmds.append(('up', distance*0.005*(25/fps)))
if steering > 0:
cmds.append(('right', steering*.9*(25/fps)))
else:
cmds.append(('left', abs(steering)*.9*(25/fps)))
# make a new copy
res = robot()
res.length = self.length
res.steering_noise = self.steering_noise
res.distance_noise = self.distance_noise
res.measurement_noise = self.measurement_noise
res.penalty = self.penalty
res.num_steps = self.num_steps + 1
# apply noise
steering2 = random.gauss(steering, self.steering_noise)
distance2 = random.gauss(distance, self.distance_noise)
# Execute motion
turn = tan(steering2) * distance2 / res.length
if abs(turn) < tolerance:
# approximate by straight line motion
res.x = self.x + (distance2 * cos(self.orientation))
res.y = self.y + (distance2 * sin(self.orientation))
res.orientation = (self.orientation + turn) % (2.0 * pi)
else:
# approximate bicycle model for motion
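            # bicycle model: turning radius = distance / turn angle; the pose is
            # rotated about the centre (cx, cy) lying perpendicular to the heading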
radius = distance2 / turn
cx = self.x - (sin(self.orientation) * radius)
cy = self.y + (cos(self.orientation) * radius)
res.orientation = (self.orientation + turn) % (2.0 * pi)
res.x = cx + (sin(res.orientation) * radius)
res.y = cy - (cos(res.orientation) * radius)
# check for collision
# res.check_collision(grid)
if real:
return res, cmds
return res
# --------
# sense:
#
def sense(self):
return [random.gauss(self.x, self.measurement_noise),
random.gauss(self.y, self.measurement_noise)]
# --------
# measurement_prob
# computes the probability of a measurement
#
def measurement_prob(self, measurement):
# compute errors
error_x = measurement[0] - self.x
error_y = measurement[1] - self.y
# calculate Gaussian
error = exp(- (error_x ** 2) / (self.measurement_noise ** 2) / 2.0) \
/ sqrt(2.0 * pi * (self.measurement_noise ** 2))
error *= exp(- (error_y ** 2) / (self.measurement_noise ** 2) / 2.0) \
/ sqrt(2.0 * pi * (self.measurement_noise ** 2))
return error
def __repr__(self):
# return '[x=%.5f y=%.5f orient=%.5f]' % (self.x, self.y, self.orientation)
return '[%.5f, %.5f]' % (self.x, self.y)
# ------------------------------------------------
#
# this is the particle filter class
#
class particles:
# --------
# init:
# creates particle set with given initial position
#
def __init__(self, x, y, theta,
steering_noise, distance_noise, measurement_noise, N = 100):
self.N = N
self.steering_noise = steering_noise
self.distance_noise = distance_noise
self.measurement_noise = measurement_noise
self.data = []
for i in range(self.N):
r = robot()
r.set(x, y, theta)
r.set_noise(steering_noise, distance_noise, measurement_noise)
self.data.append(r)
# --------
#
# extract position from a particle set
#
def get_position(self):
x = 0.0
y = 0.0
orientation = 0.0
for i in range(self.N):
x += self.data[i].x
y += self.data[i].y
# orientation is tricky because it is cyclic. By normalizing
# around the first particle we are somewhat more robust to
# the 0=2pi problem
orientation += (((self.data[i].orientation
- self.data[0].orientation + pi) % (2.0 * pi))
+ self.data[0].orientation - pi)
return [x / self.N, y / self.N, orientation / self.N]
# --------
#
# motion of the particles
#
def move(self, steer, speed):
newdata = []
for i in range(self.N):
r = self.data[i].move(steer, speed)
newdata.append(r)
self.data = newdata
# --------
#
# sensing and resampling
#
def sense(self, Z):
w = []
for i in range(self.N):
w.append(self.data[i].measurement_prob(Z))
# resampling (careful, this is using shallow copy)
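        # resampling wheel: step around the weight array with random strides of at
        # most 2*max(w) so particles survive in proportion to their measurement probability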
p3 = []
index = int(random.random() * self.N)
beta = 0.0
mw = max(w)
for i in range(self.N):
beta += random.random() * 2.0 * mw
while beta > w[index]:
beta -= w[index]
index = (index + 1) % self.N
p3.append(self.data[index])
self.data = p3
# --------
#
# run: runs control program for the robot
#
def run(global_map, start, goal, spath, params, printflag = False, speed = 5, timeout = 2000):
mg = nx.DiGraph()
myrobot = robot()
myrobot.set(start[0], start[1], pi/2)
mg.add_node(0, pos=(myrobot.x, myrobot.y))
myrobot.set_noise(steering_noise, distance_noise, measurement_noise)
filter = particles(myrobot.x, myrobot.y, myrobot.orientation,
steering_noise, distance_noise, measurement_noise)
cte = 0.0
err = 0.0
N = 0
index = 0 # index into the path
while not myrobot.check_goal(goal) and N < timeout:
diff_cte = - cte
# ----------------------------------------
# compute the CTE
# start with the present robot estimate
estimate = filter.get_position()
        # project the position estimate onto the current path segment and
        # advance the segment index once the projection passes the segment end
x, y, theta = estimate
while True:
x1, y1 = spath[index]
Rx = x - x1
Ry = y - y1
x2, y2 = spath[index + 1]
dx = x2 - x1
dy = y2 - y1
u = abs(Rx*dx + Ry*dy)/(dx*dx + dy*dy)
if u > 1 and index < (len(spath) - 2):
index +=1
else:
break
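        # signed cross-track error: 2-D cross product of the segment direction with
        # the robot offset, divided by the squared segment length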
cte = (Ry * dx - Rx * dy) / (dx * dx + dy * dy)
# ----------------------------------------
diff_cte += cte
steer = - params[0] * cte - params[1] * diff_cte
myrobot = myrobot.move(steer, speed)
filter.move(steer, speed)
Z = myrobot.sense()
filter.sense(Z)
#update penalty
myrobot.check_collision(global_map)
err += (cte ** 2)
N += 1
mg.add_node(N, pos=(myrobot.x, myrobot.y))
mg.add_edge(N-1, N)
if printflag:
print myrobot, cte, index, u
return [myrobot.check_goal(goal), myrobot.penalty, myrobot.num_steps, mg]
# ------------------------------------------------
#
# this is our main routine
#
def main(global_map, init, goal, steering_noise, distance_noise, measurement_noise,
weight_data, weight_smooth, p_gain, d_gain):
start_pos = [2650, 2650]
goal_pos = [1900, 400]
graph_path = plan_path(start_pos, goal_pos)
path_pos = nx.get_node_attributes(graph_path, 'pos')
sg = smooth_graph(graph_path, start_pos, goal_pos, True,
weight_data, weight_smooth)
sg_pos = nx.get_node_attributes(sg, 'pos')
spath = graph_to_path(sg)
global_map.plot()
nx.draw(sg, sg_pos, node_size=5, edge_color='r')
(reached_goal, penalty, num_steps, mg) = run(global_map, start_pos,
goal_pos, spath,
[p_gain, d_gain])
mg_pos = nx.get_node_attributes(mg, 'pos')
nx.draw(mg, mg_pos, node_size=5, edge_color='b')
plt.show()
class Simulate(object):
def __init__(self):
self.start_pos = [2650, 2650]
self.goal_pos = [1900, 400]
self.graph_path = plan_path(self.start_pos, self.goal_pos)
self.global_map = GlobalMap()
def run(self, weight_data, weight_smooth, p_gain, d_gain):
print "starting run with", weight_data, weight_smooth, p_gain, d_gain
sg = smooth_graph(self.graph_path, self.start_pos, self.goal_pos,
False, weight_data, weight_smooth)
spath = graph_to_path(sg)
(reached_goal, penalty, num_steps, mg) = run(self.global_map,
self.start_pos,
self.goal_pos,
spath,
[p_gain, d_gain])
print "finished run:", reached_goal, penalty, num_steps
#self.global_map.plot()
#mg_pos = nx.get_node_attributes(mg, 'pos')
#nx.draw(mg, mg_pos, node_size=5, edge_color='b')
#plt.show(block=False)
return (reached_goal, penalty, num_steps)
# ------------------------------------------------
#
# input data and parameters
#
# grid format:
# 0 = navigable space
# 1 = occupied space
grid = [[0, 1, 0, 0, 0, 0],
[0, 1, 0, 1, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 0, 0, 1, 0, 1],
[0, 1, 0, 1, 0, 0]]
init = [0, 0]
goal = [len(grid)-1, len(grid[0])-1]
steering_noise = 0.01
distance_noise = 0.03
measurement_noise = 0.05
weight_data = 1.1
weight_smooth = 0.2
p_gain = 2.0
d_gain = 6.0
#main(GlobalMap(), init, goal, steering_noise, distance_noise, measurement_noise,
# weight_data, weight_smooth, p_gain, d_gain)
def twiddle(init_params):
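    # Coordinate-search ("twiddle") tuning of [weight_data, weight_smooth, p_gain, d_gain]:
    # perturb one parameter at a time, keep the change if the error (averaged over K noisy
    # simulation runs) improves, otherwise shrink that parameter's probe step.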
n_params = len(init_params)
dparams = [1.0 for row in range(n_params)]
params = [0.0 for row in range(n_params)]
K = 10
NO_GOAL_PENALTY = 10e10
s = Simulate()
for i in range(n_params):
params[i] = init_params[i]
    best_error = 0.0
for k in range(K):
ret = s.run(params[0], params[1], params[2], params[3])
if ret[0]:
best_error += ret[1] * 100 + ret[2]
else:
best_error += NO_GOAL_PENALTY
best_error = float(best_error) / float(k+1)
print best_error
n = 0
while sum(dparams) > 0.0000001:
for i in range(len(params)):
params[i] += dparams[i]
err = 0
for k in range(K):
ret = s.run(params[0], params[1], params[2], params[3])
if ret[0]:
err += ret[1] * 100 + ret[2]
else:
err += NO_GOAL_PENALTY
print float(err) / float(k+1)
if err < best_error:
best_error = float(err) / float(k+1)
dparams[i] *= 1.1
else:
params[i] -= 2.0 * dparams[i]
err = 0
for k in range(K):
ret = s.run(params[0], params[1], params[2], params[3])
if ret[0]:
err += ret[1] * 100 + ret[2]
else:
err += NO_GOAL_PENALTY
print float(err) / float(k+1)
if err < best_error:
best_error = float(err) / float(k+1)
dparams[i] *= 1.1
else:
params[i] += dparams[i]
dparams[i] *= 0.5
n += 1
print 'Twiddle #', n, params, ' -> ', best_error
print ' '
return params
def mytwiddle(init_param, tol = 0.2): #Make this tolerance bigger if you are timing out!
p = [0, 0, 0]
dp = [1, 1, 1]
best_err = run(p)
while (sum([abs(x) for x in dp]) > tol):
for i in range(len(p)):
p[i] += dp[i]
err = run(p)
if err < best_err:
best_err = err
dp[i]*=1.1
continue
p[i] -= 2*dp[i]
err = run(p)
if err < best_err:
best_err = err
dp[i]-= 2*dp[i]
continue
p[i] += dp[i]
dp[i]*=0.9
#print "param", p, "dp", dp, "best_err", best_err
return run(p)
#twiddle([weight_data, weight_smooth, p_gain, d_gain])
| mit |
abhishekgahlot/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
| bsd-3-clause |
mwv/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# Not squared similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
andreshp/Algorithms | Problems/Hackerrank/IndeedPrime/6_regression.py | 1 | 1536 | ##!/usr/bin/python
#######################################################################
# Author: Andrés Herrera Poyatos
# Universidad de Granada, June, 2015
# Indeed Prime Challenge
# Problem 6
########################################################################
import numpy
import math
from sklearn import linear_model
#-------------------------------- FUNCTIONS --------------------------------#
def preprocessing(new_line):
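    # Parse a line of the form "a-b-c <sep> d-e-f <value>" into six integer features;
    # the target is the fourth whitespace-separated token, or None when that token
    # contains an underscore (presumably a row whose value must be predicted).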
instance = [None] * 6
first_split = new_line.split()
first_part_split = first_split[0].split("-")
second_part_split = first_split[2].split("-")
instance[0] = int(first_part_split[0])
instance[1] = int(first_part_split[1])
instance[2] = int(first_part_split[2])
instance[3] = int(second_part_split[0])
instance[4] = int(second_part_split[1])
instance[5] = int(second_part_split[2])
return instance, int(first_split[3]) if len(first_split[3].split("_")) == 1 else None
ROWS = 595
TEST_ROWS = 50
TRAIN_ROWS = ROWS - TEST_ROWS
###############################################################################
# Load data
x = []; y = []
for i in range(0, ROWS):
instance, value = preprocessing(input())
x.append(instance)
y.append(value)
x_train, y_train = x[:TRAIN_ROWS], y[:TRAIN_ROWS]
x_test, y_test = x[TRAIN_ROWS:], y[TRAIN_ROWS:]
###############################################################################
clf = linear_model.BayesianRidge()
clf.fit(x_train, y_train)
y = clf.predict(x_test)
for p in y:
    print(math.trunc(p))
| gpl-2.0 |
scenarios/tensorflow | tensorflow/examples/learn/boston.py | 13 | 1945 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNRegressor for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
from sklearn import preprocessing
import tensorflow as tf
def main(unused_argv):
# Load dataset
boston = tf.contrib.learn.datasets.load_dataset('boston')
x, y = boston.data, boston.target
# Split dataset into train / test
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
x, y, test_size=0.2, random_state=42)
# Scale data (training set) to 0 mean and unit standard deviation.
scaler = preprocessing.StandardScaler()
x_train = scaler.fit_transform(x_train)
# Build 2 layer fully connected DNN with 10, 10 units respectively.
feature_columns = tf.contrib.learn.infer_real_valued_columns_from_input(
x_train)
regressor = tf.contrib.learn.DNNRegressor(
feature_columns=feature_columns, hidden_units=[10, 10])
# Fit
regressor.fit(x_train, y_train, steps=5000, batch_size=1)
# Predict and score
y_predicted = list(
regressor.predict(
scaler.transform(x_test), as_iterable=True))
score = metrics.mean_squared_error(y_predicted, y_test)
print('MSE: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
EvanzzzZ/mxnet | example/rcnn/rcnn/pycocotools/coco.py | 17 | 18296 | __author__ = 'tylin'
__version__ = '2.0'
# Interface for accessing the Microsoft COCO dataset.
# Microsoft COCO is a large image dataset designed for object detection,
# segmentation, and caption generation. pycocotools is a Python API that
# assists in loading, parsing and visualizing the annotations in COCO.
# Please visit http://mscoco.org/ for more information on COCO, including
# for the data, paper, and tutorials. The exact format of the annotations
# is also described on the COCO website. For example usage of the pycocotools
# please see pycocotools_demo.ipynb. In addition to this API, please download both
# the COCO images and annotations in order to run the demo.
# An alternative to using the API is to load the annotations directly
# into Python dictionary
# Using the API provides additional utility functions. Note that this API
# supports both *instance* and *caption* annotations. In the case of
# captions not all functions are defined (e.g. categories are undefined).
# The following API functions are defined:
# COCO - COCO api class that loads COCO annotation file and prepare data structures.
# decodeMask - Decode binary mask M encoded via run-length encoding.
# encodeMask - Encode binary mask M using run-length encoding.
# getAnnIds - Get ann ids that satisfy given filter conditions.
# getCatIds - Get cat ids that satisfy given filter conditions.
# getImgIds - Get img ids that satisfy given filter conditions.
# loadAnns - Load anns with the specified ids.
# loadCats - Load cats with the specified ids.
# loadImgs - Load imgs with the specified ids.
# annToMask - Convert segmentation in an annotation to binary mask.
# showAnns - Display the specified annotations.
# loadRes - Load algorithm results and create API for accessing them.
# download - Download COCO images from mscoco.org server.
# Throughout the API "ann"=annotation, "cat"=category, and "img"=image.
# Help on each functions can be accessed by: "help COCO>function".
# See also COCO>decodeMask,
# COCO>encodeMask, COCO>getAnnIds, COCO>getCatIds,
# COCO>getImgIds, COCO>loadAnns, COCO>loadCats,
# COCO>loadImgs, COCO>annToMask, COCO>showAnns
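# A minimal usage sketch (illustrative only; the annotation path is hypothetical):
#   coco = COCO('annotations/instances_val2014.json')
#   catIds = coco.getCatIds(catNms=['person'])
#   imgIds = coco.getImgIds(catIds=catIds)
#   anns = coco.loadAnns(coco.getAnnIds(imgIds=imgIds[:1]))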
# Microsoft COCO Toolbox. version 2.0
# Data, paper, and tutorials available at: http://mscoco.org/
# Code written by Piotr Dollar and Tsung-Yi Lin, 2014.
# Licensed under the Simplified BSD License [see bsd.txt]
import json
import time
import matplotlib.pyplot as plt
from matplotlib.collections import PatchCollection
from matplotlib.patches import Polygon
import numpy as np
import copy
import itertools
from . import mask as maskUtils
import os
from collections import defaultdict
import sys
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
from urllib import urlretrieve
elif PYTHON_VERSION == 3:
from urllib.request import urlretrieve
class COCO:
def __init__(self, annotation_file=None):
"""
Constructor of Microsoft COCO helper class for reading and visualizing annotations.
:param annotation_file (str): location of annotation file
:param image_folder (str): location to the folder that hosts images.
:return:
"""
# load dataset
self.dataset,self.anns,self.cats,self.imgs = dict(),dict(),dict(),dict()
self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
if not annotation_file == None:
print('loading annotations into memory...')
tic = time.time()
dataset = json.load(open(annotation_file, 'r'))
assert type(dataset)==dict, 'annotation file format {} not supported'.format(type(dataset))
print('Done (t={:0.2f}s)'.format(time.time()- tic))
self.dataset = dataset
self.createIndex()
def createIndex(self):
# create index
print('creating index...')
anns, cats, imgs = {}, {}, {}
imgToAnns,catToImgs = defaultdict(list),defaultdict(list)
if 'annotations' in self.dataset:
for ann in self.dataset['annotations']:
imgToAnns[ann['image_id']].append(ann)
anns[ann['id']] = ann
if 'images' in self.dataset:
for img in self.dataset['images']:
imgs[img['id']] = img
if 'categories' in self.dataset:
for cat in self.dataset['categories']:
cats[cat['id']] = cat
if 'annotations' in self.dataset and 'categories' in self.dataset:
for ann in self.dataset['annotations']:
catToImgs[ann['category_id']].append(ann['image_id'])
print('index created!')
# create class members
self.anns = anns
self.imgToAnns = imgToAnns
self.catToImgs = catToImgs
self.imgs = imgs
self.cats = cats
def info(self):
"""
Print information about the annotation file.
:return:
"""
for key, value in self.dataset['info'].items():
print('{}: {}'.format(key, value))
def getAnnIds(self, imgIds=[], catIds=[], areaRng=[], iscrowd=None):
"""
Get ann ids that satisfy given filter conditions. default skips that filter
:param imgIds (int array) : get anns for given imgs
catIds (int array) : get anns for given cats
areaRng (float array) : get anns for given area range (e.g. [0 inf])
iscrowd (boolean) : get anns for given crowd label (False or True)
:return: ids (int array) : integer array of ann ids
"""
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == len(areaRng) == 0:
anns = self.dataset['annotations']
else:
if not len(imgIds) == 0:
lists = [self.imgToAnns[imgId] for imgId in imgIds if imgId in self.imgToAnns]
anns = list(itertools.chain.from_iterable(lists))
else:
anns = self.dataset['annotations']
anns = anns if len(catIds) == 0 else [ann for ann in anns if ann['category_id'] in catIds]
anns = anns if len(areaRng) == 0 else [ann for ann in anns if ann['area'] > areaRng[0] and ann['area'] < areaRng[1]]
if not iscrowd == None:
ids = [ann['id'] for ann in anns if ann['iscrowd'] == iscrowd]
else:
ids = [ann['id'] for ann in anns]
return ids
def getCatIds(self, catNms=[], supNms=[], catIds=[]):
"""
filtering parameters. default skips that filter.
:param catNms (str array) : get cats for given cat names
:param supNms (str array) : get cats for given supercategory names
:param catIds (int array) : get cats for given cat ids
:return: ids (int array) : integer array of cat ids
"""
catNms = catNms if type(catNms) == list else [catNms]
supNms = supNms if type(supNms) == list else [supNms]
catIds = catIds if type(catIds) == list else [catIds]
if len(catNms) == len(supNms) == len(catIds) == 0:
cats = self.dataset['categories']
else:
cats = self.dataset['categories']
cats = cats if len(catNms) == 0 else [cat for cat in cats if cat['name'] in catNms]
cats = cats if len(supNms) == 0 else [cat for cat in cats if cat['supercategory'] in supNms]
cats = cats if len(catIds) == 0 else [cat for cat in cats if cat['id'] in catIds]
ids = [cat['id'] for cat in cats]
return ids
def getImgIds(self, imgIds=[], catIds=[]):
'''
Get img ids that satisfy given filter conditions.
:param imgIds (int array) : get imgs for given ids
:param catIds (int array) : get imgs with all given cats
:return: ids (int array) : integer array of img ids
'''
imgIds = imgIds if type(imgIds) == list else [imgIds]
catIds = catIds if type(catIds) == list else [catIds]
if len(imgIds) == len(catIds) == 0:
ids = self.imgs.keys()
else:
ids = set(imgIds)
for i, catId in enumerate(catIds):
if i == 0 and len(ids) == 0:
ids = set(self.catToImgs[catId])
else:
ids &= set(self.catToImgs[catId])
return list(ids)
def loadAnns(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying anns
:return: anns (object array) : loaded ann objects
"""
if type(ids) == list:
return [self.anns[id] for id in ids]
elif type(ids) == int:
return [self.anns[ids]]
def loadCats(self, ids=[]):
"""
Load cats with the specified ids.
:param ids (int array) : integer ids specifying cats
:return: cats (object array) : loaded cat objects
"""
if type(ids) == list:
return [self.cats[id] for id in ids]
elif type(ids) == int:
return [self.cats[ids]]
def loadImgs(self, ids=[]):
"""
Load anns with the specified ids.
:param ids (int array) : integer ids specifying img
:return: imgs (object array) : loaded img objects
"""
if type(ids) == list:
return [self.imgs[id] for id in ids]
elif type(ids) == int:
return [self.imgs[ids]]
def showAnns(self, anns):
"""
Display the specified annotations.
:param anns (array of object): annotations to display
:return: None
"""
if len(anns) == 0:
return 0
if 'segmentation' in anns[0] or 'keypoints' in anns[0]:
datasetType = 'instances'
elif 'caption' in anns[0]:
datasetType = 'captions'
else:
raise Exception('datasetType not supported')
if datasetType == 'instances':
ax = plt.gca()
ax.set_autoscale_on(False)
polygons = []
color = []
for ann in anns:
c = (np.random.random((1, 3))*0.6+0.4).tolist()[0]
if 'segmentation' in ann:
if type(ann['segmentation']) == list:
# polygon
for seg in ann['segmentation']:
poly = np.array(seg).reshape((int(len(seg)/2), 2))
polygons.append(Polygon(poly))
color.append(c)
else:
# mask
t = self.imgs[ann['image_id']]
if type(ann['segmentation']['counts']) == list:
rle = maskUtils.frPyObjects([ann['segmentation']], t['height'], t['width'])
else:
rle = [ann['segmentation']]
m = maskUtils.decode(rle)
img = np.ones( (m.shape[0], m.shape[1], 3) )
if ann['iscrowd'] == 1:
color_mask = np.array([2.0,166.0,101.0])/255
if ann['iscrowd'] == 0:
color_mask = np.random.random((1, 3)).tolist()[0]
for i in range(3):
img[:,:,i] = color_mask[i]
ax.imshow(np.dstack( (img, m*0.5) ))
if 'keypoints' in ann and type(ann['keypoints']) == list:
# turn skeleton into zero-based index
sks = np.array(self.loadCats(ann['category_id'])[0]['skeleton'])-1
kp = np.array(ann['keypoints'])
x = kp[0::3]
y = kp[1::3]
v = kp[2::3]
for sk in sks:
if np.all(v[sk]>0):
plt.plot(x[sk],y[sk], linewidth=3, color=c)
plt.plot(x[v>0], y[v>0],'o',markersize=8, markerfacecolor=c, markeredgecolor='k',markeredgewidth=2)
plt.plot(x[v>1], y[v>1],'o',markersize=8, markerfacecolor=c, markeredgecolor=c, markeredgewidth=2)
p = PatchCollection(polygons, facecolor=color, linewidths=0, alpha=0.4)
ax.add_collection(p)
p = PatchCollection(polygons, facecolor='none', edgecolors=color, linewidths=2)
ax.add_collection(p)
elif datasetType == 'captions':
for ann in anns:
print(ann['caption'])
def loadRes(self, resFile):
"""
Load result file and return a result api object.
:param resFile (str) : file name of result file
:return: res (obj) : result api object
"""
res = COCO()
res.dataset['images'] = [img for img in self.dataset['images']]
print('Loading and preparing results...')
tic = time.time()
if type(resFile) == str or type(resFile) == unicode:
anns = json.load(open(resFile))
elif type(resFile) == np.ndarray:
anns = self.loadNumpyAnnotations(resFile)
else:
anns = resFile
        assert type(anns) == list, 'results is not an array of objects'
annsImgIds = [ann['image_id'] for ann in anns]
assert set(annsImgIds) == (set(annsImgIds) & set(self.getImgIds())), \
'Results do not correspond to current coco set'
if 'caption' in anns[0]:
imgIds = set([img['id'] for img in res.dataset['images']]) & set([ann['image_id'] for ann in anns])
res.dataset['images'] = [img for img in res.dataset['images'] if img['id'] in imgIds]
for id, ann in enumerate(anns):
ann['id'] = id+1
elif 'bbox' in anns[0] and not anns[0]['bbox'] == []:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
bb = ann['bbox']
x1, x2, y1, y2 = [bb[0], bb[0]+bb[2], bb[1], bb[1]+bb[3]]
if not 'segmentation' in ann:
ann['segmentation'] = [[x1, y1, x1, y2, x2, y2, x2, y1]]
ann['area'] = bb[2]*bb[3]
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'segmentation' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
# now only support compressed RLE format as segmentation results
ann['area'] = maskUtils.area(ann['segmentation'])
if not 'bbox' in ann:
ann['bbox'] = maskUtils.toBbox(ann['segmentation'])
ann['id'] = id+1
ann['iscrowd'] = 0
elif 'keypoints' in anns[0]:
res.dataset['categories'] = copy.deepcopy(self.dataset['categories'])
for id, ann in enumerate(anns):
s = ann['keypoints']
x = s[0::3]
y = s[1::3]
x0,x1,y0,y1 = np.min(x), np.max(x), np.min(y), np.max(y)
ann['area'] = (x1-x0)*(y1-y0)
ann['id'] = id + 1
ann['bbox'] = [x0,y0,x1-x0,y1-y0]
print('DONE (t={:0.2f}s)'.format(time.time()- tic))
res.dataset['annotations'] = anns
res.createIndex()
return res
def download(self, tarDir = None, imgIds = [] ):
'''
Download COCO images from mscoco.org server.
:param tarDir (str): COCO results directory name
imgIds (list): images to be downloaded
:return:
'''
if tarDir is None:
print('Please specify target directory')
return -1
if len(imgIds) == 0:
imgs = self.imgs.values()
else:
imgs = self.loadImgs(imgIds)
N = len(imgs)
if not os.path.exists(tarDir):
os.makedirs(tarDir)
for i, img in enumerate(imgs):
tic = time.time()
fname = os.path.join(tarDir, img['file_name'])
if not os.path.exists(fname):
urlretrieve(img['coco_url'], fname)
print('downloaded {}/{} images (t={:0.1f}s)'.format(i, N, time.time()- tic))
def loadNumpyAnnotations(self, data):
"""
Convert result data from a numpy array [Nx7] where each row contains {imageID,x1,y1,w,h,score,class}
:param data (numpy.ndarray)
:return: annotations (python nested list)
"""
print('Converting ndarray to lists...')
assert(type(data) == np.ndarray)
print(data.shape)
assert(data.shape[1] == 7)
N = data.shape[0]
ann = []
for i in range(N):
if i % 1000000 == 0:
print('{}/{}'.format(i,N))
ann += [{
'image_id' : int(data[i, 0]),
'bbox' : [ data[i, 1], data[i, 2], data[i, 3], data[i, 4] ],
'score' : data[i, 5],
'category_id': int(data[i, 6]),
}]
return ann
def annToRLE(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE to RLE.
:return: binary mask (numpy 2D array)
"""
t = self.imgs[ann['image_id']]
h, w = t['height'], t['width']
segm = ann['segmentation']
if type(segm) == list:
# polygon -- a single object might consist of multiple parts
# we merge all parts into one mask rle code
rles = maskUtils.frPyObjects(segm, h, w)
rle = maskUtils.merge(rles)
elif type(segm['counts']) == list:
# uncompressed RLE
rle = maskUtils.frPyObjects(segm, h, w)
else:
# rle
rle = ann['segmentation']
return rle
def annToMask(self, ann):
"""
Convert annotation which can be polygons, uncompressed RLE, or RLE to binary mask.
:return: binary mask (numpy 2D array)
"""
rle = self.annToRLE(ann)
m = maskUtils.decode(rle)
        return m
| apache-2.0 |
martinggww/lucasenlights | ETF/winwin/src/getDf.py | 2 | 15125 | import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import my_config as config
import numpy as np
import talib as ta
import pandas as pd
import logging, json
STICKERS = config.STICKERS
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
#Append a dict as a row in df
def appendDf(stickers, df, dict,data_frame):
entry = [dict['DATE']]
values = []
for sticker in stickers:
if sticker == 'CASH':
continue
entry.append(dict[sticker])
if data_frame == 'daily':
entry.append(dict['FUND_PERF'])
df.loc[df.shape[0]] = entry
return df
#Must keep the same sequence, DATE, xxx_daily, yyy_daily, ..., FUND_PERF
def getQuantDfColumnNames(stickers, data_frame):
column_names = []
column_names.append('DATE')
for sticker in stickers:
if sticker == 'CASH':
continue
column_names.append(sticker + '_' + data_frame)
if data_frame == 'daily':
column_names.append('FUND_PERF')
return column_names
#Return FeatureDFColumnNames
def getFeatureDfColumnNames(stickers, features, rank_features):
column_names = []
for sticker in stickers:
if sticker == 'CASH':
continue
for feature in features.values():
column_names.append(sticker + feature)
column_names.append(rank_features['_KD_RANK'])
column_names.append(rank_features['_MFI_RANK'])
return column_names
#These are the column names for the data frame
def getDfFeatures(data_frame):
FEATURES = config.FEATURES
RANK_FEATURES = config.RANK_FEATURES
if data_frame == 'weekly':
FEATURES = config.WEEKLY_FEATURES
RANK_FEATURES = config.WEEKLY_RANK_FEATURES
elif data_frame == 'monthly':
FEATURES = config.MONTHLY_FEATURES
RANK_FEATURES = config.MONTHLY_RANK_FEATURES
return FEATURES, RANK_FEATURES
def getStatFeatures(data_frame):
FEATURES = config.STAT_FEATURES
RANK_FEATURES = config.RANK_FEATURES
if data_frame == 'weekly':
FEATURES = config.WEEKLY_STAT_FEATURES
RANK_FEATURES = config.WEEKLY_RANK_FEATURES
elif data_frame == 'monthly':
FEATURES = config.MONTHLY_STAT_FEATURES
RANK_FEATURES = config.MONTHLY_RANK_FEATURES
return FEATURES, RANK_FEATURES
def getFeatureDf():
d, w, m = readCsvFiles()
#ToDo, add a logic to check the last data
d, d_hist_mom = getDailyFeatureDf(d)
w, w_hist_mom = getWeeklyFeatureDf(w)
d = d.dropna()
w = w.dropna()
d = d.reset_index(drop=True)
w = w.reset_index(drop=True)
return d, w, None
'''
input:
    @sticker: string, sticker name
    @time_frame: string, "daily" or "weekly"
output:
    @df: data frame, or None for an unsupported time frame.
    Read the csv file of the given sticker and time frame as a data frame,
    prefixing each price/volume column name with the sticker symbol
    (e.g. xxx_open, xxx_close, xxx_volume).
'''
def readCsv(sticker, time_frame):
file_name = None
if time_frame == "daily":
file_name = '../csv/' + sticker + '.csv'
# timestamp, open, high, low, close, adjusted_close, volume, dividend_amount, split_coefficient
df = pd.read_csv(file_name, sep=',', header=0, names=['date', sticker + '_open', sticker + '_high', \
sticker + '_low', sticker + '_org_close', \
sticker + '_close', \
sticker + '_volume', sticker + '_dividend_amount', \
sticker + '_split_coefficient'])
return df
elif time_frame == "weekly":
file_name = '../csv/' + sticker + '.weekly' + '.csv'
# timestamp, open, high, low, close, adjusted close, volume, dividend amount
df = pd.read_csv(file_name, sep=',', header=0, names=['date', sticker + '_open', sticker + '_high', \
sticker + '_low', sticker + '_org_close', \
sticker + '_close', \
sticker + '_volume', sticker + '_dividend_amount'])
return df
return None
# Read the original .csv dataframe
def readCsvFilesTimeFrame(TimeFrame):
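    # Read every sticker's csv for the given time frame, outer-join the frames on
    # 'date', reverse to chronological order, drop incomplete rows (plus the first
    # two) and reset the index.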
dfs = []
    #Read all symbols' trade data
for sticker in STICKERS:
df = readCsv(sticker, TimeFrame)
dfs.append(df)
df = dfs[0]
for idx in xrange(1, len(dfs), 1):
_ = dfs[idx]
df = pd.merge(df, _, on='date', how='outer')
# Reverse the dataframe
df = df.iloc[::-1]
df = df.dropna()
df.drop(df.index[[0, 1]], inplace=True)
df = df.reset_index(drop=True)
return df
'''
Read original .csv files, daily, weekly and monthly
'''
def readCsvFiles():
df = readCsvFilesTimeFrame('daily')
weekly_df = readCsvFilesTimeFrame('weekly')
#monthly_df = readCsvFilesTimeFrame('monthly')
return df, weekly_df, None#monthly_df
'''Get ranks from high to low, descending...'''
def getRanks(df, feature_name):
sm_names = [sticker + feature_name for sticker in STICKERS]
sm_names.append('CASH'+feature_name)
df_sm = df.loc[:, sm_names]
# For each row, sort the values by column name
ranking_list = []
for index, row in df_sm.iterrows():
temp = row.sort_values(ascending=False, kind='quicksort', inplace=False)
values = temp.to_frame().index.values.tolist()
ranking_list.append([item.split('_')[0] for item in values])
return ranking_list
def getDiv(v1, v2):
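    # Element-wise division of two equal-length sequences, falling back to 1.0
    # wherever the denominator is zero.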
result = []
for index in xrange(0, len(v1), 1):
if v2[index] != 0:
result.append(float(v1[index])/v2[index])
else:
result.append(1.0)
return np.array(result)
'''
This function adds feature names to the feature Df
'''
def AddFeatureToDf(df, data_frame):
# Add the following columns for feature extraction.
FEATURES, RANK_FEATURES = getDfFeatures(data_frame)
feature_columns = getFeatureDfColumnNames(STICKERS, FEATURES, RANK_FEATURES)
feature_df = pd.DataFrame(np.zeros((df.shape[0], len(feature_columns))), columns=feature_columns, dtype=float)
feature_df['date'] = df.date
df = pd.merge(df, feature_df, on='date', how='outer')
df.replace([np.inf, -np.inf], np.nan, inplace=True)
df.dropna(how='any', inplace=True)
if df.isnull().any().any():
logger.error("After merge, has null")
exit(1)
#print df.columns.values
#print df.head(10)
return df
def getWeeklyFeatureDf(df):
# Add the following columns for feature extraction.
df = AddFeatureToDf(df, 'weekly')
FEATURES = config.WEEKLY_FEATURES
RANK_FEATURES = config.WEEKLY_RANK_FEATURES
hist_mom_dict = {}
date = np.asarray(df['date'], dtype=str)
total_len = len(date)
for sticker in config.STICKERS:
kd_date, k, d = extractKD(sticker, date, '_weekly_KD.json')
mfi_date, mfi = extractMFI(sticker, date, '_weekly_MFI.json')
k = postProcess(total_len, k)
d = postProcess(total_len, d)
mfi = postProcess(total_len, mfi)
# get high, low, close and volume
close = np.asarray(df[sticker + '_close'], dtype=float)
#KD
_column_kd = sticker + FEATURES['_KD']
temp = (d + k)/2.0
df[_column_kd] = temp
_ssto_slope = ta.LINEARREG_SLOPE(temp, timeperiod=10)
#KD_SLOPE
_column_ssto_slope = sticker + FEATURES['_KD_SLOPE']
_ssto_slope[np.isnan(_ssto_slope)] = 0
df[_column_ssto_slope] = _ssto_slope
#MFI SLOPE
_column_mfi = sticker + FEATURES['_MFI']
_column_mfi_slope = sticker + FEATURES['_MFI_SLOPE']
mfi = np.array(mfi)
_mfi_slope = ta.LINEARREG_SLOPE(mfi, timeperiod=10)
_mfi_slope[np.isnan(_mfi_slope)] = 0
df[_column_mfi] = mfi
df[_column_mfi_slope] = _mfi_slope
#ROC
_column_roc = sticker + FEATURES['_ROC']
_roc = ta.ROC(close, timeperiod=10)
_roc[np.isnan(_roc)] = 0
df[_column_roc] = _roc
df['CASH_weekly_MFI'] = 50.0
df['CASH_weekly_K'] = 50.0
df['CASH_weekly_D'] = 50.0
df['CASH_weekly_KD'] = 50.0
df['CASH_weekly_HIST_MOM'] = 1.0
df['CASH_weekly_KD_SLOPE'] = 0.0001
df['CASH_weekly_MFI_SLOPE'] = 0.0001
#From high to low, slope from high to low
kd_rank = getRanks(df, '_weekly_KD_SLOPE')
mfi_rank = getRanks(df, '_weekly_MFI_SLOPE')
df[RANK_FEATURES['_KD_RANK']] = kd_rank
df[RANK_FEATURES['_MFI_RANK']] = mfi_rank
return df, hist_mom_dict
def extractKD(sticker, dates,data_frame):
kd = {}
file_name = '../csv/' + sticker + data_frame
with open(file_name) as json_data:
kd = json.load(json_data)
dict_list = kd['Technical Analysis: STOCH']
date_list=[]
k = []
d = []
flag = False
for date, value in sorted(dict_list.iteritems()):
if date >= dates[0] and date <= dates[-1]:
date_list.append(date)
k.append(float(value['SlowK']))
d.append(float(value['SlowD']))
return date_list, np.asarray(k), np.asarray(d)
def extractMFI(sticker, dates, data_frame):
file_name = '../csv/' + sticker + data_frame
with open(file_name) as json_data:
mfi = json.load(json_data)
dict_list = mfi['Technical Analysis: MFI']
date_list=[]
mfi = []
flag = False
for date, value in sorted(dict_list.iteritems()):
if date >= dates[0] and date <= dates[-1]:
date_list.append(date)
mfi.append(float(value['MFI']))
return date_list, np.asarray(mfi)
def postProcess(total_len, data):
list_len = len(data)
if list_len >= total_len:
return data
result = np.full(total_len - list_len, np.nan)
result = np.concatenate([result, data])
return result
def getDailyFeatureDf(df):
# Add the following columns for feature extraction.
df = AddFeatureToDf(df, 'daily')
FEATURES = config.FEATURES
RANK_FEATURES = config.RANK_FEATURES
hist_mom_dict = {}
date = np.asarray(df['date'], dtype=str)
total_len = len(date)
for sticker in config.STICKERS:
kd_date, k, d = extractKD(sticker, date, '_daily_KD.json')
mfi_date, mfi = extractMFI(sticker, date, '_daily_MFI.json')
k = postProcess(total_len, k)
d = postProcess(total_len, d)
mfi = postProcess(total_len, mfi)
# get high, low, close and volume
close = np.asarray(df[sticker + '_close'], dtype=float)
df[sticker + '_high'] = (df[sticker + '_close'] * df[sticker + '_high']) / df[sticker + '_org_close']
df[sticker + '_low'] = (df[sticker + '_close'] * df[sticker + '_low']) / df[sticker + '_org_close']
#20SMA
_column_20_sma = sticker + FEATURES['_20_SMA']
_20_sma = ta.SMA(close, timeperiod=20)
_20_sma[np.isnan(_20_sma)] = 0.0
df[_column_20_sma] = _20_sma
#50SMA
_column_50_sma = sticker + FEATURES['_50_SMA']
_50_sma = ta.SMA(close, timeperiod=50)
_50_sma[np.isnan(_50_sma)] = 0.0
df[_column_50_sma] = _50_sma
#100 SMA
_column_100_sma = sticker + FEATURES['_100_SMA']
_100_sma = ta.SMA(close, timeperiod=100)
_100_sma[np.isnan(_100_sma)] = 0.0
df[_column_100_sma] = _100_sma
#K,D
_column_kd = sticker + FEATURES['_KD']
temp = (d + k) / 2.0
df[_column_kd] = temp
_ssto_slope = ta.LINEARREG_SLOPE(temp, timeperiod=10)
#KD_SLOPE
_column_ssto_slope = sticker + FEATURES['_KD_SLOPE']
#_ssto_slope[np.isnan(_ssto_slope)] = 0
df[_column_ssto_slope] = _ssto_slope
#MFI
_column_mfi = sticker + FEATURES['_MFI']
_column_mfi_slope = sticker + FEATURES['_MFI_SLOPE']
#MFI_SLOPE
mfi = np.array(mfi)
df[_column_mfi] = mfi
_mfi_slope = ta.LINEARREG_SLOPE(mfi, timeperiod=10)
#_mfi_slope[np.isnan(_mfi_slope)] = 0
df[_column_mfi_slope] = _mfi_slope
#ROC
_column_roc = sticker + FEATURES['_ROC']
_roc = ta.ROC(close, timeperiod=10)
#_roc[np.isnan(_roc)] = 0.0
df[_column_roc] = _roc
#Historical momentum
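        # momentum proxy: product of the close/SMA ratios for the 20-, 50- and
        # 100-period averages; a value above 1 means the price is above all three SMAs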
_column_hist_mom = sticker + FEATURES['_HIST_MOM']
if sticker != 'CASH':
_hist_mom = getDiv(df[sticker + '_close'].tolist(), df[sticker + '_daily_20_SMA'].tolist()) * \
getDiv(df[sticker + '_close'].tolist(), df[sticker + '_daily_50_SMA'].tolist()) * \
getDiv(df[sticker + '_close'].tolist(), df[sticker + '_daily_100_SMA'].tolist())
_hist_mom[np.isnan(_hist_mom)] = 1.0
df[_column_hist_mom] = _hist_mom
hist_mom = {}
hist_mom['min'] = df[_column_hist_mom].min()
hist_mom['max'] = df[_column_hist_mom].max()
hist_mom_dict[sticker] = hist_mom
df['CASH_daily_K'] = 50.0
df['CASH_daily_D'] = 50.0
df['CASH_daily_KD'] = 50.0
df['CASH_daily_MFI'] = 50.0
df['CASH_daily_HIST_MOM'] = 1.0
df['CASH_daily_KD_SLOPE'] = 0.0001
df['CASH_daily_MFI_SLOPE'] = 0.0001
#From high to low, slope from high to low
kd_rank = getRanks(df, '_daily_KD_SLOPE')
mfi_rank = getRanks(df, '_daily_MFI_SLOPE')
df[RANK_FEATURES['_KD_RANK']] = kd_rank
df[RANK_FEATURES['_MFI_RANK']] = mfi_rank
return df, hist_mom_dict
def addFundPerf(df, data_frame):
FEATURES = config.FUND_FEATURES
if data_frame == "weekly":
FEATURES = config.WEEKLY_FUND_FEATURES
df[FEATURES['FUND_PERF']] = 0.0000001
df[FEATURES['FUND_MONEY_FLOW']] = 0.0000001
df[FEATURES['FUND_PRICE_FLOW']] = 0.0000001
df[FEATURES['FUND_HIST']] = 0.0000001
df.replace([np.inf, -np.inf], np.nan, inplace=True)
df.dropna(how='any', inplace=True)
if df.isnull().any().any():
logger.error("After calculation feature values, df has null")
exit(1)
df = df.reset_index(drop=True)
return df
def readStatics():
d = None
w = None
with open(config.DAILY_STATICS) as json_data:
d = json.load(json_data)
with open(config.WEEKLY_STATICS) as json_data:
w = json.load(json_data)
return d, w
def dropOffTrade(d, first_date):
index = 0
for index, row in d.iterrows():
if row['date'] == first_date:
break
d = d.drop(d.index[:index])
return d
def dropOff(df, data_frame):
drop_off_column_names = []
drops = config.DROP_OFF_COLUMN_NAMES
if data_frame == 'weekly':
drops = config.WEEKLY_DROP_OFF_COLUMN_NAMES
if data_frame == 'monthly':
drops = config.MONTHLY_DROP_OFF_COLUMN_NAMES
for sticker in STICKERS:
for drop in drops:
drop_off_column_names.append(sticker + drop)
df.drop(drop_off_column_names, axis=1, inplace=True)
return df | cc0-1.0 |
ecuvelier/P3MVEOS | test_ro_script.py | 1 | 3720 | # -*- coding: utf-8 -*-
"""
Created on Mon May 29 11:16:51 2017
@author: edcuvelier
"""
from Crypto.Random.random import randint
from script import P, Q, Pair, Fr
import cryptoTools.polyCommitment as pC
import pathORAM.ringORAM as ringO
import time
import pickle
import random
import matplotlib.pyplot as plt
poly_deg = 10
nbOfDigits = 9
pC_SK = pC.PCommitment_Secret_Key(Fr.random().val)
g0 = P
h0 = pC_SK.alpha*g0
gp = Q
G = Fr.random().val*g0
pC_PK = pC.PCommitment_Public_Key(Pair,poly_deg,[],[])
pC_PK.setup(g0,h0,gp,pC_SK)
phone_number_PK = pC.Phone_Number_Commitment_Public_Key(pC_PK)
def produce_phoneNumbers():
phoneNumbers = []
rand = randint(2,poly_deg)
for i in range(rand):
phone_i = randint(10**nbOfDigits, 10**(nbOfDigits+1))
phoneNumbers.append(Fr.elem(phone_i))
return phoneNumbers
def test_real_example(Z = 4, S = 4, A = 4,nbChildren = 2, depth = 3,nbWords = None):
# create PO Tree
po_tree = ringO.PathORAMTree_for_Polynomial_Commitment(pC_PK, treeID = 'test_PO_tree')
phiprime_dic = {}
def cDB():
phi_x, c = pC_PK.commit_messages([Fr.zero()])
com, phiprime_x = c
return com
def rB(com,block_id):
if block_id == None :
rerand_com = cDB()
else :
phiprime_x = phiprime_dic[block_id]
rerand_com, new_phiprime = pC_PK.rerandomize(com,phiprime_x)
phiprime_dic[block_id] = new_phiprime
return rerand_com
RO = ringO.RingORAM(po_tree,Z = Z, S=S, A=A , nbChildren = nbChildren, depth = depth, createDummyBlock = cDB, rerandomizeBlock= rB)
if nbWords == None :
nbWords = int(RO.tLoad/4)
print 'parameters are\n Z:',Z,'\n S:',S,'\n A:',A,'\n depth:', depth,'\n number of children:', nbChildren,'\n number of blocks:', nbWords,'\n theoretic load of the tree:', RO.tLoad
t1 = time.time()
print 'Ring ORAM tree created'
blockList = []
messagesList = []
for i in range(nbWords):
phoneNumbers = produce_phoneNumbers()
# commitment on the phone number phoneNumbers[0] on other phone numbers phoneNumbers[1:]
com , phi_x, phiprime_x = phone_number_PK.commitPhoneNode(G, phoneNumbers[0], phoneNumbers[1:])
blockID = str(phoneNumbers[0].val)
phiprime_dic[blockID] = phiprime_x
blockList.append((blockID,com))
messagesList.append((phoneNumbers[0],phi_x, phiprime_x))
t2 = time.time()
print 'List of blocks generated',t2-t1,(t2-t1)/nbWords,'\n Filling up the tree'
RO.fillupTree(blockList)
t3 = time.time()
print 'Tree filled', t3-t2
s = str(Z)+'_'+str(depth)+'_'+str(nbChildren)
f = open('./posDictionaries/positionDic'+s,'w')
pickle.dump(RO.sPD,f)
f.close()
return RO,blockList,t2-t1,t3-t2, phiprime_dic, messagesList
def generating_queries(RO,n):
print RO.positionMap
keys = RO.positionMap.keys()
clientStashSize = []
dummyStashSize = []
t_mean = 0
for i in range(n):
blockID = random.sample(keys,1)[0]
print '\n ###\t query nb',i, 'on block', blockID
t1 = time.time()
RO.queryBlock(blockID)
t2 = time.time()
t_mean += t2-t1
clientStashSize.append(len(RO.clientStash))
dummyStashSize.append(len(RO.dummyStash))
RO.checkSync()
#print RO.positionMap
plt.plot(range(n), clientStashSize)
plt.plot(range(n), dummyStashSize)
plt.show()
print t_mean/n, 'sec per query'
print RO.dummyCounter, 'dummy blocks created'
    return clientStashSize, dummyStashSize
| apache-2.0 |
phoebe-project/phoebe2-docs | 2.1/tutorials/ecc.py | 1 | 2098 | #!/usr/bin/env python
# coding: utf-8
# Eccentricity (Volume Conservation)
# ============================
#
# Setup
# -----------------------------
# Let's first make sure we have the latest version of PHOEBE 2.1 installed. (You can comment out this line if you don't use pip for your installation or don't want to update to the latest release).
# In[ ]:
get_ipython().system('pip install -I "phoebe>=2.1,<2.2"')
# As always, let's do imports and initialize a logger and a new Bundle. See [Building a System](building_a_system.html) for more details.
# In[1]:
get_ipython().run_line_magic('matplotlib', 'inline')
# In[2]:
import phoebe
from phoebe import u # units
import numpy as np
import matplotlib.pyplot as plt
logger = phoebe.logger()
b = phoebe.default_binary()
# Relevant Parameters
# ----------------------------
#
# In[3]:
print b.get(qualifier='ecc')
# In[4]:
print b.get(qualifier='ecosw', context='component')
# In[5]:
print b.get(qualifier='esinw', context='component')
# Relevant Constraints
# -----------------------------
# In[6]:
print b.get(qualifier='ecosw', context='constraint')
# In[7]:
print b.get(qualifier='esinw', context='constraint')
# Influence on Meshes (volume conservation)
# ----------------------------
#
# In[8]:
b.add_dataset('mesh', times=np.linspace(0,1,11), columns=['volume'])
# In[9]:
b.set_value('ecc', 0.2)
# In[10]:
b.run_compute()
# In[11]:
print b['volume@primary@model']
# In[12]:
afig, mplfig = b['mesh01'].plot(x='times', y='volume', show=True)
# In[13]:
b.remove_dataset('mesh01')
# Influence on Radial Velocities
# ----------------------------------
#
# In[14]:
b.add_dataset('rv', times=np.linspace(0,1,51))
# In[15]:
b.run_compute()
# In[16]:
afig, mplfig = b['rv@model'].plot(show=True)
# In[17]:
b.remove_dataset('rv01')
# Influence on Light Curves (fluxes)
# -----------------------------------------
#
# In[18]:
b.add_dataset('lc', times=np.linspace(0,1,51))
# In[19]:
b.run_compute()
# In[20]:
afig, mplfig = b['lc@model'].plot(show=True)
| gpl-3.0 |
rbalda/neural_ocr | env/lib/python2.7/site-packages/matplotlib/tri/triangulation.py | 8 | 8357 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import matplotlib._tri as _tri
import matplotlib._qhull as _qhull
import numpy as np
class Triangulation(object):
"""
An unstructured triangular grid consisting of npoints points and
ntri triangles. The triangles can either be specified by the user
or automatically generated using a Delaunay triangulation.
Parameters
----------
x, y : array_like of shape (npoints)
Coordinates of grid points.
triangles : integer array_like of shape (ntri, 3), optional
For each triangle, the indices of the three points that make
up the triangle, ordered in an anticlockwise manner. If not
specified, the Delaunay triangulation is calculated.
mask : boolean array_like of shape (ntri), optional
Which triangles are masked out.
Attributes
----------
`edges`
`neighbors`
is_delaunay : bool
Whether the Triangulation is a calculated Delaunay
triangulation (where `triangles` was not specified) or not.
Notes
-----
For a Triangulation to be valid it must not have duplicate points,
triangles formed from colinear points, or overlapping triangles.
"""
def __init__(self, x, y, triangles=None, mask=None):
self.x = np.asarray(x, dtype=np.float64)
self.y = np.asarray(y, dtype=np.float64)
if self.x.shape != self.y.shape or len(self.x.shape) != 1:
raise ValueError("x and y must be equal-length 1-D arrays")
self.mask = None
self._edges = None
self._neighbors = None
self.is_delaunay = False
if triangles is None:
# No triangulation specified, so use matplotlib._qhull to obtain
# Delaunay triangulation.
self.triangles, self._neighbors = _qhull.delaunay(x, y)
self.is_delaunay = True
else:
# Triangulation specified. Copy, since we may correct triangle
# orientation.
self.triangles = np.array(triangles, dtype=np.int32, order='C')
if self.triangles.ndim != 2 or self.triangles.shape[1] != 3:
raise ValueError('triangles must be a (?,3) array')
if self.triangles.max() >= len(self.x):
raise ValueError('triangles max element is out of bounds')
if self.triangles.min() < 0:
raise ValueError('triangles min element is out of bounds')
if mask is not None:
self.mask = np.asarray(mask, dtype=np.bool)
if (len(self.mask.shape) != 1 or
self.mask.shape[0] != self.triangles.shape[0]):
raise ValueError('mask array must have same length as '
'triangles array')
# Underlying C++ object is not created until first needed.
self._cpp_triangulation = None
# Default TriFinder not created until needed.
self._trifinder = None
def calculate_plane_coefficients(self, z):
"""
Calculate plane equation coefficients for all unmasked triangles from
the point (x,y) coordinates and specified z-array of shape (npoints).
Returned array has shape (npoints,3) and allows z-value at (x,y)
position in triangle tri to be calculated using
z = array[tri,0]*x + array[tri,1]*y + array[tri,2].
"""
return self.get_cpp_triangulation().calculate_plane_coefficients(z)
@property
def edges(self):
"""
Return integer array of shape (nedges,2) containing all edges of
non-masked triangles.
Each edge is the start point index and end point index. Each
edge (start,end and end,start) appears only once.
"""
if self._edges is None:
self._edges = self.get_cpp_triangulation().get_edges()
return self._edges
def get_cpp_triangulation(self):
# Return the underlying C++ Triangulation object, creating it
# if necessary.
if self._cpp_triangulation is None:
self._cpp_triangulation = _tri.Triangulation(
self.x, self.y, self.triangles, self.mask, self._edges,
self._neighbors, not self.is_delaunay)
return self._cpp_triangulation
def get_masked_triangles(self):
"""
Return an array of triangles that are not masked.
"""
if self.mask is not None:
return self.triangles.compress(1 - self.mask, axis=0)
else:
return self.triangles
@staticmethod
def get_from_args_and_kwargs(*args, **kwargs):
"""
Return a Triangulation object from the args and kwargs, and
the remaining args and kwargs with the consumed values removed.
There are two alternatives: either the first argument is a
Triangulation object, in which case it is returned, or the args
and kwargs are sufficient to create a new Triangulation to
return. In the latter case, see Triangulation.__init__ for
the possible args and kwargs.
"""
if isinstance(args[0], Triangulation):
triangulation = args[0]
args = args[1:]
else:
x = args[0]
y = args[1]
args = args[2:] # Consumed first two args.
# Check triangles in kwargs then args.
triangles = kwargs.pop('triangles', None)
from_args = False
if triangles is None and len(args) > 0:
triangles = args[0]
from_args = True
if triangles is not None:
try:
triangles = np.asarray(triangles, dtype=np.int32)
except ValueError:
triangles = None
if triangles is not None and (triangles.ndim != 2 or
triangles.shape[1] != 3):
triangles = None
if triangles is not None and from_args:
args = args[1:] # Consumed first item in args.
# Check for mask in kwargs.
mask = kwargs.pop('mask', None)
triangulation = Triangulation(x, y, triangles, mask)
return triangulation, args, kwargs
def get_trifinder(self):
"""
Return the default :class:`matplotlib.tri.TriFinder` of this
triangulation, creating it if necessary. This allows the same
TriFinder object to be easily shared.
"""
if self._trifinder is None:
# Default TriFinder class.
from matplotlib.tri.trifinder import TrapezoidMapTriFinder
self._trifinder = TrapezoidMapTriFinder(self)
return self._trifinder
@property
def neighbors(self):
"""
Return integer array of shape (ntri,3) containing neighbor
triangles.
For each triangle, the indices of the three triangles that
share the same edges, or -1 if there is no such neighboring
triangle. neighbors[i,j] is the triangle that is the neighbor
to the edge from point index triangles[i,j] to point index
triangles[i,(j+1)%3].
"""
if self._neighbors is None:
self._neighbors = self.get_cpp_triangulation().get_neighbors()
return self._neighbors
def set_mask(self, mask):
"""
Set or clear the mask array. This is either None, or a boolean
array of shape (ntri).
"""
if mask is None:
self.mask = None
else:
self.mask = np.asarray(mask, dtype=np.bool)
if (len(self.mask.shape) != 1 or
self.mask.shape[0] != self.triangles.shape[0]):
raise ValueError('mask array must have same length as '
'triangles array')
# Set mask in C++ Triangulation.
if self._cpp_triangulation is not None:
self._cpp_triangulation.set_mask(self.mask)
# Clear derived fields so they are recalculated when needed.
self._edges = None
self._neighbors = None
# Recalculate TriFinder if it exists.
if self._trifinder is not None:
self._trifinder._initialize()
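# Hedged usage sketch (added for illustration; not part of the matplotlib
# source). Builds a Delaunay triangulation from random points and inspects
# the derived arrays documented above.
if __name__ == '__main__':  # pragma: no cover - illustrative only
    rng = np.random.RandomState(0)
    demo_x = rng.rand(10)
    demo_y = rng.rand(10)
    demo_tri = Triangulation(demo_x, demo_y)  # Delaunay, since triangles=None
    print(demo_tri.triangles.shape)   # (ntri, 3) point indices per triangle
    print(demo_tri.edges.shape)       # (nedges, 2) unique edges
    print(demo_tri.neighbors.shape)   # (ntri, 3) neighbouring triangle indices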
| mit |
xmnlab/minilab | labtrans/peaks/filtrado.py | 1 | 2612 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 23 13:59:37 2013
@author: Marcos Gallo
"""
from spectrum import *
from pylab import *
from matplotlib.widgets import Slider, Button
from scipy import signal
import numpy
import time
# sampling frequency
f = 5000
# time between samples (float division, otherwise this is 0 under Python 2)
T = 1.0 / f
# Nyquist frequency
nyq = f/2
#lower bound
lb = 2
#upper bound
ub = -1
rawf = open('20130503_093235_piezoQuartzo_DadosBrutos.txt','r')
sinais = []
found = False
for line in rawf:
if 'X_Value' not in line and not found:
continue
if not found:
found = True
for n in range(len(line.split('\t')[1:-1])):
sinais.append([])
continue
if found:
for n, val in enumerate(line.split('\t')[1:-1]):
sinais[n].append(float(val.replace(',','.')))
sinais = [numpy.array(sina) for sina in sinais]
# take only the first signal
sinal = sinais[0][lb:ub]
n = len(sinal)
k = arange(n)
x1 = arange(n)
x1d = arange(n-1)
# first derivative
st = time.time()
der = numpy.gradient(sinal, 1.0/5000)
et = time.time()
print 'gradient %s secs' % (et - st)
st = time.time()
der2 = numpy.ediff1d(sinal)
et = time.time()
print 'ediff1d %s secs' % (et - st)
st = time.time()
der3 = numpy.diff(sinal)  # <---- fastest!
et = time.time()
print 'diff %s secs' % (et - st)
# low-pass filter
taps=61
fir_coef = signal.firwin(taps,cutoff=50.0/nyq)
# filtered signal
sinalf = signal.lfilter(fir_coef,1.0, sinal)
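# (added note) A linear-phase FIR filter with `taps` coefficients delays the
# output by roughly taps/2 samples, which is why the filtered trace is shifted
# by that amount when plotted further below.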
# moving-average filter
def moving_average(a, n=3):
    ret = np.cumsum(a, dtype=float)
    ret[n:] = ret[n:] - ret[:-n]
    return ret[n - 1:] / n
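# (added comment sketch) e.g. moving_average(np.array([1., 2., 3., 4.]), n=2)
# returns array([1.5, 2.5, 3.5]) -- each value averages n consecutive samples.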
mvn = 13
sinalf2 = moving_average(sinal,mvn)
derf = numpy.gradient(sinalf)
# another filter
fir2 = signal.firwin(100,cutoff=5.0/nyq)
# high-pass
fir2 = -fir2
fir2[100/2] = fir2[100/2] + 1
fdiffh = signal.lfilter(fir2,1.0,derf)
# zero-crossing detection
signaldiff = numpy.diff(numpy.sign(der))
signalfdiff = numpy.diff(numpy.sign(derf))
# signal plots
subplot(311)
grid()
plot(x1,sinal)
print -taps/2, -taps+taps/2
plot(range(-taps+taps/2,len(sinalf)-taps+taps/2),sinalf)
plot(range(mvn/2,len(sinalf2)+mvn/2),sinalf2)
legend(['o','lp','MA'])
# spectrum plots
subplot(312)
grid()
p = Periodogram(sinal, f)
p.run()
p.plot()
pf = Periodogram(sinalf, f)
pf.run()
pf.plot()
pfd = Periodogram(sinalf2[:-2 if len(sinalf2)%2 == 0 else -1], f)
pfd.run()
pfd.plot()
legend(['o','lp','MA'])
# plots of derivatives and zero crossings
subplot(313)
grid()
plot(range(len(der)),der)
plot(range(len(der2)),der2)
plot(range(len(der3)),der3)
#plot(x1, derf)
#plot(x1d, signalfdiff)
#plot(x1,fdiffh)
#plot(range(len(fdiffhz)), fdiffhz)
show()
print 'Done' | gpl-3.0 |
MJuddBooth/pandas | pandas/tests/scalar/timestamp/test_comparisons.py | 2 | 4860 | # -*- coding: utf-8 -*-
from datetime import datetime
import operator
import numpy as np
import pytest
from pandas.compat import PY2, long
from pandas import Timestamp
class TestTimestampComparison(object):
def test_comparison_object_array(self):
# GH#15183
ts = Timestamp('2011-01-03 00:00:00-0500', tz='US/Eastern')
other = Timestamp('2011-01-01 00:00:00-0500', tz='US/Eastern')
naive = Timestamp('2011-01-01 00:00:00')
arr = np.array([other, ts], dtype=object)
res = arr == ts
expected = np.array([False, True], dtype=bool)
assert (res == expected).all()
# 2D case
arr = np.array([[other, ts],
[ts, other]],
dtype=object)
res = arr != ts
expected = np.array([[True, False], [False, True]], dtype=bool)
assert res.shape == expected.shape
assert (res == expected).all()
# tzaware mismatch
arr = np.array([naive], dtype=object)
with pytest.raises(TypeError):
arr < ts
def test_comparison(self):
# 5-18-2012 00:00:00.000
stamp = long(1337299200000000000)
val = Timestamp(stamp)
assert val == val
assert not val != val
assert not val < val
assert val <= val
assert not val > val
assert val >= val
other = datetime(2012, 5, 18)
assert val == other
assert not val != other
assert not val < other
assert val <= other
assert not val > other
assert val >= other
other = Timestamp(stamp + 100)
assert val != other
assert val != other
assert val < other
assert val <= other
assert other > val
assert other >= val
def test_compare_invalid(self):
# GH#8058
val = Timestamp('20130101 12:01:02')
assert not val == 'foo'
assert not val == 10.0
assert not val == 1
assert not val == long(1)
assert not val == []
assert not val == {'foo': 1}
assert not val == np.float64(1)
assert not val == np.int64(1)
assert val != 'foo'
assert val != 10.0
assert val != 1
assert val != long(1)
assert val != []
assert val != {'foo': 1}
assert val != np.float64(1)
assert val != np.int64(1)
def test_cant_compare_tz_naive_w_aware(self, utc_fixture):
# see GH#1404
a = Timestamp('3/12/2012')
b = Timestamp('3/12/2012', tz=utc_fixture)
with pytest.raises(TypeError):
a == b
with pytest.raises(TypeError):
a != b
with pytest.raises(TypeError):
a < b
with pytest.raises(TypeError):
a <= b
with pytest.raises(TypeError):
a > b
with pytest.raises(TypeError):
a >= b
with pytest.raises(TypeError):
b == a
with pytest.raises(TypeError):
b != a
with pytest.raises(TypeError):
b < a
with pytest.raises(TypeError):
b <= a
with pytest.raises(TypeError):
b > a
with pytest.raises(TypeError):
b >= a
if PY2:
with pytest.raises(TypeError):
a == b.to_pydatetime()
with pytest.raises(TypeError):
a.to_pydatetime() == b
else:
assert not a == b.to_pydatetime()
assert not a.to_pydatetime() == b
def test_timestamp_compare_scalars(self):
# case where ndim == 0
lhs = np.datetime64(datetime(2013, 12, 6))
rhs = Timestamp('now')
nat = Timestamp('nat')
ops = {'gt': 'lt',
'lt': 'gt',
'ge': 'le',
'le': 'ge',
'eq': 'eq',
'ne': 'ne'}
for left, right in ops.items():
left_f = getattr(operator, left)
right_f = getattr(operator, right)
expected = left_f(lhs, rhs)
result = right_f(rhs, lhs)
assert result == expected
expected = left_f(rhs, nat)
result = right_f(nat, rhs)
assert result == expected
def test_timestamp_compare_with_early_datetime(self):
# e.g. datetime.min
stamp = Timestamp('2012-01-01')
assert not stamp == datetime.min
assert not stamp == datetime(1600, 1, 1)
assert not stamp == datetime(2700, 1, 1)
assert stamp != datetime.min
assert stamp != datetime(1600, 1, 1)
assert stamp != datetime(2700, 1, 1)
assert stamp > datetime(1600, 1, 1)
assert stamp >= datetime(1600, 1, 1)
assert stamp < datetime(2700, 1, 1)
assert stamp <= datetime(2700, 1, 1)
| bsd-3-clause |
eaplatanios/tensorflow | tensorflow/examples/tutorials/input_fn/boston.py | 76 | 2920 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""DNNRegressor with custom input_fn for Housing dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import pandas as pd
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)
COLUMNS = ["crim", "zn", "indus", "nox", "rm", "age",
"dis", "tax", "ptratio", "medv"]
FEATURES = ["crim", "zn", "indus", "nox", "rm",
"age", "dis", "tax", "ptratio"]
LABEL = "medv"
def get_input_fn(data_set, num_epochs=None, shuffle=True):
return tf.estimator.inputs.pandas_input_fn(
x=pd.DataFrame({k: data_set[k].values for k in FEATURES}),
y=pd.Series(data_set[LABEL].values),
num_epochs=num_epochs,
shuffle=shuffle)
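# (Added illustration, not part of the original example.) The helper above can
# be exercised directly on any DataFrame with the expected columns, e.g.:
#   sample_df = pd.read_csv("boston_test.csv", skipinitialspace=True,
#                           skiprows=1, names=COLUMNS)
#   input_fn = get_input_fn(sample_df, num_epochs=1, shuffle=False)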
def main(unused_argv):
# Load datasets
training_set = pd.read_csv("boston_train.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
test_set = pd.read_csv("boston_test.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Set of 6 examples for which to predict median house values
prediction_set = pd.read_csv("boston_predict.csv", skipinitialspace=True,
skiprows=1, names=COLUMNS)
# Feature cols
feature_cols = [tf.feature_column.numeric_column(k) for k in FEATURES]
# Build 2 layer fully connected DNN with 10, 10 units respectively.
regressor = tf.estimator.DNNRegressor(feature_columns=feature_cols,
hidden_units=[10, 10],
model_dir="/tmp/boston_model")
# Train
regressor.train(input_fn=get_input_fn(training_set), steps=5000)
# Evaluate loss over one epoch of test_set.
ev = regressor.evaluate(
input_fn=get_input_fn(test_set, num_epochs=1, shuffle=False))
loss_score = ev["loss"]
print("Loss: {0:f}".format(loss_score))
# Print out predictions over a slice of prediction_set.
y = regressor.predict(
input_fn=get_input_fn(prediction_set, num_epochs=1, shuffle=False))
# .predict() returns an iterator of dicts; convert to a list and print
# predictions
predictions = list(p["predictions"] for p in itertools.islice(y, 6))
print("Predictions: {}".format(str(predictions)))
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
XiaodunServerGroup/medicalmooc | docs/en_us/developers/source/conf.py | 6 | 6667 | # -*- coding: utf-8 -*-
#pylint: disable=C0103
#pylint: disable=W0622
#pylint: disable=W0212
#pylint: disable=W0613
import sys, os
from path import path
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
sys.path.append('../../../../')
from docs.shared.conf import *
# Add any paths that contain templates here, relative to this directory.
templates_path.append('source/_templates')
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path.append('source/_static')
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
root = path('../../../..').abspath()
sys.path.insert(0, root)
sys.path.append(root / "common/djangoapps")
sys.path.append(root / "common/lib")
sys.path.append(root / "common/lib/capa")
sys.path.append(root / "common/lib/chem")
sys.path.append(root / "common/lib/sandbox-packages")
sys.path.append(root / "common/lib/xmodule")
sys.path.append(root / "lms/djangoapps")
sys.path.append(root / "lms/lib")
sys.path.append(root / "cms/djangoapps")
sys.path.append(root / "cms/lib")
sys.path.insert(0, os.path.abspath(os.path.normpath(os.path.dirname(__file__)
+ '/../../../')))
sys.path.append('.')
# django configuration - careful here
if on_rtd:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms'
else:
os.environ['DJANGO_SETTINGS_MODULE'] = 'lms.envs.test'
# -- General configuration -----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinx.ext.autodoc', 'sphinx.ext.doctest', 'sphinx.ext.intersphinx',
'sphinx.ext.todo', 'sphinx.ext.coverage', 'sphinx.ext.pngmath',
'sphinx.ext.mathjax', 'sphinx.ext.viewcode']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['build']
# Output file base name for HTML help builder.
htmlhelp_basename = 'edXDocs'
project = u'edX Platform Developer Documentation'
copyright = u'2014, edX'
# --- Mock modules ------------------------------------------------------------
# Mock all the modules that the readthedocs build can't import
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
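# (Added note) With this stand-in, `sys.modules['matplotlib'] = Mock()` lets
# `from matplotlib import pyplot` succeed on readthedocs: lowercase attribute
# lookups return further Mock instances, while capitalised names (e.g.
# `Figure`) resolve to empty placeholder classes, so autodoc can import
# modules that reference them without the real dependency installed.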
# The list of modules and submodules that we know give RTD trouble.
# Make sure you've tried including the relevant package in
# docs/share/requirements.txt before adding to this list.
MOCK_MODULES = [
'bson',
'bson.errors',
'bson.objectid',
'dateutil',
'dateutil.parser',
'fs',
'fs.errors',
'fs.osfs',
'lazy',
'mako',
'mako.template',
'matplotlib',
'matplotlib.pyplot',
'mock',
'numpy',
'oauthlib',
'oauthlib.oauth1',
'oauthlib.oauth1.rfc5849',
'PIL',
'pymongo',
'pyparsing',
'pysrt',
'requests',
'scipy.interpolate',
'scipy.constants',
'scipy.optimize',
'yaml',
'webob',
'webob.multidict',
]
if on_rtd:
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
# -----------------------------------------------------------------------------
# from http://djangosnippets.org/snippets/2533/
# autogenerate models definitions
import inspect
import types
from HTMLParser import HTMLParser
def force_unicode(s, encoding='utf-8', strings_only=False, errors='strict'):
"""
Similar to smart_unicode, except that lazy instances are resolved to
strings, rather than kept as lazy objects.
If strings_only is True, don't convert (some) non-string-like objects.
"""
if strings_only and isinstance(s, (types.NoneType, int)):
return s
if not isinstance(s, basestring,):
if hasattr(s, '__unicode__'):
s = unicode(s)
else:
s = unicode(str(s), encoding, errors)
elif not isinstance(s, unicode):
s = unicode(s, encoding, errors)
return s
class MLStripper(HTMLParser):
def __init__(self):
self.reset()
self.fed = []
def handle_data(self, d):
self.fed.append(d)
def get_data(self):
return ''.join(self.fed)
def strip_tags(html):
s = MLStripper()
s.feed(html)
return s.get_data()
def process_docstring(app, what, name, obj, options, lines):
"""Autodoc django models"""
# This causes import errors if left outside the function
from django.db import models
# If you want extract docs from django forms:
# from django import forms
# from django.forms.models import BaseInlineFormSet
# Only look at objects that inherit from Django's base MODEL class
if inspect.isclass(obj) and issubclass(obj, models.Model):
# Grab the field list from the meta class
fields = obj._meta._fields()
for field in fields:
# Decode and strip any html out of the field's help text
help_text = strip_tags(force_unicode(field.help_text))
# Decode and capitalize the verbose name, for use if there isn't
# any help text
verbose_name = force_unicode(field.verbose_name).capitalize()
if help_text:
# Add the model field to the end of the docstring as a param
# using the help text as the description
lines.append(u':param %s: %s' % (field.attname, help_text))
else:
# Add the model field to the end of the docstring as a param
# using the verbose name as the description
lines.append(u':param %s: %s' % (field.attname, verbose_name))
# Add the field's type to the docstring
lines.append(u':type %s: %s' % (field.attname, type(field).__name__))
return lines
def setup(app):
"""Setup docsting processors"""
#Register the docstring processor with sphinx
app.connect('autodoc-process-docstring', process_docstring)
| agpl-3.0 |
kaichogami/scikit-learn | sklearn/tests/test_naive_bayes.py | 32 | 17897 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1:]), 2)
assert_equal(clf.predict_proba([X[0]]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0:1]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba([X[1]])), 1)
assert_almost_equal(np.sum(clf.predict_proba([X[-1]])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float64)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([[1, 0]]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([[0, 1, 1, 0, 0, 1]])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
def test_naive_bayes_scale_invariance():
# Scaling the data should not change the prediction results
iris = load_iris()
X, y = iris.data, iris.target
labels = [GaussianNB().fit(f * X, y).predict(f * X)
for f in [1E-10, 1, 1E10]]
assert_array_equal(labels[0], labels[1])
assert_array_equal(labels[1], labels[2])
| bsd-3-clause |
yonglehou/scikit-learn | examples/svm/plot_separating_hyperplane.py | 294 | 1273 | """
=========================================
SVM: Maximum margin separating hyperplane
=========================================
Plot the maximum margin separating hyperplane within a two-class
separable dataset using a Support Vector Machine classifier with
linear kernel.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
# we create 40 separable points
np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2, 2], np.random.randn(20, 2) + [2, 2]]
Y = [0] * 20 + [1] * 20
# fit the model
clf = svm.SVC(kernel='linear')
clf.fit(X, Y)
# get the separating hyperplane
w = clf.coef_[0]
a = -w[0] / w[1]
xx = np.linspace(-5, 5)
yy = a * xx - (clf.intercept_[0]) / w[1]
# plot the parallels to the separating hyperplane that pass through the
# support vectors
b = clf.support_vectors_[0]
yy_down = a * xx + (b[1] - a * b[0])
b = clf.support_vectors_[-1]
yy_up = a * xx + (b[1] - a * b[0])
# plot the line, the points, and the nearest vectors to the plane
plt.plot(xx, yy, 'k-')
plt.plot(xx, yy_down, 'k--')
plt.plot(xx, yy_up, 'k--')
plt.scatter(clf.support_vectors_[:, 0], clf.support_vectors_[:, 1],
s=80, facecolors='none')
plt.scatter(X[:, 0], X[:, 1], c=Y, cmap=plt.cm.Paired)
plt.axis('tight')
plt.show()
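# (Added sketch, not part of the original example) the geometric margin of the
# fitted separator is 2 / ||w||, which can be checked directly:
# print("margin width:", 2.0 / np.linalg.norm(clf.coef_[0]))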
| bsd-3-clause |
gregversteeg/gaussianize | gaussianize.py | 1 | 12549 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Transform data so that it is approximately normally distributed
This code written by Greg Ver Steeg, 2015.
"""
from typing import Text, List, Union
import numpy as np
from scipy import special
from scipy.stats import kurtosis, norm, rankdata, boxcox
from scipy import optimize # TODO: Explore efficacy of other opt. methods
import sklearn
from matplotlib import pylab as plt
from scipy import stats
import warnings
import os
np.seterr(all='warn')
# Tolerance for == 0.0 tolerance.
_EPS = 1e-6
def _update_x(x: Union[np.ndarray, List]) -> np.ndarray:
x = np.asarray(x)
if len(x.shape) == 1:
x = x[:, np.newaxis]
elif len(x.shape) != 2:
raise ValueError("Data should be a 1-d list of samples to transform or a 2d array with samples as rows.")
return x
class Gaussianize(sklearn.base.TransformerMixin):
"""
Gaussianize data using various methods.
Conventions
----------
This class is a wrapper that follows sklearn naming/style (e.g. fit(X) to train).
In this code, x is the input, y is the output. But in the functions outside the class, I follow
Georg's convention that Y is the input and X is the output (Gaussianized) data.
Parameters
----------
strategy : str, default='lambert'. Possibilities are 'lambert'[1], 'brute'[2] and 'boxcox'[3].
tol : float, default = 1e-4
max_iter : int, default = 100
Maximum number of iterations to search for correct parameters of Lambert transform.
Attributes
----------
coefs_ : list of tuples
For each variable, we have transformation parameters.
For Lambert, e.g., a tuple consisting of (mu, sigma, delta), corresponding to the parameters of the
appropriate Lambert transform. Eq. 6 and 8 in the paper below.
References
----------
[1] Georg M Goerg. The Lambert Way to Gaussianize heavy tailed data with
the inverse of Tukey's h transformation as a special case
Author generously provides code in R: https://cran.r-project.org/web/packages/LambertW/
[2] Valero Laparra, Gustavo Camps-Valls, and Jesus Malo. Iterative Gaussianization: From ICA to Random Rotations
[3] Box cox transformation and references: https://en.wikipedia.org/wiki/Power_transform
"""
def __init__(self, strategy: Text = 'lambert',
tol: float = 1e-5,
max_iter: int = 100,
verbose: bool = False):
self.tol = tol
self.max_iter = max_iter
self.strategy = strategy
self.coefs_ = [] # Store tau for each transformed variable
self.verbose = verbose
def fit(self, x: np.ndarray, y=None):
"""Fit a Gaussianizing transformation to each variable/column in x."""
# Initialize coefficients again with an empty list. Otherwise
# calling .fit() repeatedly will augment previous .coefs_ list.
self.coefs_ = []
x = _update_x(x)
if self.verbose:
print("Gaussianizing with strategy='%s'" % self.strategy)
if self.strategy == "lambert":
_get_coef = lambda vec: igmm(vec, self.tol, max_iter=self.max_iter)
elif self.strategy == "brute":
_get_coef = lambda vec: None # TODO: In principle, we could store parameters to do a quasi-invert
elif self.strategy == "boxcox":
_get_coef = lambda vec: boxcox(vec)[1]
else:
raise NotImplementedError("stategy='%s' not implemented." % self.strategy)
for x_i in x.T:
self.coefs_.append(_get_coef(x_i))
return self
def transform(self, x: np.ndarray) -> np.ndarray:
"""Transform new data using a previously learned Gaussianization model."""
x = _update_x(x)
if x.shape[1] != len(self.coefs_):
raise ValueError("%d variables in test data, but %d variables were in training data." % (x.shape[1], len(self.coefs_)))
if self.strategy == 'lambert':
return np.array([w_t(x_i, tau_i) for x_i, tau_i in zip(x.T, self.coefs_)]).T
elif self.strategy == 'brute':
return np.array([norm.ppf((rankdata(x_i) - 0.5) / len(x_i)) for x_i in x.T]).T
elif self.strategy == 'boxcox':
return np.array([boxcox(x_i, lmbda=lmbda_i) for x_i, lmbda_i in zip(x.T, self.coefs_)]).T
else:
raise NotImplementedError("stategy='%s' not implemented." % self.strategy)
def inverse_transform(self, y: np.ndarray) -> np.ndarray:
"""Recover original data from Gaussianized data."""
if self.strategy == 'lambert':
return np.array([inverse(y_i, tau_i) for y_i, tau_i in zip(y.T, self.coefs_)]).T
elif self.strategy == 'boxcox':
return np.array([(1. + lmbda_i * y_i) ** (1./lmbda_i) for y_i, lmbda_i in zip(y.T, self.coefs_)]).T
else:
raise NotImplementedError("Inversion not supported for gaussianization transform '%s'" % self.strategy)
def qqplot(self, x: np.ndarray, prefix: Text = 'qq', output_dir: Text = "/tmp/"):
"""Show qq plots compared to normal before and after the transform."""
x = _update_x(x)
y = self.transform(x)
n_dim = y.shape[1]
for i in range(n_dim):
stats.probplot(x[:, i], dist="norm", plot=plt)
plt.savefig(os.path.join(output_dir, prefix + '_%d_before.png' % i))
plt.clf()
stats.probplot(y[:, i], dist="norm", plot=plt)
plt.savefig(os.path.join(output_dir, prefix + '_%d_after.png' % i))
plt.clf()
def w_d(z, delta):
# Eq. 9
if delta < _EPS:
return z
return np.sign(z) * np.sqrt(np.real(special.lambertw(delta * z ** 2)) / delta)
def w_t(y, tau):
# Eq. 8
return tau[0] + tau[1] * w_d((y - tau[0]) / tau[1], tau[2])
def inverse(x, tau):
# Eq. 6
u = (x - tau[0]) / tau[1]
return tau[0] + tau[1] * (u * np.exp(u * u * (tau[2] * 0.5)))
def igmm(y: np.ndarray, tol: float = 1e-6, max_iter: int = 100):
# Infer mu, sigma, delta using IGMM in Alg.2, Appendix C
if np.std(y) < _EPS:
return np.mean(y), np.std(y).clip(_EPS), 0
delta0 = delta_init(y)
tau1 = (np.median(y), np.std(y) * (1. - 2. * delta0) ** 0.75, delta0)
for k in range(max_iter):
tau0 = tau1
z = (y - tau1[0]) / tau1[1]
delta1 = delta_gmm(z)
x = tau0[0] + tau1[1] * w_d(z, delta1)
mu1, sigma1 = np.mean(x), np.std(x)
tau1 = (mu1, sigma1, delta1)
if np.linalg.norm(np.array(tau1) - np.array(tau0)) < tol:
break
else:
if k == max_iter - 1:
warnings.warn("Warning: No convergence after %d iterations. Increase max_iter." % max_iter)
return tau1
def delta_gmm(z):
# Alg. 1, Appendix C
delta0 = delta_init(z)
def func(q):
u = w_d(z, np.exp(q))
if not np.all(np.isfinite(u)):
return 0.
else:
k = kurtosis(u, fisher=True, bias=False)**2
if not np.isfinite(k) or k > 1e10:
return 1e10
else:
return k
res = optimize.fmin(func, np.log(delta0), disp=0)
return np.around(np.exp(res[-1]), 6)
def delta_init(z):
gamma = kurtosis(z, fisher=False, bias=False)
with np.errstate(all='ignore'):
delta0 = np.clip(1. / 66 * (np.sqrt(66 * gamma - 162.) - 6.), 0.01, 0.48)
if not np.isfinite(delta0):
delta0 = 0.01
return delta0
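# Minimal usage sketch (added for illustration; mirrors the sklearn-style API
# described in the Gaussianize docstring above):
#
#   x = np.random.standard_cauchy(size=(1000, 2))    # heavy-tailed input
#   out = Gaussianize(strategy='lambert')
#   y = out.fit_transform(x)                         # approximately normal
#   x_rec = out.inverse_transform(y)                 # back to original scale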
if __name__ == '__main__':
# Command line interface
# Sample commands:
# python gaussianize.py test_data.csv
import csv
import sys, os
import traceback
from optparse import OptionParser, OptionGroup
parser = OptionParser(usage="usage: %prog [options] data_file.csv \n"
"It is assumed that the first row and first column of the data CSV file are labels.\n"
"Use options to indicate otherwise.")
group = OptionGroup(parser, "Input Data Format Options")
group.add_option("-c", "--no_column_names",
action="store_true", dest="nc", default=False,
help="We assume the top row is variable names for each column. "
"This flag says that data starts on the first row and gives a "
"default numbering scheme to the variables (1,2,3...).")
group.add_option("-r", "--no_row_names",
action="store_true", dest="nr", default=False,
help="We assume the first column is a label or index for each sample. "
"This flag says that data starts on the first column.")
group.add_option("-d", "--delimiter",
action="store", dest="delimiter", type="string", default=",",
help="Separator between entries in the data, default is ','.")
parser.add_option_group(group)
group = OptionGroup(parser, "Transform Options")
group.add_option("-s", "--strategy",
action="store", dest="strategy", type="string", default="lambert",
help="Strategy.")
parser.add_option_group(group)
group = OptionGroup(parser, "Output Options")
group.add_option("-o", "--output",
action="store", dest="output", type="string", default="gaussian_output.csv",
help="Where to store gaussianized data.")
group.add_option("-q", "--qqplots",
action="store_true", dest="q", default=False,
help="Produce qq plots for each variable before and after transform.")
parser.add_option_group(group)
(options, args) = parser.parse_args()
if not len(args) == 1:
warnings.warn("Run with '-h' option for usage help.")
sys.exit()
#Load data from csv file
filename = args[0]
with open(filename, 'rU') as csvfile:
        reader = csv.reader(csvfile, delimiter=options.delimiter)
if options.nc:
variable_names = None
else:
            variable_names = next(reader)[(1 - options.nr):]
sample_names = []
data = []
for row in reader:
if options.nr:
sample_names = None
else:
sample_names.append(row[0])
data.append(row[(1 - options.nr):])
print(len(data), data[0])
try:
for i in range(len(data)):
            data[i] = list(map(float, data[i]))
X = np.array(data, dtype=float) # Data matrix in numpy format
except:
raise ValueError("Incorrect data format.\nCheck that you've correctly specified options "
"such as continuous or not, \nand if there is a header row or column.\n"
"Run 'python gaussianize.py -h' option for help with options.")
traceback.print_exc(file=sys.stdout)
sys.exit()
ks = []
for xi in X.T:
ks.append(kurtosis(xi))
print(np.mean(np.array(ks) > 1))
from matplotlib import pylab
pylab.hist(ks, bins=30)
pylab.xlabel('excess kurtosis')
pylab.savefig('excess_kurtoses_all.png')
pylab.clf()
pylab.hist([k for k in ks if k < 2], bins=30)
pylab.xlabel('excess kurtosis')
pylab.savefig('excess_kurtoses_near_zero.png')
print(np.argmax(ks))
pdict = {}
for k in np.argsort(- np.array(ks))[:50]:
pylab.clf()
p = np.argmax(X[:, k])
pdict[p] = pdict.get(p, 0) + 1
pylab.hist(X[:, k], bins=30)
pylab.xlabel(variable_names[k])
pylab.ylabel('Histogram of patients')
pylab.savefig('high_kurtosis/'+variable_names[k] + '.png')
print(pdict) # 203, 140 appear three times.
sys.exit()
out = Gaussianize(strategy=options.strategy)
y = out.fit_transform(X)
with open(options.output, 'w') as csvfile:
writer = csv.writer(csvfile, delimiter=options.delimiter)
if not options.nc:
writer.writerow([""] * (1 - options.nr) + variable_names)
for i, row in enumerate(y):
if not options.nr:
writer.writerow([sample_names[i]] + list(row))
else:
writer.writerow(row)
if options.q:
print('Making qq plots')
prefix = options.output.split('.')[0]
if not os.path.exists(prefix+'_q'):
os.makedirs(prefix+'_q')
out.qqplot(X, prefix=prefix + '_q/q') | mit |
kwilliams-mo/iris | lib/iris/tests/test_pandas.py | 2 | 17307 | # (C) British Crown Copyright 2013, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
# import iris tests first so that some things can be initialised before
# importing anything else
import iris.tests as tests
import datetime
import unittest
import netcdftime
import numpy as np
try:
import pandas
except ImportError:
# Disable all these tests if pandas is not installed.
pandas = None
skip_pandas = unittest.skipIf(pandas is None,
'Test(s) require "pandas", '
'which is not available.')
if pandas is not None:
from iris.coords import DimCoord
from iris.cube import Cube
import iris.pandas
import iris.unit
@skip_pandas
class TestAsSeries(tests.IrisTest):
"""Test conversion of 1D cubes to Pandas using as_series()"""
def test_no_dim_coord(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="foo")
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertString(
str(series),
tests.get_result_path(('pandas', 'as_series',
'no_dim_coord.txt')))
def test_simple(self):
cube = Cube(np.array([0, 1, 2, 3, 4.4]), long_name="foo")
dim_coord = DimCoord([5, 6, 7, 8, 9], long_name="bar")
cube.add_dim_coord(dim_coord, 0)
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertString(
str(series),
tests.get_result_path(('pandas', 'as_series', 'simple.txt')))
def test_masked(self):
data = np.ma.MaskedArray([0, 1, 2, 3, 4.4], mask=[0, 1, 0, 1, 0])
cube = Cube(data, long_name="foo")
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data.astype('f').filled(np.nan))
self.assertString(
str(series),
tests.get_result_path(('pandas', 'as_series', 'masked.txt')))
def test_time_gregorian(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="ts")
time_coord = DimCoord([0, 100.1, 200.2, 300.3, 400.4],
long_name="time",
units="days since 2000-01-01 00:00")
cube.add_dim_coord(time_coord, 0)
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertString(
str(series),
tests.get_result_path(('pandas', 'as_series',
'time_gregorian.txt')))
def test_time_360(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="ts")
time_unit = iris.unit.Unit("days since 2000-01-01 00:00",
calendar=iris.unit.CALENDAR_360_DAY)
time_coord = DimCoord([0, 100.1, 200.2, 300.3, 400.4],
long_name="time", units=time_unit)
cube.add_dim_coord(time_coord, 0)
series = iris.pandas.as_series(cube)
self.assertArrayEqual(series, cube.data)
self.assertString(
str(series),
tests.get_result_path(('pandas', 'as_series',
'time_360.txt')))
def test_copy_true(self):
cube = Cube(np.array([0, 1, 2, 3, 4]), long_name="foo")
series = iris.pandas.as_series(cube)
series[0] = 99
self.assertEqual(cube.data[0], 0)
def test_copy_int32_false(self):
cube = Cube(np.array([0, 1, 2, 3, 4], dtype=np.int32), long_name="foo")
series = iris.pandas.as_series(cube, copy=False)
series[0] = 99
self.assertEqual(cube.data[0], 99)
def test_copy_int64_false(self):
cube = Cube(np.array([0, 1, 2, 3, 4], dtype=np.int32), long_name="foo")
series = iris.pandas.as_series(cube, copy=False)
series[0] = 99
self.assertEqual(cube.data[0], 99)
def test_copy_float_false(self):
cube = Cube(np.array([0, 1, 2, 3.3, 4]), long_name="foo")
series = iris.pandas.as_series(cube, copy=False)
series[0] = 99
self.assertEqual(cube.data[0], 99)
def test_copy_masked_true(self):
data = np.ma.MaskedArray([0, 1, 2, 3, 4], mask=[0, 1, 0, 1, 0])
cube = Cube(data, long_name="foo")
series = iris.pandas.as_series(cube)
series[0] = 99
self.assertEqual(cube.data[0], 0)
def test_copy_masked_false(self):
data = np.ma.MaskedArray([0, 1, 2, 3, 4], mask=[0, 1, 0, 1, 0])
cube = Cube(data, long_name="foo")
with self.assertRaises(ValueError):
series = iris.pandas.as_series(cube, copy=False)
@skip_pandas
class TestAsDataFrame(tests.IrisTest):
"""Test conversion of 2D cubes to Pandas using as_data_frame()"""
def test_no_dim_coords(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="foo")
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertString(
str(data_frame),
tests.get_result_path(('pandas', 'as_dataframe',
'no_dim_coords.txt')))
def test_no_x_coord(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="foo")
y_coord = DimCoord([10, 11], long_name="bar")
cube.add_dim_coord(y_coord, 0)
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertString(
str(data_frame),
tests.get_result_path(('pandas', 'as_dataframe',
'no_x_coord.txt')))
def test_no_y_coord(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="foo")
x_coord = DimCoord([10, 11, 12, 13, 14], long_name="bar")
cube.add_dim_coord(x_coord, 1)
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertString(
str(data_frame),
tests.get_result_path(('pandas', 'as_dataframe',
'no_y_coord.txt')))
def test_simple(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="foo")
x_coord = DimCoord([10, 11, 12, 13, 14], long_name="bar")
y_coord = DimCoord([15, 16], long_name="milk")
cube.add_dim_coord(x_coord, 1)
cube.add_dim_coord(y_coord, 0)
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertString(
str(data_frame),
tests.get_result_path(('pandas', 'as_dataframe',
'simple.txt')))
def test_masked(self):
data = np.ma.MaskedArray([[0, 1, 2, 3, 4.4], [5, 6, 7, 8, 9]],
mask=[[0, 1, 0, 1, 0], [1, 0, 1, 0, 1]])
cube = Cube(data, long_name="foo")
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data.astype('f').filled(np.nan))
self.assertString(
str(data_frame),
tests.get_result_path(('pandas', 'as_dataframe',
'masked.txt')))
def test_time_gregorian(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="ts")
day_offsets = [0, 100.1, 200.2, 300.3, 400.4]
time_coord = DimCoord(day_offsets, long_name="time",
units="days since 2000-01-01 00:00")
cube.add_dim_coord(time_coord, 1)
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
nanoseconds_per_day = 24 * 60 * 60 * 1000000000
days_to_2000 = 365 * 30 + 7
# pandas Timestamp class cannot handle floats in pandas <v0.12
timestamps = [pandas.Timestamp(int(nanoseconds_per_day *
(days_to_2000 + day_offset)))
for day_offset in day_offsets]
self.assertTrue(all(data_frame.columns == timestamps))
self.assertTrue(all(data_frame.index == [0, 1]))
def test_time_360(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="ts")
time_unit = iris.unit.Unit("days since 2000-01-01 00:00",
calendar=iris.unit.CALENDAR_360_DAY)
time_coord = DimCoord([100.1, 200.2], long_name="time",
units=time_unit)
cube.add_dim_coord(time_coord, 0)
data_frame = iris.pandas.as_data_frame(cube)
self.assertArrayEqual(data_frame, cube.data)
self.assertString(
str(data_frame),
tests.get_result_path(('pandas', 'as_dataframe',
'time_360.txt')))
def test_copy_true(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]),
long_name="foo")
data_frame = iris.pandas.as_data_frame(cube)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 0)
def test_copy_int32_false(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
dtype=np.int32), long_name="foo")
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
def test_copy_int64_false(self):
cube = Cube(np.array([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
dtype=np.int64), long_name="foo")
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
def test_copy_float_false(self):
cube = Cube(np.array([[0, 1, 2, 3, 4.4], [5, 6, 7, 8, 9]]),
long_name="foo")
data_frame = iris.pandas.as_data_frame(cube, copy=False)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 99)
def test_copy_masked_true(self):
data = np.ma.MaskedArray([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
mask=[[0, 1, 0, 1, 0], [1, 0, 1, 0, 1]])
cube = Cube(data, long_name="foo")
data_frame = iris.pandas.as_data_frame(cube)
data_frame[0][0] = 99
self.assertEqual(cube.data[0, 0], 0)
def test_copy_masked_false(self):
data = np.ma.MaskedArray([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]],
mask=[[0, 1, 0, 1, 0], [1, 0, 1, 0, 1]])
cube = Cube(data, long_name="foo")
with self.assertRaises(ValueError):
data_frame = iris.pandas.as_data_frame(cube, copy=False)
@skip_pandas
class TestSeriesAsCube(tests.IrisTest):
def test_series_simple(self):
series = pandas.Series([0, 1, 2, 3, 4], index=[5, 6, 7, 8, 9])
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(('pandas', 'as_cube', 'series_simple.cml')))
def test_series_object(self):
class Thing(object):
def __repr__(self):
return "A Thing"
series = pandas.Series(
[0, 1, 2, 3, 4],
index=[Thing(), Thing(), Thing(), Thing(), Thing()])
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(('pandas', 'as_cube', 'series_object.cml')))
def test_series_masked(self):
series = pandas.Series([0, float('nan'), 2, np.nan, 4],
index=[5, 6, 7, 8, 9])
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(('pandas', 'as_cube', 'series_masked.cml')))
def test_series_datetime_gregorian(self):
series = pandas.Series(
[0, 1, 2, 3, 4],
index=[datetime.datetime(2001, 01, 01, 01, 01, 01),
datetime.datetime(2002, 02, 02, 02, 02, 02),
datetime.datetime(2003, 03, 03, 03, 03, 03),
datetime.datetime(2004, 04, 04, 04, 04, 04),
datetime.datetime(2005, 05, 05, 05, 05, 05)])
self.assertCML(
iris.pandas.as_cube(series),
tests.get_result_path(('pandas', 'as_cube',
'series_datetime_gregorian.cml')))
def test_series_netcdftime_360(self):
series = pandas.Series(
[0, 1, 2, 3, 4],
index=[netcdftime.datetime(2001, 01, 01, 01, 01, 01),
netcdftime.datetime(2002, 02, 02, 02, 02, 02),
netcdftime.datetime(2003, 03, 03, 03, 03, 03),
netcdftime.datetime(2004, 04, 04, 04, 04, 04),
netcdftime.datetime(2005, 05, 05, 05, 05, 05)])
self.assertCML(
iris.pandas.as_cube(series,
calendars={0: iris.unit.CALENDAR_360_DAY}),
tests.get_result_path(('pandas', 'as_cube',
'series_netcdfimte_360.cml')))
def test_copy_true(self):
series = pandas.Series([0, 1, 2, 3, 4], index=[5, 6, 7, 8, 9])
cube = iris.pandas.as_cube(series)
cube.data[0] = 99
self.assertEqual(series[5], 0)
def test_copy_false(self):
series = pandas.Series([0, 1, 2, 3, 4], index=[5, 6, 7, 8, 9])
cube = iris.pandas.as_cube(series, copy=False)
cube.data[0] = 99
self.assertEqual(series[5], 99)
@skip_pandas
class TestDataFrameAsCube(tests.IrisTest):
def test_data_frame_simple(self):
data_frame = pandas.DataFrame([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]],
index=[10, 11],
columns=[12, 13, 14, 15, 16])
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(('pandas', 'as_cube',
'data_frame_simple.cml')))
def test_data_frame_nonotonic(self):
data_frame = pandas.DataFrame([[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]],
index=[10, 10],
columns=[12, 12, 14, 15, 16])
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(('pandas', 'as_cube',
'data_frame_nonotonic.cml')))
def test_data_frame_masked(self):
data_frame = pandas.DataFrame([[0, float('nan'), 2, 3, 4],
[5, 6, 7, np.nan, 9]],
index=[10, 11],
columns=[12, 13, 14, 15, 16])
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(('pandas', 'as_cube',
'data_frame_masked.cml')))
def test_data_frame_netcdftime_360(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]],
index=[netcdftime.datetime(2001, 01, 01, 01, 01, 01),
netcdftime.datetime(2002, 02, 02, 02, 02, 02)],
columns=[10, 11, 12, 13, 14])
self.assertCML(
iris.pandas.as_cube(
data_frame,
calendars={0: iris.unit.CALENDAR_360_DAY}),
tests.get_result_path(('pandas', 'as_cube',
'data_frame_netcdftime_360.cml')))
def test_data_frame_datetime_gregorian(self):
data_frame = pandas.DataFrame(
[[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]],
index=[datetime.datetime(2001, 01, 01, 01, 01, 01),
datetime.datetime(2002, 02, 02, 02, 02, 02)],
columns=[10, 11, 12, 13, 14])
self.assertCML(
iris.pandas.as_cube(data_frame),
tests.get_result_path(('pandas', 'as_cube',
'data_frame_datetime_gregorian.cml')))
def test_copy_true(self):
data_frame = pandas.DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
cube = iris.pandas.as_cube(data_frame)
cube.data[0, 0] = 99
self.assertEqual(data_frame[0][0], 0)
def test_copy_false(self):
data_frame = pandas.DataFrame([[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]])
cube = iris.pandas.as_cube(data_frame, copy=False)
cube.data[0, 0] = 99
self.assertEqual(data_frame[0][0], 99)
if __name__ == "__main__":
tests.main()
| gpl-3.0 |
timbennett/twitter-tools | get_recent_tweets.py | 1 | 1318 | '''
export user's last 3240 tweets to CSV (full structure)
usage: python get_recent_tweets.py screenname
requires pandas because why reinvent to_csv()?
'''
import tweepy #https://github.com/tweepy/tweepy
import csv
import sys
import json
import pandas as pd
# make sure twitter_auth.py exists with contents:
#
# access_key = ""
# access_secret = ""
# consumer_key = ""
# consumer_secret = ""
#
from twitter_auth import access_key, access_secret, consumer_key, consumer_secret
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_key, access_secret)
api = tweepy.API(auth)
alltweets = []
screenname=sys.argv[1]
new_tweets = api.user_timeline(screen_name = screenname,count=200)
alltweets.extend(new_tweets)
oldest = alltweets[-1].id - 1
while len(new_tweets) > 0:
print "getting tweets before %s" % (oldest)
#all subsequent requests use the max_id param to prevent duplicates
new_tweets = api.user_timeline(screen_name = screenname,count=200,max_id=oldest)
#save most recent tweets
alltweets.extend(new_tweets)
#update the id of the oldest tweet less one
oldest = alltweets[-1].id - 1
print "...%s tweets downloaded so far" % (len(alltweets))
json_strings = json.dumps([tweet._json for tweet in alltweets])
df = pd.read_json(json_strings)
df.to_csv('{}.csv'.format(screenname), encoding='utf-8')
| mit |
minireference/noBSLAnotebooks | util/plot_helpers.py | 1 | 8787 | from matplotlib.patches import FancyArrowPatch
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import proj3d
import numpy as np
# Helper functions for plotting vectors, lines, and planes using matplotlib
#
# Design
# ------
# We want to have functions `plot_point`, `plot_vec`, `plot_line`, `plot_plane`
# that are easy to use and can handle both 2D and 3D SymPy Matrix inputs.
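# A minimal usage sketch (illustrative, not part of the original module); the
# vectors below are arbitrary example inputs, and the helpers it calls are
# defined further down in this file.
def _demo_plot_vectors():
    """Draw two 3D vectors from the origin and fit the axes around them."""
    plot_vecs([1, 0, 0], [1, 2, 3])  # colors are assigned automatically
    autoscale_arrows()
    plt.show()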
def plot_point(point, color='k'):
"""
Plot the 2d or 3d input `point` as a solid dot on the graph.
"""
# TODO: implement me...
pass
def plot_vec(vec, at=[0,0,0], color='k'):
"""
Plot the 2d or 3d vector `vec`, which can be a SymPy Matrix, a numpy array,
or a python list.
"""
if len(vec) == 3:
ax = plt.gca(projection='3d')
ax.set_aspect("auto")
vec_x, vec_y, vec_z = float(vec[0]), float(vec[1]), float(vec[2])
at_x, at_y, at_z = float(at[0]), float(at[1]), float(at[2])
a = Arrow3D([at_x, at_x + vec_x],
[at_y, at_y + vec_y],
[at_z, at_z + vec_z],
mutation_scale=20, lw=1, arrowstyle="-|>", color=color)
ax.add_artist(a)
elif len(vec) == 2:
ax = plt.gca()
ax.set_aspect("equal")
vec_x, vec_y = float(vec[0]), float(vec[1])
at_x, at_y = float(at[0]), float(at[1])
a = Arrow2D([at_x, at_x + vec_x],
[at_y, at_y + vec_y],
mutation_scale=20, lw=1, arrowstyle="-|>", color=color)
ax.add_artist(a)
else:
print('plot_vec supports only 2D and 3D vectors.')
def plot_vecs(*args):
"""
Plot each of the vectors in the argument list in a different color.
"""
COLORS = ['k', 'b', 'g', 'r', 'c', 'm']
for i, vec in enumerate(args):
plot_vec(vec, color=COLORS[i%len(COLORS)])
def plot_line(dir_vec, point, color=None):
"""
Plots the line with direction vector `dir_vec` passing through `point`.
"""
if len(dir_vec) == 3:
ax = plt.gca(projection='3d')
ax.set_aspect("auto")
dir_vec_x = float(dir_vec[0])
dir_vec_y = float(dir_vec[1])
dir_vec_z = float(dir_vec[2])
point_x = float(point[0])
point_y = float(point[1])
point_z = float(point[2])
s = (np.linspace(-5, 5, 100) - point_x)/dir_vec_x
x = point_x + dir_vec_x*s
y = point_y + dir_vec_y*s
z = point_z + dir_vec_z*s
ax.plot(x, y, z, color=color)
elif len(dir_vec) == 2:
ax = plt.gca()
ax.set_aspect("equal")
dir_vec_x = float(dir_vec[0])
dir_vec_y = float(dir_vec[1])
point_x = float(point[0])
point_y = float(point[1])
s = (np.linspace(-5, 5, 100) - point_x)/dir_vec_x
x = point_x + dir_vec_x*s
y = point_y + dir_vec_y*s
ax.plot(x, y, color=color)
else:
print('plot_line supports only 2D and 3D vectors.')
def plot_plane(normal, d, color=None, xrange=[-5,5], yrange=[-5,5]):
"""
Plots the plane whose general equation is normal . (x,y,z) = d.
If normal is a 2-vector, plots a line (2D plot).
"""
if len(normal) == 3:
ax = plt.gca(projection='3d')
ax.set_aspect("auto")
normal_x = float(normal[0])
normal_y = float(normal[1])
normal_z = float(normal[2])
d = float(d)
x = np.linspace(xrange[0], xrange[1], 100)
y = np.linspace(yrange[0], yrange[1], 100)
X, Y = np.meshgrid(x,y)
if color is None:
color_picker = PlaneColorPicker()
color = color_picker.get_color()
Z = (d - normal_x*X - normal_y*Y)/normal_z
ax.plot_surface(X, Y, Z, color=color, alpha=0.2)
elif len(normal) == 2:
ax = plt.gca()
ax.set_aspect("equal")
normal_x = float(normal[0])
normal_y = float(normal[1])
d = float(d)
x = np.linspace(xrange[0], xrange[1], 100)
y = (d - normal_x*x)/normal_y
ax.plot(x, y, color=color)
else:
print('plot_plane supports only 2D and 3D vectors.')
# CHAPTER 4: COMPUTATIONAL LINEAR ALGEBRA
################################################################################
def plot_augmat(AUG):
"""
Visualize the augmented matrix `AUG` geometrically as the intersection
of geometrical objects:
- Intersection of lines in 2D (when AUG has three cols)
- Intersection of planes in 3D (when AUG has four cols)
"""
if AUG.cols == 3:
# An Mx3 augmented matrix represents lines in the Cartesian plane
for i in range(AUG.rows):
line = AUG[i,:]
template = 'Line {0:d}: {1:d}x {2:+d}y = {3:d}'
print(template.format(i+1, *map(int,line)))
plot_plane(line[0:2], line[2])
elif AUG.cols == 4:
# An Mx4 augmented matrix represents planes in 3D
for i in range(AUG.rows):
plane = AUG[i,:]
normal = plane[0:3]
d = plane[3]
template = 'Plane {0:d}: {1:d}x {2:+d}y {3:+d}z = {4:d}'
print(template.format(i+1, *map(int,plane)))
plot_plane(normal, d)
else:
print('plot_augmat supports only lines and planes.')
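# A minimal sketch of calling plot_augmat (illustrative only; it assumes SymPy
# is installed, which the Matrix-based inputs mentioned above already presuppose).
def _demo_plot_augmat():
    """Plot the two lines encoded by the augmented matrix [A|b]."""
    from sympy import Matrix
    aug = Matrix([[1, 2, 5],    # x + 2y = 5
                  [3, -1, 4]])  # 3x - y = 4
    plot_augmat(aug)
    plt.show()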
# IMPLEMENTATION DETAILS
################################################################################
class Arrow3D(FancyArrowPatch):
"""
A 3D arrow used to represent vectors in 3D.
"""
def __init__(self, xs, ys, zs, *args, **kwargs):
FancyArrowPatch.__init__(self, (0,0), (0,0), *args, **kwargs)
self._verts3d = xs, ys, zs
def draw(self, renderer):
xs3d, ys3d, zs3d = self._verts3d
xs, ys, zs = proj3d.proj_transform(xs3d, ys3d, zs3d, renderer.M)
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
class Arrow2D(FancyArrowPatch):
"""
A 2D arrow used to represent vectors in 2D.
"""
def __init__(self, xs, ys, *args, **kwargs):
self._verts2d = xs, ys
FancyArrowPatch.__init__(self, (xs[0],ys[0]), (xs[1],ys[1]), *args, **kwargs)
def draw(self, renderer):
xs3d, ys3d = self._verts2d
xs, ys = xs3d, ys3d
self.set_positions((xs[0],ys[0]),(xs[1],ys[1]))
FancyArrowPatch.draw(self, renderer)
# HELPER FUNCTIONS
################################################################################
def autoscale_arrows(ax=None):
"""
Custom auto-scaling method for Arrow3D objects.
"""
if ax is None:
ax = plt.gca()
arrow3Ds = [ ch for ch in ax.get_children() if type(ch) == Arrow3D ]
arrow2Ds = [ ch for ch in ax.get_children() if type(ch) == Arrow2D ]
if arrow2Ds and arrow3Ds:
print('Mixing Arrow2D and Arrow3D not supported')
return -1
if arrow3Ds:
all_xs, all_ys, all_zs = [], [], []
for arrow in arrow3Ds:
all_xs.append(arrow._verts3d[0][0])
all_xs.append(arrow._verts3d[0][1])
all_ys.append(arrow._verts3d[1][0])
all_ys.append(arrow._verts3d[1][1])
all_zs.append(arrow._verts3d[2][0])
all_zs.append(arrow._verts3d[2][1])
min_x, max_x = min(all_xs), max(all_xs)
min_y, max_y = min(all_ys), max(all_ys)
min_z, max_z = min(all_zs), max(all_zs)
cube_side = max(max_x-min_x, max_y-min_y, max_z-min_z)
ax.set_xlim(min_x, min_x + cube_side)
ax.set_ylim(min_y, min_y + cube_side)
ax.set_zlim(min_z, min_z + cube_side)
elif arrow2Ds:
all_xs, all_ys = [], []
for arrow in arrow2Ds:
all_xs.append(arrow._verts2d[0][0])
all_xs.append(arrow._verts2d[0][1])
all_ys.append(arrow._verts2d[1][0])
all_ys.append(arrow._verts2d[1][1])
min_x, max_x = min(all_xs), max(all_xs)
min_y, max_y = min(all_ys), max(all_ys)
square_side = max(max_x-min_x, max_y-min_y)
ax.set_xlim(min_x, min_x + square_side)
ax.set_ylim(min_y, min_y + square_side)
class PlaneColorPicker:
"""
A singleton class that cycles through the colors used for drawing planes.
"""
COLORS = ['b', 'g', 'r', 'c', 'm', 'k']
instance = None
class __PlaneColorPicker:
def __init__(self, start_index):
self.color_index = start_index
def __get_color(self):
cur = self.color_index
self.color_index = (cur + 1) % len(PlaneColorPicker.COLORS)
return PlaneColorPicker.COLORS[cur]
def __init__(self, start=0):
if not PlaneColorPicker.instance:
PlaneColorPicker.instance = PlaneColorPicker.__PlaneColorPicker(start)
def get_color(self):
return self.instance.__get_color()
| mit |
mwv/scikit-learn | sklearn/utils/tests/test_sparsefuncs.py | 157 | 13799 | import numpy as np
import scipy.sparse as sp
from scipy import linalg
from numpy.testing import assert_array_almost_equal, assert_array_equal
from sklearn.datasets import make_classification
from sklearn.utils.sparsefuncs import (mean_variance_axis,
inplace_column_scale,
inplace_row_scale,
inplace_swap_row, inplace_swap_column,
min_max_axis,
count_nonzero, csc_median_axis_0)
from sklearn.utils.sparsefuncs_fast import assign_rows_csr
from sklearn.utils.testing import assert_raises
def test_mean_variance_axis0():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
X_means, X_vars = mean_variance_axis(X_csc, axis=0)
assert_array_almost_equal(X_means, np.mean(X, axis=0))
assert_array_almost_equal(X_vars, np.var(X, axis=0))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=0)
def test_mean_variance_illegal_axis():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_csr = sp.csr_matrix(X)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-3)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=2)
assert_raises(ValueError, mean_variance_axis, X_csr, axis=-1)
def test_mean_variance_axis1():
X, _ = make_classification(5, 4, random_state=0)
# Sparsify the array a little bit
X[0, 0] = 0
X[2, 1] = 0
X[4, 3] = 0
X_lil = sp.lil_matrix(X)
X_lil[1, 0] = 0
X[1, 0] = 0
X_csr = sp.csr_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_csc = sp.csc_matrix(X_lil)
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
X = X.astype(np.float32)
X_csr = X_csr.astype(np.float32)
X_csc = X_csc.astype(np.float32)
X_means, X_vars = mean_variance_axis(X_csr, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
X_means, X_vars = mean_variance_axis(X_csc, axis=1)
assert_array_almost_equal(X_means, np.mean(X, axis=1))
assert_array_almost_equal(X_vars, np.var(X, axis=1))
assert_raises(TypeError, mean_variance_axis, X_lil, axis=1)
def test_densify_rows():
X = sp.csr_matrix([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_rows = np.array([0, 2, 3], dtype=np.intp)
out = np.ones((6, X.shape[1]), dtype=np.float64)
out_rows = np.array([1, 3, 4], dtype=np.intp)
expect = np.ones_like(out)
expect[out_rows] = X[X_rows, :].toarray()
assign_rows_csr(X, X_rows, out_rows, out)
assert_array_equal(out, expect)
def test_inplace_column_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(200)
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale
inplace_column_scale(Xc, scale)
inplace_column_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_row_scale():
rng = np.random.RandomState(0)
X = sp.rand(100, 200, 0.05)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
scale = rng.rand(100)
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
X = X.astype(np.float32)
scale = scale.astype(np.float32)
Xr = X.tocsr()
Xc = X.tocsc()
XA = X.toarray()
XA *= scale.reshape(-1, 1)
inplace_row_scale(Xc, scale)
inplace_row_scale(Xr, scale)
assert_array_almost_equal(Xr.toarray(), Xc.toarray())
assert_array_almost_equal(XA, Xc.toarray())
assert_array_almost_equal(XA, Xr.toarray())
assert_raises(TypeError, inplace_column_scale, X.tolil(), scale)
def test_inplace_swap_row():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[0], X[-1] = swap(X[0], X[-1])
inplace_swap_row(X_csr, 0, -1)
inplace_swap_row(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[2], X[3] = swap(X[2], X[3])
inplace_swap_row(X_csr, 2, 3)
inplace_swap_row(X_csc, 2, 3)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_row, X_csr.tolil())
def test_inplace_swap_column():
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
X = np.array([[0, 3, 0],
[2, 4, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
swap = linalg.get_blas_funcs(('swap',), (X,))
swap = swap[0]
X[:, 0], X[:, -1] = swap(X[:, 0], X[:, -1])
inplace_swap_column(X_csr, 0, -1)
inplace_swap_column(X_csc, 0, -1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
X[:, 0], X[:, 1] = swap(X[:, 0], X[:, 1])
inplace_swap_column(X_csr, 0, 1)
inplace_swap_column(X_csc, 0, 1)
assert_array_equal(X_csr.toarray(), X_csc.toarray())
assert_array_equal(X, X_csc.toarray())
assert_array_equal(X, X_csr.toarray())
assert_raises(TypeError, inplace_swap_column, X_csr.tolil())
def test_min_max_axis0():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=0)
assert_array_equal(mins_csr, X.min(axis=0))
assert_array_equal(maxs_csr, X.max(axis=0))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=0)
assert_array_equal(mins_csc, X.min(axis=0))
assert_array_equal(maxs_csc, X.max(axis=0))
def test_min_max_axis1():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
X = X.astype(np.float32)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
mins_csr, maxs_csr = min_max_axis(X_csr, axis=1)
assert_array_equal(mins_csr, X.min(axis=1))
assert_array_equal(maxs_csr, X.max(axis=1))
mins_csc, maxs_csc = min_max_axis(X_csc, axis=1)
assert_array_equal(mins_csc, X.min(axis=1))
assert_array_equal(maxs_csc, X.max(axis=1))
def test_min_max_axis_errors():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
assert_raises(TypeError, min_max_axis, X_csr.tolil(), axis=0)
assert_raises(ValueError, min_max_axis, X_csr, axis=2)
assert_raises(ValueError, min_max_axis, X_csc, axis=-3)
def test_count_nonzero():
X = np.array([[0, 3, 0],
[2, -1, 0],
[0, 0, 0],
[9, 8, 7],
[4, 0, 5]], dtype=np.float64)
X_csr = sp.csr_matrix(X)
X_csc = sp.csc_matrix(X)
X_nonzero = X != 0
sample_weight = [.5, .2, .3, .1, .1]
X_nonzero_weighted = X_nonzero * np.array(sample_weight)[:, None]
for axis in [0, 1, -1, -2, None]:
assert_array_almost_equal(count_nonzero(X_csr, axis=axis),
X_nonzero.sum(axis=axis))
assert_array_almost_equal(count_nonzero(X_csr, axis=axis,
sample_weight=sample_weight),
X_nonzero_weighted.sum(axis=axis))
assert_raises(TypeError, count_nonzero, X_csc)
assert_raises(ValueError, count_nonzero, X_csr, axis=2)
def test_csc_row_median():
# Test csc_row_median actually calculates the median.
# Test that it gives the same output when X is dense.
rng = np.random.RandomState(0)
X = rng.rand(100, 50)
dense_median = np.median(X, axis=0)
csc = sp.csc_matrix(X)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test that it gives the same output when X is sparse
X = rng.rand(51, 100)
X[X < 0.7] = 0.0
ind = rng.randint(0, 50, 10)
X[ind] = -X[ind]
csc = sp.csc_matrix(X)
dense_median = np.median(X, axis=0)
sparse_median = csc_median_axis_0(csc)
assert_array_equal(sparse_median, dense_median)
# Test for toy data.
X = [[0, -2], [-1, -1], [1, 0], [2, 1]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0.5, -0.5]))
X = [[0, -2], [-1, -5], [1, -3]]
csc = sp.csc_matrix(X)
assert_array_equal(csc_median_axis_0(csc), np.array([0., -3]))
# Test that it raises an Error for non-csc matrices.
assert_raises(TypeError, csc_median_axis_0, sp.csr_matrix(X))
| bsd-3-clause |
volumeint/zeppelin | python/src/main/resources/python/bootstrap_sql.py | 60 | 1189 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Setup SQL over Pandas DataFrames
# It requires the following dependencies to be installed:
# - pandas
# - pandasql
from __future__ import print_function
try:
from pandasql import sqldf
pysqldf = lambda q: sqldf(q, globals())
except ImportError:
pysqldf = lambda q: print("Cannot run SQL over Pandas DataFrame. " +
"Make sure 'pandas' and 'pandasql' libraries are installed")
| apache-2.0 |
lukebarnard1/bokeh | examples/charts/file/scatter.py | 37 | 1607 |
from collections import OrderedDict
import pandas as pd
from bokeh.charts import Scatter, output_file, show, vplot
from bokeh.sampledata.iris import flowers
setosa = flowers[(flowers.species == "setosa")][["petal_length", "petal_width"]]
versicolor = flowers[(flowers.species == "versicolor")][["petal_length", "petal_width"]]
virginica = flowers[(flowers.species == "virginica")][["petal_length", "petal_width"]]
xyvalues = OrderedDict([("setosa", setosa.values), ("versicolor", versicolor.values), ("virginica", virginica.values)])
scatter1 = Scatter(xyvalues, title="iris dataset, dict_input", xlabel="petal_length",
ylabel="petal_width", legend='top_left', marker="triangle")
groupped_df = flowers[["petal_length", "petal_width", "species"]].groupby("species")
scatter2 = Scatter(groupped_df, title="iris dataset, groupby input", xlabel="petal_length",
ylabel="petal_width", legend='top_left')
pdict = OrderedDict()
for i in groupped_df.groups.keys():
labels = groupped_df.get_group(i).columns
xname = labels[0]
yname = labels[1]
x = getattr(groupped_df.get_group(i), xname)
y = getattr(groupped_df.get_group(i), yname)
pdict[i] = list(zip(x, y))
df = pd.DataFrame(pdict)
scatter3 = Scatter(
df, title="iris dataset, dict_input",
xlabel="petal_length", ylabel="petal_width", legend='top_left')
scatter4 = Scatter(
list(xyvalues.values()), title="iris dataset, dict_input",
xlabel="petal_length", ylabel="petal_width", legend='top_left')
output_file("scatter.html")
show(vplot(scatter1, scatter2, scatter3, scatter4))
| bsd-3-clause |
janmejay/librdkafka | tests/performance_plot.py | 6 | 2901 | #!/usr/bin/env python
#
import sys, json
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
def semver2int (semver):
if semver == 'trunk':
semver = '0.10.0.0'
vi = 0
i = 0
for v in reversed(semver.split('.')):
vi += int(v) * (100 ** i)  # positional weight so e.g. 0.9.0.1 sorts above 0.8.2.2
i += 1
return vi
def get_perf_data (perfname, stats):
""" Return [labels,x,y,errs] for perfname 'mb_per_sec' as a numpy arrays
labels: broker versions
x: list with identical value (to plot on same x point)
y: perfname counter (average)
errs: errors
"""
ver = defaultdict(list)
# Per version:
# * accumulate values
# * calculate average
# * calculate error
# Accumulate values per version
for x in stats:
v = str(x[0])
ver[v].append(x[1][perfname])
print('%s is %s' % (perfname, ver))
labels0 = sorted(ver.keys(), key=semver2int)
y0 = list()
errs0 = list()
# Maintain order by using labels0
for v in labels0:
# Calculate average
avg = sum(ver[v]) / float(len(ver[v]))
y0.append(avg)
# Calculate error
errs0.append(max(ver[v]) - avg)
labels = np.array(labels0)
y1 = np.array(y0)
x1 = np.array(range(0, len(labels)))
errs = np.array(errs0)
return [labels,x1,y1,errs]
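# Illustrative only (not part of the original script): a hand-written `stats`
# list with the same shape the __main__ block below builds, i.e. a list of
# (broker_version, counter_dict) tuples.
def _get_perf_data_example():
    stats = [('0.9.0.1', {'mb_per_sec': 35.2}),
             ('0.9.0.1', {'mb_per_sec': 36.0}),
             ('trunk', {'mb_per_sec': 40.1})]
    return get_perf_data('mb_per_sec', stats)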
def plot (description, name, stats, perfname, outfile=None):
labels,x,y,errs = get_perf_data(perfname, stats)
colors = np.random.rand(len(labels))
plt.title('%s: %s %s' % (description, name, perfname))
plt.xlabel('Kafka version')
plt.ylabel(perfname)
plt.errorbar(x, y, yerr=errs, alpha=0.5)
plt.xticks(x, labels, rotation='vertical')
plt.margins(0.2)
plt.subplots_adjust(bottom=0.2)
if outfile is None:
plt.show()
else:
plt.savefig(outfile, bbox_inches='tight')
return
if __name__ == '__main__':
outfile = sys.argv[1]
reports = []
for rf in sys.argv[2:]:
with open(rf) as f:
reports.append(json.load(f))
stats = defaultdict(list)
# Extract performance test data
for rep in reports:
perfs = rep.get('tests', dict()).get('0038_performance', dict()).get('report', None)
if perfs is None:
continue
for perf in perfs:
for n in ['producer','consumer']:
o = perf.get(n, None)
if o is None:
print('no %s in %s' % (n, perf))
continue
stats[n].append((rep.get('broker_version', 'unknown'), o))
for t in ['producer','consumer']:
for perfname in ['mb_per_sec', 'records_per_sec']:
plot('librdkafka 0038_performance test: %s (%d samples)' % \
(outfile, len(reports)),
t, stats[t], perfname, outfile='%s_%s_%s.png' % (outfile, t, perfname))
| bsd-2-clause |
pymedusa/SickRage | ext/pint/registry.py | 2 | 56734 | # -*- coding: utf-8 -*-
"""
pint.registry
~~~~~~~~~~~~~
Defines the Registry, a class to contain units and their relations.
The module actually defines 5 registries with different capabilites:
- BaseRegistry: Basic unit definition and querying.
Conversion between multiplicative units.
- NonMultiplicativeRegistry: Conversion between non multiplicative (offset) units.
(e.g. Temperature)
* Inherits from BaseRegistry
- ContextRegistry: Conversion between units with different dimensions according
to previously established relations (contexts).
(e.g. in spectroscopy, conversion between frequency and energy is possible)
* Inherits from BaseRegistry
- SystemRegistry: Grouping of units and changing of base units.
(e.g. in MKS, meter, kilogram and second are base units.)
* Inherits from BaseRegistry
- UnitRegistry: Combine all previous capabilities, it is exposed by Pint.
:copyright: 2016 by Pint Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import division, unicode_literals, print_function, absolute_import
import os
import re
import math
import functools
import itertools
import pkg_resources
from decimal import Decimal
from fractions import Fraction
from contextlib import contextmanager, closing
from io import open, StringIO
from collections import defaultdict
from tokenize import NUMBER, NAME
from . import registry_helpers
from .context import Context, ContextChain
from .util import (logger, pi_theorem, solve_dependencies, ParserHelper,
string_preprocessor, find_connected_nodes,
find_shortest_path, UnitsContainer, _is_dim,
to_units_container, SourceIterator)
from .compat import tokenizer, string_types, meta
from .definitions import (Definition, UnitDefinition, PrefixDefinition,
DimensionDefinition)
from .converters import ScaleConverter
from .errors import (DimensionalityError, UndefinedUnitError,
DefinitionSyntaxError, RedefinitionError)
from .pint_eval import build_eval_tree
from . import systems
_BLOCK_RE = re.compile(r' |\(')
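# A minimal usage sketch (illustrative, not part of this module): the
# UnitRegistry exposed by the ``pint`` package combines the capabilities
# listed in the module docstring above.
def _registry_usage_example():
    import pint  # imported lazily so nothing extra happens while this module loads
    ureg = pint.UnitRegistry()
    distance = 3 * ureg.meter + 4 * ureg.centimeter
    return distance.to(ureg.inch)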
class _Meta(type):
"""This is just to call after_init at the right time
instead of asking the developer to do it when subclassing.
"""
def __call__(self, *args, **kwargs):
obj = super(_Meta, self).__call__(*args, **kwargs)
obj._after_init()
return obj
class BaseRegistry(meta.with_metaclass(_Meta)):
"""Base class for all registries.
Capabilities:
- Register units, prefixes, and dimensions, and their relations.
- Convert between units.
- Find dimensionality of a unit.
- Parse units with prefix and/or suffix.
- Parse expressions.
- Parse a definition file.
- Allow extending the definition file parser by registering @ directives.
:param filename: path of the units definition file to load or line iterable object.
Empty to load the default definition file.
None to leave the UnitRegistry empty.
:type filename: str | None
:param force_ndarray: convert any input, scalar or not to a numpy.ndarray.
:param on_redefinition: action to take in case a unit is redefined.
'warn', 'raise', 'ignore'
:type on_redefinition: str
:param auto_reduce_dimensions: If True, reduce dimensionality on appropriate operations.
"""
#: Map context prefix to function
#: type: Dict[str, (SourceIterator -> None)]
_parsers = None
#: List to be used in addition of units when dir(registry) is called.
#: Also used for autocompletion in IPython.
_dir = ['Quantity', 'Unit', 'Measurement',
'define', 'load_definitions',
'get_name', 'get_symbol', 'get_dimensionality',
'get_base_units', 'get_root_units',
'parse_unit_name', 'parse_units', 'parse_expression',
'convert']
def __init__(self, filename='', force_ndarray=False, on_redefinition='warn', auto_reduce_dimensions=False):
self._register_parsers()
from .unit import build_unit_class
self.Unit = build_unit_class(self)
from .quantity import build_quantity_class
self.Quantity = build_quantity_class(self, force_ndarray)
from .measurement import build_measurement_class
self.Measurement = build_measurement_class(self, force_ndarray)
self._filename = filename
#: Action to take in case a unit is redefined. 'warn', 'raise', 'ignore'
self._on_redefinition = on_redefinition
#: Determines if dimensionality should be reduced on appropriate operations.
self.auto_reduce_dimensions = auto_reduce_dimensions
#: Map between name (string) and value (string) of defaults stored in the definitions file.
self._defaults = {}
#: Map dimension name (string) to its definition (DimensionDefinition).
self._dimensions = {}
#: Map unit name (string) to its definition (UnitDefinition).
#: Might contain prefixed units.
self._units = {}
#: Map unit name in lower case (string) to a set of unit names with the right case.
#: Does not contain prefixed units.
#: e.g: 'hz' - > set('Hz', )
self._units_casei = defaultdict(set)
#: Map prefix name (string) to its definition (PrefixDefinition).
self._prefixes = {'': PrefixDefinition('', '', (), 1)}
#: Map suffix name (string) to canonical name, and unit alias to canonical unit name
self._suffixes = {'': None, 's': ''}
#: Maps dimensionality (UnitsContainer) to Units (str)
self._dimensional_equivalents = dict()
#: Maps dimensionality (UnitsContainer) to Dimensionality (UnitsContainer)
self._root_units_cache = dict()
#: Maps dimensionality (UnitsContainer) to Units (UnitsContainer)
self._dimensionality_cache = dict()
#: Cache the unit name associated to user input. ('mV' -> 'millivolt')
self._parse_unit_cache = dict()
self._initialized = False
def _after_init(self):
"""This should be called after all __init__
"""
if self._filename == '':
self.load_definitions('default_en.txt', True)
elif self._filename is not None:
self.load_definitions(self._filename)
self.define(UnitDefinition('pi', 'π', (), ScaleConverter(math.pi)))
self._build_cache()
self._initialized = True
def _register_parsers(self):
self._register_parser('@defaults', self._parse_defaults)
def _parse_defaults(self, ifile):
"""Loader for a @default section.
:type ifile: SourceITerator
"""
next(ifile)
for lineno, part in ifile.block_iter():
k, v = part.split('=')
self._defaults[k.strip()] = v.strip()
def __name__(self):
return 'UnitRegistry'
def __getattr__(self, item):
if item[0] == '_':
return super(BaseRegistry, self).__getattribute__(item)
return self.Unit(item)
def __getitem__(self, item):
logger.warning('Calling the getitem method from a UnitRegistry is deprecated. '
'use `parse_expression` method or use the registry as a callable.')
return self.parse_expression(item)
def __dir__(self):
return list(self._units.keys()) + self._dir
@property
def default_format(self):
"""Default formatting string for quantities.
"""
return self.Quantity.default_format
@default_format.setter
def default_format(self, value):
self.Unit.default_format = value
self.Quantity.default_format = value
def define(self, definition):
"""Add unit to the registry.
:param definition: a dimension, unit or prefix definition.
:type definition: str | Definition
"""
if isinstance(definition, string_types):
for line in definition.split('\n'):
self._define(Definition.from_string(line))
else:
self._define(definition)
def _define(self, definition):
"""Add unit to the registry.
This method defines only multiplicative units, converting any other type
to `delta_` units.
:param definition: a dimension, unit or prefix definition.
:type definition: Definition
:return: Definition instance, case sensitive unit dict, case insensitive unit dict.
:rtype: Definition, dict, dict
"""
if isinstance(definition, DimensionDefinition):
d, di = self._dimensions, None
elif isinstance(definition, UnitDefinition):
d, di = self._units, self._units_casei
# For a base unit, we need to define the related dimension
# (making sure there is only one to define)
if definition.is_base:
for dimension in definition.reference.keys():
if dimension in self._dimensions:
if dimension != '[]':
raise DefinitionSyntaxError('only one unit per dimension can be a base unit.')
continue
self.define(DimensionDefinition(dimension, '', (), None, is_base=True))
elif isinstance(definition, PrefixDefinition):
d, di = self._prefixes, None
else:
raise TypeError('{} is not a valid definition.'.format(definition))
# define "delta_" units for units with an offset
if getattr(definition.converter, "offset", 0.0) != 0.0:
if definition.name.startswith('['):
d_name = '[delta_' + definition.name[1:]
else:
d_name = 'delta_' + definition.name
if definition.symbol:
d_symbol = 'Δ' + definition.symbol
else:
d_symbol = None
d_aliases = tuple('Δ' + alias for alias in definition.aliases)
d_reference = UnitsContainer(dict((ref, value)
for ref, value in definition.reference.items()))
d_def = UnitDefinition(d_name, d_symbol, d_aliases,
ScaleConverter(definition.converter.scale),
d_reference, definition.is_base)
else:
d_def = definition
self._define_adder(d_def, d, di)
return definition, d, di
def _define_adder(self, definition, unit_dict, casei_unit_dict):
"""Helper function to store a definition in the internal dictionaries.
It stores the definition under its name, symbol and aliases.
"""
self._define_single_adder(definition.name, definition, unit_dict, casei_unit_dict)
if definition.has_symbol:
self._define_single_adder(definition.symbol, definition, unit_dict, casei_unit_dict)
for alias in definition.aliases:
if ' ' in alias:
logger.warn('Alias cannot contain a space: ' + alias)
self._define_single_adder(alias, definition, unit_dict, casei_unit_dict)
def _define_single_adder(self, key, value, unit_dict, casei_unit_dict):
"""Helper function to store a definition in the internal dictionaries.
It warns or raise error on redefinition.
"""
if key in unit_dict:
if self._on_redefinition == 'raise':
raise RedefinitionError(key, type(value))
elif self._on_redefinition == 'warn':
logger.warning("Redefining '%s' (%s)", key, type(value))
unit_dict[key] = value
if casei_unit_dict is not None:
casei_unit_dict[key.lower()].add(key)
def _register_parser(self, prefix, parserfunc):
"""Register a loader for a given @ directive..
:param prefix: string identifying the section (e.g. @context)
:param parserfunc: A function that is able to parse a Definition section.
:type parserfunc: SourceIterator -> None
"""
if self._parsers is None:
self._parsers = dict()
if prefix and prefix[0] == '@':
self._parsers[prefix] = parserfunc
else:
raise ValueError("Prefix directives must start with '@'")
def load_definitions(self, file, is_resource=False):
"""Add units and prefixes defined in a definition text file.
:param file: can be a filename or a line iterable.
:param is_resource: used to indicate that the file is a resource file
and therefore should be loaded from the package.
"""
# Permit both filenames and line-iterables
if isinstance(file, string_types):
try:
if is_resource:
with closing(pkg_resources.resource_stream(__name__, file)) as fp:
rbytes = fp.read()
return self.load_definitions(StringIO(rbytes.decode('utf-8')), is_resource)
else:
with open(file, encoding='utf-8') as fp:
return self.load_definitions(fp, is_resource)
except (RedefinitionError, DefinitionSyntaxError) as e:
if e.filename is None:
e.filename = file
raise e
except Exception as e:
msg = getattr(e, 'message', '') or str(e)
raise ValueError('While opening {}\n{}'.format(file, msg))
ifile = SourceIterator(file)
for no, line in ifile:
if line and line[0] == '@':
if line.startswith('@import'):
if is_resource:
path = line[7:].strip()
else:
try:
path = os.path.dirname(file.name)
except AttributeError:
path = os.getcwd()
path = os.path.join(path, os.path.normpath(line[7:].strip()))
self.load_definitions(path, is_resource)
else:
parts = _BLOCK_RE.split(line)
loader = self._parsers.get(parts[0], None) if self._parsers else None
if loader is None:
raise DefinitionSyntaxError('Unknown directive %s' % line, lineno=no)
try:
loader(ifile)
except DefinitionSyntaxError as ex:
if ex.lineno is None:
ex.lineno = no
raise ex
else:
try:
self.define(Definition.from_string(line))
except DefinitionSyntaxError as ex:
if ex.lineno is None:
ex.lineno = no
raise ex
except Exception as ex:
logger.error("In line {}, cannot add '{}' {}".format(no, line, ex))
def _build_cache(self):
"""Build a cache of dimensionality and base units.
"""
self._dimensional_equivalents = dict()
deps = dict((name, set(definition.reference.keys() if definition.reference else {}))
for name, definition in self._units.items())
for unit_names in solve_dependencies(deps):
for unit_name in unit_names:
if '[' in unit_name:
continue
parsed_names = tuple(self.parse_unit_name(unit_name))
_prefix = None
if parsed_names:
_prefix, base_name, _suffix = parsed_names[0]
else:
base_name = unit_name
prefixed = True if _prefix else False
try:
uc = ParserHelper.from_word(base_name)
bu = self._get_root_units(uc)
di = self._get_dimensionality(uc)
self._root_units_cache[uc] = bu
self._dimensionality_cache[uc] = di
if not prefixed:
if di not in self._dimensional_equivalents:
self._dimensional_equivalents[di] = set()
self._dimensional_equivalents[di].add(self._units[base_name]._name)
except Exception as e:
logger.warning('Could not resolve {0}: {1!r}'.format(unit_name, e))
def _dedup_candidates(self, candidates):
"""Given a list of unit triplets (prefix, name, suffix),
remove those with different names but equal value.
e.g. ('kilo', 'gram', '') and ('', 'kilogram', '')
"""
candidates = tuple(candidates)
if len(candidates) < 2:
return candidates
unique = [candidates[0]]
for c in candidates[1:]:
for u in unique:
if c == u:
break
else:
unique.append(c)
return tuple(unique)
def get_name(self, name_or_alias, case_sensitive=True):
"""Return the canonical name of a unit.
"""
if name_or_alias == 'dimensionless':
return ''
try:
return self._units[name_or_alias]._name
except KeyError:
pass
candidates = self._dedup_candidates(self.parse_unit_name(name_or_alias, case_sensitive))
if not candidates:
raise UndefinedUnitError(name_or_alias)
elif len(candidates) == 1:
prefix, unit_name, _ = candidates[0]
else:
logger.warning('Parsing {} yielded multiple results. '
'Options are: {}'.format(name_or_alias, candidates))
prefix, unit_name, _ = candidates[0]
if prefix:
name = prefix + unit_name
symbol = self.get_symbol(name)
prefix_def = self._prefixes[prefix]
self._units[name] = UnitDefinition(name, symbol, (), prefix_def.converter,
UnitsContainer({unit_name: 1}))
return prefix + unit_name
return unit_name
def get_symbol(self, name_or_alias):
"""Return the preferred alias for a unit
"""
candidates = self._dedup_candidates(self.parse_unit_name(name_or_alias))
if not candidates:
raise UndefinedUnitError(name_or_alias)
elif len(candidates) == 1:
prefix, unit_name, _ = candidates[0]
else:
logger.warning('Parsing {0} yielded multiple results. '
'Options are: {1!r}'.format(name_or_alias, candidates))
prefix, unit_name, _ = candidates[0]
return self._prefixes[prefix].symbol + self._units[unit_name].symbol
def _get_symbol(self, name):
return self._units[name].symbol
def get_dimensionality(self, input_units):
"""Convert unit or dict of units or dimensions to a dict of base dimensions
dimensions
:param input_units:
:return: dimensionality
"""
input_units = to_units_container(input_units)
return self._get_dimensionality(input_units)
def _get_dimensionality(self, input_units):
""" Convert a UnitsContainer to base dimensions.
:param input_units:
:return: dimensionality
"""
if not input_units:
return UnitsContainer()
if input_units in self._dimensionality_cache:
return self._dimensionality_cache[input_units]
accumulator = defaultdict(float)
self._get_dimensionality_recurse(input_units, 1.0, accumulator)
if '[]' in accumulator:
del accumulator['[]']
dims = UnitsContainer(dict((k, v) for k, v in accumulator.items()
if v != 0.0))
self._dimensionality_cache[input_units] = dims
return dims
def _get_dimensionality_recurse(self, ref, exp, accumulator):
for key in ref:
exp2 = exp*ref[key]
if _is_dim(key):
reg = self._dimensions[key]
if reg.is_base:
accumulator[key] += exp2
elif reg.reference is not None:
self._get_dimensionality_recurse(reg.reference, exp2, accumulator)
else:
reg = self._units[self.get_name(key)]
if reg.reference is not None:
self._get_dimensionality_recurse(reg.reference, exp2, accumulator)
def _get_dimensionality_ratio(self, unit1, unit2):
""" Get the exponential ratio between two units, i.e. solve unit2 = unit1**x for x.
:param unit1: first unit
:type unit1: UnitsContainer compatible (str, Unit, UnitsContainer, dict)
:param unit2: second unit
:type unit2: UnitsContainer compatible (str, Unit, UnitsContainer, dict)
:returns: exponential proportionality or None if the units cannot be converted
"""
#shortcut in case of equal units
if unit1 == unit2:
return 1
dim1, dim2 = (self.get_dimensionality(unit) for unit in (unit1, unit2))
if not dim1 or not dim2 or dim1.keys() != dim2.keys(): #not comparable
return None
ratios = (dim2[key]/val for key, val in dim1.items())
first = next(ratios)
if all(r == first for r in ratios): #all are same, we're good
return first
return None
def get_root_units(self, input_units, check_nonmult=True):
"""Convert unit or dict of units to the root units.
If any unit is non multiplicative and check_converter is True,
then None is returned as the multiplicative factor.
:param input_units: units
:type input_units: UnitsContainer or str
:param check_nonmult: if True, None will be returned as the
multiplicative factor if a non-multiplicative
unit is found in the final Units.
:return: multiplicative factor, base units
"""
input_units = to_units_container(input_units)
f, units = self._get_root_units(input_units, check_nonmult)
return f, self.Unit(units)
def _get_root_units(self, input_units, check_nonmult=True):
"""Convert unit or dict of units to the root units.
If any unit is non multiplicative and check_converter is True,
then None is returned as the multiplicative factor.
:param input_units: units
:type input_units: UnitsContainer or dict
:param check_nonmult: if True, None will be returned as the
multiplicative factor if a non-multiplicative
unit is found in the final Units.
:return: multiplicative factor, base units
"""
if not input_units:
return 1., UnitsContainer()
# The cache is only done for check_nonmult=True
if check_nonmult and input_units in self._root_units_cache:
return self._root_units_cache[input_units]
accumulators = [1., defaultdict(float)]
self._get_root_units_recurse(input_units, 1.0, accumulators)
factor = accumulators[0]
units = UnitsContainer(dict((k, v) for k, v in accumulators[1].items()
if v != 0.))
# Check if any of the final units is non multiplicative and return None instead.
if check_nonmult:
for unit in units.keys():
if not self._units[unit].converter.is_multiplicative:
return None, units
if check_nonmult:
self._root_units_cache[input_units] = factor, units
return factor, units
def get_base_units(self, input_units, check_nonmult=True, system=None):
"""Convert unit or dict of units to the base units.
If any unit is non multiplicative and check_converter is True,
then None is returned as the multiplicative factor.
:param input_units: units
:type input_units: UnitsContainer or str
:param check_nonmult: if True, None will be returned as the
multiplicative factor if a non-multiplicative
unit is found in the final Units.
:return: multiplicative factor, base units
"""
return self.get_root_units(input_units, check_nonmult)
def _get_root_units_recurse(self, ref, exp, accumulators):
for key in sorted(ref):
exp2 = exp*ref[key]
key = self.get_name(key)
reg = self._units[key]
if reg.is_base:
accumulators[1][key] += exp2
else:
accumulators[0] *= reg._converter.scale ** exp2
if reg.reference is not None:
self._get_root_units_recurse(reg.reference, exp2,
accumulators)
def get_compatible_units(self, input_units, group_or_system=None):
"""
"""
input_units = to_units_container(input_units)
equiv = self._get_compatible_units(input_units, group_or_system)
return frozenset(self.Unit(eq) for eq in equiv)
def _get_compatible_units(self, input_units, group_or_system):
"""
"""
if not input_units:
return frozenset()
src_dim = self._get_dimensionality(input_units)
ret = self._dimensional_equivalents[src_dim]
return ret
def convert(self, value, src, dst, inplace=False):
"""Convert value from some source to destination units.
:param value: value
:param src: source units.
:type src: Quantity or str
:param dst: destination units.
:type dst: Quantity or str
:return: converted value
"""
src = to_units_container(src, self)
dst = to_units_container(dst, self)
if src == dst:
return value
return self._convert(value, src, dst, inplace)
def _convert(self, value, src, dst, inplace=False, check_dimensionality=True):
"""Convert value from some source to destination units.
:param value: value
:param src: source units.
:type src: UnitsContainer
:param dst: destination units.
:type dst: UnitsContainer
:return: converted value
"""
if check_dimensionality:
src_dim = self._get_dimensionality(src)
dst_dim = self._get_dimensionality(dst)
# If the source and destination dimensionality are different,
# then the conversion cannot be performed.
if src_dim != dst_dim:
raise DimensionalityError(src, dst, src_dim, dst_dim)
# Here src and dst have only multiplicative units left. Thus we can
# convert with a factor.
factor, units = self._get_root_units(src / dst)
# factor is type float and if our magnitude is type Decimal then
# must first convert to Decimal before we can '*' the values
if isinstance(value, Decimal):
factor = Decimal(str(factor))
elif isinstance(value, Fraction):
factor = Fraction(str(factor))
if inplace:
value *= factor
else:
value = value * factor
return value
def parse_unit_name(self, unit_name, case_sensitive=True):
"""Parse a unit to identify prefix, unit name and suffix
by walking the list of prefixes and suffixes.
:rtype: (str, str, str)
"""
stw = unit_name.startswith
edw = unit_name.endswith
for suffix, prefix in itertools.product(self._suffixes, self._prefixes):
if stw(prefix) and edw(suffix):
name = unit_name[len(prefix):]
if suffix:
name = name[:-len(suffix)]
if len(name) == 1:
continue
if case_sensitive:
if name in self._units:
yield (self._prefixes[prefix].name,
self._units[name].name,
self._suffixes[suffix])
else:
for real_name in self._units_casei.get(name.lower(), ()):
yield (self._prefixes[prefix].name,
self._units[real_name].name,
self._suffixes[suffix])
def parse_units(self, input_string, as_delta=None):
"""Parse a units expression and returns a UnitContainer with
the canonical names.
The expression can only contain products, ratios and powers of units.
:param as_delta: if the expression has multiple units, the parser will
interpret non multiplicative units as their `delta_` counterparts.
:raises:
:class:`pint.UndefinedUnitError` if a unit is not in the registry
:class:`ValueError` if the expression is invalid.
"""
units = self._parse_units(input_string, as_delta)
return self.Unit(units)
def _parse_units(self, input_string, as_delta=None):
"""
"""
if as_delta is None:
as_delta = True
if as_delta and input_string in self._parse_unit_cache:
return self._parse_unit_cache[input_string]
if not input_string:
return UnitsContainer()
# Sanitize input_string with whitespaces.
input_string = input_string.strip()
units = ParserHelper.from_string(input_string)
if units.scale != 1:
raise ValueError('Unit expression cannot have a scaling factor.')
ret = {}
many = len(units) > 1
for name in units:
cname = self.get_name(name)
value = units[name]
if not cname:
continue
if as_delta and (many or (not many and value != 1)):
definition = self._units[cname]
if not definition.is_multiplicative:
cname = 'delta_' + cname
ret[cname] = value
ret = UnitsContainer(ret)
if as_delta:
self._parse_unit_cache[input_string] = ret
return ret
def _eval_token(self, token, case_sensitive=True, **values):
token_type = token[0]
token_text = token[1]
if token_type == NAME:
if token_text == 'pi':
return self.Quantity(math.pi)
elif token_text == 'dimensionless':
return 1 * self.dimensionless
elif token_text in values:
return self.Quantity(values[token_text])
else:
return self.Quantity(1, UnitsContainer({self.get_name(token_text,
case_sensitive=case_sensitive) : 1}))
elif token_type == NUMBER:
return ParserHelper.eval_token(token)
else:
raise Exception('unknown token type')
def parse_expression(self, input_string, case_sensitive=True, **values):
"""Parse a mathematical expression including units and return a quantity object.
Numerical constants can be specified as keyword arguments and will take precedence
over the names defined in the registry.
"""
if not input_string:
return self.Quantity(1)
input_string = string_preprocessor(input_string)
gen = tokenizer(input_string)
return build_eval_tree(gen).evaluate(lambda x: self._eval_token(x,
case_sensitive=case_sensitive,
**values))
__call__ = parse_expression
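# A short sketch (illustrative only) of the BaseRegistry API documented above:
# parse an expression, inspect its dimensionality, and convert its magnitude.
def _base_registry_example():
    import pint  # lazy import to avoid a circular import at module load time
    ureg = pint.UnitRegistry()
    speed = ureg.parse_expression('90 km / hour')
    dims = ureg.get_dimensionality(speed.units)  # -> '[length] / [time]'
    return dims, ureg.convert(speed.magnitude, speed.units, 'meter / second')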
class NonMultiplicativeRegistry(BaseRegistry):
"""Handle of non multiplicative units (e.g. Temperature).
Capabilities:
- Register non-multiplicative units and their relations.
- Convert between non-multiplicative units.
:param default_as_delta: If True, non-multiplicative units are interpreted as
their *delta* counterparts in multiplications.
:param autoconvert_offset_to_baseunit: If True, non-multiplicative units are
converted to base units in multiplications.
"""
def __init__(self, default_as_delta=True, autoconvert_offset_to_baseunit=False, **kwargs):
super(NonMultiplicativeRegistry, self).__init__(**kwargs)
#: When performing a multiplication of units, interpret
#: non-multiplicative units as their *delta* counterparts.
self.default_as_delta = default_as_delta
# Determines if quantities with offset units are converted to their
# base units on multiplication and division.
self.autoconvert_offset_to_baseunit = autoconvert_offset_to_baseunit
def _parse_units(self, input_string, as_delta=None):
"""
"""
if as_delta is None:
as_delta = self.default_as_delta
return super(NonMultiplicativeRegistry, self)._parse_units(input_string, as_delta)
def _define(self, definition):
"""Add unit to the registry.
In addition to what is done by the BaseRegistry,
registers also non-multiplicative units.
:param definition: a dimension, unit or prefix definition.
:type definition: str | Definition
:return: Definition instance, case sensitive unit dict, case insensitive unit dict.
:rtype: Definition, dict, dict
"""
definition, d, di = super(NonMultiplicativeRegistry, self)._define(definition)
# define additional units for units with an offset
if getattr(definition.converter, "offset", 0.0) != 0.0:
self._define_adder(definition, d, di)
return definition, d, di
def _is_multiplicative(self, u):
if u in self._units:
return self._units[u].is_multiplicative
# If the unit is not in the registry might be because it is not
# registered with its prefixed version.
# TODO: Might be better to register them.
l = self._dedup_candidates(self.parse_unit_name(u))
try:
u = l[0][1]
return self._units[u].is_multiplicative
except KeyError:
raise UndefinedUnitError(u)
def _validate_and_extract(self, units):
nonmult_units = [(u, e) for u, e in units.items()
if not self._is_multiplicative(u)]
# Let's validate source offset units
if len(nonmult_units) > 1:
# More than one src offset unit is not allowed
raise ValueError('more than one offset unit.')
elif len(nonmult_units) == 1:
# A single src offset unit is present. Extract it
# But check that:
# - the exponent is 1
# - is not used in multiplicative context
nonmult_unit, exponent = nonmult_units.pop()
if exponent != 1:
raise ValueError('offset units in higher order.')
if len(units) > 1 and not self.autoconvert_offset_to_baseunit:
raise ValueError('offset unit used in multiplicative context.')
return nonmult_unit
return None
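    # Illustration of the rules enforced above (a hedged sketch; the unit
    # names assume pint's default definition file):
    #   {'degC': 1}          -> 'degC' is extracted as the offset unit
    #   {'degC': 2}          -> ValueError: offset units in higher order
    #   {'degC': 1, 'm': 1}  -> ValueError unless autoconvert_offset_to_baseunit
    #   {'m': 1, 's': -1}    -> None (purely multiplicative units)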
def _convert(self, value, src, dst, inplace=False):
"""Convert value from some source to destination units.
In addition to what is done by the BaseRegistry,
converts between non-multiplicative units.
:param value: value
:param src: source units.
:type src: UnitsContainer
:param dst: destination units.
:type dst: UnitsContainer
:return: converted value
"""
# Conversion needs to consider if non-multiplicative (AKA offset
# units) are involved. Conversion is only possible if src and dst
# have at most one offset unit per dimension. Other rules are applied
# by validate and extract.
try:
src_offset_unit = self._validate_and_extract(src)
except ValueError as ex:
raise DimensionalityError(src, dst, extra_msg=' - In source units, %s ' % ex)
try:
dst_offset_unit = self._validate_and_extract(dst)
except ValueError as ex:
raise DimensionalityError(src, dst, extra_msg=' - In destination units, %s ' % ex)
if not (src_offset_unit or dst_offset_unit):
return super(NonMultiplicativeRegistry, self)._convert(value, src, dst, inplace)
src_dim = self._get_dimensionality(src)
dst_dim = self._get_dimensionality(dst)
# If the source and destination dimensionality are different,
# then the conversion cannot be performed.
if src_dim != dst_dim:
raise DimensionalityError(src, dst, src_dim, dst_dim)
# clean src from offset units by converting to reference
if src_offset_unit:
value = self._units[src_offset_unit].converter.to_reference(value, inplace)
src = src.remove([src_offset_unit])
# clean dst units from offset units
dst = dst.remove([dst_offset_unit])
# Convert non multiplicative units to the dst.
value = super(NonMultiplicativeRegistry, self)._convert(value, src, dst, inplace, False)
# Finally convert to offset units specified in destination
if dst_offset_unit:
value = self._units[dst_offset_unit].converter.from_reference(value, inplace)
return value
class ContextRegistry(BaseRegistry):
"""Handle of Contexts.
    Conversion between units with different dimensions according
    to previously established relations (contexts).
    (e.g. in spectroscopy, conversion between frequency and energy is possible)
Capabilities:
- Register contexts.
- Enable and disable contexts.
- Parse @context directive.
"""
def __init__(self, **kwargs):
super(ContextRegistry, self).__init__(**kwargs)
#: Map context name (string) or abbreviation to context.
self._contexts = {}
#: Stores active contexts.
self._active_ctx = ContextChain()
def _register_parsers(self):
super(ContextRegistry, self)._register_parsers()
self._register_parser('@context', self._parse_context)
def _parse_context(self, ifile):
try:
self.add_context(Context.from_lines(ifile.block_iter(),
self.get_dimensionality))
except KeyError as e:
raise DefinitionSyntaxError('unknown dimension {} in context'.format(str(e)))
def add_context(self, context):
"""Add a context object to the registry.
The context will be accessible by its name and aliases.
Notice that this method will NOT enable the context. Use `enable_contexts`.
"""
if context.name in self._contexts:
logger.warning('The name %s was already registered for another context.',
context.name)
self._contexts[context.name] = context
for alias in context.aliases:
if alias in self._contexts:
                logger.warning('The name %s was already registered for another context.',
                               alias)
self._contexts[alias] = context
def remove_context(self, name_or_alias):
"""Remove a context from the registry and return it.
        Notice that this method will not disable the context. Use `disable_contexts`.
"""
context = self._contexts[name_or_alias]
del self._contexts[context.name]
for alias in context.aliases:
del self._contexts[alias]
return context
def enable_contexts(self, *names_or_contexts, **kwargs):
"""Enable contexts provided by name or by object.
:param names_or_contexts: sequence of the contexts or contexts names/alias
:param kwargs: keyword arguments for the context
"""
# If present, copy the defaults from the containing contexts
if self._active_ctx.defaults:
kwargs = dict(self._active_ctx.defaults, **kwargs)
# For each name, we first find the corresponding context
ctxs = list((self._contexts[name] if isinstance(name, string_types) else name)
for name in names_or_contexts)
        # If a context has not been checked yet, make sure that its
        # dimensions are expressed in terms of base dimensions.
for ctx in ctxs:
if getattr(ctx, '_checked', False):
continue
for (src, dst), func in ctx.funcs.items():
src_ = self._get_dimensionality(src)
dst_ = self._get_dimensionality(dst)
if src != src_ or dst != dst_:
ctx.remove_transformation(src, dst)
ctx.add_transformation(src_, dst_, func)
ctx._checked = True
# and create a new one with the new defaults.
ctxs = tuple(Context.from_context(ctx, **kwargs)
for ctx in ctxs)
# Finally we add them to the active context.
self._active_ctx.insert_contexts(*ctxs)
self._build_cache()
def disable_contexts(self, n=None):
"""Disable the last n enabled contexts.
"""
if n is None:
n = len(self._contexts)
self._active_ctx.remove_contexts(n)
self._build_cache()
@contextmanager
def context(self, *names, **kwargs):
"""Used as a context manager, this function enables to activate a context
which is removed after usage.
:param names: name of the context.
:param kwargs: keyword arguments for the contexts.
        Contexts are called by their name::
>>> with ureg.context('one'):
... pass
If the context has an argument, you can specify its value as a keyword
argument::
>>> with ureg.context('one', n=1):
... pass
        Multiple contexts can be entered in a single call::
>>> with ureg.context('one', 'two', n=1):
... pass
or nested allowing you to give different values to the same keyword argument::
>>> with ureg.context('one', n=1):
... with ureg.context('two', n=2):
... pass
A nested context inherits the defaults from the containing context::
>>> with ureg.context('one', n=1):
... with ureg.context('two'): # Here n takes the value of the upper context
... pass
"""
# Enable the contexts.
self.enable_contexts(*names, **kwargs)
try:
# After adding the context and rebuilding the graph, the registry
# is ready to use.
yield self
finally:
# Upon leaving the with statement,
# the added contexts are removed from the active one.
self.disable_contexts(len(names))
def with_context(self, name, **kw):
"""Decorator to wrap a function call in a Pint context.
Use it to ensure that a certain context is active when
calling a function::
>>> @ureg.with_context('sp')
        ... def my_cool_fun(wavelength):
... print('This wavelength is equivalent to: %s', wavelength.to('terahertz'))
        :param name: name of the context.
        :param kw: keyword arguments for the context.
:return: the wrapped function.
"""
def decorator(func):
assigned = tuple(attr for attr in functools.WRAPPER_ASSIGNMENTS if hasattr(func, attr))
updated = tuple(attr for attr in functools.WRAPPER_UPDATES if hasattr(func, attr))
@functools.wraps(func, assigned=assigned, updated=updated)
def wrapper(*values, **kwargs):
with self.context(name, **kw):
return func(*values, **kwargs)
return wrapper
return decorator
def _convert(self, value, src, dst, inplace=False):
"""Convert value from some source to destination units.
In addition to what is done by the BaseRegistry,
converts between units with different dimensions by following
transformation rules defined in the context.
:param value: value
:param src: source units.
:type src: UnitsContainer
:param dst: destination units.
:type dst: UnitsContainer
:return: converted value
"""
# If there is an active context, we look for a path connecting source and
# destination dimensionality. If it exists, we transform the source value
# by applying sequentially each transformation of the path.
if self._active_ctx:
src_dim = self._get_dimensionality(src)
dst_dim = self._get_dimensionality(dst)
path = find_shortest_path(self._active_ctx.graph, src_dim, dst_dim)
if path:
src = self.Quantity(value, src)
for a, b in zip(path[:-1], path[1:]):
src = self._active_ctx.transform(a, b, self, src)
value, src = src._magnitude, src._units
return super(ContextRegistry, self)._convert(value, src, dst, inplace)
def _get_compatible_units(self, input_units, group_or_system):
"""
"""
src_dim = self._get_dimensionality(input_units)
ret = super(ContextRegistry, self)._get_compatible_units(input_units, group_or_system)
if self._active_ctx:
nodes = find_connected_nodes(self._active_ctx.graph, src_dim)
if nodes:
for node in nodes:
ret |= self._dimensional_equivalents[node]
return ret
class SystemRegistry(BaseRegistry):
"""Handle of Systems and Groups.
Conversion between units with different dimenstions according
to previously established relations (contexts).
(e.g. in the spectroscopy, conversion between frequency and energy is possible)
Capabilities:
- Register systems and groups.
- List systems
    - Get or set the default system.
- Parse @system and @group directive.
"""
def __init__(self, system=None, **kwargs):
super(SystemRegistry, self).__init__(**kwargs)
#: Map system name to system.
#: :type: dict[ str | System]
self._systems = {}
        #: Cache mapping units (UnitsContainer) to (factor, base units) for the current system.
self._base_units_cache = dict()
#: Map group name to group.
#: :type: dict[ str | Group]
self._groups = {}
self.Group = systems.build_group_class(self)
self._groups['root'] = self.Group('root')
self.System = systems.build_system_class(self)
self._default_system = system
def _after_init(self):
super(SystemRegistry, self)._after_init()
#: Copy units in root group to the default group
if 'group' in self._defaults:
grp = self.get_group(self._defaults['group'], True)
grp.add_units(*self.get_group('root', False).non_inherited_unit_names)
#: System name to be used by default.
self._default_system = self._default_system or self._defaults.get('system', None)
def _register_parsers(self):
super(SystemRegistry, self)._register_parsers()
self._register_parser('@group', self._parse_group)
self._register_parser('@system', self._parse_system)
def _parse_group(self, ifile):
self.Group.from_lines(ifile.block_iter(), self.define)
def _parse_system(self, ifile):
self.System.from_lines(ifile.block_iter(), self.get_root_units)
def get_group(self, name, create_if_needed=True):
"""Return a Group.
        :param name: Name of the group to be returned.
        :param create_if_needed: Create a group if not found. If False, raise an Exception.
:return: Group
"""
if name in self._groups:
return self._groups[name]
if not create_if_needed:
            raise ValueError('Unknown group %s' % name)
return self.Group(name)
@property
def sys(self):
return systems.Lister(self._systems)
@property
def default_system(self):
return self._default_system
@default_system.setter
def default_system(self, name):
if name:
if name not in self._systems:
raise ValueError('Unknown system %s' % name)
self._base_units_cache = {}
self._default_system = name
def get_system(self, name, create_if_needed=True):
"""Return a Group.
:param name: Name of the group to be
:param create_if_needed: Create a group if not Found. If False, raise an Exception.
:return: System
"""
if name in self._systems:
return self._systems[name]
if not create_if_needed:
            raise ValueError('Unknown system %s' % name)
return self.System(name)
def _define(self, definition):
        # In addition to what is done by the BaseRegistry,
# this adds all units to the `root` group.
definition, d, di = super(SystemRegistry, self)._define(definition)
if isinstance(definition, UnitDefinition):
# We add all units to the root group
self.get_group('root').add_units(definition.name)
return definition, d, di
def get_base_units(self, input_units, check_nonmult=True, system=None):
"""Convert unit or dict of units to the base units.
        If any unit is non-multiplicative and check_nonmult is True,
then None is returned as the multiplicative factor.
Unlike BaseRegistry, in this registry root_units might be different
from base_units
:param input_units: units
:type input_units: UnitsContainer or str
:param check_nonmult: if True, None will be returned as the
multiplicative factor if a non-multiplicative
units is found in the final Units.
:return: multiplicative factor, base units
"""
input_units = to_units_container(input_units)
f, units = self._get_base_units(input_units, check_nonmult, system)
return f, self.Unit(units)
def _get_base_units(self, input_units, check_nonmult=True, system=None):
if system is None:
system = self._default_system
# The cache is only done for check_nonmult=True and the current system.
if check_nonmult and system == self._default_system and input_units in self._base_units_cache:
return self._base_units_cache[input_units]
factor, units = self.get_root_units(input_units, check_nonmult)
if not system:
return factor, units
# This will not be necessary after integration with the registry
# as it has a UnitsContainer intermediate
units = to_units_container(units, self)
destination_units = UnitsContainer()
bu = self.get_system(system, False).base_units
for unit, value in units.items():
if unit in bu:
new_unit = bu[unit]
new_unit = to_units_container(new_unit, self)
destination_units *= new_unit ** value
else:
destination_units *= UnitsContainer({unit: value})
base_factor = self.convert(factor, units, destination_units)
if check_nonmult:
self._base_units_cache[input_units] = base_factor, destination_units
return base_factor, destination_units
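    # Hedged example of the system-aware lookup above (assumes the default
    # definition file, which ships an 'mks' system based on the meter):
    #   ureg = UnitRegistry(system='mks')
    #   ureg.get_base_units('inch')   # -> (0.0254, <Unit('meter')>)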
def _get_compatible_units(self, input_units, group_or_system):
"""
"""
if group_or_system is None:
group_or_system = self._default_system
ret = super(SystemRegistry, self)._get_compatible_units(input_units, group_or_system)
if group_or_system:
if group_or_system in self._systems:
members = self._systems[group_or_system].members
elif group_or_system in self._groups:
members = self._groups[group_or_system].members
else:
raise ValueError("Unknown Group o System with name '%s'" % group_or_system)
return frozenset(ret.intersection(members))
return ret
class UnitRegistry(SystemRegistry, ContextRegistry, NonMultiplicativeRegistry):
"""The unit registry stores the definitions and relationships between units.
:param filename: path of the units definition file to load or line-iterable object.
Empty to load the default definition file.
None to leave the UnitRegistry empty.
:param force_ndarray: convert any input, scalar or not to a numpy.ndarray.
:param default_as_delta: In the context of a multiplication of units, interpret
non-multiplicative units as their *delta* counterparts.
    :param autoconvert_offset_to_baseunit: If True, offset units in quantities are
                                           converted to their base units in multiplicative
                                           context. If False, no conversion happens.
:param on_redefinition: action to take in case a unit is redefined.
'warn', 'raise', 'ignore'
:type on_redefinition: str
:param auto_reduce_dimensions: If True, reduce dimensionality on appropriate operations.
"""
def __init__(self, filename='', force_ndarray=False, default_as_delta=True,
autoconvert_offset_to_baseunit=False,
on_redefinition='warn', system=None,
auto_reduce_dimensions=False):
super(UnitRegistry, self).__init__(filename=filename, force_ndarray=force_ndarray,
on_redefinition=on_redefinition,
default_as_delta=default_as_delta,
autoconvert_offset_to_baseunit=autoconvert_offset_to_baseunit,
system=system,
auto_reduce_dimensions=auto_reduce_dimensions)
def pi_theorem(self, quantities):
"""Builds dimensionless quantities using the Buckingham π theorem
:param quantities: mapping between variable name and units
:type quantities: dict
:return: a list of dimensionless quantities expressed as dicts
"""
return pi_theorem(quantities, self)
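    # Hedged example: ureg.pi_theorem({'V': 'm/s', 'T': 's', 'L': 'm'}) yields
    # a single dimensionless group equivalent to V*T/L (the exact exponent
    # normalisation is left to pi_theorem).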
def setup_matplotlib(self, enable=True):
"""Set up handlers for matplotlib's unit support.
:param enable: whether support should be enabled or disabled
:type enable: bool
"""
# Delays importing matplotlib until it's actually requested
from .matplotlib import setup_matplotlib_handlers
setup_matplotlib_handlers(self, enable)
wraps = registry_helpers.wraps
check = registry_helpers.check
class LazyRegistry(object):
def __init__(self, args=None, kwargs=None):
self.__dict__['params'] = args or (), kwargs or {}
def __init(self):
args, kwargs = self.__dict__['params']
kwargs['on_redefinition'] = 'raise'
self.__class__ = UnitRegistry
self.__init__(*args, **kwargs)
self._after_init()
def __getattr__(self, item):
if item == '_on_redefinition':
return 'raise'
self.__init()
return getattr(self, item)
def __setattr__(self, key, value):
if key == '__class__':
super(LazyRegistry, self).__setattr__(key, value)
else:
self.__init()
setattr(self, key, value)
def __getitem__(self, item):
self.__init()
return self[item]
def __call__(self, *args, **kwargs):
self.__init()
return self(*args, **kwargs)
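# Hedged usage sketch (not part of the original module): a minimal demo of the
# combined registry defined above.  It assumes the default definition file and
# the 'sp' context mentioned in the docstrings.
def _example_usage():  # pragma: no cover
    ureg = UnitRegistry()
    speed = ureg.parse_expression('3 * meter / second')
    print(speed.to('kilometer / hour'))
    with ureg.context('sp'):
        pass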
| gpl-3.0 |
harshaneelhg/scikit-learn | examples/linear_model/plot_sgd_comparison.py | 167 | 1659 | """
==================================
Comparing various online solvers
==================================
An example showing how different online solvers perform
on the hand-written digits dataset.
"""
# Author: Rob Zinkov <rob at zinkov dot com>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import SGDClassifier, Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
heldout = [0.95, 0.90, 0.75, 0.50, 0.01]
rounds = 20
digits = datasets.load_digits()
X, y = digits.data, digits.target
classifiers = [
("SGD", SGDClassifier()),
("ASGD", SGDClassifier(average=True)),
("Perceptron", Perceptron()),
("Passive-Aggressive I", PassiveAggressiveClassifier(loss='hinge',
C=1.0)),
("Passive-Aggressive II", PassiveAggressiveClassifier(loss='squared_hinge',
C=1.0)),
]
xx = 1. - np.array(heldout)
for name, clf in classifiers:
rng = np.random.RandomState(42)
yy = []
for i in heldout:
yy_ = []
for r in range(rounds):
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=i, random_state=rng)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
yy_.append(1 - np.mean(y_pred == y_test))
yy.append(np.mean(yy_))
plt.plot(xx, yy, label=name)
plt.legend(loc="upper right")
plt.xlabel("Proportion train")
plt.ylabel("Test Error Rate")
plt.show()
| bsd-3-clause |
Titan-C/scikit-learn | sklearn/kernel_ridge.py | 48 | 6731 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_array, check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, laplacian, polynomial, exponential chi2
and sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_samples] or [n_samples, n_targets]
Representation of weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or array-like of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
if sample_weight is not None and not isinstance(sample_weight, float):
sample_weight = check_array(sample_weight, ensure_2d=False)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
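# Hedged reference sketch (not part of scikit-learn): the closed-form solve
# performed by KernelRidge.fit in the unweighted case, written with plain
# NumPy.  dual_coef = (K + alpha*I)^-1 y; predictions are K_new @ dual_coef.
def _krr_closed_form_sketch(K, y, alpha=1.0):
    n_samples = K.shape[0]
    return np.linalg.solve(K + alpha * np.eye(n_samples), y)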
| bsd-3-clause |
jaidevd/scikit-learn | benchmarks/bench_glmnet.py | 111 | 3890 | """
To run this, you'll need to have the following installed:
* glmnet-python
* scikit-learn (of course)
It runs two benchmarks.
First, we fix a training set and increase the number of
samples. Then we plot the computation time as a function of
the number of samples.
In the second benchmark, we increase the number of dimensions of the
training set. Then we plot the computation time as a function of
the number of dimensions.
In both cases, only 10% of the features are informative.
"""
import numpy as np
import gc
from time import time
from sklearn.datasets.samples_generator import make_regression
alpha = 0.1
# alpha = 0.01
def rmse(a, b):
return np.sqrt(np.mean((a - b) ** 2))
def bench(factory, X, Y, X_test, Y_test, ref_coef):
gc.collect()
# start time
tstart = time()
clf = factory(alpha=alpha).fit(X, Y)
delta = (time() - tstart)
# stop time
print("duration: %0.3fs" % delta)
print("rmse: %f" % rmse(Y_test, clf.predict(X_test)))
print("mean coef abs diff: %f" % abs(ref_coef - clf.coef_.ravel()).mean())
return delta
if __name__ == '__main__':
from glmnet.elastic_net import Lasso as GlmnetLasso
from sklearn.linear_model import Lasso as ScikitLasso
# Delayed import of matplotlib.pyplot
import matplotlib.pyplot as plt
scikit_results = []
glmnet_results = []
n = 20
step = 500
n_features = 1000
    n_informative = n_features // 10
n_test_samples = 1000
for i in range(1, n + 1):
print('==================')
print('Iteration %s of %s' % (i, n))
print('==================')
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:(i * step)]
Y = Y[:(i * step)]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
plt.clf()
xx = range(0, n * step, step)
plt.title('Lasso regression on sample dataset (%d features)' % n_features)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of samples to classify')
plt.ylabel('Time (s)')
plt.show()
# now do a benchmark where the number of points is fixed
# and the variable is the number of features
scikit_results = []
glmnet_results = []
n = 20
step = 100
n_samples = 500
for i in range(1, n + 1):
print('==================')
print('Iteration %02d of %02d' % (i, n))
print('==================')
n_features = i * step
        n_informative = n_features // 10
X, Y, coef_ = make_regression(
n_samples=(i * step) + n_test_samples, n_features=n_features,
noise=0.1, n_informative=n_informative, coef=True)
X_test = X[-n_test_samples:]
Y_test = Y[-n_test_samples:]
X = X[:n_samples]
Y = Y[:n_samples]
print("benchmarking scikit-learn: ")
scikit_results.append(bench(ScikitLasso, X, Y, X_test, Y_test, coef_))
print("benchmarking glmnet: ")
glmnet_results.append(bench(GlmnetLasso, X, Y, X_test, Y_test, coef_))
xx = np.arange(100, 100 + n * step, step)
plt.figure('scikit-learn vs. glmnet benchmark results')
plt.title('Regression in high dimensional spaces (%d samples)' % n_samples)
plt.plot(xx, scikit_results, 'b-', label='scikit-learn')
plt.plot(xx, glmnet_results, 'r-', label='glmnet')
plt.legend()
plt.xlabel('number of features')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.show()
| bsd-3-clause |
jskDr/jamespy_py3 | wireless/nb_polar_r13.py | 1 | 25585 | import numpy as np
import numba as nb
import matplotlib.pyplot as plt
def calc_ber(e_array):
return np.mean(np.abs(e_array))
# Imitate static variable for a python function using decorate and setattr
def static_vars(**kwargs):
'''
@static_vars(counter=0)
def foo():
foo.counter += 1
print("Counter is %d" % foo.counter)
'''
def decorate(func):
for k in kwargs:
setattr(func, k, kwargs[k])
return func
return decorate
@nb.jit
def encode(u1, u2):
return (u1 + u2) % 2, u2
@nb.jit
def f_neg(a, b):
return np.log((np.exp(a + b) + 1)/(np.exp(a)+np.exp(b)))
@nb.jit
def f_pos(a, b, u):
return (-1)**u*a + b
@nb.jit
def decode(y1, y2):
l1 = f_neg(y1, y2)
u1_hard = 0 if l1 > 0 else 1
l2 = f_pos(y1, y2, u1_hard)
u2_hard = 0 if l2 > 0 else 1
return u1_hard, u2_hard, l1, l2
@nb.jit
def channel(x1, x2):
y1 = 1.0 - x1*2
y2 = 1.0 - x2*2
return y1, y2
@nb.jit
def coding(u1, u2):
x1, x2 = encode(u1, u2)
y1, y2 = channel(x1, x2)
u1_, u2_, _, _ = decode(y1, y2)
e1, e2 = u1 - u1_, u2 - u2_
return e1, e2
@nb.jit
def coding_array(u_array, e_array):
for i in range(len(u_array)):
e_array[i,0], e_array[i,1] = coding(u_array[i,0], u_array[i,1])
def run_coding():
u_array = np.array([(1,1), (1,0), (0,1), (0,0)])
e_array = np.zeros_like(u_array)
coding_array(u_array, e_array)
print(e_array)
@nb.jit
def channel_awgn(x1, x2, SNRdB):
SNR = np.power(10, SNRdB/10)
Nsig = 1/np.sqrt(SNR)
n1 = np.random.normal(0) * Nsig
n2 = np.random.normal(0) * Nsig
y1 = 1.0 - x1*2 + n1
y2 = 1.0 - x2*2 + n2
return y1, y2
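# Note (hedged): bits are BPSK-mapped (0 -> +1, 1 -> -1) with unit signal
# power, so a noise standard deviation of 1/sqrt(SNR) realises the requested
# per-symbol SNR.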
@nb.jit
def coding_awgn(u1, u2, SNRdB):
x1, x2 = encode(u1, u2)
y1, y2 = channel_awgn(x1, x2, SNRdB)
u1_, u2_, _, _ = decode(y1, y2)
e1, e2 = u1 - u1_, u2 - u2_
return e1, e2
@nb.jit
def coding_array_awgn(u_array, e_array, SNRdB):
for i in range(len(u_array)):
e_array[i,0], e_array[i,1] = coding_awgn(u_array[i,0], u_array[i,1], SNRdB)
def run_coding_awgn(SNRdB=10):
u_array = np.array([(1,1), (1,0), (0,1), (0,0)])
e_array = np.zeros_like(u_array)
coding_array_awgn(u_array, e_array, SNRdB)
# print(e_array)
BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
return BER
def main_run_coding_awgn(SNRdB_list=list(range(10))):
BER_list = []
for SNRdB in SNRdB_list:
BER = run_coding_awgn(SNRdB)
BER_list.append(BER)
plt.semilogy(SNRdB_list, BER_list)
plt.grid()
plt.xlabel('SNR(dB)')
plt.ylabel('BER')
plt.title('Performance of Polar Code')
plt.show()
def run_coding_awgn_tile(SNRdB=10, Ntile=1):
u_array_unit = np.array([(1,1), (1,0), (0,1), (0,0)])
u_array = np.tile(u_array_unit, (Ntile, 1))
e_array = np.zeros_like(u_array)
coding_array_awgn(u_array, e_array, SNRdB)
BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
return BER
def main_run_coding_awgn_tile(SNRdB_list=list(range(10)), Ntile=1, flag_fig=False):
BER_list = []
for SNRdB in SNRdB_list:
BER = run_coding_awgn_tile(SNRdB, Ntile)
BER_list.append(BER)
if flag_fig:
plt.semilogy(SNRdB_list, BER_list)
plt.grid()
plt.xlabel('SNR(dB)')
plt.ylabel('BER')
plt.title('Performance of Polar Code')
plt.show()
@nb.jit
def encode_array(u_array):
x_array = np.zeros_like(u_array)
for i in range(len(u_array)):
x_array[i,0], x_array[i,1] = encode(u_array[i,0], u_array[i,1])
return x_array
@nb.jit
def channel_array(x_array):
y_array = np.zeros(x_array.shape, dtype=nb.float_)
for i in range(len(x_array)):
y_array[i,0], y_array[i,1] = channel(x_array[i,0], x_array[i,1])
return y_array
@nb.jit
def decode_array(y_array):
ud_array = np.zeros(y_array.shape, dtype=nb.int_)
for i in range(len(y_array)):
ud_array[i,0], ud_array[i,1], _, _ = decode(y_array[i,0], y_array[i,1])
return ud_array
@nb.jit
def coding_array_all(u_array):
e_array = np.zeros_like(u_array)
x_array = encode_array(u_array)
y_array = channel_array(x_array)
ud_array = decode_array(y_array)
e_array = u_array - ud_array
return e_array
def run_coding_array_all():
u_array = np.array([(1,1), (1,0), (0,1), (0,0)])
e_array = coding_array_all(u_array)
print(e_array)
@nb.jit
def channel_array_awgn(x_array, SNRdB):
y_array = np.zeros(x_array.shape, dtype=nb.float_)
for i in range(len(x_array)):
y_array[i,0], y_array[i,1] = channel_awgn(x_array[i,0], x_array[i,1], SNRdB)
return y_array
@nb.jit
def _coding_array_all_awgn(u_array, SNRdB=10):
e_array = np.zeros_like(u_array)
x_array = encode_array(u_array)
y_array = channel_array_awgn(x_array, SNRdB)
ud_array = decode_array(y_array)
e_array = u_array - ud_array
return e_array
def run_coding_array_all_awgn(SNRdB=10):
u_array = np.array([(1,1), (1,0), (0,1), (0,0)])
e_array = coding_array_all_awgn(u_array, SNRdB=SNRdB)
print(e_array)
def run_coding_array_all_awgn_tile(SNRdB=10, Ntile=1):
u_array_unit = np.array([(1,1), (1,0), (0,1), (0,0)])
u_array = np.tile(u_array_unit, (Ntile, 1))
e_array = coding_array_all_awgn(u_array, SNRdB=SNRdB)
BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
# print(BER)
return BER
def main_run_coding_array_all_awgn_tile(SNRdB_list=list(range(10)), Ntile=1, flag_fig=False):
BER_list = []
for SNRdB in SNRdB_list:
BER = run_coding_array_all_awgn_tile(SNRdB, Ntile)
BER_list.append(BER)
if flag_fig:
plt.semilogy(SNRdB_list, BER_list)
plt.grid()
plt.xlabel('SNR(dB)')
plt.ylabel('BER')
plt.title('Performance of Polar Code')
plt.show()
@nb.jit
def coding_array_all_awgn(u_array, SNRdB=10):
e_array = np.zeros_like(u_array)
x_array = encode_array(u_array)
y_array = channel_numpy_awgn(x_array, SNRdB)
ud_array = decode_array(y_array)
e_array = u_array - ud_array
return e_array
@nb.jit
def channel_numpy_awgn(x_array, SNRdB):
"""
    Map the output from (0,1) to (1,-1) and add Gaussian noise.
"""
#y_array = np.zeros(x_array.shape, dtype=nb.float_)
SNR = np.power(10, SNRdB/10)
noise_sig = 1/np.sqrt(SNR)
n_array = np.random.normal(0.0, noise_sig, size=x_array.shape)
y_array = 1.0 - x_array*2 + n_array
return y_array
# Usage list
# main_run_coding_awgn()
# run_coding_awgn()
# run_coding()
# N >= 2 Polar coding (Generalized)
@nb.jit
def encode_n(u):
"""
x = uBF(xn) where n = log(N), N=len(u), B is bit-reverse
"""
x = np.copy(u)
L = len(u)
if L != 1:
u1 = u[0::2]
u2 = u[1::2]
u1u2 = np.mod(u1 + u2, 2)
        x[:L//2] = encode_n(u1u2)
        x[L//2:] = encode_n(u2)
return x
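# For illustration (hedged, hand-computed): encode_n(np.array([1, 1, 0, 0]))
# returns array([0, 0, 1, 0]), i.e. the bit-reversal permutation followed by
# the XOR butterflies.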
@nb.jit
def encode_array_n(u_array):
x_array = np.zeros_like(u_array)
for i in range(len(u_array)):
x_array[i] = encode_n(u_array[i])
return x_array
@nb.jit
def f_neg_n(a, b):
return np.log((np.exp(a + b) + 1)/(np.exp(a)+np.exp(b)))
@nb.jit
def f_pos_n(a, b, u):
return (-1)**u*a + b
@nb.jit
def decode_n_r0(y_array):
"""
u_hard: input hard decision
x_hard: output hard decision
"""
u_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_hard = np.zeros(y_array.shape, dtype=nb.int_)
L = len(y_array)
if L == 1:
u_hard[0] = 0 if y_array[0] > 0 else 1
x_hard[0] = u_hard[0]
else:
y1 = y_array[0::2]
y2 = y_array[1::2]
# print(L, y1, y2)
l1 = f_neg_n(y1, y2)
u_hard[:L/2], x_hard[:L/2] = decode_n(l1)
# print('[:L/2] ', l1, u_hard[:L/2], x_hard[:L/2])
l2 = f_pos_n(y1, y2, x_hard[:L/2])
u_hard[L/2:], x_hard[L/2:] = decode_n(l2)
x_hard[:L/2] = np.mod(x_hard[:L/2] + x_hard[L/2:], 2)
return u_hard, x_hard
@nb.jit
def decode_n(y_array):
"""
u_hard: input hard decision
x_hard: output hard decision
"""
u_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_temp = np.zeros(y_array.shape, dtype=nb.int_)
L = len(y_array)
if L == 1:
u_hard[0] = 0 if y_array[0] > 0 else 1
x_hard[0] = u_hard[0]
else:
y1 = y_array[0::2]
y2 = y_array[1::2]
# print(L, y1, y2)
l1 = f_neg_n(y1, y2)
        u_hard[:L//2], x_temp[:L//2] = decode_n(l1)
        # print('[:L/2] ', l1, u_hard[:L/2], x_hard[:L/2])
        l2 = f_pos_n(y1, y2, x_temp[:L//2])
        u_hard[L//2:], x_temp[L//2:] = decode_n(l2)
        x_temp[:L//2] = np.mod(x_temp[:L//2] + x_temp[L//2:], 2)
        x_hard[0::2] = x_temp[:L//2]
        x_hard[1::2] = x_temp[L//2:]
return u_hard, x_hard
@nb.jit
def decode_array_n(y_array):
ud_array = np.zeros(y_array.shape, dtype=nb.int_) #nb.int_)
for i in range(len(y_array)):
ud_array[i], _ = decode_n(y_array[i])
return ud_array
@nb.jit
def coding_array_all_awgn_n(u_array, SNRdB=10):
e_array = np.zeros_like(u_array)
x_array = encode_array_n(u_array)
y_array = channel_numpy_awgn(x_array, SNRdB)
ud_array = decode_array_n(y_array)
e_array = u_array - ud_array
return e_array
class PolarCode:
def __init__(self, N_code=2, K_code=2):
"""
N_code: Code block size
K_code: Information bit size
"""
self.N_code = N_code
self.K_code = K_code
def plot(self, SNRdB_list, BER_list):
plt.semilogy(SNRdB_list, BER_list)
plt.grid()
plt.xlabel('SNR(dB)')
plt.ylabel('BER')
plt.title('Performance of Polar Code')
plt.show()
def run(self,
SNRdB_list=list(range(10)), N_iter=1, flag_fig=False):
u_array = np.random.randint(2, size=(N_iter, self.N_code))
BER_list = []
for SNRdB in SNRdB_list:
e_array = coding_array_all_awgn_n(u_array, SNRdB=SNRdB)
BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
BER_list.append(BER)
if flag_fig:
self.plot(SNRdB_list, BER_list)
self.BER_list = BER_list
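# Hedged usage sketch of the class above (numbers are purely illustrative):
#   pc = PolarCode(N_code=4, K_code=4)
#   pc.run(SNRdB_list=[0, 2, 4], N_iter=10000, flag_fig=False)
#   print(pc.BER_list)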
# ====================================================================
# Polar coding system that takes frozen bits into account
# ====================================================================
@nb.jit
def _decode_frozen_n(y_array, frozen_flag_n):
"""
u_hard: input hard decision
x_hard: output hard decision
"""
u_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_temp = np.zeros(y_array.shape, dtype=nb.int_)
L = len(y_array)
if L == 1:
if frozen_flag_n[0]:
u_hard[0] = 0
else:
u_hard[0] = 0 if y_array[0] > 0 else 1
x_hard[0] = u_hard[0]
else:
y1 = y_array[0::2]
y2 = y_array[1::2]
# print(L, y1, y2)
l1 = f_neg_n(y1, y2)
u_hard[:L/2], x_temp[:L/2] = decode_frozen_n(l1, frozen_flag_n[:L/2])
# print('[:L/2] ', l1, u_hard[:L/2], x_hard[:L/2])
l2 = f_pos_n(y1, y2, x_temp[:L/2])
u_hard[L/2:], x_temp[L/2:] = decode_frozen_n(l2, frozen_flag_n[L/2:])
x_temp[:L/2] = np.mod(x_temp[:L/2] + x_temp[L/2:], 2)
x_hard[0::2] = x_temp[:L/2]
x_hard[1::2] = x_temp[L/2:]
return u_hard, x_hard
@nb.jit
def decode_frozen_n(y_array, frozen_flag_n):
"""
u_hard: input hard decision
x_hard: output hard decision
"""
u_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_hard = np.zeros(y_array.shape, dtype=nb.int_)
x_temp = np.zeros(y_array.shape, dtype=nb.int_)
L = len(y_array)
if L == 1:
u_hard[0] = 0 if y_array[0] > 0 else 1
if frozen_flag_n[0]:
x_hard[0] = 0
else:
x_hard[0] = u_hard[0]
else:
y1 = y_array[0::2]
y2 = y_array[1::2]
# print(L, y1, y2)
l1 = f_neg_n(y1, y2)
        u_hard[:L//2], x_temp[:L//2] = decode_frozen_n(l1, frozen_flag_n[:L//2])
        # print('[:L/2] ', l1, u_hard[:L/2], x_hard[:L/2])
        l2 = f_pos_n(y1, y2, x_temp[:L//2])
        u_hard[L//2:], x_temp[L//2:] = decode_frozen_n(l2, frozen_flag_n[L//2:])
        x_temp[:L//2] = np.mod(x_temp[:L//2] + x_temp[L//2:], 2)
        x_hard[0::2] = x_temp[:L//2]
        x_hard[1::2] = x_temp[L//2:]
return u_hard, x_hard
@nb.jit
def decode_frozen_array_n(y_array, frozen_flag_n):
ud_array = np.zeros(y_array.shape, dtype=nb.int_)
for i in range(len(y_array)):
ud_array[i], _ = decode_frozen_n(y_array[i], frozen_flag_n)
return ud_array
@nb.jit
def frozen_encode_n(uf, u, f):
"""
Input:
        uf: coding block of length N_code
        u: information block of length K_code
        f: length-N_code vector indicating whether each bit is frozen
"""
k = 0
for n in range(len(uf)):
if f[n]:
uf[n] = 0
else:
uf[n] = u[k]
k += 1
@nb.jit
def frozen_encode_array_n(u_array, frozen_flag_n):
N_iter = len(u_array)
N_code = len(frozen_flag_n)
uf_array = np.zeros(shape=(N_iter,N_code), dtype=nb.int_)
for i in range(N_iter):
frozen_encode_n(uf_array[i], u_array[i], frozen_flag_n)
return uf_array
@nb.jit
def frozen_decode_n(ud, ufd, f):
"""
Input:
        ufd: decoded block of length N_code
        ud: detected information block of length K_code
        f: length-N_code vector indicating whether each bit is frozen
"""
k = 0
for n in range(len(f)):
if f[n] == 0:
ud[k] = ufd[n]
k += 1
@nb.jit
def frozen_decode_array_n(ufd_array, frozen_flag_n):
N_iter = len(ufd_array)
N_code = len(frozen_flag_n)
K_code = N_code - np.sum(frozen_flag_n)
ud_array = np.zeros(shape=(N_iter,K_code), dtype=nb.int_)
for i in range(N_iter):
frozen_decode_n(ud_array[i], ufd_array[i], frozen_flag_n)
return ud_array
@nb.jit
def coding_array_all_awgn_frozen_n(u_array, frozen_flag_n, SNRdB=10):
e_array = np.zeros_like(u_array)
    # Frozen-bit handling is added before encoding and after decoding.
    # u_array is a set of length-K_code vectors; uf_array is a set of length-N_code vectors.
uf_array = frozen_encode_array_n(u_array, frozen_flag_n)
    # encode_array_n() does not need to know which bits are frozen.
x_array = encode_array_n(uf_array)
y_array = channel_numpy_awgn(x_array, SNRdB)
    ufd_array = decode_frozen_array_n(y_array, frozen_flag_n)  # must use the frozen-aware decoder!
# ufd_array = decode_array_n(y_array)
ud_array = frozen_decode_array_n(ufd_array, frozen_flag_n)
e_array = u_array - ud_array
return e_array
class _PolarCodeFrozen:
def __init__(self, N_code=2, K_code=2, frozen_flag='manual', frozen_flag_n=np.zeros(2,dtype=int)):
"""
N_code=4: Code block size
K_code=2: Information bit size
        frozen_flag_n=[1,1,0,0]: marks whether each bit of the code block is frozen (1) or not (0).
            The number of non-frozen bits must equal K_code.
"""
if frozen_flag == 'auto':
frozen_flag_n = polar_design_bec(N_code=N_code, K_code=K_code)
print('Auto: frozen_flag_n =', frozen_flag_n)
assert N_code == len(frozen_flag_n)
assert N_code - K_code == np.sum(frozen_flag_n)
self.N_code = N_code
self.K_code = K_code
self.frozen_flag_n = frozen_flag_n
def plot(self, SNRdB_list, BER_list):
plt.semilogy(SNRdB_list, BER_list)
plt.grid()
plt.xlabel('SNR(dB)')
plt.ylabel('BER')
plt.title('Performance of Polar Code')
plt.show()
def run(self,
SNRdB_list=list(range(10)), N_iter=1, flag_fig=False):
        # The number of information bits must be K_code; the rest are filled with 0 according to frozen_flag_n.
u_array = np.random.randint(2, size=(N_iter, self.K_code))
BER_list = []
for SNRdB in SNRdB_list:
e_array = coding_array_all_awgn_frozen_n(u_array, frozen_flag_n=self.frozen_flag_n, SNRdB=SNRdB)
BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
BER_list.append(BER)
if flag_fig:
self.plot(SNRdB_list, BER_list)
print("SNRdB_list, BER_list")
print(SNRdB_list, BER_list)
self.BER_list = BER_list
def _polar_bsc(N_code=4, p=0.11, N_iter=1000):
"""
    Designed by mapping the original (0,1) code to (1,-1), so 2*p is used instead of p.
    Input:
        p=0.11: error probability. A fraction p of the symbols is assumed to be flipped,
        and the error p is applied to every symbol regardless of whether it is 0 or 1.
    Comments:
        ud_hat holds the hard decisions of the all-frozen input bits; it is normally not used for BER computation.
        With every bit frozen there is no error back-propagation, so it is used to evaluate the quality of each synthetic channel.
"""
    # Freeze every bit
f = np.ones(N_code, dtype=int)
biterrd = np.zeros(N_code)
for _ in range(N_iter):
        # The transmitted input is assumed to be all zeros.
y = np.ones(N_code) - 2*p
y[np.random.rand(N_code)<p] = -1 + 2*p
ud_hat, _ = decode_frozen_n(y, f)
biterrd += ud_hat
biterrd /= N_iter
return biterrd
def polar_bsc(N_code=4, p=0.11, N_iter=1000):
"""
    Designed by mapping the original (0,1) code to (1,-1), so 2*p is used instead of p.
    Input:
        p=0.11: error probability. A fraction p of the symbols is assumed to be flipped,
        and the error p is applied to every symbol regardless of whether it is 0 or 1.
    Comments:
        ud_hat holds the hard decisions of the all-frozen input bits; it is normally not used for BER computation.
        With every bit frozen there is no error back-propagation, so it is used to evaluate the quality of each synthetic channel.
"""
    # Freeze every bit
f = np.ones(N_code, dtype=int)
biterrd = np.zeros(N_code)
for _ in range(N_iter):
        # The transmitted input is assumed to be all zeros.
y_bin = np.zeros(N_code) + p
y_bin[np.random.rand(N_code)<p] = 1 - p
ud_hat, _ = decode_frozen_n(1-2*y_bin, f)
biterrd += ud_hat
biterrd /= N_iter
return biterrd
@nb.jit
def polar_bec(N_code=4, erase_prob=0.5):
"""
    Compute the predicted performance of a polar code over the BEC, assuming a per-bit erasure probability of erase_prob.
"""
n = int(np.log2(N_code))
E = np.zeros(N_code)
# E_out = np.zeros(N_code)
E[0] = erase_prob
for i in range(n):
LN = 2**i
# print(i, LN)
# print('E in:', E)
        # At stage i the slice must end at LN*2; otherwise the assignment broadcasts.
E[LN:LN*2] = E[:LN] * E[:LN]
E[:LN] = 1-(1-E[:LN])*(1-E[:LN])
# print('E out:', E)
return E
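# Worked example (hedged, hand-computed from the recursion above):
#   polar_bec(N_code=4, erase_prob=0.5) -> [0.9375, 0.4375, 0.5625, 0.0625],
# so bit 3 sees the most reliable synthetic channel and bit 0 the least.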
def polar_design_bec(N_code=4, K_code=2, erase_prob=0.5):
"""
    For the BEC, compute the error rate of each bit position and build frozen_flag.
"""
biterrd = polar_bec(N_code=N_code, erase_prob=erase_prob)
idx = np.argsort(biterrd)
frozen_flag_n = np.ones(N_code, dtype=int)
frozen_flag_n[idx[:K_code]] = 0
print('BER for each bit', biterrd)
return frozen_flag_n
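# Hedged example continuing the numbers above: polar_design_bec(4, 2) keeps
# bits 3 and 1 as information bits and returns frozen_flag_n = [1, 0, 1, 0].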
class PolarCodeFrozen:
def __init__(self, N_code=2, K_code=2, frozen_flag='manual', frozen_flag_n=np.zeros(2,dtype=int)):
"""
N_code=4: Code block size
K_code=2: Information bit size
        frozen_flag_n=[1,1,0,0]: marks whether each bit of the code block is frozen (1) or not (0).
            The number of non-frozen bits must equal K_code.
"""
if frozen_flag == 'auto':
frozen_flag_n = polar_design_bec(N_code=N_code, K_code=K_code)
print('Auto: frozen_flag_n =', frozen_flag_n)
assert N_code == len(frozen_flag_n)
assert N_code - K_code == np.sum(frozen_flag_n)
self.N_code = N_code
self.K_code = K_code
self.frozen_flag_n = frozen_flag_n
def plot(self, SNRdB_list, BER_list):
plt.semilogy(SNRdB_list, BER_list)
plt.grid()
plt.xlabel('SNR(dB)')
plt.ylabel('BER')
plt.title('Performance of Polar Code')
plt.show()
def display_flag(self, SNRdB_list, BER_list, flag_fig):
if flag_fig:
self.plot(SNRdB_list, BER_list)
print("SNRdB_list, BER_list")
print(SNRdB_list, BER_list)
self.BER_list = BER_list
def run(self, SNRdB_list=list(range(10)), N_iter=1, flag_fig=False):
        # The number of information bits must be K_code; the rest are filled with 0 according to frozen_flag_n.
u_array = np.random.randint(2, size=(N_iter, self.K_code))
BER_list = []
for SNRdB in SNRdB_list:
e_array = coding_array_all_awgn_frozen_n(u_array, frozen_flag_n=self.frozen_flag_n, SNRdB=SNRdB)
BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
BER_list.append(BER)
self.display_flag(SNRdB_list, BER_list, flag_fig)
class NPolarCodeFrozen(PolarCodeFrozen):
def __init__(self, N_code=4, K_code=4, P_code=2, frozen_flag='manual', frozen_flag_n=np.zeros(2,dtype=int)):
"""
Inputs:
P_code: the number of AE input bits
"""
super().__init__(N_code=N_code, K_code=K_code,
frozen_flag=frozen_flag, frozen_flag_n=frozen_flag_n)
self.P_code = P_code
def run(self, SNRdB_list=list(range(10)), N_iter=1, flag_fig=False):
        # The number of information bits must be K_code; the rest are filled with 0 according to frozen_flag_n.
u_array = np.random.randint(2, size=(N_iter, self.K_code))
BER_list = []
for SNRdB in SNRdB_list:
e_array = coding_array_all_awgn_frozen_n(u_array, frozen_flag_n=self.frozen_flag_n, SNRdB=SNRdB)
BER = np.sum(np.abs(e_array)) / np.prod(e_array.shape)
BER_list.append(BER)
self.display_flag(SNRdB_list, BER_list, flag_fig)
@nb.jit
def np_encode_n_NP(u, N_P):
"""
x = uBF(xn) where n = log(N), N=len(u), B is bit-reverse
"""
x = np.copy(u)
L = len(u)
if L != 1:
if L > N_P:
u1 = u[0::2]
u2 = u[1::2]
            x[:L//2] = np_encode_n_NP(u1, N_P)
            x[L//2:] = np_encode_n_NP(u2, N_P)
else:
u1 = u[0::2]
u2 = u[1::2]
u1u2 = np.mod(u1 + u2, 2)
            x[:L//2] = np_encode_n_NP(u1u2, N_P)
            x[L//2:] = np_encode_n_NP(u2, N_P)
return x
@nb.jit
def np_encode_n(u, P):
"""
Inputs:
        P=2: length of the AE input; P=1 means the AE is not used, P=N means the polar transform is not used
"""
return np_encode_n_NP(u, len(u)//P)
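# Hedged note: np_encode_n(u, P=1) reduces to the full polar transform
# (encode_n), whereas P=len(u) skips every XOR stage and leaves only the
# bit-reversal permutation, i.e. the whole block is handed to the AE part.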
if __name__ == '__main__':
# main_run_coding_awgn()
# main_run_coding_array_all_awgn_tile(Ntile=100000, flag_fig=True)
#f = polar_design_bec(2,1)
#print(f)
    polar = NPolarCodeFrozen(2, 2, frozen_flag='auto')
polar.run([5], 10) | mit |
mathemage/h2o-3 | h2o-py/tests/testdir_algos/kmeans/pyunit_emptyclusKmeans.py | 8 | 2204 | from __future__ import division
from __future__ import print_function
from builtins import zip
from builtins import range
from past.utils import old_div
import sys
sys.path.insert(1,"../../../")
import h2o
from tests import pyunit_utils
import random
import numpy as np
from sklearn import preprocessing
from sklearn.cluster import KMeans
from h2o.estimators.kmeans import H2OKMeansEstimator
def emptyclusKmeans():
# Connect to a pre-existing cluster
# connect to localhost:54321
#Log.info("Importing ozone.csv data...\n")
ozone_sci = np.loadtxt(pyunit_utils.locate("smalldata/glm_test/ozone.csv"), delimiter=',', skiprows=1)
ozone_h2o = h2o.import_file(path=pyunit_utils.locate("smalldata/glm_test/ozone.csv"))
ncent = 10
nempty = random.randint(1,old_div(ncent,2))
initial_centers = [[41,190,67,7.4],
[36,118,72,8],
[12,149,74,12.6],
[18,313,62,11.5],
[23,299,65,8.6],
[19,99,59,13.8],
[8,19,61,20.1],
[16,256,69,9.7],
[11,290,66,9.2],
[14,274,68,10.9]]
for i in random.sample(list(range(ncent-1)), nempty):
initial_centers[i] = [100*i for z in range(1,len(initial_centers[0])+1)]
initial_centers_sci = np.asarray(initial_centers)
initial_centers_h2o = h2o.H2OFrame(initial_centers)
#Log.info("Initial cluster centers:")
print("H2O initial centers:")
initial_centers_h2o.show()
print("scikit initial centers:")
print(initial_centers_sci)
# H2O can handle empty clusters and so can scikit
#Log.info("Check that H2O can handle badly initialized centers")
km_sci = KMeans(n_clusters=ncent, init=initial_centers_sci, n_init=1)
km_sci.fit(preprocessing.scale(ozone_sci))
print("scikit final centers")
print(km_sci.cluster_centers_)
km_h2o = H2OKMeansEstimator(k=ncent, user_points=initial_centers_h2o, standardize=True)
km_h2o.train(x = list(range(ozone_h2o.ncol)), training_frame=ozone_h2o)
print("H2O final centers")
print(km_h2o.centers())
if __name__ == "__main__":
pyunit_utils.standalone_test(emptyclusKmeans)
else:
emptyclusKmeans()
| apache-2.0 |
fabioticconi/scikit-learn | sklearn/neighbors/base.py | 30 | 30564 | """Base and mixin classes for nearest neighbors"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck
# Multi-output support by Arnaud Joly <a.joly@ulg.ac.be>
#
# License: BSD 3 clause (C) INRIA, University of Amsterdam
import warnings
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import csr_matrix, issparse
from .ball_tree import BallTree
from .kd_tree import KDTree
from ..base import BaseEstimator
from ..metrics import pairwise_distances
from ..metrics.pairwise import PAIRWISE_DISTANCE_FUNCTIONS
from ..utils import check_X_y, check_array, _get_n_jobs, gen_even_slices
from ..utils.fixes import argpartition
from ..utils.multiclass import check_classification_targets
from ..externals import six
from ..externals.joblib import Parallel, delayed
from ..exceptions import NotFittedError
from ..exceptions import DataConversionWarning
VALID_METRICS = dict(ball_tree=BallTree.valid_metrics,
kd_tree=KDTree.valid_metrics,
# The following list comes from the
# sklearn.metrics.pairwise doc string
brute=(list(PAIRWISE_DISTANCE_FUNCTIONS.keys()) +
['braycurtis', 'canberra', 'chebyshev',
'correlation', 'cosine', 'dice', 'hamming',
'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean',
'yule', 'wminkowski']))
VALID_METRICS_SPARSE = dict(ball_tree=[],
kd_tree=[],
brute=PAIRWISE_DISTANCE_FUNCTIONS.keys())
def _check_weights(weights):
"""Check to make sure weights are valid"""
if weights in (None, 'uniform', 'distance'):
return weights
elif callable(weights):
return weights
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
def _get_weights(dist, weights):
"""Get the weights from an array of distances and a parameter ``weights``
Parameters
    ----------
dist: ndarray
The input distances
weights: {'uniform', 'distance' or a callable}
The kind of weighting used
Returns
    -------
weights_arr: array of the same shape as ``dist``
if ``weights == 'uniform'``, then returns None
"""
if weights in (None, 'uniform'):
return None
elif weights == 'distance':
# if user attempts to classify a point that was zero distance from one
# or more training points, those training points are weighted as 1.0
# and the other points as 0.0
if dist.dtype is np.dtype(object):
for point_dist_i, point_dist in enumerate(dist):
# check if point_dist is iterable
# (ex: RadiusNeighborClassifier.predict may set an element of
# dist to 1e-6 to represent an 'outlier')
if hasattr(point_dist, '__contains__') and 0. in point_dist:
dist[point_dist_i] = point_dist == 0.
else:
dist[point_dist_i] = 1. / point_dist
else:
with np.errstate(divide='ignore'):
dist = 1. / dist
inf_mask = np.isinf(dist)
inf_row = np.any(inf_mask, axis=1)
dist[inf_row] = inf_mask[inf_row]
return dist
elif callable(weights):
return weights(dist)
else:
raise ValueError("weights not recognized: should be 'uniform', "
"'distance', or a callable function")
class NeighborsBase(six.with_metaclass(ABCMeta, BaseEstimator)):
"""Base class for nearest neighbors estimators."""
@abstractmethod
def __init__(self):
pass
def _init_params(self, n_neighbors=None, radius=None,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, n_jobs=1):
self.n_neighbors = n_neighbors
self.radius = radius
self.algorithm = algorithm
self.leaf_size = leaf_size
self.metric = metric
self.metric_params = metric_params
self.p = p
self.n_jobs = n_jobs
if algorithm not in ['auto', 'brute',
'kd_tree', 'ball_tree']:
raise ValueError("unrecognized algorithm: '%s'" % algorithm)
if algorithm == 'auto':
if metric == 'precomputed':
alg_check = 'brute'
else:
alg_check = 'ball_tree'
else:
alg_check = algorithm
if callable(metric):
if algorithm == 'kd_tree':
# callable metric is only valid for brute force and ball_tree
raise ValueError(
"kd_tree algorithm does not support callable metric '%s'"
% metric)
elif metric not in VALID_METRICS[alg_check]:
raise ValueError("Metric '%s' not valid for algorithm '%s'"
% (metric, algorithm))
if self.metric_params is not None and 'p' in self.metric_params:
warnings.warn("Parameter p is found in metric_params. "
"The corresponding parameter from __init__ "
"is ignored.", SyntaxWarning, stacklevel=3)
effective_p = metric_params['p']
else:
effective_p = self.p
if self.metric in ['wminkowski', 'minkowski'] and effective_p < 1:
raise ValueError("p must be greater than one for minkowski metric")
self._fit_X = None
self._tree = None
self._fit_method = None
def _fit(self, X):
if self.metric_params is None:
self.effective_metric_params_ = {}
else:
self.effective_metric_params_ = self.metric_params.copy()
effective_p = self.effective_metric_params_.get('p', self.p)
if self.metric in ['wminkowski', 'minkowski']:
self.effective_metric_params_['p'] = effective_p
self.effective_metric_ = self.metric
# For minkowski distance, use more efficient methods where available
if self.metric == 'minkowski':
p = self.effective_metric_params_.pop('p', 2)
if p < 1:
raise ValueError("p must be greater than one "
"for minkowski metric")
elif p == 1:
self.effective_metric_ = 'manhattan'
elif p == 2:
self.effective_metric_ = 'euclidean'
elif p == np.inf:
self.effective_metric_ = 'chebyshev'
else:
self.effective_metric_params_['p'] = p
if isinstance(X, NeighborsBase):
self._fit_X = X._fit_X
self._tree = X._tree
self._fit_method = X._fit_method
return self
elif isinstance(X, BallTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'ball_tree'
return self
elif isinstance(X, KDTree):
self._fit_X = X.data
self._tree = X
self._fit_method = 'kd_tree'
return self
X = check_array(X, accept_sparse='csr')
n_samples = X.shape[0]
if n_samples == 0:
raise ValueError("n_samples must be greater than 0")
if issparse(X):
if self.algorithm not in ('auto', 'brute'):
warnings.warn("cannot use tree with sparse input: "
"using brute force")
if self.effective_metric_ not in VALID_METRICS_SPARSE['brute']:
raise ValueError("metric '%s' not valid for sparse input"
% self.effective_metric_)
self._fit_X = X.copy()
self._tree = None
self._fit_method = 'brute'
return self
self._fit_method = self.algorithm
self._fit_X = X
if self._fit_method == 'auto':
# A tree approach is better for small number of neighbors,
# and KDTree is generally faster when available
if ((self.n_neighbors is None or
self.n_neighbors < self._fit_X.shape[0] // 2) and
self.metric != 'precomputed'):
if self.effective_metric_ in VALID_METRICS['kd_tree']:
self._fit_method = 'kd_tree'
else:
self._fit_method = 'ball_tree'
else:
self._fit_method = 'brute'
if self._fit_method == 'ball_tree':
self._tree = BallTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'kd_tree':
self._tree = KDTree(X, self.leaf_size,
metric=self.effective_metric_,
**self.effective_metric_params_)
elif self._fit_method == 'brute':
self._tree = None
else:
raise ValueError("algorithm = '%s' not recognized"
% self.algorithm)
if self.n_neighbors is not None:
if self.n_neighbors <= 0:
raise ValueError(
"Expected n_neighbors > 0. Got %d" %
self.n_neighbors
)
return self
@property
def _pairwise(self):
# For cross-validation routines to split data correctly
return self.metric == 'precomputed'
class KNeighborsMixin(object):
"""Mixin for k-neighbors searches"""
def kneighbors(self, X=None, n_neighbors=None, return_distance=True):
"""Finds the K-neighbors of a point.
Returns indices of and distances to the neighbors of each point.
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors to get (default is the value
passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array
Array representing the lengths to points, only present if
return_distance=True
ind : array
Indices of the nearest points in the population matrix.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1,1,1]
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=1)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> print(neigh.kneighbors([[1., 1., 1.]])) # doctest: +ELLIPSIS
(array([[ 0.5]]), array([[2]]...))
As you can see, it returns [[0.5]], and [[2]], which means that the
element is at distance 0.5 and is the third element of samples
(indexes start at 0). You can also query for multiple points:
>>> X = [[0., 1., 0.], [1., 0., 1.]]
>>> neigh.kneighbors(X, return_distance=False) # doctest: +ELLIPSIS
array([[1],
[2]]...)
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if n_neighbors is None:
n_neighbors = self.n_neighbors
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
# Include an extra neighbor to account for the sample itself being
# returned, which is removed later
n_neighbors += 1
train_size = self._fit_X.shape[0]
if n_neighbors > train_size:
raise ValueError(
"Expected n_neighbors <= n_samples, "
" but n_samples = %d, n_neighbors = %d" %
(train_size, n_neighbors)
)
n_samples, _ = X.shape
sample_range = np.arange(n_samples)[:, None]
n_jobs = _get_n_jobs(self.n_jobs)
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
n_jobs=n_jobs, squared=True)
else:
dist = pairwise_distances(
X, self._fit_X, self.effective_metric_, n_jobs=n_jobs,
**self.effective_metric_params_)
neigh_ind = argpartition(dist, n_neighbors - 1, axis=1)
neigh_ind = neigh_ind[:, :n_neighbors]
# argpartition doesn't guarantee sorted order, so we sort again
neigh_ind = neigh_ind[
sample_range, np.argsort(dist[sample_range, neigh_ind])]
if return_distance:
if self.effective_metric_ == 'euclidean':
result = np.sqrt(dist[sample_range, neigh_ind]), neigh_ind
else:
result = dist[sample_range, neigh_ind], neigh_ind
else:
result = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
result = Parallel(n_jobs, backend='threading')(
delayed(self._tree.query, check_pickle=False)(
X[s], n_neighbors, return_distance)
for s in gen_even_slices(X.shape[0], n_jobs)
)
if return_distance:
dist, neigh_ind = tuple(zip(*result))
result = np.vstack(dist), np.vstack(neigh_ind)
else:
result = np.vstack(result)
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return result
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = result
else:
neigh_ind = result
sample_mask = neigh_ind != sample_range
# Corner case: When the number of duplicates are more
# than the number of neighbors, the first NN will not
# be the sample, but a duplicate.
# In that case mask the first duplicate.
dup_gr_nbrs = np.all(sample_mask, axis=1)
sample_mask[:, 0][dup_gr_nbrs] = False
neigh_ind = np.reshape(
neigh_ind[sample_mask], (n_samples, n_neighbors - 1))
if return_distance:
dist = np.reshape(
dist[sample_mask], (n_samples, n_neighbors - 1))
return dist, neigh_ind
return neigh_ind
def kneighbors_graph(self, X=None, n_neighbors=None,
mode='connectivity'):
"""Computes the (weighted) graph of k-Neighbors for points in X
Parameters
----------
X : array-like, shape (n_query, n_features), \
or (n_query, n_indexed) if metric == 'precomputed'
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
n_neighbors : int
Number of neighbors for each sample.
(default is value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples_fit]
n_samples_fit is the number of samples in the fitted data
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(n_neighbors=2)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.kneighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 1.],
[ 1., 0., 1.]])
See also
--------
NearestNeighbors.radius_neighbors_graph
"""
if n_neighbors is None:
n_neighbors = self.n_neighbors
# kneighbors does the None handling.
if X is not None:
X = check_array(X, accept_sparse='csr')
n_samples1 = X.shape[0]
else:
n_samples1 = self._fit_X.shape[0]
n_samples2 = self._fit_X.shape[0]
n_nonzero = n_samples1 * n_neighbors
A_indptr = np.arange(0, n_nonzero + 1, n_neighbors)
# construct CSR matrix representation of the k-NN graph
if mode == 'connectivity':
A_data = np.ones(n_samples1 * n_neighbors)
A_ind = self.kneighbors(X, n_neighbors, return_distance=False)
elif mode == 'distance':
A_data, A_ind = self.kneighbors(
X, n_neighbors, return_distance=True)
A_data = np.ravel(A_data)
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity" '
'or "distance" but got "%s" instead' % mode)
kneighbors_graph = csr_matrix((A_data, A_ind.ravel(), A_indptr),
shape=(n_samples1, n_samples2))
return kneighbors_graph
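# A hedged sketch (not part of the original module) contrasting the two
# kneighbors_graph modes through the public NearestNeighbors class:
# 'connectivity' stores 1.0 for every edge, while 'distance' stores the
# metric distance to each of the k neighbours.
#
#   >>> from sklearn.neighbors import NearestNeighbors
#   >>> X = [[0], [3], [1]]
#   >>> neigh = NearestNeighbors(n_neighbors=2).fit(X)
#   >>> neigh.kneighbors_graph(X, mode='connectivity').toarray()
#   array([[ 1.,  0.,  1.],
#          [ 0.,  1.,  1.],
#          [ 1.,  0.,  1.]])
#   >>> neigh.kneighbors_graph(X, mode='distance').toarray()
#   array([[ 0.,  0.,  1.],
#          [ 0.,  0.,  2.],
#          [ 1.,  0.,  0.]])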
class RadiusNeighborsMixin(object):
"""Mixin for radius-based neighbors searches"""
def radius_neighbors(self, X=None, radius=None, return_distance=True):
"""Finds the neighbors within a given radius of a point or points.
Return the indices and distances of each point from the dataset
lying in a ball with size ``radius`` around the points of the query
array. Points lying on the boundary are included in the results.
The result points are *not* necessarily sorted by distance to their
query point.
Parameters
----------
X : array-like, (n_samples, n_features), optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Limiting distance of neighbors to return.
(default is the value passed to the constructor).
return_distance : boolean, optional. Defaults to True.
If False, distances will not be returned
Returns
-------
dist : array, shape (n_samples,) of arrays
Array representing the distances to each point, only present if
return_distance=True. The distance values are computed according
to the ``metric`` constructor parameter.
ind : array, shape (n_samples,) of arrays
An array of arrays of indices of the approximate nearest points
from the population matrix that lie within a ball of size
``radius`` around the query points.
Examples
--------
In the following example, we construct a NeighborsClassifier
class from an array representing our data set and ask who's
the closest point to [1, 1, 1]:
>>> import numpy as np
>>> samples = [[0., 0., 0.], [0., .5, 0.], [1., 1., .5]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.6)
>>> neigh.fit(samples) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> rng = neigh.radius_neighbors([[1., 1., 1.]])
>>> print(np.asarray(rng[0][0])) # doctest: +ELLIPSIS
[ 1.5 0.5]
>>> print(np.asarray(rng[1][0])) # doctest: +ELLIPSIS
[1 2]
The first array returned contains the distances to all points which
are closer than 1.6, while the second array returned contains their
indices. In general, multiple points can be queried at the same time.
Notes
-----
Because the number of neighbors of each point is not necessarily
equal, the results for multiple query points cannot be fit in a
standard data array.
For efficiency, `radius_neighbors` returns arrays of objects, where
each object is a 1D array of indices or distances.
"""
if self._fit_method is None:
raise NotFittedError("Must fit neighbors before querying.")
if X is not None:
query_is_train = False
X = check_array(X, accept_sparse='csr')
else:
query_is_train = True
X = self._fit_X
if radius is None:
radius = self.radius
n_samples = X.shape[0]
if self._fit_method == 'brute':
# for efficiency, use squared euclidean distances
if self.effective_metric_ == 'euclidean':
dist = pairwise_distances(X, self._fit_X, 'euclidean',
squared=True)
radius *= radius
else:
dist = pairwise_distances(X, self._fit_X,
self.effective_metric_,
**self.effective_metric_params_)
neigh_ind_list = [np.where(d <= radius)[0] for d in dist]
# See https://github.com/numpy/numpy/issues/5456
# if you want to understand why this is initialized this way.
neigh_ind = np.empty(n_samples, dtype='object')
neigh_ind[:] = neigh_ind_list
if return_distance:
dist_array = np.empty(n_samples, dtype='object')
if self.effective_metric_ == 'euclidean':
dist_list = [np.sqrt(d[neigh_ind[i]])
for i, d in enumerate(dist)]
else:
dist_list = [d[neigh_ind[i]]
for i, d in enumerate(dist)]
dist_array[:] = dist_list
results = dist_array, neigh_ind
else:
results = neigh_ind
elif self._fit_method in ['ball_tree', 'kd_tree']:
if issparse(X):
raise ValueError(
"%s does not work with sparse matrices. Densify the data, "
"or set algorithm='brute'" % self._fit_method)
results = self._tree.query_radius(X, radius,
return_distance=return_distance)
if return_distance:
results = results[::-1]
else:
raise ValueError("internal: _fit_method not recognized")
if not query_is_train:
return results
else:
# If the query data is the same as the indexed data, we would like
# to ignore the first nearest neighbor of every sample, i.e
# the sample itself.
if return_distance:
dist, neigh_ind = results
else:
neigh_ind = results
for ind, ind_neighbor in enumerate(neigh_ind):
mask = ind_neighbor != ind
neigh_ind[ind] = ind_neighbor[mask]
if return_distance:
dist[ind] = dist[ind][mask]
if return_distance:
return dist, neigh_ind
return neigh_ind
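    # A hedged sketch (not part of the original module): because each query
    # point may have a different number of neighbours within ``radius``,
    # radius_neighbors returns object arrays whose elements are 1D arrays of
    # varying length (see the Notes above).  Through the public
    # NearestNeighbors class, assumed here for illustration:
    #
    #   >>> from sklearn.neighbors import NearestNeighbors
    #   >>> neigh = NearestNeighbors(radius=1.5).fit([[0], [3], [1]])
    #   >>> dist, ind = neigh.radius_neighbors([[0.], [3.]])
    #   >>> [len(i) for i in ind]   # ragged: two neighbours, then one
    #   [2, 1]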
def radius_neighbors_graph(self, X=None, radius=None, mode='connectivity'):
"""Computes the (weighted) graph of Neighbors for points in X
        Neighborhoods are restricted to points at a distance lower than
radius.
Parameters
----------
X : array-like, shape = [n_samples, n_features], optional
The query point or points.
If not provided, neighbors of each indexed point are returned.
In this case, the query point is not considered its own neighbor.
radius : float
Radius of neighborhoods.
(default is the value passed to the constructor).
mode : {'connectivity', 'distance'}, optional
Type of returned matrix: 'connectivity' will return the
connectivity matrix with ones and zeros, in 'distance' the
edges are Euclidean distance between points.
Returns
-------
A : sparse matrix in CSR format, shape = [n_samples, n_samples]
A[i, j] is assigned the weight of edge that connects i to j.
Examples
--------
>>> X = [[0], [3], [1]]
>>> from sklearn.neighbors import NearestNeighbors
>>> neigh = NearestNeighbors(radius=1.5)
>>> neigh.fit(X) # doctest: +ELLIPSIS
NearestNeighbors(algorithm='auto', leaf_size=30, ...)
>>> A = neigh.radius_neighbors_graph(X)
>>> A.toarray()
array([[ 1., 0., 1.],
[ 0., 1., 0.],
[ 1., 0., 1.]])
See also
--------
kneighbors_graph
"""
if X is not None:
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
n_samples2 = self._fit_X.shape[0]
if radius is None:
radius = self.radius
# construct CSR matrix representation of the NN graph
if mode == 'connectivity':
A_ind = self.radius_neighbors(X, radius,
return_distance=False)
A_data = None
elif mode == 'distance':
dist, A_ind = self.radius_neighbors(X, radius,
return_distance=True)
A_data = np.concatenate(list(dist))
else:
raise ValueError(
'Unsupported mode, must be one of "connectivity", '
'or "distance" but got %s instead' % mode)
n_samples1 = A_ind.shape[0]
n_neighbors = np.array([len(a) for a in A_ind])
A_ind = np.concatenate(list(A_ind))
if A_data is None:
A_data = np.ones(len(A_ind))
A_indptr = np.concatenate((np.zeros(1, dtype=int),
np.cumsum(n_neighbors)))
return csr_matrix((A_data, A_ind, A_indptr),
shape=(n_samples1, n_samples2))
class SupervisedFloatMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values, array of float values, shape = [n_samples]
or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
self._y = y
return self._fit(X)
class SupervisedIntegerMixin(object):
def fit(self, X, y):
"""Fit the model using X as training data and y as target values
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : {array-like, sparse matrix}
Target values of shape = [n_samples] or [n_samples, n_outputs]
"""
if not isinstance(X, (KDTree, BallTree)):
X, y = check_X_y(X, y, "csr", multi_output=True)
if y.ndim == 1 or y.ndim == 2 and y.shape[1] == 1:
if y.ndim != 1:
warnings.warn("A column-vector y was passed when a 1d array "
"was expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning, stacklevel=2)
self.outputs_2d_ = False
y = y.reshape((-1, 1))
else:
self.outputs_2d_ = True
check_classification_targets(y)
self.classes_ = []
self._y = np.empty(y.shape, dtype=np.int)
for k in range(self._y.shape[1]):
classes, self._y[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes)
if not self.outputs_2d_:
self.classes_ = self.classes_[0]
self._y = self._y.ravel()
return self._fit(X)
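# A hedged sketch (not part of the original module): SupervisedIntegerMixin
# encodes arbitrary class labels into contiguous integer indices (stored in
# self._y) and keeps the original labels in classes_.  Illustrated through
# the public KNeighborsClassifier class, assumed here for illustration:
#
#   >>> from sklearn.neighbors import KNeighborsClassifier
#   >>> clf = KNeighborsClassifier(n_neighbors=1)
#   >>> _ = clf.fit([[0], [1], [2]], ['a', 'b', 'a'])
#   >>> list(clf.classes_)          # original labels, sorted
#   ['a', 'b']
#   >>> clf.predict([[0.1]])[0]     # decoded back to the original label
#   'a'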
class UnsupervisedMixin(object):
def fit(self, X, y=None):
"""Fit the model using X as training data
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
"""
return self._fit(X)
| bsd-3-clause |
voxlol/scikit-learn | sklearn/ensemble/tests/test_forest.py | 48 | 35412 | """
Testing for the forest module (sklearn.ensemble.forest).
"""
# Authors: Gilles Louppe,
# Brian Holt,
# Andreas Mueller,
# Arnaud Joly
# License: BSD 3 clause
import pickle
from collections import defaultdict
from itertools import product
import numpy as np
from scipy.sparse import csr_matrix, csc_matrix, coo_matrix
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_less, assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn import datasets
from sklearn.decomposition import TruncatedSVD
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import ExtraTreesRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomTreesEmbedding
from sklearn.grid_search import GridSearchCV
from sklearn.svm import LinearSVC
from sklearn.utils.validation import check_random_state
from sklearn.tree.tree import SPARSE_SPLITTERS
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = check_random_state(0)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
FOREST_CLASSIFIERS = {
"ExtraTreesClassifier": ExtraTreesClassifier,
"RandomForestClassifier": RandomForestClassifier,
}
FOREST_REGRESSORS = {
"ExtraTreesRegressor": ExtraTreesRegressor,
"RandomForestRegressor": RandomForestRegressor,
}
FOREST_TRANSFORMERS = {
"RandomTreesEmbedding": RandomTreesEmbedding,
}
FOREST_ESTIMATORS = dict()
FOREST_ESTIMATORS.update(FOREST_CLASSIFIERS)
FOREST_ESTIMATORS.update(FOREST_REGRESSORS)
FOREST_ESTIMATORS.update(FOREST_TRANSFORMERS)
def check_classification_toy(name):
"""Check classification on a toy dataset."""
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
clf = ForestClassifier(n_estimators=10, max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf))
# also test apply
leaf_indices = clf.apply(X)
assert_equal(leaf_indices.shape, (len(X), clf.n_estimators))
def test_classification_toy():
for name in FOREST_CLASSIFIERS:
yield check_classification_toy, name
def check_iris_criterion(name, criterion):
# Check consistency on dataset iris.
ForestClassifier = FOREST_CLASSIFIERS[name]
clf = ForestClassifier(n_estimators=10, criterion=criterion,
random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.9, "Failed with criterion %s and score = %f"
% (criterion, score))
clf = ForestClassifier(n_estimators=10, criterion=criterion,
max_features=2, random_state=1)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert_greater(score, 0.5, "Failed with criterion %s and score = %f"
% (criterion, score))
def test_iris():
for name, criterion in product(FOREST_CLASSIFIERS, ("gini", "entropy")):
yield check_iris_criterion, name, criterion
def check_boston_criterion(name, criterion):
# Check consistency on dataset boston house prices.
ForestRegressor = FOREST_REGRESSORS[name]
clf = ForestRegressor(n_estimators=5, criterion=criterion, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=None, criterion %s "
"and score = %f" % (criterion, score))
clf = ForestRegressor(n_estimators=5, criterion=criterion,
max_features=6, random_state=1)
clf.fit(boston.data, boston.target)
score = clf.score(boston.data, boston.target)
assert_greater(score, 0.95, "Failed with max_features=6, criterion %s "
"and score = %f" % (criterion, score))
def test_boston():
for name, criterion in product(FOREST_REGRESSORS, ("mse", )):
yield check_boston_criterion, name, criterion
def check_regressor_attributes(name):
# Regression models should not have a classes_ attribute.
r = FOREST_REGRESSORS[name](random_state=0)
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
r.fit([[1, 2, 3], [4, 5, 6]], [1, 2])
assert_false(hasattr(r, "classes_"))
assert_false(hasattr(r, "n_classes_"))
def test_regressor_attributes():
for name in FOREST_REGRESSORS:
yield check_regressor_attributes, name
def check_probability(name):
# Predict probabilities.
ForestClassifier = FOREST_CLASSIFIERS[name]
with np.errstate(divide="ignore"):
clf = ForestClassifier(n_estimators=10, random_state=1, max_features=1,
max_depth=1)
clf.fit(iris.data, iris.target)
assert_array_almost_equal(np.sum(clf.predict_proba(iris.data), axis=1),
np.ones(iris.data.shape[0]))
assert_array_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)))
def test_probability():
for name in FOREST_CLASSIFIERS:
yield check_probability, name
def check_importances(name, X, y):
# Check variable importances.
ForestClassifier = FOREST_CLASSIFIERS[name]
for n_jobs in [1, 2]:
clf = ForestClassifier(n_estimators=10, n_jobs=n_jobs)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10)
assert_equal(n_important, 3)
X_new = clf.transform(X, threshold="mean")
        assert_greater(X_new.shape[1], 0)
        assert_less(X_new.shape[1], X.shape[1])
# Check with sample weights
sample_weight = np.ones(y.shape)
sample_weight[y == 1] *= 100
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
importances = clf.feature_importances_
assert_true(np.all(importances >= 0.0))
clf = ForestClassifier(n_estimators=50, n_jobs=n_jobs, random_state=0)
clf.fit(X, y, sample_weight=3 * sample_weight)
importances_bis = clf.feature_importances_
assert_almost_equal(importances, importances_bis)
def test_importances():
X, y = datasets.make_classification(n_samples=1000, n_features=10,
n_informative=3, n_redundant=0,
n_repeated=0, shuffle=False,
random_state=0)
for name in FOREST_CLASSIFIERS:
yield check_importances, name, X, y
def check_unfitted_feature_importances(name):
assert_raises(ValueError, getattr, FOREST_ESTIMATORS[name](random_state=0),
"feature_importances_")
def test_unfitted_feature_importances():
for name in FOREST_ESTIMATORS:
yield check_unfitted_feature_importances, name
def check_oob_score(name, X, y, n_estimators=20):
# Check that oob prediction is a good estimation of the generalization
# error.
# Proper behavior
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=n_estimators, bootstrap=True)
n_samples = X.shape[0]
est.fit(X[:n_samples // 2, :], y[:n_samples // 2])
test_score = est.score(X[n_samples // 2:, :], y[n_samples // 2:])
if name in FOREST_CLASSIFIERS:
assert_less(abs(test_score - est.oob_score_), 0.1)
else:
assert_greater(test_score, est.oob_score_)
assert_greater(est.oob_score_, .8)
# Check warning if not enough estimators
with np.errstate(divide="ignore", invalid="ignore"):
est = FOREST_ESTIMATORS[name](oob_score=True, random_state=0,
n_estimators=1, bootstrap=True)
assert_warns(UserWarning, est.fit, X, y)
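# A hedged usage sketch (not part of the original test module): the
# out-of-bag estimate exercised above is enabled on a forest by combining
# bootstrap=True with oob_score=True; after fitting, the estimate is exposed
# as oob_score_.  Uses the module-level iris dataset loaded above.
#
#   >>> from sklearn.ensemble import RandomForestClassifier
#   >>> forest = RandomForestClassifier(n_estimators=50, bootstrap=True,
#   ...                                 oob_score=True, random_state=0)
#   >>> oob = forest.fit(iris.data, iris.target).oob_score_
#   >>> 0.8 < oob <= 1.0
#   True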
def test_oob_score():
for name in FOREST_CLASSIFIERS:
yield check_oob_score, name, iris.data, iris.target
# csc matrix
yield check_oob_score, name, csc_matrix(iris.data), iris.target
# non-contiguous targets in classification
yield check_oob_score, name, iris.data, iris.target * 2 + 1
for name in FOREST_REGRESSORS:
yield check_oob_score, name, boston.data, boston.target, 50
# csc matrix
yield check_oob_score, name, csc_matrix(boston.data), boston.target, 50
def check_oob_score_raise_error(name):
ForestEstimator = FOREST_ESTIMATORS[name]
if name in FOREST_TRANSFORMERS:
for oob_score in [True, False]:
assert_raises(TypeError, ForestEstimator, oob_score=oob_score)
assert_raises(NotImplementedError, ForestEstimator()._set_oob_score,
X, y)
else:
# Unfitted / no bootstrap / no oob_score
for oob_score, bootstrap in [(True, False), (False, True),
(False, False)]:
est = ForestEstimator(oob_score=oob_score, bootstrap=bootstrap,
random_state=0)
assert_false(hasattr(est, "oob_score_"))
# No bootstrap
assert_raises(ValueError, ForestEstimator(oob_score=True,
bootstrap=False).fit, X, y)
def test_oob_score_raise_error():
for name in FOREST_ESTIMATORS:
yield check_oob_score_raise_error, name
def check_gridsearch(name):
forest = FOREST_CLASSIFIERS[name]()
clf = GridSearchCV(forest, {'n_estimators': (1, 2), 'max_depth': (1, 2)})
clf.fit(iris.data, iris.target)
def test_gridsearch():
# Check that base trees can be grid-searched.
for name in FOREST_CLASSIFIERS:
yield check_gridsearch, name
def check_parallel(name, X, y):
"""Check parallel computations in classification"""
ForestEstimator = FOREST_ESTIMATORS[name]
forest = ForestEstimator(n_estimators=10, n_jobs=3, random_state=0)
forest.fit(X, y)
assert_equal(len(forest), 10)
forest.set_params(n_jobs=1)
y1 = forest.predict(X)
forest.set_params(n_jobs=2)
y2 = forest.predict(X)
assert_array_almost_equal(y1, y2, 3)
def test_parallel():
for name in FOREST_CLASSIFIERS:
yield check_parallel, name, iris.data, iris.target
for name in FOREST_REGRESSORS:
yield check_parallel, name, boston.data, boston.target
def check_pickle(name, X, y):
# Check pickability.
ForestEstimator = FOREST_ESTIMATORS[name]
obj = ForestEstimator(random_state=0)
obj.fit(X, y)
score = obj.score(X, y)
pickle_object = pickle.dumps(obj)
obj2 = pickle.loads(pickle_object)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(X, y)
assert_equal(score, score2)
def test_pickle():
for name in FOREST_CLASSIFIERS:
yield check_pickle, name, iris.data[::2], iris.target[::2]
for name in FOREST_REGRESSORS:
yield check_pickle, name, boston.data[::2], boston.target[::2]
def check_multioutput(name):
# Check estimators on multi-output problems.
X_train = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1], [-2, 1],
[-1, 1], [-1, 2], [2, -1], [1, -1], [1, -2]]
y_train = [[-1, 0], [-1, 0], [-1, 0], [1, 1], [1, 1], [1, 1], [-1, 2],
[-1, 2], [-1, 2], [1, 3], [1, 3], [1, 3]]
X_test = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_test = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
y_pred = est.fit(X_train, y_train).predict(X_test)
assert_array_almost_equal(y_pred, y_test)
if name in FOREST_CLASSIFIERS:
with np.errstate(divide="ignore"):
proba = est.predict_proba(X_test)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = est.predict_log_proba(X_test)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
def test_multioutput():
for name in FOREST_CLASSIFIERS:
yield check_multioutput, name
for name in FOREST_REGRESSORS:
yield check_multioutput, name
def check_classes_shape(name):
# Test that n_classes_ and classes_ have proper shape.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Classification, single output
clf = ForestClassifier(random_state=0).fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(random_state=0).fit(X, _y)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_classes_shape():
for name in FOREST_CLASSIFIERS:
yield check_classes_shape, name
def test_random_trees_dense_type():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning a dense array.
# Create the RTE with sparse=False
hasher = RandomTreesEmbedding(n_estimators=10, sparse_output=False)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# Assert that type is ndarray, not scipy.sparse.csr.csr_matrix
assert_equal(type(X_transformed), np.ndarray)
def test_random_trees_dense_equal():
# Test that the `sparse_output` parameter of RandomTreesEmbedding
# works by returning the same array for both argument values.
# Create the RTEs
hasher_dense = RandomTreesEmbedding(n_estimators=10, sparse_output=False,
random_state=0)
hasher_sparse = RandomTreesEmbedding(n_estimators=10, sparse_output=True,
random_state=0)
X, y = datasets.make_circles(factor=0.5)
X_transformed_dense = hasher_dense.fit_transform(X)
X_transformed_sparse = hasher_sparse.fit_transform(X)
# Assert that dense and sparse hashers have same array.
assert_array_equal(X_transformed_sparse.toarray(), X_transformed_dense)
def test_random_hasher():
# test random forest hashing on circles dataset
# make sure that it is linearly separable.
# even after projected to two SVD dimensions
# Note: Not all random_states produce perfect results.
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X, y = datasets.make_circles(factor=0.5)
X_transformed = hasher.fit_transform(X)
# test fit and transform:
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
assert_array_equal(hasher.fit(X).transform(X).toarray(),
X_transformed.toarray())
# one leaf active per data point per forest
assert_equal(X_transformed.shape[0], X.shape[0])
assert_array_equal(X_transformed.sum(axis=1), hasher.n_estimators)
svd = TruncatedSVD(n_components=2)
X_reduced = svd.fit_transform(X_transformed)
linear_clf = LinearSVC()
linear_clf.fit(X_reduced, y)
assert_equal(linear_clf.score(X_reduced, y), 1.)
def test_random_hasher_sparse_data():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0)
hasher = RandomTreesEmbedding(n_estimators=30, random_state=1)
X_transformed = hasher.fit_transform(X)
X_transformed_sparse = hasher.fit_transform(csc_matrix(X))
assert_array_equal(X_transformed_sparse.toarray(), X_transformed.toarray())
def test_parallel_train():
rng = check_random_state(12321)
n_samples, n_features = 80, 30
X_train = rng.randn(n_samples, n_features)
y_train = rng.randint(0, 2, n_samples)
clfs = [
RandomForestClassifier(n_estimators=20, n_jobs=n_jobs,
random_state=12345).fit(X_train, y_train)
for n_jobs in [1, 2, 3, 8, 16, 32]
]
X_test = rng.randn(n_samples, n_features)
probas = [clf.predict_proba(X_test) for clf in clfs]
for proba1, proba2 in zip(probas, probas[1:]):
assert_array_almost_equal(proba1, proba2)
def test_distribution():
rng = check_random_state(12321)
# Single variable with 4 values
X = rng.randint(0, 4, size=(1000, 1))
y = rng.rand(1000)
n_trees = 500
clf = ExtraTreesRegressor(n_estimators=n_trees, random_state=42).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = sorted([(1. * count / n_trees, tree)
for tree, count in uniques.items()])
# On a single variable problem where X_0 has 4 equiprobable values, there
# are 5 ways to build a random tree. The more compact (0,1/0,0/--0,2/--) of
# them has probability 1/3 while the 4 others have probability 1/6.
assert_equal(len(uniques), 5)
assert_greater(0.20, uniques[0][0]) # Rough approximation of 1/6.
assert_greater(0.20, uniques[1][0])
assert_greater(0.20, uniques[2][0])
assert_greater(0.20, uniques[3][0])
assert_greater(uniques[4][0], 0.3)
assert_equal(uniques[4][1], "0,1/0,0/--0,2/--")
# Two variables, one with 2 values, one with 3 values
X = np.empty((1000, 2))
    X[:, 0] = rng.randint(0, 2, 1000)
    X[:, 1] = rng.randint(0, 3, 1000)
y = rng.rand(1000)
clf = ExtraTreesRegressor(n_estimators=100, max_features=1,
random_state=1).fit(X, y)
uniques = defaultdict(int)
for tree in clf.estimators_:
tree = "".join(("%d,%d/" % (f, int(t)) if f >= 0 else "-")
for f, t in zip(tree.tree_.feature,
tree.tree_.threshold))
uniques[tree] += 1
uniques = [(count, tree) for tree, count in uniques.items()]
assert_equal(len(uniques), 8)
def check_max_leaf_nodes_max_depth(name, X, y):
# Test precedence of max_leaf_nodes over max_depth.
ForestEstimator = FOREST_ESTIMATORS[name]
est = ForestEstimator(max_depth=1, max_leaf_nodes=4,
n_estimators=1).fit(X, y)
assert_greater(est.estimators_[0].tree_.max_depth, 1)
est = ForestEstimator(max_depth=1, n_estimators=1).fit(X, y)
assert_equal(est.estimators_[0].tree_.max_depth, 1)
def test_max_leaf_nodes_max_depth():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for name in FOREST_ESTIMATORS:
yield check_max_leaf_nodes_max_depth, name, X, y
def check_min_samples_leaf(name, X, y):
# Test if leaves contain more than leaf_count training examples
ForestEstimator = FOREST_ESTIMATORS[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
est = ForestEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.estimators_[0].tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def test_min_samples_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_samples_leaf, name, X, y
def check_min_weight_fraction_leaf(name, X, y):
# Test if leaves contain at least min_weight_fraction_leaf of the
# training set
ForestEstimator = FOREST_ESTIMATORS[name]
rng = np.random.RandomState(0)
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes in (None, 1000):
for frac in np.linspace(0, 0.5, 6):
est = ForestEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
if isinstance(est, (RandomForestClassifier,
RandomForestRegressor)):
est.bootstrap = False
est.fit(X, y, sample_weight=weights)
out = est.estimators_[0].tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
X = X.astype(np.float32)
for name in FOREST_ESTIMATORS:
yield check_min_weight_fraction_leaf, name, X, y
def check_sparse_input(name, X, X_sparse, y):
ForestEstimator = FOREST_ESTIMATORS[name]
dense = ForestEstimator(random_state=0, max_depth=2).fit(X, y)
sparse = ForestEstimator(random_state=0, max_depth=2).fit(X_sparse, y)
assert_array_almost_equal(sparse.apply(X), dense.apply(X))
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_array_almost_equal(sparse.predict(X), dense.predict(X))
assert_array_almost_equal(sparse.feature_importances_,
dense.feature_importances_)
if name in FOREST_CLASSIFIERS:
assert_array_almost_equal(sparse.predict_proba(X),
dense.predict_proba(X))
assert_array_almost_equal(sparse.predict_log_proba(X),
dense.predict_log_proba(X))
if name in FOREST_TRANSFORMERS:
assert_array_almost_equal(sparse.transform(X).toarray(),
dense.transform(X).toarray())
assert_array_almost_equal(sparse.fit_transform(X).toarray(),
dense.fit_transform(X).toarray())
def test_sparse_input():
X, y = datasets.make_multilabel_classification(return_indicator=True,
random_state=0,
n_samples=40)
for name, sparse_matrix in product(FOREST_ESTIMATORS,
(csr_matrix, csc_matrix, coo_matrix)):
yield check_sparse_input, name, X, sparse_matrix(X), y
def check_memory_layout(name, dtype):
# Check that it works no matter the memory layout
est = FOREST_ESTIMATORS[name](random_state=0, bootstrap=False)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if est.base_estimator.splitter in SPARSE_SPLITTERS:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# coo_matrix
X = coo_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_memory_layout():
for name, dtype in product(FOREST_CLASSIFIERS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
for name, dtype in product(FOREST_REGRESSORS, [np.float64, np.float32]):
yield check_memory_layout, name, dtype
def check_1d_input(name, X, X_2d, y):
ForestEstimator = FOREST_ESTIMATORS[name]
assert_raises(ValueError, ForestEstimator(random_state=0).fit, X, y)
est = ForestEstimator(random_state=0)
est.fit(X_2d, y)
if name in FOREST_CLASSIFIERS or name in FOREST_REGRESSORS:
assert_raises(ValueError, est.predict, X)
def test_1d_input():
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
for name in FOREST_ESTIMATORS:
yield check_1d_input, name, X, X_2d, y
def check_class_weights(name):
# Check class_weights resemble sample_weights behavior.
ForestClassifier = FOREST_CLASSIFIERS[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = ForestClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = ForestClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "balanced" which should also have no effect
clf4 = ForestClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = ForestClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = ForestClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
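# A hedged sketch (not part of the original test module): class_weight can
# be a preset string or an explicit mapping from class label to weight; the
# dict form mirrors passing an equivalent sample_weight at fit time, which
# is what check_class_weights asserts above.  Uses the module-level iris
# dataset.
#
#   >>> from sklearn.ensemble import RandomForestClassifier
#   >>> clf = RandomForestClassifier(class_weight={0: 1., 1: 100., 2: 1.},
#   ...                              random_state=0)
#   >>> clf = clf.fit(iris.data, iris.target)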
def test_class_weights():
for name in FOREST_CLASSIFIERS:
yield check_class_weights, name
def check_class_weight_balanced_and_bootstrap_multi_output(name):
    # Test class_weight works for multi-output
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
clf = ForestClassifier(class_weight='balanced', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}, {-2: 1., 2: 1.}],
random_state=0)
clf.fit(X, _y)
# smoke test for subsample and balanced subsample
clf = ForestClassifier(class_weight='balanced_subsample', random_state=0)
clf.fit(X, _y)
clf = ForestClassifier(class_weight='subsample', random_state=0)
ignore_warnings(clf.fit)(X, _y)
def test_class_weight_balanced_and_bootstrap_multi_output():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_balanced_and_bootstrap_multi_output, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
ForestClassifier = FOREST_CLASSIFIERS[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = ForestClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Warning warm_start with preset
clf = ForestClassifier(class_weight='auto', warm_start=True,
random_state=0)
assert_warns(UserWarning, clf.fit, X, y)
assert_warns(UserWarning, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = ForestClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = ForestClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in FOREST_CLASSIFIERS:
yield check_class_weight_errors, name
def check_warm_start(name, random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = ForestEstimator(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = ForestEstimator(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
assert_array_equal(clf_ws.apply(X), clf_no_ws.apply(X),
err_msg="Failed with {0}".format(name))
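# A hedged usage sketch (not part of the original test module): with
# warm_start=True, raising n_estimators and calling fit again appends new
# trees to the existing ensemble instead of rebuilding it, which is the
# behaviour verified above.  Uses the module-level iris dataset.
#
#   >>> from sklearn.ensemble import RandomForestClassifier
#   >>> forest = RandomForestClassifier(n_estimators=5, warm_start=True,
#   ...                                 random_state=0)
#   >>> forest = forest.fit(iris.data, iris.target)
#   >>> len(forest.estimators_)
#   5
#   >>> _ = forest.set_params(n_estimators=10).fit(iris.data, iris.target)
#   >>> len(forest.estimators_)
#   10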
def test_warm_start():
for name in FOREST_ESTIMATORS:
yield check_warm_start, name
def check_warm_start_clear(name):
# Test if fit clears state and grows a new forest when warm_start==False.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=False,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True,
random_state=2)
clf_2.fit(X, y) # inits state
clf_2.set_params(warm_start=False, random_state=1)
clf_2.fit(X, y) # clears old state and equals clf
assert_array_almost_equal(clf_2.apply(X), clf.apply(X))
def test_warm_start_clear():
for name in FOREST_ESTIMATORS:
yield check_warm_start_clear, name
def check_warm_start_smaller_n_estimators(name):
# Test if warm start second fit with smaller n_estimators raises error.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=1, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_smaller_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_smaller_n_estimators, name
def check_warm_start_equal_n_estimators(name):
# Test if warm start with equal n_estimators does nothing and returns the
# same forest and raises a warning.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
clf = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=True,
random_state=1)
clf_2.fit(X, y)
# Now clf_2 equals clf.
clf_2.set_params(random_state=2)
assert_warns(UserWarning, clf_2.fit, X, y)
# If we had fit the trees again we would have got a different forest as we
# changed the random state.
assert_array_equal(clf.apply(X), clf_2.apply(X))
def test_warm_start_equal_n_estimators():
for name in FOREST_ESTIMATORS:
yield check_warm_start_equal_n_estimators, name
def check_warm_start_oob(name):
# Test that the warm start computes oob score when asked.
X, y = datasets.make_hastie_10_2(n_samples=20, random_state=1)
ForestEstimator = FOREST_ESTIMATORS[name]
# Use 15 estimators to avoid 'some inputs do not have OOB scores' warning.
clf = ForestEstimator(n_estimators=15, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=True)
clf.fit(X, y)
clf_2 = ForestEstimator(n_estimators=5, max_depth=3, warm_start=False,
random_state=1, bootstrap=True, oob_score=False)
clf_2.fit(X, y)
clf_2.set_params(warm_start=True, oob_score=True, n_estimators=15)
clf_2.fit(X, y)
assert_true(hasattr(clf_2, 'oob_score_'))
assert_equal(clf.oob_score_, clf_2.oob_score_)
# Test that oob_score is computed even if we don't need to train
# additional trees.
clf_3 = ForestEstimator(n_estimators=15, max_depth=3, warm_start=True,
random_state=1, bootstrap=True, oob_score=False)
clf_3.fit(X, y)
assert_true(not(hasattr(clf_3, 'oob_score_')))
clf_3.set_params(oob_score=True)
ignore_warnings(clf_3.fit)(X, y)
assert_equal(clf.oob_score_, clf_3.oob_score_)
def test_warm_start_oob():
for name in FOREST_CLASSIFIERS:
yield check_warm_start_oob, name
for name in FOREST_REGRESSORS:
yield check_warm_start_oob, name
def test_dtype_convert():
classifier = RandomForestClassifier()
CLASSES = 15
X = np.eye(CLASSES)
y = [ch for ch in 'ABCDEFGHIJKLMNOPQRSTU'[:CLASSES]]
result = classifier.fit(X, y).predict(X)
assert_array_equal(result, y) | bsd-3-clause |
zfrxiaxia/Pythone_Coursera | Data Processing Using Python/4.2.2比较Microsoft和Intel在2014年每个月股票的最高收盘价.py | 1 | 1659 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 14 15:10:51 2016
"""
import time
from matplotlib.finance import quotes_historical_yahoo_ochl
from datetime import date
from datetime import datetime
import pandas as pd
import matplotlib.pyplot as plt
import pylab as pl
import numpy as np
today = date.today()
start = (today.year-3, today.month,today.day)
quotesMS = quotes_historical_yahoo_ochl('MSFT',start,today)
quotesIN = quotes_historical_yahoo_ochl('INTC',start,today)
labels = ['date', 'open', 'close', 'high', 'low', 'volume']
quotesdfMS = pd.DataFrame(quotesMS, columns=labels)
quotesdfIN = pd.DataFrame(quotesIN, columns=labels)
# Microsoft: re-index the quotes by date string, keep the 2014 rows and record each row's month
list1 = []
for i in range(0,len(quotesMS)):
x = date.fromordinal(int(quotesMS[i][0]))
y = date.strftime(x,'%y/%m/%d')
list1.append(y)
quotesdfMS.index = list1
quotesdfMS = quotesdfMS.drop(['date'],axis = 1)
list1 = []
quotesdfMS14 = quotesdfMS['14/01/01':'14/12/31']
for i in range(0, len(quotesdfMS14)):
list1.append(int(quotesdfMS14.index[i][3:5]))
quotesdfMS14['month'] = list1
# Intel: same processing as for Microsoft above
list2 = []
for i in range(0,len(quotesIN)):
x = date.fromordinal(int(quotesIN[i][0]))
y = date.strftime(x,'%y/%m/%d')
list2.append(y)
quotesdfIN.index = list2
quotesdfIN = quotesdfIN.drop(['date'],axis = 1)
list2 = []
quotesdfIN14 = quotesdfIN['14/01/01':'14/12/31']
for i in range(0, len(quotesdfIN14)):
list2.append(int(quotesdfIN14.index[i][3:5]))
quotesdfIN14['month'] = list2
list3=[1,2,3,4,5,6,7,8,9,10,11,12]
pl.subplot(211)
plt.plot(list3,quotesdfMS14.groupby('month').max().close,color='r',marker='o')
pl.subplot(212)
plt.plot(list3,quotesdfIN14.groupby('month').max().close,color='green',marker='o')
plt.show()
| gpl-2.0 |
eranchetz/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_macosx.py | 69 | 15397 | from __future__ import division
import os
import numpy
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase,\
FigureManagerBase, FigureCanvasBase, NavigationToolbar2
from matplotlib.cbook import maxdict
from matplotlib.figure import Figure
from matplotlib.path import Path
from matplotlib.mathtext import MathTextParser
from matplotlib.colors import colorConverter
from matplotlib.widgets import SubplotTool
import matplotlib
from matplotlib.backends import _macosx
def show():
"""Show all the figures and enter the Cocoa mainloop.
This function will not return until all windows are closed or
the interpreter exits."""
# Having a Python-level function "show" wrapping the built-in
# function "show" in the _macosx extension module allows us to
# to add attributes to "show". This is something ipython does.
_macosx.show()
class RendererMac(RendererBase):
"""
The renderer handles drawing/rendering operations. Most of the renderer's
methods forwards the command to the renderer's graphics context. The
renderer does not wrap a C object and is written in pure Python.
"""
texd = maxdict(50) # a cache of tex image rasters
def __init__(self, dpi, width, height):
RendererBase.__init__(self)
self.dpi = dpi
self.width = width
self.height = height
self.gc = GraphicsContextMac()
self.mathtext_parser = MathTextParser('MacOSX')
def set_width_height (self, width, height):
self.width, self.height = width, height
def draw_path(self, gc, path, transform, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_path(path, transform, rgbFace)
def draw_markers(self, gc, marker_path, marker_trans, path, trans, rgbFace=None):
if rgbFace is not None:
rgbFace = tuple(rgbFace)
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
gc.draw_markers(marker_path, marker_trans, path, trans, rgbFace)
def draw_path_collection(self, *args):
gc = self.gc
args = args[:13]
gc.draw_path_collection(*args)
def draw_quad_mesh(self, *args):
gc = self.gc
gc.draw_quad_mesh(*args)
def new_gc(self):
self.gc.reset()
return self.gc
def draw_image(self, x, y, im, bbox, clippath=None, clippath_trans=None):
im.flipud_out()
nrows, ncols, data = im.as_rgba_str()
self.gc.draw_image(x, y, nrows, ncols, data, bbox, clippath, clippath_trans)
im.flipud_out()
def draw_tex(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
# todo, handle props, angle, origins
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
key = s, size, self.dpi, angle, texmanager.get_font_config()
im = self.texd.get(key) # Not sure what this does; just copied from backend_agg.py
if im is None:
Z = texmanager.get_grey(s, size, self.dpi)
Z = numpy.array(255.0 - Z * 255.0, numpy.uint8)
gc.draw_mathtext(x, y, angle, Z)
def _draw_mathtext(self, gc, x, y, s, prop, angle):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
size = prop.get_size_in_points()
ox, oy, width, height, descent, image, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
gc.draw_mathtext(x, y, angle, 255 - image.as_array())
def draw_text(self, gc, x, y, s, prop, angle, ismath=False):
if gc!=self.gc:
n = self.gc.level() - gc.level()
for i in range(n): self.gc.restore()
self.gc = gc
if ismath:
self._draw_mathtext(gc, x, y, s, prop, angle)
else:
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
gc.draw_text(x, y, unicode(s), family, size, weight, style, angle)
def get_text_width_height_descent(self, s, prop, ismath):
if ismath=='TeX':
# TODO: handle props
size = prop.get_size_in_points()
texmanager = self.get_texmanager()
Z = texmanager.get_grey(s, size, self.dpi)
m,n = Z.shape
# TODO: handle descent; This is based on backend_agg.py
return n, m, 0
if ismath:
ox, oy, width, height, descent, fonts, used_characters = \
self.mathtext_parser.parse(s, self.dpi, prop)
return width, height, descent
family = prop.get_family()
size = prop.get_size_in_points()
weight = prop.get_weight()
style = prop.get_style()
return self.gc.get_text_width_height_descent(unicode(s), family, size, weight, style)
def flipy(self):
return False
def points_to_pixels(self, points):
return points/72.0 * self.dpi
def option_image_nocomposite(self):
return True
class GraphicsContextMac(_macosx.GraphicsContext, GraphicsContextBase):
"""
The GraphicsContext wraps a Quartz graphics context. All methods
are implemented at the C-level in macosx.GraphicsContext. These
methods set drawing properties such as the line style, fill color,
etc. The actual drawing is done by the Renderer, which draws into
the GraphicsContext.
"""
def __init__(self):
GraphicsContextBase.__init__(self)
_macosx.GraphicsContext.__init__(self)
def set_foreground(self, fg, isRGB=False):
if not isRGB:
fg = colorConverter.to_rgb(fg)
_macosx.GraphicsContext.set_foreground(self, fg)
def set_clip_rectangle(self, box):
GraphicsContextBase.set_clip_rectangle(self, box)
if not box: return
_macosx.GraphicsContext.set_clip_rectangle(self, box.bounds)
def set_clip_path(self, path):
GraphicsContextBase.set_clip_path(self, path)
if not path: return
path = path.get_fully_transformed_path()
_macosx.GraphicsContext.set_clip_path(self, path)
########################################################################
#
# The following functions and classes are for pylab and implement
# window/figure managers, etc...
#
########################################################################
def draw_if_interactive():
"""
For performance reasons, we don't want to redraw the figure after
each draw command. Instead, we mark the figure as invalid, so that
it will be redrawn as soon as the event loop resumes via PyOS_InputHook.
This function should be called after each draw event, even if
matplotlib is not running interactively.
"""
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.invalidate()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
figure = FigureClass(*args, **kwargs)
canvas = FigureCanvasMac(figure)
manager = FigureManagerMac(canvas, num)
return manager
class FigureCanvasMac(_macosx.FigureCanvas, FigureCanvasBase):
"""
The canvas the figure renders into. Calls the draw and print fig
methods, creates the renderers, etc...
Public attribute
figure - A Figure instance
Events such as button presses, mouse movements, and key presses
are handled in the C code and the base class methods
button_press_event, button_release_event, motion_notify_event,
key_press_event, and key_release_event are called from there.
"""
def __init__(self, figure):
FigureCanvasBase.__init__(self, figure)
width, height = self.get_width_height()
self.renderer = RendererMac(figure.dpi, width, height)
_macosx.FigureCanvas.__init__(self, width, height)
def resize(self, width, height):
self.renderer.set_width_height(width, height)
dpi = self.figure.dpi
width /= dpi
height /= dpi
self.figure.set_size_inches(width, height)
def print_figure(self, filename, dpi=None, facecolor='w', edgecolor='w',
orientation='portrait', **kwargs):
if dpi is None: dpi = matplotlib.rcParams['savefig.dpi']
filename = unicode(filename)
root, ext = os.path.splitext(filename)
ext = ext[1:].lower()
if not ext:
ext = "png"
filename = root + "." + ext
if ext=="jpg": ext = "jpeg"
# save the figure settings
origfacecolor = self.figure.get_facecolor()
origedgecolor = self.figure.get_edgecolor()
# set the new parameters
self.figure.set_facecolor(facecolor)
self.figure.set_edgecolor(edgecolor)
if ext in ('jpeg', 'png', 'tiff', 'gif', 'bmp'):
width, height = self.figure.get_size_inches()
width, height = width*dpi, height*dpi
self.write_bitmap(filename, width, height)
elif ext == 'pdf':
self.write_pdf(filename)
elif ext in ('ps', 'eps'):
from backend_ps import FigureCanvasPS
# Postscript backend changes figure.dpi, but doesn't change it back
origDPI = self.figure.dpi
fc = self.switch_backends(FigureCanvasPS)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.dpi = origDPI
self.figure.set_canvas(self)
elif ext=='svg':
from backend_svg import FigureCanvasSVG
fc = self.switch_backends(FigureCanvasSVG)
fc.print_figure(filename, dpi, facecolor, edgecolor,
orientation, **kwargs)
self.figure.set_canvas(self)
else:
raise ValueError("Figure format not available (extension %s)" % ext)
# restore original figure settings
self.figure.set_facecolor(origfacecolor)
self.figure.set_edgecolor(origedgecolor)
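# A hedged usage sketch (not part of the original module): on this backend
# the format dispatch in print_figure above is what backs the ordinary
# pyplot save call -- the filename extension selects bitmap, PDF,
# PostScript or SVG output.
#
#   >>> import matplotlib.pyplot as plt
#   >>> _ = plt.plot([0, 1], [0, 1])
#   >>> plt.savefig('figure.png')   # routed through write_bitmap
#   >>> plt.savefig('figure.pdf')   # routed through write_pdf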
class FigureManagerMac(_macosx.FigureManager, FigureManagerBase):
"""
Wrap everything up into a window for the pylab interface
"""
def __init__(self, canvas, num):
FigureManagerBase.__init__(self, canvas, num)
title = "Figure %d" % num
_macosx.FigureManager.__init__(self, canvas, title)
if matplotlib.rcParams['toolbar']=='classic':
self.toolbar = NavigationToolbarMac(canvas)
elif matplotlib.rcParams['toolbar']=='toolbar2':
self.toolbar = NavigationToolbar2Mac(canvas)
else:
self.toolbar = None
if self.toolbar is not None:
self.toolbar.update()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar != None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
# This is ugly, but this is what tkagg and gtk are doing.
# It is needed to get ginput() working.
self.canvas.figure.show = lambda *args: self.show()
def show(self):
self.canvas.draw()
def close(self):
Gcf.destroy(self.num)
class NavigationToolbarMac(_macosx.NavigationToolbar):
def __init__(self, canvas):
self.canvas = canvas
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
images = {}
for imagename in ("stock_left",
"stock_right",
"stock_up",
"stock_down",
"stock_zoom-in",
"stock_zoom-out",
"stock_save_as"):
filename = os.path.join(basedir, imagename+".ppm")
images[imagename] = self._read_ppm_image(filename)
_macosx.NavigationToolbar.__init__(self, images)
self.message = None
def _read_ppm_image(self, filename):
data = ""
imagefile = open(filename)
for line in imagefile:
if "#" in line:
i = line.index("#")
line = line[:i] + "\n"
data += line
imagefile.close()
magic, width, height, maxcolor, imagedata = data.split(None, 4)
width, height = int(width), int(height)
assert magic=="P6"
assert len(imagedata)==width*height*3 # 3 colors in RGB
return (width, height, imagedata)
def panx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.pan(direction)
self.canvas.invalidate()
def pany(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.pan(direction)
self.canvas.invalidate()
def zoomx(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].xaxis.zoom(direction)
self.canvas.invalidate()
def zoomy(self, direction):
axes = self.canvas.figure.axes
selected = self.get_active()
for i in selected:
axes[i].yaxis.zoom(direction)
self.canvas.invalidate()
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
class NavigationToolbar2Mac(_macosx.NavigationToolbar2, NavigationToolbar2):
def __init__(self, canvas):
NavigationToolbar2.__init__(self, canvas)
def _init_toolbar(self):
basedir = os.path.join(matplotlib.rcParams['datapath'], "images")
_macosx.NavigationToolbar2.__init__(self, basedir)
def draw_rubberband(self, event, x0, y0, x1, y1):
self.canvas.set_rubberband(x0, y0, x1, y1)
def release(self, event):
self.canvas.remove_rubberband()
def set_cursor(self, cursor):
_macosx.set_cursor(cursor)
def save_figure(self):
filename = _macosx.choose_save_file('Save the figure')
if filename is None: # Cancel
return
self.canvas.print_figure(filename)
def prepare_configure_subplots(self):
toolfig = Figure(figsize=(6,3))
canvas = FigureCanvasMac(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
return canvas
def set_message(self, message):
_macosx.NavigationToolbar2.set_message(self, message.encode('utf-8'))
########################################################################
#
# Now just provide the standard names that backend.__init__ is expecting
#
########################################################################
FigureManager = FigureManagerMac
| agpl-3.0 |
lmzintgraf/MultiMAuS | data/features/examples/aggregate_features_example.py | 1 | 2221 | """
Example script for how to use the AggregateFeatures class
@author Dennis Soemers
"""
from data.features.aggregate_features import AggregateFeatures
from data import utils_data
from os.path import join
import pandas as pd
def run_test():
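    """
    Load the transaction log, split it into a training and a test part, add
    aggregate features to both parts, and write the augmented frames to disk.
    """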
# specify filepath here to load complete dataset from (training + test data)
DATA_FILEPATH = '../../real_data/transaction_log.csv'
# number of entries that will be used for training
NUM_TRAINING_INSTANCES = 50000
# load dataframe
df = pd.read_csv(DATA_FILEPATH)
# convert date columns to proper type
df["Global_Date"] = pd.to_datetime(df["Global_Date"])
df["Local_Date"] = pd.to_datetime(df["Local_Date"])
# extract part of data to use for training
df_training = df.iloc[:NUM_TRAINING_INSTANCES]
# extract part of data for testing (evaluating performance of trained model)
df_test = df.iloc[NUM_TRAINING_INSTANCES:]
# set the complete dataset to None so we don't accidentally use it anywhere below
df = None
# construct object that can compute features for us, based on training data
aggregate_features = AggregateFeatures(df_training)
# augment the training data with extra features.
aggregate_features.add_aggregate_features(df_training)
# augment the test data with extra features. In this case, also allow it to be used as history
# IMPORTANT: first we let our aggregate_features object do an unlabeled-data update from this test data
    # this unlabeled update won't "cheat" and use the labels, but early transactions in the test data can
# be used in feature engineering for later transactions in the same test data
aggregate_features.update_unlabeled(df_test)
aggregate_features.add_aggregate_features(df_test)
#print(df_training.head())
#print(df_test.head())
df_training.to_csv(join(utils_data.FOLDER_REAL_DATA, 'aggregate_features_training_data.csv'), index_label=False)
df_test.to_csv(join(utils_data.FOLDER_REAL_DATA, 'aggregate_features_test_data.csv'), index_label=False)
if __name__ == '__main__':
#import cProfile
#pr = cProfile.Profile()
#pr.enable()
run_test()
#pr.disable()
#pr.print_stats(sort='cumtime')
| mit |
xhochy/arrow | dev/tasks/conda-recipes/clean.py | 2 | 1741 | from distutils.version import LooseVersion
from subprocess import check_output, check_call
from typing import List
import json
import os
import pandas as pd
import sys
VERSIONS_TO_KEEP = 5
PLATFORMS = ["linux-64", "osx-64", "win-64"]
PACKAGES = ["pyarrow", "arrow-cpp"]
def packages_to_delete(package_name: str, platform: str) -> List[str]:
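    """
    Return the package specs in the arrow-nightlies channel that should be
    deleted for ``package_name`` on ``platform``, keeping only the newest
    VERSIONS_TO_KEEP versions per Python version.
    """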
env = os.environ.copy()
env["CONDA_SUBDIR"] = platform
pkgs_json = check_output(
[
"conda",
"search",
"--json",
"-c",
"arrow-nightlies",
"--override-channels",
package_name,
],
env=env,
)
pkgs = pd.DataFrame(json.loads(pkgs_json)[package_name])
pkgs["version"] = pkgs["version"].map(LooseVersion)
pkgs["py_version"] = pkgs["build"].str.slice(0, 4)
to_delete = []
for (subdir, python), group in pkgs.groupby(["subdir", "py_version"]):
group = group.sort_values(by="version", ascending=False)
if len(group) > VERSIONS_TO_KEEP:
del_candidates = group[VERSIONS_TO_KEEP:]
to_delete += (
f"arrow-nightlies/{package_name}/"
+ del_candidates["version"].astype(str)
+ del_candidates["url"].str.replace(
"https://conda.anaconda.org/arrow-nightlies", ""
)
).to_list()
return to_delete
if __name__ == "__main__":
to_delete = []
for platform in PLATFORMS:
for package in PACKAGES:
to_delete += packages_to_delete(package, platform)
for name in to_delete:
print(f"Deleting {name} …")
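        # Dry run by default: packages are only removed when FORCE is passed on the command line.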
if "FORCE" in sys.argv:
check_call(["anaconda", "remove", "-f", name])
| apache-2.0 |
lidakanari/NeuroM | examples/boxplot.py | 5 | 2688 | #!/usr/bin/env python
# Copyright (c) 2015, Ecole Polytechnique Federale de Lausanne, Blue Brain Project
# All rights reserved.
#
# This file is part of NeuroM <https://github.com/BlueBrain/NeuroM>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the names of
# its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
''' Box Plot function for multiple neurons
'''
from neurom.view import common
def boxplot(neurons, feature, new_fig=True, subplot=False):
'''
    Plot a box plot of the selected feature for a population of neurons.
    Each neuron contributes the distribution of values of the requested
    feature, which is drawn as a single box in the plot.
Parameters
----------
neurons : list
List of Neurons. Single neurons must be encapsulated in a list.
feature : str
The feature of interest.
Options
-------
    new_fig : bool
        Default is True, which draws the plot on a new matplotlib figure.
    subplot : bool
Default is False, which returns a matplotlib figure object. If True,
returns a matplotlib axis object, for use as a subplot.
'''
feature_values = [getattr(neu, 'get_' + feature)() for neu in neurons]
_, ax = common.get_figure(new_fig=new_fig, subplot=subplot)
ax.boxplot(feature_values)
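    # Label every box with a generic identifier; substitute real neuron names here if available.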
x_labels = ['neuron_id' for _ in neurons]
ax.set_xticklabels(x_labels)
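# Example usage (a minimal sketch, not part of the original example; it assumes
# the neurons were loaded with an API exposing ``get_<feature>`` accessors such
# as ``get_section_lengths``; ``load_my_neurons`` below is a hypothetical helper):
#
#     import matplotlib.pyplot as plt
#     nrns = load_my_neurons()
#     boxplot(nrns, 'section_lengths')
#     plt.show()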
| bsd-3-clause |
massmutual/scikit-learn | examples/linear_model/plot_ard.py | 248 | 2622 | """
==================================================
Automatic Relevance Determination Regression (ARD)
==================================================
Fit a regression model with Automatic Relevance Determination (ARD),
a Bayesian linear regression that prunes irrelevant features.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
The histogram of the estimated weights is very peaked, as a sparsity-inducing
prior is implied on the weights.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import ARDRegression, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
# Parameters of the example
np.random.seed(0)
n_samples, n_features = 100, 100
# Create Gaussian data
X = np.random.randn(n_samples, n_features)
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha_ of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the ARD Regression
clf = ARDRegression(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
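# Quick numeric check of the claim above (a sketch; exact numbers depend on the
# random seed): the ARD coefficients should be much closer to the sparse ground
# truth ``w`` than the OLS coefficients.
print("ARD coefficient squared error: %.3f" % np.sum((clf.coef_ - w) ** 2))
print("OLS coefficient squared error: %.3f" % np.sum((ols.coef_ - w) ** 2))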
###############################################################################
# Plot the true weights, the estimated weights and the histogram of the
# weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="ARD estimate")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc=1)
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |