repo_name (string, 7-92 chars) | path (string, 5-129 chars) | copies (201 classes) | size (string, 4-6 chars) | content (string, 1.03k-375k chars) | license (15 classes) |
---|---|---|---|---|---|
g2p/systems | lib/systems/context.py | 1 | 17949 | # vim: set fileencoding=utf-8 sw=2 ts=2 et :
from __future__ import absolute_import
from __future__ import with_statement
from logging import getLogger
import networkx as NX
import yaml
from systems.collector import Aggregate, CResource
from systems.registry import get_registry
from systems.typesystem import EResource, Transition, ResourceRef
__all__ = ('Realizer', )
LOGGER = getLogger(__name__)
DESC_LIMIT = 64
def describe(thing):
return '%s' % str(thing)[:DESC_LIMIT]
class CycleError(Exception):
pass
class Node(object):
def __init__(self):
if type(self) == Node:
raise TypeError
def __repr__(self):
return '<%s>' % self
def __str__(self):
return type(self).__name__
class CheckPointNode(Node):
pass
class ExpandableNode(Node):
def __init__(self, res):
super(ExpandableNode, self).__init__()
if type(self) == ExpandableNode:
# Abstract class
raise TypeError
self._res = res
class BeforeExpandableNode(ExpandableNode):
def __str__(self):
return 'Before %s' % self._res
class AfterExpandableNode(ExpandableNode):
def __str__(self):
return 'After %s' % self._res
class GraphFirstNode(Node, yaml.YAMLObject):
yaml_tag = u'GraphFirstNode'
class GraphLastNode(Node, yaml.YAMLObject):
yaml_tag = u'GraphLastNode'
node_types = (CheckPointNode, BeforeExpandableNode, AfterExpandableNode,
GraphFirstNode, GraphLastNode,
Transition, Aggregate, CResource, EResource, ResourceRef)
class ResourceGraph(yaml.YAMLObject):
"""
A graph of resources and transitions linked by dependencies.
Each resource is ultimately represented by a pair of sentinel nodes in the transition graph.
Invariant: directed, acyclic.
"""
def __init__(self, top=None):
self._graph = NX.DiGraph()
self._first = GraphFirstNode()
self._last = GraphLastNode()
self._graph.add_edge(self._first, self._last)
# Contains CResource and EResource, despite the name.
# Used to enforce max one resource per id.
self.__expandables = {}
# Received references, by name.
self.__received_refs = {}
# Nodes that have already been processed (expanded or collected).
self.__processed = set()
# Pre-bound args passed by ref. Allows putting extra depends on them.
if top is not None:
if not isinstance(top, ResourceGraph):
raise TypeError(top, ResourceGraph)
self.__top = top
else:
self.__top = self
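# Right after construction the graph contains only the two sentinels and a
# single first -> last edge; every node added later is wired in between them
# (first -> node -> last), so _first and _last bracket any topological sort.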
yaml_tag = u'!ResourceGraph'
@classmethod
def from_yaml(cls, loader, ynode):
rg = cls()
# Deep because of aliases and anchors, I think.
mp = loader.construct_mapping(ynode, deep=True)
pred_rels = mp['nodes']
for rel in pred_rels:
rg._add_node(rel['node'], depends=rel['depends'])
return rg
@classmethod
def to_yaml(cls, dumper, rg):
# This is incomplete.
pred_rels = [{'node': node, 'depends': list(depends), }
for (node, depends) in rg._iter_pred_rels()]
return dumper.represent_mapping(cls.yaml_tag, {
'nodes': pred_rels,
})
def _iter_node_preds(self, node0):
return (node
for node in self._graph.predecessors_iter(node0)
if node not in (self._first, self._last))
def _iter_pred_rels(self):
return ((node, self._iter_node_preds(node))
for node in self.sorted_nodes()
if node not in (self._first, self._last))
def sorted_nodes(self):
return NX.topological_sort(self._graph)
def sorted_transitions(self):
return [n for n in self.sorted_nodes()
if isinstance(n, Transition)]
def iter_uncollected_resources(self):
for nod in self._graph.nodes_iter():
if isinstance(nod, CResource):
if nod not in self.__processed:
yield nod
def iter_unexpanded_resources(self):
for nod in self._graph.nodes_iter():
if isinstance(nod, EResource):
if nod not in self.__processed:
yield nod
def iter_unexpanded_aggregates(self):
for agg in self._graph.nodes_iter():
if isinstance(agg, Aggregate):
if agg not in self.__processed:
yield agg
def iter_unprocessed(self):
for nod in self.iter_uncollected_resources():
yield nod
for nod in self.iter_unexpanded_resources():
yield nod
for nod in self.iter_unexpanded_aggregates():
yield nod
def has_unprocessed(self):
l = list(self.iter_unprocessed())
return bool(l) # Tests for non-emptiness
def require_acyclic(self):
if not NX.is_directed_acyclic_graph(self._graph):
# XXX NX doesn't have a 1-line method for listing those cycles
raise CycleError
def _add_node(self, node, depends=()):
if not isinstance(node, node_types):
raise TypeError(node, node_types)
self._graph.add_node(node)
self._graph.add_edge(self._first, node)
self._graph.add_edge(node, self._last)
for dep in depends:
depn = self._intern(dep)
self._add_node_dep(depn, node)
return node
def add_checkpoint(self, depends=()):
return self._add_node(CheckPointNode(), depends)
def add_transition(self, transition, depends=()):
if not isinstance(transition, Transition):
raise TypeError(transition, Transition)
return self._add_node(transition, depends)
def _add_aggregate(self, aggregate, depends=()):
if not isinstance(aggregate, Aggregate):
raise TypeError(aggregate, Aggregate)
return self._add_node(aggregate, depends)
def add_resource(self, resource, depends=()):
"""
Add a resource.
If an identical resource exists, it is returned.
"""
if not isinstance(resource, (CResource, EResource)):
raise TypeError(resource, (CResource, EResource))
if resource.identity in self.__expandables:
# We have this id already.
# Either it's the exact same resource, or a KeyError is thrown.
resource = self._intern(resource)
# XXX Need to bypass _intern for already expanded.
# XXX When we use add_to_top, we sometimes have to deal
# with a resource that's already been expanded.
# Those are not in the graph anymore. How do we refer to them?
else:
self.__expandables[resource.identity] = resource
# Even if already there, we need to add the depends.
resource = self._add_node(resource, depends)
# If it was already there, note that we alias it.
return self.make_ref(resource)
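# A minimal usage sketch (hypothetical: ``MyResource`` and ``MyTransition``
# stand in for concrete CResource/Transition subclasses defined elsewhere):
#
#   g = ResourceGraph()
#   ref_a = g.add_resource(MyResource(name='a'))
#   ref_b = g.add_resource(MyResource(name='b'), depends=(ref_a, ))
#   g.add_transition(MyTransition(target=ref_b), depends=(ref_b, ))
#   for tr in g.sorted_transitions():
#       tr.realize()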
def make_ref(self, res, depends=()):
res = self._intern(res)
if not isinstance(res, (CResource, EResource)):
raise TypeError(res, (CResource, EResource))
depends = list(depends)
depends.append(res)
return self._add_node(ResourceRef(res), depends)
def make_alias_ref(self, ref, depends=()):
ref = self._intern(ref)
if not isinstance(ref, ResourceRef):
raise TypeError(ref, ResourceRef)
depends = list(depends)
depends.append(ref)
return self._add_node(ResourceRef(ref.unref), depends)
def add_to_top(self, res):
"""
Add a resource to the top ResourceGraph.
Use it for things that should not necessarily be ordered
after the outside dependencies of the current graph.
"""
ref = self.__top.add_resource(res)
return self._add_node(ref)
def _add_node_dep(self, node0, node1):
if not isinstance(node0, node_types):
raise TypeError(node0, node_types)
if not isinstance(node1, node_types):
raise TypeError(node1, node_types)
if not self._graph.has_node(node0):
raise KeyError(node0)
if not self._graph.has_node(node1):
raise KeyError(node1)
if self._graph.has_edge(node0, node1):
return False
if node0 == node1:
# Disallow self-loops to keep acyclic invariant.
# Also they don't make sense.
raise ValueError(node0)
# Invariant check
rev_path = NX.shortest_path(self._graph, node1, node0)
if rev_path is not False:
raise CycleError(rev_path)
self._graph.add_edge(node0, node1)
return True
def _intern(self, thing):
if not isinstance(thing, node_types):
raise TypeError
if thing not in self._graph:
raise KeyError(thing)
return thing
def add_dependency(self, elem0, elem1):
node0 = self._intern(elem0)
node1 = self._intern(elem1)
return self._add_node_dep(node0, node1)
def _is_direct_rconnect(self, r0, r1):
s0 = self._intern(r0)
s1 = self._intern(r1)
# shortest_path is also a test for connectedness.
return bool(NX.shortest_path(self._graph, s0, s1))
def resources_connected(self, r0, r1):
return self._is_direct_rconnect(r0, r1) \
or self._is_direct_rconnect(r1, r0)
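# Note: with the pre-1.0 NetworkX API used throughout this module,
# shortest_path returns False when no path exists, so the bool() test above
# doubles as a reachability check (the same trick as in _add_node_dep).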
def draw(self, fname):
return self.draw_agraph(fname)
def draw_agraph(self, fname):
# XXX pygraphviz has steep dependencies (x11 libs)
# and recommends (texlive) for a headless box.
# We duplicate the graph, otherwise networkx / pygraphviz
# would make a lossy conversion (sometimes refusing to convert), by adding
# nodes as their string representation. Madness, I know.
gr2 = NX.create_empty_copy(self._graph, False)
for node in self._graph.nodes_iter():
gr2.add_node(id(node))
for (n0, n1) in self._graph.edges_iter():
gr2.add_edge(id(n0), id(n1))
names = dict((id(node), { 'label': describe(node)})
for node in self._graph.nodes_iter())
gr2.delete_node(id(self._first))
gr2.delete_node(id(self._last))
g = NX.to_agraph(gr2, {
'graph': {
'nodesep': '0.2',
'rankdir': 'TB',
'ranksep': '0.5',
},
'node': {
'shape': 'box',
},
},
names)
g.write(fname + '.dot')
# Dot is good for DAGs.
g.layout(prog='dot')
g.draw(fname + '.svg')
with open(fname + '.yaml', 'w') as f:
yaml.dump(self, f)
# Fails with the expanded graph, due to instancemethod
#yaml.load(yaml.dump(self))
def draw_matplotlib(self, fname):
# Pyplot is stateful and awkward to use.
import matplotlib.pyplot as P
# Disable hold or it definitely won't work (probably a bug).
P.hold(False)
NX.draw(self._graph)
P.savefig(fname)
def collect_resources(self, r0s, r1):
"""
Replace an iterable of resources with one new resource.
May break the acyclic invariant, caveat emptor.
"""
# The invariant is kept iff the r0s don't have paths linking them.
# For our use case (collectors), we could allow paths provided they are
# internal to r0s. This introduces self-loops that we would then remove.
for r0 in r0s:
r0 = self._intern(r0)
if r0 in self.__processed:
raise RuntimeError
if r1 in self._graph:
raise ValueError(r1)
r1 = self._add_aggregate(r1)
for r0 in r0s:
r0 = self._intern(r0)
self._move_edges(r0, r1)
self.__processed.add(r0)
self.require_acyclic()
def _move_edges(self, n0, n1):
if n0 == n1:
raise RuntimeError
n0 = self._intern(n0)
n1 = self._intern(n1)
# A list is used as a temporary snapshot, since the graph is mutated in the loop.
# Add after delete, in case of the same edge.
for pred in list(self._graph.predecessors_iter(n0)):
self._graph.delete_edge(pred, n0)
self._graph.add_edge(pred, n1)
for succ in list(self._graph.successors_iter(n0)):
self._graph.delete_edge(n0, succ)
self._graph.add_edge(n1, succ)
self._graph.delete_node(n0)
# Can't undo. Invariant will stay broken.
def _split_node(self, res):
res = self._intern(res)
before = self._add_node(BeforeExpandableNode(res))
after = self._add_node(AfterExpandableNode(res))
self._graph.add_edge(before, after)
for pred in list(self._graph.predecessors_iter(res)):
self._graph.delete_edge(pred, res)
self._graph.add_edge(pred, before)
for succ in list(self._graph.successors_iter(res)):
self._graph.delete_edge(res, succ)
self._graph.add_edge(after, succ)
self._graph.delete_node(res)
return before, after
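# Splitting rewrites pred -> res -> succ into
# pred -> Before(res) -> After(res) -> succ; expand_resource below then
# splices the expansion subgraph in between the Before and After sentinels.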
def _receive_by_ref(self, name, ref):
if name in self.__received_refs:
raise RuntimeError(name, ref)
ref = self._add_node(ref)
self.__received_refs[name] = ref
return ref
def _pass_by_ref(self, subgraph, name, ref):
# The origin/value distinction is important
# for aliased arguments (two refs, same val).
ref = self._intern(ref)
if not isinstance(ref, ResourceRef):
raise TypeError(ref, ResourceRef)
subgraph._receive_by_ref(name, ref)
def expand_resource(self, res):
"""
Replace res by a small resource graph.
The resource_graph is inserted in the main graph
between the sentinels that represent the resource.
"""
res = self._intern(res)
# We're processing from the outside in.
if res in self.__processed:
raise RuntimeError
resource_graph = ResourceGraph(self.__top)
if isinstance(res, EResource):
for (name, ref) in res.iter_passed_by_ref():
# ref will be present in both graphs.
self._pass_by_ref(resource_graph, name, ref)
elif isinstance(res, Aggregate):
pass
else:
raise TypeError(res)
res.expand_into(resource_graph)
# We expand from the outside in
if bool(resource_graph.__processed):
raise RuntimeError
# Do not skip sentinels.
for n in resource_graph._graph.nodes_iter():
self._add_node(n)
for (n0, n1) in resource_graph._graph.edges_iter():
self._add_node_dep(n0, n1)
for (id1, res1) in resource_graph.__expandables.iteritems():
# We expand from the outside in.
assert res1 not in self.__processed
if id1 in self.__expandables:
# Pass by reference if you must use the same resource
# in different contexts.
raise RuntimeError('ResourceBase collision.', res, res1)
else:
self.__expandables[id1] = res1
before, after = self._split_node(res)
self.__processed.add(res)
self._move_edges(resource_graph._first, before)
self._move_edges(resource_graph._last, after)
# What may break the invariant:
# passing a ref to res while also making res depend on ref;
# the ref then ends up on both sides of res's Before node.
self.require_acyclic()
class Realizer(object):
"""
A graph of realizables linked by dependencies.
"""
def __init__(self, expandable):
self.__resources = ResourceGraph()
self.__expandable = expandable
self.__state = 'init'
def require_state(self, state):
"""
Raise an exception if we are not in the required state.
"""
if self.__state != state:
raise RuntimeError(u'Realizer state should be «%s»' % state)
def ensure_frozen(self):
"""
Build the finished dependency graph.
Merge identical realizables, collect what can be.
"""
if self.__state == 'frozen':
return
# Order is important
self.require_state('init')
self.__expandable.expand_into(self.__resources)
#self.__resources.draw('/tmp/freezing')
self._expand()
#self.__resources.draw('/tmp/pre-collect')
self._collect()
self._expand_aggregates()
assert not bool(list(self.__resources.iter_unprocessed()))
self.__state = 'frozen'
#self.__resources.draw('/tmp/frozen')
def _collect(self):
# Collects compatible nodes into merged nodes.
def can_merge(part0, part1):
for n0 in part0:
for n1 in part1:
if self.__resources.resources_connected(n0, n1):
return False
return True
def possibly_merge(partition):
# Merge once if possible. Return True if a merge happened.
e = dict(enumerate(partition))
n = len(partition)
# Loop over the triangle of unordered pairs
for i in xrange(n):
for j in xrange(i + 1, n):
part0, part1 = e[i], e[j]
if can_merge(part0, part1):
partition.add(part0.union(part1))
partition.remove(part0)
partition.remove(part1)
return True
return False
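# The loop below repeatedly applies possibly_merge until no two parts can be
# merged without joining resources that are already connected by a dependency
# path (which would create a cycle once they were collapsed into one node).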
reg = get_registry()
for collector in reg.collectors:
# Pre-partition is made of parts acceptable for the collector.
pre_partition = collector.partition(
[r for r in self.__resources.iter_uncollected_resources()
if collector.filter(r)])
for part in pre_partition:
# Collector parts are split again, the sub-parts are merged
# when dependencies allow.
# Not a particularly efficient algorithm, just simple.
# Gives one solution among many possibilities.
partition = set(frozenset((r, )) for r in part)
while possibly_merge(partition):
pass
# Let the collector handle the rest
for part in partition:
if not bool(part):
# Test for emptiness.
# Aggregate even singletons.
continue
merged = collector.collect(part)
self.__resources.collect_resources(part, merged)
assert not bool(list(self.__resources.iter_uncollected_resources()))
def _expand(self):
# Poor man's recursion
while True:
fresh = set(r
for r in self.__resources.iter_unexpanded_resources())
if not fresh: # Test for emptiness
break
for r in fresh:
self.__resources.expand_resource(r)
assert not bool(list(self.__resources.iter_unexpanded_resources()))
def _expand_aggregates(self):
for a in list(self.__resources.iter_unexpanded_aggregates()):
self.__resources.expand_resource(a)
assert not bool(list(self.__resources.iter_unexpanded_aggregates()))
# Enforce the rule that aggregates can only expand into transitions.
if self.__resources.has_unprocessed():
raise RuntimeError(list(self.__resources.iter_unprocessed()))
def realize(self):
"""
Realize all realizables and transitions in dependency order.
"""
self.ensure_frozen()
for t in self.__resources.sorted_transitions():
t.realize()
self.__state = 'realized'
| gpl-2.0 |
Lyleo/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/numerix/__init__.py | 69 | 5473 | """
numerix imports either Numeric or numarray based on various selectors.
0. If the value "--numpy", "--numarray" or "--Numeric" is specified on the
command line, then numerix imports the specified
array package.
1. The value of numerix in matplotlibrc: either Numeric or numarray
2. If none of the above is done, the default array package is Numeric.
Because the matplotlibrc always provides *some* value for numerix
(it has its own system of default values), this default is most
likely never used.
To summarize: the commandline is examined first, the rc file second,
and the default array package is Numeric.
"""
import sys, os, struct
from matplotlib import rcParams, verbose
which = None, None
use_maskedarray = None
# First, see if --numarray or --Numeric was specified on the command
# line:
for a in sys.argv:
if a in ["--Numeric", "--numeric", "--NUMERIC",
"--Numarray", "--numarray", "--NUMARRAY",
"--NumPy", "--numpy", "--NUMPY", "--Numpy",
]:
which = a[2:], "command line"
if a == "--maskedarray":
use_maskedarray = True
if a == "--ma":
use_maskedarray = False
try: del a
except NameError: pass
if which[0] is None:
try: # In theory, rcParams always has *some* value for numerix.
which = rcParams['numerix'], "rc"
except KeyError:
pass
if use_maskedarray is None:
try:
use_maskedarray = rcParams['maskedarray']
except KeyError:
use_maskedarray = False
# If all the above fail, default to Numeric. Most likely not used.
if which[0] is None:
which = "numeric", "defaulted"
which = which[0].strip().lower(), which[1]
if which[0] not in ["numeric", "numarray", "numpy"]:
raise ValueError("numerix selector must be either 'Numeric', 'numarray', or 'numpy' but the value obtained from the %s was '%s'." % (which[1], which[0]))
if which[0] == "numarray":
import warnings
warnings.warn("numarray use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from na_imports import *
from numarray import *
from _na_imports import nx, inf, infinity, Infinity, Matrix, isnan, all
from numarray.numeric import nonzero
from numarray.convolve import cross_correlate, convolve
import numarray
version = 'numarray %s'%numarray.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numeric":
import warnings
warnings.warn("Numeric use as a numerix backed for matplotlib is deprecated",
DeprecationWarning, stacklevel=1)
#from nc_imports import *
from Numeric import *
from _nc_imports import nx, inf, infinity, Infinity, isnan, all, any
from Matrix import Matrix
import Numeric
version = 'Numeric %s'%Numeric.__version__
nan = struct.unpack('d', struct.pack('Q', 0x7ff8000000000000))[0]
elif which[0] == "numpy":
try:
import numpy.oldnumeric as numpy
from numpy.oldnumeric import *
except ImportError:
import numpy
from numpy import *
print 'except asarray', asarray
from _sp_imports import nx, infinity, rand, randn, isnan, all, any
from _sp_imports import UInt8, UInt16, UInt32, Infinity
try:
from numpy.oldnumeric.matrix import Matrix
except ImportError:
Matrix = matrix
version = 'numpy %s' % numpy.__version__
from numpy import nan
else:
raise RuntimeError("invalid numerix selector")
# Some changes are only applicable to the new numpy:
if (which[0] == 'numarray' or
which[0] == 'numeric'):
from mlab import amin, amax
newaxis = NewAxis
def typecode(a):
return a.typecode()
def iscontiguous(a):
return a.iscontiguous()
def byteswapped(a):
return a.byteswapped()
def itemsize(a):
return a.itemsize()
def angle(a):
return arctan2(a.imag, a.real)
else:
# We've already checked for a valid numerix selector,
# so assume numpy.
from mlab import amin, amax
newaxis = NewAxis
from numpy import angle
def typecode(a):
return a.dtype.char
def iscontiguous(a):
return a.flags.contiguous
def byteswapped(a):
return a.byteswap()
def itemsize(a):
return a.itemsize
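# These thin wrappers expose the old Numeric/numarray-style helpers
# (typecode, iscontiguous, byteswapped, itemsize) on top of numpy arrays,
# so the rest of matplotlib can stay agnostic about the selected backend.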
verbose.report('numerix %s'%version)
# a bug fix for blas numeric suggested by Fernando Perez
matrixmultiply=dot
asum = sum
def _import_fail_message(module, version):
"""Prints a message when the array package specific version of an extension
fails to import correctly.
"""
_dict = { "which" : which[0],
"module" : module,
"specific" : version + module
}
print """
The import of the %(which)s version of the %(module)s module,
%(specific)s, failed. This is either because %(which)s was
unavailable when matplotlib was compiled, because a dependency of
%(specific)s could not be satisfied, or because the build flag for
this module was turned off in setup.py. If it appears that
%(specific)s was not built, make sure you have a working copy of
%(which)s and then re-install matplotlib. Otherwise, the following
traceback gives more details:\n""" % _dict
g = globals()
l = locals()
__import__('ma', g, l)
__import__('fft', g, l)
__import__('linear_algebra', g, l)
__import__('random_array', g, l)
__import__('mlab', g, l)
la = linear_algebra
ra = random_array
| gpl-3.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/scipy/signal/spectral.py | 4 | 66089 | """Tools for spectral analysis.
"""
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy import fftpack
from . import signaltools
from .windows import get_window
from ._spectral import _lombscargle
from ._arraytools import const_ext, even_ext, odd_ext, zero_ext
import warnings
from scipy._lib.six import string_types
__all__ = ['periodogram', 'welch', 'lombscargle', 'csd', 'coherence',
'spectrogram', 'stft', 'istft', 'check_COLA']
def lombscargle(x,
y,
freqs,
precenter=False,
normalize=False):
"""
lombscargle(x, y, freqs)
Computes the Lomb-Scargle periodogram.
The Lomb-Scargle periodogram was developed by Lomb [1]_ and further
extended by Scargle [2]_ to find, and test the significance of weak
periodic signals with uneven temporal sampling.
When *normalize* is False (default), the computed periodogram
is unnormalized; it takes the value ``(A**2) * N/4`` for a harmonic
signal with amplitude A for sufficiently large N.
When *normalize* is True, the computed periodogram is normalized by
the residuals of the data around a constant reference model (at zero).
Input arrays should be one-dimensional and will be cast to float64.
Parameters
----------
x : array_like
Sample times.
y : array_like
Measurement values.
freqs : array_like
Angular frequencies for output periodogram.
precenter : bool, optional
Pre-center amplitudes by subtracting the mean.
normalize : bool, optional
Compute normalized periodogram.
Returns
-------
pgram : array_like
Lomb-Scargle periodogram.
Raises
------
ValueError
If the input arrays `x` and `y` do not have the same shape.
Notes
-----
This subroutine calculates the periodogram using a slightly
modified algorithm due to Townsend [3]_ which allows the
periodogram to be calculated using only a single pass through
the input arrays for each frequency.
The algorithm running time scales roughly as O(x * freqs) or O(N^2)
for a large number of samples and frequencies.
References
----------
.. [1] N.R. Lomb "Least-squares frequency analysis of unequally spaced
data", Astrophysics and Space Science, vol 39, pp. 447-462, 1976
.. [2] J.D. Scargle "Studies in astronomical time series analysis. II -
Statistical aspects of spectral analysis of unevenly spaced data",
The Astrophysical Journal, vol 263, pp. 835-853, 1982
.. [3] R.H.D. Townsend, "Fast calculation of the Lomb-Scargle
periodogram using graphics processing units.", The Astrophysical
Journal Supplement Series, vol 191, pp. 247-253, 2010
Examples
--------
>>> import scipy.signal
>>> import matplotlib.pyplot as plt
First define some input parameters for the signal:
>>> A = 2.
>>> w = 1.
>>> phi = 0.5 * np.pi
>>> nin = 1000
>>> nout = 100000
>>> frac_points = 0.9 # Fraction of points to select
Randomly select a fraction of an array with timesteps:
>>> r = np.random.rand(nin)
>>> x = np.linspace(0.01, 10*np.pi, nin)
>>> x = x[r >= frac_points]
Plot a sine wave for the selected times:
>>> y = A * np.sin(w*x+phi)
Define the array of frequencies for which to compute the periodogram:
>>> f = np.linspace(0.01, 10, nout)
Calculate Lomb-Scargle periodogram:
>>> import scipy.signal as signal
>>> pgram = signal.lombscargle(x, y, f, normalize=True)
Now make a plot of the input data:
>>> plt.subplot(2, 1, 1)
>>> plt.plot(x, y, 'b+')
Then plot the normalized periodogram:
>>> plt.subplot(2, 1, 2)
>>> plt.plot(f, pgram)
>>> plt.show()
"""
x = np.asarray(x, dtype=np.float64)
y = np.asarray(y, dtype=np.float64)
freqs = np.asarray(freqs, dtype=np.float64)
assert x.ndim == 1
assert y.ndim == 1
assert freqs.ndim == 1
if precenter:
pgram = _lombscargle(x, y - y.mean(), freqs)
else:
pgram = _lombscargle(x, y, freqs)
if normalize:
pgram *= 2 / np.dot(y, y)
return pgram
def periodogram(x, fs=1.0, window='boxcar', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimate power spectral density using a periodogram.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to 'boxcar'.
nfft : int, optional
Length of the FFT used. If `None` the length of `x` will be
used.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of `x`.
Notes
-----
.. versionadded:: 0.12.0
See Also
--------
welch: Estimate power spectral density using Welch's method
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.periodogram(x, fs)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([1e-7, 1e2])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[25000:])
0.00099728892368242854
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.periodogram(x, fs, 'flattop', scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.ylim([1e-4, 1e1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
x = np.asarray(x)
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape)
if window is None:
window = 'boxcar'
if nfft is None:
nperseg = x.shape[axis]
elif nfft == x.shape[axis]:
nperseg = nfft
elif nfft > x.shape[axis]:
nperseg = x.shape[axis]
elif nfft < x.shape[axis]:
s = [np.s_[:]]*len(x.shape)
s[axis] = np.s_[:nfft]
x = x[s]
nperseg = nfft
nfft = None
return welch(x, fs, window, nperseg, 0, nfft, detrend, return_onesided,
scaling, axis)
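# Note that periodogram above reduces to a single-segment welch call:
# nperseg spans the whole (possibly nfft-truncated) signal and noverlap is 0,
# so no averaging across segments takes place.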
def welch(x, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density',
axis=-1):
r"""
Estimate power spectral density using Welch's method.
Welch's method [1]_ computes an estimate of the power spectral
density by dividing the data into overlapping segments, computing a
modified periodogram for each segment and averaging the
periodograms.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Pxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'
axis : int, optional
Axis along which the periodogram is computed; the default is
over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxx : ndarray
Power spectral density or power spectrum of x.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window, an overlap of
50% is a reasonable trade-off between accurately estimating the
signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
If `noverlap` is 0, this method is equivalent to Bartlett's method
[2]_.
.. versionadded:: 0.12.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra",
Biometrika, vol. 37, pp. 1-16, 1950.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
>>> np.random.seed(1234)
Generate a test signal, a 2 Vrms sine wave at 1234 Hz, corrupted by
0.001 V**2/Hz of white noise sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2*np.sqrt(2)
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> x = amp*np.sin(2*np.pi*freq*time)
>>> x += np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
Compute and plot the power spectral density.
>>> f, Pxx_den = signal.welch(x, fs, nperseg=1024)
>>> plt.semilogy(f, Pxx_den)
>>> plt.ylim([0.5e-3, 1])
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('PSD [V**2/Hz]')
>>> plt.show()
If we average the last half of the spectral density, to exclude the
peak, we can recover the noise power on the signal.
>>> np.mean(Pxx_den[256:])
0.0009924865443739191
Now compute and plot the power spectrum.
>>> f, Pxx_spec = signal.welch(x, fs, 'flattop', 1024, scaling='spectrum')
>>> plt.figure()
>>> plt.semilogy(f, np.sqrt(Pxx_spec))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Linear spectrum [V RMS]')
>>> plt.show()
The peak height in the power spectrum is an estimate of the RMS
amplitude.
>>> np.sqrt(Pxx_spec.max())
2.0077340678640727
"""
freqs, Pxx = csd(x, x, fs, window, nperseg, noverlap, nfft, detrend,
return_onesided, scaling, axis)
return freqs, Pxx.real
def csd(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
detrend='constant', return_onesided=True, scaling='density', axis=-1):
r"""
Estimate the cross power spectral density, Pxy, using Welch's
method.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap: int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and `fs` is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the CSD is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Pxy : ndarray
Cross spectral density or cross power spectrum of x,y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method. [Equivalent to
csd(x,x)]
coherence: Magnitude squared coherence by Welch's method.
Notes
--------
By convention, Pxy is computed with the conjugate FFT of X
multiplied by the FFT of Y.
If the input series differ in length, the shorter series will be
zero-padded to match.
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window, an overlap of
50% is a reasonable trade-off between accurately estimating the
signal power and not over-counting any of the data. Narrower
windows may require a larger overlap.
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Rabiner, Lawrence R., and B. Gold. "Theory and Application of
Digital Signal Processing" Prentice-Hall, pp. 414-419, 1975
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the magnitude of the cross spectral density.
>>> f, Pxy = signal.csd(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, np.abs(Pxy))
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('CSD [V**2/Hz]')
>>> plt.show()
"""
freqs, _, Pxy = _spectral_helper(x, y, fs, window, nperseg, noverlap, nfft,
detrend, return_onesided, scaling, axis,
mode='psd')
# Average over windows.
if len(Pxy.shape) >= 2 and Pxy.size > 0:
if Pxy.shape[-1] > 1:
Pxy = Pxy.mean(axis=-1)
else:
Pxy = np.reshape(Pxy, Pxy.shape[:-1])
return freqs, Pxy
def spectrogram(x, fs=1.0, window=('tukey',.25), nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 8``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Sxx` has units of V**2/Hz and computing the power
spectrum ('spectrum') where `Sxx` has units of V**2, if `x`
is measured in V and `fs` is measured in Hz. Defaults to
'density'.
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are
['psd', 'complex', 'magnitude', 'angle', 'phase']. 'complex' is
equivalent to the output of `stft` with no padding or boundary
extension. 'magnitude' returns the absolute magnitude of the
STFT. 'angle' and 'phase' return the complex angle of the STFT,
with and without unwrapping, respectively.
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds
to the segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to Welch's method, where the
entire data stream is averaged over, one may wish to use a smaller
overlap (or perhaps none at all) when computing a spectrogram, to
maintain some statistical independence between individual segments.
It is for this reason that the default window is a Tukey window with
1/8th of a window's length overlap at each end.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the spectrogram.
>>> f, t, Sxx = signal.spectrogram(x, fs)
>>> plt.pcolormesh(t, f, Sxx)
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
modelist = ['psd', 'complex', 'magnitude', 'angle', 'phase']
if mode not in modelist:
raise ValueError('unknown value for mode {}, must be one of {}'
.format(mode, modelist))
# need to set default for nperseg before setting default for noverlap below
window, nperseg = _triage_segments(window, nperseg,
input_length=x.shape[axis])
# Less overlap than welch, so samples are more statistically independent
if noverlap is None:
noverlap = nperseg // 8
if mode == 'psd':
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='psd')
else:
freqs, time, Sxx = _spectral_helper(x, x, fs, window, nperseg,
noverlap, nfft, detrend,
return_onesided, scaling, axis,
mode='stft')
if mode == 'magnitude':
Sxx = np.abs(Sxx)
elif mode in ['angle', 'phase']:
Sxx = np.angle(Sxx)
if mode == 'phase':
# Sxx has one additional dimension for time strides
if axis < 0:
axis -= 1
Sxx = np.unwrap(Sxx, axis=axis)
# mode =='complex' is same as `stft`, doesn't need modification
return freqs, time, Sxx
def check_COLA(window, nperseg, noverlap, tol=1e-10):
r"""
Check whether the Constant OverLap Add (COLA) constraint is met
Parameters
----------
window : str or tuple or array_like
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg.
nperseg : int
Length of each segment.
noverlap : int
Number of points to overlap between segments.
tol : float, optional
The maximum allowed deviation of a bin's weighted sum from the
median bin sum.
Returns
-------
verdict : bool
`True` if chosen combination satisfies COLA within `tol`,
`False` otherwise
See Also
--------
stft: Short Time Fourier Transform
istft: Inverse Short Time Fourier Transform
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction.
Some examples of windows that satisfy COLA:
- Rectangular window at overlap of 0, 1/2, 2/3, 3/4, ...
- Bartlett window at overlap of 1/2, 3/4, 5/6, ...
- Hann window at 1/2, 2/3, 3/4, ...
- Any Blackman family window at 2/3 overlap
- Any window with ``noverlap = nperseg-1``
A very comprehensive list of other windows may be found in [2]_,
wherein the COLA condition is satisfied when the "Amplitude
Flatness" is unity.
.. versionadded:: 0.19.0
References
----------
.. [1] Julius O. Smith III, "Spectral Audio Signal Processing", W3K
Publishing, 2011, ISBN 978-0-9745607-3-1.
.. [2] G. Heinzel, A. Ruediger and R. Schilling, "Spectrum and
spectral density estimation by the Discrete Fourier transform
(DFT), including a comprehensive list of window functions and
some new at-top windows", 2002,
http://hdl.handle.net/11858/00-001M-0000-0013-557A-5
Examples
--------
>>> from scipy import signal
Confirm COLA condition for rectangular window of 75% (3/4) overlap:
>>> signal.check_COLA(signal.boxcar(100), 100, 75)
True
COLA is not true for 25% (1/4) overlap, though:
>>> signal.check_COLA(signal.boxcar(100), 100, 25)
False
"Symmetrical" Hann window (for filter design) is not COLA:
>>> signal.check_COLA(signal.hann(120, sym=True), 120, 60)
False
"Periodic" or "DFT-even" Hann window (for FFT analysis) is COLA for
overlap of 1/2, 2/3, 3/4, etc.:
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 60)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 80)
True
>>> signal.check_COLA(signal.hann(120, sym=False), 120, 90)
True
"""
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
noverlap = int(noverlap)
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
step = nperseg - noverlap
binsums = sum(win[ii*step:(ii+1)*step] for ii in range(nperseg//step))
if nperseg % step != 0:
binsums[:nperseg % step] += win[-(nperseg % step):]
deviation = binsums - np.median(binsums)
return np.max(np.abs(deviation)) < tol
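# A small worked example of the bin-sum check above: for a boxcar window with
# nperseg=4 and noverlap=2, step=2 and binsums = win[0:2] + win[2:4] = [2., 2.],
# so the deviation from the median is 0 and COLA holds. With noverlap=1
# (step=3) the tail wraps into the first bin, giving binsums = [2., 1., 1.],
# and the check fails.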
def stft(x, fs=1.0, window='hann', nperseg=256, noverlap=None, nfft=None,
detrend=False, return_onesided=True, boundary='zeros', padded=True,
axis=-1):
r"""
Compute the Short Time Fourier Transform (STFT).
STFTs can be used as a way of quantifying the change of a
nonstationary signal's frequency and phase content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`. When
specified, the COLA constraint must be met (see Notes below).
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to `False`.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned. Defaults to
`True`.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
'zeros', for zero padding extension. I.e. ``[1, 2, 3, 4]`` is
extended to ``[0, 1, 2, 3, 4, 0]`` for ``nperseg=3``.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `True`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`, as is the
default.
axis : int, optional
Axis along which the STFT is computed; the default is over the
last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Zxx : ndarray
STFT of `x`. By default, the last axis of `Zxx` corresponds
to the segment times.
See Also
--------
istft: Inverse Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
welch: Power spectral density by Welch's method.
spectrogram: Spectrogram by Welch's method.
csd: Cross spectral density by Welch's method.
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
Notes
-----
In order to enable inversion of an STFT via the inverse STFT in
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA), and the input signal must have complete
windowing coverage (i.e. ``(x.shape[axis] - nperseg) %
(nperseg-noverlap) == 0``). The `padded` argument may be used to
accomplish this.
The COLA constraint ensures that every point in the input data is
equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim, "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE Trans. Acoust.,
Speech, Signal Process., 1984, 10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave whose frequency is slowly
modulated around 3kHz, corrupted by white noise of exponentially
decreasing magnitude sampled at 10 kHz.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.01 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> mod = 500*np.cos(2*np.pi*0.25*time)
>>> carrier = amp * np.sin(2*np.pi*3e3*time + mod)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> noise *= np.exp(-time/5)
>>> x = carrier + noise
Compute and plot the STFT's magnitude.
>>> f, t, Zxx = signal.stft(x, fs, nperseg=1000)
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.show()
"""
freqs, time, Zxx = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided,
scaling='spectrum', axis=axis,
mode='stft', boundary=boundary,
padded=padded)
return freqs, time, Zxx
def istft(Zxx, fs=1.0, window='hann', nperseg=None, noverlap=None, nfft=None,
input_onesided=True, boundary=True, time_axis=-1, freq_axis=-2):
r"""
Perform the inverse Short Time Fourier transform (iSTFT).
Parameters
----------
Zxx : array_like
STFT of the signal to be reconstructed. If a purely real array
is passed, it will be cast to a complex data type.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window. Must match the window used to generate the
STFT for faithful inversion.
nperseg : int, optional
Number of data points corresponding to each STFT segment. This
parameter must be specified if the number of data points per
segment is odd, or if the STFT was padded via ``nfft >
nperseg``. If `None`, the value depends on the shape of
`Zxx` and `input_onesided`. If `input_onesided` is True,
``nperseg=2*(Zxx.shape[freq_axis] - 1)``. Otherwise,
``nperseg=Zxx.shape[freq_axis]``. Defaults to `None`.
noverlap : int, optional
Number of points to overlap between segments. If `None`, half
of the segment length. Defaults to `None`. When specified, the
COLA constraint must be met (see Notes below), and should match
the parameter used to generate the STFT. Defaults to `None`.
nfft : int, optional
Number of FFT points corresponding to each STFT segment. This
parameter must be specified if the STFT was padded via ``nfft >
nperseg``. If `None`, the default values are the same as for
`nperseg`, detailed above, with one exception: if
`input_onesided` is True and
``nperseg==2*Zxx.shape[freq_axis] - 1``, `nfft` also takes on
that value. This case allows the proper inversion of an
odd-length unpadded STFT using ``nfft=None``. Defaults to
`None`.
input_onesided : bool, optional
If `True`, interpret the input array as one-sided FFTs, such
as is returned by `stft` with ``return_onesided=True`` and
`numpy.fft.rfft`. If `False`, interpret the input as a
two-sided FFT. Defaults to `True`.
boundary : bool, optional
Specifies whether the input signal was extended at its
boundaries by supplying a non-`None` ``boundary`` argument to
`stft`. Defaults to `True`.
time_axis : int, optional
Where the time segments of the STFT is located; the default is
the last axis (i.e. ``axis=-1``).
freq_axis : int, optional
Where the frequency axis of the STFT is located; the default is
the penultimate axis (i.e. ``axis=-2``).
Returns
-------
t : ndarray
Array of output data times.
x : ndarray
iSTFT of `Zxx`.
See Also
--------
stft: Short Time Fourier Transform
check_COLA: Check whether the Constant OverLap Add (COLA) constraint
is met
Notes
-----
In order to enable inversion of an STFT via the inverse STFT with
`istft`, the signal windowing must obey the constraint of "Constant
OverLap Add" (COLA). This ensures that every point in the input data
is equally weighted, thereby avoiding aliasing and allowing full
reconstruction. Whether a choice of `window`, `nperseg`, and
`noverlap` satisfy this constraint can be tested with
`check_COLA`, by using ``nperseg = Zxx.shape[freq_axis]``.
An STFT which has been modified (via masking or otherwise) is not
guaranteed to correspond to an exactly realizable signal. This
function implements the iSTFT via the least-squares estimation
algorithm detailed in [2]_, which produces a signal that minimizes
the mean squared error between the STFT of the returned signal and
the modified STFT.
.. versionadded:: 0.19.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck
"Discrete-Time Signal Processing", Prentice Hall, 1999.
.. [2] Daniel W. Griffin, Jae S. Lim, "Signal Estimation from
Modified Short-Time Fourier Transform", IEEE Trans. Acoust.,
Speech, Signal Process., 1984, 10.1109/TASSP.1984.1164317
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate a test signal, a 2 Vrms sine wave at 50Hz corrupted by
0.001 V**2/Hz of white noise sampled at 1024 Hz.
>>> fs = 1024
>>> N = 10*fs
>>> nperseg = 512
>>> amp = 2 * np.sqrt(2)
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / float(fs)
>>> carrier = amp * np.sin(2*np.pi*50*time)
>>> noise = np.random.normal(scale=np.sqrt(noise_power),
... size=time.shape)
>>> x = carrier + noise
Compute the STFT, and plot its magnitude
>>> f, t, Zxx = signal.stft(x, fs=fs, nperseg=nperseg)
>>> plt.figure()
>>> plt.pcolormesh(t, f, np.abs(Zxx), vmin=0, vmax=amp)
>>> plt.ylim([f[1], f[-1]])
>>> plt.title('STFT Magnitude')
>>> plt.ylabel('Frequency [Hz]')
>>> plt.xlabel('Time [sec]')
>>> plt.yscale('log')
>>> plt.show()
Zero the components that are 10% or less of the carrier magnitude,
then convert back to a time series via inverse STFT
>>> Zxx = np.where(np.abs(Zxx) >= amp/10, Zxx, 0)
>>> _, xrec = signal.istft(Zxx, fs)
Compare the cleaned signal with the original and true carrier signals.
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([2, 2.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
Note that the cleaned signal does not start as abruptly as the original,
since some of the coefficients of the transient were also removed:
>>> plt.figure()
>>> plt.plot(time, x, time, xrec, time, carrier)
>>> plt.xlim([0, 0.1])
>>> plt.xlabel('Time [sec]')
>>> plt.ylabel('Signal')
>>> plt.legend(['Carrier + Noise', 'Filtered via STFT', 'True Carrier'])
>>> plt.show()
"""
# Make sure input is an ndarray of appropriate complex dtype
Zxx = np.asarray(Zxx) + 0j
freq_axis = int(freq_axis)
time_axis = int(time_axis)
if Zxx.ndim < 2:
raise ValueError('Input stft must be at least 2d!')
if freq_axis == time_axis:
raise ValueError('Must specify differing time and frequency axes!')
nseg = Zxx.shape[time_axis]
if input_onesided:
# Assume even segment length
n_default = 2*(Zxx.shape[freq_axis] - 1)
else:
n_default = Zxx.shape[freq_axis]
# Check windowing parameters
if nperseg is None:
nperseg = n_default
else:
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
if (input_onesided) and (nperseg == n_default + 1):
# Odd nperseg, no FFT padding
nfft = nperseg
else:
nfft = n_default
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
if not check_COLA(window, nperseg, noverlap):
raise ValueError('Window, STFT shape and noverlap do not satisfy the '
'COLA constraint.')
# Rearrange axes if necessary
if time_axis != Zxx.ndim-1 or freq_axis != Zxx.ndim-2:
# Turn negative indices to positive for the call to transpose
if freq_axis < 0:
freq_axis = Zxx.ndim + freq_axis
if time_axis < 0:
time_axis = Zxx.ndim + time_axis
zouter = list(range(Zxx.ndim))
for ax in sorted([time_axis, freq_axis], reverse=True):
zouter.pop(ax)
Zxx = np.transpose(Zxx, zouter+[freq_axis, time_axis])
# Get window as array
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of {0}'.format(nperseg))
if input_onesided:
ifunc = np.fft.irfft
else:
ifunc = fftpack.ifft
xsubs = ifunc(Zxx, axis=-2, n=nfft)[..., :nperseg, :]
# Initialize output and normalization arrays
outputlength = nperseg + (nseg-1)*nstep
x = np.zeros(list(Zxx.shape[:-2])+[outputlength], dtype=xsubs.dtype)
norm = np.zeros(outputlength, dtype=xsubs.dtype)
if np.result_type(win, xsubs) != xsubs.dtype:
win = win.astype(xsubs.dtype)
xsubs *= win.sum() # This takes care of the 'spectrum' scaling
# Construct the output from the ifft segments
# This loop could perhaps be vectorized/strided somehow...
for ii in range(nseg):
# Window the ifft
x[..., ii*nstep:ii*nstep+nperseg] += xsubs[..., ii] * win
norm[..., ii*nstep:ii*nstep+nperseg] += win**2
# Divide out normalization where non-tiny
x /= np.where(norm > 1e-10, norm, 1.0)
# Remove extension points
if boundary:
x = x[..., nperseg//2:-(nperseg//2)]
if input_onesided:
x = x.real
# Put axes back
if x.ndim > 1:
if time_axis != Zxx.ndim-1:
if freq_axis < time_axis:
time_axis -= 1
x = np.rollaxis(x, -1, time_axis)
time = np.arange(x.shape[0])/float(fs)
return time, x
def coherence(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', axis=-1):
r"""
Estimate the magnitude squared coherence, Cxy, of
discrete-time signals X and Y using Welch's method.
``Cxy = abs(Pxy)**2/(Pxx*Pyy)``, where `Pxx` and `Pyy` are power
spectral density estimates of X and Y, and `Pxy` is the cross
spectral density estimate of X and Y.
Parameters
----------
x : array_like
Time series of measurement values
y : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` and `y` time series. Defaults
to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
axis : int, optional
Axis along which the coherence is computed for both inputs; the
default is over the last axis (i.e. ``axis=-1``).
Returns
-------
f : ndarray
Array of sample frequencies.
Cxy : ndarray
Magnitude squared coherence of x and y.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. For the default Hann window an overlap of
50% is a reasonable trade off between accurately estimating the
signal power, while not over counting any of the data. Narrower
windows may require a larger overlap.
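As a quick sanity check (an illustrative sketch), the coherence of a
signal with itself is unity at every frequency:
>>> from scipy import signal
>>> rng_x = np.random.randn(2048)
>>> f_id, Cxx = signal.coherence(rng_x, rng_x, fs=1.0, nperseg=256)
>>> np.allclose(Cxx, 1.0)
True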
.. versionadded:: 0.16.0
References
----------
.. [1] P. Welch, "The use of the fast Fourier transform for the
estimation of power spectra: A method based on time averaging
over short, modified periodograms", IEEE Trans. Audio
Electroacoust. vol. 15, pp. 70-73, 1967.
.. [2] Stoica, Petre, and Randolph Moses, "Spectral Analysis of
Signals" Prentice Hall, 2005
Examples
--------
>>> from scipy import signal
>>> import matplotlib.pyplot as plt
Generate two test signals with some common features.
>>> fs = 10e3
>>> N = 1e5
>>> amp = 20
>>> freq = 1234.0
>>> noise_power = 0.001 * fs / 2
>>> time = np.arange(N) / fs
>>> b, a = signal.butter(2, 0.25, 'low')
>>> x = np.random.normal(scale=np.sqrt(noise_power), size=time.shape)
>>> y = signal.lfilter(b, a, x)
>>> x += amp*np.sin(2*np.pi*freq*time)
>>> y += np.random.normal(scale=0.1*np.sqrt(noise_power), size=time.shape)
Compute and plot the coherence.
>>> f, Cxy = signal.coherence(x, y, fs, nperseg=1024)
>>> plt.semilogy(f, Cxy)
>>> plt.xlabel('frequency [Hz]')
>>> plt.ylabel('Coherence')
>>> plt.show()
"""
freqs, Pxx = welch(x, fs, window, nperseg, noverlap, nfft, detrend,
axis=axis)
_, Pyy = welch(y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
_, Pxy = csd(x, y, fs, window, nperseg, noverlap, nfft, detrend, axis=axis)
Cxy = np.abs(Pxy)**2 / Pxx / Pyy
return freqs, Cxy
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=None, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='spectrum', axis=-1, mode='psd', boundary=None,
padded=False):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between
the stft, psd, csd, and spectrogram functions. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Parameters
----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
the same object in memory as `x` (i.e. ``_spectral_helper(x,
x, ...)``), the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. If `window` is a string or tuple, it is
passed to `get_window` to generate the window values, which are
DFT-even by default. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length must be nperseg. Defaults
to a Hann window.
nperseg : int, optional
Length of each segment. Defaults to None, but if window is str or
tuple, is set to 256, and if window is array_like, is set to the
length of the window.
noverlap : int, optional
Number of points to overlap between segments. If `None`,
``noverlap = nperseg // 2``. Defaults to `None`.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If
`None`, the FFT length is `nperseg`. Defaults to `None`.
detrend : str or function or `False`, optional
Specifies how to detrend each segment. If `detrend` is a
string, it is passed as the `type` argument to the `detrend`
function. If it is a function, it takes a segment and returns a
detrended segment. If `detrend` is `False`, no detrending is
done. Defaults to 'constant'.
return_onesided : bool, optional
If `True`, return a one-sided spectrum for real data. If
`False` return a two-sided spectrum. Note that for complex
data, a two-sided spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross
spectrum ('spectrum') where `Pxy` has units of V**2, if `x`
and `y` are measured in V and `fs` is measured in Hz.
Defaults to 'density'
axis : int, optional
Axis along which the FFTs are computed; the default is over the
last axis (i.e. ``axis=-1``).
mode : str {'psd', 'stft'}, optional
Defines what kind of return values are expected. Defaults to
'psd'.
boundary : str or None, optional
Specifies whether the input signal is extended at both ends, and
how to generate the new values, in order to center the first
windowed segment on the first input point. This has the benefit
of enabling reconstruction of the first input point when the
employed window function starts at zero. Valid options are
``['even', 'odd', 'constant', 'zeros', None]``. Defaults to
`None`.
padded : bool, optional
Specifies whether the input signal is zero-padded at the end to
make the signal fit exactly into an integer number of window
segments, so that all of the signal is included in the output.
Defaults to `False`. Padding occurs after boundary extension, if
`boundary` is not `None`, and `padded` is `True`.
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment
result : ndarray
Array of output data, contents dependent on *mode* kwarg.
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
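Examples
--------
A sketch of how the public wrappers use this helper (illustrative only;
the actual `welch` and `csd` wrappers also handle averaging and other
options):
>>> import numpy as np
>>> x = np.random.randn(1024)
>>> freqs, t, Sxx = _spectral_helper(x, x, fs=1.0, nperseg=256,
...                                  scaling='density', mode='psd')
>>> Pxx = Sxx.mean(axis=-1)  # averaging over segments gives a Welch estimate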
"""
if mode not in ['psd', 'stft']:
raise ValueError("Unknown value for mode %s, must be one of: "
"{'psd', 'stft'}" % mode)
boundary_funcs = {'even': even_ext,
'odd': odd_ext,
'constant': const_ext,
'zeros': zero_ext,
None: None}
if boundary not in boundary_funcs:
raise ValueError("Unknown boundary option '{0}', must be one of: {1}"
.format(boundary, list(boundary_funcs.keys())))
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is 'stft'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x, y, np.complex64)
else:
outdtype = np.result_type(x, np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
if nperseg is not None: # if specified by user
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
# parse window; if array like, then set nperseg = win.shape
win, nperseg = _triage_segments(window, nperseg, input_length=x.shape[-1])
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
else:
noverlap = int(noverlap)
if noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
nstep = nperseg - noverlap
# Padding occurs after boundary extension, so that the extended signal ends
# in zeros, instead of introducing an impulse at the end.
# I.e. if x = [..., 3, 2]
# extend then pad -> [..., 3, 2, 2, 3, 0, 0, 0]
# pad then extend -> [..., 3, 2, 0, 0, 0, 2, 3]
if boundary is not None:
ext_func = boundary_funcs[boundary]
x = ext_func(x, nperseg//2, axis=-1)
if not same_data:
y = ext_func(y, nperseg//2, axis=-1)
if padded:
# Pad to integer number of windowed segments
# I.e make x.shape[-1] = nperseg + (nseg-1)*nstep, with integer nseg
nadd = (-(x.shape[-1]-nperseg) % nstep) % nperseg
zeros_shape = list(x.shape[:-1]) + [nadd]
x = np.concatenate((x, np.zeros(zeros_shape)), axis=-1)
if not same_data:
zeros_shape = list(y.shape[:-1]) + [nadd]
y = np.concatenate((y, np.zeros(zeros_shape)), axis=-1)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
if mode == 'stft':
scale = np.sqrt(scale)
if return_onesided:
if np.iscomplexobj(x):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
warnings.warn('Input data is complex, switching to '
'return_onesided=False')
else:
sides = 'twosided'
if sides == 'twosided':
freqs = fftpack.fftfreq(nfft, 1/fs)
elif sides == 'onesided':
freqs = np.fft.rfftfreq(nfft, 1/fs)
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides)
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft,
sides)
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
result *= scale
if sides == 'onesided' and mode == 'psd':
if nfft % 2:
result[..., 1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[..., 1:-1] *= 2
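# For example (illustrative): with nfft = 8 the one-sided bins are
# 0, 1, 2, 3, 4, where bin 4 is the Nyquist bin; only bins 1-3 have a
# negative-frequency counterpart, so only those are doubled.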
time = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1,
nperseg - noverlap)/float(fs)
if boundary is not None:
time -= (nperseg/2) / fs
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'stft':
result = result.real
# Output is going to have new last axis for time/window index, so a
# negative axis index shifts down one
if axis < 0:
axis -= 1
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
return freqs, time, result
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft, sides):
"""
Calculate windowed FFT, for internal use by
scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
`_spectral helper`. All input validation is performed there, and the
data axis is assumed to be the last axis of x. It is not designed to
be called externally. The windows are not averaged over; the result
from each window is returned.
Returns
-------
result : ndarray
Array of FFT data
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
# Create a strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
# http://stackoverflow.com/a/5568169
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
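# For example (illustrative): with x.shape[-1] = 10, nperseg = 4 and
# noverlap = 2, step is 2 and the strided view exposes 4 overlapping
# segments: x[0:4], x[2:6], x[4:8] and x[6:10].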
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
if sides == 'twosided':
func = fftpack.fft
else:
result = result.real
func = np.fft.rfft
result = func(result, n=nfft)
return result
def _triage_segments(window, nperseg, input_length):
"""
Parses window and nperseg arguments for spectrogram and _spectral_helper.
This is a helper function, not meant to be called externally.
Parameters
----------
window : string, tuple, or ndarray
If window is specified by a string or tuple and nperseg is not
specified, nperseg is set to the default of 256 and a window of
that length is returned.
If instead the window is array_like and nperseg is not specified, then
nperseg is set to the length of the window. A ValueError is raised if
the user supplies both an array_like window and a value for nperseg but
nperseg does not equal the length of the window.
nperseg : int
Length of each segment
input_length : int
Length of input signal, i.e. x.shape[-1]. Used to test for errors.
Returns
-------
win : ndarray
The window. If the function was called with a string or tuple,
this holds the actual array used as the window.
nperseg : int
Length of each segment. If window is str or tuple, nperseg is set to
256. If window is array_like, nperseg is set to the length of the
window.
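Examples
--------
A minimal sketch of the default behaviour (illustrative only; this is
a private helper, not part of the public API):
>>> win, nperseg = _triage_segments('hann', None, input_length=1000)
>>> nperseg
256
>>> win.shape
(256,)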
"""
#parse window; if array like, then set nperseg = win.shape
if isinstance(window, string_types) or isinstance(window, tuple):
# if nperseg not specified
if nperseg is None:
nperseg = 256 # then change to default
if nperseg > input_length:
warnings.warn('nperseg = {0:d} is greater than input length '
' = {1:d}, using nperseg = {1:d}'
.format(nperseg, input_length))
nperseg = input_length
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if input_length < win.shape[-1]:
raise ValueError('window is longer than input signal')
if nperseg is None:
nperseg = win.shape[0]
elif nperseg is not None:
if nperseg != win.shape[0]:
raise ValueError("value specified for nperseg is different from"
" length of window")
return win, nperseg
| gpl-3.0 |
se4u/pylearn2 | pylearn2/sandbox/cuda_convnet/bench.py | 44 | 3589 | __authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow"]
__license__ = "3-clause BSD"
__maintainer__ = "LISA Lab"
__email__ = "pylearn-dev@googlegroups"
from pylearn2.testing.skip import skip_if_no_gpu
skip_if_no_gpu()
import numpy as np
from theano.compat.six.moves import xrange
from theano import shared
from pylearn2.sandbox.cuda_convnet.filter_acts import FilterActs
from theano.tensor.nnet.conv import conv2d
from theano import function
import time
import matplotlib.pyplot as plt
def make_funcs(batch_size, rows, cols, channels, filter_rows,
num_filters):
rng = np.random.RandomState([2012,10,9])
filter_cols = filter_rows
base_image_value = rng.uniform(-1., 1., (channels, rows, cols,
batch_size)).astype('float32')
base_filters_value = rng.uniform(-1., 1., (channels, filter_rows,
filter_cols, num_filters)).astype('float32')
images = shared(base_image_value)
filters = shared(base_filters_value, name='filters')
# bench.py should always be run in gpu mode so we should not need a gpu_from_host here
output = FilterActs()(images, filters)
output_shared = shared( output.eval() )
cuda_convnet = function([], updates = { output_shared : output } )
cuda_convnet.name = 'cuda_convnet'
images_bc01v = base_image_value.transpose(3,0,1,2)
filters_bc01v = base_filters_value.transpose(3,0,1,2)
filters_bc01v = filters_bc01v[:,:,::-1,::-1]
images_bc01 = shared(images_bc01v)
filters_bc01 = shared(filters_bc01v)
output_conv2d = conv2d(images_bc01, filters_bc01,
border_mode='valid', image_shape = images_bc01v.shape,
filter_shape = filters_bc01v.shape)
output_conv2d_shared = shared(output_conv2d.eval())
baseline = function([], updates = { output_conv2d_shared : output_conv2d } )
baseline.name = 'baseline'
return cuda_convnet, baseline
def bench(f):
for i in xrange(3):
f()
trials = 10
t1 = time.time()
for i in xrange(trials):
f()
t2 = time.time()
return (t2-t1)/float(trials)
def get_speedup( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
return bench(baseline) / bench(cuda_convnet)
def get_time_per_10k_ex( *args, **kwargs):
cuda_convnet, baseline = make_funcs(*args, **kwargs)
batch_size = kwargs['batch_size']
return 10000 * bench(cuda_convnet) / float(batch_size)
def make_batch_size_plot(yfunc, yname, batch_sizes, rows, cols, channels, filter_rows, num_filters):
speedups = []
for batch_size in batch_sizes:
speedup = yfunc(batch_size = batch_size,
rows = rows,
cols = cols,
channels = channels,
filter_rows = filter_rows,
num_filters = num_filters)
speedups.append(speedup)
plt.plot(batch_sizes, speedups)
plt.title("cuda-convnet benchmark")
plt.xlabel("Batch size")
plt.ylabel(yname)
plt.show()
make_batch_size_plot(get_speedup, "Speedup factor", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 64,
filter_rows = 7,
num_filters = 64)
"""
make_batch_size_plot(get_time_per_10k_ex, "Time per 10k examples", batch_sizes = [1,2,5,25,32,50,63,64,65,96,100,127,128,129,159,160,161,191,192,193,200,255,256,257],
rows = 32,
cols = 32,
channels = 3,
filter_rows = 5,
num_filters = 64)
"""
| bsd-3-clause |
boomsbloom/dtm-fmri | DTM/for_gensim/lib/python2.7/site-packages/matplotlib/tests/test_coding_standards.py | 7 | 12216 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
from fnmatch import fnmatch
import os
from nose.tools import assert_equal
from nose.plugins.skip import SkipTest
from matplotlib.testing.noseclasses import KnownFailureTest
try:
import pep8
except ImportError:
HAS_PEP8 = False
else:
HAS_PEP8 = pep8.__version__ > '1.4.5'
import matplotlib
PEP8_ADDITIONAL_IGNORE = ['E111',
'E114',
'E115',
'E116',
'E121',
'E122',
'E123',
'E124',
'E125',
'E126',
'E127',
'E128',
'E129',
'E131',
'E265',
'E266',
'W503']
EXTRA_EXCLUDE_FILE = os.path.join(os.path.dirname(__file__),
'.pep8_test_exclude.txt')
if HAS_PEP8:
class StandardReportWithExclusions(pep8.StandardReport):
#: A class attribute to store the exception exclusion file patterns.
expected_bad_files = []
#: A class attribute to store the lines of failing tests.
_global_deferred_print = []
#: A class attribute to store patterns which have seen exceptions.
matched_exclusions = set()
def get_file_results(self):
# If the file had no errors, return self.file_errors
# (which will be 0).
if not self._deferred_print:
return self.file_errors
# Iterate over all of the patterns, to find a possible exclusion.
# If the filename is to be excluded, go ahead and remove the
# counts that self.error added.
for pattern in self.expected_bad_files:
if fnmatch(self.filename, pattern):
self.matched_exclusions.add(pattern)
# invert the error method's counters.
for _, _, code, _, _ in self._deferred_print:
self.counters[code] -= 1
if self.counters[code] == 0:
self.counters.pop(code)
self.messages.pop(code)
self.file_errors -= 1
self.total_errors -= 1
return self.file_errors
# mirror the content of StandardReport, only storing the output to
# file rather than printing. This could be a feature request for
# the PEP8 tool.
self._deferred_print.sort()
for line_number, offset, code, text, _ in self._deferred_print:
self._global_deferred_print.append(
self._fmt % {'path': self.filename,
'row': self.line_offset + line_number,
'col': offset + 1, 'code': code,
'text': text})
return self.file_errors
def assert_pep8_conformance(module=matplotlib, exclude_files=None,
extra_exclude_file=EXTRA_EXCLUDE_FILE,
pep8_additional_ignore=PEP8_ADDITIONAL_IGNORE,
dirname=None, expected_bad_files=None,
extra_exclude_directories=None):
"""
Tests the matplotlib codebase against the "pep8" tool.
Users can add their own excluded files (should files exist in the
local directory which is not in the repository) by adding a
".pep8_test_exclude.txt" file in the same directory as this test.
The file should be a line separated list of filenames/directories
as can be passed to the "pep8" tool's exclude list.
"""
if not HAS_PEP8:
raise SkipTest('The pep8 tool is required for this test')
# to get a list of bad files, rather than the specific errors, add
# "reporter=pep8.FileReport" to the StyleGuide constructor.
pep8style = pep8.StyleGuide(quiet=False,
reporter=StandardReportWithExclusions)
reporter = pep8style.options.reporter
if expected_bad_files is not None:
reporter.expected_bad_files = expected_bad_files
# Extend the number of PEP8 guidelines which are not checked.
pep8style.options.ignore = (pep8style.options.ignore +
tuple(pep8_additional_ignore))
# Support for egg shared object wrappers, which are not PEP8 compliant,
# nor part of the matplotlib repository.
# DO NOT ADD FILES *IN* THE REPOSITORY TO THIS LIST.
if exclude_files is not None:
pep8style.options.exclude.extend(exclude_files)
# Allow users to add their own exclude list.
if extra_exclude_file is not None and os.path.exists(extra_exclude_file):
with open(extra_exclude_file, 'r') as fh:
extra_exclude = [line.strip() for line in fh if line.strip()]
pep8style.options.exclude.extend(extra_exclude)
if extra_exclude_directories:
pep8style.options.exclude.extend(extra_exclude_directories)
if dirname is None:
dirname = os.path.dirname(module.__file__)
result = pep8style.check_files([dirname])
if reporter is StandardReportWithExclusions:
msg = ("Found code syntax errors (and warnings):\n"
"{0}".format('\n'.join(reporter._global_deferred_print)))
else:
msg = "Found code syntax errors (and warnings)."
assert_equal(result.total_errors, 0, msg)
# If we've been using the exclusions reporter, check that we didn't
# exclude files unnecessarily.
if reporter is StandardReportWithExclusions:
unexpectedly_good = sorted(set(reporter.expected_bad_files) -
reporter.matched_exclusions)
if unexpectedly_good:
raise ValueError('Some exclude patterns were unnecessary as the '
'files they pointed to either passed the PEP8 '
'tests or do not point to a file:\n '
'{0}'.format('\n '.join(unexpectedly_good)))
def test_pep8_conformance_installed_files():
exclude_files = ['_delaunay.py',
'_image.py',
'_tri.py',
'_backend_agg.py',
'_tkagg.py',
'ft2font.py',
'_cntr.py',
'_contour.py',
'_png.py',
'_path.py',
'ttconv.py',
'_gtkagg.py',
'_backend_gdk.py',
'pyparsing*',
'_qhull.py',
'_macosx.py']
expected_bad_files = ['_cm.py',
'_mathtext_data.py',
'backend_bases.py',
'cbook.py',
'collections.py',
'dviread.py',
'font_manager.py',
'fontconfig_pattern.py',
'gridspec.py',
'legend_handler.py',
'mathtext.py',
'patheffects.py',
'pylab.py',
'pyplot.py',
'rcsetup.py',
'stackplot.py',
'texmanager.py',
'transforms.py',
'type1font.py',
'widgets.py',
'testing/decorators.py',
'testing/jpl_units/Duration.py',
'testing/jpl_units/Epoch.py',
'testing/jpl_units/EpochConverter.py',
'testing/jpl_units/StrConverter.py',
'testing/jpl_units/UnitDbl.py',
'testing/jpl_units/UnitDblConverter.py',
'testing/jpl_units/UnitDblFormatter.py',
'testing/jpl_units/__init__.py',
'tri/triinterpolate.py',
'tests/test_axes.py',
'tests/test_bbox_tight.py',
'tests/test_delaunay.py',
'tests/test_dviread.py',
'tests/test_image.py',
'tests/test_legend.py',
'tests/test_lines.py',
'tests/test_mathtext.py',
'tests/test_rcparams.py',
'tests/test_simplification.py',
'tests/test_streamplot.py',
'tests/test_subplots.py',
'tests/test_tightlayout.py',
'tests/test_triangulation.py',
'compat/subprocess.py',
'backends/__init__.py',
'backends/backend_agg.py',
'backends/backend_cairo.py',
'backends/backend_cocoaagg.py',
'backends/backend_gdk.py',
'backends/backend_gtk.py',
'backends/backend_gtk3.py',
'backends/backend_gtk3cairo.py',
'backends/backend_gtkagg.py',
'backends/backend_gtkcairo.py',
'backends/backend_macosx.py',
'backends/backend_mixed.py',
'backends/backend_pgf.py',
'backends/backend_ps.py',
'backends/backend_svg.py',
'backends/backend_template.py',
'backends/backend_tkagg.py',
'backends/tkagg.py',
'backends/windowing.py',
'backends/qt_editor/formlayout.py',
'sphinxext/mathmpl.py',
'sphinxext/only_directives.py',
'sphinxext/plot_directive.py',
'projections/__init__.py',
'projections/geo.py',
'projections/polar.py',
'externals/six.py']
expected_bad_files = ['*/matplotlib/' + s for s in expected_bad_files]
assert_pep8_conformance(module=matplotlib,
exclude_files=exclude_files,
expected_bad_files=expected_bad_files)
def test_pep8_conformance_examples():
mpldir = os.environ.get('MPL_REPO_DIR', None)
if mpldir is None:
# try and guess!
fp = os.getcwd()
while len(fp) > 2:
if os.path.isdir(os.path.join(fp, 'examples')):
mpldir = fp
break
fp, tail = os.path.split(fp)
if mpldir is None:
raise KnownFailureTest("can not find the examples, set env "
"MPL_REPO_DIR to point to the top-level path "
"of the source tree")
exdir = os.path.join(mpldir, 'examples')
blacklist = ()
expected_bad_files = ['*/pylab_examples/table_demo.py',
'*/pylab_examples/tricontour_demo.py',
'*/pylab_examples/tripcolor_demo.py',
'*/pylab_examples/triplot_demo.py',
'*/shapes_and_collections/artist_reference.py']
assert_pep8_conformance(dirname=exdir,
extra_exclude_directories=blacklist,
pep8_additional_ignore=PEP8_ADDITIONAL_IGNORE +
['E116', 'E501', 'E402'],
expected_bad_files=expected_bad_files)
if __name__ == '__main__':
import nose
nose.runmodule(argv=['-s', '--with-doctest'], exit=False)
| mit |
weidel-p/nest-simulator | pynest/nest/tests/test_spatial/test_plotting.py | 12 | 5748 | # -*- coding: utf-8 -*-
#
# test_plotting.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for basic spatial plotting functions.
"""
import unittest
import nest
import numpy as np
try:
import matplotlib.pyplot as plt
tmp_fig = plt.figure() # make sure we can open a window; DISPLAY may not be set
plt.close(tmp_fig)
PLOTTING_POSSIBLE = True
except Exception:
PLOTTING_POSSIBLE = False
@unittest.skipIf(not PLOTTING_POSSIBLE,
'Plotting impossible because matplotlib or display missing')
class PlottingTestCase(unittest.TestCase):
def test_PlotLayer(self):
"""Test plotting layer."""
nest.ResetKernel()
l = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[3, 3],
extent=[2., 2.],
edge_wrap=True))
nest.PlotLayer(l)
plotted_datapoints = plt.gca().collections[-1].get_offsets().data
reference_datapoints = nest.GetPosition(l)
self.assertTrue(np.allclose(plotted_datapoints, reference_datapoints))
def test_PlotTargets(self):
"""Test plotting targets."""
delta = 0.05
mask = {'rectangular': {'lower_left': [-delta, -2/3 - delta], 'upper_right': [2/3 + delta, delta]}}
cdict = {'rule': 'pairwise_bernoulli', 'p': 1.,
'mask': mask}
sdict = {'synapse_model': 'stdp_synapse'}
nest.ResetKernel()
l = nest.Create('iaf_psc_alpha',
positions=nest.spatial.grid(shape=[3, 3],
extent=[2., 2.],
edge_wrap=True))
# connect l -> l
nest.Connect(l, l, cdict, sdict)
ctr = nest.FindCenterElement(l)
fig = nest.PlotTargets(ctr, l)
fig.gca().set_title('Plain call')
plotted_datapoints = plt.gca().collections[0].get_offsets().data
eps = 0.01
pos = np.array(nest.GetPosition(l))
pos_xmask = pos[np.where(pos[:, 0] > -eps)]
reference_datapoints = pos_xmask[np.where(pos_xmask[:, 1] < eps)][::-1]
self.assertTrue(np.array_equal(np.sort(plotted_datapoints, axis=0), np.sort(reference_datapoints, axis=0)))
fig = nest.PlotTargets(ctr, l, mask=mask)
ax = fig.gca()
ax.set_title('Call with mask')
self.assertGreaterEqual(len(ax.patches), 1)
def test_plot_probability_kernel(self):
"""Plot parameter probability"""
nest.ResetKernel()
plot_shape = [10, 10]
plot_edges = [-0.5, 0.5, -0.5, 0.5]
def probability_calculation(distance):
return 1 - 1.5*distance
l = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([10, 10], edge_wrap=False))
source = l[25]
source_pos = np.array(nest.GetPosition(source))
source_x, source_y = source_pos
# Calculate reference values
ref_probability = np.zeros(plot_shape[::-1])
for i, x in enumerate(np.linspace(plot_edges[0], plot_edges[1], plot_shape[0])):
positions = np.array([[x, y] for y in np.linspace(plot_edges[2], plot_edges[3], plot_shape[1])])
ref_distances = np.sqrt((positions[:, 0] - source_x)**2 + (positions[:, 1] - source_y)**2)
values = probability_calculation(ref_distances)
ref_probability[:, i] = np.maximum(np.minimum(np.array(values), 1.0), 0.0)
# Create the parameter
parameter = probability_calculation(nest.spatial.distance)
fig, ax = plt.subplots()
nest.PlotProbabilityParameter(source, parameter, ax=ax, shape=plot_shape, edges=plot_edges)
self.assertEqual(len(ax.images), 1)
img = ax.images[0]
img_data = img.get_array().data
self.assertTrue(np.array_equal(img_data, ref_probability))
def test_plot_probability_kernel_with_mask(self):
"""Plot parameter probability with mask"""
nest.ResetKernel()
plot_shape = [10, 10]
plot_edges = [-0.5, 0.5, -0.5, 0.5]
l = nest.Create('iaf_psc_alpha', positions=nest.spatial.grid([10, 10], edge_wrap=False))
parameter = 1 - 1.5*nest.spatial.distance
source = l[25]
masks = [{'circular': {'radius': 0.4}},
{'doughnut': {'inner_radius': 0.2, 'outer_radius': 0.45}},
{'rectangular': {'lower_left': [-.3, -.3], 'upper_right': [0.3, 0.3]}},
{'elliptical': {'major_axis': 0.8, 'minor_axis': 0.4}}]
fig, axs = plt.subplots(2, 2)
for mask, ax in zip(masks, axs.flatten()):
nest.PlotProbabilityParameter(source, parameter, mask=mask, ax=ax, shape=plot_shape, edges=plot_edges)
self.assertEqual(len(ax.images), 1)
self.assertGreaterEqual(len(ax.patches), 1)
def suite():
suite = unittest.makeSuite(PlottingTestCase, 'test')
return suite
if __name__ == "__main__":
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
plt.show()
| gpl-2.0 |
mattilyra/scikit-learn | sklearn/__init__.py | 27 | 3086 | """
Machine learning module for Python
==================================
sklearn is a Python module integrating classical machine
learning algorithms in the tightly-knit world of scientific Python
packages (numpy, scipy, matplotlib).
It aims to provide simple and efficient solutions to learning problems
that are accessible to everybody and reusable in various contexts:
machine-learning as a versatile tool for science and engineering.
See http://scikit-learn.org for complete documentation.
"""
import sys
import re
import warnings
# Make sure that DeprecationWarning within this package always gets printed
warnings.filterwarnings('always', category=DeprecationWarning,
module='^{0}\.'.format(re.escape(__name__)))
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.18.dev0'
try:
# This variable is injected in the __builtins__ by the build
# process. It used to enable importing subpackages of sklearn when
# the binaries are not built
__SKLEARN_SETUP__
except NameError:
__SKLEARN_SETUP__ = False
if __SKLEARN_SETUP__:
sys.stderr.write('Partial import of sklearn during the build process.\n')
# We are not importing the rest of the scikit during the build
# process, as it may not be compiled yet
else:
from . import __check_build
from .base import clone
__check_build # avoid flakes unused variable error
__all__ = ['calibration', 'cluster', 'covariance', 'cross_decomposition',
'cross_validation', 'datasets', 'decomposition', 'dummy',
'ensemble', 'exceptions', 'externals', 'feature_extraction',
'feature_selection', 'gaussian_process', 'grid_search',
'isotonic', 'kernel_approximation', 'kernel_ridge',
'lda', 'learning_curve', 'linear_model', 'manifold', 'metrics',
'mixture', 'model_selection', 'multiclass', 'multioutput',
'naive_bayes', 'neighbors', 'neural_network', 'pipeline',
'preprocessing', 'qda', 'random_projection', 'semi_supervised',
'svm', 'tree', 'discriminant_analysis',
# Non-modules:
'clone']
def setup_module(module):
"""Fixture for the tests to assure globally controllable seeding of RNGs"""
import os
import numpy as np
import random
# It could have been provided in the environment
_random_seed = os.environ.get('SKLEARN_SEED', None)
if _random_seed is None:
_random_seed = np.random.uniform() * (2 ** 31 - 1)
_random_seed = int(_random_seed)
print("I: Seeding RNGs with %r" % _random_seed)
np.random.seed(_random_seed)
random.seed(_random_seed)
| bsd-3-clause |
NelisVerhoef/scikit-learn | examples/cluster/plot_kmeans_digits.py | 230 | 4524 | """
===========================================================
A demo of K-Means clustering on the handwritten digits data
===========================================================
In this example we compare the various initialization strategies for
K-means in terms of runtime and quality of the results.
As the ground truth is known here, we also apply different cluster
quality metrics to judge the goodness of fit of the cluster labels to the
ground truth.
Cluster quality metrics evaluated (see :ref:`clustering_evaluation` for
definitions and discussions of the metrics):
=========== ========================================================
Shorthand full name
=========== ========================================================
homo homogeneity score
compl completeness score
v-meas V measure
ARI adjusted Rand index
AMI adjusted mutual information
silhouette silhouette coefficient
=========== ========================================================
"""
print(__doc__)
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
np.random.seed(42)
digits = load_digits()
data = scale(digits.data)
n_samples, n_features = data.shape
n_digits = len(np.unique(digits.target))
labels = digits.target
sample_size = 300
print("n_digits: %d, \t n_samples %d, \t n_features %d"
% (n_digits, n_samples, n_features))
print(79 * '_')
print('% 9s' % 'init'
' time inertia homo compl v-meas ARI AMI silhouette')
def bench_k_means(estimator, name, data):
t0 = time()
estimator.fit(data)
print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f %.3f'
% (name, (time() - t0), estimator.inertia_,
metrics.homogeneity_score(labels, estimator.labels_),
metrics.completeness_score(labels, estimator.labels_),
metrics.v_measure_score(labels, estimator.labels_),
metrics.adjusted_rand_score(labels, estimator.labels_),
metrics.adjusted_mutual_info_score(labels, estimator.labels_),
metrics.silhouette_score(data, estimator.labels_,
metric='euclidean',
sample_size=sample_size)))
bench_k_means(KMeans(init='k-means++', n_clusters=n_digits, n_init=10),
name="k-means++", data=data)
bench_k_means(KMeans(init='random', n_clusters=n_digits, n_init=10),
name="random", data=data)
# in this case the seeding of the centers is deterministic, hence we run the
# kmeans algorithm only once with n_init=1
pca = PCA(n_components=n_digits).fit(data)
bench_k_means(KMeans(init=pca.components_, n_clusters=n_digits, n_init=1),
name="PCA-based",
data=data)
print(79 * '_')
###############################################################################
# Visualize the results on PCA-reduced data
reduced_data = PCA(n_components=2).fit_transform(data)
kmeans = KMeans(init='k-means++', n_clusters=n_digits, n_init=10)
kmeans.fit(reduced_data)
# Step size of the mesh. Decrease to increase the quality of the VQ.
h = .02     # point in the mesh [x_min, x_max]x[y_min, y_max].
# Plot the decision boundary. For that, we will assign a color to each mesh point.
x_min, x_max = reduced_data[:, 0].min() - 1, reduced_data[:, 0].max() + 1
y_min, y_max = reduced_data[:, 1].min() - 1, reduced_data[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# Obtain labels for each point in mesh. Use last trained model.
Z = kmeans.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure(1)
plt.clf()
plt.imshow(Z, interpolation='nearest',
extent=(xx.min(), xx.max(), yy.min(), yy.max()),
cmap=plt.cm.Paired,
aspect='auto', origin='lower')
plt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=2)
# Plot the centroids as a white X
centroids = kmeans.cluster_centers_
plt.scatter(centroids[:, 0], centroids[:, 1],
marker='x', s=169, linewidths=3,
color='w', zorder=10)
plt.title('K-means clustering on the digits dataset (PCA-reduced data)\n'
'Centroids are marked with white cross')
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
CVML/scikit-learn | examples/neighbors/plot_approximate_nearest_neighbors_hyperparameters.py | 227 | 5170 | """
=================================================
Hyper-parameters of Approximate Nearest Neighbors
=================================================
This example demonstrates the behaviour of the
accuracy of the nearest neighbor queries of Locality Sensitive Hashing
Forest as the number of candidates and the number of estimators (trees)
vary.
In the first plot, accuracy is measured with the number of candidates. Here,
the term "number of candidates" refers to maximum bound for the number of
distinct points retrieved from each tree to calculate the distances. Nearest
neighbors are selected from this pool of candidates. Number of estimators is
maintained at three fixed levels (1, 5, 10).
In the second plot, the number of candidates is fixed at 50. Number of trees
is varied and the accuracy is plotted against those values. To measure the
accuracy, the true nearest neighbors are required, therefore
:class:`sklearn.neighbors.NearestNeighbors` is used to compute the exact
neighbors.
"""
from __future__ import division
print(__doc__)
# Author: Maheshakya Wijewardena <maheshakya.10@cse.mrt.ac.lk>
#
# License: BSD 3 clause
###############################################################################
import numpy as np
from sklearn.datasets.samples_generator import make_blobs
from sklearn.neighbors import LSHForest
from sklearn.neighbors import NearestNeighbors
import matplotlib.pyplot as plt
# Initialize size of the database, iterations and required neighbors.
n_samples = 10000
n_features = 100
n_queries = 30
rng = np.random.RandomState(42)
# Generate sample data
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=10,
random_state=0)
X_index = X[:n_samples]
X_query = X[n_samples:]
# Get exact neighbors
nbrs = NearestNeighbors(n_neighbors=1, algorithm='brute',
metric='cosine').fit(X_index)
neighbors_exact = nbrs.kneighbors(X_query, return_distance=False)
# Set `n_candidate` values
n_candidates_values = np.linspace(10, 500, 5).astype(np.int)
n_estimators_for_candidate_value = [1, 5, 10]
n_iter = 10
stds_accuracies = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]),
dtype=float)
accuracies_c = np.zeros((len(n_estimators_for_candidate_value),
n_candidates_values.shape[0]), dtype=float)
# LSH Forest is a stochastic index: perform several iteration to estimate
# expected accuracy and standard deviation displayed as error bars in
# the plots
for j, value in enumerate(n_estimators_for_candidate_value):
for i, n_candidates in enumerate(n_candidates_values):
accuracy_c = []
for seed in range(n_iter):
lshf = LSHForest(n_estimators=value,
n_candidates=n_candidates, n_neighbors=1,
random_state=seed)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query,
return_distance=False)
accuracy_c.append(np.sum(np.equal(neighbors_approx,
neighbors_exact)) /
n_queries)
stds_accuracies[j, i] = np.std(accuracy_c)
accuracies_c[j, i] = np.mean(accuracy_c)
# Set `n_estimators` values
n_estimators_values = [1, 5, 10, 20, 30, 40, 50]
accuracies_trees = np.zeros(len(n_estimators_values), dtype=float)
# Calculate average accuracy for each value of `n_estimators`
for i, n_estimators in enumerate(n_estimators_values):
lshf = LSHForest(n_estimators=n_estimators, n_neighbors=1)
# Build the LSH Forest index
lshf.fit(X_index)
# Get neighbors
neighbors_approx = lshf.kneighbors(X_query, return_distance=False)
accuracies_trees[i] = np.sum(np.equal(neighbors_approx,
neighbors_exact))/n_queries
###############################################################################
# Plot the accuracy variation with `n_candidates`
plt.figure()
colors = ['c', 'm', 'y']
for i, n_estimators in enumerate(n_estimators_for_candidate_value):
label = 'n_estimators = %d ' % n_estimators
plt.plot(n_candidates_values, accuracies_c[i, :],
'o-', c=colors[i], label=label)
plt.errorbar(n_candidates_values, accuracies_c[i, :],
stds_accuracies[i, :], c=colors[i])
plt.legend(loc='upper left', fontsize='small')
plt.ylim([0, 1.2])
plt.xlim(min(n_candidates_values), max(n_candidates_values))
plt.ylabel("Accuracy")
plt.xlabel("n_candidates")
plt.grid(which='both')
plt.title("Accuracy variation with n_candidates")
# Plot the accuracy variation with `n_estimators`
plt.figure()
plt.scatter(n_estimators_values, accuracies_trees, c='k')
plt.plot(n_estimators_values, accuracies_trees, c='g')
plt.ylim([0, 1.2])
plt.xlim(min(n_estimators_values), max(n_estimators_values))
plt.ylabel("Accuracy")
plt.xlabel("n_estimators")
plt.grid(which='both')
plt.title("Accuracy variation with n_estimators")
plt.show()
| bsd-3-clause |
huzq/scikit-learn | examples/cluster/plot_mean_shift.py | 23 | 1775 | """
=============================================
A demo of the mean-shift clustering algorithm
=============================================
Reference:
Dorin Comaniciu and Peter Meer, "Mean Shift: A robust approach toward
feature space analysis". IEEE Transactions on Pattern Analysis and
Machine Intelligence. 2002. pp. 603-619.
"""
print(__doc__)
import numpy as np
from sklearn.cluster import MeanShift, estimate_bandwidth
from sklearn.datasets import make_blobs
# #############################################################################
# Generate sample data
centers = [[1, 1], [-1, -1], [1, -1]]
X, _ = make_blobs(n_samples=10000, centers=centers, cluster_std=0.6)
# #############################################################################
# Compute clustering with MeanShift
# The following bandwidth can be automatically detected using
bandwidth = estimate_bandwidth(X, quantile=0.2, n_samples=500)
ms = MeanShift(bandwidth=bandwidth, bin_seeding=True)
ms.fit(X)
labels = ms.labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
print("number of estimated clusters : %d" % n_clusters_)
# #############################################################################
# Plot result
import matplotlib.pyplot as plt
from itertools import cycle
plt.figure(1)
plt.clf()
colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
for k, col in zip(range(n_clusters_), colors):
my_members = labels == k
cluster_center = cluster_centers[k]
plt.plot(X[my_members, 0], X[my_members, 1], col + '.')
plt.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=14)
plt.title('Estimated number of clusters: %d' % n_clusters_)
plt.show()
| bsd-3-clause |
waterponey/scikit-learn | examples/neighbors/plot_lof.py | 30 | 1939 | """
=================================================
Anomaly detection with Local Outlier Factor (LOF)
=================================================
This example presents the Local Outlier Factor (LOF) estimator. The LOF
algorithm is an unsupervised outlier detection method which computes the local
density deviation of a given data point with respect to its neighbors.
It considers as outliers the samples that have a substantially lower density than
their neighbors.
The number of neighbors considered (parameter n_neighbors) is typically
chosen 1) greater than the minimum number of objects a cluster has to contain,
so that other objects can be local outliers relative to this cluster, and 2)
smaller than the maximum number of close by objects that can potentially be
local outliers.
In practice, such information is generally not available, and taking
n_neighbors=20 appears to work well in general.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.neighbors import LocalOutlierFactor
print(__doc__)
np.random.seed(42)
# Generate train data
X = 0.3 * np.random.randn(100, 2)
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
X = np.r_[X + 2, X - 2, X_outliers]
# fit the model
clf = LocalOutlierFactor(n_neighbors=20)
y_pred = clf.fit_predict(X)
y_pred_outliers = y_pred[200:]
# plot the level sets of the decision function
xx, yy = np.meshgrid(np.linspace(-5, 5, 50), np.linspace(-5, 5, 50))
Z = clf._decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.title("Local Outlier Factor (LOF)")
plt.contourf(xx, yy, Z, cmap=plt.cm.Blues_r)
a = plt.scatter(X[:200, 0], X[:200, 1], c='white')
b = plt.scatter(X[200:, 0], X[200:, 1], c='red')
plt.axis('tight')
plt.xlim((-5, 5))
plt.ylim((-5, 5))
plt.legend([a, b],
["normal observations",
"abnormal observations"],
loc="upper left")
plt.show()
| bsd-3-clause |
ryfeus/lambda-packs | LightGBM_sklearn_scipy_numpy/source/scipy/spatial/tests/test__plotutils.py | 15 | 2140 | from __future__ import division, print_function, absolute_import
import pytest
from numpy.testing import assert_, assert_array_equal
from scipy._lib._numpy_compat import suppress_warnings
try:
import matplotlib
matplotlib.rcParams['backend'] = 'Agg'
import matplotlib.pyplot as plt
from matplotlib.collections import LineCollection
from matplotlib import MatplotlibDeprecationWarning
has_matplotlib = True
except Exception:
has_matplotlib = False
from scipy.spatial import \
delaunay_plot_2d, voronoi_plot_2d, convex_hull_plot_2d, \
Delaunay, Voronoi, ConvexHull
@pytest.mark.skipif(not has_matplotlib, reason="Matplotlib not available")
class TestPlotting:
points = [(0,0), (0,1), (1,0), (1,1)]
def test_delaunay(self):
# Smoke test
fig = plt.figure()
obj = Delaunay(self.points)
s_before = obj.simplices.copy()
with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = delaunay_plot_2d(obj, ax=fig.gca())
assert_array_equal(obj.simplices, s_before) # shouldn't modify
assert_(r is fig)
delaunay_plot_2d(obj, ax=fig.gca())
def test_voronoi(self):
# Smoke test
fig = plt.figure()
obj = Voronoi(self.points)
with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = voronoi_plot_2d(obj, ax=fig.gca())
assert_(r is fig)
voronoi_plot_2d(obj)
voronoi_plot_2d(obj, show_vertices=False)
def test_convex_hull(self):
# Smoke test
fig = plt.figure()
tri = ConvexHull(self.points)
with suppress_warnings() as sup:
# filter can be removed when matplotlib 1.x is dropped
sup.filter(message="The ishold function was deprecated in version")
r = convex_hull_plot_2d(tri, ax=fig.gca())
assert_(r is fig)
convex_hull_plot_2d(tri)
| mit |
anntzer/seaborn | seaborn/_core.py | 1 | 44884 | import warnings
import itertools
from copy import copy
from functools import partial
from collections.abc import Iterable, Sequence, Mapping
from numbers import Number
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib as mpl
from ._decorators import (
share_init_params_with_map,
)
from .palettes import (
QUAL_PALETTES,
color_palette,
cubehelix_palette,
_parse_cubehelix_args,
)
from .utils import (
get_color_cycle,
remove_na,
)
class SemanticMapping:
"""Base class for mapping data values to plot attributes."""
# -- Default attributes that all SemanticMapping subclasses must set
# Whether the mapping is numeric, categorical, or datetime
map_type = None
# Ordered list of unique values in the input data
levels = None
# A mapping from the data values to corresponding plot attributes
lookup_table = None
def __init__(self, plotter):
# TODO Putting this here so we can continue to use a lot of the
# logic that's built into the library, but the idea of this class
# is to move towards semantic mappings that are agnostic about the
# kind of plot they're going to be used to draw.
# Fully achieving that is going to take some thinking.
self.plotter = plotter
def map(cls, plotter, *args, **kwargs):
# This method is assigned the __init__ docstring
method_name = "_{}_map".format(cls.__name__[:-7].lower())
setattr(plotter, method_name, cls(plotter, *args, **kwargs))
return plotter
def _lookup_single(self, key):
"""Apply the mapping to a single data value."""
return self.lookup_table[key]
def __call__(self, key, *args, **kwargs):
"""Get the attribute(s) values for the data key."""
if isinstance(key, (list, np.ndarray, pd.Series)):
return [self._lookup_single(k, *args, **kwargs) for k in key]
else:
return self._lookup_single(key, *args, **kwargs)
@share_init_params_with_map
class HueMapping(SemanticMapping):
"""Mapping that sets artist colors according to data values."""
# A specification of the colors that should appear in the plot
palette = None
# An object that normalizes data values to [0, 1] range for color mapping
norm = None
# A continuous colormap object for interpolating in a numeric context
cmap = None
def __init__(
self, plotter, palette=None, order=None, norm=None,
):
"""Map the levels of the `hue` variable to distinct colors.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data["hue"]
if data.notna().any():
map_type = self.infer_map_type(
palette, norm, plotter.input_format, plotter.var_types["hue"]
)
# Our goal is to end up with a dictionary mapping every unique
# value in `data` to a color. We will also keep track of the
# metadata about this mapping we will need for, e.g., a legend
# --- Option 1: numeric mapping with a matplotlib colormap
if map_type == "numeric":
data = pd.to_numeric(data)
levels, lookup_table, norm, cmap = self.numeric_mapping(
data, palette, norm,
)
# --- Option 2: categorical mapping using seaborn palette
elif map_type == "categorical":
cmap = norm = None
levels, lookup_table = self.categorical_mapping(
data, palette, order,
)
# --- Option 3: datetime mapping
else:
# TODO this needs actual implementation
cmap = norm = None
levels, lookup_table = self.categorical_mapping(
# Casting data to list to handle differences in the way
# pandas and numpy represent datetime64 data
list(data), palette, order,
)
self.map_type = map_type
self.lookup_table = lookup_table
self.palette = palette
self.levels = levels
self.norm = norm
self.cmap = cmap
def _lookup_single(self, key):
"""Get the color for a single value, using colormap to interpolate."""
try:
# Use a value that's in the original data vector
value = self.lookup_table[key]
except KeyError:
# Use the colormap to interpolate between existing datapoints
# (e.g. in the context of making a continuous legend)
normed = self.norm(key)
if np.ma.is_masked(normed):
normed = np.nan
value = self.cmap(normed)
return value
def infer_map_type(self, palette, norm, input_format, var_type):
"""Determine how to implement the mapping."""
if palette in QUAL_PALETTES:
map_type = "categorical"
elif norm is not None:
map_type = "numeric"
elif isinstance(palette, (dict, list)):
map_type = "categorical"
elif input_format == "wide":
map_type = "categorical"
else:
map_type = var_type
return map_type
def categorical_mapping(self, data, palette, order):
"""Determine colors when the hue mapping is categorical."""
# -- Identify the order and name of the levels
levels = categorical_order(data, order)
n_colors = len(levels)
# -- Identify the set of colors to use
if isinstance(palette, dict):
missing = set(levels) - set(palette)
if any(missing):
err = "The palette dictionary is missing keys: {}"
raise ValueError(err.format(missing))
lookup_table = palette
else:
if palette is None:
if n_colors <= len(get_color_cycle()):
colors = color_palette(None, n_colors)
else:
colors = color_palette("husl", n_colors)
elif isinstance(palette, list):
if len(palette) != n_colors:
err = "The palette list has the wrong number of colors."
raise ValueError(err)
colors = palette
else:
colors = color_palette(palette, n_colors)
lookup_table = dict(zip(levels, colors))
return levels, lookup_table
def numeric_mapping(self, data, palette, norm):
"""Determine colors when the hue variable is quantitative."""
if isinstance(palette, dict):
# The presence of a norm object overrides a dictionary of hues
# in specifying a numeric mapping, so we need to process it here.
levels = list(sorted(palette))
colors = [palette[k] for k in sorted(palette)]
cmap = mpl.colors.ListedColormap(colors)
lookup_table = palette.copy()
else:
# The levels are the sorted unique values in the data
levels = list(np.sort(remove_na(data.unique())))
# --- Sort out the colormap to use from the palette argument
# Default numeric palette is our default cubehelix palette
# TODO do we want to do something complicated to ensure contrast?
palette = "ch:" if palette is None else palette
if isinstance(palette, mpl.colors.Colormap):
cmap = palette
elif str(palette).startswith("ch:"):
args, kwargs = _parse_cubehelix_args(palette)
cmap = cubehelix_palette(0, *args, as_cmap=True, **kwargs)
else:
try:
cmap = mpl.cm.get_cmap(palette)
except (ValueError, TypeError):
err = "Palette {} not understood"
raise ValueError(err)
# Now sort out the data normalization
if norm is None:
norm = mpl.colors.Normalize()
elif isinstance(norm, tuple):
norm = mpl.colors.Normalize(*norm)
elif not isinstance(norm, mpl.colors.Normalize):
err = "``hue_norm`` must be None, tuple, or Normalize object."
raise ValueError(err)
if not norm.scaled():
norm(np.asarray(data.dropna()))
lookup_table = dict(zip(levels, cmap(norm(levels))))
return levels, lookup_table, norm, cmap
@share_init_params_with_map
class SizeMapping(SemanticMapping):
"""Mapping that sets artist sizes according to data values."""
# An object that normalizes data values to [0, 1] range
norm = None
def __init__(
self, plotter, sizes=None, order=None, norm=None,
):
"""Map the levels of the `size` variable to distinct values.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data["size"]
if data.notna().any():
map_type = self.infer_map_type(
norm, sizes, plotter.var_types["size"]
)
# --- Option 1: numeric mapping
if map_type == "numeric":
levels, lookup_table, norm = self.numeric_mapping(
data, sizes, norm,
)
# --- Option 2: categorical mapping
elif map_type == "categorical":
levels, lookup_table = self.categorical_mapping(
data, sizes, order,
)
# --- Option 3: datetime mapping
# TODO this needs an actual implementation
else:
levels, lookup_table = self.categorical_mapping(
# Casting data to list to handle differences in the way
# pandas and numpy represent datetime64 data
list(data), sizes, order,
)
self.map_type = map_type
self.levels = levels
self.norm = norm
self.sizes = sizes
self.lookup_table = lookup_table
def infer_map_type(self, norm, sizes, var_type):
if norm is not None:
map_type = "numeric"
elif isinstance(sizes, (dict, list)):
map_type = "categorical"
else:
map_type = var_type
return map_type
def _lookup_single(self, key):
try:
value = self.lookup_table[key]
except KeyError:
normed = self.norm(key)
if np.ma.is_masked(normed):
normed = np.nan
size_values = self.lookup_table.values()
size_range = min(size_values), max(size_values)
value = size_range[0] + normed * np.ptp(size_range)
return value
def categorical_mapping(self, data, sizes, order):
levels = categorical_order(data, order)
if isinstance(sizes, dict):
# Dict inputs map existing data values to the size attribute
missing = set(levels) - set(sizes)
if any(missing):
err = f"Missing sizes for the following levels: {missing}"
raise ValueError(err)
lookup_table = sizes.copy()
elif isinstance(sizes, list):
# List inputs give size values in the same order as the levels
if len(sizes) != len(levels):
err = "The `sizes` list has the wrong number of values."
raise ValueError(err)
lookup_table = dict(zip(levels, sizes))
else:
if isinstance(sizes, tuple):
# Tuple input sets the min, max size values
if len(sizes) != 2:
err = "A `sizes` tuple must have only 2 values"
raise ValueError(err)
elif sizes is not None:
err = f"Value for `sizes` not understood: {sizes}"
raise ValueError(err)
else:
# Otherwise, we need to get the min, max size values from
# the plotter object we are attached to.
# TODO this is going to cause us trouble later, because we
# want to restructure things so that the plotter is generic
# across the visual representation of the data. But at this
# point, we don't know the visual representation. Likely we
# want to change the logic of this Mapping so that it gives
                # points on a normalized range that then gets unnormalized
# when we know what we're drawing. But given the way the
# package works now, this way is cleanest.
sizes = self.plotter._default_size_range
# For categorical sizes, use regularly-spaced linear steps
# between the minimum and maximum sizes
sizes = np.linspace(*sizes, len(levels))
lookup_table = dict(zip(levels, sizes))
return levels, lookup_table
def numeric_mapping(self, data, sizes, norm):
if isinstance(sizes, dict):
# The presence of a norm object overrides a dictionary of sizes
# in specifying a numeric mapping, so we need to process it
# dictionary here
levels = list(np.sort(list(sizes)))
size_values = sizes.values()
size_range = min(size_values), max(size_values)
else:
# The levels here will be the unique values in the data
levels = list(np.sort(remove_na(data.unique())))
if isinstance(sizes, tuple):
# For numeric inputs, the size can be parametrized by
# the minimum and maximum artist values to map to. The
# norm object that gets set up next specifies how to
# do the mapping.
if len(sizes) != 2:
err = "A `sizes` tuple must have only 2 values"
raise ValueError(err)
size_range = sizes
elif sizes is not None:
err = f"Value for `sizes` not understood: {sizes}"
raise ValueError(err)
else:
# When not provided, we get the size range from the plotter
# object we are attached to. See the note in the categorical
                # method about how this is suboptimal for future development.
size_range = self.plotter._default_size_range
# Now that we know the minimum and maximum sizes that will get drawn,
# we need to map the data values that we have into that range. We will
# use a matplotlib Normalize class, which is typically used for numeric
# color mapping but works fine here too. It takes data values and maps
# them into a [0, 1] interval, potentially nonlinear-ly.
if norm is None:
# Default is a linear function between the min and max data values
norm = mpl.colors.Normalize()
elif isinstance(norm, tuple):
# It is also possible to give different limits in data space
norm = mpl.colors.Normalize(*norm)
elif not isinstance(norm, mpl.colors.Normalize):
err = f"Value for size `norm` parameter not understood: {norm}"
raise ValueError(err)
else:
# If provided with Normalize object, copy it so we can modify
norm = copy(norm)
# Set the mapping so all output values are in [0, 1]
norm.clip = True
# If the input range is not set, use the full range of the data
if not norm.scaled():
norm(levels)
# Map from data values to [0, 1] range
sizes_scaled = norm(levels)
# Now map from the scaled range into the artist units
if isinstance(sizes, dict):
lookup_table = sizes
else:
lo, hi = size_range
sizes = lo + sizes_scaled * (hi - lo)
lookup_table = dict(zip(levels, sizes))
return levels, lookup_table, norm
@share_init_params_with_map
class StyleMapping(SemanticMapping):
"""Mapping that sets artist style according to data values."""
# Style mapping is always treated as categorical
map_type = "categorical"
def __init__(
self, plotter, markers=None, dashes=None, order=None,
):
"""Map the levels of the `style` variable to distinct values.
Parameters
----------
# TODO add generic parameters
"""
super().__init__(plotter)
data = plotter.plot_data["style"]
if data.notna().any():
# Cast to list to handle numpy/pandas datetime quirks
if variable_type(data) == "datetime":
data = list(data)
# Find ordered unique values
levels = categorical_order(data, order)
markers = self._map_attributes(
markers, levels, unique_markers(len(levels)), "markers",
)
dashes = self._map_attributes(
dashes, levels, unique_dashes(len(levels)), "dashes",
)
# Build the paths matplotlib will use to draw the markers
paths = {}
filled_markers = []
for k, m in markers.items():
if not isinstance(m, mpl.markers.MarkerStyle):
m = mpl.markers.MarkerStyle(m)
paths[k] = m.get_path().transformed(m.get_transform())
filled_markers.append(m.is_filled())
# Mixture of filled and unfilled markers will show line art markers
# in the edge color, which defaults to white. This can be handled,
# but there would be additional complexity with specifying the
# weight of the line art markers without overwhelming the filled
# ones with the edges. So for now, we will disallow mixtures.
if any(filled_markers) and not all(filled_markers):
err = "Filled and line art markers cannot be mixed"
raise ValueError(err)
lookup_table = {}
for key in levels:
lookup_table[key] = {}
if markers:
lookup_table[key]["marker"] = markers[key]
lookup_table[key]["path"] = paths[key]
if dashes:
lookup_table[key]["dashes"] = dashes[key]
self.levels = levels
self.lookup_table = lookup_table
def _lookup_single(self, key, attr=None):
"""Get attribute(s) for a given data point."""
if attr is None:
value = self.lookup_table[key]
else:
value = self.lookup_table[key][attr]
return value
def _map_attributes(self, arg, levels, defaults, attr):
"""Handle the specification for a given style attribute."""
if arg is True:
lookup_table = dict(zip(levels, defaults))
elif isinstance(arg, dict):
missing = set(levels) - set(arg)
if missing:
err = f"These `{attr}` levels are missing values: {missing}"
raise ValueError(err)
lookup_table = arg
elif isinstance(arg, Sequence):
if len(levels) != len(arg):
err = f"The `{attr}` argument has the wrong number of values"
raise ValueError(err)
lookup_table = dict(zip(levels, arg))
elif arg:
err = f"This `{attr}` argument was not understood: {arg}"
raise ValueError(err)
else:
lookup_table = {}
return lookup_table
# =========================================================================== #
class VectorPlotter:
"""Base class for objects underlying *plot functions."""
_semantic_mappings = {
"hue": HueMapping,
"size": SizeMapping,
"style": StyleMapping,
}
# TODO units is another example of a non-mapping "semantic"
# we need a general name for this and separate handling
semantics = "x", "y", "hue", "size", "style", "units"
wide_structure = {
"x": "index", "y": "values", "hue": "columns", "style": "columns",
}
flat_structure = {"x": "index", "y": "values"}
_default_size_range = 1, 2 # Unused but needed in tests, ugh
def __init__(self, data=None, variables={}):
self.assign_variables(data, variables)
for var, cls in self._semantic_mappings.items():
if var in self.semantics:
# Create the mapping function
map_func = partial(cls.map, plotter=self)
setattr(self, f"map_{var}", map_func)
# Call the mapping function to initialize with default values
getattr(self, f"map_{var}")()
@classmethod
def get_semantics(cls, kwargs):
"""Subset a dictionary` arguments with known semantic variables."""
return {k: kwargs[k] for k in cls.semantics}
def assign_variables(self, data=None, variables={}):
"""Define plot variables, optionally using lookup from `data`."""
x = variables.get("x", None)
y = variables.get("y", None)
if x is None and y is None:
self.input_format = "wide"
plot_data, variables = self._assign_variables_wideform(
data, **variables,
)
else:
self.input_format = "long"
plot_data, variables = self._assign_variables_longform(
data, **variables,
)
self.plot_data = plot_data
self.variables = variables
self.var_types = {
v: variable_type(
plot_data[v],
boolean_type="numeric" if v in "xy" else "categorical"
)
for v in variables
}
return self
def _assign_variables_wideform(self, data=None, **kwargs):
"""Define plot variables given wide-form data.
Parameters
----------
data : flat vector or collection of vectors
Data can be a vector or mapping that is coerceable to a Series
or a sequence- or mapping-based collection of such vectors, or a
rectangular numpy array, or a Pandas DataFrame.
kwargs : variable -> data mappings
Behavior with keyword arguments is currently undefined.
Returns
-------
plot_data : :class:`pandas.DataFrame`
Long-form data object mapping seaborn variables (x, y, hue, ...)
to data vectors.
variables : dict
Keys are defined seaborn variables; values are names inferred from
the inputs (or None when no name can be determined).
"""
# TODO raise here if any kwarg values are not None,
# # if we decide for "structure-only" wide API
# First, determine if the data object actually has any data in it
empty = data is None or not len(data)
# Then, determine if we have "flat" data (a single vector)
if isinstance(data, dict):
values = data.values()
else:
values = np.atleast_1d(data)
flat = not any(
isinstance(v, Iterable) and not isinstance(v, (str, bytes))
for v in values
)
if empty:
# Make an object with the structure of plot_data, but empty
plot_data = pd.DataFrame(columns=self.semantics)
variables = {}
elif flat:
# Handle flat data by converting to pandas Series and using the
# index and/or values to define x and/or y
# (Could be accomplished with a more general to_series() interface)
flat_data = pd.Series(data).copy()
names = {
"values": flat_data.name,
"index": flat_data.index.name
}
plot_data = {}
variables = {}
for var in ["x", "y"]:
if var in self.flat_structure:
attr = self.flat_structure[var]
plot_data[var] = getattr(flat_data, attr)
variables[var] = names[self.flat_structure[var]]
plot_data = pd.DataFrame(plot_data).reindex(columns=self.semantics)
else:
# Otherwise assume we have some collection of vectors.
# Handle Python sequences such that entries end up in the columns,
# not in the rows, of the intermediate wide DataFrame.
# One way to accomplish this is to convert to a dict of Series.
if isinstance(data, Sequence):
data_dict = {}
for i, var in enumerate(data):
key = getattr(var, "name", i)
# TODO is there a safer/more generic way to ensure Series?
# sort of like np.asarray, but for pandas?
data_dict[key] = pd.Series(var)
data = data_dict
# Pandas requires that dict values either be Series objects
# or all have the same length, but we want to allow "ragged" inputs
if isinstance(data, Mapping):
data = {key: pd.Series(val) for key, val in data.items()}
# Otherwise, delegate to the pandas DataFrame constructor
# This is where we'd prefer to use a general interface that says
# "give me this data as a pandas DataFrame", so we can accept
# DataFrame objects from other libraries
wide_data = pd.DataFrame(data, copy=True)
# At this point we should reduce the dataframe to numeric cols
numeric_cols = wide_data.apply(variable_type) == "numeric"
wide_data = wide_data.loc[:, numeric_cols]
# Now melt the data to long form
melt_kws = {"var_name": "columns", "value_name": "values"}
if "index" in self.wide_structure.values():
melt_kws["id_vars"] = "index"
wide_data["index"] = wide_data.index.to_series()
plot_data = wide_data.melt(**melt_kws)
# Assign names corresponding to plot semantics
for var, attr in self.wide_structure.items():
plot_data[var] = plot_data[attr]
plot_data = plot_data.reindex(columns=self.semantics)
# Define the variable names
variables = {}
for var, attr in self.wide_structure.items():
obj = getattr(wide_data, attr)
variables[var] = getattr(obj, "name", None)
return plot_data, variables
def _assign_variables_longform(self, data=None, **kwargs):
"""Define plot variables given long-form data and/or vector inputs.
Parameters
----------
data : dict-like collection of vectors
Input data where variable names map to vector values.
kwargs : variable -> data mappings
Keys are seaborn variables (x, y, hue, ...) and values are vectors
in any format that can construct a :class:`pandas.DataFrame` or
names of columns or index levels in ``data``.
Returns
-------
plot_data : :class:`pandas.DataFrame`
Long-form data object mapping seaborn variables (x, y, hue, ...)
to data vectors.
variables : dict
Keys are defined seaborn variables; values are names inferred from
the inputs (or None when no name can be determined).
Raises
------
ValueError
When variables are strings that don't appear in ``data``.
"""
plot_data = {}
variables = {}
# Data is optional; all variables can be defined as vectors
if data is None:
data = {}
# TODO should we try a data.to_dict() or similar here to more
# generally accept objects with that interface?
# Note that dict(df) also works for pandas, and gives us what we
# want, whereas DataFrame.to_dict() gives a nested dict instead of
# a dict of series.
        # Variables can also be extracted from the index attribute
# TODO is this the most general way to enable it?
# There is no index.to_dict on multiindex, unfortunately
try:
index = data.index.to_frame()
except AttributeError:
index = {}
# The caller will determine the order of variables in plot_data
for key, val in kwargs.items():
if isinstance(val, (str, bytes)):
# String inputs trigger __getitem__
if val in data:
# First try to get an entry in the data object
plot_data[key] = data[val]
variables[key] = val
elif val in index:
# Failing that, try to get an entry in the index object
plot_data[key] = index[val]
variables[key] = val
else:
# We don't know what this name means
err = f"Could not interpret input '{val}'"
raise ValueError(err)
else:
# Otherwise, assume the value is itself a vector of data
# TODO check for 1D here or let pd.DataFrame raise?
plot_data[key] = val
# Try to infer the name of the variable
variables[key] = getattr(val, "name", None)
# Construct a tidy plot DataFrame. This will convert a number of
# types automatically, aligning on index in case of pandas objects
plot_data = pd.DataFrame(plot_data, columns=self.semantics)
# Reduce the variables dictionary to fields with valid data
variables = {
var: name
for var, name in variables.items()
if plot_data[var].notnull().any()
}
return plot_data, variables
def _semantic_subsets(
self, grouping_semantics, reverse=False, from_comp_data=False,
):
"""Generator for getting subsets of data defined by semantic variables.
Parameters
----------
grouping_semantics : list of strings
Semantic variables that define the subsets of data.
reverse : bool, optional
If True, reverse the order of iteration.
from_comp_data : bool, optional
If True, use self.comp_data rather than self.plot_data
Yields
------
sub_vars : dict
Keys are semantic names, values are the level of that semantic.
sub_data : :class:`pandas.DataFrame`
Subset of ``plot_data`` for this combination of semantic values.
"""
if isinstance(grouping_semantics, str):
grouping_semantics = [grouping_semantics]
# Reduce to the semantics used in this plot
grouping_semantics = [
var for var in grouping_semantics if var in self.variables
]
if from_comp_data:
data = self.comp_data
else:
data = self.plot_data
if grouping_semantics:
grouped_data = data.groupby(
grouping_semantics, sort=False, as_index=False
)
grouping_keys = []
for var in grouping_semantics:
# TODO this is messy, add "semantic levels" property?
map_obj = getattr(self, f"_{var}_map")
grouping_keys.append(map_obj.levels)
iter_keys = itertools.product(*grouping_keys)
if reverse:
iter_keys = reversed(list(iter_keys))
for key in iter_keys:
# Pandas fails with singleton tuple inputs
pd_key = key[0] if len(key) == 1 else key
try:
data_subset = grouped_data.get_group(pd_key)
except KeyError:
continue
yield dict(zip(grouping_semantics, key)), data_subset
else:
yield {}, data
@property
def comp_data(self):
"""Dataframe with numeric x and y, after unit conversion and log scaling."""
if not hasattr(self, "ax"):
# Probably a good idea, but will need a bunch of tests updated
# Most of these tests should just use the external interface
            # Then this can be re-enabled.
# raise AttributeError("No Axes attached to plotter")
return self.plot_data
if not hasattr(self, "_comp_data"):
comp_data = self.plot_data.copy(deep=False)
for var in "xy":
axis = getattr(self.ax, f"{var}axis")
comp_var = axis.convert_units(self.plot_data[var])
if axis.get_scale() == "log":
comp_var = np.log10(comp_var)
comp_data[var] = comp_var
self._comp_data = comp_data
return self._comp_data
def _attach(self, ax, allowed_types=None, log_scale=None):
"""Associate the plotter with a matplotlib Axes and initialize its units.
Parameters
----------
ax : :class:`matplotlib.axes.Axes`
Axes object that we will eventually plot onto.
allowed_types : str or list of str
If provided, raise when either the x or y variable does not have
one of the declared seaborn types.
log_scale : bool, number, or pair of bools or numbers
If not False, set the axes to use log scaling, with the given
base or defaulting to 10. If a tuple, interpreted as separate
arguments for the x and y axes.
"""
if allowed_types is None:
# TODO should we define this default somewhere?
allowed_types = ["numeric", "datetime", "categorical"]
elif isinstance(allowed_types, str):
allowed_types = [allowed_types]
for var in set("xy").intersection(self.variables):
# Check types of x/y variables
var_type = self.var_types[var]
if var_type not in allowed_types:
err = (
f"The {var} variable is {var_type}, but one of "
f"{allowed_types} is required"
)
raise TypeError(err)
# Register with the matplotlib unit conversion machinery
# TODO do we want to warn or raise if mixing units?
axis = getattr(ax, f"{var}axis")
seed_data = self.plot_data[var]
if var_type == "categorical":
seed_data = categorical_order(seed_data)
axis.update_units(seed_data)
# Possibly log-scale one or both axes
if log_scale is not None:
# Allow single value or x, y tuple
try:
scalex, scaley = log_scale
except TypeError:
scalex = log_scale if "x" in self.variables else False
scaley = log_scale if "y" in self.variables else False
for axis, scale in zip("xy", (scalex, scaley)):
if scale:
set_scale = getattr(ax, f"set_{axis}scale")
if scale is True:
set_scale("log")
else:
set_scale("log", **{f"base{axis}": scale})
self.ax = ax
def _add_axis_labels(self, ax, default_x="", default_y=""):
"""Add axis labels from internal variable names if not already existing."""
if not ax.get_xlabel():
ax.set_xlabel(self.variables.get("x", default_x))
if not ax.get_ylabel():
ax.set_ylabel(self.variables.get("y", default_y))
def variable_type(vector, boolean_type="numeric"):
"""Determine whether a vector contains numeric, categorical, or dateime data.
This function differs from the pandas typing API in two ways:
- Python sequences or object-typed PyData objects are considered numeric if
all of their entries are numeric.
- String or mixed-type data are considered categorical even if not
      explicitly represented as a :class:`pandas.api.types.CategoricalDtype`.
Parameters
----------
vector : :func:`pandas.Series`, :func:`numpy.ndarray`, or Python sequence
Input data to test.
    boolean_type : 'numeric' or 'categorical'
Type to use for vectors containing only 0s and 1s (and NAs).
Returns
-------
var_type : 'numeric', 'categorical', or 'datetime'
Name identifying the type of data in the vector.
"""
# Special-case all-na data, which is always "numeric"
if pd.isna(vector).all():
return "numeric"
# Special-case binary/boolean data, allow caller to determine
# This triggers a numpy warning when vector has strings/objects
# https://github.com/numpy/numpy/issues/6784
# Because we reduce with .all(), we are agnostic about whether the
# comparison returns a scalar or vector, so we will ignore the warning.
# It triggers a separate DeprecationWarning when the vector has datetimes:
# https://github.com/numpy/numpy/issues/13548
# This is considered a bug by numpy and will likely go away.
with warnings.catch_warnings():
warnings.simplefilter(
action='ignore', category=(FutureWarning, DeprecationWarning)
)
if np.isin(vector, [0, 1, np.nan]).all():
return boolean_type
# Defer to positive pandas tests
if pd.api.types.is_numeric_dtype(vector):
return "numeric"
if pd.api.types.is_categorical_dtype(vector):
return "categorical"
if pd.api.types.is_datetime64_dtype(vector):
return "datetime"
# --- If we get to here, we need to check the entries
# Check for a collection where everything is a number
def all_numeric(x):
for x_i in x:
if not isinstance(x_i, Number):
return False
return True
if all_numeric(vector):
return "numeric"
# Check for a collection where everything is a datetime
def all_datetime(x):
for x_i in x:
if not isinstance(x_i, (datetime, np.datetime64)):
return False
return True
if all_datetime(vector):
return "datetime"
# Otherwise, our final fallback is to consider things categorical
return "categorical"
def infer_orient(x=None, y=None, orient=None, require_numeric=True):
"""Determine how the plot should be oriented based on the data.
For historical reasons, the convention is to call a plot "horizontally"
or "vertically" oriented based on the axis representing its dependent
variable. Practically, this is used when determining the axis for
numerical aggregation.
    Parameters
    ----------
x, y : Vector data or None
Positional data vectors for the plot.
orient : string or None
Specified orientation, which must start with "v" or "h" if not None.
require_numeric : bool
If set, raise when the implied dependent variable is not numeric.
Returns
-------
orient : "v" or "h"
Raises
------
ValueError: When `orient` is not None and does not start with "h" or "v"
    TypeError: When dependent variable is not numeric, with `require_numeric`
"""
x_type = None if x is None else variable_type(x)
y_type = None if y is None else variable_type(y)
nonnumeric_dv_error = "{} orientation requires numeric `{}` variable."
single_var_warning = "{} orientation ignored with only `{}` specified."
if x is None:
if str(orient).startswith("h"):
warnings.warn(single_var_warning.format("Horizontal", "y"))
if require_numeric and y_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
return "v"
elif y is None:
if str(orient).startswith("v"):
warnings.warn(single_var_warning.format("Vertical", "x"))
if require_numeric and x_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
return "h"
elif str(orient).startswith("v"):
if require_numeric and y_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Vertical", "y"))
return "v"
elif str(orient).startswith("h"):
if require_numeric and x_type != "numeric":
raise TypeError(nonnumeric_dv_error.format("Horizontal", "x"))
return "h"
elif orient is not None:
raise ValueError(f"Value for `orient` not understood: {orient}")
elif x_type != "numeric" and y_type == "numeric":
return "v"
elif x_type == "numeric" and y_type != "numeric":
return "h"
elif require_numeric and "numeric" not in (x_type, y_type):
err = "Neither the `x` nor `y` variable appears to be numeric."
raise TypeError(err)
else:
return "v"
def unique_dashes(n):
"""Build an arbitrarily long list of unique dash styles for lines.
Parameters
----------
n : int
Number of unique dash specs to generate.
Returns
-------
dashes : list of strings or tuples
Valid arguments for the ``dashes`` parameter on
:class:`matplotlib.lines.Line2D`. The first spec is a solid
line (``""``), the remainder are sequences of long and short
dashes.
"""
# Start with dash specs that are well distinguishable
dashes = [
"",
(4, 1.5),
(1, 1),
(3, 1.25, 1.5, 1.25),
(5, 1, 1, 1),
]
    # Now programmatically build as many as we need
p = 3
while len(dashes) < n:
# Take combinations of long and short dashes
a = itertools.combinations_with_replacement([3, 1.25], p)
b = itertools.combinations_with_replacement([4, 1], p)
# Interleave the combinations, reversing one of the streams
segment_list = itertools.chain(*zip(
list(a)[1:-1][::-1],
list(b)[1:-1]
))
# Now insert the gaps
for segments in segment_list:
gap = min(segments)
spec = tuple(itertools.chain(*((seg, gap) for seg in segments)))
dashes.append(spec)
p += 1
return dashes[:n]
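# Quick illustration (added): the first few specs are fixed, e.g.
#   unique_dashes(3)  # -> ["", (4, 1.5), (1, 1)]
# i.e. a solid line followed by increasingly complex on/off dash patterns.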
def unique_markers(n):
"""Build an arbitrarily long list of unique marker styles for points.
Parameters
----------
n : int
Number of unique marker specs to generate.
Returns
-------
markers : list of string or tuples
Values for defining :class:`matplotlib.markers.MarkerStyle` objects.
All markers will be filled.
"""
# Start with marker specs that are well distinguishable
markers = [
"o",
"X",
(4, 0, 45),
"P",
(4, 0, 0),
(4, 1, 0),
"^",
(4, 1, 45),
"v",
]
# Now generate more from regular polygons of increasing order
s = 5
while len(markers) < n:
a = 360 / (s + 1) / 2
markers.extend([
(s + 1, 1, a),
(s + 1, 0, a),
(s, 1, 0),
(s, 0, 0),
])
s += 1
# Convert to MarkerStyle object, using only exactly what we need
# markers = [mpl.markers.MarkerStyle(m) for m in markers[:n]]
return markers[:n]
def categorical_order(vector, order=None):
"""Return a list of unique data values.
Determine an ordered list of levels in ``values``.
Parameters
----------
vector : list, array, Categorical, or Series
Vector of "categorical" values
order : list-like, optional
Desired order of category levels to override the order determined
from the ``values`` object.
Returns
-------
order : list
Ordered list of category levels not including null values.
"""
if order is None:
if hasattr(vector, "categories"):
order = vector.categories
else:
try:
order = vector.cat.categories
except (TypeError, AttributeError):
try:
order = vector.unique()
except AttributeError:
order = pd.unique(vector)
if variable_type(vector) == "numeric":
order = np.sort(order)
order = filter(pd.notnull, order)
return list(order)
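# Usage sketch (added; illustrative only):
#   categorical_order(pd.Series(["b", "a", "b"]))               # -> ["b", "a"] (appearance order)
#   categorical_order(pd.Series(["b", "a"]), order=["a", "b"])  # -> ["a", "b"]
#   categorical_order(pd.Series([3, 1, 2]))                     # -> [1, 2, 3] (numeric levels are sorted)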
| bsd-3-clause |
jeffwdoak/free_energies | free_energies/electronicdos.py | 1 | 14960 | #!/usr/bin/python
# electronicdos.py v0.5 5-16-2012 Jeff Doak jeff.w.doak@gmail.com
import numpy as np
from scipy.interpolate import UnivariateSpline
from scipy.integrate import quad
from scipy.optimize import fsolve
import sys, subprocess
BOLTZCONST = 8.617e-5 #eV/K
class ElectronicDOS:
"""
Class to calculate equilibrium carrier concentrations, as well as
equilibrium thermodynamic properties of an electronic density of states.
Class constants of ElectronicDOS:
- BOLTZCONST - Boltzmann's Constant (eV/K)
Instance attributes of ElectronicDOS:
- n_atoms - number of atoms of the unit cell from which the DOS was
calculated
- energy - numpy array of energies at which DOS is calculated (eV)
- dos_tot - numpy array of density of spin up and spin down allowed electron
states at each energy in the array energy (# states/eV/atom)
- dos_spin - numpy array of the difference in density between spin up and
spin down states (# states/eV/atom)
- e_min - minimum energy in numpy array energy (eV)
- e_max - maximum energy in numpy array energy (eV)
- e_fermi - zero Kelvin fermi energy for the electronic DOS (eV)
- step_size - energy difference between two consecutive points in the DOSCAR
file (eV)
- vbm - valence band maximum, set to e_fermi for metals (eV)
- cbm - conduction band minimum, set to e_fermi for metals (eV)
- band_gap - band gap around the fermi energy, zero for metals (eV)
- temp - numpy array of temperatures at which to calculate equilibrium
electron chemical potentials, electron concentrations, and hole
concentrations (K)
- mu_e - numpy array of electron chemical potentials calculated at each
temperature in temp (eV)
- num_e - numpy array of equilibrium electron concentrations calculated at
each temperature in temp (# e's/atom)
- num_h - numpy array of equilibrium hole concentrations calculated at each
temperature in temp (# h's/atom)
- E_el - numpy array of electronic energy calculated at each temperature
in temp (eV/atom)
- S_el - numpy array of electronic entropy calculated at each temperature
in temp (kB/atom)
- F_el - numpy array of electronic free energy calculated at each
temperature in temp (eV/atom)
"""
def __init__(self,input_,format=None):
if isinstance(input_,str):
try:
input_ = open(input_,'r')
except IOError:
print "Error reading input file."
print "Program will now exit!"
sys.exit(1)
if isinstance(input_,file):
if format == "ezvasp":
self.read_ezvasp_dos(input_)
else:
self.read_doscar(input_)
nelec = subprocess.Popen("grep NELECT OUTCAR",
shell=True,stdin=None,stdout=subprocess.PIPE).communicate()[0]
self.nelec = int(float(nelec.split()[2]))
self.get_bandgap()
# Calculate finite temperature properties
self.temp = np.linspace(0,2000,21)
self.mu_e = np.zeros_like(self.temp)
self.num_e = np.zeros_like(self.temp)
self.num_h = np.zeros_like(self.temp)
self.E_el = np.zeros_like(self.temp)
self.S_el = np.zeros_like(self.temp)
self.F_el = np.zeros_like(self.temp)
# Calculate E_el_0
self.E_el_0 = None
tol = 1e-5
for i in range(len(self.temp)):
if i < tol:
self.mu_e[i] = self.e_fermi
self.E_el[i] = 0.0
self.S_el[i] = 0.0
self.num_e[i] = 0.0
self.num_h[i] = 0.0
elif i > 0.0:
self.mu_e[i] = self.calc_mu_e(self.temp[i])
if self.E_el_0 == None:
self.E_el_0 = self.calc_E_el(self.mu_e[i],self.temp[i])
self.num_e[i] = self.n(self.mu_e[i],self.temp[i])
self.num_h[i] = self.p(self.mu_e[i],self.temp[i])
self.E_el[i] = (self.calc_E_el(self.mu_e[i],self.temp[i]))
self.S_el[i] = self.calc_S_el(self.mu_e[i],self.temp[i])
self.E_el[1:] = self.E_el[1:] - self.E_el_0
self.F_el = self.E_el - self.temp*BOLTZCONST*self.S_el
def read_doscar(self,input_):
"""
Reads in a doscar file to grab the density of states as a function of
energy. The argument input_ is assumed to be a file object.
"""
self.n_atoms = int(input_.readline().split()[0])
# Discard header information
for i in range(4):
input_.readline()
# Read in Fermi Energy
line = input_.readline().split()
self.e_max = float(line[0])
self.e_min = float(line[1])
self.e_fermi = float(line[3])
energy = []; dos_tot = []; dos_spin = []
for line in input_:
line = line.split()
energy.append(float(line[0]))
if len(line) == 3:
dos_tot.append(float(line[1])) # DOS includes spin up and down
dos_spin.append(0.0)
elif len(line) == 5:
dos_tot.append(float(line[1])+float(line[2]))
dos_spin.append(float(line[1])-float(line[2]))
self.energy = np.array(energy)
#self.dos_tot = np.array(dos_tot)/float(self.n_atoms)
self.dos_tot = np.array(dos_tot)
#self.dos_spin = np.array(dos_spin)/float(self.n_atoms)
self.dos_spin = np.array(dos_spin)
self.dos_spline = UnivariateSpline(self.energy,self.dos_tot)
def read_ezvasp_dos(self,input_):
"""
Reads an ezvasp-formatted dos.out file to get the electronic density of
        states. The argument input_ is assumed to be a file object.
"""
nions = subprocess.Popen("grep NIONS OUTCAR",
shell=True,stdin=None,stdout=subprocess.PIPE).communicate()[0]
self.n_atoms = int(float(nions.split()[-1]))
self.e_min = 0.0
line = input_.readline().split()
self.nelec = int(float(line[0]))
self.step_size = float(line[1])
self.scale = float(line[2])
energy = []; dos_tot = []
i = 0
for line in input_:
line = line.split()
dos_tot.append(float(line[0]))
energy.append(float(i)*self.step_size)
i += 1
self.energy = np.array(energy)
self.dos_tot = np.array(dos_tot)
self.dos_spin = np.zeros_like(self.dos_tot) # Change this for spin-polar
self.dos_spline = UnivariateSpline(self.energy,self.dos_tot)
self.e_max = self.energy[-1]
# Find the 0 Kelvin 'Fermi Energy' using ATAT's method
ne = 0.0
for i in range(len(self.dos_tot)):
ne += self.dos_tot[i]*self.step_size
e_fermi = self.energy[i]
if ne >= self.nelec:
break
self.e_fermi = e_fermi
def get_bandgap(self):
"""
Finds the band gap of a DOS around the fermi energy.
"""
self.step_size = self.energy[1] - self.energy[0]
i = 0
not_found = True
while not_found:
if self.energy[i] < self.e_fermi and self.dos_tot[i] > 1e-3:
bot = self.energy[i]
elif self.energy[i] > self.e_fermi and self.dos_tot[i] > 1e-3:
top = self.energy[i]
not_found = False
i += 1
if top - bot < 2*self.step_size:
self.vbm = self.cbm = self.e_fermi
self.band_gap = 0.0
else:
self.vbm = bot; self.cbm = top
self.band_gap = top - bot
def shift_energy(self,new_ref):
"""
Change the reference energy for all of the energy attributes.
"""
self.energy = self.energy - new_ref
self.e_min = self.e_min - new_ref
self.e_max = self.e_max - new_ref
self.e_fermi = self.e_fermi - new_ref
self.vbm = self.vbm - new_ref
self.cbm = self.cbm - new_ref
self.mu_e = self.mu_e - new_ref
#def sum_dos(self,weight,start,end,args=None):
def sum_dos(self,weight,start,end,args=None):
"""
Sums the density of states, dos, in the energy range [start,end], weighted
by the function weight, which takes as inputs energy and args.
"""
flag = False
sum = 0.
for i in range(len(self.energy)):
if flag:
sum += self.step_size*self.dos_tot[i]*weight(
self.energy[i],args)
if self.energy[i] > end:
break
elif self.energy[i] >= start:
flag = True
return sum
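    # Note (added for clarity): sum_dos evaluates the rectangle-rule sum
    #   sum over E_i in [start, end] of dos_tot(E_i) * weight(E_i, args) * step_size,
    # i.e. a discrete approximation of the weighted integral of the DOS.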
#def integrate_dos(self,weight,start,end,args=None,threshold=0.1):
def ium_dos(self,weight,start,end,args=None,threshold=0.1):
"""
Takes numpy arrays containing the energy and dos and integrates them over
the range [start,end] with the weighting function weight. Weight should take
        as an argument the energy being integrated and a list of other arguments args.
"""
def integrand(x,weight,args):
return self.dos_spline(x)*weight(x,args)
result = quad(
integrand,start,end,args=(weight,args),full_output=1,limit=350)
integral = result[0]
error = result[1]
#if error > integral*threshold:
# print "Numerical integration error is greater than"
# print str(threshold)+" of the integrated value."
# sys.exit(1)
return integral
def n(self,mu_e,T):
"""
Calculate the intrinsic number of conduction electrons per atom at an
electron chemical potential mu_e and temperature T.
"""
def fermi(x,args):
mu = args[0]; T = args[1]
return 1./(np.exp((x-mu)/(BOLTZCONST*T))+1.)
#n = self.integrate_dos(fermi,self.cbm,self.e_max,args=(mu_e,T))
#n = self.sum_dos(fermi,self.cbm,self.e_max,args=(mu_e,T))
n = self.sum_dos(fermi,mu_e,self.e_max,args=(mu_e,T))
return n
def p(self,mu_e,T):
"""
Calculate the intrinsic number of valence holes per atom at an electron
chemical potential of mu_e and temperature T.
"""
def fermi(x,args):
mu = args[0]; T = args[1]
return 1./(np.exp((mu-x)/(BOLTZCONST*T))+1.)
#p = self.integrate_dos(fermi,self.e_min,self.vbm,args=(mu_e,T))
#p = self.sum_dos(fermi,self.e_min,self.vbm,args=(mu_e,T))
p = self.sum_dos(fermi,self.e_min,mu_e,args=(mu_e,T))
return p
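    # Note (added for clarity): with f(E) = 1/(exp((E - mu)/kT) + 1) the
    # Fermi-Dirac occupation, n() sums dos(E)*f(E) over [mu_e, e_max]
    # (occupied conduction states) while p() sums dos(E)*(1 - f(E)) over
    # [e_min, mu_e] (empty valence states), both via sum_dos().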
def charge_neut2(self,mu_e,args):
def fermi(x,args):
mu = args[0]; T = args[1]
return 1./(np.exp((x-mu)/(BOLTZCONST*T))+1.)
T = args
n_sum = self.sum_dos(fermi,self.e_min,self.e_max,args=(mu_e,T))
return self.nelec - n_sum
def charge_neutrality(self,mu_e,args):
"""
Condition for charge neutrality for intrinsic doping in a perfect
semiconductor. This function should be overwritten for a more
complicated case.
"""
T = args # Args could also include atomic chemical potentials.
return self.p(mu_e,T) - self.n(mu_e,T)
def calc_mu_e(self,temp):
"""
Calculate the electron chemical potential at temperature temp using the
condition of charge neutrality.
"""
#mu_e = fsolve(self.charge_neutrality,self.e_fermi,args=(temp))
mu_e = fsolve(self.charge_neut2,self.e_fermi,args=(temp))
return mu_e
def calc_E_el(self,mu_e,T):
"""
Calculate the electronic energy at a temperature T and electron chemical
potential mu_e.
"""
def energy(x,args):
return x
def fermi_energy(x,args):
mu = args[0]; T = args[1]
if x-mu < -30.0*BOLTZCONST*T:
return x
elif x-mu > 30.0*BOLTZCONST*T:
return 0.0
else:
return x/(np.exp((x-mu)/(BOLTZCONST*T))+1.)
#E = self.integrate_dos(fermi_energy,self.e_min,self.e_max,args=(mu_e,T))
#E_0 = self.integrate_dos(
# fermi_energy,self.e_min,self.e_max,args=(mu_e,T))
E = self.sum_dos(fermi_energy,self.e_min,self.e_max,args=(mu_e,T))
#E_0 = self.sum_dos(energy,self.e_min,self.e_fermi,args=None)
return E
def calc_S_el(self,mu_e,T):
"""
Calculate the electronic entropy at an electron chemical potential mu_e
and temperature T.
"""
def weight(x,args):
mu = args[0]; T = args[1]
x = (x - mu)/(BOLTZCONST*T)
f = 1.0/(np.exp(x)+1)
if f > 1e-5 and (1.0 - f) > 1e-5:
return -f*np.log(f)-(1.-f)*np.log(1.-f)
else:
return 0.0
#f = -np.log(np.exp(x)+1)/(np.exp(x)+1)
#f += -np.log(np.exp(-x)+1)/(np.exp(-x)+1)
#return f
#S = self.integrate_dos(weight,self.e_min,self.e_max,args=(mu_e,T))
S = self.sum_dos(weight,self.e_min,self.e_max,args=(mu_e,T))
return S
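    # Note (added for clarity): this is the standard electronic entropy
    #   S_el = -sum_E dos(E) * [f*ln(f) + (1 - f)*ln(1 - f)] * step_size,
    # expressed in units of kB; the BOLTZCONST factor is applied later when
    # building F_el = E_el - T*kB*S_el.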
def fermi_dirac_dist(x,args):
"""
Calculates the Fermi-Dirac distribution for an energy x, temperature
args[0], and electron chemical potential args[1].
"""
T = args[0]; mu = args[1]
return 1./(np.exp((x-mu)/(BOLTZCONST*T))+1.)
def test2(argv):
doscar = ElectronicDOS(open(str(argv[0]),'r'))
T = 500
#n = doscar.integrate_dos(
# fermi_dirac_dist,doscar.cbm,doscar.e_max,args=(T,doscar.e_fermi))
p = doscar.p(doscar.e_fermi,T)
print p
def test3(argv):
format = None
if len(argv) > 1:
format = str(argv[1])
doscar = ElectronicDOS(open(str(argv[0]),'r'),format)
print doscar.temp
print doscar.num_e
print doscar.num_h
print doscar.E_el
print doscar.S_el
print doscar.F_el
def atat_test(argv):
format = None
if len(argv) > 1:
format = str(argv[1])
doscar = ElectronicDOS(open(str(argv[0]),'r'),format)
print doscar.E_el_0
for i in range(len(doscar.temp)):
print doscar.temp[i],doscar.mu_e[i],doscar.E_el[i],doscar.S_el[i],doscar.F_el[i]
def test1(argv):
import matplotlib.pyplot as plt
doscar = ElectronicDOS(open(str(argv[0]),'r'))
plt.plot(doscar.energy,doscar.dos_tot)
plt.show()
def main(argv):
import matplotlib.pyplot as plt
doscar = open(str(argv[0]))
e_fermi,energy,n_tot,n_spin = read_doscar(doscar)
plt.plot(energy,n_tot)
if len(argv) > 1:
doscar2 = open(str(argv[1]))
e_fermi2,energy2,n_tot2,n_spin2 = read_doscar(doscar2)
plt.plot(energy2,n_tot2)
plt.show()
if __name__ == "__main__":
import sys
#test3(sys.argv[1:])
atat_test(sys.argv[1:])
| mit |
gotomypc/scikit-learn | examples/linear_model/plot_sgd_iris.py | 286 | 2202 | """
========================================
Plot multi-class SGD on the iris dataset
========================================
Plot decision surface of multi-class SGD on iris dataset.
The hyperplanes corresponding to the three one-versus-all (OVA) classifiers
are represented by the dashed lines.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.linear_model import SGDClassifier
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features. We could
# avoid this ugly slicing by using a two-dim dataset
y = iris.target
colors = "bry"
# shuffle
idx = np.arange(X.shape[0])
np.random.seed(13)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
h = .02 # step size in the mesh
clf = SGDClassifier(alpha=0.001, n_iter=100).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('tight')
# Plot also the training points
for i, color in zip(clf.classes_, colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=color, label=iris.target_names[i],
cmap=plt.cm.Paired)
plt.title("Decision surface of multi-class SGD")
plt.axis('tight')
# Plot the three one-against-all classifiers
xmin, xmax = plt.xlim()
ymin, ymax = plt.ylim()
coef = clf.coef_
intercept = clf.intercept_
def plot_hyperplane(c, color):
def line(x0):
return (-(x0 * coef[c, 0]) - intercept[c]) / coef[c, 1]
plt.plot([xmin, xmax], [line(xmin), line(xmax)],
ls="--", color=color)
for i, color in zip(clf.classes_, colors):
plot_hyperplane(i, color)
plt.legend()
plt.show()
| bsd-3-clause |
giruenf/GRIPy | app/app_utils.py | 1 | 29345 | import re
import os
import json
import importlib
import timeit
import inspect
import collections
from enum import Enum
from pathlib import Path
import numpy as np
from matplotlib.cm import cmap_d
import wx
import app
import fileio
from classes.om.base.manager import ObjectManager
from app import log
class GripyBitmap(wx.Bitmap):
def __init__(self, path_to_bitmap=None):
if path_to_bitmap is None:
super().__init__()
return
if os.path.exists(path_to_bitmap):
full_file_name = path_to_bitmap
elif os.path.exists(os.path.join(app.ICONS_PATH, \
path_to_bitmap)):
full_file_name = os.path.join(app.ICONS_PATH, path_to_bitmap)
else:
raise Exception('ERROR: Wrong bitmap path [{}, {}].'.format( \
app.ICONS_PATH, path_to_bitmap)
)
super().__init__(full_file_name)
class GripyIcon(wx.Icon):
def __init__(self, path_to_bitmap=None, type_=wx.BITMAP_TYPE_ANY):
# print(PurePath(app.ICONS_PATH, path_to_bitmap), 'r')
if path_to_bitmap is not None:
if Path(path_to_bitmap).exists():
pass
elif Path(app.ICONS_PATH, path_to_bitmap).exists():
path_to_bitmap = Path(app.ICONS_PATH, path_to_bitmap)
else:
raise Exception('ERROR: Wrong bitmap path.')
super().__init__(path_to_bitmap, type_)
def calc_well_time_from_depth(event, well_uid):
OM = ObjectManager()
well = OM.get(well_uid)
vp = None
for log_obj in OM.list('log', well.uid):
if log_obj.datatype == 'Velocity':
vp = log_obj
break
if vp is None:
raise Exception('ERROR [calc_prof_tempo]: Vp log not found.')
index_set = OM.get(vp.index_set_uid)
md = index_set.get_z_axis_indexes_by_type('MD')[0]
#
if md.data[0] != 0.0:
return
owt = [0.0]
#
for idx in range(1, len(md.data)):
        if np.isnan(vp.data[idx - 1]):
raise Exception('ERROR [calc_prof_tempo]: Found np.nan on Vp[{}] '.format(idx - 1))
diff_prof = md.data[idx] - md.data[idx - 1]
value = (float(diff_prof) / vp.data[idx - 1]) * 1000.0 # To milliseconds
value = owt[idx - 1] + value
owt.append(value)
#
owt = np.array(owt)
twt = owt * 2.0
#
print('\nOWT:', owt)
#
owt_index = OM.new('data_index', 0, 'One Way Time', 'TIME', 'ms', data=owt)
OM.add(owt_index, index_set.uid)
#
twt_index = OM.new('data_index', 0, 'Two Way Time', 'TWT', 'ms', data=twt)
OM.add(twt_index, index_set.uid)
#
def load_segy(event, filename, new_obj_name='', comparators_list=None,
iline_byte=9, xline_byte=21, offset_byte=37, tid='seismic',
datatype='amplitude', parentuid=None):
OM = ObjectManager()
disableAll = wx.WindowDisabler()
wait = wx.BusyInfo("Loading SEG-Y file...")
#
try:
print("\nLoading SEG-Y file...")
segy_file = fileio.segy.SEGYFile(filename)
# segy_file.print_dump()
# """
segy_file.read(comparators_list)
segy_file.organize_3D_data(iline_byte, xline_byte, offset_byte)
#
print('segy_file.traces.shape:', segy_file.traces.shape)
#
#
seis_like_obj = OM.new(tid, segy_file.traces, name=new_obj_name,
datatype=datatype
)
if not OM.add(seis_like_obj, parentuid):
raise Exception('Object was not added. tid={}'.format(tid))
#
#
z_index = OM.new('data_index',
name='Time',
datatype='TWT',
unit='ms',
start=0.0,
step=(segy_file.sample_rate * 1000),
samples=segy_file.number_of_samples
)
OM.add(z_index, seis_like_obj.uid)
#
try:
offset_index = OM.new('data_index',
segy_file.dimensions[2],
name='Offset',
datatype='OFFSET',
unit='m'
)
OM.add(offset_index, seis_like_obj.uid)
next_dim = 2
except Exception as e:
next_dim = 1
#
xline_index = OM.new('data_index',
segy_file.dimensions[1],
name='X Line',
datatype='X_LINE'
)
if OM.add(xline_index, seis_like_obj.uid):
next_dim += 1
#
iline_index = OM.new('data_index',
segy_file.dimensions[0],
name='I Line',
datatype='I_LINE'
)
OM.add(iline_index, seis_like_obj.uid)
#
seis_like_obj._create_data_index_map(
[iline_index.uid],
[xline_index.uid],
[offset_index.uid],
[z_index.uid]
)
print('seis_like_obj.traces.shape:', seis_like_obj.data.shape)
# """
except Exception as e:
raise e
finally:
del wait
del disableAll
#
# TODO: Check for a better option in Python 3.6
#
CallerInfo = collections.namedtuple('CallerInfo',
['object_', 'class_', 'module', 'function_name',
'filename', 'line_number', 'line_code'
]
)
def get_callers_stack():
"""
Based on: https://gist.github.com/techtonik/2151727 with some
changes.
Get a list with caller modules, objects and functions in the stack
with list index 0 being the latest call.
Returns:
list(collections.namedtuple('CallerInfo',
['object_', 'class_', 'module', 'function_name',
'filename', 'line_number', 'line_code']))
"""
ret_list = []
print('app_utils.get_callers_stack')
try:
stack = inspect.stack()
for i in range(1, len(stack)):
fi = stack[i]
module_ = None
obj = fi.frame.f_locals.get('self', None)
class_ = fi.frame.f_locals.get('__class__', None)
if obj:
module_ = inspect.getmodule(obj)
if not class_ and obj:
class_ = obj.__class__
ret_list.append(
CallerInfo(object_=obj, class_=class_, module=module_,
function_name=fi.function, filename=fi.filename,
line_number=fi.lineno, line_code=fi.code_context,
# index=fi.index,
# traceback=traceback, f_locals=fi.frame.f_locals
)
)
if fi.frame.f_locals.get('__name__') == '__main__':
break
except Exception as e:
print(e)
raise
return ret_list
def get_class_full_name(obj):
try:
full_name = obj.__class__.__module__ + "." + obj.__class__.__name__
except Exception as e:
msg = 'ERROR in function app.app_utils.get_class_full_name().'
log.exception(msg)
raise e
return full_name
def get_string_from_function(function_):
if not callable(function_):
msg = 'ERROR: Given input is not a function: {}.'.format(str(function_))
log.error(msg)
raise Exception(msg)
return function_.__module__ + '.' + function_.__name__
def get_function_from_filename(full_filename, function_name):
try:
# print ('\nget_function_from_filename', full_filename, function_name)
if function_name == '<module>':
return None
rel_path = os.path.relpath(full_filename, app.BASE_PATH)
module_rel_path = os.path.splitext(rel_path)[0]
# print (module_rel_path)
module_str = '.'.join(module_rel_path.split(os.path.sep))
# print (module_str)
module_ = importlib.import_module(module_str)
# print (module_, function_name)
function_ = getattr(module_, function_name)
return function_
except:
raise
def get_function_from_string(fullpath_function):
try:
# print ('\nget_function_from_string:', fullpath_function)
module_str = '.'.join(fullpath_function.split('.')[:-1])
function_str = fullpath_function.split('.')[-1]
# print ('importing module:', module_str)
module_ = importlib.import_module(module_str)
# print ('getting function:', function_str, '\n')
function_ = getattr(module_, function_str)
return function_
except Exception as e:
msg = 'ERROR in function app.app_utils.get_function_from_string({}).'.format(fullpath_function)
log.exception(msg)
print(msg)
raise e
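# Example (added): get_function_from_string('math.sqrt')  # -> <built-in function sqrt>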
class Chronometer(object):
def __init__(self):
self.start_time = timeit.default_timer()
def end(self):
self.total = timeit.default_timer() - self.start_time
return 'Execution in {:0.3f}s'.format(self.total)
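# Minimal usage sketch (added; illustrative only):
#   chrono = Chronometer()
#   ...  # do some work
#   print(chrono.end())   # e.g. "Execution in 0.123s"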
# Phoenix DropTarget code
class DropTarget(wx.DropTarget):
def __init__(self, _test_func, callback=None):
wx.DropTarget.__init__(self)
self.data = wx.CustomDataObject('obj_uid')
self.SetDataObject(self.data)
self._test_func = _test_func
self._callback = callback
def OnDrop(self, x, y):
return True
def OnData(self, x, y, defResult):
obj_uid = self._get_object_uid()
if self._callback:
wx.CallAfter(self._callback, obj_uid)
return defResult
def OnDragOver(self, x, y, defResult):
obj_uid = self._get_object_uid()
if obj_uid:
if self._test_func(obj_uid):
return defResult
return wx.DragNone
def _get_object_uid(self):
if self.GetData():
obj_uid_bytes = self.data.GetData().tobytes()
obj_uid_str = obj_uid_bytes.decode()
if obj_uid_str:
obj_uid = parse_string_to_uid(obj_uid_str)
return obj_uid
return None
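# Hedged usage sketch (added): a widget would typically register this target as
#   some_window.SetDropTarget(DropTarget(accepts_uid, callback=on_uid_dropped))
# where accepts_uid(uid) -> bool and on_uid_dropped(uid) are hypothetical callables
# supplied by the caller.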
class GripyEnum(Enum):
def __repr__(self):
# return '{} object, name: {}, value: {}'.format(self.__class__, self.name, self.value)
return str(self.value)
def __eq__(self, other):
if type(other) is self.__class__:
return self.value is other.value
return self.value is other
def __lt__(self, other):
if type(other) is self.__class__:
return self.value < other.value
return self.value < other
def __le__(self, other):
return self.__eq__(other) or self.__lt__(other)
def __gt__(self, other):
if type(other) is self.__class__:
return self.value > other.value
return self.value > other
def __ge__(self, other):
return self.__eq__(other) or self.__gt__(other)
class GripyEnumBitwise(GripyEnum):
def __or__(self, other):
if type(other) is self.__class__:
return self.value | other.value
return self.value | other
def __ror__(self, other):
return self.__or__(other)
class WellPlotState(GripyEnum):
NORMAL_TOOL = 0
SELECTION_TOOL = 1
FLAGS = re.VERBOSE | re.MULTILINE | re.DOTALL
WHITESPACE = re.compile(r'[ \t\n\r]*', FLAGS)
WHITESPACE_STR = ' \t\n\r'
NUMBER_RE = re.compile(
r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
(re.VERBOSE | re.MULTILINE | re.DOTALL)
)
class GripyJSONEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, wx.Point):
return 'wx.Point' + str(obj)
elif isinstance(obj, wx.Size):
return 'wx.Size' + str(obj)
elif isinstance(obj, GripyEnum):
return str(obj.value)
elif callable(obj):
return get_string_from_function(obj)
try:
return str(obj)
except:
            return super().default(obj)
def clean_path_str(path):
# path = path.replace('\\' ,'/')
    path = path.encode('ascii', 'ignore').decode('ascii')  # drop non-ASCII characters so the path is safe to write
return path
def write_json_file(py_object, fullfilename):
fullfilename = clean_path_str(fullfilename)
fullfilename = os.path.normpath(fullfilename)
directory = os.path.dirname(fullfilename)
if not os.path.exists(directory):
os.makedirs(directory)
msg = 'App.app_utils.write_json_file has created directory: {}'.format(directory)
# log.debug(msg)
print(msg)
f = open(fullfilename, 'w')
f.write(json.dumps(py_object, indent=4, cls=GripyJSONEncoder))
f.close()
class GripyJSONDecoder(json.JSONDecoder):
def decode(self, s, _w=WHITESPACE.match):
self.scan_once = gripy_make_scanner(self)
return super(GripyJSONDecoder, self).decode(s, _w)
def gripy_make_scanner(context):
parse_object = context.parse_object
parse_array = context.parse_array
parse_string = context.parse_string
match_number = NUMBER_RE.match
# encoding = context.encoding
strict = context.strict
parse_float = context.parse_float
parse_int = context.parse_int
parse_constant = context.parse_constant
object_hook = context.object_hook
object_pairs_hook = context.object_pairs_hook
def _scan_once(string, idx):
try:
nextchar = string[idx]
except IndexError:
raise StopIteration
if nextchar == '"':
if string[idx:idx + 10] == '"wx.Point(':
return GripyJSONParser((string, idx + 10), _scan_once, wx.Point)
elif string[idx:idx + 9] == '"wx.Size(':
return GripyJSONParser((string, idx + 9), _scan_once, wx.Size)
return parse_string(string, idx + 1, strict)
elif nextchar == '{':
return parse_object((string, idx + 1), strict,
_scan_once, object_hook, object_pairs_hook)
elif nextchar == '[':
return parse_array((string, idx + 1), _scan_once)
elif nextchar == 'n' and string[idx:idx + 4] == 'null':
return None, idx + 4
elif nextchar == 't' and string[idx:idx + 4] == 'true':
return True, idx + 4
elif nextchar == 'f' and string[idx:idx + 5] == 'false':
return False, idx + 5
m = match_number(string, idx)
if m is not None:
integer, frac, exp = m.groups()
if frac or exp:
res = parse_float(integer + (frac or '') + (exp or ''))
else:
res = parse_int(integer)
return res, m.end()
elif nextchar == 'N' and string[idx:idx + 3] == 'NaN':
return parse_constant('NaN'), idx + 3
elif nextchar == 'I' and string[idx:idx + 8] == 'Infinity':
return parse_constant('Infinity'), idx + 8
elif nextchar == '-' and string[idx:idx + 9] == '-Infinity':
return parse_constant('-Infinity'), idx + 9
else:
raise StopIteration
return _scan_once
def GripyJSONParser(s_and_end, scan_once, _class, _w=WHITESPACE.match, _ws=WHITESPACE_STR):
s, end = s_and_end
values = []
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
# Look-ahead for trivial empty array
if nextchar == ']':
return values, end + 1
_append = values.append
while True:
try:
value, end = scan_once(s, end)
except StopIteration:
raise ValueError("Expecting object {}, {}".format(s, end))
_append(value)
nextchar = s[end:end + 1]
if nextchar in _ws:
end = _w(s, end + 1).end()
nextchar = s[end:end + 1]
end += 1
if nextchar == ')' and s[end:end + 1] == '"':
end += 1
break
elif nextchar != ',':
raise ValueError("Expecting ',' delimiter {}, {}".format(s, end))
try:
if s[end] in _ws:
end += 1
if s[end] in _ws:
end = _w(s, end + 1).end()
except IndexError:
pass
return _class(int(values[0]), int(values[1])), end
def read_json_file(full_filename):
# print ('\nread_json_file:', fullfilename, type(fullfilename))
# fullfilename = fullfilename.replace('\\' ,'/')
# fullfilename = fullfilename.encode('ascii', 'ignore') # in order to save unicode characters
# fullfilename = fullfilename.encode('string-escape')
json_file = open(full_filename, 'r')
state = json.load(json_file, cls=GripyJSONDecoder)
json_file.close()
return state
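# Illustrative usage sketch (not part of the original module): round-trip a dict
# holding wx geometry objects through the encoder/decoder defined above. The
# temporary file name is an arbitrary placeholder.
def _json_roundtrip_example(tmp_filename='gripy_json_example.json'):
    state = {'pos': wx.Point(10, 20), 'size': wx.Size(300, 200)}
    write_json_file(state, tmp_filename)      # serialized via GripyJSONEncoder
    restored = read_json_file(tmp_filename)   # wx.Point/wx.Size rebuilt by GripyJSONParser
    return restored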
def parse_string_to_uid(obj_uid_string):
"""
Parse a uid String (which may contains non uid characters like " and \) to
a uid tuple in a format (tid, oid).
Parameters
----------
obj_uid_string : str
The uid String.
Returns
-------
tuple
A pair (tid, oid) which can be a Gripy object identifier.
"""
try:
# print ('parse_string_to_uid:', obj_uid_string)
left_index = obj_uid_string.find('(')
right_index = obj_uid_string.rfind(')')
if left_index == -1 or right_index == -1:
return None
elif right_index < left_index:
return None
obj_uid_string = obj_uid_string[left_index + 1:right_index]
tid, oid = obj_uid_string.split(',')
tid = tid.strip('\'\" ')
oid = int(oid.strip('\'\" '))
return tid, oid
except:
raise
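# Illustrative usage sketch (not part of the original module): the uid strings
# below are made-up examples of the "(tid, oid)" format parsed above.
def _parse_string_to_uid_example():
    assert parse_string_to_uid('("well", 3)') == ('well', 3)
    assert parse_string_to_uid("('log', 12)") == ('log', 12)
    assert parse_string_to_uid('no parentheses here') is None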
def get_wx_colour_from_seq_string(seq_str):
# tuple or list
if seq_str.startswith('(') or seq_str.startswith('['):
seq_str = seq_str[1:-1]
val = tuple([int(c.strip()) for c in seq_str.split(',')])
color = wx.Colour(val)
print('_get_wx_colour:', color,
color.GetAsString(wx.C2S_HTML_SYNTAX))
return color
return None
# Have colormaps separated into categories:
# http://matplotlib.org/examples/color/colormaps_reference.html
"""
# MPL 1.4/1.5 COLORS
MPL_CATS_CMAPS = [('Perceptually Uniform Sequential', [
'viridis', 'plasma', 'inferno', 'magma']),
('Sequential', [
'Greys', 'Purples', 'Blues', 'Greens', 'Oranges', 'Reds',
'YlOrBr', 'YlOrRd', 'OrRd', 'PuRd', 'RdPu', 'BuPu',
'GnBu', 'PuBu', 'YlGnBu', 'PuBuGn', 'BuGn', 'YlGn']),
('Sequential (2)', [
'binary', 'gist_yarg', 'gist_gray', 'gray', 'bone', 'pink',
'spring', 'summer', 'autumn', 'winter', 'cool', 'Wistia',
'hot', 'afmhot', 'gist_heat', 'copper']),
('Diverging', [
'PiYG', 'PRGn', 'BrBG', 'PuOr', 'RdGy', 'RdBu',
'RdYlBu', 'RdYlGn', 'Spectral', 'coolwarm', 'bwr', 'seismic']),
('Qualitative', [
'Pastel1', 'Pastel2', 'Paired', 'Accent',
'Dark2', 'Set1', 'Set2', 'Set3',
'tab10', 'tab20', 'tab20b', 'tab20c']),
('Miscellaneous', [
'flag', 'prism', 'ocean', 'gist_earth', 'terrain', 'gist_stern',
'gnuplot', 'gnuplot2', 'CMRmap', 'cubehelix', 'brg', 'hsv',
'gist_rainbow', 'rainbow', 'jet', 'nipy_spectral', 'gist_ncar'])]
"""
# MPL 2.0 COLORS
MPL_CATS_CMAPS = [
('Perceptually Uniform Sequential',
['viridis', 'inferno', 'plasma', 'magma']
),
('Sequential',
['Blues', 'BuGn', 'BuPu', 'GnBu', 'Greens', 'Greys',
'Oranges', 'OrRd', 'PuBu', 'PuBuGn', 'PuRd', 'Purples',
'RdPu', 'Reds', 'YlGn', 'YlGnBu', 'YlOrBr', 'YlOrRd'
]
),
('Sequential (2)',
['afmhot', 'autumn', 'bone', 'cool', 'copper', 'gist_heat',
'gray', 'hot', 'pink', 'spring', 'summer', 'winter'
]
),
('Diverging',
['BrBG', 'bwr', 'coolwarm', 'PiYG', 'PRGn', 'PuOr',
'RdBu', 'RdGy', 'RdYlBu', 'RdYlGn', 'Spectral', 'seismic'
]
),
('Qualitative',
['Accent', 'Dark2', 'Paired', 'Pastel1', 'Pastel2', 'Set1',
'Set2', 'Set3', 'Vega10', 'Vega20', 'Vega20b', 'Vega20c'
]
),
('Miscellaneous',
['gist_earth', 'terrain', 'ocean', 'gist_stern', 'brg',
'CMRmap', 'cubehelix', 'gnuplot', 'gnuplot2', 'gist_ncar',
'nipy_spectral', 'jet', 'rainbow', 'gist_rainbow', 'hsv',
'flag', 'prism'
]
)
]
# MPL_COLORMAPS = [value for (key, values) in MPL_CATS_CMAPS for value in values]
MPL_COLORMAPS = sorted(cmap_d)
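# Illustrative helper sketch (not part of the original module): flatten the
# categorized colormap table above, e.g. to populate a combo box.
def _iter_mpl_cmaps_by_category():
    for category, cmap_names in MPL_CATS_CMAPS:
        for cmap_name in cmap_names:
            yield category, cmap_name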
"""
MPL_COLORMAPS = ['Accent', 'Accent_r', 'Blues', 'Blues_r', 'BrBG', 'BrBG_r',
'BuGn', 'BuGn_r', 'BuPu', 'BuPu_r', 'CMRmap', 'CMRmap_r',
'Dark2', 'Dark2_r', 'GnBu', 'GnBu_r', 'Greens', 'Greens_r',
'Greys', 'Greys_r', 'OrRd', 'OrRd_r', 'Oranges', 'Oranges_r',
'PRGn', 'PRGn_r', 'Paired', 'Paired_r', 'Pastel1', 'Pastel1_r',
'Pastel2', 'Pastel2_r', 'PiYG', 'PiYG_r', 'PuBu', 'PuBu_r',
'PuBuGn', 'PuBuGn_r', 'PuOr', 'PuOr_r', 'PuRd', 'PuRd_r',
'Purples', 'Purples_r', 'RdBu', 'RdBu_r', 'RdGy', 'RdGy_r',
'RdPu', 'RdPu_r', 'RdYlBu', 'RdYlBu_r', 'RdYlGn', 'RdYlGn_r',
'Reds', 'Reds_r', 'Set1', 'Set1_r', 'Set2', 'Set2_r',
'Set3', 'Set3_r', 'Spectral', 'Spectral_r', 'Vega10', 'Vega10_r',
'Vega20', 'Vega20_r', 'Vega20b', 'Vega20b_r', 'Vega20c', 'Vega20c_r',
'Wistia', 'Wistia_r', 'YlGn', 'YlGn_r', 'YlGnBu', 'YlGnBu_r',
'YlOrBr', 'YlOrBr_r', 'YlOrRd', 'YlOrRd_r',
'afmhot', 'afmhot_r', 'autumn', 'autumn_r', 'binary', 'binary_r',
'bone', 'bone_r', 'brg', 'brg_r', 'bwr', 'bwr_r',
'cool', 'cool_r', 'coolwarm', 'coolwarm_r', 'copper', 'copper_r',
'cubehelix', 'cubehelix_r', 'flag', 'flag_r',
'gist_earth', 'gist_earth_r', 'gist_gray', 'gist_gray_r',
'gist_heat', 'gist_heat_r', 'gist_ncar', 'gist_ncar_r',
'gist_rainbow', 'gist_rainbow_r', 'gist_stern', 'gist_stern_r',
'gist_yarg', 'gist_yarg_r', 'gnuplot', 'gnuplot2', 'gnuplot2_r',
'gnuplot_r', 'gray', 'gray_r', 'hot', 'hot_r', 'hsv', 'hsv_r',
'inferno', 'inferno_r', 'jet', 'jet_r', 'magma', 'magma_r',
'nipy_spectral', 'nipy_spectral_r', 'ocean', 'ocean_r',
'pink', 'pink_r', 'plasma', 'plasma_r', 'prism', 'prism_r',
'rainbow', 'rainbow_r', 'seismic', 'seismic_r',
'spectral', 'spectral_r', 'spring', 'spring_r',
'summer', 'summer_r', 'terrain', 'terrain_r',
'viridis', 'viridis_r', 'winter', 'winter_r']
"""
###############################################################################
###############################################################################
MPL_COLORS = collections.OrderedDict()
MPL_COLORS['Black'] = None
MPL_COLORS['Maroon'] = None
MPL_COLORS['Green'] = wx.Colour(0, 100, 0) # Dark Green
MPL_COLORS['Olive'] = wx.Colour(128, 128, 0)
MPL_COLORS['Navy'] = None
MPL_COLORS['Purple'] = None
MPL_COLORS['Teal'] = wx.Colour(0, 128, 128)
MPL_COLORS['Gray'] = None
MPL_COLORS['Silver'] = wx.Colour(192, 192, 192)
MPL_COLORS['Red'] = None
MPL_COLORS['Lime'] = wx.Colour(0, 255, 0) # Green
MPL_COLORS['Yellow'] = None
MPL_COLORS['Blue'] = None
MPL_COLORS['Fuchsia'] = wx.Colour(255, 0, 255)
MPL_COLORS['Aqua'] = wx.Colour(0, 255, 255)
MPL_COLORS['White'] = None
MPL_COLORS['SkyBlue'] = wx.Colour(135, 206, 235)
MPL_COLORS['LightGray'] = wx.Colour(211, 211, 211)
MPL_COLORS['DarkGray'] = wx.Colour(169, 169, 169)
MPL_COLORS['SlateGray'] = wx.Colour(112, 128, 144)
MPL_COLORS['DimGray'] = wx.Colour(105, 105, 105)
MPL_COLORS['BlueViolet'] = wx.Colour(138, 43, 226)
MPL_COLORS['DarkViolet'] = wx.Colour(148, 0, 211)
MPL_COLORS['Magenta'] = None
MPL_COLORS['DeepPink'] = wx.Colour(255, 20, 147)
MPL_COLORS['Brown'] = None
MPL_COLORS['Crimson'] = wx.Colour(220, 20, 60)
MPL_COLORS['Firebrick'] = None
MPL_COLORS['DarkRed'] = wx.Colour(139, 0, 0)
MPL_COLORS['DarkSlateGray'] = wx.Colour(47, 79, 79)
MPL_COLORS['DarkSlateBlue'] = wx.Colour(72, 61, 139)
MPL_COLORS['Wheat'] = None
MPL_COLORS['BurlyWood'] = wx.Colour(222, 184, 135)
MPL_COLORS['Tan'] = None
MPL_COLORS['Gold'] = None
MPL_COLORS['Orange'] = None
MPL_COLORS['DarkOrange'] = wx.Colour(255, 140, 0)
MPL_COLORS['Coral'] = None
MPL_COLORS['DarkKhaki'] = wx.Colour(189, 183, 107)
MPL_COLORS['GoldenRod'] = None
MPL_COLORS['DarkGoldenrod'] = wx.Colour(184, 134, 11)
MPL_COLORS['Chocolate'] = wx.Colour(210, 105, 30)
MPL_COLORS['Sienna'] = None
MPL_COLORS['SaddleBrown'] = wx.Colour(139, 69, 19)
MPL_COLORS['GreenYellow'] = wx.Colour(173, 255, 47)
MPL_COLORS['Chartreuse'] = wx.Colour(127, 255, 0)
MPL_COLORS['SpringGreen'] = wx.Colour(0, 255, 127)
MPL_COLORS['MediumSpringGreen'] = wx.Colour(0, 250, 154)
MPL_COLORS['MediumAquamarine'] = wx.Colour(102, 205, 170)
MPL_COLORS['LimeGreen'] = wx.Colour(50, 205, 50)
MPL_COLORS['LightSeaGreen'] = wx.Colour(32, 178, 170)
MPL_COLORS['MediumSeaGreen'] = wx.Colour(60, 179, 113)
MPL_COLORS['DarkSeaGreen'] = wx.Colour(143, 188, 143)
MPL_COLORS['SeaGreen'] = wx.Colour(46, 139, 87)
MPL_COLORS['ForestGreen'] = wx.Colour(34, 139, 34)
MPL_COLORS['DarkOliveGreen'] = wx.Colour(85, 107, 47)
MPL_COLORS['DarkGreen'] = wx.Colour(1, 50, 32)
MPL_COLORS['LightCyan'] = wx.Colour(224, 255, 255)
MPL_COLORS['Thistle'] = None
MPL_COLORS['PowderBlue'] = wx.Colour(176, 224, 230)
MPL_COLORS['LightSteelBlue'] = wx.Colour(176, 196, 222)
MPL_COLORS['LightSkyBlue'] = wx.Colour(135, 206, 250)
MPL_COLORS['MediumTurquoise'] = wx.Colour(72, 209, 204)
MPL_COLORS['Turquoise'] = None
MPL_COLORS['DarkTurquoise'] = wx.Colour(0, 206, 209)
MPL_COLORS['DeepSkyBlue'] = wx.Colour(0, 191, 255)
MPL_COLORS['DodgerBlue'] = wx.Colour(30, 144, 255)
MPL_COLORS['CornflowerBlue'] = wx.Colour(100, 149, 237)
MPL_COLORS['CadetBlue'] = wx.Colour(95, 158, 160)
MPL_COLORS['DarkCyan'] = wx.Colour(0, 139, 139)
MPL_COLORS['SteelBlue'] = wx.Colour(70, 130, 180)
MPL_COLORS['RoyalBlue'] = wx.Colour(65, 105, 225)
MPL_COLORS['SlateBlue'] = wx.Colour(106, 90, 205)
MPL_COLORS['DarkBlue'] = wx.Colour(0, 0, 139)
MPL_COLORS['MediumBlue'] = wx.Colour(0, 0, 205)
MPL_COLORS['SandyBrown'] = wx.Colour(244, 164, 96)
MPL_COLORS['DarkSalmon'] = wx.Colour(233, 150, 122)
MPL_COLORS['Salmon'] = None
MPL_COLORS['Tomato'] = wx.Colour(255, 99, 71)
MPL_COLORS['Violet'] = wx.Colour(238, 130, 238)
MPL_COLORS['HotPink'] = wx.Colour(255, 105, 180)
MPL_COLORS['RosyBrown'] = wx.Colour(188, 143, 143)
MPL_COLORS['MediumVioletRed'] = wx.Colour(199, 21, 133)
MPL_COLORS['DarkMagenta'] = wx.Colour(139, 0, 139)
MPL_COLORS['DarkOrchid'] = wx.Colour(153, 50, 204)
MPL_COLORS['Indigo'] = wx.Colour(75, 0, 130)
MPL_COLORS['MidnightBlue'] = wx.Colour(25, 25, 112)
MPL_COLORS['MediumSlateBlue'] = wx.Colour(123, 104, 238)
MPL_COLORS['MediumPurple'] = wx.Colour(147, 112, 219)
MPL_COLORS['MediumOrchid'] = wx.Colour(186, 85, 211)
MPL_COLORS = collections.OrderedDict(sorted(MPL_COLORS.items()))
###############################################################################
###############################################################################
# Based on https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html
# 10/September/2019 - Adriano Santana
MPL_LINESTYLES = collections.OrderedDict()
MPL_LINESTYLES['Solid'] = (0, ())
MPL_LINESTYLES['Dotted'] = (0, (1, 1))
MPL_LINESTYLES['Loosely dotted'] = (0, (1, 10))
MPL_LINESTYLES['Densely dotted'] = (0, (1, 1))
MPL_LINESTYLES['Dashed'] = (0, (5, 5))
MPL_LINESTYLES['Loosely dashed'] = (0, (5, 10))
MPL_LINESTYLES['Densely dashed'] = (0, (5, 1))
MPL_LINESTYLES['Dashdotted'] = (0, (3, 5, 1, 5))
MPL_LINESTYLES['Loosely dashdotted'] = (0, (3, 10, 1, 10))
MPL_LINESTYLES['Densely dashdotted'] = (0, (3, 1, 1, 1))
MPL_LINESTYLES['Dashdotdotted'] = (0, (3, 5, 1, 5, 1, 5))
MPL_LINESTYLES['Loosely dashdotdotted'] = (0, (3, 10, 1, 10, 1, 10))
MPL_LINESTYLES['Densely dashdotdotted'] = (0, (3, 1, 1, 1, 1, 1))
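# Illustrative preview sketch (not part of the original module): the dash tuples
# above can be passed directly to Matplotlib's `linestyle` argument. The figure
# layout and labels here are placeholders.
def _preview_mpl_linestyles():
    import numpy as np
    from matplotlib import pyplot as plt
    x = np.linspace(0.0, 2.0 * np.pi, 200)
    fig, ax = plt.subplots()
    for offset, (name, dashes) in enumerate(MPL_LINESTYLES.items()):
        ax.plot(x, np.sin(x) + 2.0 * offset, linestyle=dashes, label=name)
    ax.legend(fontsize='x-small', loc='upper right')
    return fig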
| apache-2.0 |
woutdenolf/spectrocrunch | spectrocrunch/visualization/tests/test_scene.py | 1 | 2667 | # -*- coding: utf-8 -*-
import unittest
import matplotlib.pyplot as plt
import numpy as np
from .. import scene
from ...patch.pint import ureg
class test_scene(unittest.TestCase):
def test_images(self):
n0, n1 = 5, 10
img = np.arange(n0 * n1).reshape(n0, n1)
unit0 = ureg.mm
unit1 = ureg.micrometer
s1 = scene.Scene(unit0=unit0, unit1=unit1)
s2 = scene.Scene(unit0=unit0, unit1=unit1)
s2.transpose(True)
# s2.flipx(increasing=True)
s2.axlabels = ["dim0", "dim1"]
s2.cmap = plt.get_cmap("gray")
o1 = scene.Image(
img, lim0=s1.q0([8, 8 + n0 - 1]), lim1=s1.q1([10 + n1 - 1, 10])
)
s1.register(o1)
s2.register(o1)
p0 = sorted(o1.datarange(0, border=False))
p1 = sorted(o1.datarange(1, border=False))
o = scene.Polyline([p0[0], p0[1], p0[1], p0[0]], [p1[0], p1[0], p1[1], p1[1]])
s1.register(o)
s2.register(o)
o.set_setting("scatter", True)
o2 = scene.Image(
img, lim0=s1.q0([-2, -2 + n0 - 1]), lim1=s1.q1([-1, -1 + n1 - 1])
)
s1.register(o2)
s2.register(o2)
o.set_setting("scatter", True)
p0 = sorted(o2.datarange(0, border=False))
p1 = sorted(o2.datarange(1, border=False))
o = scene.Text(
[p0[0], p0[1], p0[1], p0[0]],
[p1[0], p1[0], p1[1], p1[1]],
labels=[1, 2, 3, 4],
)
s1.register(o)
s2.register(o)
f, ax = plt.subplots()
s1.setaxes(ax)
f, ax = plt.subplots()
s2.setaxes(ax)
# Update scene 1
s1.updateview()
# Shift image, axes scaling and update scene 2
o1.lim[0] = s1.q0([9, 9 + n0 - 1])
s2.setdatarange(0, s1.q0([0, 1]))
s2.setdatarange(1, s1.q1([0, 1]))
s2.updateview()
# plt.pause(0.01)
# Update scene 1
s1.updateview()
# Reset axes of scene 1
f, ax = plt.subplots()
s1.setaxes(ax)
# Shift image, axes offset, different normalization and update scene 1
o1.lim[0] = s1.q0([9, 9 + n0 - 1])
s1.set_settings({"cnorm": "power", "cnormargs": (0.1,)})
s1.updateview()
# plt.pause(0.01)
# plt.show()
def test_suite():
"""Test suite including all test suites"""
testSuite = unittest.TestSuite()
testSuite.addTest(test_scene("test_images"))
return testSuite
if __name__ == "__main__":
import sys
mysuite = test_suite()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
sys.exit(1)
| mit |
adybbroe/atrain_match | atrain_match/reshaped_files_scr/plot_ctth_boxplots_mlvl2_temperature_pressure_height.py | 1 | 16002 | """Read all matched data and make some plotting
"""
import os
import re
from glob import glob
import numpy as np
from matchobject_io import (readCaliopImagerMatchObj,
CalipsoImagerTrackObject)
from plot_kuipers_on_area_util import (PerformancePlottingObject,
ppsMatch_Imager_CalipsoObject)
import matplotlib.pyplot as plt
import matplotlib
matplotlib.rcParams.update({'font.size': 16})
from utils.get_flag_info import get_calipso_clouds_of_type_i
from utils.get_flag_info import (get_semi_opaque_info_pps2014,
get_calipso_high_clouds,
get_calipso_medium_clouds,
get_calipso_low_clouds)
from my_dir import ADIR
def make_boxplot(caObj, name, month="xx", modis_lvl2=False, use_m2_pix=True):
low_clouds = get_calipso_low_clouds(caObj)
high_clouds = get_calipso_high_clouds(caObj)
medium_clouds = get_calipso_medium_clouds(caObj)
height_c = 1000*caObj.calipso.all_arrays['layer_top_altitude'][:,0]
cloud_elevation = 1000*caObj.calipso.all_arrays['layer_top_altitude'][:,0]-caObj.calipso.all_arrays['elevation']
if modis_lvl2:
height_imager = caObj.modis.all_arrays['height']
else:
height_imager = caObj.imager.all_arrays['imager_ctth_m_above_seasurface']
if height_imager is None:
height_imager = caObj.imager.all_arrays['ctth_height']+caObj.calipso.all_arrays['elevation']
use = np.logical_and(height_imager >-1,
height_c>=0)
use = np.logical_and(height_imager <45000,use)
USE_ONLY_PIXELS_WHERE_PPS_AND_MODIS_C6_HAVE_VALUES=use_m2_pix
if USE_ONLY_PIXELS_WHERE_PPS_AND_MODIS_C6_HAVE_VALUES:
height_mlvl2 = caObj.modis.all_arrays['height']
height_pps = caObj.imager.all_arrays['imager_ctth_m_above_seasurface']
use = np.logical_and(use, height_mlvl2>-1)
use = np.logical_and(use, height_mlvl2<45000)
use = np.logical_and(use, height_pps>-1)
use = np.logical_and(use, height_pps<45000)
thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.30,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
very_thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.10,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
thin_top = np.logical_and(caObj.calipso.all_arrays['number_layers_found']>1, thin)
thin_1_lay = np.logical_and(caObj.calipso.all_arrays['number_layers_found']==1, thin)
low = np.logical_and(low_clouds,use)
medium = np.logical_and(medium_clouds,use)
high = np.logical_and(high_clouds,use)
c_all = np.logical_or(high,np.logical_or(low,medium))
high_very_thin = np.logical_and(high, very_thin)
high_thin = np.logical_and(high, np.logical_and(~very_thin,thin))
high_thick = np.logical_and(high, ~thin)
#print "thin, thick high", np.sum(high_thin), np.sum(high_thick)
bias = height_imager - height_c
abias = np.abs(bias)
#abias[abias>2000]=2000
print name.ljust(30, " "), "%3.1f"%(np.mean(abias[c_all])), "%3.1f"%(np.mean(abias[low])),"%3.1f"%(np.mean(abias[medium])),"%3.1f"%(np.mean(abias[high]))
c_all = np.logical_or(np.logical_and(~very_thin,high),np.logical_or(low,medium))
number_of = np.sum(c_all)
MAE = np.mean(abias[c_all])
#print name.ljust(30, " "), "%3.1f"%(np.sum(abias[c_all]<250)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<2000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<3000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<4000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<5000)*100.0/number_of)
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
fig = plt.figure(figsize = (6,9))
ax = fig.add_subplot(111)
plt.xticks(rotation=70)
ax.fill_between(np.arange(0,8),-500,500, facecolor='green', alpha=0.6)
ax.fill_between(np.arange(0,8),-1000,1000, facecolor='green', alpha=0.4)
ax.fill_between(np.arange(0,8),-1500,1500, facecolor='green', alpha=0.2)
ax.fill_between(np.arange(0,8),2000,15000, facecolor='red', alpha=0.2)
ax.fill_between(np.arange(0,8),-2000,-15000, facecolor='red', alpha=0.2)
for y_val in [-5,-4,-3,-2,2,3,4,5]:
plt.plot(np.arange(0,8), y_val*1000 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), -10*1000 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
bplot = ax.boxplot([bias[low],bias[medium],bias[high],bias[high_thick],bias[high_thin],bias[high_very_thin]],whis=[5, 95],sym='',
labels=["low","medium","high-all","high-thick\n od>0.4","high-thin \n 0.1<od<0.4","high-vthin\n od<0.1"],showmeans=True, patch_artist=True)
ax.set_ylim(-14000,8000)
for box in bplot['boxes']:
box.set_facecolor('0.9')
plt.title("%s MAE = %3.0f"%(name,MAE))
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_%s_5_95_filt.png"%(name))
elevation_zero = np.logical_and(use,caObj.calipso.all_arrays['elevation']>5000)
low_clouds = height_c<2500
medium_clouds = np.logical_and(height_c>=2500, height_c<=5000)
high_clouds = height_c>5000
low = np.logical_and(low_clouds,use)
medium = np.logical_and(medium_clouds,use)
high = np.logical_and(high_clouds,use)
fig = plt.figure(figsize = (6,9))
ax = fig.add_subplot(111)
plt.xticks(rotation=50)
ax.fill_between(np.arange(0,8),-500,500, facecolor='green', alpha=0.6)
ax.fill_between(np.arange(0,8),-1000,1000, facecolor='green', alpha=0.4)
ax.fill_between(np.arange(0,8),-1500,1500, facecolor='green', alpha=0.2)
ax.fill_between(np.arange(0,8),2000,15000, facecolor='red', alpha=0.2)
ax.fill_between(np.arange(0,8),-2000,-15000, facecolor='red', alpha=0.2)
for y_val in [-5,-4,-3,-2,2,3,4,5]:
plt.plot(np.arange(0,8), y_val*1000 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), -10*1000 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
bplot = ax.boxplot([bias[low],bias[medium],bias[high], bias[elevation_zero]],whis=[5, 95],sym='',
labels=["low <2.5km","medium","high>5km", "ground>5km"],
showmeans=True, patch_artist=True)
ax.set_ylim(-8000,8000)
for box in bplot['boxes']:
box.set_facecolor('0.9')
plt.title("Calipso %s \nHeight bias comparison MAE= %3.0f"%(name, MAE))
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_hkm_%s_5_95_filt.png"%(name))
def make_boxplot_temperature(caObj, name, modis_lvl2=False):
low_clouds = get_calipso_low_clouds(caObj)
high_clouds = get_calipso_high_clouds(caObj)
medium_clouds = get_calipso_medium_clouds(caObj)
temp_c = caObj.calipso.all_arrays['layer_top_temperature'][:,0] +273.15
if modis_lvl2:
temp_pps = caObj.modis.all_arrays['temperature']
else:
temp_pps = caObj.imager.all_arrays['ctth_temperature']
if modis_lvl2:
height_pps = caObj.modis.all_arrays['height']
else:
height_pps = caObj.imager.all_arrays['ctth_height']
thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.30,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
very_thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.10,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
thin_top = np.logical_and(caObj.calipso.all_arrays['number_layers_found']>1, thin)
thin_1_lay = np.logical_and(caObj.calipso.all_arrays['number_layers_found']==1, thin)
use = np.logical_and(temp_pps >100,
caObj.calipso.all_arrays['layer_top_altitude'][:,0]>=0)
use = np.logical_and(height_pps <45000,use)
low = np.logical_and(low_clouds,use)
medium = np.logical_and(medium_clouds,use)
high = np.logical_and(high_clouds,use)
c_all = np.logical_or(high,np.logical_or(low,medium))
high_very_thin = np.logical_and(high, very_thin)
high_thin = np.logical_and(high, np.logical_and(~very_thin,thin))
high_thick = np.logical_and(high, ~thin)
#print "thin, thick high", np.sum(high_thin), np.sum(high_thick)
bias = temp_pps - temp_c
abias = np.abs(bias)
#abias[abias>2000]=2000
print name.ljust(30, " "), "%3.1f"%(np.mean(abias[c_all])), "%3.1f"%(np.mean(abias[low])),"%3.1f"%(np.mean(abias[medium])),"%3.1f"%(np.mean(abias[high]))
c_all = np.logical_or(np.logical_and(~very_thin,high),np.logical_or(low,medium))
number_of = np.sum(c_all)
#print name.ljust(30, " "), "%3.1f"%(np.sum(abias[c_all]<250)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<2000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<3000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<4000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<5000)*100.0/number_of)
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
fig = plt.figure(figsize = (6,9))
ax = fig.add_subplot(111)
plt.xticks(rotation=70)
ax.fill_between(np.arange(0,8),-2.5,2.5, facecolor='green', alpha=0.6)
ax.fill_between(np.arange(0,8),-5,5, facecolor='green', alpha=0.4)
ax.fill_between(np.arange(0,8),-7.5,7.5, facecolor='green', alpha=0.2)
ax.fill_between(np.arange(0,8),10,150, facecolor='red', alpha=0.2)
ax.fill_between(np.arange(0,8),-20,-10, facecolor='red', alpha=0.2)
for y_val in [-5,-4,-3,-2,-1,1,2,3,4,5]:
plt.plot(np.arange(0,8), y_val*20 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
bplot = ax.boxplot([bias[low],bias[medium],bias[high],bias[high_thick],bias[high_thin],bias[high_very_thin]],whis=[5, 95],sym='',
labels=["low","medium","high-all","high-thick\n od>0.4","high-thin \n 0.1<od<0.4","high-vthin\n od<0.1"],showmeans=True, patch_artist=True)
ax.set_ylim(-20,100)
for box in bplot['boxes']:
box.set_facecolor('0.9')
plt.title(name)
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_temperature_%s_5_95_filt.png"%(name))
def make_boxplot_pressure(caObj, name, modis_lvl2=False):
low_clouds = get_calipso_low_clouds(caObj)
high_clouds = get_calipso_high_clouds(caObj)
medium_clouds = get_calipso_medium_clouds(caObj)
pressure_c = caObj.calipso.all_arrays['layer_top_pressure'][:,0]
if modis_lvl2:
pressure_pps = caObj.modis.all_arrays['pressure']
else:
pressure_pps = 0.01*caObj.imager.all_arrays['ctth_pressure']
if modis_lvl2:
height_pps = caObj.modis.all_arrays['height']
else:
height_pps = caObj.imager.all_arrays['ctth_height']
thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.30,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
very_thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.10,
caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0)
thin_top = np.logical_and(caObj.calipso.all_arrays['number_layers_found']>1, thin)
thin_1_lay = np.logical_and(caObj.calipso.all_arrays['number_layers_found']==1, thin)
use = np.logical_and(pressure_pps >0,
caObj.calipso.all_arrays['layer_top_altitude'][:,0]>=0)
low = np.logical_and(low_clouds,use)
medium = np.logical_and(medium_clouds,use)
high = np.logical_and(high_clouds,use)
c_all = np.logical_or(high,np.logical_or(low,medium))
high_very_thin = np.logical_and(high, very_thin)
high_thin = np.logical_and(high, np.logical_and(~very_thin,thin))
high_thick = np.logical_and(high, ~thin)
#print "thin, thick high", np.sum(high_thin), np.sum(high_thick)
bias = pressure_pps - pressure_c
abias = np.abs(bias)
#abias[abias>2000]=2000
print name.ljust(30, " "), "%3.1f"%(np.mean(abias[c_all])), "%3.1f"%(np.mean(abias[low])),"%3.1f"%(np.mean(abias[medium])),"%3.1f"%(np.mean(abias[high]))
c_all = np.logical_or(np.logical_and(~very_thin,high),np.logical_or(low,medium))
number_of = np.sum(c_all)
#print name.ljust(30, " "), "%3.1f"%(np.sum(abias[c_all]<250)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<2000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<3000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<4000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<5000)*100.0/number_of)
from matplotlib import rcParams
rcParams.update({'figure.autolayout': True})
fig = plt.figure(figsize = (6,9))
ax = fig.add_subplot(111)
plt.xticks(rotation=70)
ax.fill_between(np.arange(0,8),-50,50, facecolor='green', alpha=0.6)
ax.fill_between(np.arange(0,8),-100,100, facecolor='green', alpha=0.4)
ax.fill_between(np.arange(0,8),-150,150, facecolor='green', alpha=0.2)
ax.fill_between(np.arange(0,8),200,2000, facecolor='red', alpha=0.2)
ax.fill_between(np.arange(0,8),-2000,-200, facecolor='red', alpha=0.2)
for y_val in [-6,-4,-2,2,4,6,8,-8]:
plt.plot(np.arange(0,8), y_val*100 + 0*np.arange(0,8),':k', alpha=0.4)
plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
bplot = ax.boxplot([bias[low],bias[medium],bias[high],bias[high_thick],bias[high_thin],bias[high_very_thin]],whis=[5, 95],sym='',
labels=["low","medium","high-all","high-thick\n od>0.4","high-thin \n 0.1<od<0.4","high-vthin\n od<0.1"],showmeans=True, patch_artist=True)
ax.set_ylim(-1000,800)
for box in bplot['boxes']:
box.set_facecolor('0.9')
plt.title(name)
plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_pressure_%s_5_95_filt.png"%(name))
def investigate_nn_ctth_modis_lvl2():
#november
ROOT_DIR_MODIS_nn_imager = (
ADIR + "/DATA_MISC/reshaped_files/"
"global_modis_14th_created20170324/Reshaped_Files_merged/eos2/1km/2010/%s/*h5")
ROOT_DIR_MODIS_old = (
ADIR + "/DATA_MISC/reshaped_files/"
"global_modis_14th_created20161108/Reshaped_Files/merged/*%s*h5")
for month in [ "06", "09", "01"]:
for ROOT_DIR, name in zip(
[ROOT_DIR_MODIS_nn_imager,
ROOT_DIR_MODIS_nn_imager,
ROOT_DIR_MODIS_old],
["modis_nnIMAGER",
"modis_lvl2_C6",
"modis_CTTHold"]):
name = "%s_%s"%(name, month)
print ROOT_DIR
files = glob(ROOT_DIR%(month))
caObj = CalipsoImagerTrackObject()
for filename in files:
#print filename
caObj += readCaliopImagerMatchObj(filename)
modis_lvl2 = False
if "modis_lvl2" in name:
modis_lvl2 = True
use_m2_pix=True
if "old" in name:
use_m2_pix=False
make_boxplot(caObj, name, month = month, modis_lvl2=modis_lvl2, use_m2_pix=use_m2_pix)
make_boxplot_pressure(caObj, name, modis_lvl2=modis_lvl2)
make_boxplot_temperature(caObj, name, modis_lvl2=modis_lvl2)
if __name__ == "__main__":
investigate_nn_ctth_modis_lvl2()
| gpl-3.0 |
afruizc/microsoft_malware_challenge | src/models/first_model/get_conf_matrix.py | 2 | 2842 | """
This is a script that is used to generate a confusion matrix for
a classification method. It uses 10-fold cross-validation in order
to provide sensible results and not overfit.
"""
__author__ = "Andres Ruiz"
__license__ = "Apache"
__email__ = "afruizc __thingy__ cs unm edu"
import numpy as np
from sklearn.cross_validation import KFold
from sklearn.metrics import confusion_matrix, accuracy_score, log_loss
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import svm_bow
def plot_confusion_matrix(cm, title='Confusion matrix', normalized=True,
cmap=plt.cm.Oranges, save_file=""):
"""
    Displays the confusion matrix indicated by `cm`. If the argument
    `normalized` is True, then the matrix is normalized. Optionally
    the image can be saved to a file.
Arguments:
----------
`cm`: The confusion matrix to be displayed.
`title`: The title for the window.
`normalized`: If True, normalizes the matrix before showing it.
`cmap`: Colormap to use.
`save_file`: If string different than empty, the resulting image is
stored in such file.
"""
if normalized:
cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
if save_file:
plt.savefig(save_file)
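def _plot_confusion_matrix_example():
    """Illustrative sketch (not part of the original script): plot a small
    hand-made confusion matrix. The class counts and file name are made up."""
    cm = np.array([[50, 2, 1],
                   [3, 45, 4],
                   [0, 5, 40]])
    plot_confusion_matrix(cm, title='Example confusion matrix',
                          normalized=True, save_file='example_cm.png')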
def get_indices(data, indices):
result = []
for i in indices:
result.append(data[i])
return result
def main():
e = svm_bow.Executor()
e.load_data()
e.config_model()
fold = KFold(len(e.train['data']), n_folds=10)
conf_mat_avg = np.zeros((9, 9))
c = 0
for train, test in fold:
X_train = get_indices(e.train['data'], train)
X_test = get_indices(e.train['data'], test)
y_train = get_indices(e.train['target'], train)
y_test = get_indices(e.train['target'], test)
c += 1
print("Fitting run {}.".format(c))
model = e.param_tunning.fit(X_train, y_train)
print("Predicting...")
y_pred = model.predict(X_test)
y_pred_prob = model.predict_proba(X_test)
conf_matrix = confusion_matrix(y_test, y_pred)
        accuracy = accuracy_score(y_test, y_pred)
loss = log_loss(y_test, y_pred_prob)
plot_confusion_matrix(conf_matrix,
save_file='fold_{}.png'.format(c))
np.savetxt('conf_matrix_fold{}'.format(c), conf_matrix)
print("Fold %d. Accuracy: %lf Loss: %lf" % (c, accruacy, loss))
conf_mat_avg += conf_matrix
np.savetxt('conf_matrix.txt', conf_mat_avg)
conf_mat_avg /= 10.0
plot_confusion_matrix(conf_mat_avg, save_file='final_cm.png')
if __name__ == '__main__':
main()
| apache-2.0 |
hmtai6/universe_NeonRace-v0 | DQN_breakout/DQN.py | 1 | 9601 | import argparse
import logging
import sys
import gc
import cv2
import matplotlib.pyplot as plt
import gym
import universe # register the universe environments
from universe import wrappers
from collections import deque
from skimage.color import rgb2gray
from skimage.transform import resize
import numpy as np
import tensorflow as tf
import time
import gym, time, random, threading
from keras.models import *
from keras.layers import *
from keras import backend as K
from keras.models import load_model
LEARNING_RATE = 0.005
MOMENTUM = 0.2
MIN_GRAD = 0.0001
ENV_NAME = 'break_out'
SAVE_SUMMARY_PATH = './logs'
SAVE_NETWORK_PATH = './network'
LOAD_NETWORK = False
INITIAL_REPLAY_SIZE = 200000 # Nb steps for memory, before training
NUM_REPLAY_MEMORY = 400000 # Number of replay memory the agent uses for training
TRAIN_INTERVAL = 1000
GAMMA = 0.99 # Discount factor
STATE_LENGTH = 4 # Number of most recent frames to produce the input to the network
FRAME_WIDTH = 84
FRAME_HEIGHT = 84
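# The training code below also refers to BATCH_SIZE, TARGET_UPDATE_INTERVAL and
# SAVE_INTERVAL, which are not defined elsewhere in this file. The values here
# are assumed, commonly used DQN settings rather than the original author's.
BATCH_SIZE = 32  # Mini-batch size sampled from replay memory
TARGET_UPDATE_INTERVAL = 10000  # Steps between target network updates
SAVE_INTERVAL = 300000  # Steps between network checkpoints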
class DQN:
def __init__(self, input_shape, nb_actions,
init_epsilon=1.0,
final_epsilon=0.1,
exploration_steps=1000000):
self.input_shape = input_shape
self.nb_actions = nb_actions
self.final_epsilon = final_epsilon
self.epsilon = init_epsilon
self.epsilon_step = (init_epsilon - final_epsilon) / exploration_steps
self.t = 0
# Parameters used for summary
self.total_reward = 0
self.total_q_max = 0
self.total_loss = 0
self.duration = 0
self.episode = 0
# create replay memory
self.replay_memory = deque()
# create network
self.state, self.q_vals, self.network = self._build_network()
q_network_weights = self.network.trainable_weights
# create target network
self.state_t, self.q_vals_t, self.network_t = self._build_network()
q_network_weights_t = self.network_t.trainable_weights
# define copy operation
self.update_target_network = [q_network_weights_t[i].assign(q_network_weights[i]) for i in range(len(q_network_weights_t))]
# Define loss and gradient update operation
self.a, self.y, self.loss, self.grads_update = self._build_train_op(q_network_weights)
self.sess = tf.InteractiveSession()
self.saver = tf.train.Saver(q_network_weights)
self.summary_placeholders, self.update_ops, self.summary_op = self._build_summary()
self.summary_writer = tf.summary.FileWriter(SAVE_SUMMARY_PATH, self.sess.graph)
if not os.path.exists(SAVE_NETWORK_PATH):
os.makedirs(SAVE_NETWORK_PATH)
self.sess.run(tf.global_variables_initializer())
        if LOAD_NETWORK:
            self.load_network()
self.sess.run(self.update_target_network)
def _build_network(self):
model = Sequential()
model.add(Conv2D(32, 8, strides=(4, 4), activation='relu', input_shape=[self.input_shape[0], self.input_shape[1], self.input_shape[2]]))
model.add(Conv2D(64, 4, strides=(2, 2), activation='relu'))
model.add(Conv2D(64, 3, strides=(1, 1), activation='relu'))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dense(self.nb_actions))
state = tf.placeholder(tf.float32, [None, self.input_shape[0], self.input_shape[1], self.input_shape[2]])
q_vals = model(state)
return state, q_vals, model
def _build_train_op(self, network_weights):
a = tf.placeholder(tf.int64, [None])
y = tf.placeholder(tf.float32, [None])
# convert into to one hot
a_one_hot = tf.one_hot(a, self.nb_actions, 1.0, 0.)
q_value = tf.reduce_sum(tf.multiply(self.q_vals, a_one_hot), reduction_indices=1)
# clip the error
error = tf.abs(y - q_value)
clipped = tf.clip_by_value(error, 0.0, 1.0)
linear = error - clipped
loss = tf.reduce_mean(0.5 * tf.square(clipped) + linear)
rms_optimizer = tf.train.RMSPropOptimizer(LEARNING_RATE, momentum=MOMENTUM, epsilon=MIN_GRAD)
grads_update = rms_optimizer.minimize(loss, var_list=network_weights)
return a, y, loss, grads_update
def get_initial_state(self, observation, last_observation):
processed_observation = np.maximum(observation, last_observation)
processed_observation = np.uint8(resize(rgb2gray(processed_observation), (FRAME_WIDTH, FRAME_HEIGHT)) * 255)
state = [processed_observation for _ in range(STATE_LENGTH)]
return np.stack(state, axis=0)
def _build_summary(self):
# Parameters used for summary
self.total_reward = 0
self.total_q_max = 0
self.total_loss = 0
self.duration = 0
self.episode = 0
episode_total_reward = tf.Variable(0.)
tf.summary.scalar(ENV_NAME + '/Total Reward/Episode', episode_total_reward)
episode_avg_max_q = tf.Variable(0.)
tf.summary.scalar(ENV_NAME + '/Average Max Q/Episode', episode_avg_max_q)
episode_duration = tf.Variable(0.)
tf.summary.scalar(ENV_NAME + '/Duration/Episode', episode_duration)
episode_avg_loss = tf.Variable(0.)
tf.summary.scalar(ENV_NAME + '/Average Loss/Episode', episode_avg_loss)
summary_vars = [episode_total_reward, episode_avg_max_q, episode_duration, episode_avg_loss]
summary_placeholders = [tf.placeholder(tf.float32) for _ in range(len(summary_vars))]
update_ops = [summary_vars[i].assign(summary_placeholders[i]) for i in range(len(summary_vars))]
summary_op = tf.summary.merge_all()
return summary_placeholders, update_ops, summary_op
def load_network(self):
checkpoint = tf.train.get_checkpoint_state(SAVE_NETWORK_PATH)
if checkpoint and checkpoint.model_checkpoint_path:
self.saver.restore(self.sess, checkpoint.model_checkpoint_path)
print('Successfully loaded: ' + checkpoint.model_checkpoint_path)
else:
print('Training new network...')
def get_action_test(self, state):
return np.argmax(self.q_values.eval(feed_dict={self.s: [np.float32(state / 255.0)]}))
def get_action(self, state):
if self.epsilon >= random.random() or self.t < INITIAL_REPLAY_SIZE:
action = random.randrange(self.nb_actions)
else:
            action = np.argmax(self.q_vals.eval(feed_dict={self.state: [np.float32(state / 255.0)]}))
# Anneal epsilon linearly over time
if self.epsilon > self.final_epsilon and self.t >= INITIAL_REPLAY_SIZE:
self.epsilon -= self.epsilon_step
return action
def _train(self):
s_batch = []
a_batch = []
r_batch = []
s__batch = []
t_batch = []
y_batch = []
# sample from memory
minibatch = random.sample(self.replay_memory, BATCH_SIZE)
for data in minibatch:
s_batch.append(data[0])
a_batch.append(data[1])
r_batch.append(data[2])
s__batch.append(data[3])
t_batch.append(data[4])
# bool to int
t_batch = np.array(t_batch) + 0
        next_actions_batch = np.argmax(self.q_vals.eval(feed_dict={self.state: s__batch}), axis=1)
        target_q_values_batch = self.q_vals_t.eval(feed_dict={self.state_t: s__batch})
for i in range(len(minibatch)):
y_batch.append(r_batch[i] + (1 - t_batch[i]) * GAMMA * target_q_values_batch[i][next_actions_batch[i]])
loss, _ = self.sess.run([self.loss, self.grads_update], feed_dict={
            self.state: np.float32(np.array(s_batch) / 255.0),
self.a: a_batch,
self.y: y_batch
})
self.total_loss += loss
def add_memory(self, s, a, r, t, s_):
next_state = np.append(s[1:, :, :], s_, axis=0)
# clip reward into -1,1
reward = np.clip(r, -1, 1)
# add into replay memory
        self.replay_memory.append((s, a, reward, next_state, t))
if len(self.replay_memory) > NUM_REPLAY_MEMORY :
self.replay_memory.popleft()
if self.t > INITIAL_REPLAY_SIZE:
# train network
if self.t % TRAIN_INTERVAL == 0:
self._train()
# update target network
if self.t % TARGET_UPDATE_INTERVAL == 0:
self.sess.run(self.update_target_network)
# save network
if self.t % SAVE_INTERVAL == 0:
s_path = self.saver.save(self.sess, SAVE_NETWORK_PATH, global_step=self.t)
print('saved network')
self.total_reward += reward
        self.total_q_max += np.max(self.q_vals.eval(feed_dict={self.state: [np.float32(s / 255.0)]}))
self.duration += 1
if t:
# write summary
if self.t >= INITIAL_REPLAY_SIZE:
stats = [self.total_reward, self.total_q_max/float(self.duration),
self.duration, self.total_loss/ (float(self.duration)/ float(TRAIN_INTERVAL))]
for i in range(len(stats)):
self.sess.run(self.update_ops[i], feed_dict={self.summary_placeholders[i]: float(stats[i])})
summary_str = self.sess.run(self.summary_op)
self.summary_writer.add_summary(summary_str, self.episode + 1)
self.total_reward = 0
self.total_q_max = 0
self.total_loss = 0
self.duration = 0
self.episode += 1
self.t += 1
        return next_state
 | mit |
libAtoms/matscipy | examples/electrochemistry/pnp_batch/cell_1d/stern_layer_sweep/pnp_plot.py | 2 | 6741 | # positional args
# datadir, figfile, param, param_label
import os.path, re, sys
import numpy as np
from glob import glob
from cycler import cycler
from itertools import cycle
from itertools import groupby
import matplotlib.pyplot as plt
# Ensure variable is defined
try:
datadir
except NameError:
try:
datadir = sys.argv[1]
except:
datadir = 'data'
try:
figfile
except NameError:
try:
figfile = sys.argv[2]
except:
figfile = 'fig.png'
try:
param
except NameError:
try:
param = sys.argv[3]
except:
param = 'c'
try:
    param_label
except NameError:
try:
param_label = sys.argv[4]
except:
param_label = 'c (\mathrm{mM})'
try:
glob_pattern
except NameError:
glob_pattern = os.path.join(datadir, 'NaCl*.txt')
def right_align_legend(leg):
hp = leg._legend_box.get_children()[1]
for vp in hp.get_children():
for row in vp.get_children():
row.set_width(100) # need to adapt this manually
row.mode= "expand"
row.align="right"
# sort file names as normal humans expect
# https://stackoverflow.com/questions/2669059/how-to-sort-alpha-numeric-set-in-python
scientific_number_regex = '([-+]?[\d]+\.?[\d]*(?:[Ee][-+]?[\d]+)?)'
def alpha_num_order(x):
"""Sort the given iterable in the way that humans expect."""
def convert(text):
try:
ret = float(text) # if text.isdigit() else text
except:
ret = text
return ret
return [ convert(c) for c in re.split(scientific_number_regex, x) ]
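# Illustrative usage sketch (not part of the original script): numeric fields are
# compared as numbers, so concentrations sort by value rather than lexically.
# The file names below are made up.
def _alpha_num_order_example():
    names = ['NaCl_c_1e-2.txt', 'NaCl_c_2e-3.txt', 'NaCl_c_5e-4.txt']
    # -> ['NaCl_c_5e-4.txt', 'NaCl_c_2e-3.txt', 'NaCl_c_1e-2.txt']
    return sorted(names, key=alpha_num_order)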
dat_files = sorted(glob(glob_pattern),key=alpha_num_order)
N = len(dat_files) # number of data sets
M = 2 # number of species
# matplotlib settings
SMALL_SIZE = 8
MEDIUM_SIZE = 12
BIGGER_SIZE = 16
# plt.rc('axes', prop_cycle=default_cycler)
plt.rc('font', size=MEDIUM_SIZE) # controls default text sizes
plt.rc('axes', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels
plt.rc('legend', fontsize=MEDIUM_SIZE) # legend fontsize
plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
plt.rcParams["figure.figsize"] = (16,10) # the standard figure size
plt.rcParams["lines.linewidth"] = 3
plt.rcParams["lines.markersize"] = 14
plt.rcParams["lines.markeredgewidth"]=1
# line styles
# https://matplotlib.org/3.1.0/gallery/lines_bars_and_markers/linestyles.html
# linestyle_str = [
# ('solid', 'solid'), # Same as (0, ()) or '-'
# ('dotted', 'dotted'), # Same as (0, (1, 1)) or '.'
# ('dashed', 'dashed'), # Same as '--'
# ('dashdot', 'dashdot')] # Same as '-.'
linestyle_tuple = [
('loosely dotted', (0, (1, 10))),
('dotted', (0, (1, 1))),
('densely dotted', (0, (1, 1))),
('loosely dashed', (0, (5, 10))),
('dashed', (0, (5, 5))),
('densely dashed', (0, (5, 1))),
('loosely dashdotted', (0, (3, 10, 1, 10))),
('dashdotted', (0, (3, 5, 1, 5))),
('densely dashdotted', (0, (3, 1, 1, 1))),
('dashdotdotted', (0, (3, 5, 1, 5, 1, 5))),
('loosely dashdotdotted', (0, (3, 10, 1, 10, 1, 10))),
('densely dashdotdotted', (0, (3, 1, 1, 1, 1, 1)))]
# color maps for potential and concentration plots
cmap_u = plt.get_cmap('Reds')
cmap_c = [plt.get_cmap('Oranges'), plt.get_cmap('Blues')]
# general line style cycler
line_cycler = cycler( linestyle = [ s for _,s in linestyle_tuple ] )
# potential anc concentration cyclers
u_cycler = cycler( color = cmap_u( np.linspace(0.4,0.8,N) ) )
u_cycler = len(line_cycler)*u_cycler + len(u_cycler)*line_cycler
c_cyclers = [ cycler( color = cmap( np.linspace(0.4,0.8,N) ) ) for cmap in cmap_c ]
c_cyclers = [ len(line_cycler)*c_cycler + len(c_cycler)*line_cycler for c_cycler in c_cyclers ]
# https://matplotlib.org/3.1.1/tutorials/intermediate/constrainedlayout_guide.html
fig, (ax1,ax2,ax3) = plt.subplots(
nrows=1, ncols=3, figsize=[24,7], constrained_layout=True)
ax1.set_xlabel('z (nm)')
ax1.set_ylabel('potential (V)')
ax2.set_xlabel('z (nm)')
ax2.set_ylabel('concentration (mM)')
ax3.set_xlabel('z (nm)')
ax3.set_ylabel('concentration (mM)')
# ax1.axvline(x=pnp.lambda_D()*1e9, label='Debye Length', color='grey', linestyle=':')
species_label = [
'$[\mathrm{Na}^+], ' + param_label + '$',
'$[\mathrm{Cl}^-], ' + param_label + '$']
c_regex = re.compile(r'{}_{}'.format(param,scientific_number_regex))
c_graph_handles = [ [] for _ in range(M) ]
for f, u_style, c_styles in zip(dat_files,u_cycler,zip(*c_cyclers)):
print("Processing {:s}".format(f))
# extract nominal concentration from file name
nominal_c = float( c_regex.search(f).group(1) )
dat = np.loadtxt(f,unpack=True)
x = dat[0,:]
u = dat[1,:]
c = dat[2:,:]
c_label = '{:> 4.2g}'.format(nominal_c)
# potential
ax1.plot(x*1e9, u, marker=None, label=c_label, linewidth=1, **u_style)
for i in range(c.shape[0]):
# concentration
ax2.plot(x*1e9, c[i], marker='',
label=c_label, linewidth=2, **c_styles[i])
# semilog concentration
c_graph_handles[i].extend( ax3.semilogy(x*1e9, c[i], marker='',
label=c_label, linewidth=2, **c_styles[i]) )
# legend placement
# https://stackoverflow.com/questions/4700614/how-to-put-the-legend-out-of-the-plot
u_legend = ax1.legend(loc='center right', title='potential, ${}$'.format(param_label), bbox_to_anchor=(-0.2,0.5) )
first_c_legend = ax3.legend(handles=c_graph_handles[0], title=species_label[0], loc='upper left', bbox_to_anchor=(1.00, 1.02) )
second_c_legend = ax3.legend(handles=c_graph_handles[1], title=species_label[1], loc='lower left', bbox_to_anchor=(1.00,-0.02) )
ax3.add_artist(first_c_legend) # add automatically removed first legend again
c_legends = [ first_c_legend, second_c_legend ]
legends = [ u_legend, *c_legends ]
for l in legends:
right_align_legend(l)
# https://matplotlib.org/3.1.1/tutorials/intermediate/constrainedlayout_guide.html
for l in legends:
l.set_in_layout(False)
# trigger a draw so that constrained_layout is executed once
# before we turn it off when printing....
fig.canvas.draw()
# we want the legend included in the bbox_inches='tight' calcs.
for l in legends:
l.set_in_layout(True)
# we don't want the layout to change at this point.
fig.set_constrained_layout(False)
# fig.tight_layout(pad=3.0, w_pad=2.0, h_pad=1.0)
# plt.show()
fig.savefig(figfile, bbox_inches='tight', dpi=100)
| gpl-2.0 |
evanthebouncy/nnhmm | uai_network/draw.py | 7 | 2929 | import numpy as np
import matplotlib.pylab as plt
import multiprocessing as mp
from matplotlib import figure
from data import *
FIG = plt.figure()
def draw_coord(coord, name, lab=[1.0, 0.0]):
color = 1.0 if lab[0] > lab[1] else -1.0
ret = np.zeros(shape=[L,L,1])
coord_x, coord_y = coord
coord_x_idx = np.argmax(coord_x)
coord_y_idx = np.argmax(coord_y)
ret[coord_x_idx][coord_y_idx][0] = color
draw(ret, name)
def draw(m, name, extra=None):
FIG.clf()
matrix = m
orig_shape = np.shape(matrix)
# lose the channel shape in the end of orig_shape
new_shape = orig_shape[:-1]
matrix = np.reshape(matrix, new_shape)
ax = FIG.add_subplot(1,1,1)
ax.set_aspect('equal')
plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.gray)
# plt.imshow(matrix, interpolation='nearest', cmap=plt.cm.ocean)
plt.colorbar()
if extra != None:
greens, reds = extra
grn_x, grn_y, = greens
red_x, red_y = reds
plt.scatter(x=grn_x, y=grn_y, c='g', s=40)
plt.scatter(x=red_x, y=red_y, c='r', s=40)
# # put a blue dot at (10, 20)
# plt.scatter([10], [20])
# # put a red dot, size 40, at 2 locations:
# plt.scatter(x=[3, 4], y=[5, 6], c='r', s=40)
# # plt.plot()
plt.savefig(name)
def draw_orig(img, name):
ret = np.reshape(img, [L,L,1])
draw(ret, name)
def draw_allob(img, name, ob_prefix):
ret = np.zeros([L,L,1])
for ii in range(L):
for jj in range(L):
labb = img[ii][jj][0] - img[ii][jj][1]
ret[ii][jj][0] = labb
grn_x = []
grn_y = []
red_x = []
red_y = []
for obob in ob_prefix:
ob_c, labb = obob
if labb[0] > labb[1]:
grn_x.append(ob_c[0])
grn_y.append(ob_c[1])
else:
red_x.append(ob_c[0])
red_y.append(ob_c[1])
draw(ret, name, ((grn_y, grn_x), (red_y, red_x)))
def draw_obs(obs, name):
ret_shape = [L, L, 1]
ret = np.zeros(shape=ret_shape)
for ob, lab in obs:
ii, jj = ob
labb = 1.0 if lab[0] > lab[1] else -1.0
# labb = lab[0]
ret[ii][jj][0] = labb
draw(ret, name)
def draw_annotate(x_cords, y_cords, anns, name):
FIG.clf()
y = x_cords
z = y_cords
n = anns
fig = FIG
ax = fig.add_subplot(1,1,1)
ax.set_xlim([0,L])
ax.set_ylim([0,L])
ax.set_ylim(ax.get_ylim()[::-1])
ax.scatter(z, y)
for i, txt in enumerate(n):
ax.annotate(txt, (z[i],y[i]))
fig.savefig(name)
def draw_obs_trace(obs, name):
x_coords = []
y_coords = []
anno = []
for i, ob in enumerate(obs):
ob_coord, ob_outcome = ob
x_coords.append(ob_coord[0])
y_coords.append(ob_coord[1])
anno.append("O"+str(i)+str(int(ob_outcome[0])))
draw_annotate(x_coords, y_coords, anno, name)
def draw_all_preds(all_preds, name):
ret_shape = [L, L, 1]
ret = np.zeros(shape=ret_shape)
for qq, labb in all_preds:
i, j = qq
# ret[i][j][0] = 1.0 if labb[0] > labb[1] else 0.0
# ret[i][j][0] = labb[0]
ret[i][j][0] = labb[0]
draw(ret, name)
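# Illustrative usage sketch (not part of the original module): draw a random
# observation grid and a couple of labelled observations. It assumes the grid
# size L is provided by `from data import *`, as in the rest of this module.
def _draw_example():
    img = np.random.uniform(-1.0, 1.0, size=[L, L, 1])
    draw(img, "example_random.png")
    draw_obs([((2, 3), [1.0, 0.0]), ((5, 7), [0.0, 1.0])], "example_obs.png")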
| mit |
droter/trading-with-python | lib/backtest.py | 74 | 7381 | #-------------------------------------------------------------------------------
# Name: backtest
# Purpose: perform routine backtesting tasks.
# This module should be useable as a stand-alone library outide of the TWP package.
#
# Author: Jev Kuznetsov
#
# Created: 03/07/2014
# Copyright: (c) Jev Kuznetsov 2013
# Licence: BSD
#-------------------------------------------------------------------------------
import pandas as pd
import matplotlib.pyplot as plt
import sys
import numpy as np
def tradeBracket(price,entryBar,upper=None, lower=None, timeout=None):
'''
trade a bracket on price series, return price delta and exit bar #
Input
------
price : numpy array of price values
entryBar: entry bar number, *determines entry price*
upper : high stop
lower : low stop
timeout : max number of periods to hold
Returns exit price and number of bars held
'''
assert isinstance(price, np.ndarray) , 'price must be a numpy array'
# create list of exit indices and add max trade duration. Exits are relative to entry bar
if timeout: # set trade length to timeout or series length
exits = [min(timeout,len(price)-entryBar-1)]
else:
exits = [len(price)-entryBar-1]
p = price[entryBar:entryBar+exits[0]+1] # subseries of price
# extend exits list with conditional exits
# check upper bracket
if upper:
assert upper>p[0] , 'Upper bracket must be higher than entry price '
idx = np.where(p>upper)[0] # find where price is higher than the upper bracket
if idx.any():
exits.append(idx[0]) # append first occurence
# same for lower bracket
if lower:
assert lower<p[0] , 'Lower bracket must be lower than entry price '
idx = np.where(p<lower)[0]
if idx.any():
exits.append(idx[0])
exitBar = min(exits) # choose first exit
return p[exitBar], exitBar
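def _trade_bracket_example():
    """Illustrative sketch (not part of the original module): enter at bar 0 and
    exit on a profit target of 11.0, a stop at 9.0, or a 5-bar timeout. The
    price series is made up."""
    price = np.array([10.0, 10.2, 10.6, 11.2, 10.9, 10.8])
    exit_price, bars_held = tradeBracket(price, entryBar=0, upper=11.0, lower=9.0, timeout=5)
    return exit_price, bars_held  # (11.2, 3): first bar where price exceeds the upper bracket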
class Backtest(object):
"""
Backtest class, simple vectorized one. Works with pandas objects.
"""
def __init__(self,price, signal, signalType='capital',initialCash = 0, roundShares=True):
"""
Arguments:
*price* Series with instrument price.
*signal* Series with capital to invest (long+,short-) or number of shares.
        *signalType* 'capital' (amount of capital to bet) or 'shares' (number of shares); 'capital' mode is the default.
*initialCash* starting cash.
*roundShares* round off number of shares to integers
"""
#TODO: add auto rebalancing
# check for correct input
assert signalType in ['capital','shares'], "Wrong signal type provided, must be 'capital' or 'shares'"
#save internal settings to a dict
self.settings = {'signalType':signalType}
# first thing to do is to clean up the signal, removing nans and duplicate entries or exits
self.signal = signal.ffill().fillna(0)
# now find dates with a trade
tradeIdx = self.signal.diff().fillna(0) !=0 # days with trades are set to True
if signalType == 'shares':
self.trades = self.signal[tradeIdx] # selected rows where tradeDir changes value. trades are in Shares
elif signalType =='capital':
self.trades = (self.signal[tradeIdx]/price[tradeIdx])
if roundShares:
self.trades = self.trades.round()
# now create internal data structure
self.data = pd.DataFrame(index=price.index , columns = ['price','shares','value','cash','pnl'])
self.data['price'] = price
self.data['shares'] = self.trades.reindex(self.data.index).ffill().fillna(0)
self.data['value'] = self.data['shares'] * self.data['price']
delta = self.data['shares'].diff() # shares bought sold
self.data['cash'] = (-delta*self.data['price']).fillna(0).cumsum()+initialCash
self.data['pnl'] = self.data['cash']+self.data['value']-initialCash
@property
def sharpe(self):
''' return annualized sharpe ratio of the pnl '''
pnl = (self.data['pnl'].diff()).shift(-1)[self.data['shares']!=0] # use only days with position.
return sharpe(pnl) # need the diff here as sharpe works on daily returns.
@property
def pnl(self):
'''easy access to pnl data column '''
return self.data['pnl']
def plotTrades(self):
"""
visualise trades on the price chart
long entry : green triangle up
short entry : red triangle down
exit : black circle
"""
l = ['price']
p = self.data['price']
p.plot(style='x-')
# ---plot markers
# this works, but I rather prefer colored markers for each day of position rather than entry-exit signals
# indices = {'g^': self.trades[self.trades > 0].index ,
# 'ko':self.trades[self.trades == 0].index,
# 'rv':self.trades[self.trades < 0].index}
#
#
# for style, idx in indices.iteritems():
# if len(idx) > 0:
# p[idx].plot(style=style)
# --- plot trades
#colored line for long positions
idx = (self.data['shares'] > 0) | (self.data['shares'] > 0).shift(1)
if idx.any():
p[idx].plot(style='go')
l.append('long')
#colored line for short positions
idx = (self.data['shares'] < 0) | (self.data['shares'] < 0).shift(1)
if idx.any():
p[idx].plot(style='ro')
l.append('short')
plt.xlim([p.index[0],p.index[-1]]) # show full axis
plt.legend(l,loc='best')
plt.title('trades')
class ProgressBar:
def __init__(self, iterations):
self.iterations = iterations
self.prog_bar = '[]'
self.fill_char = '*'
self.width = 50
self.__update_amount(0)
def animate(self, iteration):
print '\r',self,
sys.stdout.flush()
self.update_iteration(iteration + 1)
def update_iteration(self, elapsed_iter):
self.__update_amount((elapsed_iter / float(self.iterations)) * 100.0)
self.prog_bar += ' %d of %s complete' % (elapsed_iter, self.iterations)
def __update_amount(self, new_amount):
percent_done = int(round((new_amount / 100.0) * 100.0))
all_full = self.width - 2
num_hashes = int(round((percent_done / 100.0) * all_full))
self.prog_bar = '[' + self.fill_char * num_hashes + ' ' * (all_full - num_hashes) + ']'
pct_place = (len(self.prog_bar) // 2) - len(str(percent_done))
pct_string = '%d%%' % percent_done
self.prog_bar = self.prog_bar[0:pct_place] + \
(pct_string + self.prog_bar[pct_place + len(pct_string):])
def __str__(self):
return str(self.prog_bar)
def sharpe(pnl):
return np.sqrt(250)*pnl.mean()/pnl.std()
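if __name__ == '__main__':
    # Illustrative sketch (not part of the original library): backtest a fixed
    # long position on a synthetic random-walk price. All numbers are made up
    # for demonstration purposes only.
    np.random.seed(42)
    idx = pd.date_range('2014-01-01', periods=250, freq='B')
    price = pd.Series(100 + np.random.randn(250).cumsum(), index=idx)
    signal = pd.Series(0.0, index=idx)
    signal[50:150] = 10000.0  # bet 10k of capital between bars 50 and 150, flat otherwise
    bt = Backtest(price, signal, signalType='capital', initialCash=10000)
    print 'Sharpe ratio: %.2f' % bt.sharpe
    bt.plotTrades()
    bt.pnl.plot()
    plt.show()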
| bsd-3-clause |
Upward-Spiral-Science/claritycontrol | code/scripts/roi_analysis.py | 1 | 2744 | #!/usr/bin/python
#-*- coding:utf-8 -*-
__author__ = 'david'
from __builtin__ import *
import gc
import numpy as np
from skimage.feature import greycomatrix, greycoprops
import matplotlib as mpl
mpl.use('TkAgg') # Solve runtime issue
import matplotlib.pyplot as plt
## Fake imge and label volumes to fast test functionality
def loadImg():
return np.random.random_sample((100,100,100))
def loadAtlas():
atlas_volume = np.zeros((100,100,100),dtype=np.uint32)
atlas_volume[10:50,10:50,10:50]=np.ones((40,40,40),dtype=np.uint32)*1
atlas_volume[50:90,10:50,10:50]=np.ones((40,40,40),dtype=np.uint32)*2
atlas_volume[10:50,50:90,10:50]=np.ones((40,40,40),dtype=np.uint32)*3
atlas_volume[50:90,50:90,10:50]=np.ones((40,40,40),dtype=np.uint32)*4
atlas_volume[10:50,10:50,50:90]=np.ones((40,40,40),dtype=np.uint32)*5
atlas_volume[50:90,10:50,50:90]=np.ones((40,40,40),dtype=np.uint32)*6
atlas_volume[10:50,50:90,50:90]=np.ones((40,40,40),dtype=np.uint32)*7
atlas_volume[50:90,50:90,50:90]=np.ones((40,40,40),dtype=np.uint32)*8
return atlas_volume
## END
## True data
# path = "~/Workspaces/claritycontrol/code/data/raw/"
# token = "Fear199"
# pathname = path+token+".img"
#
# img_volume = nib.load(pathname).get_data()[:,:,:,0]
## END
## get atlas values
atlas_volume = loadAtlas()
print atlas_volume.shape
atlas_values, atlas_count = np.unique(atlas_volume,return_counts=True)
atlas_values = atlas_values[1:] # remove background
## get img
img_volume = loadImg()
print img_volume.shape
class_id = 0 # Fear, Control, Cocaine
subject_id = 199
## normalize volume Z-standardization
img_volume = (img_volume-np.mean(img_volume))/np.std(img_volume)
## prepare results matrix
columns = ['class_id', 'subject_id', 'roi', 'mean', 'std', 'energy', 'entropy', 'correlation', 'contrast', 'variance', 'sumMean',
'inertial', 'clusterShade', 'clusterTendency', 'homogeneity', 'maxProbability', 'inverseVariance']
features = np.zeros((len(atlas_values), len(columns)), dtype=np.float32)
## compute GLCM and properties
for roi_id in range(len(atlas_values)):
features[roi_id, 0] = class_id
features[roi_id, 1] = subject_id
features[roi_id, 2] = atlas_values[roi_id]
## mask img and get roi block
mask_volume = (atlas_volume == atlas_values[roi_id])
xs, ys, zs = mask_volume.nonzero()
roi_block = np.multiply(img_volume, mask_volume)[min(xs):max(xs), min(ys):max(ys), min(zs):max(zs)]
del mask_volume # memory collect
## compute mean and std
features[roi_id, 3] = np.mean(roi_block[roi_block != 0])
features[roi_id, 4] = np.std(roi_block[roi_block != 0])
## compute GLCM and properties
# features[roi_id, 5] = 0
# features[roi_id, 6] = 0
| apache-2.0 |
kostajaitachi/shogun | examples/undocumented/python_modular/graphical/regression_lars.py | 26 | 3327 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from modshogun import RegressionLabels, RealFeatures
from modshogun import LeastAngleRegression, LinearRidgeRegression, LeastSquaresRegression
from modshogun import MeanSquaredError
# we compare LASSO with ordinary least-squares (OLS)
# in the ideal case, the MSE of OLS should coincide
# with LASSO at the end of the path
#
# if OLS is unstable, we may use RidgeRegression (with
# a small regularization coefficient) to simulate OLS
use_ridge = False
np.random.seed(1024)
n = 200
ntrain = 100
p = 7
correlation = 0.6
mean = np.zeros(p)
cov = correlation*np.ones((p,p)) + (1-correlation)*np.eye(p)
Xall = np.random.multivariate_normal(mean, cov, n)
# model is the linear combination of the first three variables plus noise
yall = 2*Xall[:,0] + 5*Xall[:,1] + -3*Xall[:,2] + 0.5*np.random.randn(n)
X = Xall[0:ntrain,:]
y = yall[0:ntrain]
Xtest = Xall[ntrain:,:]
ytest = yall[ntrain:]
# preprocess data
for i in xrange(p):
X[:,i] -= np.mean(X[:,i])
X[:,i] /= np.linalg.norm(X[:,i])
y -= np.mean(y)
# train LASSO
LeastAngleRegression = LeastAngleRegression()
LeastAngleRegression.set_labels(RegressionLabels(y))
LeastAngleRegression.train(RealFeatures(X.T))
# train ordinary LSR
if use_ridge:
    lsr = LinearRidgeRegression(0.01, RealFeatures(X.T), RegressionLabels(y))
lsr.train()
else:
lsr = LeastSquaresRegression()
lsr.set_labels(RegressionLabels(y))
lsr.train(RealFeatures(X.T))
# gather LASSO path
path = np.zeros((p, LeastAngleRegression.get_path_size()))
for i in xrange(path.shape[1]):
path[:,i] = LeastAngleRegression.get_w(i)
evaluator = MeanSquaredError()
# apply on training data
mse_train = np.zeros(LeastAngleRegression.get_path_size())
for i in xrange(mse_train.shape[0]):
LeastAngleRegression.switch_w(i)
ypred = LeastAngleRegression.apply(RealFeatures(X.T))
mse_train[i] = evaluator.evaluate(ypred, RegressionLabels(y))
ypred = lsr.apply(RealFeatures(X.T))
mse_train_lsr = evaluator.evaluate(ypred, RegressionLabels(y))
# apply on test data
mse_test = np.zeros(LeastAngleRegression.get_path_size())
for i in xrange(mse_test.shape[0]):
LeastAngleRegression.switch_w(i)
ypred = LeastAngleRegression.apply(RealFeatures(Xtest.T))
    mse_test[i] = evaluator.evaluate(ypred, RegressionLabels(ytest))
ypred = lsr.apply(RealFeatures(Xtest.T))
mse_test_lsr = evaluator.evaluate(ypred, RegressionLabels(ytest))
fig = plt.figure()
ax_path = fig.add_subplot(1,2,1)
plt.plot(xrange(path.shape[1]), path.T, '.-')
plt.legend(['%d' % (x+1) for x in xrange(path.shape[0])])
plt.xlabel('iteration')
plt.title('LASSO path')
ax_tr = fig.add_subplot(2,2,2)
plt.plot(range(mse_train.shape[0])[1:], mse_train[1:], 'k.-')
plt.plot(range(mse_train.shape[0])[1:], np.zeros(mse_train.shape[0])[1:] + mse_train_lsr, 'r-')
plt.legend(('LASSO', 'LeastSquares'))
plt.xlabel('# of non-zero variables')
plt.ylabel('MSE')
plt.title('MSE on training data')
ax_tt = fig.add_subplot(2,2,4)
plt.plot(range(mse_test.shape[0])[1:], mse_test[1:], 'k.-')
plt.plot(range(mse_test.shape[0])[1:], np.zeros(mse_test.shape[0])[1:] + mse_test_lsr, 'r-')
plt.legend(('LASSO', 'LeastSquares'), loc='lower right')
plt.xlabel('# of non-zero variables')
plt.ylabel('MSE')
plt.title('MSE on test data')
plt.show()
| gpl-3.0 |
kmiernik/Pyspectr | bin/spectrum_fitter.py | 1 | 6663 | #!/usr/bin/env python3
"""
K. Miernik 2013
k.a.miernik@gmail.com
GPL v3
Spectrum fitting code
"""
import argparse
import math
import numpy
import os
import sys
import time
import xml.etree.ElementTree as ET
import matplotlib.pyplot as plt
from lmfit import minimize, Parameters, report_errors
from Pyspectr.hisfile import HisFile as HisFile
from Pyspectr.peak_fitter import PeakFitter as PeakFitter
from Pyspectr.exceptions import GeneralError as GeneralError
class SpectrumParser:
def __init__(self, file_name):
self.base_name, ext = os.path.splitext(file_name)
if len(ext) > 0 and ext in (".gz", ".his", ".tgz"):
self.file_type = 'his'
self.data_file = HisFile(file_name)
        elif len(ext) > 0 and ext == ".txt":
self.file_type = 'txt'
self.data_file = numpy.loadtxt(file_name)
else:
raise GeneralError(
'Files other than txt, his, tgz and gz are not supported')
def parse(self, spectrum, show, pause):
spectra_ids = spectrum.get('id')
id_list = []
if self.file_type == 'his':
for element in spectra_ids.split(','):
element = element.split('-')
if len(element) > 1:
for i in range(int(element[0]), int(element[1]) + 1):
id_list.append(i)
else:
id_list.append(int(element[0]))
elif self.file_type == 'txt':
if spectra_ids != '':
raise GeneralError('Spectrum id not supported for txt files')
else:
id_list.append('')
peaks = spectrum.findall('peak')
x_min = int(spectrum.get('min'))
x_max = int(spectrum.get('max'))
smin = spectrum.get('smin')
smax = spectrum.get('smax')
for spectrum_id in id_list:
plot_name = '{}_{}'.format(self.base_name, spectrum_id)
PF = PeakFitter(peaks, spectrum.get('baseline'), plot_name)
if self.file_type == 'txt':
data_x = self.data_file[x_min:x_max, 0]
data_y = self.data_file[x_min:x_max, 1]
if self.data_file.shape[1] == 2:
data_dy = []
for y in data_y:
dy = numpy.sqrt(y) if y > 0 else 1.0
data_dy.append(dy)
data_dy = numpy.array(data_dy)
else:
data_dy = self.data_file[x_min:x_max, 2]
for iy, y in enumerate(data_dy):
if y <= 0:
data_dy[iy] = 1.0
elif self.file_type == 'his':
data = self.data_file.load_histogram(spectrum_id)
if data[0] != 1:
raise GeneralError('Only 1D histograms are supported')
data_x = data[1][x_min:x_max]
data_y = data[3][x_min:x_max]
data_dy = []
for y in data_y:
dy = numpy.sqrt(y) if y > 0 else 1.0
data_dy.append(dy)
data_dy = numpy.array(data_dy)
if smin is not None and smax is not None:
width = [float(smin), float(smax)]
else:
width = None
fit_result = PF.fit(data_x, data_y, data_dy, width=width)
if show == 'plot' or show == 'svg':
plt.clf()
plt.xlabel('Channel')
plt.ylabel('Counts')
plt.plot(data_x, data_y, linestyle='steps-mid')
plt.plot(data_x, fit_result['baseline'], linestyle='--')
plt.plot(fit_result['x_axis'], fit_result['fit'], linewidth=1.0)
if show == 'svg':
svg_name = 'fit_{0}_{1}_{2}'.format(plot_name,
int(data_x[0]), int(data_x[-1]))
svg_name = svg_name.replace('.', '').\
replace('/', '') + '.svg'
plt.savefig(svg_name)
else:
plt.show()
plt.draw()
time.sleep(pause)
elif show == 'quiet':
pass
for i, peak in enumerate(peaks):
if peak.get('ignore') == 'True':
continue
x0 = PF.params['x{}'.format(i)].value
dx = PF.params['x{}'.format(i)].stderr
A = PF.params['A{}'.format(i)].value
dA = PF.params['A{}'.format(i)].stderr
s = PF.params['s{}'.format(i)].value
E = peaks[i].get('E')
name = peaks[i].get('name')
if name is None:
name = ""
Area = PF.find_area(data_x, i)
print('{:>8} {:>8} {:>8.2f} {:>8.2f}'.\
format(name, E, x0, dx),
'{:>8.1f} {:>8.1f} {:>8.3f} {:>8.1f}'.\
format(A, dA, s, Area))
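# A hypothetical config layout matching what SpectrumParser.parse() expects
# (inferred from the code above; the root tag, attribute values and peak names
# are assumptions, only the element/attribute names actually read here are
# grounded in the code):
#
# <config>
#   <data_file name="run01.his">
#     <spectrum id="1,3-5" min="100" max="900" baseline="linear"
#               smin="0.5" smax="2.0">
#       <peak E="1173.2" name="Co-60"/>
#       <peak E="1332.5" name="Co-60" ignore="True"/>
#     </spectrum>
#   </data_file>
# </config>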
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='')
parser.add_argument('config', nargs=1,
help='Config files')
parser.add_argument('--pause', '-p', nargs=1, type=float, default=[0.5],
help='Pause time in seconds')
out_group = parser.add_mutually_exclusive_group()
out_group.add_argument('--plot', action='store_true',
help='Plot window during fitting')
out_group.add_argument('--svg', action='store_true',
help='SVG files saved during fitting')
out_group.add_argument('--quiet', action='store_true',
help='No output during fitting')
args = parser.parse_args()
show = 'plot'
if args.svg:
show = 'svg'
elif args.quiet:
show = 'quiet'
try:
tree = ET.parse(args.config[0])
    except ET.ParseError as err:
print("File '{0}' parsing error: {1}".format(
args.config[0], err))
exit()
root = tree.getroot()
for data_file in root.findall('data_file'):
SP = SpectrumParser(data_file.get('name'))
print('# File: ', data_file.get('name'))
print('# {: ^8} {:^7} {:^8} {:^8} {:^8} {:^8} {:^8} {:^8}'
.format('Name', 'E', 'x0', 'dx', 'A', 'dA', 's', 'Area'))
for spectrum in data_file.findall('spectrum'):
SP.parse(spectrum, show, args.pause[0])
| gpl-3.0 |
wesleybowman/karsten | project/rawADCPclass.py | 1 | 4107 | from __future__ import division
import numpy as np
import sys
sys.path.append('/home/wesley/github/UTide/')
from utide import ut_solv, ut_reconstr
#from shortest_element_path import shortest_element_path
#import matplotlib.pyplot as plt
#import matplotlib.tri as Tri
#import matplotlib.ticker as ticker
#import seaborn
import scipy.io as sio
import h5py
from os import path
class Struct:
def __init__(self, **entries):
self.__dict__.update(entries)
class rawADCP:
def __init__(self, filename):
self.QC = ['raw data']
self.load(filename)
self.Params_Stn4_SWNSreport(filename)
self.load_rbrdata()
## set options
self.options = {}
self.options['showPA'] = 1
self.options['showRBRavg'] = 1
## save a flow file in BPformat
#save_FlowFile_BPFormat(fileinfo,adcp,rbr,saveparams,options)
def load(self, filename):
try:
self.mat = sio.loadmat(filename,
struct_as_record=False, squeeze_me=True)
self.adcp = self.mat['adcp']
except NotImplementedError:
self.mat = h5py.File(filename)
self.adcp = self.mat['adcp']
#self.adcp = Struct(**self.mat['adcp'])
def Params_Stn4_SWNSreport(self, filename):
fname = filename.split('/')
filebase = fname[-1].split('_')[0]
self.fileinfo = {}
self.fileinfo['datadir'] = path.join(*fname[:-1]) + '/'
self.fileinfo['ADCP'] = filebase + '_raw'
self.fileinfo['outdir'] = path.join(*fname[:-1]) + '/'
self.fileinfo['flowfile'] = filebase + '_Flow'
self.fileinfo['rbr']= 'station4_grandPassageII_RBRSN_011857.mat'
self.fileinfo['paramfile']= 'Params_Stn4_SWNSreport'
#%% ADCP parameters
self.saveparams = {}
self.saveparams['tmin'] = 209
self.saveparams['tmax'] = 240
self.saveparams['zmin'] = 0
self.saveparams['zmax'] = 20
self.saveparams['approxdepth'] = 15.5
self.saveparams['flooddir'] = 0
self.saveparams['declination'] = -17.25
self.saveparams['lat'] = 44.2605
self.saveparams['lon'] = -66.3354
self.saveparams['dabADCP'] = 0.5
self.saveparams['dabPS'] = -0.6
self.saveparams['rbr_hr_offset'] = 3
def load_rbrdata(self):
rbrFile = self.fileinfo['datadir'] + self.fileinfo['rbr']
try:
rbrMat = sio.loadmat(rbrFile,
struct_as_record=False, squeeze_me=True)
except NotImplementedError:
rbrMat = h5py.File(rbrFile)
rbr = rbrMat['rbr']
rbrout = {}
rbrout['mtime'] = rbr.yd
rbrout['temp'] = rbr.temperature
rbrout['pres'] = rbr.pressure
rbrout['depth'] = rbr.depth
self.rbr = rbrout
if __name__ == '__main__':
#filename = 'GP-120726-BPd_raw.mat'
filename = '140703-EcoEII_database/data/GP-120726-BPd_raw.mat'
data = rawADCP(filename)
#stn = 'GP-120726-BPd';
#%% File information
#fileinfo.datadir = '../data/'; %path to raw data files
#fileinfo.ADCP = [stn '_raw']; %name of ADCP file
#fileinfo.outdir = '../data/'; %path to output directory
#fileinfo.flowfile = [stn,'_Flow']; %name of output file with Flow data
#fileinfo.rbr = ['station4_grandPassageII_RBRSN_011857.mat'];
#fileinfo.paramfile = mfilename;
#
#%% ADCP parameters
#saveparams.tmin = 209; %tmin (year day)
#saveparams.tmax = 240; %tmax (year day)
#saveparams.zmin = 0; %minimum z to include in saves file
#saveparams.zmax = 20;
#saveparams.approxdepth = 15.5; %Approximate depth
#saveparams.flooddir= 0; %Flood direction (relative to true north, CW is positive)
#saveparams.declination = -17.25;%Declination angle
#saveparams.lat = 44.2605; %latitude
#saveparams.lon = -66.3354; %longitude
#saveparams.dabADCP = 0.5; %depth above bottom of ADCP
#saveparams.dabPS = -0.6; %depth above bottom of pressure sensor
#saveparams.rbr_hr_offset = 3; % hour offset to convert rbr time to UTC
| mit |
runt18/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/colors.py | 1 | 31702 | """
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of
colors called a colormap. Colormapping typically involves two steps:
a data array is first mapped onto the range 0-1 using an instance
of :class:`Normalize` or of a subclass; then this number in the 0-1
range is mapped to a color using an instance of a subclass of
:class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all
the built-in colormap instances, but is also useful for making
custom colormaps, and :class:`ListedColormap`, which is used for
generating a custom colormap from a list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single
color specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic builtin colors, you can use a single letter
- b : blue
- g : green
- r : red
- c : cyan
- m : magenta
- y : yellow
- k : black
- w : white
Gray shades can be given as a string encoding a float in the 0-1
range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify
the color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B*
are in the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and
'chartreuse' are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = map(int, parts[:2])
# true if clip supports the out kwarg
NP_CLIP_OUT = NP_MAJOR>=1 and NP_MINOR>=2
cnames = {
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
    'paleturquoise'         : '#AFEEEE',
    'palevioletred'         : '#DB7093',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
# add british equivs
for k, v in cnames.items():
if k.find('gray')>=0:
k = k.replace('gray', 'grey')
cnames[k] = v
def is_color_like(c):
'Return *True* if *c* can be converted to *RGB*'
try:
colorConverter.to_rgb(c)
return True
except ValueError:
return False
def rgb2hex(rgb):
'Given a len 3 rgb tuple of 0-1 floats, return the hex string'
    return '#{0:02x}{1:02x}{2:02x}'.format(*tuple([int(round(val*255)) for val in rgb]))
hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z")
def hex2color(s):
"""
Take a hex string *s* and return the corresponding rgb 3-tuple
Example: #efefef -> (0.93725, 0.93725, 0.93725)
"""
if not isinstance(s, basestring):
raise TypeError('hex2color requires a string argument')
if hexColorPattern.match(s) is None:
raise ValueError('invalid hex color string "{0!s}"'.format(s))
return tuple([int(n, 16)/255.0 for n in (s[1:3], s[3:5], s[5:7])])
class ColorConverter:
"""
Provides methods for converting color specifications to *RGB* or *RGBA*
Caching is used for more efficient conversion upon repeated calls
with the same argument.
Ordinarily only the single instance instantiated in this module,
*colorConverter*, is needed.
"""
colors = {
'b' : (0.0, 0.0, 1.0),
'g' : (0.0, 0.5, 0.0),
'r' : (1.0, 0.0, 0.0),
'c' : (0.0, 0.75, 0.75),
'm' : (0.75, 0, 0.75),
'y' : (0.75, 0.75, 0),
'k' : (0.0, 0.0, 0.0),
'w' : (1.0, 1.0, 1.0),
}
cache = {}
def to_rgb(self, arg):
"""
Returns an *RGB* tuple of three floats from 0-1.
*arg* can be an *RGB* or *RGBA* sequence or a string in any of
several forms:
1) a letter from the set 'rgbcmykw'
2) a hex color string, like '#00FFFF'
3) a standard name, like 'aqua'
4) a float, like '0.4', indicating gray on a 0-1 scale
if *arg* is *RGBA*, the *A* will simply be discarded.
"""
try: return self.cache[arg]
except KeyError: pass
except TypeError: # could be unhashable rgb seq
arg = tuple(arg)
try: return self.cache[arg]
except KeyError: pass
except TypeError:
raise ValueError(
'to_rgb: arg "{0!s}" is unhashable even inside a tuple'.format(str(arg)))
try:
if cbook.is_string_like(arg):
color = self.colors.get(arg, None)
if color is None:
str1 = cnames.get(arg, arg)
if str1.startswith('#'):
color = hex2color(str1)
else:
fl = float(arg)
if fl < 0 or fl > 1:
raise ValueError(
'gray (string) must be in range 0-1')
color = tuple([fl]*3)
elif cbook.iterable(arg):
if len(arg) > 4 or len(arg) < 3:
raise ValueError(
'sequence length is {0:d}; must be 3 or 4'.format(len(arg)))
color = tuple(arg[:3])
if [x for x in color if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
raise ValueError('cannot convert argument to rgb sequence')
self.cache[arg] = color
except (KeyError, ValueError, TypeError), exc:
raise ValueError('to_rgb: Invalid rgb arg "{0!s}"\n{1!s}'.format(str(arg), exc))
# Error messages could be improved by handling TypeError
# separately; but this should be rare and not too hard
# for the user to figure out as-is.
return color
def to_rgba(self, arg, alpha=None):
"""
Returns an *RGBA* tuple of four floats from 0-1.
For acceptable values of *arg*, see :meth:`to_rgb`.
If *arg* is an *RGBA* sequence and *alpha* is not *None*,
*alpha* will replace the original *A*.
"""
try:
if not cbook.is_string_like(arg) and cbook.iterable(arg):
if len(arg) == 4:
if [x for x in arg if (float(x) < 0) or (x > 1)]:
# This will raise TypeError if x is not a number.
                        raise ValueError('number in rgba sequence outside 0-1 range')
if alpha is None:
return tuple(arg)
if alpha < 0.0 or alpha > 1.0:
raise ValueError("alpha must be in range 0-1")
return arg[0], arg[1], arg[2], arg[3] * alpha
r,g,b = arg[:3]
if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]:
                    raise ValueError('number in rgb sequence outside 0-1 range')
else:
r,g,b = self.to_rgb(arg)
if alpha is None:
alpha = 1.0
return r,g,b,alpha
except (TypeError, ValueError), exc:
raise ValueError('to_rgba: Invalid rgba arg "{0!s}"\n{1!s}'.format(str(arg), exc))
def to_rgba_array(self, c, alpha=None):
"""
Returns a numpy array of *RGBA* tuples.
Accepts a single mpl color spec or a sequence of specs.
Special case to handle "no color": if *c* is "none" (case-insensitive),
then an empty array will be returned. Same for an empty list.
"""
try:
if c.lower() == 'none':
return np.zeros((0,4), dtype=np.float_)
except AttributeError:
pass
if len(c) == 0:
return np.zeros((0,4), dtype=np.float_)
try:
result = np.array([self.to_rgba(c, alpha)], dtype=np.float_)
except ValueError:
if isinstance(c, np.ndarray):
if c.ndim != 2 and c.dtype.kind not in 'SU':
raise ValueError("Color array must be two-dimensional")
result = np.zeros((len(c), 4))
for i, cc in enumerate(c):
result[i] = self.to_rgba(cc, alpha) # change in place
return np.asarray(result, np.float_)
colorConverter = ColorConverter()
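# Illustrative usage (added note, not part of the original module; assumes the
# module is importable in the usual way as matplotlib.colors):
#
#     >>> colorConverter.to_rgb('r')        # single-letter builtin color
#     (1.0, 0.0, 0.0)
#     >>> colorConverter.to_rgb('0.75')     # gray shade given as a string
#     (0.75, 0.75, 0.75)
#     >>> colorConverter.to_rgba('#eeefff', alpha=0.5)   # hex string plus alpha
#     (0.933..., 0.937..., 1.0, 0.5)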
def makeMappingArray(N, data):
"""Create an *N* -element 1-d lookup table
*data* represented by a list of x,y0,y1 mapping correspondences.
Each element in this list represents how a value between 0 and 1
(inclusive) represented by x is mapped to a corresponding value
between 0 and 1 (inclusive). The two values of y are to allow
for discontinuous mapping functions (say as might be found in a
sawtooth) where y0 represents the value of y for values of x
<= to that given, and y1 is the value to be used for x > than
that given). The list must start with x=0, end with x=1, and
all values of x must be in increasing order. Values between
the given mapping points are determined by simple linear interpolation.
The function returns an array "result" where ``result[x*(N-1)]``
gives the closest value for values of x between 0 and 1.
"""
try:
adata = np.array(data)
except:
        raise TypeError("data must be convertible to an array")
shape = adata.shape
    if len(shape) != 2 or shape[1] != 3:
raise ValueError("data must be nx3 format")
x = adata[:,0]
y0 = adata[:,1]
y1 = adata[:,2]
if x[0] != 0. or x[-1] != 1.0:
raise ValueError(
"data mapping points must start with x=0. and end with x=1")
if np.sometrue(np.sort(x)-x):
raise ValueError(
"data mapping points must have x in increasing order")
# begin generation of lookup table
x = x * (N-1)
lut = np.zeros((N,), np.float)
xind = np.arange(float(N))
ind = np.searchsorted(x, xind)[1:-1]
lut[1:-1] = ( ((xind[1:-1] - x[ind-1]) / (x[ind] - x[ind-1]))
* (y0[ind] - y1[ind-1]) + y1[ind-1])
lut[0] = y1[0]
lut[-1] = y0[-1]
# ensure that the lut is confined to values between 0 and 1 by clipping it
    lut = np.clip(lut, 0.0, 1.0)
#lut = where(lut > 1., 1., lut)
#lut = where(lut < 0., 0., lut)
return lut
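# Added illustration (not part of the original file): a plain two-point ramp
# gives a linear table, e.g. makeMappingArray(5, [(0.0, 0.0, 0.0),
# (1.0, 1.0, 1.0)]) -> [0., 0.25, 0.5, 0.75, 1.], while giving different
# y0/y1 at the same x (say (0.5, 1.0, 0.0)) encodes a jump from 1 back to 0
# halfway through the 0-1 domain.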
class Colormap:
"""Base class for all scalar to rgb mappings
Important methods:
* :meth:`set_bad`
* :meth:`set_under`
* :meth:`set_over`
"""
def __init__(self, name, N=256):
"""
Public class attributes:
:attr:`N` : number of rgb quantization levels
:attr:`name` : name of colormap
"""
self.name = name
self.N = N
self._rgba_bad = (0.0, 0.0, 0.0, 0.0) # If bad, don't paint anything.
self._rgba_under = None
self._rgba_over = None
self._i_under = N
self._i_over = N+1
self._i_bad = N+2
self._isinit = False
def __call__(self, X, alpha=1.0, bytes=False):
"""
*X* is either a scalar or an array (of any dimension).
If scalar, a tuple of rgba values is returned, otherwise
an array with the new shape = oldshape+(4,). If the X-values
are integers, then they are used as indices into the array.
If they are floating point, then they must be in the
interval (0.0, 1.0).
Alpha must be a scalar.
If bytes is False, the rgba values will be floats on a
0-1 scale; if True, they will be uint8, 0-255.
"""
if not self._isinit: self._init()
alpha = min(alpha, 1.0) # alpha must be between 0 and 1
alpha = max(alpha, 0.0)
self._lut[:-3, -1] = alpha
mask_bad = None
if not cbook.iterable(X):
vtype = 'scalar'
xa = np.array([X])
else:
vtype = 'array'
xma = ma.asarray(X)
xa = xma.filled(0)
mask_bad = ma.getmask(xma)
if xa.dtype.char in np.typecodes['Float']:
np.putmask(xa, xa==1.0, 0.9999999) #Treat 1.0 as slightly less than 1.
# The following clip is fast, and prevents possible
# conversion of large positive values to negative integers.
if NP_CLIP_OUT:
np.clip(xa * self.N, -1, self.N, out=xa)
else:
xa = np.clip(xa * self.N, -1, self.N)
xa = xa.astype(int)
# Set the over-range indices before the under-range;
# otherwise the under-range values get converted to over-range.
np.putmask(xa, xa>self.N-1, self._i_over)
np.putmask(xa, xa<0, self._i_under)
if mask_bad is not None and mask_bad.shape == xa.shape:
np.putmask(xa, mask_bad, self._i_bad)
if bytes:
lut = (self._lut * 255).astype(np.uint8)
else:
lut = self._lut
rgba = np.empty(shape=xa.shape+(4,), dtype=lut.dtype)
lut.take(xa, axis=0, mode='clip', out=rgba)
# twice as fast as lut[xa];
# using the clip or wrap mode and providing an
# output array speeds it up a little more.
if vtype == 'scalar':
rgba = tuple(rgba[0,:])
return rgba
def set_bad(self, color = 'k', alpha = 1.0):
'''Set color to be used for masked values.
'''
self._rgba_bad = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_under(self, color = 'k', alpha = 1.0):
'''Set color to be used for low out-of-range values.
Requires norm.clip = False
'''
self._rgba_under = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def set_over(self, color = 'k', alpha = 1.0):
'''Set color to be used for high out-of-range values.
Requires norm.clip = False
'''
self._rgba_over = colorConverter.to_rgba(color, alpha)
if self._isinit: self._set_extremes()
def _set_extremes(self):
if self._rgba_under:
self._lut[self._i_under] = self._rgba_under
else:
self._lut[self._i_under] = self._lut[0]
if self._rgba_over:
self._lut[self._i_over] = self._rgba_over
else:
self._lut[self._i_over] = self._lut[self.N-1]
self._lut[self._i_bad] = self._rgba_bad
    def _init(self):
'''Generate the lookup table, self._lut'''
raise NotImplementedError("Abstract class only")
def is_gray(self):
if not self._isinit: self._init()
return (np.alltrue(self._lut[:,0] == self._lut[:,1])
and np.alltrue(self._lut[:,0] == self._lut[:,2]))
class LinearSegmentedColormap(Colormap):
"""Colormap objects based on lookup tables using linear segments.
The lookup table is generated using linear interpolation for each
primary color, with the 0-1 domain divided into any number of
segments.
"""
def __init__(self, name, segmentdata, N=256):
"""Create color map from linear mapping segments
segmentdata argument is a dictionary with a red, green and blue
entries. Each entry should be a list of *x*, *y0*, *y1* tuples,
forming rows in a table.
Example: suppose you want red to increase from 0 to 1 over
the bottom half, green to do the same over the middle half,
and blue over the top half. Then you would use::
cdict = {'red': [(0.0, 0.0, 0.0),
(0.5, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'green': [(0.0, 0.0, 0.0),
(0.25, 0.0, 0.0),
(0.75, 1.0, 1.0),
(1.0, 1.0, 1.0)],
'blue': [(0.0, 0.0, 0.0),
(0.5, 0.0, 0.0),
(1.0, 1.0, 1.0)]}
Each row in the table for a given color is a sequence of
*x*, *y0*, *y1* tuples. In each sequence, *x* must increase
monotonically from 0 to 1. For any input value *z* falling
between *x[i]* and *x[i+1]*, the output value of a given color
will be linearly interpolated between *y1[i]* and *y0[i+1]*::
row i: x y0 y1
/
/
row i+1: x y0 y1
Hence y0 in the first row and y1 in the last row are never used.
.. seealso::
:func:`makeMappingArray`
"""
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
Colormap.__init__(self, name, N)
self._segmentdata = segmentdata
def _init(self):
self._lut = np.ones((self.N + 3, 4), np.float)
self._lut[:-3, 0] = makeMappingArray(self.N, self._segmentdata['red'])
self._lut[:-3, 1] = makeMappingArray(self.N, self._segmentdata['green'])
self._lut[:-3, 2] = makeMappingArray(self.N, self._segmentdata['blue'])
self._isinit = True
self._set_extremes()
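# Added usage sketch (illustrative only, reusing the cdict example from the
# class docstring above):
#
#     >>> cdict = {'red':   [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)],
#     ...          'green': [(0.0, 0.0, 0.0), (0.25, 0.0, 0.0),
#     ...                    (0.75, 1.0, 1.0), (1.0, 1.0, 1.0)],
#     ...          'blue':  [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)]}
#     >>> cmap = LinearSegmentedColormap('example', cdict, N=256)
#     >>> cmap(0.0)   # approximately black
#     (0.0, 0.0, 0.0, 1.0)
#     >>> cmap(1.0)   # approximately white
#     (1.0, 1.0, 1.0, 1.0)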
class ListedColormap(Colormap):
"""Colormap object generated from a list of colors.
This may be most useful when indexing directly into a colormap,
but it can also be used to generate special colormaps for ordinary
mapping.
"""
def __init__(self, colors, name = 'from_list', N = None):
"""
Make a colormap from a list of colors.
*colors*
a list of matplotlib color specifications,
or an equivalent Nx3 floating point array (*N* rgb values)
*name*
a string to identify the colormap
*N*
the number of entries in the map. The default is *None*,
in which case there is one colormap entry for each
element in the list of colors. If::
N < len(colors)
the list will be truncated at *N*. If::
N > len(colors)
the list will be extended by repetition.
"""
self.colors = colors
self.monochrome = False # True only if all colors in map are identical;
# needed for contouring.
if N is None:
N = len(self.colors)
else:
if cbook.is_string_like(self.colors):
self.colors = [self.colors] * N
self.monochrome = True
elif cbook.iterable(self.colors):
self.colors = list(self.colors) # in case it was a tuple
if len(self.colors) == 1:
self.monochrome = True
if len(self.colors) < N:
self.colors = list(self.colors) * N
del(self.colors[N:])
else:
try: gray = float(self.colors)
except TypeError: pass
else: self.colors = [gray] * N
self.monochrome = True
Colormap.__init__(self, name, N)
def _init(self):
rgb = np.array([colorConverter.to_rgb(c)
for c in self.colors], np.float)
self._lut = np.zeros((self.N + 3, 4), np.float)
self._lut[:-3, :-1] = rgb
self._lut[:-3, -1] = 1
self._isinit = True
self._set_extremes()
class Normalize:
"""
Normalize a given value to the 0-1 range
"""
def __init__(self, vmin=None, vmax=None, clip=False):
"""
If *vmin* or *vmax* is not given, they are taken from the input's
minimum and maximum value respectively. If *clip* is *True* and
the given value falls outside the range, the returned value
will be 0 or 1, whichever is closer. Returns 0 if::
vmin==vmax
Works with scalars or arrays, including masked arrays. If
*clip* is *True*, masked values are set to 1; otherwise they
remain masked. Clipping silently defeats the purpose of setting
the over, under, and masked colors in the colormap, so it is
likely to lead to surprises; therefore the default is
*clip* = *False*.
"""
self.vmin = vmin
self.vmax = vmax
self.clip = clip
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (val-vmin) * (1.0/(vmax-vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin + val * (vmax - vmin)
else:
return vmin + value * (vmax - vmin)
def autoscale(self, A):
'''
Set *vmin*, *vmax* to min, max of *A*.
'''
self.vmin = ma.minimum(A)
self.vmax = ma.maximum(A)
def autoscale_None(self, A):
' autoscale only None-valued vmin or vmax'
if self.vmin is None: self.vmin = ma.minimum(A)
if self.vmax is None: self.vmax = ma.maximum(A)
def scaled(self):
'return true if vmin and vmax set'
return (self.vmin is not None and self.vmax is not None)
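# Added illustration (not in the original source): with explicit limits the
# mapping is a simple linear rescale onto 0-1,
#
#     >>> norm = Normalize(vmin=0.0, vmax=10.0)
#     >>> norm(5.0)
#     0.5
#
# and when vmin/vmax are left as None they are filled in from the first data
# processed (autoscale_None), so reusing one instance keeps several artists on
# a common color scale.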
class LogNorm(Normalize):
"""
Normalize a given value to the 0-1 range on a log scale
"""
def __call__(self, value, clip=None):
if clip is None:
clip = self.clip
if cbook.iterable(value):
vtype = 'array'
val = ma.asarray(value).astype(np.float)
else:
vtype = 'scalar'
val = ma.array([value]).astype(np.float)
self.autoscale_None(val)
vmin, vmax = self.vmin, self.vmax
if vmin > vmax:
raise ValueError("minvalue must be less than or equal to maxvalue")
elif vmin<=0:
raise ValueError("values must all be positive")
elif vmin==vmax:
return 0.0 * val
else:
if clip:
mask = ma.getmask(val)
val = ma.array(np.clip(val.filled(vmax), vmin, vmax),
mask=mask)
result = (ma.log(val)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
if vtype == 'scalar':
result = result[0]
return result
def inverse(self, value):
if not self.scaled():
raise ValueError("Not invertible until scaled")
vmin, vmax = self.vmin, self.vmax
if cbook.iterable(value):
val = ma.asarray(value)
return vmin * ma.power((vmax/vmin), val)
else:
return vmin * pow((vmax/vmin), value)
class BoundaryNorm(Normalize):
'''
Generate a colormap index based on discrete intervals.
Unlike :class:`Normalize` or :class:`LogNorm`,
:class:`BoundaryNorm` maps values to integers instead of to the
interval 0-1.
Mapping to the 0-1 interval could have been done via
piece-wise linear interpolation, but using integers seems
simpler, and reduces the number of conversions back and forth
between integer and floating point.
'''
def __init__(self, boundaries, ncolors, clip=False):
'''
*boundaries*
a monotonically increasing sequence
*ncolors*
number of colors in the colormap to be used
If::
b[i] <= v < b[i+1]
then v is mapped to color j;
as i varies from 0 to len(boundaries)-2,
j goes from 0 to ncolors-1.
Out-of-range values are mapped to -1 if low and ncolors
if high; these are converted to valid indices by
:meth:`Colormap.__call__` .
'''
self.clip = clip
self.vmin = boundaries[0]
self.vmax = boundaries[-1]
self.boundaries = np.asarray(boundaries)
self.N = len(self.boundaries)
self.Ncmap = ncolors
if self.N-1 == self.Ncmap:
self._interp = False
else:
self._interp = True
def __call__(self, x, clip=None):
if clip is None:
clip = self.clip
x = ma.asarray(x)
mask = ma.getmaskarray(x)
xx = x.filled(self.vmax+1)
if clip:
            xx = np.clip(xx, self.vmin, self.vmax)
iret = np.zeros(x.shape, dtype=np.int16)
for i, b in enumerate(self.boundaries):
iret[xx>=b] = i
if self._interp:
iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16)
iret[xx<self.vmin] = -1
iret[xx>=self.vmax] = self.Ncmap
ret = ma.array(iret, mask=mask)
if ret.shape == () and not mask:
ret = int(ret) # assume python scalar
return ret
def inverse(self, value):
        raise ValueError("BoundaryNorm is not invertible")
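# Added worked example (illustrative): with boundaries [0, 1, 2, 4, 8] and
# ncolors=4 there is one color per interval, so
#
#     >>> bnorm = BoundaryNorm([0, 1, 2, 4, 8], ncolors=4)
#     >>> bnorm([0.5, 3.0, 9.0])
#     masked_array(data = [0 2 4], ...)
#
# 0.5 falls in [0, 1) -> index 0, 3.0 falls in [2, 4) -> index 2, and 9.0 lies
# above the last boundary so it gets the over-range index ncolors, which
# Colormap.__call__ later converts to the 'over' color.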
class NoNorm(Normalize):
'''
Dummy replacement for Normalize, for the case where we
want to use indices directly in a
:class:`~matplotlib.cm.ScalarMappable` .
'''
def __call__(self, value, clip=None):
return value
def inverse(self, value):
return value
# compatibility with earlier class names that violated convention:
normalize = Normalize
no_norm = NoNorm
| agpl-3.0 |
smunaut/gnuradio | gr-filter/examples/fir_filter_ccc.py | 6 | 4023 | #!/usr/bin/env python
#
# Copyright 2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, filter
from gnuradio import analog
from gnuradio import blocks
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = analog.noise_source_c(analog.GR_GAUSSIAN, 1)
self.head = blocks.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_out = blocks.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
                      help="Decimation factor [default=%default]")
(options, args) = parser.parse_args ()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
xzturn/tensorflow | tensorflow/lite/micro/examples/micro_speech/apollo3/compare_1k.py | 9 | 5012 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Debugging script for checking calculation values."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import struct
import matplotlib.pyplot as plt
import numpy as np
# import soundfile as sf
def new_data_to_array(fn, datatype='int16'):
"""Converts file information to an in-memory array."""
vals = []
with open(fn) as f:
for n, line in enumerate(f):
if n != 0:
vals.extend([int(v, 16) for v in line.split()])
  b = bytes(bytearray(vals))  # portable across Python 2 and 3 (chr-join is 2-only)
if datatype == 'int8':
typestr = 'b'
arraylen = int(len(b))
elif datatype == 'int16':
typestr = 'h'
arraylen = int(len(b) // 2)
elif datatype == 'int32':
typestr = 'i'
arraylen = int(len(b) // 4)
if datatype == 'uint8':
typestr = 'B'
arraylen = int(len(b))
elif datatype == 'uint16':
typestr = 'H'
arraylen = int(len(b) // 2)
elif datatype == 'uint32':
typestr = 'I'
arraylen = int(len(b) // 4)
y = np.array(struct.unpack('<' + typestr * arraylen, b))
return y
# x is the fixed-point input in Qm.n format
def to_float(x, n):
return x.astype(float) * 2**(-n)
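# Added example (illustrative): an int16 sample in Q1.15, say 16384, maps back
# to 16384 * 2**-15 = 0.5, i.e. to_float(np.array([16384]), 15) -> array([0.5]).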
micro_windowed_input = new_data_to_array(
'micro_windowed_input.txt', datatype='int32')
cmsis_windowed_input = new_data_to_array(
'cmsis_windowed_input.txt', datatype='int16')
micro_dft = new_data_to_array('micro_dft.txt', datatype='int32')
cmsis_dft = new_data_to_array('cmsis_dft.txt', datatype='int16')
py_dft = np.fft.rfft(to_float(cmsis_windowed_input, 15), n=512)
py_result = np.empty((2 * py_dft.size), dtype=np.float)
py_result[0::2] = np.real(py_dft)
py_result[1::2] = np.imag(py_dft)
micro_power = new_data_to_array('micro_power.txt', datatype='int32')
cmsis_power = new_data_to_array('cmsis_power.txt', datatype='int16')
py_power = np.square(np.abs(py_dft))
micro_power_avg = new_data_to_array('micro_power_avg.txt', datatype='uint8')
cmsis_power_avg = new_data_to_array('cmsis_power_avg.txt', datatype='uint8')
plt.figure(1)
plt.subplot(311)
plt.plot(micro_windowed_input, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_windowed_input, label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_windowed_input, 30), label='Micro to float')
plt.plot(to_float(cmsis_windowed_input, 15), label='CMSIS to float')
plt.legend()
plt.figure(2)
plt.subplot(311)
plt.plot(micro_dft, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_dft, label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_dft, 22), label='Micro to float')
# CMSIS result has 6 fractional bits (not 7) due to documentation error (see
# README.md)
plt.plot(to_float(cmsis_dft, 6), label='CMSIS to float')
plt.plot(py_result, label='Python result')
plt.legend()
plt.figure(3)
plt.subplot(311)
plt.plot(micro_power, label='Micro fixed')
plt.legend()
plt.subplot(312)
plt.plot(cmsis_power[0:256], label='CMSIS fixed')
plt.legend()
plt.subplot(313)
plt.plot(to_float(micro_power, 22), label='Micro to float')
plt.plot(to_float(cmsis_power[0:256], 6), label='CMSIS to float')
plt.plot(py_power, label='Python result')
plt.legend()
plt.figure(4)
plt.plot(micro_power_avg, label='Micro fixed')
plt.plot(cmsis_power_avg, label='CMSIS fixed')
plt.legend()
plt.show()
# t = np.arange(16000.*0.03)/16000.
# # Factor of 10 because micro preprocessing overflows otherwise
# sin1k = 0.1*np.sin(2*np.pi*1000*t)
#
# plt.figure(1)
# plt.subplot(511)
# plt.plot(sin1k)
# plt.title('Input sine')
#
# plt.subplot(512)
# plt.plot(to_float(micro_windowed_input, 30), label='Micro-Lite')
# plt.plot(to_float(cmsis_windowed_input, 15), label='CMSIS')
# plt.title('Windowed sine')
# plt.legend(loc='center right')
#
# plt.subplot(513)
# plt.plot(to_float(micro_dft, 22), label='Micro-Lite')
# plt.plot(to_float(cmsis_dft, 6), label='CMSIS')
# plt.title('FFT')
# plt.legend(loc='center')
#
# plt.subplot(514)
# plt.plot(to_float(micro_power, 22), label='Micro-Lite')
# plt.plot(to_float(cmsis_power[0:256], 6), label='CMSIS')
# plt.title('|FFT|^2')
# plt.legend(loc='center right')
#
# plt.subplot(515)
# plt.plot(micro_power_avg, label='Micro-Lite')
# plt.plot(cmsis_power_avg, label='CMSIS')
# plt.title('Averaged |FFT|^2')
# plt.legend(loc='center right')
#
# plt.tight_layout(pad=0, w_pad=0.2, h_pad=0.2)
#
# plt.show()
#
| apache-2.0 |
djgagne/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
natasasdj/OpenWPM | analysis/05_images_pixels.py | 2 | 6264 | import os
import sqlite3
import pandas as pd
from matplotlib.colors import LogNorm
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from statsmodels.distributions.empirical_distribution import ECDF
def thousands(x, pos):
if x>=1e9:
return '%dB' % (x*1e-9)
elif x>=1e6:
return '%dM' % (x*1e-6)
elif x>=1e3:
return '%dK' % (x*1e-3)
else:
return x
formatter = FuncFormatter(thousands)
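# Added note (illustrative): thousands(2500000, None) -> '2M',
# thousands(1500, None) -> '1K', and values below 1000 are returned unchanged.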
res_dir = '/home/nsarafij/project/OpenWPM/analysis/results/'
db = res_dir + 'images.sqlite'
conn = sqlite3.connect(db)
query = 'SELECT * FROM Images'
df = pd.read_sql_query(query,conn)
df['pixels']=map(int,df['pixels'])
df['pixels'].max() #178,560,000
df['pixels'].isnull().sum() #2,797,214
df['pixels'].isnull().sum()/float(df.shape[0])*100 #8.8%
pixels = df['pixels'].fillna(-1).map(int)
def ecdf_for_plot(sample):
#x = np.linspace(min(sample), max(sample))
print "sample: ",type(sample)
x = sample.sort_values(ascending = False)
ecdf = ECDF(x)
# print ecdf
print "ecdf: ",type(ecdf)
y = ecdf(x)
#print y
print "y: ", type(y)
return (x,y)
fig_dir = '/home/nsarafij/project/OpenWPM/analysis/figs_10k/'
(x,y) = ecdf_for_plot(pixels)
plt.figure()
plt.step(x,y)
plt.title('CDF of the total number of pixels')
plt.xlabel('total number of pixels')
plt.grid(True)
plt.xscale('symlog')
plt.savefig(os.path.join(fig_dir,'04a_pix_distr.png'))
plt.show()
grouped = df.groupby('pixels')
s_pix_count = grouped.size()
s_pix_count_=s_pix_count/float(df.shape[0])*100
df_pix_count = pd.DataFrame(s_pix_count,columns=['count'])
# count of total number of pixels
fig,ax=plt.subplots()
plt.scatter(s_pix_count.index,s_pix_count,marker='.',color='darkblue')
#s_pix_count_lim = s_pix_count[s_pix_count > 0.0001*df.shape[0]]
#plt.scatter(s_pix_count_lim.index,s_pix_count_lim, marker='.',color='lightblue')
plt.xscale('symlog')
plt.yscale('log')
plt.xlabel('total number of pixels')
plt.ylabel('number of images')
plt.xlim([-1,1e8])
plt.grid(True)
plt.show()
fig.savefig(fig_dir + 'pix_count.png',format='png')
fig.savefig(fig_dir + 'pix_count.eps',format='eps')
fig,ax=plt.subplots()
plt.scatter(s_pix_count_.index,s_pix_count_,marker='.',color='darkblue')
plt.xlabel('total number of pixels')
plt.ylabel('percentage of total number of images')
plt.xscale('symlog')
plt.yscale('log')
plt.xlim([-1,1e8])
plt.ylim([1e-6,1e2])
plt.grid(True)
plt.show()
fig.savefig(fig_dir + 'pix_perc.png',format='png')
fig.savefig(fig_dir + 'pix_perc.eps',format='eps')
# Top 20 size counts of images
s_pix_count_sort = s_pix_count.sort_values(ascending=False)
s_pix_perc_sort = s_pix_count_sort/float(df.shape[0])*100
x=range(1,21)
labels = map(str,[ int(a) for a in list(s_pix_count_sort.index[0:20]) ])
fig, ax = plt.subplots()
plt.bar(x,s_pix_count_sort.iloc[0:20],align='center', label ='all')
plt.xticks(x, labels,rotation=70)
plt.ylabel('count')
plt.xlabel('total number of pixels')
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
plt.show()
fig.savefig(fig_dir + 'pix_count_top20.png',format='png')
fig.savefig(fig_dir + 'pix_count_top20.eps',format='eps')
x=range(1,21)
labels = map(str,[ int(a) for a in list(s_pix_perc_sort.index[0:20]) ])
fig, ax = plt.subplots()
plt.bar(x,s_pix_perc_sort.iloc[0:20],align='center', label ='all')
plt.xticks(x, labels,rotation=70)
plt.ylabel('percentage of total number of images')
plt.xlabel('total number of pixels')
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
plt.show()
fig.savefig(fig_dir + 'pix_perc_top20.png',format='png')
fig.savefig(fig_dir + 'pix_perc_top20.eps',format='eps')
#s=df['size'][df['size']!=df['cont_length']]
#l=s.tolist()
#df['pixels'].fillna(value=-100,inplace=True)
'''
grouped = df.groupby(['pixels','type'])
s_pix_type_count = grouped.size()
df_pix_type_count = pd.DataFrame(s_type_count,columns=['count'])
'''
# scatter plot no of pixels vs size with the color showing count of a pixel-size pair
grouped = df.groupby(['pixels','size'])
pix_size_count = grouped.size().sort_values()
pixels = pix_size_count.index.get_level_values(level='pixels')
size = pix_size_count.index.get_level_values(level='size')
fig,ax=plt.subplots()
plt.scatter(pixels,size,c=pix_size_count,cmap="Reds", norm=LogNorm(),edgecolors='none')
cbar = plt.colorbar()
cbar.set_label('count of images')
plt.grid(True)
plt.xscale('symlog')
plt.yscale('symlog')
plt.xlim([-1,1e8])
plt.ylim([-1,1e8])
plt.xlabel('total no of pixels')
plt.ylabel('file size [bytes]')
#plt.show()
fig.savefig(fig_dir + 'pix_size_count.png',format='png')
fig.savefig(fig_dir + 'pix_size_count.eps',format='eps')
fig,ax=plt.subplots()
plt.scatter(pixels,size,c=pix_size_count/float(df.shape[0])*100,cmap="Reds", norm=LogNorm(),edgecolors='none')
cbar = plt.colorbar()
cbar.set_label('percentage of total number of images')
plt.grid(True)
plt.xscale('symlog')
plt.yscale('symlog')
plt.xlim([-1,1e8])
plt.ylim([-1,1e8])
plt.xlabel('total no of pixels')
plt.ylabel('file size [bytes]')
#plt.show()
fig.savefig(fig_dir + 'pix_size_perc.png',format='png')
fig.savefig(fig_dir + 'pix_size_perc.eps',format='eps')
# top 20 pixel size count
pix_size_count.sort_values(ascending = False,inplace = True)
x=range(1,21)
labels = map(str,[(int(a),int(b)) for (a,b) in pix_size_count.index[0:20]])
fig, ax = plt.subplots()
plt.bar(x,pix_size_count.iloc[0:20],align='center', label ='all')
plt.xticks(x, labels,rotation=70)
plt.ylabel('count')
plt.xlabel('total number of pixels, file size [bytes]')
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
plt.show()
fig.savefig(fig_dir + 'pix_size_count_top20.png',format='png')
fig.savefig(fig_dir + 'pix_size_count_top20.eps',format='eps')
fig, ax = plt.subplots()
plt.bar(x,pix_size_count.iloc[0:20]/float(df.shape[0])*100,align='center', label ='all')
plt.xticks(x, labels,rotation=70)
plt.ylabel('percentage of total number of images')
plt.xlabel('total number of pixels, file size [bytes]')
ax.yaxis.set_major_formatter(formatter)
fig.tight_layout()
plt.grid(True)
plt.show()
fig.savefig(fig_dir + 'pix_size_perc_top20.png',format='png')
fig.savefig(fig_dir + 'pix_size_perc_top20.eps',format='eps')
| gpl-3.0 |
semiautomaticgit/SemiAutomaticClassificationPlugin | semiautomaticclassificationplugin.py | 1 | 86616 | # -*- coding: utf-8 -*-
'''
/**************************************************************************************************************************
SemiAutomaticClassificationPlugin
The Semi-Automatic Classification Plugin for QGIS allows for the supervised classification of remote sensing images,
providing tools for the download, the preprocessing and postprocessing of images.
-------------------
begin : 2012-12-29
copyright : (C) 2012-2021 by Luca Congedo
email : ing.congedoluca@gmail.com
**************************************************************************************************************************/
/**************************************************************************************************************************
*
* This file is part of Semi-Automatic Classification Plugin
*
* Semi-Automatic Classification Plugin is free software: you can redistribute it and/or modify it under
* the terms of the GNU General Public License as published by the Free Software Foundation,
* version 3 of the License.
*
* Semi-Automatic Classification Plugin is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with
* Semi-Automatic Classification Plugin. If not, see <http://www.gnu.org/licenses/>.
*
**************************************************************************************************************************/
'''
global PluginCheck
PluginCheck = 'Yes'
import os
import sys
try:
from .core import config as cfg
except:
PluginCheck = 'No'
# try importing different path
from PyQt5.QtCore import QSettings
rK = QSettings()
mPythonSettings = rK.value(cfg.regPythonModulesPathSettings, str(cfg.PythonModulesPathSettings))
if len(mPythonSettings) > 0:
for ppS in mPythonSettings.split(';'):
if len(ppS) > 0:
sys.path.insert(1, ppS)
import platform
import inspect
import shutil
import time
import datetime
import subprocess
import numpy as np
import urllib
import requests
import ssl
import smtplib
import gc
from http.cookiejar import CookieJar
import itertools
import zipfile
import tarfile
import base64
import random
import re
import xml.etree.cElementTree as ET
from xml.dom import minidom
import json
import hashlib
import ctypes
import shlex
from collections import Counter
import multiprocessing as mp
try:
mp.set_start_method('spawn')
except:
pass
from multiprocessing import Pool, Manager
# Import the PyQt libraries
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt, QObject, QFileInfo, QSettings, QDir, QDate, QVariant, pyqtSignal
from PyQt5.QtWidgets import QApplication, QTreeWidgetItem
from PyQt5.QtNetwork import QNetworkRequest
# Import the QGIS libraries
import qgis.core as qgisCore
import qgis.gui as qgisGui
import qgis.utils as qgisUtils
from osgeo import gdal
from osgeo import ogr
from osgeo import osr
# Initialize Qt ui
from .ui.resources_rc import *
from .ui.ui_semiautomaticclassificationplugin import Ui_SemiAutomaticClassificationPlugin
from .ui.ui_semiautomaticclassificationplugin_welcome import Ui_SCP_Welcome
from .ui.semiautomaticclassificationplugindialog import SemiAutomaticClassificationPluginDialog
from .ui.semiautomaticclassificationplugindialog import SpectralSignatureDialog
from .ui.semiautomaticclassificationplugindialog import WelcomeDialog
from .ui.semiautomaticclassificationplugindialog import ScatterPlotDialog
from .ui.semiautomaticclassificationplugindialog import DockClassDialog
# Import plugin version
from .__init__ import version as semiautomaticclassVersion
# required by other modules
cfg.QObjectSCP = QObject
cfg.pyqtSignalSCP = pyqtSignal
if PluginCheck == 'Yes':
try:
from .core.messages import Messages as msgs
from .core.utils import Utils
from .core.signature_importer import Signature_Importer
from .maininterface.downloadproductpointer import DownloadProductPointer
from .maininterface.downloadproducts import DownloadProducts
from .spectralsignature.spectralsignatureplot import SpectralSignaturePlot
from .spectralsignature.scatter_plot import Scatter_Plot
from .dock.manualroi import ManualROI
from .dock.regionroi import RegionROI
from .dock.scpdock import SCPDock
from .dock.classificationpreview import ClassificationPreview
from .maininterface.multipleroiTab import MultipleROITab
from .spectralsignature.usgs_spectral_lib import USGS_Spectral_Lib
from .maininterface.landsatTab import LandsatTab
from .maininterface.asterTab import ASTERTab
from .maininterface.modisTab import MODISTab
from .maininterface.sentinel1Tab import Sentinel1Tab
from .maininterface.sentinel2Tab import Sentinel2Tab
from .maininterface.sentinel3Tab import Sentinel3Tab
from .maininterface.GOESTab import GOESTab
from .maininterface.accuracy import Accuracy
from .maininterface.crossclassificationTab import CrossClassification
from .maininterface.bandcombination import BandCombination
from .maininterface.splitTab import SplitTab
from .maininterface.reprojectrasterbands import ReprojectRasterBands
from .maininterface.pcaTab import PcaTab
from .maininterface.clusteringTab import ClusteringTab
from .maininterface.classSignatureTab import ClassSignatureTab
from .maininterface.zonalStatRasterTab import ZonalStatRasterTab
from .maininterface.vectortorasterTab import VectorToRasterTab
from .maininterface.bandsetTab import BandsetTab
from .maininterface.algorithmWeightTab import AlgWeightTab
from .maininterface.signatureThresholdTab import SigThresholdTab
from .maininterface.LCSignatureThresholdTab import LCSigThresholdTab
from .maininterface.rgblistTab import RGBListTab
from .maininterface.bandsetlistTab import BandSetListTab
from .maininterface.LCSignaturePixel import LCSigPixel
from .maininterface.LCSignaturePixel2 import LCSigPixel2
from .maininterface.bandcalcTab import BandCalcTab
from .maininterface.batchTab import BatchTab
from .maininterface.clipmultiplerasters import ClipMultipleRasters
from .maininterface.stackrasterbands import StackRasterBands
from .maininterface.mosaicbandsets import MosaicBandSets
from .maininterface.cloudmasking import CloudMasking
from .maininterface.spectraldistancebandsets import SpectralDistanceBandsets
from .maininterface.randomForestTab import ClassRandomForestTab
from .maininterface.editraster import EditRaster
from .maininterface.sieveTab import SieveRaster
from .maininterface.erosionTab import ErosionRaster
from .maininterface.dilationTab import DilationRaster
from .maininterface.neighborpixelsTab import NeighborPixels
from .maininterface.clipmultiplerasterspointer import ClipMultiplerastersPointer
from .maininterface.landcoverchange import LandCoverChange
from .maininterface.classreportTab import ClassReportTab
from .maininterface.classificationTab import ClassificationTab
from .maininterface.classtovectorTab import ClassToVectorTab
from .maininterface.reclassificationTab import ReclassificationTab
from .maininterface.settings import Settings
from .core.input import Input
from .ui.ui_utils import Ui_Utils
except:
PluginCheck = 'No'
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Please, restart QGIS for executing the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Info)
try:
import scipy.stats.distributions as statdistr
from scipy.spatial.distance import cdist
from scipy import signal
from scipy.ndimage import label
from scipy.cluster.vq import vq, kmeans, whiten
cfg.scipyCheck = 'Yes'
except:
cfg.scipyCheck = 'No'
try:
from matplotlib.ticker import MaxNLocator
import matplotlib.pyplot as mplplt
import matplotlib.colors as mplcolors
cfg.matplotlibCheck = 'Yes'
except Exception as err:
cfg.testMatplotlibV = err
cfg.matplotlibCheck = 'No'
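# cfg.scipyCheck / cfg.matplotlibCheck record whether the optional scientific dependencies imported
# correctly; SemiAutomaticClassificationPlugin.__init__ below binds the related names only when a flag
# is 'Yes' and pushes a warning to the QGIS message bar when it is 'No'.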
class SemiAutomaticClassificationPlugin:
def __init__(self, iface):
try:
cfg.osSCP = os
cfg.sysSCP = sys
cfg.platformSCP = platform
cfg.shutilSCP = shutil
cfg.inspectSCP = inspect
cfg.timeSCP = time
cfg.datetimeSCP = datetime
cfg.subprocessSCP = subprocess
cfg.urllibSCP = urllib
cfg.requestsSCP = requests
cfg.itertoolsSCP = itertools
cfg.zipfileSCP = zipfile
cfg.tarfileSCP = tarfile
cfg.base64SCP = base64
cfg.randomSCP = random
cfg.QtCoreSCP = QtCore
cfg.QtGuiSCP = QtGui
cfg.QtWidgetsSCP = QtWidgets
cfg.QTreeWidgetItemSCP = QTreeWidgetItem
cfg.QNetworkRequestSCP = QNetworkRequest
cfg.QtSCP = Qt
cfg.QVariantSCP = QVariant
cfg.QFileInfoSCP = QFileInfo
cfg.QSettingsSCP = QSettings
cfg.QDirSCP = QDir
cfg.QDateSCP = QDate
cfg.qgisCoreSCP = qgisCore
cfg.qgisGuiSCP = qgisGui
cfg.gdalSCP = gdal
cfg.ogrSCP = ogr
cfg.osrSCP = osr
cfg.sslSCP = ssl
cfg.smtplibSCP = smtplib
cfg.CookieJarSCP = CookieJar
cfg.gcSCP = gc
cfg.reSCP = re
cfg.ETSCP = ET
cfg.minidomSCP = minidom
cfg.jsonSCP = json
cfg.hashlibSCP = hashlib
cfg.ctypesSCP = ctypes
cfg.shlexSCP = shlex
cfg.counterSCP = Counter
cfg.multiPSCP = mp
cfg.poolSCP = Pool
cfg.MultiManagerSCP = Manager
except:
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Please, restart QGIS for executing the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Info)
return
try:
cfg.np = np
except:
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Error. Check Python Numpy installation for the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Critical)
try:
if cfg.scipyCheck == 'Yes':
cfg.statdistrSCP = statdistr
cfg.cdistSCP = cdist
cfg.signalSCP = signal
cfg.labelSCP = label
cfg.vqSCP = vq
cfg.kmeansSCP = kmeans
cfg.whitenSCP = whiten
if cfg.matplotlibCheck == 'Yes':
cfg.MaxNLocatorSCP = MaxNLocator
cfg.mplpltSCP = mplplt
cfg.mplcolorsSCP = mplcolors
except:
pass
if cfg.scipyCheck == 'No':
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Error. Check Python Scipy installation for the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Critical)
if cfg.matplotlibCheck == 'No':
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Error. Check Python Matplotlib installation for the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Critical)
if PluginCheck == 'Yes':
# reference to QGIS interface
cfg.iface = iface
# reference to map canvas
cfg.cnvs = iface.mapCanvas()
# create the dialog
cfg.dlg = SemiAutomaticClassificationPluginDialog()
# reference to ui
cfg.ui = cfg.dlg.ui
# class dock dialog
cfg.dockclassdlg = DockClassDialog(cfg.iface.mainWindow(), cfg.iface)
# reference dock class ui
cfg.uidc = cfg.dockclassdlg.ui
# welcome dialog
cfg.welcomedlg = WelcomeDialog()
# spectral signature plot dialog
cfg.spectralplotdlg = SpectralSignatureDialog()
cfg.uisp = cfg.spectralplotdlg.ui
# scatter plot dialog
cfg.scatterplotdlg = ScatterPlotDialog()
cfg.uiscp = cfg.scatterplotdlg.ui
cfg.mx = msgs(cfg.iface)
cfg.utls = Utils()
cfg.SCPD = SCPDock()
cfg.classPrev = ClassificationPreview(cfg.cnvs)
cfg.spSigPlot = SpectralSignaturePlot()
cfg.scaPlT = Scatter_Plot()
cfg.multiROI = MultipleROITab()
cfg.usgsLib = USGS_Spectral_Lib()
cfg.acc = Accuracy()
cfg.crossC = CrossClassification()
cfg.bsComb = BandCombination()
cfg.splitT = SplitTab()
cfg.rprjRstBndsT = ReprojectRasterBands()
cfg.pcaT = PcaTab()
cfg.clusteringT = ClusteringTab()
cfg.classSigT = ClassSignatureTab()
cfg.znlSttRstT = ZonalStatRasterTab()
cfg.vctRstrT = VectorToRasterTab()
cfg.bst = BandsetTab()
cfg.algWT = AlgWeightTab()
cfg.signT = SigThresholdTab()
cfg.LCSignT = LCSigThresholdTab()
cfg.RGBLT = RGBListTab()
cfg.bstLT = BandSetListTab()
cfg.bCalc = BandCalcTab()
			cfg.batchT = BatchTab()
cfg.clipMulti = ClipMultipleRasters()
cfg.stackRstr = StackRasterBands()
cfg.mosaicBS = MosaicBandSets()
cfg.cloudMsk = CloudMasking()
cfg.spclDstBS = SpectralDistanceBandsets()
cfg.rndmFrst = ClassRandomForestTab()
cfg.editRstr = EditRaster()
cfg.sieveRstr = SieveRaster()
cfg.ersnRstr = ErosionRaster()
cfg.dltnRstr = DilationRaster()
cfg.clssNghbr = NeighborPixels()
cfg.downProd = DownloadProducts()
cfg.landsatT = LandsatTab()
cfg.ASTERT = ASTERTab()
cfg.MODIST = MODISTab()
cfg.sentinel1T = Sentinel1Tab()
cfg.sentinel2T = Sentinel2Tab()
cfg.sentinel3T = Sentinel3Tab()
cfg.goesT = GOESTab()
cfg.landCC = LandCoverChange()
cfg.classRep = ClassReportTab()
cfg.classTab = ClassificationTab()
cfg.classVect = ClassToVectorTab()
cfg.reclassification = ReclassificationTab()
cfg.sigImport = Signature_Importer()
cfg.mnlROI = ManualROI(cfg.cnvs)
cfg.regionROI = RegionROI(cfg.cnvs)
cfg.dwnlPrdPnt = DownloadProductPointer(cfg.cnvs)
cfg.clipMultiP = ClipMultiplerastersPointer(cfg.cnvs)
cfg.LCSPixel = LCSigPixel(cfg.cnvs)
cfg.LCSPixel2 = LCSigPixel2(cfg.cnvs)
cfg.sets = Settings()
cfg.uiUtls = Ui_Utils()
cfg.ipt = Input()
# connect when map is clicked
cfg.mnlROI.rightClicked.connect(cfg.SCPD.clckR)
cfg.mnlROI.leftClicked.connect(cfg.SCPD.clckL)
cfg.mnlROI.moved.connect(cfg.SCPD.movedPointer)
cfg.regionROI.ROIleftClicked.connect(cfg.SCPD.pointerClickROI)
cfg.regionROI.ROIrightClicked.connect(cfg.SCPD.pointerRightClickROI)
cfg.regionROI.moved.connect(cfg.SCPD.movedPointer)
cfg.clipMultiP.leftClicked.connect(cfg.clipMulti.pointerLeftClick)
cfg.clipMultiP.rightClicked.connect(cfg.clipMulti.pointerRightClick)
cfg.dwnlPrdPnt.leftClicked.connect(cfg.downProd.pointerLeftClick)
cfg.dwnlPrdPnt.rightClicked.connect(cfg.downProd.pointerRightClick)
cfg.classPrev.leftClicked.connect(cfg.SCPD.pointerClickPreview)
cfg.classPrev.rightClicked.connect(cfg.SCPD.pointerRightClickPreview)
cfg.LCSPixel.MaprightClicked.connect(cfg.LCSignT.pointerLeftClick)
cfg.LCSPixel.MapleftClicked.connect(cfg.LCSignT.pointerLeftClick)
cfg.LCSPixel2.MaprightClicked.connect(cfg.spSigPlot.pointerLeftClick)
cfg.LCSPixel2.MapleftClicked.connect(cfg.spSigPlot.pointerLeftClick)
# system variables
cfg.utls.findSystemSpecs()
cfg.utls.readVariables()
# set font
try:
f, s, i = cfg.utls.readQGISVariableFont()
font = cfg.QtGuiSCP.QFont()
font.setFamily(f)
font.setPointSize(int(s))
cfg.dlg.setFont(font)
cfg.ui.menu_treeWidget.setFont(font)
except:
pass
# initialize plugin directory
cfg.plgnDir = cfg.QFileInfoSCP(cfg.qgisCoreSCP.QgsApplication.qgisUserDatabaseFilePath()).path() + '/python/plugins/' + str(__name__).split('.')[0]
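			# the plugin folder is derived from the QGIS user database path (i.e. the active user profile folder)
			# plus 'python/plugins/<top-level package name>'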
# locale name
lclNm = cfg.QSettingsSCP().value('locale/userLocale')[0:2]
self.registryKeys()
if len(cfg.PythonPathSettings) > 0:
mp.set_executable(cfg.PythonPathSettings)
# temporary directory
tmpDir = cfg.utls.getTempDirectory()
cfg.ui.temp_directory_label.setText(tmpDir)
# log file path
cfg.logFile = cfg.tmpDir.replace('//', '/') + '/__0semiautomaticclass.log'
# locale
lclPth = ''
if cfg.QFileInfoSCP(cfg.plgnDir).exists():
lclPth = cfg.plgnDir + '/i18n/semiautomaticclassificationplugin_' + lclNm + '.qm'
if cfg.QFileInfoSCP(lclPth).exists():
trnsltr = cfg.QtCoreSCP.QTranslator()
trnsltr.load(lclPth)
if cfg.QtCoreSCP.qVersion() > '4.3.3':
cfg.QtCoreSCP.QCoreApplication.installTranslator(trnsltr)
# info
cfg.sysSCPInfo = str(' SemiAutomaticClass ' + semiautomaticclassVersion() + ' - QGIS v. ' + str(cfg.QGISVer) + ' L:' + lclNm + ' - OS ' + str(cfg.sysSCPNm) + ' - 64bit =' + cfg.sysSCP64bit)
# multiprocess Windows
if cfg.sysSCPNm == 'Windows':
mp.set_executable(os.path.join(sys.exec_prefix, 'pythonw.exe'))
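				# pythonw.exe is the console-less Windows interpreter, so multiprocessing workers do not open a terminal window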
# Mac OS
elif cfg.sysSCPNm == 'Darwin':
dPref = os.environ['PATH'].split(':')
for flPref in dPref:
flPrefPy = os.path.join(flPref, 'python3')
					# first test: use a python3 executable found on the PATH
if os.path.isfile(flPrefPy):
mp.set_executable(flPrefPy)
cfg.sysSCPInfo = cfg.sysSCPInfo + ' - python path =' + flPrefPy
					# second test: prefer a python3 located under a path containing 'library', then stop searching
if 'library' in flPref.lower():
if os.path.isfile(flPrefPy):
mp.set_executable(flPrefPy)
cfg.sysSCPInfo = cfg.sysSCPInfo + ' - python path =' + flPrefPy
break
# GDAL config
try:
cfg.gdalSCP.SetConfigOption('GDAL_NUM_THREADS', str(cfg.threads))
cfg.gdalSCP.SetCacheMax(int(cfg.RAMValue * 0.3 * 1000000))
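				# GDAL cache is set to roughly 30% of the configured RAM (RAMValue appears to be in MB; * 1000000 converts to bytes)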
cfg.gdalSCP.SetConfigOption('GDAL_DISABLE_READDIR_ON_OPEN', 'TRUE')
cfg.gdalSCP.SetConfigOption('GDAL_CACHEMAX', '4')
cfg.gdalSCP.SetConfigOption('VSI_CACHE', 'FALSE')
except:
pass
# read registry keys
def registryKeys(self):
''' registry keys '''
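		# readRegistryKeys(key, default) (core.utils) appears to return the value saved in the QGIS/Qt
		# settings for 'key', or 'default' when nothing is stored; values are generally kept as strings,
		# hence the int(...) casts on some of the settings below.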
cfg.firstInstallVal = cfg.utls.readRegistryKeys(cfg.regFirstInstall, cfg.firstInstallVal)
cfg.logSetVal = cfg.utls.readRegistryKeys(cfg.regLogKey, cfg.logSetVal)
cfg.downNewsVal = cfg.utls.readRegistryKeys(cfg.downNewsKey, cfg.downNewsVal)
cfg.vrtRstProjVal = cfg.utls.readRegistryKeys(cfg.vrtRstProjKey, cfg.vrtRstProjVal)
cfg.ROIClrVal = cfg.utls.readRegistryKeys(cfg.regROIClr, cfg.ROIClrVal)
cfg.ROITrnspVal = int(cfg.utls.readRegistryKeys(cfg.regROITransp, cfg.ROITrnspVal))
cfg.outTempRastFormat = cfg.utls.readRegistryKeys(cfg.regTempRasterFormat, str(cfg.outTempRastFormat))
cfg.rasterCompression = cfg.utls.readRegistryKeys(cfg.regRasterCompression, str(cfg.rasterCompression))
cfg.parallelWritingCheck = cfg.utls.readRegistryKeys(cfg.regparallelWritingCheck, str(cfg.parallelWritingCheck))
cfg.RAMValue = int(cfg.utls.readRegistryKeys(cfg.regRAMValue, str(cfg.RAMValue)))
cfg.threads = int(cfg.utls.readRegistryKeys(cfg.regThreadsValue, str(cfg.threads)))
cfg.gdalPath = cfg.utls.readRegistryKeys(cfg.regGDALPathSettings, str(cfg.gdalPath))
cfg.PythonPathSettings = cfg.utls.readRegistryKeys(cfg.regPythonPathSettings, str(cfg.PythonPathSettings))
cfg.PythonModulesPathSettings = cfg.utls.readRegistryKeys(cfg.regPythonModulesPathSettings, str(cfg.PythonModulesPathSettings))
cfg.tmpDir = cfg.utls.readRegistryKeys(cfg.regTmpDir, cfg.tmpDir)
cfg.fldID_class = cfg.utls.readRegistryKeys(cfg.regIDFieldName, cfg.fldID_class)
cfg.fldMacroID_class = cfg.utls.readRegistryKeys(cfg.regMacroIDFieldName, cfg.fldMacroID_class)
cfg.macroclassCheck = cfg.utls.readRegistryKeys(cfg.regConsiderMacroclass, cfg.macroclassCheck)
cfg.sentinelAlternativeSearch = cfg.utls.readRegistryKeys(cfg.regSentinelAlternativeSearch, cfg.sentinelAlternativeSearch)
cfg.LCsignatureCheckBox = cfg.utls.readRegistryKeys(cfg.regLCSignature, cfg.LCsignatureCheckBox)
cfg.fldROI_info = cfg.utls.readRegistryKeys(cfg.regInfoFieldName, cfg.fldROI_info)
cfg.fldROIMC_info = cfg.utls.readRegistryKeys(cfg.regMCInfoFieldName, cfg.fldROIMC_info)
cfg.variableName = cfg.utls.readRegistryKeys(cfg.regVariableName, cfg.variableName)
cfg.vectorVariableName = cfg.utls.readRegistryKeys(cfg.regVectorVariableName, cfg.vectorVariableName)
cfg.SMTPCheck = cfg.utls.readRegistryKeys(cfg.regSMTPCheck, cfg.SMTPCheck)
cfg.SMTPServer = cfg.utls.readRegistryKeys(cfg.regSMTPServer, cfg.SMTPServer)
cfg.SMTPtoEmails = cfg.utls.readRegistryKeys(cfg.regSMTPtoEmails, cfg.SMTPtoEmails)
cfg.SMTPUser = cfg.utls.readRegistryKeys(cfg.regSMTPUser, cfg.SMTPUser)
cfg.SMTPPassword = cfg.utls.readRegistryKeys(cfg.regSMTPPassword, cfg.SMTPPassword)
cfg.USGSUser = cfg.utls.readRegistryKeys(cfg.regUSGSUser, cfg.USGSUser)
cfg.USGSPass = cfg.utls.readRegistryKeys(cfg.regUSGSPass, cfg.USGSPass)
cfg.USGSUserASTER = cfg.utls.readRegistryKeys(cfg.regUSGSUserASTER, cfg.USGSUserASTER)
cfg.USGSPassASTER = cfg.utls.readRegistryKeys(cfg.regUSGSPassASTER, cfg.USGSPassASTER)
cfg.SciHubUser = cfg.utls.readRegistryKeys(cfg.regSciHubUser, cfg.SciHubUser)
cfg.SciHubService = cfg.utls.readRegistryKeys(cfg.regSciHubService, cfg.SciHubService)
cfg.SciHubPass = cfg.utls.readRegistryKeys(cfg.regSciHubPass, cfg.SciHubPass)
cfg.sigPLRoundCharList = cfg.roundCharList
cfg.scatPlRoundCharList = cfg.roundCharList
cfg.grpNm = cfg.utls.readRegistryKeys(cfg.regGroupName, cfg.grpNm)
cfg.rasterDataType = cfg.utls.readRegistryKeys(cfg.regRasterDataType, cfg.rasterDataType)
cfg.expressionListBC = cfg.utls.readRegistryKeys(cfg.regExpressionListBC, cfg.expressionListBC)
cfg.soundVal = cfg.utls.readRegistryKeys(cfg.regSound, cfg.soundVal)
cfg.windowSizeW = cfg.utls.readRegistryKeys(cfg.regWindowSizeW, cfg.windowSizeW)
cfg.windowSizeH = cfg.utls.readRegistryKeys(cfg.regWindowSizeH, cfg.windowSizeH)
cfg.splitterSizeS = cfg.utls.readRegistryKeys(cfg.regSplitterSizeS, cfg.splitterSizeS)
def initGui(self):
if PluginCheck == 'Yes':
try:
cfg.iface.addDockWidget(cfg.QtSCP.LeftDockWidgetArea, cfg.dockclassdlg)
except:
msg = ''
try:
import scipy.stats.distributions as statdistr
except:
msg = 'SciPy'
try:
from matplotlib.ticker import MaxNLocator
except:
msg = 'Matplotlib'
try:
import numpy as np
except:
msg = 'NumPy'
try:
from osgeo import gdal
except:
msg = 'Gdal'
if len(msg) > 0:
					qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Semi-Automatic Classification Plugin possible missing dependencies: ' + msg), level=qgisCore.Qgis.Info)
else:
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Please restart QGIS for installing the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Info)
return
from .modules.modules import Modules
cfg.SCPModules = Modules()
cfg.SCPModules.loading()
cfg.ipt.loadInputToolbar()
cfg.algName = cfg.algMinDist
cfg.ui.algorithm_combo.setCurrentIndex(0)
# vector to raster type of conversion
cfg.ui.conversion_type_combo.addItem(cfg.convCenterPixels)
cfg.ui.conversion_type_combo.addItem(cfg.convAllPixelsTouch)
cfg.centerOfPixels = cfg.ui.conversion_type_combo.itemText(0)
''' menu '''
cfg.ipt.loadMenu()
# set plugin version
cfg.ui.plugin_version_label.setText(semiautomaticclassVersion())
cfg.uidc.plugin_version_label2.setText('SCP ' + semiautomaticclassVersion())
# row height
cfg.ui.download_images_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.ui.tableWidget_band_calc.verticalHeader().setDefaultSectionSize(24)
cfg.ui.landsat_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.ui.sentinel_2_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.utls.setColumnWidthList(cfg.ui.sentinel_2_tableWidget, [[0, 400], [1, 200], [2, 60]])
cfg.ui.ASTER_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.utls.setColumnWidthList(cfg.ui.ASTER_tableWidget, [[0, 400], [1, 200], [2, 60]])
cfg.ui.MODIS_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.utls.setColumnWidthList(cfg.ui.MODIS_tableWidget, [[0, 400], [1, 200], [2, 60]])
cfg.ui.LCS_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.ui.signature_threshold_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.ui.point_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.ui.log_tableWidget.verticalHeader().setDefaultSectionSize(24)
cfg.utls.setColumnWidthList(cfg.ui.log_tableWidget, [[0, 100], [1, 200], [2, 800]])
# spectral signature plot list
cfg.utls.insertTableColumn(cfg.uisp.signature_list_plot_tableWidget, 6, cfg.tableColString, None, 'Yes')
cfg.utls.sortTableColumn(cfg.uisp.signature_list_plot_tableWidget, 3)
cfg.utls.setColumnWidthList(cfg.uisp.signature_list_plot_tableWidget, [[0, 30], [1, 40], [2, 100], [3, 40], [4, 100], [5, 30]])
try:
cfg.uisp.signature_list_plot_tableWidget.horizontalHeader().setSectionResizeMode(2, cfg.QtWidgetsSCP.QHeaderView.Stretch)
cfg.uisp.signature_list_plot_tableWidget.horizontalHeader().setSectionResizeMode(4, cfg.QtWidgetsSCP.QHeaderView.Stretch)
except:
pass
cfg.SCPD.clearTree()
# passwords
cfg.ui.smtp_password_lineEdit.setEchoMode(cfg.QtWidgetsSCP.QLineEdit.Password)
cfg.ui.password_usgs_lineEdit.setEchoMode(cfg.QtWidgetsSCP.QLineEdit.Password)
cfg.ui.password_usgs_lineEdit_2.setEchoMode(cfg.QtWidgetsSCP.QLineEdit.Password)
cfg.ui.password_scihub_lineEdit.setEchoMode(cfg.QtWidgetsSCP.QLineEdit.Password)
# scatter plot list
cfg.utls.insertTableColumn(cfg.uiscp.scatter_list_plot_tableWidget, 6, cfg.tableColString, None, 'Yes')
cfg.utls.sortTableColumn(cfg.uiscp.scatter_list_plot_tableWidget, 3)
cfg.utls.setColumnWidthList(cfg.uiscp.scatter_list_plot_tableWidget, [[0, 30], [1, 40], [2, 100], [3, 40], [4, 100], [5, 30]])
try:
cfg.uiscp.scatter_list_plot_tableWidget.horizontalHeader().setSectionResizeMode(2, cfg.QtWidgetsSCP.QHeaderView.Stretch)
cfg.uiscp.scatter_list_plot_tableWidget.horizontalHeader().setSectionResizeMode(4, cfg.QtWidgetsSCP.QHeaderView.Stretch)
except:
pass
# signature threshold
cfg.utls.insertTableColumn(cfg.ui.signature_threshold_tableWidget, 7, cfg.tableColString, None, 'Yes')
cfg.utls.setColumnWidthList(cfg.ui.signature_threshold_tableWidget, [[4, 100], [5, 100], [6, 100]])
try:
cfg.ui.signature_threshold_tableWidget.horizontalHeader().setSectionResizeMode(1, cfg.QtWidgetsSCP.QHeaderView.Stretch)
cfg.ui.signature_threshold_tableWidget.horizontalHeader().setSectionResizeMode(3, cfg.QtWidgetsSCP.QHeaderView.Stretch)
except:
pass
# product download tab
cfg.utls.setColumnWidthList(cfg.ui.download_images_tableWidget, [[0, 100], [1, 400]])
			# USGS spectral library
cfg.usgsLib.addSpectralLibraryToCombo(cfg.usgs_lib_list)
cfg.usgs_C1p = cfg.plgnDir + '/' + cfg.usgs_C1p
cfg.usgs_C2p = cfg.plgnDir + '/' + cfg.usgs_C2p
cfg.usgs_C3p = cfg.plgnDir + '/' + cfg.usgs_C3p
cfg.usgs_C4p = cfg.plgnDir + '/' + cfg.usgs_C4p
cfg.usgs_C5p = cfg.plgnDir + '/' + cfg.usgs_C5p
cfg.usgs_C6p = cfg.plgnDir + '/' + cfg.usgs_C6p
cfg.usgs_C7p = cfg.plgnDir + '/' + cfg.usgs_C7p
# band calc expression
cfg.bCalc.createExpressionList(cfg.expressionListBC)
cfg.batchT.addFunctionsToTable(cfg.functionNames)
cfg.bst.addSatelliteToCombo(cfg.satWlList)
cfg.downProd.addSatelliteToCombo(cfg.downProductList)
cfg.scaPlT.addColormapToCombo(cfg.scatterColorMap)
cfg.bst.addUnitToCombo(cfg.unitList)
cfg.SCPD.previewSize()
# set log state
if cfg.logSetVal == 'Yes':
cfg.ui.log_checkBox.setCheckState(2)
cfg.mx.msg19()
elif cfg.logSetVal == 'No':
cfg.ui.log_checkBox.setCheckState(0)
# set download news state
cfg.ui.download_news_checkBox.setCheckState(int(cfg.downNewsVal))
			# set virtual raster load state
cfg.ui.virtual_raster_load_checkBox.setCheckState(int(cfg.vrtRstProjVal))
# set raster format
if cfg.outTempRastFormat == 'VRT':
cfg.ui.virtual_raster_checkBox.setCheckState(2)
elif cfg.outTempRastFormat == 'GTiff':
cfg.ui.virtual_raster_checkBox.setCheckState(0)
# set raster compression
if cfg.rasterCompression == 'Yes':
cfg.ui.raster_compression_checkBox.setCheckState(2)
elif cfg.rasterCompression == 'No':
cfg.ui.raster_compression_checkBox.setCheckState(0)
			# set parallel writing state
if cfg.parallelWritingCheck == 'Yes':
cfg.ui.parallel_writing_checkBox.setCheckState(2)
elif cfg.parallelWritingCheck == 'No':
cfg.ui.parallel_writing_checkBox.setCheckState(0)
# set SMTP checkbox state
cfg.ui.smtp_checkBox.setCheckState(int(cfg.SMTPCheck))
# set sound state
cfg.ui.sound_checkBox.setCheckState(int(cfg.soundVal))
# connect to project loaded
cfg.qgisCoreSCP.QgsProject.instance().readProject.connect(self.projectLoaded)
cfg.qgisCoreSCP.QgsProject.instance().projectSaved.connect(self.projectSaved)
cfg.iface.newProjectCreated.connect(self.newProjectLoaded)
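			# projectLoaded / projectSaved / newProjectLoaded are methods of this class, presumably used to
			# restore and persist the SCP state (band sets, training input) together with the QGIS project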
#cfg.qgisCoreSCP.QgsProject.instance().readMapLayer.connect(self.test)
#cfg.qgisCoreSCP.QgsProject.instance().layerLoaded.connect(self.test)
''' Help tab '''
cfg.utls.makeDirectory(cfg.tmpDir + '/_images/')
cfg.ui.help_textBrowser.setSearchPaths([cfg.tmpDir])
''' Docks '''
# set ROI color
cfg.ui.change_color_Button.setStyleSheet('background-color :' + cfg.ROIClrVal)
# set ROI transparency
cfg.ui.transparency_Slider.setValue(cfg.ROITrnspVal)
# set RAM value
cfg.ui.RAM_spinBox.setValue(cfg.RAMValue)
# set CPU value
cfg.ui.CPU_spinBox.setValue(cfg.threads)
# macroclass checkbox
if cfg.macroclassCheck == 'No':
cfg.ui.macroclass_checkBox.setCheckState(0)
cfg.ui.class_checkBox.blockSignals(True)
cfg.ui.class_checkBox.setCheckState(2)
cfg.ui.class_checkBox.blockSignals(False)
elif cfg.macroclassCheck == 'Yes':
cfg.ui.macroclass_checkBox.setCheckState(2)
cfg.ui.class_checkBox.blockSignals(True)
cfg.ui.class_checkBox.setCheckState(0)
cfg.ui.class_checkBox.blockSignals(False)
# macroclass checkbox
if cfg.macroclassCheckRF == 'No':
cfg.ui.macroclass_checkBox_rf.setCheckState(0)
cfg.ui.class_checkBox_rf.blockSignals(True)
cfg.ui.class_checkBox_rf.setCheckState(2)
cfg.ui.class_checkBox_rf.blockSignals(False)
elif cfg.macroclassCheckRF == 'Yes':
cfg.ui.macroclass_checkBox_rf.setCheckState(2)
cfg.ui.class_checkBox_rf.blockSignals(True)
cfg.ui.class_checkBox_rf.setCheckState(0)
cfg.ui.class_checkBox_rf.blockSignals(False)
# LC signature checkbox
if cfg.LCsignatureCheckBox == 'No':
cfg.ui.LC_signature_checkBox.setCheckState(0)
elif cfg.LCsignatureCheckBox == 'Yes':
cfg.ui.LC_signature_checkBox.setCheckState(2)
try:
# set SMTP server
cfg.ui.smtp_server_lineEdit.setText(cfg.SMTPServer)
# set SMTP to emails
cfg.ui.to_email_lineEdit.setText(cfg.SMTPtoEmails)
# set SMTP user and password
cfg.ui.smtp_user_lineEdit.setText(cfg.SMTPUser)
if cfg.SMTPPassword is not None:
SMTPPsw = cfg.utls.decryptPassword(cfg.SMTPPassword[2:-1])
cfg.ui.smtp_password_lineEdit.setText(str(SMTPPsw)[2:-1])
cfg.SMTPPassword = str(SMTPPsw)[2:-1]
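					# decryptPassword() apparently returns a bytes-like value; str(...)[2:-1] strips the b'...'
					# wrapper from its repr; the same pattern is applied to the USGS and SciHub passwords below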
# set USGS user and password
cfg.ui.user_usgs_lineEdit.setText(cfg.USGSUser)
if cfg.USGSPass is not None:
USGSPsw = cfg.utls.decryptPassword(cfg.USGSPass[2:-1])
cfg.ui.password_usgs_lineEdit.setText(str(USGSPsw)[2:-1])
cfg.ui.user_usgs_lineEdit_2.setText(cfg.USGSUserASTER)
if cfg.USGSPassASTER is not None:
USGSPsw2 = cfg.utls.decryptPassword(cfg.USGSPassASTER[2:-1])
cfg.ui.password_usgs_lineEdit_2.setText(str(USGSPsw2)[2:-1])
# set SciHub user and password
cfg.ui.sentinel_service_lineEdit.setText(cfg.SciHubService)
cfg.ui.user_scihub_lineEdit.setText(cfg.SciHubUser)
if cfg.SciHubPass is not None:
sciHubPsw = cfg.utls.decryptPassword(cfg.SciHubPass[2:-1])
cfg.ui.password_scihub_lineEdit.setText(str(sciHubPsw)[2:-1])
except Exception as err:
# logger
cfg.utls.logCondition(str(__name__) + '-' + (cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), ' ERROR exception: ' + str(err))
cfg.ui.sentinel2_alternative_search_checkBox.blockSignals(True)
cfg.ui.sentinel2_alternative_search_checkBox.setCheckState(int(cfg.sentinelAlternativeSearch))
cfg.ui.sentinel2_alternative_search_checkBox.blockSignals(False)
''' SCP tab '''
cfg.ui.SCP_tabs.currentChanged.connect(cfg.ipt.SCPTabChanged)
cfg.ui.main_tabWidget.currentChanged.connect(cfg.ipt.mainTabChanged)
# hide tabs
cfg.ui.SCP_tabs.setStyleSheet('QTabBar::tab {padding: 0px; max-height: 0px;}')
# set window size
cfg.dlg.resize(int(cfg.windowSizeW), int(cfg.windowSizeH))
cfg.ui.widget.setMinimumSize(cfg.QtCoreSCP.QSize(50, 0))
cfg.ui.widget.setMaximumSize(cfg.QtCoreSCP.QSize(400, 16777215))
cfg.ui.splitter.setSizes(eval(cfg.splitterSizeS))
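			# splitterSizeS is apparently saved as a Python-list string (e.g. '[100, 100]'), so eval()
			# converts it back into the list of pane sizes expected by QSplitter.setSizes()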
cfg.ui.splitter.splitterMoved.connect(cfg.ipt.movedSplitter)
cfg.ui.menu_treeWidget.itemSelectionChanged.connect(cfg.ipt.menuIndex)
cfg.ui.f_filter_lineEdit.textChanged.connect(cfg.ipt.filterTree)
''' Multiple ROI tab '''
# connect to add point
cfg.ui.add_point_pushButton.clicked.connect(cfg.multiROI.addPointToTable)
# connect to create random points
cfg.ui.add_random_point_pushButton.clicked.connect(cfg.multiROI.createRandomPoint)
# connect to remove point
cfg.ui.remove_point_pushButton.clicked.connect(cfg.multiROI.removePointFromTable)
# connect to save point ROIs
cfg.ui.save_point_rois_pushButton.clicked.connect(cfg.multiROI.createROIfromPoint)
# connect to import points
cfg.ui.import_point_list_pushButton.clicked.connect(cfg.multiROI.importPoints)
# connect to export point list
cfg.ui.export_point_list_pushButton.clicked.connect(cfg.multiROI.exportPointList)
# connect the signature calculation checkBox 2
cfg.ui.signature_checkBox2.stateChanged.connect(cfg.multiROI.signatureCheckbox2)
# connect to text changed
cfg.ui.stratified_lineEdit.textChanged.connect(cfg.multiROI.textChanged)
''' Import spectral signature tab '''
# connect the import library
cfg.ui.open_library_pushButton.clicked.connect(cfg.SCPD.openLibraryFile)
# connect the open shapefile
cfg.ui.open_shapefile_pushButton.clicked.connect(cfg.sigImport.openShapefileI)
# connect the import shapefile
cfg.ui.import_shapefile_pushButton.clicked.connect(cfg.utls.importShapefile)
# connect the chapter changed
cfg.ui.usgs_chapter_comboBox.currentIndexChanged.connect(cfg.usgsLib.chapterChanged)
# connect the library changed
cfg.ui.usgs_library_comboBox.currentIndexChanged.connect(cfg.usgsLib.libraryChanged)
			# connect to add signatures from the USGS library to the signature list
cfg.ui.add_usgs_library_pushButton.clicked.connect(cfg.usgsLib.addSignatureToList)
''' Export spectral signature tab '''
# connect to export signature to SCP file
cfg.ui.export_SCP_pushButton.clicked.connect(cfg.SCPD.exportSignatureFile)
cfg.ui.export_SHP_pushButton.clicked.connect(cfg.SCPD.exportSignatureShapefile)
# connect to export signature to CSV
cfg.ui.export_CSV_library_toolButton.clicked.connect(cfg.SCPD.exportToCSVLibrary)
''' Algorithm weight tab '''
cfg.ui.reset_weights_pushButton.clicked.connect(cfg.algWT.resetWeights)
cfg.ui.set_weight_value_pushButton.clicked.connect(cfg.algWT.setWeights)
''' Signature threshold tab '''
# edited cell
cfg.ui.signature_threshold_tableWidget.cellChanged.connect(cfg.signT.editedThresholdTable)
cfg.ui.reset_threshold_pushButton.clicked.connect(cfg.signT.resetThresholds)
cfg.ui.automatic_threshold_pushButton.clicked.connect(cfg.signT.setAllWeightsVariance)
cfg.ui.set_threshold_value_pushButton.clicked.connect(cfg.signT.setThresholds)
cfg.ui.signature_threshold_tableWidget.horizontalHeader().sectionClicked.connect(cfg.signT.orderedTable)
''' LC Signature threshold tab '''
cfg.ui.LCS_tableWidget.cellChanged.connect(cfg.LCSignT.editedThresholdTable)
cfg.ui.LCS_tableWidget.horizontalHeader().sectionClicked.connect(cfg.LCSignT.orderedTable)
cfg.ui.automatic_threshold_pushButton_2.clicked.connect(cfg.LCSignT.setAllWeightsVariance)
# connect to activate pointer
cfg.ui.LCS_pointerButton.clicked.connect(cfg.LCSignT.pointerActive)
cfg.ui.LCS_ROI_button.clicked.connect(cfg.LCSignT.ROIThreshold)
cfg.ui.set_min_max_Button.clicked.connect(cfg.LCSignT.setMinimumMaximum)
# connect the include signature checkBox
cfg.ui.LCS_include_checkBox.stateChanged.connect(cfg.LCSignT.includeCheckbox)
cfg.ui.LCS_cut_checkBox.stateChanged.connect(cfg.LCSignT.cutCheckbox)
# add to spectral signature plot
cfg.ui.signature_spectral_plot_toolButton_2.clicked.connect(cfg.LCSignT.addSignatureToSpectralPlot)
''' RGB List tab '''
cfg.ui.RGB_tableWidget.cellChanged.connect(cfg.RGBLT.editedTable)
cfg.ui.add_RGB_pushButton.clicked.connect(cfg.RGBLT.addRGBToTable)
cfg.ui.remove_RGB_toolButton.clicked.connect(cfg.RGBLT.removeRGBFromTable)
cfg.ui.sort_by_name_toolButton_2.clicked.connect(cfg.RGBLT.sortRGBName)
cfg.ui.clear_RGB_list_toolButton.clicked.connect(cfg.RGBLT.clearTableAction)
cfg.ui.move_up_toolButton_3.clicked.connect(cfg.RGBLT.moveUpRGB)
cfg.ui.move_down_toolButton_3.clicked.connect(cfg.RGBLT.moveDownRGB)
cfg.ui.all_RGB_list_toolButton.clicked.connect(cfg.RGBLT.allRGBListAction)
cfg.ui.export_RGB_List_toolButton.clicked.connect(cfg.RGBLT.exportRGBList)
cfg.ui.import_RGB_List_toolButton.clicked.connect(cfg.RGBLT.importRGB)
''' Band set List tab '''
cfg.ui.add_bandset_pushButton.clicked.connect(cfg.bstLT.addBandSetToTable)
cfg.ui.rgb_toolButton.clicked.connect(cfg.bstLT.displayRGB)
cfg.ui.remove_bandset_toolButton.clicked.connect(cfg.bstLT.removeBandSetFromTable)
cfg.ui.move_up_toolButton_4.clicked.connect(cfg.bstLT.moveUpBandset)
cfg.ui.move_down_toolButton_4.clicked.connect(cfg.bstLT.moveDownBandset)
# connect to double click
cfg.ui.band_set_list_tableWidget.doubleClicked.connect(cfg.bstLT.doubleClick)
cfg.ui.export_bandset_List_toolButton.clicked.connect(cfg.bstLT.exportList)
cfg.ui.import_bandset_List_toolButton.clicked.connect(cfg.bstLT.importList)
# connect to filter
cfg.ui.band_set_filter_lineEdit.textChanged.connect(cfg.bstLT.filterTable)
''' Download product tab '''
# connect to find images button
cfg.ui.find_images_toolButton.clicked.connect(cfg.downProd.findImages)
cfg.ui.selectUL_toolButton_3.clicked.connect(cfg.downProd.pointerActive)
# connect to display button
cfg.ui.toolButton_display.clicked.connect(cfg.downProd.displayImages)
cfg.ui.toolButton_OSM.clicked.connect(cfg.downProd.displayOSM)
cfg.ui.remove_image_toolButton.clicked.connect(cfg.downProd.removeImageFromTable)
cfg.ui.clear_table_toolButton.clicked.connect(cfg.downProd.clearTable)
cfg.ui.download_images_Button.clicked.connect(cfg.downProd.downloadImages)
cfg.ui.export_links_Button.clicked.connect(cfg.downProd.exportLinks)
cfg.ui.import_table_pushButton.clicked.connect(cfg.downProd.importTableText)
cfg.ui.export_table_pushButton.clicked.connect(cfg.downProd.exportTableText)
cfg.ui.check_toolButton.clicked.connect(cfg.downProd.checkAllBands)
cfg.ui.show_area_radioButton_2.clicked.connect(cfg.downProd.showHideArea)
cfg.ui.remember_user_checkBox_2.stateChanged.connect(cfg.downProd.rememberUserCheckbox)
cfg.ui.user_usgs_lineEdit.editingFinished.connect(cfg.downProd.rememberUser)
cfg.ui.password_usgs_lineEdit.editingFinished.connect(cfg.downProd.rememberUser)
cfg.ui.reset_sentinel_service_toolButton.clicked.connect(cfg.downProd.resetService)
cfg.ui.remember_user_checkBox.stateChanged.connect(cfg.downProd.rememberUserCheckboxSentinel2)
cfg.ui.sentinel2_alternative_search_checkBox.stateChanged.connect(cfg.downProd.alternativeCheckboxSentinel2)
cfg.ui.user_scihub_lineEdit.editingFinished.connect(cfg.downProd.rememberUserSentinel2)
cfg.ui.password_scihub_lineEdit.editingFinished.connect(cfg.downProd.rememberUserSentinel2)
cfg.ui.sentinel_service_lineEdit.editingFinished.connect(cfg.downProd.rememberService)
cfg.ui.check_toolButton_2.clicked.connect(cfg.downProd.checkAllBandsSentinel2)
cfg.ui.check_toolButton_3.clicked.connect(cfg.downProd.checkAllBandsSentinel3)
cfg.ui.check_toolButton_4.clicked.connect(cfg.downProd.checkAllBandsGOES)
cfg.ui.remember_user_checkBox_3.stateChanged.connect(cfg.downProd.rememberUserCheckboxEarthdata)
cfg.ui.user_usgs_lineEdit_2.editingFinished.connect(cfg.downProd.rememberUserEarthdata)
cfg.ui.password_usgs_lineEdit_2.editingFinished.connect(cfg.downProd.rememberUserEarthdata)
cfg.ui.download_images_tableWidget.itemSelectionChanged.connect(cfg.downProd.tableClick)
# connect to filter
cfg.ui.products_filter_lineEdit.textChanged.connect(cfg.downProd.filterTable)
''' Classification dock '''
# button band set
cfg.uidc.bandset_toolButton.clicked.connect(cfg.utls.bandSetTab)
cfg.uidc.band_processing_toolButton.clicked.connect(cfg.utls.bandProcessingTab)
cfg.uidc.preprocessing_toolButton_2.clicked.connect(cfg.utls.preProcessingTab)
cfg.uidc.postprocessing_toolButton_2.clicked.connect(cfg.utls.postProcessingTab)
cfg.uidc.bandcalc_toolButton_2.clicked.connect(cfg.utls.bandCalcTab)
cfg.uidc.download_images_toolButton_2.clicked.connect(cfg.utls.selectTabDownloadImages)
cfg.uidc.basic_tools_toolButton.clicked.connect(cfg.utls.basicToolsTab)
cfg.uidc.batch_toolButton.clicked.connect(cfg.utls.batchTab)
cfg.uidc.userguide_toolButton_2.clicked.connect(cfg.ipt.quickGuide)
cfg.uidc.help_toolButton_2.clicked.connect(cfg.ipt.askHelp)
cfg.uidc.support_toolButton.clicked.connect(cfg.ipt.supportSCP)
cfg.uidc.tabWidget_dock.currentChanged.connect(cfg.ipt.dockTabChanged)
# button new input
cfg.uidc.button_new_input.clicked.connect(cfg.SCPD.createInput)
# button reset
cfg.uidc.button_reset_input.clicked.connect(cfg.SCPD.resetInput)
# connect to save to shapefile
cfg.uidc.button_Save_ROI.clicked.connect(cfg.SCPD.saveROItoShapefile)
# connect to undo save ROI
cfg.uidc.undo_save_Button.clicked.connect(cfg.SCPD.undoSaveROI)
cfg.uidc.redo_save_Button.clicked.connect(cfg.SCPD.redoSaveROI)
# connect the signature calculation checkBox
cfg.uidc.signature_checkBox.stateChanged.connect(cfg.SCPD.signatureCheckbox)
cfg.uidc.scatterPlot_toolButton.clicked.connect(cfg.SCPD.addROIToScatterPlot)
# connect the save input checkBox
cfg.uidc.save_input_checkBox.stateChanged.connect(cfg.SCPD.saveInputCheckbox)
# connect to open training file
cfg.uidc.trainingFile_toolButton.clicked.connect(cfg.SCPD.openTrainingFile)
# connect to export signature list file
cfg.uidc.export_signature_list_toolButton.clicked.connect(cfg.utls.exportSignaturesTab)
# connect to import library file
cfg.uidc.import_library_toolButton.clicked.connect(cfg.utls.importSignaturesTab)
# add to spectral signature plot
cfg.uidc.signature_spectral_plot_toolButton.clicked.connect(cfg.SCPD.addSignatureToSpectralPlot)
# connect to filter
cfg.uidc.ROI_filter_lineEdit.textChanged.connect(cfg.SCPD.filterTree)
# connect to delete signature
cfg.uidc.delete_Signature_Button.clicked.connect(cfg.SCPD.removeSelectedSignatures)
# connect to merge signatures
cfg.uidc.merge_signature_toolButton.clicked.connect(cfg.SCPD.mergeSelectedSignatures)
cfg.uidc.calculate_signature_toolButton.clicked.connect(cfg.SCPD.calculateSignatures)
# connect the ROI macroclass ID
cfg.uidc.ROI_Macroclass_ID_spin.valueChanged.connect(cfg.SCPD.setROIMacroID)
# connect the ROI Macroclass
cfg.uidc.ROI_Macroclass_line.editingFinished.connect(cfg.SCPD.roiMacroclassInfo)
# custom expression
cfg.uidc.custom_index_lineEdit.editingFinished.connect(cfg.SCPD.customExpressionEdited)
# connect the ROI Class ID
cfg.uidc.ROI_ID_spin.valueChanged.connect(cfg.SCPD.setROIID)
# connect the ROI Class
cfg.uidc.ROI_Class_line.editingFinished.connect(cfg.SCPD.roiClassInfo)
			# connect the vegetation index display checkBox
cfg.uidc.display_cursor_checkBox.stateChanged.connect(cfg.SCPD.vegetationIndexCheckbox)
# connect the vegetation index combo
cfg.uidc.vegetation_index_comboBox.currentIndexChanged.connect(cfg.SCPD.vegetationIndexName)
# connect the rapid ROI checkBox
cfg.uidc.rapid_ROI_checkBox.stateChanged.connect(cfg.SCPD.rapidROICheckbox)
			# connect the rapid ROI band spinBox
cfg.uidc.rapidROI_band_spinBox.valueChanged.connect(cfg.SCPD.rapidROIband)
''' Classification tab '''
# connect to algorithm weight button
cfg.ui.algorithm_weight_button.clicked.connect(cfg.utls.algorithmBandWeightTab)
# connect to threshold button
cfg.ui.algorithm_threshold_button.clicked.connect(cfg.utls.signatureThresholdTab)
# connect to LCS threshold button
cfg.ui.LC_signature_button.clicked.connect(cfg.utls.LCSThresholdTab)
# connect the algorithm combo
cfg.ui.algorithm_combo.currentIndexChanged.connect(cfg.classTab.algorithmName)
# connect the algorithm threshold
cfg.ui.alg_threshold_SpinBox.valueChanged.connect(cfg.classTab.algorithmThreshold)
# connect to run classification
cfg.ui.button_classification.clicked.connect(cfg.classTab.runClassificationAction)
cfg.ui.classification.clicked.connect(cfg.batchT.setFunctionButton)
# connect the macroclass checkBox
cfg.ui.macroclass_checkBox.stateChanged.connect(cfg.classTab.macroclassCheckbox)
cfg.ui.class_checkBox.stateChanged.connect(cfg.classTab.classCheckbox)
# connect the LC signature checkBox
cfg.ui.LC_signature_checkBox.stateChanged.connect(cfg.classTab.LCSignature_Checkbox)
# connect the mask checkBox
cfg.ui.mask_checkBox.stateChanged.connect(cfg.classTab.maskCheckbox)
# connect to reset qml button
cfg.ui.resetQmlButton.clicked.connect(cfg.classTab.resetQmlStyle)
# connect to reset mask button
cfg.ui.resetMaskButton.clicked.connect(cfg.classTab.resetMask)
# connect to qml button
cfg.ui.qml_Button.clicked.connect(cfg.classTab.selectQmlStyle)
''' Spectral signature plot '''
# connect the sigma checkBox
cfg.uisp.sigma_checkBox.stateChanged.connect(cfg.spSigPlot.sigmaCheckbox)
cfg.uisp.band_lines_checkBox.stateChanged.connect(cfg.spSigPlot.refreshPlot)
cfg.uisp.grid_checkBox.stateChanged.connect(cfg.spSigPlot.refreshPlot)
# connect to remove signature button
cfg.uisp.remove_Signature_Button.clicked.connect(cfg.spSigPlot.removeSignature)
# connect to calculate spectral distances button
cfg.uisp.calculate_spectral_distance_Button.clicked.connect(cfg.spSigPlot.calculateSpectralDistances)
# connect to fit to axes
cfg.uisp.fitToAxes_pushButton.clicked.connect(cfg.spSigPlot.fitPlotToAxes)
# connect to plot spinbox
cfg.uisp.plot_text_spinBox.valueChanged.connect(cfg.spSigPlot.setPlotLegendLenght)
# connect to value range
cfg.uisp.value_range_pushButton.clicked.connect(cfg.spSigPlot.editValueRange)
cfg.uisp.set_min_max_Button.clicked.connect(cfg.spSigPlot.setMinimumMaximum)
cfg.uisp.automatic_threshold_pushButton_2.clicked.connect(cfg.spSigPlot.setAllWeightsVariance)
# connect to activate pointer
cfg.uisp.LCS_pointerButton_2.clicked.connect(cfg.spSigPlot.pointerActive)
cfg.uisp.LCS_ROI_button_2.clicked.connect(cfg.spSigPlot.ROIThreshold)
# undo threshold
cfg.uisp.undo_threshold_Button.clicked.connect(cfg.spSigPlot.undoThreshold)
# connect the include signature checkBox
cfg.uisp.LCS_include_checkBox_2.stateChanged.connect(cfg.spSigPlot.includeCheckbox)
cfg.uisp.LCS_cut_checkBox_2.stateChanged.connect(cfg.spSigPlot.cutCheckbox)
# connect to add to signature list
cfg.uisp.add_signature_list_pushButton.clicked.connect(cfg.spSigPlot.addToSignatureList)
# connect to save plot
cfg.uisp.save_plot_pushButton.clicked.connect(cfg.spSigPlot.savePlot)
# connect to edited cell
cfg.uisp.signature_list_plot_tableWidget.cellChanged.connect(cfg.spSigPlot.editedCell)
cfg.uisp.signature_list_plot_tableWidget.horizontalHeader().sectionClicked.connect(cfg.spSigPlot.orderedTable)
# connect to signature plot list double click
cfg.uisp.signature_list_plot_tableWidget.doubleClicked.connect(cfg.spSigPlot.signatureListDoubleClick)
''' Scatter plot tab '''
# connect to scatter plot button
cfg.uiscp.scatter_ROI_Button.clicked.connect(cfg.scaPlT.scatterPlotCalc)
# connect to Band X spinbox
cfg.uiscp.bandX_spinBox.valueChanged.connect(cfg.scaPlT.bandXPlot)
# connect to Band Y spinbox
cfg.uiscp.bandY_spinBox.valueChanged.connect(cfg.scaPlT.bandYPlot)
# connect double click ROI list to zoom
cfg.uiscp.scatter_list_plot_tableWidget.doubleClicked.connect(cfg.scaPlT.scatterPlotDoubleClick)
# connect to edited cell
cfg.uiscp.scatter_list_plot_tableWidget.cellChanged.connect(cfg.scaPlT.editedCell)
# connect to remove signature button
cfg.uiscp.remove_Signature_Button.clicked.connect(cfg.scaPlT.removeScatter)
# connect to save plot
cfg.uiscp.save_plot_pushButton_2.clicked.connect(cfg.scaPlT.savePlot)
# connect to fit to axes
cfg.uiscp.fitToAxes_pushButton_2.clicked.connect(cfg.scaPlT.fitPlotToAxes)
cfg.uiscp.plot_temp_ROI_pushButton.clicked.connect(cfg.scaPlT.addTempROIToScatterPlot)
cfg.uiscp.plot_display_pushButton.clicked.connect(cfg.scaPlT.addDisplayToScatterPlot)
cfg.uiscp.plot_image_pushButton.clicked.connect(cfg.scaPlT.addImageToScatterPlot)
# connect to change color button
cfg.uiscp.polygon_color_Button.clicked.connect(cfg.scaPlT.changePolygonColor)
cfg.uiscp.plot_color_ROI_pushButton.clicked.connect(cfg.scaPlT.colorPlot)
# connect to select value range
cfg.uiscp.draw_polygons_pushButton.clicked.connect(cfg.scaPlT.selectRange)
cfg.uiscp.remove_polygons_pushButton.clicked.connect(cfg.scaPlT.removePolygons)
cfg.uiscp.show_polygon_area_pushButton.clicked.connect(cfg.scaPlT.showScatterPolygonArea)
cfg.uiscp.add_signature_list_pushButton.clicked.connect(cfg.scaPlT.addToSignatureList)
''' Band set tab '''
# connect to refresh button
cfg.ui.toolButton_reload_3.clicked.connect(cfg.bst.rasterBandName)
			# connect to reload raster layers button
cfg.ui.toolButton_reload.clicked.connect(cfg.ipt.checkRefreshRasterLayer)
# connect to add file button
cfg.ui.toolButton_input_raster.clicked.connect(cfg.bst.addFileToBandSetAction)
# connect to add raster band button
cfg.ui.add_raster_bands_Button.clicked.connect(cfg.bst.addBandToSet)
# connect to select all bands button
cfg.ui.select_all_bands_Button.clicked.connect(cfg.bst.selectAllBands)
# connect to clear band set button
cfg.ui.clear_bandset_toolButton.clicked.connect(cfg.bst.clearBandSetAction)
# connect to move up band button
cfg.ui.move_up_toolButton.clicked.connect(cfg.bst.moveUpBand)
# connect to move down band button
cfg.ui.move_down_toolButton.clicked.connect(cfg.bst.moveDownBand)
# connect to sort by name button
cfg.ui.sort_by_name_toolButton.clicked.connect(cfg.bst.sortBandName)
# connect to remove band button
cfg.ui.remove_toolButton.clicked.connect(cfg.bst.removeBand)
# connect add band set
cfg.ui.add_band_set_toolButton.clicked.connect(cfg.bst.addBandSetTabAction)
# connect to changed tab
cfg.ui.Band_set_tabWidget.currentChanged.connect(cfg.bst.tabBandSetChanged)
# connect close tab
cfg.ui.Band_set_tabWidget.tabCloseRequested.connect(cfg.bst.closeBandSetTab)
# combo layer
cfg.ui.image_raster_name_combo.currentIndexChanged.connect(cfg.bst.rasterLayerName)
# connect to import band set button
cfg.ui.import_bandset_toolButton.clicked.connect(cfg.bst.importBandSet)
# connect to export band set button
cfg.ui.export_bandset_toolButton.clicked.connect(cfg.bst.exportBandSet)
# connect to satellite wavelength combo
cfg.ui.wavelength_sat_combo.currentIndexChanged.connect(cfg.bst.satelliteWavelength)
# connect to unit combo
cfg.ui.unit_combo.currentIndexChanged.connect(cfg.bst.setBandUnit)
# connect to date edit
cfg.ui.bandset_dateEdit.dateChanged.connect(cfg.bst.setBandsetDate)
# connect to band set process button
cfg.ui.band_set_process_toolButton.clicked.connect(cfg.bst.performBandSetTools)
# connect to filter
cfg.ui.bands_filter_lineEdit.textChanged.connect(cfg.bst.filterTable)
''' Pre processing tab '''
''' Clip multiple rasters '''
# connect to clip button
cfg.ui.clip_Button.clicked.connect(cfg.clipMulti.clipRastersAction)
cfg.ui.clip_multiple_rasters.clicked.connect(cfg.batchT.setFunctionButton)
# connect to activate UL pointer
cfg.ui.selectUL_toolButton.clicked.connect(cfg.clipMulti.pointerActive)
# connect to refresh shape button
cfg.ui.toolButton_reload_8.clicked.connect(cfg.clipMulti.refreshShapeClip)
cfg.ui.show_area_radioButton_3.clicked.connect(cfg.clipMulti.showHideArea)
cfg.ui.shapefile_checkBox.stateChanged.connect(cfg.clipMulti.checkboxShapeChanged)
cfg.ui.temporary_ROI_checkBox.stateChanged.connect(cfg.clipMulti.checkboxTempROIChanged)
# connect the shapefile combo
cfg.ui.shapefile_comboBox.currentIndexChanged.connect(cfg.clipMulti.referenceLayerName)
''' Stack raster bands '''
# connect to stack button
cfg.ui.stack_Button.clicked.connect(cfg.stackRstr.stackAction)
cfg.ui.stack_raster_bands.clicked.connect(cfg.batchT.setFunctionButton)
''' Spectral change band sets '''
# connect to calculate button
cfg.ui.spectral_distance_bandsets_toolButton.clicked.connect(cfg.spclDstBS.calculateDistanceAction)
cfg.ui.spectral_distance.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.min_distance_radioButton_2.clicked.connect(cfg.spclDstBS.radioMinDistChanged)
cfg.ui.spectral_angle_map_radioButton_2.clicked.connect(cfg.spclDstBS.radioSAMChanged)
''' Mosaic band sets '''
# connect to mosaic button
cfg.ui.mosaic_bandsets_toolButton.clicked.connect(cfg.mosaicBS.mosaicAction)
cfg.ui.mosaic_bandsets.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.mosaic_band_sets_lineEdit.textChanged.connect(cfg.mosaicBS.textChanged)
''' Cloud masking '''
# connect to mask button
cfg.ui.cloud_mask_toolButton.clicked.connect(cfg.cloudMsk.maskAction)
cfg.ui.cloud_masking.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.cloud_mask_classes_lineEdit.textChanged.connect(cfg.cloudMsk.textChanged)
# connect to refresh button
cfg.ui.toolButton_reload_23.clicked.connect(cfg.utls.refreshClassificationLayer)
''' ASTER tab '''
# connect to input button
cfg.ui.toolButton_directoryInput_ASTER.clicked.connect(cfg.ASTERT.inputASTER)
cfg.ui.ASTER_tableWidget.cellChanged.connect(cfg.ASTERT.editedCell)
cfg.ui.earth_sun_dist_lineEdit_2.textChanged.connect(cfg.ASTERT.editedEarthSunDist)
cfg.ui.sun_elev_lineEdit_2.textChanged.connect(cfg.ASTERT.editedSunElevation)
cfg.ui.date_lineEdit_2.textChanged.connect(cfg.ASTERT.editedDate)
cfg.ui.pushButton_Conversion_3.clicked.connect(cfg.ASTERT.performASTERCorrection)
cfg.ui.aster_conversion.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.pushButton_remove_band_2.clicked.connect(cfg.ASTERT.removeHighlightedBand)
''' MODIS tab '''
# connect to input button
cfg.ui.toolButton_directoryInput_MODIS.clicked.connect(cfg.MODIST.inputMODIS)
cfg.ui.MODIS_tableWidget.cellChanged.connect(cfg.MODIST.editedCell)
cfg.ui.pushButton_Conversion_4.clicked.connect(cfg.MODIST.performMODISConversion)
cfg.ui.modis_conversion.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.pushButton_remove_band_3.clicked.connect(cfg.MODIST.removeHighlightedBand)
''' Landsat tab '''
# connect to input button
cfg.ui.toolButton_directoryInput.clicked.connect(cfg.landsatT.inputLandsat)
cfg.ui.toolButton_directoryInput_MTL.clicked.connect(cfg.landsatT.inputMTL)
cfg.ui.pushButton_Conversion.clicked.connect(cfg.landsatT.performLandsatCorrection)
cfg.ui.landsat_conversion.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.pushButton_remove_band.clicked.connect(cfg.landsatT.removeHighlightedBand)
cfg.ui.landsat_tableWidget.cellChanged.connect(cfg.landsatT.editedCell)
cfg.ui.earth_sun_dist_lineEdit.textChanged.connect(cfg.landsatT.editedEarthSunDist)
cfg.ui.sun_elev_lineEdit.textChanged.connect(cfg.landsatT.editedSunElevation)
cfg.ui.date_lineEdit.textChanged.connect(cfg.landsatT.editedDate)
cfg.ui.satellite_lineEdit.textChanged.connect(cfg.landsatT.editedSatellite)
''' Sentinel-1 tab '''
# connect to input button
cfg.ui.S1_toolButton_fileInput.clicked.connect(cfg.sentinel1T.inputSentinel)
cfg.ui.S1_toolButton_directoryInput_xml.clicked.connect(cfg.sentinel1T.inputXML)
cfg.ui.pushButton_Conversion_6.clicked.connect(cfg.sentinel1T.performSentinelConversion)
cfg.ui.sentinel1_conversion.clicked.connect(cfg.batchT.setFunctionButton)
''' Sentinel-2 tab '''
# connect to input button
cfg.ui.S2_toolButton_directoryInput.clicked.connect(cfg.sentinel2T.inputSentinel)
cfg.ui.pushButton_Conversion_2.clicked.connect(cfg.sentinel2T.performSentinelConversion)
cfg.ui.sentinel2_conversion.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.S2_satellite_lineEdit.textChanged.connect(cfg.sentinel2T.editedSatellite)
cfg.ui.S2_pushButton_remove_band.clicked.connect(cfg.sentinel2T.removeHighlightedBand)
cfg.ui.sentinel_2_tableWidget.cellChanged.connect(cfg.sentinel2T.editedCell)
cfg.ui.S2_toolButton_directoryInput_xml2.clicked.connect(cfg.sentinel2T.inputXML2)
''' Sentinel-3 tab '''
# connect to input button
cfg.ui.S3_toolButton_directoryInput.clicked.connect(cfg.sentinel3T.inputSentinel)
cfg.ui.pushButton_Conversion_5.clicked.connect(cfg.sentinel3T.performSentinelConversion)
cfg.ui.sentinel3_conversion.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.S3_pushButton_remove_band.clicked.connect(cfg.sentinel3T.removeHighlightedBand)
''' GOES tab '''
# connect to input button
cfg.ui.GOES_toolButton_directoryInput.clicked.connect(cfg.goesT.inputGOES)
cfg.ui.pushButton_Conversion_8.clicked.connect(cfg.goesT.performGOESConversion)
cfg.ui.goes_conversion.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.GOES_pushButton_remove_band.clicked.connect(cfg.goesT.removeHighlightedBand)
''' Classification neighbor tab'''
cfg.ui.class_neighbor_toolButton.clicked.connect(cfg.clssNghbr.classNeighborAction)
cfg.ui.neighbor_pixels.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.toolButton_input_matrix.clicked.connect(cfg.clssNghbr.inputMatrixFile)
''' Reproject raster bands tab '''
# connect to refresh button
cfg.ui.toolButton_reload_25.clicked.connect(cfg.rprjRstBndsT.refreshClassificationLayer)
cfg.ui.use_align_raster_checkBox.stateChanged.connect(cfg.rprjRstBndsT.checkboxAlignChanged)
cfg.ui.use_epsg_checkBox.stateChanged.connect(cfg.rprjRstBndsT.checkboxEPSGChanged)
# connect to reproject raster button
cfg.ui.reproject_Button.clicked.connect(cfg.rprjRstBndsT.reprojectRasterBands)
cfg.ui.reproject_raster_bands.clicked.connect(cfg.batchT.setFunctionButton)
''' Split tab '''
# connect the classification combo
cfg.ui.raster_name_combo.currentIndexChanged.connect(cfg.splitT.rasterLayerName)
# connect to refresh button
cfg.ui.toolButton_reload_9.clicked.connect(cfg.splitT.refreshClassificationLayer)
# connect to split raster button
cfg.ui.split_Button.clicked.connect(cfg.splitT.splitRaster)
cfg.ui.split_raster_bands.clicked.connect(cfg.batchT.setFunctionButton)
''' PCA tab '''
# connect to PCA button
cfg.ui.pca_Button.clicked.connect(cfg.pcaT.calculatePCAAction)
cfg.ui.pca.clicked.connect(cfg.batchT.setFunctionButton)
''' K-means tab '''
# connect to kmeans button
cfg.ui.kmeans_Button.clicked.connect(cfg.clusteringT.calculateClusteringAction)
cfg.ui.clustering.clicked.connect(cfg.batchT.setFunctionButton)
# connect the algorithm combo
cfg.ui.kmean_minmax_radioButton.clicked.connect(cfg.clusteringT.radiokmean_minmaxChanged)
cfg.ui.kmean_siglist_radioButton.clicked.connect(cfg.clusteringT.radiokmean_siglistChanged)
cfg.ui.kmean_randomsiglist_radioButton.clicked.connect(cfg.clusteringT.radiokmean_randomsiglistChanged)
cfg.ui.kmeans_radioButton.clicked.connect(cfg.clusteringT.radioKmeansChanged)
cfg.ui.isodata_radioButton.clicked.connect(cfg.clusteringT.radioIsodataChanged)
cfg.ui.min_distance_radioButton.clicked.connect(cfg.clusteringT.radioMinDistChanged)
cfg.ui.spectral_angle_map_radioButton.clicked.connect(cfg.clusteringT.radioSAMChanged)
''' Random forest tab '''
# connect to calculate button
cfg.ui.button_random_forest.clicked.connect(cfg.rndmFrst.performRandomForest)
cfg.ui.random_forest.clicked.connect(cfg.batchT.setFunctionButton)
# connect the macroclass checkBox
cfg.ui.macroclass_checkBox_rf.stateChanged.connect(cfg.rndmFrst.macroclassCheckbox)
cfg.ui.class_checkBox_rf.stateChanged.connect(cfg.rndmFrst.classCheckbox)
cfg.ui.classifier_Button.clicked.connect(cfg.rndmFrst.selectRFClassifier)
# connect to reset classifier
cfg.ui.resetClassifierButton.clicked.connect(cfg.rndmFrst.resetRFClassifier)
''' Vector to Raster tab '''
cfg.ui.toolButton_reload_16.clicked.connect(cfg.vctRstrT.reloadVectorList)
cfg.ui.toolButton_reload_17.clicked.connect(cfg.utls.refreshClassificationLayer)
cfg.ui.convert_vector_toolButton.clicked.connect(cfg.vctRstrT.convertToRasterAction)
cfg.ui.vector_to_raster.clicked.connect(cfg.batchT.setFunctionButton)
cfg.ui.vector_name_combo.currentIndexChanged.connect(cfg.utls.refreshVectorFields)
cfg.ui.field_checkBox.stateChanged.connect(cfg.vctRstrT.checkboxFieldChanged)
cfg.ui.constant_value_checkBox.stateChanged.connect(cfg.vctRstrT.checkboxConstantValueChanged)
''' Post processing tab '''
			''' Accuracy tab '''
# connect the classification combo
cfg.ui.classification_name_combo.currentIndexChanged.connect(cfg.acc.classificationLayerName)
# connect to refresh button
cfg.ui.toolButton_reload_4.clicked.connect(cfg.utls.refreshClassificationLayer)
# connect the reference combo
cfg.ui.reference_name_combo.currentIndexChanged.connect(cfg.acc.referenceLayerName)
# connect to refresh button
cfg.ui.buttonReload_shape_4.clicked.connect(cfg.acc.refreshReferenceLayer)
# connect to calculate error matrix button
cfg.ui.calculateMatrix_toolButton.clicked.connect(cfg.acc.calculateErrorMatrix)
cfg.ui.accuracy.clicked.connect(cfg.batchT.setFunctionButton)
''' Land cover change '''
# connect to refresh button reference classification
cfg.ui.toolButton_reload_5.clicked.connect(cfg.landCC.refreshClassificationReferenceLayer)
# connect to refresh button new classification
cfg.ui.toolButton_reload_6.clicked.connect(cfg.landCC.refreshNewClassificationLayer)
# connect the classification reference combo
cfg.ui.classification_reference_name_combo.currentIndexChanged.connect(cfg.landCC.classificationReferenceLayerName)
# connect the new classification combo
cfg.ui.new_classification_name_combo.currentIndexChanged.connect(cfg.landCC.newClassificationLayerName)
# connect the mask unchanged checkBox
cfg.ui.mask_unchanged_checkBox.stateChanged.connect(cfg.landCC.maskUnchangedCheckbox)
# connect to calculate land cover change button
cfg.ui.calculateLandCoverChange_toolButton.clicked.connect(cfg.landCC.landCoverChangeAction)
cfg.ui.land_cover_change.clicked.connect(cfg.batchT.setFunctionButton)
''' Classification report '''
# connect to refresh button
cfg.ui.toolButton_reload_10.clicked.connect(cfg.utls.refreshClassificationLayer)
# connect to calculate button
cfg.ui.calculateReport_toolButton.clicked.connect(cfg.classRep.calculateClassReport)
cfg.ui.classification_report.clicked.connect(cfg.batchT.setFunctionButton)
''' Band set combination tab '''
# connect to calculate button
cfg.ui.calculateBandSetComb_toolButton.clicked.connect(cfg.bsComb.calculateBandSetCombination)
cfg.ui.band_combination.clicked.connect(cfg.batchT.setFunctionButton)
''' Cross classification tab '''
# connect the classification combo
cfg.ui.classification_name_combo_2.currentIndexChanged.connect(cfg.crossC.classificationLayerName)
# connect to refresh button
cfg.ui.toolButton_reload_21.clicked.connect(cfg.utls.refreshClassificationLayer)
# connect the reference combo
cfg.ui.reference_name_combo_2.currentIndexChanged.connect(cfg.crossC.referenceLayerName)
# connect to refresh button
cfg.ui.buttonReload_shape_5.clicked.connect(cfg.crossC.refreshReferenceLayer)
# connect to calculate error matrix button
cfg.ui.calculatecrossClass_toolButton.clicked.connect(cfg.crossC.calculateCrossClassification)
cfg.ui.cross_classification.clicked.connect(cfg.batchT.setFunctionButton)
''' Class signature '''
# connect to calculate signature
cfg.ui.class_signature_Button.clicked.connect(cfg.classSigT.calculateClassSignatureAction)
cfg.ui.class_signature.clicked.connect(cfg.batchT.setFunctionButton)
# connect to refresh button
cfg.ui.toolButton_reload_22.clicked.connect(cfg.utls.refreshClassificationLayer)
''' Classification to vector '''
# connect to refresh button
cfg.ui.toolButton_reload_12.clicked.connect(cfg.utls.refreshClassificationLayer)
# connect to convert button
cfg.ui.convert_toolButton.clicked.connect(cfg.classVect.convertClassificationToVectorAction)
cfg.ui.classification_to_vector.clicked.connect(cfg.batchT.setFunctionButton)
''' Reclassification '''
# connect to refresh button
cfg.ui.toolButton_reload_11.clicked.connect(cfg.utls.refreshClassificationLayer)
# connect to reclassify button
cfg.ui.reclassify_toolButton.clicked.connect(cfg.reclassification.reclassifyAction)
cfg.ui.reclassification.clicked.connect(cfg.batchT.setFunctionButton)
# connect to calculate unique values button
cfg.ui.calculate_unique_values_toolButton.clicked.connect(cfg.reclassification.calculateUniqueValues)
# connect to incremental new values button
cfg.ui.incremental_new_values_toolButton.clicked.connect(cfg.reclassification.incrementalNewValues)
# connect to add value button
cfg.ui.add_value_pushButton.clicked.connect(cfg.reclassification.addRowToTable)
# connect to remove point
cfg.ui.remove_row_pushButton.clicked.connect(cfg.reclassification.removePointFromTable)
# connect to import reclassification table button
cfg.ui.import_reclass_toolButton.clicked.connect(cfg.reclassification.importReclass)
# connect to export reclassification table button
cfg.ui.export_reclass_toolButton.clicked.connect(cfg.reclassification.exportReclass)
# connect to edited cell
cfg.ui.reclass_values_tableWidget.cellChanged.connect(cfg.reclassification.editedCell)
''' Edit Raster tab'''
# connect to set value
cfg.ui.raster_set_value_toolButton.clicked.connect(cfg.editRstr.setRasterValueAction)
cfg.ui.edit_raster_using_vector.clicked.connect(cfg.batchT.setFunctionButton)
# connect to refresh rasters button
cfg.ui.toolButton_reload_14.clicked.connect(cfg.utls.refreshClassificationLayer)
cfg.ui.undo_edit_Button.clicked.connect(cfg.editRstr.undoEdit)
# connect the expression text
cfg.ui.expression_lineEdit.textChanged.connect(cfg.editRstr.textChanged)
cfg.ui.use_constant_val_checkBox.stateChanged.connect(cfg.editRstr.checkboxConstantValChanged)
cfg.ui.use_field_vector_checkBox.stateChanged.connect(cfg.editRstr.checkboxVectorFieldChanged)
cfg.ui.use_expression_checkBox.stateChanged.connect(cfg.editRstr.checkboxUseExpressionChanged)
cfg.ui.edit_val_use_ROI_radioButton.clicked.connect(cfg.editRstr.radioUseROIPolygonChanged)
cfg.ui.edit_val_use_vector_radioButton.clicked.connect(cfg.editRstr.radioUseVectorChanged)
cfg.ui.toolButton_reload_20.clicked.connect(cfg.editRstr.reloadVectorList)
cfg.ui.vector_name_combo_2.currentIndexChanged.connect(cfg.utls.refreshVectorFields2)
''' Classification sieve tab'''
# connect to refresh rasters button
cfg.ui.toolButton_reload_15.clicked.connect(cfg.utls.refreshClassificationLayer)
cfg.ui.sieve_toolButton.clicked.connect(cfg.sieveRstr.sieveClassificationAction)
cfg.ui.classification_sieve.clicked.connect(cfg.batchT.setFunctionButton)
''' Classification erosion tab'''
# connect to refresh rasters button
cfg.ui.toolButton_reload_18.clicked.connect(cfg.utls.refreshClassificationLayer)
cfg.ui.class_erosion_toolButton.clicked.connect(cfg.ersnRstr.erosionClassificationAction)
cfg.ui.classification_erosion.clicked.connect(cfg.batchT.setFunctionButton)
# connect the value text
cfg.ui.erosion_classes_lineEdit.textChanged.connect(cfg.ersnRstr.textChanged)
''' Classification dilation tab'''
# connect to refresh rasters button
cfg.ui.toolButton_reload_19.clicked.connect(cfg.utls.refreshClassificationLayer)
cfg.ui.class_dilation_toolButton.clicked.connect(cfg.dltnRstr.dilationClassificationAction)
cfg.ui.classification_dilation.clicked.connect(cfg.batchT.setFunctionButton)
# connect the value text
cfg.ui.dilation_classes_lineEdit.textChanged.connect(cfg.dltnRstr.textChanged)
''' Classification zonal stat tab'''
# connect to refresh rasters button
cfg.ui.toolButton_reload_24.clicked.connect(cfg.utls.refreshClassificationLayer)
cfg.ui.buttonReload_shape_6.clicked.connect(cfg.znlSttRstT.refreshReferenceLayer)
cfg.ui.zonal_stat_raster_toolButton.clicked.connect(cfg.znlSttRstT.zonalStatRasterAction)
cfg.ui.zonal_stat_raster.clicked.connect(cfg.batchT.setFunctionButton)
# connect the classification combo
cfg.ui.classification_name_combo_5.currentIndexChanged.connect(cfg.znlSttRstT.classificationLayerName)
# connect the reference combo
cfg.ui.reference_name_combo_3.currentIndexChanged.connect(cfg.znlSttRstT.referenceLayerName)
''' Band Calc tab '''
# connect to refresh button
cfg.ui.toolButton_reload_13.clicked.connect(cfg.bCalc.rasterBandName)
# connect to calc button
cfg.ui.toolButton_calculate.clicked.connect(cfg.bCalc.calculateButton)
cfg.ui.band_calc.clicked.connect(cfg.batchT.setFunctionButton)
# connect to import expression button
cfg.ui.toolButton_import_expression.clicked.connect(cfg.bCalc.importExpressionList)
# connect the expression text
cfg.ui.plainTextEdit_calc.textChanged.connect(cfg.bCalc.textChanged)
# connect double click table
cfg.ui.tableWidget_band_calc.doubleClicked.connect(cfg.bCalc.doubleClick)
# connect the intersection checkBox
cfg.ui.intersection_checkBox.stateChanged.connect(cfg.bCalc.intersectionCheckbox)
# connect the extent checkBox
cfg.ui.extent_checkBox.stateChanged.connect(cfg.bCalc.extentCheckbox)
# connect to raster type combo
cfg.ui.raster_type_combo.currentIndexChanged.connect(cfg.bCalc.setRasterType)
# connect to expression buttons
cfg.ui.toolButton_plus.clicked.connect(cfg.bCalc.buttonPlus)
cfg.ui.toolButton_minus.clicked.connect(cfg.bCalc.buttonMinus)
cfg.ui.toolButton_product.clicked.connect(cfg.bCalc.buttonProduct)
cfg.ui.toolButton_ratio.clicked.connect(cfg.bCalc.buttonRatio)
cfg.ui.toolButton_power.clicked.connect(cfg.bCalc.buttonPower)
cfg.ui.toolButton_sqrt.clicked.connect(cfg.bCalc.buttonSQRT)
cfg.ui.toolButton_lbracket.clicked.connect(cfg.bCalc.buttonLbracket)
cfg.ui.toolButton_rbracket.clicked.connect(cfg.bCalc.buttonRbracket)
cfg.ui.toolButton_greater.clicked.connect(cfg.bCalc.buttonGreater)
cfg.ui.toolButton_less.clicked.connect(cfg.bCalc.buttonLower)
cfg.ui.toolButton_equal.clicked.connect(cfg.bCalc.buttonEqual)
cfg.ui.toolButton_unequal.clicked.connect(cfg.bCalc.buttonUnequal)
cfg.ui.band_calc_function_tableWidget.doubleClicked.connect(cfg.bCalc.setFunction)
# decision rules
cfg.ui.decision_rules_tableWidget.cellChanged.connect(cfg.bCalc.editedDecisionRulesTable)
cfg.ui.band_calc_tabWidget.currentChanged.connect(cfg.bCalc.tabChanged)
# connect to add rule
cfg.ui.add_rule_toolButton.clicked.connect(cfg.bCalc.addRowToTable)
cfg.ui.remove_rule_toolButton.clicked.connect(cfg.bCalc.removeHighlightedRule)
# connect to clear button
cfg.ui.clear_rules_toolButton.clicked.connect(cfg.bCalc.clearRulesAction)
cfg.ui.export_rules_toolButton.clicked.connect(cfg.bCalc.exportRules)
cfg.ui.import_rules_toolButton.clicked.connect(cfg.bCalc.importRules)
cfg.ui.move_up_toolButton_2.clicked.connect(cfg.bCalc.moveUpRule)
cfg.ui.move_down_toolButton_2.clicked.connect(cfg.bCalc.moveDownRule)
# connect to filter
cfg.ui.bandcalc_filter_lineEdit.textChanged.connect(cfg.bCalc.filterTable)
''' Batch tab '''
# connect the batch text
#cfg.ui.plainTextEdit_batch.textChanged.connect(cfg.batchT.textChanged)
# connect to calc button
cfg.ui.toolButton_run_batch.clicked.connect(cfg.batchT.runButton)
cfg.ui.check_batch.clicked.connect(cfg.batchT.textChanged)
cfg.ui.clear_batch_toolButton.clicked.connect(cfg.batchT.clearBatch)
cfg.ui.export_batch_toolButton.clicked.connect(cfg.batchT.exportBatch)
cfg.ui.import_batch_toolButton.clicked.connect(cfg.batchT.importBatch)
# connect to table double click
cfg.ui.batch_tableWidget.doubleClicked.connect(cfg.batchT.setFunction)
''' Settings tab '''
# connect the ID field name line
cfg.ui.ID_field_name_lineEdit.textChanged.connect(cfg.sets.IDFieldNameChange)
# connect the macroclass ID field name line
cfg.ui.MID_field_name_lineEdit.textChanged.connect(cfg.sets.MacroIDFieldNameChange)
# connect the macroclass Info field name line
cfg.ui.MCInfo_field_name_lineEdit.textChanged.connect(cfg.sets.MacroInfoFieldNameChange)
# connect the Info field name line
cfg.ui.Info_field_name_lineEdit.textChanged.connect(cfg.sets.InfoFieldNameChange)
# connect the variable name line
cfg.ui.variable_name_lineEdit.textChanged.connect(cfg.sets.VariableNameChange)
# connect the group name line
cfg.ui.group_name_lineEdit.textChanged.connect(cfg.sets.GroupNameChange)
# connect the SMTP line
cfg.ui.smtp_server_lineEdit.textChanged.connect(cfg.sets.SMTPServerChange)
# connect the SMTP to emails line
cfg.ui.to_email_lineEdit.textChanged.connect(cfg.sets.SMTPtoEmailsChange)
# connect the SMTP user
cfg.ui.smtp_user_lineEdit.editingFinished.connect(cfg.sets.rememberUser)
# connect the SMTP password
cfg.ui.smtp_password_lineEdit.editingFinished.connect(cfg.sets.rememberUser)
# connect the remember settings checkBox
cfg.ui.remeber_settings_checkBox.stateChanged.connect(cfg.sets.rememberUserCheckbox)
# connect the SMTP checkBox
cfg.ui.smtp_checkBox.stateChanged.connect(cfg.sets.SMTPCheckbox)
# connect to reset field names button
cfg.ui.reset_field_names_Button.clicked.connect(cfg.sets.resetFieldNames)
# connect to reset variable name button
cfg.ui.reset_variable_name_Button.clicked.connect(cfg.sets.resetVariableName)
# connect to reset group name button
cfg.ui.reset_group_name_Button.clicked.connect(cfg.sets.resetGroupName)
# connect the log file checkBox
cfg.ui.log_checkBox.stateChanged.connect(cfg.sets.logCheckbox)
# connect the download news checkBox
cfg.ui.download_news_checkBox.stateChanged.connect(cfg.sets.downloadNewsCheckbox)
# connect the virtual raster checkBox
cfg.ui.virtual_raster_load_checkBox.stateChanged.connect(cfg.sets.virtualRasterCheckbox)
# connect the sound checkBox
cfg.ui.sound_checkBox.stateChanged.connect(cfg.sets.soundCheckbox)
# connect the virtual raster format checkBox
cfg.ui.virtual_raster_checkBox.stateChanged.connect(cfg.sets.virtualRasterFormatCheckbox)
# connect the raster compression checkBox
cfg.ui.raster_compression_checkBox.stateChanged.connect(cfg.sets.rasterCompressionCheckbox)
# connect the parallel writing checkBox
cfg.ui.parallel_writing_checkBox.stateChanged.connect(cfg.sets.parallelWritingCheckbox)
# connect to change temporary directory button
cfg.ui.temp_directory_Button.clicked.connect(cfg.sets.changeTempDir)
# connect to reset temporary directory button
cfg.ui.reset_temp_directory_Button.clicked.connect(cfg.sets.resetTempDir)
# connect to clear log button
cfg.ui.clearLog_Button.clicked.connect(cfg.utls.clearLogFile)
# connect to export log button
cfg.ui.exportLog_Button.clicked.connect(cfg.sets.copyLogFile)
# connect to test dependencies button
cfg.ui.test_dependencies_Button.clicked.connect(cfg.sets.testDependencies)
# connect to RAM spinbox
cfg.ui.RAM_spinBox.valueChanged.connect(cfg.sets.RAMSettingChange)
# connect to thread spinbox
cfg.ui.CPU_spinBox.valueChanged.connect(cfg.sets.threadSettingChange)
# connect the Python path line
cfg.ui.python_path_lineEdit.textChanged.connect(cfg.sets.PythonPathSettingChange)
# connect the Python modules path line
cfg.ui.python_path_lineEdit_2.textChanged.connect(cfg.sets.PythonModulePathSettingChange)
# connect the GDAL path line
cfg.ui.gdal_path_lineEdit.textChanged.connect(cfg.sets.GDALPathSettingChange)
# connect to change color button
cfg.ui.change_color_Button.clicked.connect(cfg.sets.changeROIColor)
# connect to reset color button
cfg.ui.reset_color_Button.clicked.connect(cfg.sets.resetROIStyle)
# connect to transparency slider
cfg.ui.transparency_Slider.valueChanged.connect(cfg.sets.changeROITransparency)
# first install
if cfg.firstInstallVal == 'Yes':
cfg.utls.welcomeTab()
cfg.utls.setQGISRegSetting(cfg.regFirstInstall, 'No')
cfg.utls.findAvailableRAM()
cfg.utls.findAvailableProcessors()
# welcome message
lWelcome = cfg.plgnDir + '/ui/welcome.html'
htmlTextF = open(lWelcome, 'r')
htmlText = htmlTextF.read()
cfg.uidc.main_textBrowser.clear()
cfg.uidc.main_textBrowser.setHtml(htmlText)
htmlTextF.close()
if cfg.osSCP.path.isfile(cfg.plgnDir + '/firstrun'):
cfg.ipt.welcomeText('https://semiautomaticgit.github.io/SemiAutomaticClassificationPluginWelcome/changelog.html')
cfg.osSCP.remove(cfg.plgnDir + '/firstrun')
else:
dateV = cfg.datetimeSCP.datetime.now()
dStr = dateV.strftime('%Y_%m_%d')
cfg.ipt.welcomeText('https://semiautomaticgit.github.io/SemiAutomaticClassificationPluginWelcome/welcome' + '_' + dStr + '.html', 'https://semiautomaticgit.github.io/SemiAutomaticClassificationPluginWelcome/welcome.html')
cfg.utls.cleanOldTempDirectory()
cfg.skipRegistry = False
else:
dockclassdlg = DockClassDialog(qgisUtils.iface.mainWindow(), qgisUtils.iface)
qgisUtils.iface.removeDockWidget(dockclassdlg)
# save signature list when saving project
def projectSaved(self):
if cfg.skipProjectSaved == 'No':
if len(cfg.signIDs) > 0:
cfg.SCPD.saveSignatureListToFile()
if cfg.scpFlPath is not None:
cfg.SCPD.saveMemToSHP(cfg.shpLay)
cfg.utls.zipDirectoryInFile(cfg.scpFlPath, cfg.inptDir)
cfg.downProd.saveDownloadTable()
try:
scpPath = cfg.utls.readProjectVariable('trainingLayer', '')
name = cfg.utls.fileNameNoExt(scpPath)
duplicateID = cfg.utls.layerID(name, cfg.shpLay.id())
cfg.qgisCoreSCP.QgsProject.instance().removeMapLayer(duplicateID)
except:
pass
# reset all variables and interface
def resetSCP(self):
# logger
cfg.utls.logToFile(str(__name__) + '-' + str(cfg.inspectSCP.stack()[0][3])+ ' ' + cfg.utls.lineOfCode(), 'LOG ACTIVE' + cfg.sysSCPInfo)
cfg.scpFlPath = None
cfg.ui.image_raster_name_combo.blockSignals(True)
cfg.ui.Band_set_tabWidget.blockSignals(True)
cfg.rasterComboEdited = 'No'
cfg.projPath = cfg.qgisCoreSCP.QgsProject.instance().fileName()
cfg.lastSaveDir = cfg.osSCP.path.dirname(cfg.projPath)
cfg.projPath = cfg.qgisCoreSCP.QgsProject.instance().fileName()
cfg.lastSaveDir = cfg.osSCP.path.dirname(cfg.projPath)
cfg.signList = {}
cfg.signIDs = {}
cfg.spectrPlotList = {}
cfg.signPlotIDs = {}
cfg.scatterPlotIDs = {}
cfg.scatterPlotList = {}
cfg.undoIDList = {}
cfg.undoSpectrPlotList = {}
cfg.lstROI = None
cfg.lstROI2 = None
cfg.rpdROICheck = '2'
cfg.vegIndexCheck = 2
cfg.sigClcCheck = 2
cfg.utls.clearTable(cfg.uisp.signature_list_plot_tableWidget)
cfg.utls.clearTable(cfg.uiscp.scatter_list_plot_tableWidget)
cfg.utls.clearTable(cfg.ui.signature_threshold_tableWidget)
cfg.utls.clearTable(cfg.ui.download_images_tableWidget)
cfg.utls.clearTable(cfg.ui.LCS_tableWidget)
cfg.treeDockItm = {}
cfg.treeDockMCItm = {}
cfg.SCPD.clearTree()
cfg.scaPlT.scatterPlotListTable(cfg.uiscp.scatter_list_plot_tableWidget)
cfg.spSigPlot.refreshPlot()
cfg.LCSignT.LCSignatureThresholdListTable()
# reload layers in combos
cfg.ipt.refreshRasterLayer()
cfg.utls.refreshVectorLayer()
cfg.utls.refreshClassificationLayer()
cfg.utls.refreshRasterExtent()
cfg.acc.refreshReferenceLayer()
cfg.crossC.refreshReferenceLayer()
cfg.znlSttRstT.refreshReferenceLayer()
cfg.znlSttRstT.loadStatisticCombo()
cfg.clssNghbr.loadStatisticCombo()
cfg.landCC.refreshClassificationReferenceLayer()
cfg.landCC.refreshNewClassificationLayer()
# read variables
cfg.utls.readVariables()
# set ROI color
cfg.ui.change_color_Button.setStyleSheet('background-color :' + cfg.ROIClrVal)
# set ROI transparency
cfg.ui.transparency_Slider.setValue(cfg.ROITrnspVal)
# set RAM value
cfg.ui.RAM_spinBox.setValue(cfg.RAMValue)
# set CPU value
cfg.ui.CPU_spinBox.setValue(cfg.threads)
# rapid ROI band
cfg.uidc.rapidROI_band_spinBox.setValue(int(cfg.ROIband))
# min ROI size
cfg.Min_region_size_spin.setValue(int(cfg.minROISz))
# max ROI width
cfg.Max_ROI_width_spin.setValue(int(cfg.maxROIWdth))
# range radius
cfg.Range_radius_spin.setValue(float(cfg.rngRad))
# ROI ID field
cfg.uidc.ROI_ID_spin.setValue(int(cfg.ROIID))
# ROI macro ID field
cfg.uidc.ROI_Macroclass_ID_spin.setValue(int(cfg.ROIMacroID))
# preview size
cfg.preview_size_spinBox.setValue(float(cfg.prvwSz))
# set ID field name line
cfg.ui.ID_field_name_lineEdit.setText(cfg.fldID_class)
cfg.ui.MID_field_name_lineEdit.setText(cfg.fldMacroID_class)
# set Info field name line
cfg.ui.Info_field_name_lineEdit.setText(cfg.fldROI_info)
cfg.ui.MCInfo_field_name_lineEdit.setText(cfg.fldROIMC_info)
cfg.ui.variable_name_lineEdit.setText(cfg.variableName)
cfg.ui.group_name_lineEdit.setText(cfg.grpNm)
# gdal path
cfg.ui.gdal_path_lineEdit.setText(cfg.gdalPath)
cfg.ui.python_path_lineEdit.setText(cfg.PythonPathSettings)
cfg.ui.python_path_lineEdit_2.setText(cfg.PythonModulesPathSettings)
# set rapid ROI checkbox state
try:
cfg.uidc.rapid_ROI_checkBox.setCheckState(int(cfg.rpdROICheck))
except:
pass
# set vegetation index calculation checkbox state
try:
cfg.uidc.display_cursor_checkBox.setCheckState(int(cfg.vegIndexCheck))
except:
pass
# set signature calculation checkbox state
try:
cfg.uidc.signature_checkBox.setCheckState(int(cfg.sigClcCheck))
cfg.ui.signature_checkBox2.setCheckState(int(cfg.sigClcCheck))
except:
pass
# set save input checkbox state
try:
cfg.uidc.save_input_checkBox.setCheckState(int(cfg.saveInputCheck))
except:
pass
# load classification algorithm
idAlg = cfg.ui.algorithm_combo.findText(cfg.algName)
if idAlg >= 0:
cfg.ui.algorithm_combo.setCurrentIndex(idAlg)
else:
cfg.ui.algorithm_combo.setCurrentIndex(0)
cfg.algName = cfg.algMinDist
# ROI info
cfg.uidc.ROI_Class_line.setText(cfg.ROIInfo)
cfg.uidc.ROI_Macroclass_line.setText(cfg.ROIMacroClassInfo)
cfg.uidc.custom_index_lineEdit.setText(cfg.customExpression)
# RGB list
cfg.RGBLT.RGBListTable(cfg.RGBList)
# reload raster bands in checklist
cfg.bst.rasterBandName()
cfg.rasterComboEdited = 'Yes'
cfg.ui.image_raster_name_combo.blockSignals(False)
cfg.ui.Band_set_tabWidget.blockSignals(False)
# new project
def newProjectLoaded(self):
# clear band set
t = cfg.ui.Band_set_tabWidget.count()
for index in reversed(list(range(0, t))):
cfg.bst.deleteBandSetTab(index)
self.resetSCP()
cfg.bCalc.rasterBandName()
cfg.SCPD.openInput()
cfg.bstLT.BandSetListTable()
# read project variables
def projectLoaded(self):
self.resetSCP()
# load product download table
cfg.downProd.openDownloadTable()
cfg.bCalc.rasterBandName()
cfg.SCPD.openInput()
cfg.bstLT.BandSetListTable()
# run
def run(self):
# show the dialog
cfg.dlg.show()
# Run the dialog event loop
pointer_result = cfg.dlg.exec_()
# remove plugin menu and icon
def unload(self):
cfg.utls.createBackupFile(cfg.scpFlPath)
# save window size
try:
cfg.utls.setQGISRegSetting(cfg.regWindowSizeW, cfg.dlg.size().width())
cfg.utls.setQGISRegSetting(cfg.regWindowSizeH, cfg.dlg.size().height())
except:
pass
try:
qgisUtils.iface.removeDockWidget(cfg.dockclassdlg)
del cfg.toolBar2
del cfg.toolBar3
cfg.menu.deleteLater()
# remove temp files
if cfg.tmpDir is not None and cfg.QDirSCP(cfg.tmpDir).exists():
cfg.shutilSCP.rmtree(cfg.tmpDir, True)
oDir = cfg.utls.makeDirectory(str(cfg.QDirSCP.tempPath() + '/' + cfg.tempDirName))
except:
if PluginCheck == 'Yes':
qgisUtils.iface.messageBar().pushMessage('Semi-Automatic Classification Plugin', QApplication.translate('semiautomaticclassificationplugin', 'Please, restart QGIS for executing the Semi-Automatic Classification Plugin'), level=qgisCore.Qgis.Info)
| gpl-3.0 |
drammock/mne-python | tutorials/stats-sensor-space/40_cluster_1samp_time_freq.py | 10 | 5666 | """
===============================================================
Non-parametric 1 sample cluster statistic on single trial power
===============================================================
This script shows how to estimate significant clusters
in time-frequency power estimates. It uses a non-parametric
statistical procedure based on permutations and cluster
level statistics.
The procedure consists of:
- extracting epochs
- compute single trial power estimates
- baseline correct the power estimates (power ratios)
- compute stats to see if ratio deviates from 1.
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.time_frequency import tfr_morlet
from mne.stats import permutation_cluster_1samp_test
from mne.datasets import sample
print(__doc__)
###############################################################################
# Set parameters
# --------------
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_raw.fif'
tmin, tmax, event_id = -0.3, 0.6, 1
# Setup for reading the raw data
raw = mne.io.read_raw_fif(raw_fname)
events = mne.find_events(raw, stim_channel='STI 014')
include = []
raw.info['bads'] += ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = mne.pick_types(raw.info, meg='grad', eeg=False, eog=True,
stim=False, include=include, exclude='bads')
# Load condition 1
event_id = 1
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True,
reject=dict(grad=4000e-13, eog=150e-6))
# just use right temporal sensors for speed
epochs.pick_channels(mne.read_vectorview_selection('Right-temporal'))
evoked = epochs.average()
# Factor to down-sample the temporal dimension of the TFR computed by
# tfr_morlet. Decimation occurs after frequency decomposition and can
# be used to reduce memory usage (and possibly computational time of downstream
# operations such as nonparametric statistics) if you don't need high
# spectrotemporal resolution.
decim = 5
freqs = np.arange(8, 40, 2) # define frequencies of interest
sfreq = raw.info['sfreq'] # sampling in Hz
tfr_epochs = tfr_morlet(epochs, freqs, n_cycles=4., decim=decim,
average=False, return_itc=False, n_jobs=1)
# Baseline power
tfr_epochs.apply_baseline(mode='logratio', baseline=(-.100, 0))
# Crop in time to keep only what is between 0 and 400 ms
evoked.crop(-0.1, 0.4)
tfr_epochs.crop(-0.1, 0.4)
epochs_power = tfr_epochs.data
###############################################################################
# Define adjacency for statistics
# -------------------------------
# To compute a cluster-corrected value, we need a suitable definition
# for the adjacency of our values. So we first compute the
# sensor adjacency, then combine that with a grid/lattice adjacency
# assumption for the time-frequency plane:
sensor_adjacency, ch_names = mne.channels.find_ch_adjacency(
tfr_epochs.info, 'grad')
# Subselect the channels we are actually using
use_idx = [ch_names.index(ch_name.replace(' ', ''))
for ch_name in tfr_epochs.ch_names]
sensor_adjacency = sensor_adjacency[use_idx][:, use_idx]
assert sensor_adjacency.shape == \
(len(tfr_epochs.ch_names), len(tfr_epochs.ch_names))
assert epochs_power.data.shape == (
len(epochs), len(tfr_epochs.ch_names),
len(tfr_epochs.freqs), len(tfr_epochs.times))
adjacency = mne.stats.combine_adjacency(
sensor_adjacency, len(tfr_epochs.freqs), len(tfr_epochs.times))
# our adjacency is square with each dim matching the data size
assert adjacency.shape[0] == adjacency.shape[1] == \
len(tfr_epochs.ch_names) * len(tfr_epochs.freqs) * len(tfr_epochs.times)
###############################################################################
# Compute statistic
# -----------------
threshold = 3.
n_permutations = 50 # Warning: 50 is way too small for real-world analysis.
T_obs, clusters, cluster_p_values, H0 = \
permutation_cluster_1samp_test(epochs_power, n_permutations=n_permutations,
threshold=threshold, tail=0,
adjacency=adjacency,
out_type='mask', verbose=True)
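# In the returned values: T_obs is the observed t-statistic map over
# (channels x freqs x times), clusters is a list of boolean masks of the same
# shape (because out_type='mask'), cluster_p_values gives one permutation
# p-value per cluster, and H0 collects the maximum cluster statistic from each
# of the n_permutations shuffles.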
###############################################################################
# View time-frequency plots
# -------------------------
evoked_data = evoked.data
times = 1e3 * evoked.times
plt.figure()
plt.subplots_adjust(0.12, 0.08, 0.96, 0.94, 0.2, 0.43)
# Create new stats image with only significant clusters
T_obs_plot = np.nan * np.ones_like(T_obs)
for c, p_val in zip(clusters, cluster_p_values):
if p_val <= 0.05:
T_obs_plot[c] = T_obs[c]
# Just plot one channel's data
ch_idx, f_idx, t_idx = np.unravel_index(
np.nanargmax(np.abs(T_obs_plot)), epochs_power.shape[1:])
# ch_idx = tfr_epochs.ch_names.index('MEG 1332') # to show a specific one
vmax = np.max(np.abs(T_obs))
vmin = -vmax
plt.subplot(2, 1, 1)
plt.imshow(T_obs[ch_idx], cmap=plt.cm.gray,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.imshow(T_obs_plot[ch_idx], cmap=plt.cm.RdBu_r,
extent=[times[0], times[-1], freqs[0], freqs[-1]],
aspect='auto', origin='lower', vmin=vmin, vmax=vmax)
plt.colorbar()
plt.xlabel('Time (ms)')
plt.ylabel('Frequency (Hz)')
plt.title(f'Induced power ({tfr_epochs.ch_names[ch_idx]})')
ax2 = plt.subplot(2, 1, 2)
evoked.plot(axes=[ax2], time_unit='s')
plt.show()
| bsd-3-clause |
pdamodaran/yellowbrick | tests/rand.py | 1 | 3176 | # tests.random
# A visualizer that draws a random scatter plot for testing.
#
# Author: Benjamin Bengfort <bbengfort@districtdatalabs.com>
# Created: Wed Mar 21 17:51:15 2018 -0400
#
# ID: random.py [] benjamin@bengfort.com $
"""
A visualizer that draws a random scatter plot for testing.
"""
##########################################################################
## Imports
##########################################################################
import numpy as np
from yellowbrick.base import Visualizer
from yellowbrick.style import resolve_colors
from sklearn.datasets import make_blobs
##########################################################################
## Random Visualizer
##########################################################################
class RandomVisualizer(Visualizer):
"""
Creates random scatter plots as a testing utility.
Data generation uses scikit-learn make_blobs to create scatter plots that
have reasonable visual features and multiple colors.
Parameters
----------
ax : matplotlib Axes, default: None
The axis to plot the figure on. If None is passed in the current axes
will be used (or generated if required).
n_samples : int, default: 100
The number of points to generate for the scatter plot
n_blobs : int or array of shape [n_centers, 2]
Define the number of blobs to create or specify their centers.
random_state : int, RandomState or None:
Used to specify the seed of the random state to ensure tests work.
"""
def __init__(self, ax=None, n_samples=100, n_blobs=3,
random_state=None, **kwargs):
super(RandomVisualizer, self).__init__(ax=ax, **kwargs)
if isinstance(random_state, (int, float)) or random_state is None:
random_state = np.random.RandomState(random_state)
self.set_params(
n_samples=n_samples, n_blobs=n_blobs, random_state=random_state,
)
def generate(self):
"""
Returns random data according to the visualizer specification.
Returns
-------
X : array of shape [n_samples, 2]
2 dimensional array of points to plot
y : vector with length n_samples
Center/blob each point belongs to (used for color)
"""
return make_blobs(
self.n_samples, 2, self.n_blobs, random_state=self.random_state
)
def fit(self, *args, **kwargs):
X, c = self.generate()
x = X[:,0]
y = X[:,1]
self.draw(x, y, c)
return self
def draw(self, x, y, c):
colors = resolve_colors(self.n_blobs)
for i in np.arange(self.n_blobs):
mask = c==i
label = "c{}".format(i)
self.ax.scatter(x[mask], y[mask], label=label, c=colors[i])
return self.ax
def finalize(self):
self.ax.legend(frameon=True)
self.ax.set_ylabel("$y$")
self.ax.set_xlabel("$x$")
self.ax.set_title("Random Scatter Plot")
return self.ax
if __name__ == '__main__':
r = RandomVisualizer()
r.fit()
r.poof(outpath='test.png')
| apache-2.0 |
start-jsk/jsk_apc | jsk_apc2016_common/python/jsk_apc2016_common/rbo_segmentation/evaluate.py | 1 | 7455 | from apc_data import APCDataSet, APCSample
from probabilistic_segmentation import ProbabilisticSegmentationRF, ProbabilisticSegmentationBP
import pickle
import os
import matplotlib.pyplot as plt
import numpy as np
import copy
import rospkg
def _fast_hist(a, b, n):
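"""Accumulate a confusion matrix between label vectors a (true) and b (predicted):
entry [i, j] of the returned (n, n) array counts samples with true label i and
predicted label j, using np.bincount on the combined index n * a + b and
ignoring entries whose true label falls outside [0, n)."""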
k = (a >= 0) & (a < n)
hist = np.bincount(n * a[k].astype(int) +
b[k], minlength=n**2).reshape(n, n)
return hist
def label_accuracy_score(label_true, label_pred, n_class):
"""Returns accuracy score evaluation result.
- overall accuracy
- mean accuracy
- mean IU
- fwavacc
"""
hist = _fast_hist(label_true.flatten(), label_pred.flatten(), n_class)
acc = np.diag(hist).sum() / hist.sum().astype(np.float64)
acc_cls = np.diag(hist) / hist.sum(axis=1).astype(np.float64)
acc_cls = np.nanmean(acc_cls)
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist)).astype(np.float64)
mean_iu = np.nanmean(iu)
freq = hist.sum(axis=1) / hist.sum().astype(np.float64)
fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
return acc, acc_cls, mean_iu, fwavacc
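# For example, label_accuracy_score(np.array([0, 1, 1, 1]),
# np.array([0, 1, 0, 1]), n_class=2) has three of four labels in agreement,
# so the returned overall accuracy is 0.75; the remaining metrics are
# per-class and IoU-based summaries of the same confusion matrix.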
# previously declared in main.py
def combine_datasets(datasets):
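"""Merge the samples of several APCDataSet objects into one APCDataSet."""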
samples = []
for d in datasets:
samples += d.samples
return APCDataSet(samples=samples)
def load_datasets(dataset_names, data_path, cache_path):
datasets = dict()
for dataset_name in dataset_names:
dataset_path = os.path.join(
data_path, 'rbo_apc/{}'.format(dataset_name))
datasets[dataset_name] = APCDataSet(
name=dataset_name, dataset_path=dataset_path,
cache_path=cache_path, load_from_cache=True)
return datasets
def evaluate(bp, test_data):
acc_list = []
acc_cls_list = []
mean_iu_list = []
fwavacc_list = []
for sample in test_data.samples:
if len(sample.object_masks) == 0:
continue
pred_target = sample.object_masks.keys()[0]
if pred_target == 'shelf':
if len(sample.object_masks.keys()) == 1:
continue
pred_target = sample.object_masks.keys()[1]
bp.predict(sample, pred_target)
print 'done'
images = []
images.append(bp.posterior_images_smooth['shelf'])
objects = []
objects.append('shelf')
for _object in bp.posterior_images_smooth.keys():
if _object != 'shelf':
images.append(bp.posterior_images_smooth[_object])
objects.append(_object)
pred = np.argmax(np.array(images), axis=0)
# remove dataset that does not have complete set
objects_copy = copy.copy(objects)
object_masks_keys = sample.object_masks.keys()
if 'shelf' in objects_copy: objects_copy.remove('shelf')
if 'shelf' in object_masks_keys: object_masks_keys.remove('shelf')
if set(objects_copy) != set(object_masks_keys):
#print 'skip posterior_image keys ', objects_copy
#print 'skip object_mask keys ', object_masks_keys
continue
true = np.zeros_like(pred)
for i, _object in enumerate(objects):
if _object != 'shelf':
true[sample.object_masks[_object]] = i
masked_pred = pred[sample.bin_mask]
masked_true = true[sample.bin_mask]
acc, acc_cls, mean_iu, fwavacc = label_accuracy_score(masked_true, masked_pred, len(objects))
acc_list.append(acc)
acc_cls_list.append(acc_cls)
mean_iu_list.append(mean_iu)
fwavacc_list.append(fwavacc)
"""
label_pred = np.zeros(pred.shape[1:]).astype(np.int64)
label_true = np.zeros(pred.shape[1:]).astype(np.int64)
for i in range(pred.shape[0]):
label_pred[pred[i]] = i
label_true[true[i]] = i
label_pred_masked = label_pred[sample.bin_mask]
label_true_masked = label_true[sample.bin_mask]
"""
return acc_list, acc_cls_list, mean_iu_list, fwavacc_list
def create_dataset(dataset_path):
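"""Build an APCDataSet by collecting every '<prefix>.jpg' file found under
dataset_path and loading a labeled 2016-format APCSample (with inferred
shelf mask) for each prefix."""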
# initialize empty dataset
dataset = APCDataSet(from_pkl=False)
data_file_prefixes = []
key = '.jpg'
for dir_name, sub_dirs, files in os.walk(dataset_path):
for f in files:
if key == f[-len(key):]:
data_file_prefixes.append(
os.path.join(dir_name, f[:-len(key)]))
print data_file_prefixes
for file_prefix in data_file_prefixes:
dataset.samples.append(
APCSample(data_2016_prefix=file_prefix,
labeled=True, is_2016=True, infer_shelf_mask=True))
return dataset
###############################################################################
# prepare dataset #
###############################################################################
#data_path = '/home/leus/ros/indigo/src/start-jsk/jsk_apc/jsk_apc2016_common/data'
#cache_path = os.path.join(data_path, 'cache')
#dataset_path = os.path.join(data_path, 'rbo_apc')
rospack = rospkg.RosPack()
common_path = rospack.get_path('jsk_apc2016_common')
data_path = common_path + '/data/'
dataset_name = 'tokyo_run/single_item_labeled'
dataset_path = os.path.join(data_path, dataset_name)
data = create_dataset(dataset_path)
###############################################################################
# dataset #
###############################################################################
train_data, test_data = data.split_simple(portion_training=0.7)
###############################################################################
# all features #
###############################################################################
all_features = ['color', 'height3D', 'dist2shelf']
params = {
'use_features': all_features,
'segmentation_method': "max_smooth", 'selection_method': "max_smooth",
'make_convex': True, 'do_shrinking_resegmentation': True,
'do_greedy_resegmentation': True}
bp = ProbabilisticSegmentationBP(**params)
bp.fit(train_data)
acc_list, acc_cls_list, mean_iu_list, fwavacc_list = evaluate(bp, test_data)
print 'all features acc ', np.mean(acc_list)
print 'all features acc_cls ', np.mean(acc_cls_list)
print 'all features mean_iu ', np.mean(mean_iu_list)
print 'all features fwavacc ', np.mean(fwavacc_list)
###############################################################################
# # Color only #
###############################################################################
params = {
'use_features': ['color'],
'segmentation_method': "max_smooth", 'selection_method': "max_smooth",
'make_convex': True, 'do_shrinking_resegmentation': True,
'do_greedy_resegmentation': True}
bp = ProbabilisticSegmentationBP(**params)
bp.fit(train_data)
acc_list, acc_cls_list, mean_iu_list, fwavacc_list = evaluate(bp, test_data)
print 'trained only by color features acc ', np.mean(acc_list)
print 'trained only by color features acc_cls ', np.mean(acc_cls_list)
print 'trained only by color features mean_iu ', np.mean(mean_iu_list)
print 'trained only by color features fwavacc ', np.mean(fwavacc_list)
| bsd-3-clause |
shashankrajput/seq2seq | seq2seq/tasks/dump_attention.py | 6 | 4850 | # Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Task where both the input and output sequence are plain text.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import numpy as np
from matplotlib import pyplot as plt
import tensorflow as tf
from tensorflow import gfile
from seq2seq.tasks.decode_text import _get_prediction_length
from seq2seq.tasks.inference_task import InferenceTask, unbatch_dict
def _get_scores(predictions_dict):
"""Returns the attention scores, sliced by source and target length.
"""
prediction_len = _get_prediction_length(predictions_dict)
source_len = predictions_dict["features.source_len"]
return predictions_dict["attention_scores"][:prediction_len, :source_len]
def _create_figure(predictions_dict):
"""Creates and returns a new figure that visualizes
attention scores for a single model prediction.
"""
# Find out how long the predicted sequence is
target_words = list(predictions_dict["predicted_tokens"])
prediction_len = _get_prediction_length(predictions_dict)
# Get source words
source_len = predictions_dict["features.source_len"]
source_words = predictions_dict["features.source_tokens"][:source_len]
# Plot
fig = plt.figure(figsize=(8, 8))
plt.imshow(
X=predictions_dict["attention_scores"][:prediction_len, :source_len],
interpolation="nearest",
cmap=plt.cm.Blues)
plt.xticks(np.arange(source_len), source_words, rotation=45)
plt.yticks(np.arange(prediction_len), target_words, rotation=-45)
fig.tight_layout()
return fig
class DumpAttention(InferenceTask):
"""Defines inference for tasks where both the input and output sequences
are plain text.
Params:
delimiter: Character by which tokens are delimited. Defaults to space.
unk_replace: If true, enable unknown token replacement based on attention
scores.
unk_mapping: If `unk_replace` is true, this can be the path to a file
defining a dictionary to improve UNK token replacement. Refer to the
documentation for more details.
dump_attention_dir: Save attention scores and plots to this directory.
dump_attention_no_plot: If true, only save attention scores, not
attention plots.
dump_beams: Write beam search debugging information to this file.
"""
def __init__(self, params):
super(DumpAttention, self).__init__(params)
self._attention_scores_accum = []
self._idx = 0
if not self.params["output_dir"]:
raise ValueError("Must specify output_dir for DumpAttention")
@staticmethod
def default_params():
params = {}
params.update({"output_dir": "", "dump_plots": True})
return params
def begin(self):
super(DumpAttention, self).begin()
gfile.MakeDirs(self.params["output_dir"])
def before_run(self, _run_context):
fetches = {}
fetches["predicted_tokens"] = self._predictions["predicted_tokens"]
fetches["features.source_len"] = self._predictions["features.source_len"]
fetches["features.source_tokens"] = self._predictions[
"features.source_tokens"]
fetches["attention_scores"] = self._predictions["attention_scores"]
return tf.train.SessionRunArgs(fetches)
def after_run(self, _run_context, run_values):
fetches_batch = run_values.results
for fetches in unbatch_dict(fetches_batch):
# Convert to unicode
fetches["predicted_tokens"] = np.char.decode(
fetches["predicted_tokens"].astype("S"), "utf-8")
fetches["features.source_tokens"] = np.char.decode(
fetches["features.source_tokens"].astype("S"), "utf-8")
if self.params["dump_plots"]:
output_path = os.path.join(self.params["output_dir"],
"{:05d}.png".format(self._idx))
_create_figure(fetches)
plt.savefig(output_path)
plt.close()
tf.logging.info("Wrote %s", output_path)
self._idx += 1
self._attention_scores_accum.append(_get_scores(fetches))
def end(self, _session):
scores_path = os.path.join(self.params["output_dir"],
"attention_scores.npz")
np.savez(scores_path, *self._attention_scores_accum)
tf.logging.info("Wrote %s", scores_path)
| apache-2.0 |
ldirer/scikit-learn | examples/cluster/plot_digits_agglomeration.py | 377 | 1694 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Feature agglomeration
=========================================================
These images show how similar features are merged together using
feature agglomeration.
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets, cluster
from sklearn.feature_extraction.image import grid_to_graph
digits = datasets.load_digits()
images = digits.images
X = np.reshape(images, (len(images), -1))
connectivity = grid_to_graph(*images[0].shape)
agglo = cluster.FeatureAgglomeration(connectivity=connectivity,
n_clusters=32)
agglo.fit(X)
X_reduced = agglo.transform(X)
X_restored = agglo.inverse_transform(X_reduced)
images_restored = np.reshape(X_restored, images.shape)
plt.figure(1, figsize=(4, 3.5))
plt.clf()
plt.subplots_adjust(left=.01, right=.99, bottom=.01, top=.91)
for i in range(4):
plt.subplot(3, 4, i + 1)
plt.imshow(images[i], cmap=plt.cm.gray, vmax=16, interpolation='nearest')
plt.xticks(())
plt.yticks(())
if i == 1:
plt.title('Original data')
plt.subplot(3, 4, 4 + i + 1)
plt.imshow(images_restored[i], cmap=plt.cm.gray, vmax=16,
interpolation='nearest')
if i == 1:
plt.title('Agglomerated data')
plt.xticks(())
plt.yticks(())
plt.subplot(3, 4, 10)
plt.imshow(np.reshape(agglo.labels_, images[0].shape),
interpolation='nearest', cmap=plt.cm.spectral)
plt.xticks(())
plt.yticks(())
plt.title('Labels')
plt.show()
| bsd-3-clause |
ZENGXH/scikit-learn | examples/bicluster/plot_spectral_biclustering.py | 403 | 2011 | """
=============================================
A demo of the Spectral Biclustering algorithm
=============================================
This example demonstrates how to generate a checkerboard dataset and
bicluster it using the Spectral Biclustering algorithm.
The data is generated with the ``make_checkerboard`` function, then
shuffled and passed to the Spectral Biclustering algorithm. The rows
and columns of the shuffled matrix are rearranged to show the
biclusters found by the algorithm.
The outer product of the row and column label vectors shows a
representation of the checkerboard structure.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_checkerboard
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralBiclustering
from sklearn.metrics import consensus_score
n_clusters = (4, 3)
data, rows, columns = make_checkerboard(
shape=(300, 300), n_clusters=n_clusters, noise=10,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralBiclustering(n_clusters=n_clusters, method='log',
random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.1f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.matshow(np.outer(np.sort(model.row_labels_) + 1,
np.sort(model.column_labels_) + 1),
cmap=plt.cm.Blues)
plt.title("Checkerboard structure of rearranged data")
plt.show()
| bsd-3-clause |
karstenw/nodebox-pyobjc | examples/Extended Application/matplotlib/examples/subplots_axes_and_figures/custom_figure_class.py | 1 | 1517 | """
===================
Custom Figure Class
===================
You can pass a custom Figure constructor to figure if you want to derive from
the default Figure. This simple example creates a figure with a figure title.
"""
import matplotlib.pyplot as plt #import figure, show
from matplotlib.figure import Figure
# nodebox section
if __name__ == '__builtin__':
# we're in nodebox
import os
import tempfile
W = 800
inset = 20
size(W, 600)
plt.cla()
plt.clf()
plt.close('all')
def tempimage():
fob = tempfile.NamedTemporaryFile(mode='w+b', suffix='.png', delete=False)
fname = fob.name
fob.close()
return fname
imgx = 20
imgy = 0
def pltshow(plt, dpi=150):
global imgx, imgy
temppath = tempimage()
plt.savefig(temppath, dpi=dpi)
dx,dy = imagesize(temppath)
w = min(W,dx)
image(temppath,imgx,imgy,width=w)
imgy = imgy + dy + 20
os.remove(temppath)
size(W, HEIGHT+dy+40)
else:
def pltshow(mplpyplot):
mplpyplot.show()
# nodebox section end
class MyFigure(Figure):
def __init__(self, *args, **kwargs):
"""
custom kwarg figtitle is a figure title
"""
figtitle = kwargs.pop('figtitle', 'hi mom')
Figure.__init__(self, *args, **kwargs)
self.text(0.5, 0.95, figtitle, ha='center')
fig = plt.figure(FigureClass=MyFigure, figtitle='my title')
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
pltshow(plt)
| mit |
LiaoPan/scikit-learn | examples/ensemble/plot_gradient_boosting_quantile.py | 392 | 2114 | """
=====================================================
Prediction Intervals for Gradient Boosting Regression
=====================================================
This example shows how quantile regression can be used
to create prediction intervals.
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import GradientBoostingRegressor
np.random.seed(1)
def f(x):
"""The function to predict."""
return x * np.sin(x)
#----------------------------------------------------------------------
# First the noiseless case
X = np.atleast_2d(np.random.uniform(0, 10.0, size=100)).T
X = X.astype(np.float32)
# Observations
y = f(X).ravel()
dy = 1.5 + 1.0 * np.random.random(y.shape)
noise = np.random.normal(0, dy)
y += noise
y = y.astype(np.float32)
# Mesh the input space for evaluations of the real function, the prediction and
# its MSE
xx = np.atleast_2d(np.linspace(0, 10, 1000)).T
xx = xx.astype(np.float32)
alpha = 0.95
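# The same model is fit three times below: with the quantile loss at
# alpha=0.95 (upper bound), at 1-alpha=0.05 (lower bound), and with the
# least-squares loss (central prediction), which together give the 90%
# prediction interval drawn in the figure.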
clf = GradientBoostingRegressor(loss='quantile', alpha=alpha,
n_estimators=250, max_depth=3,
learning_rate=.1, min_samples_leaf=9,
min_samples_split=9)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_upper = clf.predict(xx)
clf.set_params(alpha=1.0 - alpha)
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_lower = clf.predict(xx)
clf.set_params(loss='ls')
clf.fit(X, y)
# Make the prediction on the meshed x-axis
y_pred = clf.predict(xx)
# Plot the function, the prediction and the 90% prediction interval obtained
# from the two quantile models
fig = plt.figure()
plt.plot(xx, f(xx), 'g:', label=u'$f(x) = x\,\sin(x)$')
plt.plot(X, y, 'b.', markersize=10, label=u'Observations')
plt.plot(xx, y_pred, 'r-', label=u'Prediction')
plt.plot(xx, y_upper, 'k-')
plt.plot(xx, y_lower, 'k-')
plt.fill(np.concatenate([xx, xx[::-1]]),
np.concatenate([y_upper, y_lower[::-1]]),
alpha=.5, fc='b', ec='None', label='90% prediction interval')
plt.xlabel('$x$')
plt.ylabel('$f(x)$')
plt.ylim(-10, 20)
plt.legend(loc='upper left')
plt.show()
| bsd-3-clause |
atantet/ergoPack | example/numericalFP/numericalFP_Hopf.py | 1 | 5115 | import numpy as np
import pylibconfig2
from scipy import sparse
from scipy.sparse import linalg
import matplotlib.pyplot as plt
from matplotlib import cm
from ergoNumAna import ChangCooper
readEigVal = False
#readEigVal = True
def hopf(x, mu, omega):
f = np.empty((2,))
f[0] = x[0] * (mu - (x[0]**2 + x[1]**2)) - omega*x[1]
f[1] = x[1] * (mu - (x[0]**2 + x[1]**2)) + omega*x[0]
return f
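# Note: this is the normal form of the supercritical Hopf bifurcation written
# in Cartesian coordinates, i.e. dz/dt = (mu + i*omega) z - |z|^2 z with
# z = x[0] + i*x[1]; for mu > 0 the deterministic flow has a stable limit
# cycle of radius sqrt(mu).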
# Get model
omega = 1.
#q = 0.5
#q = 0.75
#q = 1.
#q = 1.25
#q = 1.5
#q = 1.75
#q = 2.
#q = 2.25
#q = 2.5
#q = 2.75
#q = 3.
#q = 3.25
#q = 3.5
#q = 3.75
q = 4.
muRng = np.arange(-10, 15., 0.1)
k0 = 0
#muRng = np.arange(6.6, 15., 0.1)
#k0 = 166
#muRng = np.arange(-4, 2, 0.1)
#k0 = 60
#muRng = np.arange(2, 8, 0.1)
#k0 = 120
#muRng = np.arange(8, 15, 0.1)
#k0 = 180
#muRng = np.arange(5., 10., 0.1)
#k0 = 150
#muRng = np.array([8.])
#k0 = 180
# Grid definition
dim = 2
nx0 = 100
#nx0 = 200
# give limits for the size of the periodic orbit
# at maximum value of control parameter (when noise
# effects transversally are small)
xlim = np.ones((dim,)) * np.sqrt(15) * 2
# Number of eigenvalues
nev = 100
tol = 1.e-6
B = np.eye(dim) * q
# Get standard deviations
Q = np.dot(B, B.T)
# Get grid points and steps
x = []
dx = np.empty((dim,))
nx = np.ones((dim,), dtype=int) * nx0
for d in np.arange(dim):
x.append(np.linspace(-xlim[d], xlim[d], nx[d]))
dx[d] = x[d][1] - x[d][0]
N = np.prod(nx)
idx = np.indices(nx).reshape(dim, -1)
X = np.meshgrid(*x, indexing='ij')
points = np.empty((dim, N))
for d in np.arange(dim):
points[d] = X[d].flatten()
alpha = 0.0
levels = 20
fs_default = 'x-large'
fs_latex = 'xx-large'
fs_xlabel = fs_default
fs_ylabel = fs_default
fs_xticklabels = fs_default
fs_yticklabels = fs_default
fs_legend_title = fs_default
fs_legend_labels = fs_default
fs_cbar_label = fs_default
#figFormat = 'png'
figFormat = 'eps'
dpi = 300
msize = 32
bbox_inches = 'tight'
plt.rc('font',**{'family':'serif'})
print 'For q = ', q
for k in np.arange(muRng.shape[0]):
mu = muRng[k]
print 'For mu = ', mu
if mu < 0:
signMu = 'm'
else:
signMu = 'p'
postfix = '_nx%d_k%03d_mu%s%02d_q%03d' \
% (nx0, k0 + k, signMu, int(round(np.abs(mu) * 10)), int(round(q * 100)))
if not readEigVal:
# Define drift
def drift(x):
return hopf(x, mu, omega)
# Get discretized Fokker-Planck operator
print 'Discretizing Fokker-Planck operator'
FPO = ChangCooper(points, nx, dx, drift, Q)
print 'Solving eigenvalue problem'
(w, v) = linalg.eigs(FPO, k=nev, which='LR', tol=tol)
isort = np.argsort(-w.real)
w = w[isort]
v = v[:, isort]
rho0 = v[:, 0].real
rho0 /= rho0.sum()
rho0_tile = np.tile(rho0, (dim, 1))
meanPoints = (points * rho0_tile).sum(1)
stdPoints = np.sqrt(((points - np.tile(meanPoints, (N, 1)).T)**2 * rho0_tile).sum(1))
print 'Mean points = ', meanPoints
print 'Std points = ', stdPoints
print 'Saving eigenvalues'
np.savetxt('../results/numericalFP/w_hopf%s.txt' % postfix, w)
np.savetxt('../results/numericalFP/statDist_hopf%s.txt' % postfix, rho0)
else:
print 'Reading eigenvalues'
srcFile = '../results/numericalFP/w_hopf%s.txt' % postfix
fp = open(srcFile, 'r')
w = np.empty((nev,), dtype=complex)
for ev in np.arange(nev):
line = fp.readline()
line = line.replace('+-', '-')
w[ev] = complex(line)
rho0 = np.loadtxt('../results/numericalFP/statDist_hopf%s.txt' % postfix)
print 'Plotting'
fig = plt.figure()
#fig.set_visible(False)
ax = fig.add_subplot(111)
ax.scatter(w.real, w.imag, edgecolors='face')
ax.set_xlim(-30, 0.1)
ax.set_ylim(-10, 10)
ax.text(-29, -9, r'$\mu = %.1f$' % mu, fontsize='xx-large')
fig.savefig('../results/plot/numericalFP/numFP_hopf%s.%s' \
% (postfix, figFormat), bbox_inches='tight', dpi=300)
fig = plt.figure()
ax = fig.add_subplot(111)
vect = rho0.copy()
vecAlpha = vect[vect != 0]
if alpha > 0:
vmax = np.sort(vecAlpha)[int((1. - alpha) \
* vecAlpha.shape[0])]
vect[vect > vmax] = vmax
else:
vmax = np.max(vect)
h = ax.contourf(X[0].T, X[1].T, vect.reshape(nx), levels,
cmap=cm.hot_r, vmin=0., vmax=vmax)
ax.set_xlim(X[0][:, 0].min(), X[0][:, 0].max())
ax.set_ylim(X[1][0].min(), X[1][0].max())
#cbar = plt.colorbar(h)
ax.set_xlabel(r'$x$', fontsize=fs_latex)
ax.set_ylabel(r'$y$', fontsize=fs_latex)
# plt.setp(cbar.ax.get_yticklabels(), fontsize=fs_yticklabels)
plt.setp(ax.get_xticklabels(), fontsize=fs_xticklabels)
plt.setp(ax.get_yticklabels(), fontsize=fs_yticklabels)
ax.text(-7, -7, r'$\mu = %.1f$' % mu, fontsize='xx-large')
fig.savefig('../results/plot/numericalFP/statDist_hopf%s.%s' \
% (postfix, figFormat), bbox_inches='tight', dpi=300)
plt.close()
| gpl-3.0 |
spallavolu/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants bicluster with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
imaculate/scikit-learn | examples/preprocessing/plot_function_transformer.py | 158 | 1993 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
lw = 0
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
plt.figure()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
lw=lw,
s=60
)
plt.show()
| bsd-3-clause |
Axelrod-Python/Axelrod-fingerprint | update_fingerprints.py | 1 | 8815 | """
A script to obtain the Ashlock Fingerprints of all strategies in the Axelrod
library.
This writes a hash of the source code of each strategy to file: db.csv.
If the source code of a strategy changes **or** a new strategy is introduced
then the fingerprint is regenerated for that strategy.
"""
import inspect
import hashlib
import csv
import string
import numpy as np
import matplotlib.pyplot as plt
import axelrod as axl
def hash_strategy(strategy):
"""
Hash the source code of a strategy
"""
try:
source_code = "".join(inspect.getsourcelines(strategy)[0])
except OSError: # Some classes are dynamically created
source_code = "".join(inspect.getsourcelines(strategy.strategy)[0])
hash_object = hashlib.md5(source_code.encode("utf-8"))
hashed_source = hash_object.hexdigest()
return hashed_source
def write_strategy_to_db(strategy, filename="db.csv", fingerprint="Ashlock"):
"""
Write the hash of a strategy to the db
"""
hashed_source = hash_strategy(strategy)
with open(filename, "a") as db:
try:
db.write(
"{},{},{}\n".format(
strategy.original_name, fingerprint, hashed_source
)
)
except AttributeError:
db.write(
"{},{},{}\n".format(strategy.name, fingerprint, hashed_source)
)
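# Each call appends one "name,fingerprint,hash" row, e.g.
# "Tit For Tat,Ashlock,<md5 hexdigest of the strategy source>" (hash value
# illustrative); read_db() below keys the table on the (name, fingerprint) pair.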
def read_db(filename="db.csv"):
"""
Read filename and return a dictionary mapping (strategy name, fingerprint type)
pairs to the hash of the strategy's source code
"""
with open(filename, "r") as db:
csvreader = csv.reader(db)
str_to_hash = {(row[0], row[1]): row[2] for row in csvreader}
return str_to_hash
def create_db(filename="db.csv"):
"""
Creates an empty db.csv file
"""
with open(filename, "w"):
pass
def write_data_to_file(fp, filename):
"""
Write the fingerprint data to a file.
"""
columns = ["x", "y", "score"]
with open(filename, "w") as f:
w = csv.writer(f)
w.writerow(columns)
for key, value in fp.data.items():
w.writerow([key.x, key.y, value])
def obtain_fingerprint(
strategy, turns, repetitions, probe=axl.TitForTat, processes=1
):
"""
Obtain the fingerprint for a given strategy and save the figure to the
assets dir
"""
fp = axl.AshlockFingerprint(strategy, probe)
fp.fingerprint(
turns=turns,
repetitions=repetitions,
progress_bar=False,
processes=processes,
)
plt.figure()
fp.plot()
try:
name = strategy.original_name
except AttributeError:
name = strategy.name
plt.tight_layout()
plt.savefig(
"assets/{}.png".format(format_filename(name)), bbox_inches="tight"
)
write_data_to_file(fp, "assets/{}.csv".format(format_filename(name)))
def obtain_transitive_fingerprint(strategy, turns, repetitions, processes=1):
"""
Obtain the transitive fingerprint
for a given strategy and save the figure to the assets dir
"""
fp = axl.TransitiveFingerprint(strategy, number_of_opponents=30)
fp.fingerprint(
turns=turns,
repetitions=repetitions,
progress_bar=False,
processes=processes,
)
plt.figure()
fp.plot()
try:
name = strategy.original_name
except AttributeError:
name = strategy.name
plt.tight_layout()
plt.savefig(
"assets/transitive_{}.png".format(format_filename(name)),
bbox_inches="tight",
)
np.savetxt(
"assets/transitive_{}.csv".format(format_filename(name)), fp.data
)
def obtain_transitive_fingerprint_v_short(
strategy, turns, repetitions, processes=1
):
"""
Obtain the transitive fingerprint against short run time
for a given strategy and save the figure to the assets dir
"""
short_run_time = [s() for s in axl.short_run_time_strategies]
fp = axl.TransitiveFingerprint(strategy, opponents=short_run_time)
fp.fingerprint(
turns=turns,
repetitions=repetitions,
progress_bar=False,
processes=processes,
)
plt.figure()
fp.plot(display_names=True)
try:
name = strategy.original_name
except AttributeError:
name = strategy.name
plt.tight_layout()
plt.savefig(
"assets/transitive_v_short_{}.png".format(format_filename(name)),
bbox_inches="tight",
)
np.savetxt(
"assets/transitive_v_short_{}.csv".format(format_filename(name)),
fp.data,
)
def format_filename(s):
"""
Take a string and return a valid filename constructed from the string.
Uses a whitelist approach: any characters not present in valid_chars are
removed. Also spaces are replaced with underscores.
Note: this method may produce invalid filenames such as ``, `.` or `..`
When I use this method I prepend a date string like '2009_01_15_19_46_32_'
and append a file extension like '.txt', so I avoid the potential of using
an invalid filename.
Borrowed from https://gist.github.com/seanh/93666
"""
valid_chars = "-_.() {}{}".format(string.ascii_letters, string.digits)
filename = "".join(c for c in s if c in valid_chars)
filename = filename.replace(" ", "_")
return filename
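# Hypothetical examples of the whitelist behaviour (not exercised directly in
# this script, shown for clarity):
#     format_filename("Tit For Tat")     -> "Tit_For_Tat"
#     format_filename("ZD-GTFT-2: 0.25") -> "ZD-GTFT-2_0.25"   (the ':' is dropped)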
def write_markdown(strategy):
"""
Write a markdown section of a strategy.
"""
try:
name = strategy.original_name
except AttributeError:
name = strategy.name
markdown = """
## {0}
![fingerprint of {0}](./assets/{1}.png)
[data (csv)](./assets/{1}.csv)
![Transitive fingerprint of {0}](./assets/transitive_{1}.png)
[data (csv)](./assets/transitive_{1}.csv)
![Transitive fingerprint of {0} against short run time](./assets/transitive_v_short_{1}.png)
[data (csv)](./assets/transitive_v_short_{1}.csv)
""".format(
name, format_filename(name)
)
return markdown
def main(
turns,
repetitions,
transitive_turns,
transitive_repetitions,
transitive_v_short_turns,
transitive_v_short_repetitions,
processes,
):
"""
    Fingerprint all strategies; if a strategy has already been fingerprinted it
    does not get rerun.
"""
version = axl.__version__
markdown = """# Ashlock and transitive fingerprints
See:
[axelrod.readthedocs.io/en/latest/tutorials/further_topics/fingerprinting.html#fingerprinting](http://axelrod.readthedocs.io/en/latest/tutorials/further_topics/fingerprinting.html#fingerprinting)
All strategies included from Axelrod version {}.
This README.md file is autogenerated by running:
```
$ python update_fingerprints.py
```
Each individual fingerprint can be obtained by running:
```python
import axelrod as axl
fp = axl.AshlockFingerprint(strategy, probe)
fp.fingerprint(turns={}, repetitions={})
fp.plot()
```
# Axelrod library fingerprints
""".format(
version, turns, repetitions
)
try:
db = read_db()
except FileNotFoundError:
create_db()
db = read_db()
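    # For each strategy and fingerprint type below, recompute only when the
    # strategy is new to the db or its stored source hash no longer matches.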
for strategy in axl.short_run_time_strategies:
name = strategy.name
signature = hash_strategy(strategy)
fp = "Ashlock"
if (name, fp) not in db or db[name, fp] != signature:
obtain_fingerprint(
strategy, turns, repetitions, processes=processes
)
write_strategy_to_db(strategy, fingerprint=fp)
fp = "Transitive"
if (name, fp) not in db or db[name, fp] != signature:
obtain_transitive_fingerprint(
strategy,
transitive_turns,
transitive_repetitions,
processes=processes,
)
write_strategy_to_db(strategy, fingerprint=fp)
fp = "Transitive_v_short"
if (name, fp) not in db or db[name, fp] != signature:
obtain_transitive_fingerprint_v_short(
strategy,
transitive_v_short_turns,
transitive_v_short_repetitions,
processes=processes,
)
write_strategy_to_db(strategy, fingerprint=fp)
markdown += write_markdown(strategy)
with open("README.md", "w") as outfile:
outfile.write(markdown)
if __name__ == "__main__":
turns, repetitions = 200, 20
transitive_turns, transitive_repetitions = 200, 20
transitive_v_short_turns, transitive_v_short_repetitions = 200, 20
processes = 20
main(
turns=turns,
repetitions=repetitions,
transitive_turns=transitive_turns,
transitive_repetitions=transitive_repetitions,
transitive_v_short_turns=transitive_v_short_turns,
transitive_v_short_repetitions=transitive_v_short_repetitions,
processes=processes,
)
| mit |
hwp-kiel/opencali | src/ui/mplwidget.py | 1 | 1403 | # Python Qt4 bindings for GUI objects
from PyQt4 import QtGui
# import the Qt4Agg FigureCanvas object, which binds Figure to the
# Qt4Agg backend. It also inherits from QWidget
from matplotlib.backends.backend_qt4agg \
import FigureCanvasQTAgg as FigureCanvas
# Matplotlib Figure object
from matplotlib.figure import Figure
class MplCanvas(FigureCanvas):
"""Class to represent the FigureCanvas widget"""
def __init__(self):
# setup Matplotlib Figure and Axis
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
# initialization of the canvas
FigureCanvas.__init__(self, self.fig)
# we define the widget as expandable
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
# notify the system of updated policy
FigureCanvas.updateGeometry(self)
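# Note: because the Qt4Agg FigureCanvas is itself a QWidget, MplCanvas can be
# added to any Qt layout like an ordinary widget, which is exactly what
# MplWidget below does with a QVBoxLayout.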
class MplWidget(QtGui.QWidget):
"""Widget defined in Qt Designer"""
def __init__(self, parent = None):
# initialization of Qt MainWindow widget
QtGui.QWidget.__init__(self, parent)
# set the canvas to the Matplotlib widget
self.canvas = MplCanvas()
# create a vertical box layout
self.vbl = QtGui.QVBoxLayout()
# add mpl widget to vertical box
self.vbl.addWidget(self.canvas)
        # set the layout to the vertical box
self.setLayout(self.vbl) | gpl-2.0 |
Evensgn/MNIST-learning | mnist_svm.py | 1 | 1201 | import numpy as np
import matplotlib.pyplot as plt
GRAY_SCALE_RANGE = 255
import pickle
data_filename = 'data_deskewed.pkl'
print('Loading data from file \'' + data_filename + '\' ...')
with open(data_filename, 'rb') as f:
train_labels = pickle.load(f)
train_images = pickle.load(f)
test_labels = pickle.load(f)
test_images = pickle.load(f)
num_pixel = pickle.load(f)
print('Data loading complete.')
train_images = np.array(train_images)
train_images.resize(train_images.size // num_pixel, num_pixel)
test_images = np.array(test_images)
test_images.resize(test_images.size // num_pixel, num_pixel)
test_labels = np.array(test_labels)
train_labels = np.array(train_labels)
## normalization
train_images = train_images / GRAY_SCALE_RANGE
test_images = test_images / GRAY_SCALE_RANGE
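# Scaling pixel intensities from [0, 255] to [0, 1] keeps all features on the
# same scale; SVM margins are sensitive to feature magnitude, so this helps the
# linear kernel find a sensible decision boundary.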
from sklearn import svm, metrics
# clf = svm.SVC(gamma = 0.001)
clf = svm.SVC(kernel = 'linear')
clf.fit(train_images[:1000], train_labels[:1000])
prediction = clf.predict(test_images)
print("Classification report for classifier %s:\n%s\n"
% (clf, metrics.classification_report(test_labels, prediction)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(test_labels, prediction)) | mit |
dr-jpk/saltefficiency | weekly/weekly_summary_plots.py | 1 | 8536 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 10:06:20 2015
@author: jpk
ToDo: automate the subsystems check. A query that checks all the subsystems in
case things change in the future should prevent issues with the pie chart
colours
"""
import sys
import os
import pandas as pd
import pandas.io.sql as psql
import MySQLdb
import matplotlib.pyplot as pl
import report_queries as rq
import numpy as np
import matplotlib.dates as mdates
def priority_breakdown_pie_chart(x, ds, dirname='./logs/'):
'''
make a pie chart from the dataframe
'''
temp = list(x['Priority'])
no_blocks = map(int, list(x['No. Blocks']))
labels = ['P'+str(temp[i])+' - ' + str(no_blocks[i]) for i in range(0,len(temp))]
values = list(x['Tsec'])
# set colours for the priorities
colours = ['b','c','g','m','r']
fig = pl.figure(facecolor='w', figsize=[5, 5])
ax = fig.add_subplot(111)
    ax.set_aspect(1)  # call the method; assigning to it has no effect on the axes
pie_wedge_collection = ax.pie(values,
colors=colours,
pctdistance=0.8,
radius = 0.95,
autopct='%1.1f%%',
textprops = {'fontsize':10,
'color':'w'},
wedgeprops = {'edgecolor':'white'})
ax.legend(labels=labels, frameon=False, loc=(-0.15,0.7), fontsize=8)
title_txt = 'Weekly Priority Breakdown - ' + str(int(x['No. Blocks'].sum())) + ' Blocks Total' + '\n {}'.format(ds)
ax.set_title(title_txt, fontsize=12)
filename = dirname+'priority_breakdown_pie_chart_' +'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(filename, dpi=100)
# pl.show()
def weekly_total_time_breakdown_pie_chart(x, ds, dirname='./logs/'):
labels = ['Science - {}'.format(x['ScienceTime'][0]),
'Engineering - {}'.format(x['EngineeringTime'][0]),
'Weather - {}'.format(x['TimeLostToWeather'][0]),
'Problems - {}'.format(x['TimeLostToProblems'][0])]
values = [int(x['Science']),
int(x['Engineering']),
int(x['Weather']),
int(x['Problems'])]
colours = ['b','c','g','r']
fig = pl.figure(facecolor='w', figsize=[5, 5])
ax = fig.add_subplot(111)
    ax.set_aspect(1)  # call the method; assigning to it has no effect on the axes
pie_wedge_collection = ax.pie(values,
colors=colours,
pctdistance=0.8,
radius = 0.95,
autopct='%1.1f%%',
textprops = {'fontsize':10,
'color':'w'},
wedgeprops = {'edgecolor':'white'})
ax.legend(labels=labels, frameon=False, loc=(-0.15,0.8), fontsize=8)
title_txt = 'Weekly Time Breakdown - {} Total\n{}'.format(x['NightLength'][0], ds)
ax.set_title(title_txt, fontsize=12)
filename = 'weekly_total_time_breakdown_pie_chart_' + '-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(dirname+filename, dpi=100)
# pl.show()
def weekly_subsystem_breakdown_pie_chart(x, y, col_dict, ds, dirname='./logs/'):
subsystem = list(x['SaltSubsystem'])
time = list(x['TotalTime'])
labels = [subsystem[i] + ' - ' + time[i] for i in range(0,len(subsystem))]
values = list(x['Time'])
colours = [col_dict[i] for i in subsystem]
fig = pl.figure(facecolor='w', figsize=[5, 5])
ax = fig.add_subplot(111)
    ax.set_aspect(1)  # call the method; assigning to it has no effect on the axes
pie_wedge_collection = ax.pie(values,
colors=colours,
pctdistance=0.8,
radius = 0.95,
autopct='%1.1f%%',
textprops = {'fontsize':10,
'color':'k'},
wedgeprops = {'edgecolor':'white'})
ax.legend(labels=labels, frameon=False, loc=(-0.15,0.65), fontsize=8)
title_txt = 'Weekly Problems Breakdown - {}\n{}'.format(y['TotalTime'][0], ds)
ax.set_title(title_txt, fontsize=12)
filename = 'weekly_subsystem_breakdown_pie_chart_'+'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(dirname+filename, dpi=100)
# pl.show()
def weekly_time_breakdown(x, ds, dirname='./logs/'):
'''
    produce a stacked bar chart of the time breakdown per day for the
    past week.
'''
fig = pl.figure(figsize=(10,4),facecolor='w')
ax = fig.add_subplot(111)
width = 0.55
ax.grid(which='major', axis='y')
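    # Each of the four bar series below is stacked on the previous ones by
    # passing their running total as `bottom`, so every day's column shows
    # science, engineering, weather and problem time on top of each other.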
# science time per day
s = ax.bar(x['Date'],
x['Science'],
width,
color = 'b',
edgecolor='w')
# engineering time per day
e = ax.bar(x['Date'],
x['Engineering'],
width,
bottom = x['Science'],
color = 'c',
edgecolor='w')
# weather time per day
w = ax.bar(x['Date'],
x['Weather'],
width,
bottom = x['Science'] + x['Engineering'],
color = 'g',
edgecolor='w')
# problem time per day
p = ax.bar(x['Date'],
x['Problems'],
width,
bottom = x['Science'] + x['Engineering'] + x['Weather'],
color = 'r',
edgecolor='w')
ax.set_ylabel('Hours', fontsize=11)
ax.set_xlabel('Date', fontsize=11)
fig.legend((s[0], e[0], w[0], p[0]),
('Science Time',
'Engineering Time',
'Time lost to Weather',
'Time lost to Problems'),
frameon=False,
fontsize=10,
loc=(0.0,0.70))
title_txt = 'Weekly Time Breakdown - {}'.format(ds)
ax.set_title(title_txt, fontsize=11)
ax.xaxis_date()
date_formatter = mdates.DateFormatter('%a \n %Y-%m-%d')
ax.xaxis.set_major_formatter(date_formatter)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(8)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(8)
fig.autofmt_xdate(rotation=0, ha = 'left')
fig.subplots_adjust(left=0.22, bottom=0.20, right=0.96, top=None,
wspace=None, hspace=None)
pl.autoscale()
filename = 'weekly_time_breakdown_'+'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(dirname+filename, dpi=100)
# pl.show()
if __name__=='__main__':
# set the colours for all the subsystems:
subsystems_list = ['BMS', 'DOME', 'TC', 'PMAS', 'SCAM', 'TCS', 'STRUCT',
'TPC', 'HRS', 'PFIS','Proposal', 'Operations',
'ELS', 'ESKOM']
cmap = pl.cm.jet
colour_map = cmap(np.linspace(0.0, 1.0, len(subsystems_list)))
col_dict = {}
for i in range(0, len(subsystems_list)):
col_dict[subsystems_list[i]] = colour_map[i]
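    # col_dict maps each subsystem name to a fixed RGBA row of the jet colormap,
    # so a given subsystem keeps the same wedge colour from week to week; the
    # ToDo in the module docstring is about keeping subsystems_list in sync with
    # whatever the database actually reports.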
# open mysql connection to the sdb
mysql_con = MySQLdb.connect(host='sdb.cape.saao.ac.za',
port=3306,user=os.environ['SDBUSER'],
passwd=os.environ['SDBPASS'], db='sdb')
obsdate = sys.argv[1]
date = '{}-{}-{}'.format(obsdate[0:4], obsdate[4:6], obsdate[6:8])
interval = sys.argv[2]
# use the connection to get the required data: _d
dr_d = rq.date_range(mysql_con, date, interval=interval)
wpb_d = rq.weekly_priority_breakdown(mysql_con, date, interval=interval)
wtb_d = rq.weekly_time_breakdown(mysql_con, date, interval=interval)
wttb_d = rq.weekly_total_time_breakdown(mysql_con, date, interval=interval)
wsb_d = rq.weekly_subsystem_breakdown(mysql_con, date, interval=interval)
wsbt_d = rq.weekly_subsystem_breakdown_total(mysql_con, date, interval=interval)
wtb_d = rq.weekly_time_breakdown(mysql_con, date, interval=interval)
date_string = '{} - {}'.format(dr_d['StartDate'][0], dr_d['EndDate'][0])
# testing the pie_chart method
priority_breakdown_pie_chart(wpb_d, date_string)
weekly_total_time_breakdown_pie_chart(wttb_d, date_string)
weekly_subsystem_breakdown_pie_chart(wsb_d, wsbt_d, col_dict, date_string)
weekly_time_breakdown(wtb_d, date_string)
mysql_con.close()
| bsd-3-clause |
amueller/advanced_training | plots/plot_interactive_tree.py | 1 | 2695 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals.six import StringIO # doctest: +SKIP
from sklearn.tree import export_graphviz
from scipy.misc import imread
from scipy import ndimage
import os
import re
GRAPHVIS_PATH = r"C:\Program Files (x86)\Graphviz2.38\bin"
if GRAPHVIS_PATH not in os.environ['PATH']:
os.environ['PATH'] += ";" + GRAPHVIS_PATH
X, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=50)
def tree_image(tree, fout=None):
try:
import graphviz
except ImportError:
# make a hacky white plot
x = np.ones((10, 10))
x[0, 0] = 0
return x
dot_data = StringIO()
export_graphviz(tree, out_file=dot_data, max_depth=3, impurity=False)
data = dot_data.getvalue()
#data = re.sub(r"gini = 0\.[0-9]+\\n", "", dot_data.getvalue())
data = re.sub(r"samples = [0-9]+\\n", "", data)
data = re.sub(r"\\nsamples = [0-9]+", "", data)
data = re.sub(r"value", "counts", data)
graph = graphviz.Source(data, format="png")
if fout is None:
fout = "tmp"
graph.render(fout)
return imread(fout + ".png")
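# Note on the regex clean-up in tree_image: the substitutions strip the
# "samples = N" annotations from the exported dot source and relabel "value"
# as "counts", so the rendered nodes show per-class counts only.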
def plot_tree(max_depth=1):
fig, ax = plt.subplots(1, 2, figsize=(15, 7))
h = 0.02
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
if max_depth != 0:
tree = DecisionTreeClassifier(max_depth=max_depth, random_state=1).fit(X, y)
Z = tree.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
faces = tree.tree_.apply(np.c_[xx.ravel(), yy.ravel()].astype(np.float32))
faces = faces.reshape(xx.shape)
border = ndimage.laplace(faces) != 0
ax[0].contourf(xx, yy, Z, alpha=.4)
ax[0].scatter(xx[border], yy[border], marker='.', s=1)
ax[0].set_title("max_depth = %d" % max_depth)
img = tree_image(tree)
if img is not None:
ax[1].imshow(img)
ax[1].axis("off")
else:
ax[1].set_visible(False)
else:
ax[0].set_title("data set")
ax[1].set_visible(False)
ax[0].scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)
ax[0].set_xlim(x_min, x_max)
ax[0].set_ylim(y_min, y_max)
ax[0].set_xticks(())
ax[0].set_yticks(())
def plot_tree_interactive():
from IPython.html.widgets import interactive, IntSlider
slider = IntSlider(min=0, max=8, step=1, value=0)
return interactive(plot_tree, max_depth=slider)
| bsd-2-clause |
nlhepler/freetype-py3 | examples/glyph-vector-2.py | 1 | 3414 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Show how to access glyph outline description.
'''
from freetype import *
if __name__ == '__main__':
import numpy
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
face = Face(b'./Vera.ttf')
face.set_char_size( 32*64 )
face.load_char('g')
slot = face.glyph
bitmap = face.glyph.bitmap
width = face.glyph.bitmap.width
rows = face.glyph.bitmap.rows
pitch = face.glyph.bitmap.pitch
data = []
for i in range(rows):
data.extend(bitmap.buffer[i*pitch:i*pitch+width])
Z = numpy.array(data,dtype=numpy.ubyte).reshape(rows, width)
outline = slot.outline
points = numpy.array(outline.points, dtype=[('x',float), ('y',float)])
x, y = points['x'], points['y']
figure = plt.figure(figsize=(8,10))
axis = figure.add_subplot(111)
#axis.scatter(points['x'], points['y'], alpha=.25)
start, end = 0, 0
VERTS, CODES = [], []
# Iterate over each contour
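    # In a FreeType outline, bit 0 of each point tag marks an on-curve point;
    # points with the bit clear are quadratic Bezier control points. The loop
    # below splits every contour into segments at on-curve points and converts
    # them to matplotlib Path codes: LINETO for straight two-point segments,
    # CURVE3 for conic ones, inserting the implied on-curve midpoints between
    # consecutive off-curve control points.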
for i in range(len(outline.contours)):
end = outline.contours[i]
points = outline.points[start:end+1]
points.append(points[0])
tags = outline.tags[start:end+1]
tags.append(tags[0])
segments = [ [points[0],], ]
for j in range(1, len(points) ):
segments[-1].append(points[j])
if tags[j] & (1 << 0) and j < (len(points)-1):
segments.append( [points[j],] )
verts = [points[0], ]
codes = [Path.MOVETO,]
for segment in segments:
if len(segment) == 2:
verts.extend(segment[1:])
codes.extend([Path.LINETO])
elif len(segment) == 3:
verts.extend(segment[1:])
codes.extend([Path.CURVE3, Path.CURVE3])
else:
verts.append(segment[1])
codes.append(Path.CURVE3)
for i in range(1,len(segment)-2):
A,B = segment[i], segment[i+1]
C = ((A[0]+B[0])/2.0, (A[1]+B[1])/2.0)
verts.extend([ C, B ])
codes.extend([ Path.CURVE3, Path.CURVE3])
verts.append(segment[-1])
codes.append(Path.CURVE3)
VERTS.extend(verts)
CODES.extend(codes)
start = end+1
# Draw glyph
path = Path(VERTS, CODES)
glyph = patches.PathPatch(path, fill = True, facecolor=(0.8,0.5,0.8), alpha=.25, lw=0)
glyph_outline = patches.PathPatch(path, fill = False, edgecolor='black', lw=3)
plt.imshow(Z, extent=[x.min(), x.max(),y.min(), y.max()],
interpolation='nearest', cmap = plt.cm.gray_r, vmin=0, vmax=400)
plt.xticks(numpy.linspace(x.min(), x.max(), Z.shape[1]+1), ())
plt.yticks(numpy.linspace(y.min(), y.max(), Z.shape[0]+1), ())
plt.grid(color='k', linewidth=1, linestyle='-')
axis.add_patch(glyph)
axis.add_patch(glyph_outline)
axis.set_xlim(x.min(), x.max())
axis.set_ylim(y.min(), y.max())
plt.savefig('test.pdf')
plt.show()
| bsd-3-clause |
willyd/fast-rcnn | tools/demo.py | 22 | 5446 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from utils.cython_nms import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'vgg16': ('VGG16',
'vgg16_fast_rcnn_iter_40000.caffemodel'),
'vgg_cnn_m_1024': ('VGG_CNN_M_1024',
'vgg_cnn_m_1024_fast_rcnn_iter_40000.caffemodel'),
'caffenet': ('CaffeNet',
'caffenet_fast_rcnn_iter_40000.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, image_name, classes):
"""Detect object classes in an image using pre-computed object proposals."""
    # Load pre-computed Selective Search object proposals
box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',
image_name + '_boxes.mat')
obj_proposals = sio.loadmat(box_file)['boxes']
# Load the demo image
im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im, obj_proposals)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
for cls in classes:
cls_ind = CLASSES.index(cls)
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
keep = np.where(cls_scores >= CONF_THRESH)[0]
cls_boxes = cls_boxes[keep, :]
cls_scores = cls_scores[keep]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
print 'All {} detections with p({} | box) >= {:.1f}'.format(cls, cls,
CONF_THRESH)
vis_detections(im, cls, dets, thresh=CONF_THRESH)
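# For each class, demo() keeps the proposals whose score is at or above
# CONF_THRESH, stacks the boxes with their scores, and prunes overlapping
# detections with non-maximum suppression (overlap threshold NMS_THRESH)
# before plotting.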
def parse_args():
"""Parse input arguments."""
parser = argparse.ArgumentParser(description='Train a Fast R-CNN network')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
prototxt = os.path.join(cfg.ROOT_DIR, 'models', NETS[args.demo_net][0],
'test.prototxt')
caffemodel = os.path.join(cfg.ROOT_DIR, 'data', 'fast_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/scripts/'
'fetch_fast_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/000004.jpg'
demo(net, '000004', ('car',))
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/001551.jpg'
demo(net, '001551', ('sofa', 'tvmonitor'))
plt.show()
| mit |
clawpack/clawpack-4.x | doc/sphinx/example-acoustics-1d/setplot_2.py | 2 | 2095 |
"""
Single figure and axes with two items
=======================================
Only the pressure q[0] is plotted.
In this example the line and points are plotted in different colors by
specifying a second item on the same axes.
"""
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
plotdata.clearfigures() # clear any old figures,axes,items data
# Figure for q[0]
plotfigure = plotdata.new_plotfigure(name='Pressure', figno=1)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = 'auto'
plotaxes.ylimits = [-.5,1.1]
plotaxes.title = 'Pressure'
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(name='line', plot_type='1d')
plotitem.plot_var = 0
plotitem.plotstyle = '-'
plotitem.color = 'b'
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(name='points', plot_type='1d')
plotitem.plot_var = 0
plotitem.plotstyle = 'o'
plotitem.color = '#ff00ff' # any color supported by matplotlib
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html'# pointer for index page
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 1 # layout of plots
plotdata.latex_framesperline = 2 # layout of plots
plotdata.latex_makepdf = True # also run pdflatex?
return plotdata
| bsd-3-clause |
spallavolu/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how the logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit-curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
return 1 / (1 + np.exp(-x))
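# model() is the logistic (sigmoid) curve sigma(t) = 1 / (1 + exp(-t)); applied
# to clf.coef_ * X_test + clf.intercept_ it gives the predicted probability of
# class 1 along the x axis.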
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
chungjjang80/FRETBursts | fretbursts/tests/test_burstlib.py | 1 | 40546 | #
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2014 Antonino Ingargiola <tritemio@gmail.com>
#
"""
Module containing automated unit tests for FRETBursts.
Running the tests requires `py.test`.
"""
from __future__ import division
from builtins import range, zip
from collections import namedtuple
import pytest
import numpy as np
try:
import matplotlib
except ImportError:
has_matplotlib = False # OK to run tests without matplotlib
else:
has_matplotlib = True
matplotlib.use('Agg') # but if matplotlib is installed, use Agg
try:
import numba
except ImportError:
has_numba = False
else:
has_numba = True
import fretbursts.background as bg
import fretbursts.burstlib as bl
import fretbursts.burstlib_ext as bext
from fretbursts import loader
from fretbursts import select_bursts
from fretbursts.ph_sel import Ph_sel
from fretbursts.phtools import phrates
if has_matplotlib:
import fretbursts.burst_plot as bplt
# data subdir in the notebook folder
DATASETS_DIR = u'notebooks/data/'
def _alex_process(d):
loader.alex_apply_period(d)
d.calc_bg(bg.exp_fit, time_s=30, tail_min_us=300)
d.burst_search(L=10, m=10, F=7)
def load_dataset_1ch(process=True):
fn = "0023uLRpitc_NTP_20dT_0.5GndCl.hdf5"
fname = DATASETS_DIR + fn
d = loader.photon_hdf5(fname)
if process:
_alex_process(d)
return d
def load_dataset_8ch():
fn = "12d_New_30p_320mW_steer_3.hdf5"
fname = DATASETS_DIR + fn
d = loader.photon_hdf5(fname)
d.calc_bg(bg.exp_fit, time_s=30, tail_min_us=300)
d.burst_search(L=10, m=10, F=7)
return d
def load_fake_pax():
fn = "0023uLRpitc_NTP_20dT_0.5GndCl.hdf5"
fname = DATASETS_DIR + fn
d = loader.photon_hdf5(fname)
d.add(ALEX=False, meas_type='PAX')
loader.alex_apply_period(d)
d.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto')
d.burst_search(L=10, m=10, F=6)
return d
@pytest.fixture(scope="module", params=[
load_dataset_1ch,
load_dataset_8ch,
])
def data(request):
load_func = request.param
d = load_func()
return d
@pytest.fixture(scope="module")
def data_8ch(request):
d = load_dataset_8ch()
return d
@pytest.fixture(scope="module")
def data_1ch(request):
d = load_dataset_1ch()
return d
##
# List comparison functions
#
def list_equal(list1, list2):
"""Test numerical equality of all the elements in the two lists.
"""
return np.all([val1 == val2 for val1, val2 in zip(list1, list2)])
def list_array_equal(list1, list2):
"""Test numerical equality between two lists of arrays.
"""
return np.all([np.all(arr1 == arr2) for arr1, arr2 in zip(list1, list2)])
def list_array_allclose(list1, list2):
"""Test float closeness (np.allclose) between two lists of arrays.
"""
return np.all([np.allclose(arr1, arr2) for arr1, arr2 in zip(list1, list2)])
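# For example (hypothetical): list_array_equal([np.arange(3)], [np.array([0, 1, 2])])
# is True, while list_array_allclose additionally tolerates floating-point
# rounding differences between the arrays.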
##
# Test functions
#
def test_bg_compatlayer_for_obsolete_attrs():
d = load_dataset_1ch(process=False)
attrs = ('bg_dd', 'bg_ad', 'bg_da', 'bg_aa',
'rate_m', 'rate_dd', 'rate_ad', 'rate_da', 'rate_aa')
for attr in attrs:
with pytest.raises(RuntimeError):
getattr(d, attr)
_alex_process(d)
for attr in attrs:
assert isinstance(getattr(d, attr), list)
def test_ph_times_compact(data_1ch):
"""Test calculation of ph_times_compact."""
def isinteger(x):
return np.equal(np.mod(x, 1), 0)
ich = 0
d = data_1ch
ph_d = d.get_ph_times(ph_sel=Ph_sel(Dex='DAem'))
ph_a = d.get_ph_times(ph_sel=Ph_sel(Aex='DAem'))
ph_dc = d.get_ph_times(ph_sel=Ph_sel(Dex='DAem'), compact=True)
ph_ac = d.get_ph_times(ph_sel=Ph_sel(Aex='DAem'), compact=True)
# Test that the difference of ph and ph_compact is multiple of
# the complementary excitation period duration
Dex_void = bl._excitation_width(d._D_ON_multich[ich], d.alex_period)
Aex_void = bl._excitation_width(d._A_ON_multich[ich], d.alex_period)
assert isinteger((ph_d - ph_dc) / Dex_void).all()
assert isinteger((ph_a - ph_ac) / Aex_void).all()
# Test that alternation histogram does not have "gaps" for ph_compact
bins = np.linspace(0, d.alex_period, num=101)
hist_dc, _ = np.histogram(ph_dc % d.alex_period, bins=bins)
hist_ac, _ = np.histogram(ph_ac % d.alex_period, bins=bins)
assert (hist_dc > 0).all()
assert (hist_ac > 0).all()
def test_time_min_max():
"""Test time_min and time_max for ALEX data."""
d = load_dataset_1ch(process=False)
ich = 0
assert d.time_max == d.ph_times_t[ich].max() * d.clk_p
assert d.time_min == d.ph_times_t[ich].min() * d.clk_p
del d._time_max, d._time_min
_alex_process(d)
assert d.time_max == d.ph_times_m[ich][-1] * d.clk_p
assert d.time_min == d.ph_times_m[ich][0] * d.clk_p
d.delete('ph_times_m')
del d._time_max, d._time_min
assert d.time_max == d.mburst[0].stop[-1] * d.clk_p
assert d.time_min == d.mburst[0].start[0] * d.clk_p
def test_time_min_max_multispot(data_8ch):
"""Test time_min and time_max for multi-spot data."""
d = data_8ch
assert d.time_max == max(t[-1] for t in d.ph_times_m) * d.clk_p
assert d.time_min == min(t[0] for t in d.ph_times_m) * d.clk_p
def test_aex_dex_ratio(data_1ch):
"""Test methods computing relative D and A alternation periods durations.
"""
d = data_1ch
Dx, Ax = d.D_ON, d.A_ON
a1 = d._aex_fraction()
a2 = (Ax[1] - Ax[0]) / (Ax[1] - Ax[0] + Dx[1] - Dx[0])
assert a1 == a2
r1 = d._aex_dex_ratio()
r2 = (Ax[1] - Ax[0]) / (Dx[1] - Dx[0])
assert r1 == r2
assert (a1 / (1 - a1)) == r1
def test_burst_size_pax():
d = load_fake_pax()
aex_dex_ratio, alpha_d = d._aex_dex_ratio(), 1 - d._aex_fraction()
nd, na = d.nd[0], d.na[0]
nda = d.nda[0]
naa = d.naa[0] - d.nar[0] * aex_dex_ratio
# Test burst size during Dex
b1 = d.burst_sizes_pax_ich(add_aex=False)
b2 = d.burst_sizes_ich(add_naa=False)
b3 = nd + na
assert (b1 == b2).all()
assert (b1 == b3).all()
# Test naa
naa2 = d.get_naa_corrected()
naa3 = d._get_naa_ich()
assert (naa == naa2).all()
assert (naa == naa3).all()
# Test add_naa
b1 = d.burst_sizes_ich(add_naa=True)
b2 = nd + na + naa
assert (b1 == b2).all()
# Test add_aex with no duty-cycle correction
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=False)
b2 = nd + na + nda + d.naa[0]
b3 = nd + na + nda + naa + na * aex_dex_ratio
assert np.allclose(b1, b2)
assert np.allclose(b1, b3)
# Test add_aex with duty-cycle correction
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True)
b2 = nd + na + nda + na * aex_dex_ratio + naa / alpha_d
assert np.allclose(b1, b2)
# Test add_aex with duty-cycle correction, donor_ref
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, donor_ref=True)
b2 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, donor_ref=False)
assert np.allclose(b1, b2)
# Test add_aex with duty-cycle correction, gamma, beta
gamma = 0.7
beta = 0.85
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=True)
b2 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=False)
assert np.allclose(b1 * gamma, b2)
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=False)
b2 = (gamma * (nd + nda) + na * (1 + aex_dex_ratio) +
naa / (alpha_d * beta))
assert np.allclose(b1, b2)
d.leakage = 0.1
nd, na = d.nd[0], d.na[0]
nda = d.nda[0]
naa = d.naa[0] - d.nar[0] * aex_dex_ratio
# Test add_aex with duty-cycle correction, gamma, beta
gamma = 0.7
beta = 0.85
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=True)
b2 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=False)
assert np.allclose(b1 * gamma, b2)
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=False)
b2 = (gamma * (nd + nda) + na * (1 + aex_dex_ratio) +
naa / (alpha_d * beta))
assert np.allclose(b1, b2)
def test_bg_calc(data):
"""Smoke test bg_calc() and test deletion of bg fields.
"""
data.calc_bg(bg.exp_fit, time_s=30, tail_min_us=300)
assert 'bg_auto_th_us0' not in data
assert 'bg_auto_F_bg' not in data
assert 'bg_th_us_user' in data
data.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto', F_bg=1.7)
assert 'bg_auto_th_us0' in data
assert 'bg_auto_F_bg' in data
assert 'bg_th_us_user' not in data
data.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto', F_bg=1.7,
fit_allph=False)
streams = [s for s in data.ph_streams if s != Ph_sel('all')]
bg_t = [np.sum(data.bg[s][ich] for s in streams) for ich in range(data.nch)]
assert list_array_equal(data.bg[Ph_sel('all')], bg_t)
def test_ph_streams(data):
sel = [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
if data.alternated:
sel.extend([Ph_sel(Aex='Aem'), Ph_sel(Aex='Dem')])
for s in sel:
assert s in data.ph_streams
def test_bg_from(data):
"""Test the method .bg_from() for all the ph_sel combinations.
"""
d = data
for sel in d.ph_streams:
bg = d.bg_from(ph_sel=sel)
assert list_array_equal(bg, d.bg[sel])
if not (data.alternated):
assert list_array_equal(d.bg_from(Ph_sel('all')),
d.bg_from(Ph_sel(Dex='DAem')))
return
bg_dd = d.bg_from(ph_sel=Ph_sel(Dex='Dem'))
bg_ad = d.bg_from(ph_sel=Ph_sel(Dex='Aem'))
bg = d.bg_from(ph_sel=Ph_sel(Dex='DAem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_dd, bg_ad)])
bg_aa = d.bg_from(ph_sel=Ph_sel(Aex='Aem'))
bg_da = d.bg_from(ph_sel=Ph_sel(Aex='Dem'))
bg = d.bg_from(ph_sel=Ph_sel(Aex='DAem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_aa, bg_da)])
bg = d.bg_from(ph_sel=Ph_sel(Dex='Dem', Aex='Dem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_dd, bg_da)])
bg = d.bg_from(ph_sel=Ph_sel(Dex='Aem', Aex='Aem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_ad, bg_aa)])
bg = d.bg_from(ph_sel=Ph_sel(Dex='DAem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_dd, bg_ad)])
bg = d.bg_from(ph_sel=Ph_sel(Dex='DAem', Aex='Aem'))
bg2 = [b1 + b2 + b3 for b1, b2, b3 in zip(bg_dd, bg_ad, bg_aa)]
assert list_array_equal(bg, bg2)
def test_iter_ph_times(data):
"""Test method .iter_ph_times() for all the ph_sel combinations.
"""
# TODO add all the ph_sel combinations like in test_bg_from()
d = data
assert list_array_equal(d.ph_times_m, d.iter_ph_times())
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Dem'))):
if d.alternated:
assert (ph == d.ph_times_m[ich][d.D_em[ich] * d.D_ex[ich]]).all()
else:
assert (ph == d.ph_times_m[ich][~d.A_em[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Aem'))):
if d.alternated:
assert (ph == d.ph_times_m[ich][d.A_em[ich] * d.D_ex[ich]]).all()
else:
assert (ph == d.ph_times_m[ich][d.A_em[ich]]).all()
if d.alternated:
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Aex='Dem'))):
assert (ph == d.ph_times_m[ich][d.D_em[ich] * d.A_ex[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Aex='Aem'))):
assert (ph == d.ph_times_m[ich][d.A_em[ich] * d.A_ex[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='DAem'))):
assert (ph == d.ph_times_m[ich][d.D_ex[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Aex='DAem'))):
assert (ph == d.ph_times_m[ich][d.A_ex[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Dem', Aex='Dem'))):
assert (ph == d.ph_times_m[ich][d.D_em[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Aem', Aex='Aem'))):
assert (ph == d.ph_times_m[ich][d.A_em[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(
Ph_sel(Dex='DAem', Aex='Aem'))):
mask = d.D_ex[ich] + d.A_em[ich] * d.A_ex[ich]
assert (ph == d.ph_times_m[ich][mask]).all()
else:
assert list_array_equal(d.iter_ph_times(),
d.iter_ph_times(Ph_sel(Dex='DAem')))
def test_get_ph_times_period(data):
for ich in range(data.nch):
data.get_ph_times_period(0, ich=ich)
data.get_ph_times_period(0, ich=ich, ph_sel=Ph_sel(Dex='Dem'))
def test_iter_ph_times_period(data):
d = data
for ich in range(data.nch):
for period, ph_period in enumerate(d.iter_ph_times_period(ich=ich)):
istart, iend = d.Lim[ich][period]
assert (ph_period == d.ph_times_m[ich][istart : iend + 1]).all()
ph_sel = Ph_sel(Dex='Dem')
mask = d.get_ph_mask(ich=ich, ph_sel=ph_sel)
for period, ph_period in enumerate(
d.iter_ph_times_period(ich=ich, ph_sel=ph_sel)):
istart, iend = d.Lim[ich][period]
ph_period_test = d.ph_times_m[ich][istart : iend + 1]
ph_period_test = ph_period_test[mask[istart : iend + 1]]
assert (ph_period == ph_period_test).all()
def test_burst_search_py_cy(data):
"""Test python and cython burst search with background-dependent threshold.
"""
data.burst_search(pure_python=True)
mburst1 = [b.copy() for b in data.mburst]
num_bursts1 = data.num_bursts
data.burst_search(pure_python=False)
assert np.all(num_bursts1 == data.num_bursts)
assert mburst1 == data.mburst
data.burst_search(L=30, pure_python=True)
mburst1 = [b.copy() for b in data.mburst]
num_bursts1 = data.num_bursts
data.burst_search(L=30, pure_python=False)
assert np.all(num_bursts1 == data.num_bursts)
assert mburst1 == data.mburst
def test_burst_search_constant_rates(data):
"""Test python and cython burst search with constant threshold."""
data.burst_search(min_rate_cps=50e3, pure_python=True)
assert (data.num_bursts > 0).all()
mburst1 = [b.copy() for b in data.mburst]
num_bursts1 = data.num_bursts
data.burst_search(min_rate_cps=50e3, pure_python=False)
assert (data.num_bursts > 0).all()
assert np.all(num_bursts1 == data.num_bursts)
assert mburst1 == data.mburst
def test_burst_search_L(data):
"""Test burst search with different L arguments."""
data.burst_search(L=10)
for bursts in data.mburst:
assert (bursts.counts >= 10).all()
num_bursts1 = data.num_bursts
data.burst_search(L=30)
for bursts in data.mburst:
assert (bursts.counts >= 30).all()
assert np.all(num_bursts1 > data.num_bursts)
def test_burst_search_with_no_bursts(data):
"""Smoke test burst search when some periods have no bursts."""
# F=600 results in periods with no bursts for the us-ALEX measurement
# and in no bursts at all for the multi-spot measurements
data.burst_search(m=10, F=600)
data.fuse_bursts(ms=1)
if has_matplotlib:
def test_stale_fitter_after_burst_search(data):
"""Test that E/S_fitter attributes are deleted on burst search."""
data.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel(Dex='Dem'))
bplt.dplot(data, bplt.hist_fret) # create E_fitter attribute
if data.alternated:
bplt.dplot(data, bplt.hist_S) # create S_fitter attribute
data.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel(Dex='Aem'))
assert not hasattr(data, 'E_fitter')
if data.alternated:
assert not hasattr(data, 'S_fitter')
bplt.dplot(data, bplt.hist_fret) # create E_fitter attribute
if data.alternated:
bplt.dplot(data, bplt.hist_S) # create S_fitter attribute
data.calc_fret()
assert not hasattr(data, 'E_fitter')
if data.alternated:
assert not hasattr(data, 'S_fitter')
def test_burst_search(data):
"""Smoke test and bg_bs check."""
streams = [Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
if data.alternated:
streams.extend([Ph_sel(Dex='Aem', Aex='Aem'), Ph_sel(Dex='DAem')])
for sel in streams:
data.burst_search(L=10, m=10, F=7, ph_sel=sel)
assert list_equal(data.bg_bs, data.bg_from(sel))
if data.alternated:
data.burst_search(m=10, F=7, ph_sel=Ph_sel(Dex='DAem'), compact=True)
data.burst_search(L=10, m=10, F=7)
def test_burst_search_and_gate(data_1ch):
"""Test consistency of burst search and gate."""
d = data_1ch
assert d.alternated
# Smoke tests
bext.burst_search_and_gate(d, F=(6, 8))
bext.burst_search_and_gate(d, m=(12, 8))
bext.burst_search_and_gate(d, min_rate_cps=(60e3, 40e3))
if d.nch > 1:
mr1 = 35e3 + np.arange(d.nch) * 1e3
mr2 = 30e3 + np.arange(d.nch) * 1e3
bext.burst_search_and_gate(d, min_rate_cps=(mr1, mr2))
# Consistency test
d_dex = d.copy()
d_dex.burst_search(ph_sel=Ph_sel(Dex='DAem'))
d_aex = d.copy()
d_aex.burst_search(ph_sel=Ph_sel(Aex='Aem'))
d_and = bext.burst_search_and_gate(d)
for bursts_dex, bursts_aex, bursts_and, ph in zip(
d_dex.mburst, d_aex.mburst, d_and.mburst, d.iter_ph_times()):
ph_b_mask_dex = bl.ph_in_bursts_mask(ph.size, bursts_dex)
ph_b_mask_aex = bl.ph_in_bursts_mask(ph.size, bursts_aex)
ph_b_mask_and = bl.ph_in_bursts_mask(ph.size, bursts_and)
assert (ph_b_mask_and == ph_b_mask_dex * ph_b_mask_aex).all()
def test_mch_count_ph_num_py_c(data):
na_py = bl.bslib.mch_count_ph_in_bursts_py(data.mburst, data.A_em)
na_c = bl.bslib.mch_count_ph_in_bursts_c(data.mburst, data.A_em)
assert list_array_equal(na_py, na_c)
assert na_py[0].dtype == np.float64
def test_burst_sizes(data):
"""Test for .burst_sizes_ich() and burst_sizes()"""
# Smoke test
plain_sizes = data.burst_sizes()
assert len(plain_sizes) == data.nch
# Test gamma and donor_ref arguments
bs1 = data.burst_sizes_ich(gamma=0.5, donor_ref=True)
bs2 = data.burst_sizes_ich(gamma=0.5, donor_ref=False)
assert np.allclose(bs1, bs2 / 0.5)
# Test add_naa
if data.alternated:
bs_no_naa = data.burst_sizes_ich(add_naa=False)
bs_naa = data.burst_sizes_ich(add_naa=True)
assert np.allclose(bs_no_naa + data.naa[0], bs_naa)
# Test beta and donor_ref arguments with gamma=1
naa1 = data.get_naa_corrected(beta=0.8, donor_ref=True)
naa2 = data.get_naa_corrected(beta=0.8, donor_ref=False)
assert np.allclose(naa1, naa2)
# Test beta and donor_ref arguments with gamma=0.5
naa1 = data.get_naa_corrected(gamma=0.5, beta=0.8, donor_ref=True)
naa2 = data.get_naa_corrected(gamma=0.5, beta=0.8, donor_ref=False)
assert np.allclose(naa1 * 0.5, naa2)
def test_leakage(data):
"""
Test setting leakage before and after burst search
"""
# burst search, then set leakage
data.burst_search()
data.leakage = 0.04
na1 = list(data.na)
# set leakage, then burst search
data.burst_search()
na2 = list(data.na)
assert list_array_equal(na1, na2)
def test_gamma(data):
"""
Test setting gamma before and after burst search
"""
# burst search, then set gamma
data.burst_search()
E0 = list(data.E)
data.gamma = 0.5
E1 = list(data.E)
assert not list_array_equal(E0, E1)
# burst search after setting gamma
data.burst_search()
E2 = list(data.E)
assert list_array_equal(E1, E2)
def test_dir_ex(data_1ch):
"""
Test setting dir_ex before and after burst search
"""
data = data_1ch
# burst search, then set dir_ex
data.burst_search()
na0 = list(data.na)
data.dir_ex = 0.05
na1 = list(data.na)
assert not list_array_equal(na0, na1)
# burst search after setting dir_ex
data.burst_search()
na2 = list(data.na)
assert list_array_equal(na1, na2)
def test_beta(data_1ch):
"""
Test setting beta before and after burst search
"""
data = data_1ch
# burst search, then set beta
data.burst_search()
S0 = list(data.S)
data.beta = 0.7
S1 = list(data.S)
assert not list_array_equal(S0, S1)
# burst search after setting beta
data.burst_search()
S2 = list(data.S)
assert list_array_equal(S1, S2)
def test_bursts_interface(data):
d = data
for b in d.mburst:
assert (b.start == b.data[:, b._i_start]).all()
assert (b.stop == b.data[:, b._i_stop]).all()
assert (b.istart == b.data[:, b._i_istart]).all()
assert (b.istop == b.data[:, b._i_istop]).all()
rate = 1.*b.counts/b.width
assert (b.ph_rate == rate).all()
separation = b.start[1:] - b.stop[:-1]
assert (b.separation == separation).all()
assert (b.stop > b.start).all()
def test_burst_stop_istop(data):
"""Test coherence between b_end() and b_iend()"""
d = data
for ph, bursts in zip(d.ph_times_m, d.mburst):
assert (ph[bursts.istop] == bursts.stop).all()
def test_monotonic_burst_start(data):
"""Test for monotonic burst start times."""
d = data
for i in range(d.nch):
assert (np.diff(d.mburst[i].start) > 0).all()
def test_monotonic_burst_stop(data):
"""Test for monotonic burst stop times."""
d = data
for bursts in d.mburst:
assert (np.diff(bursts.stop) > 0).all()
def test_burst_istart_iend_size(data):
"""Test consistency between burst istart, istop and counts (i.e. size)"""
d = data
for bursts in d.mburst:
counts = bursts.istop - bursts.istart + 1
assert (counts == bursts.counts).all()
def test_burst_recompute_times(data):
"""Test Bursts.recompute_times method."""
d = data
for times, bursts in zip(d.ph_times_m, d.mburst):
newbursts = bursts.recompute_times(times)
assert newbursts == bursts
def test_burst_recompute_index(data):
"""Test Bursts.recompute_index_* methods."""
d = data
ph_sel = Ph_sel(Dex='Dem')
d.burst_search(ph_sel=ph_sel, index_allph=True)
d_sel = d.copy()
d_sel.burst_search(ph_sel=ph_sel, index_allph=False)
for times_sel, mask_sel, bursts_sel, times_allph, bursts_allph in zip(
d.iter_ph_times(ph_sel=ph_sel),
d.iter_ph_masks(ph_sel=ph_sel),
d_sel.mburst,
d.iter_ph_times(),
d.mburst):
assert (times_sel[bursts_sel.istart] == bursts_sel.start).all()
assert (times_sel[bursts_sel.istop] == bursts_sel.stop).all()
assert (times_allph[bursts_allph.istart] == bursts_allph.start).all()
assert (times_allph[bursts_allph.istop] == bursts_allph.stop).all()
# Test individual methods
bursts_allph2 = bursts_sel.recompute_index_expand(mask_sel)
assert bursts_allph2 == bursts_allph
assert (times_allph[bursts_allph2.istart] == bursts_allph2.start).all()
assert (times_allph[bursts_allph2.istop] == bursts_allph2.stop).all()
bursts_sel2 = bursts_allph.recompute_index_reduce(times_sel)
assert (times_sel[bursts_sel2.istart] == bursts_sel2.start).all()
assert (times_sel[bursts_sel2.istop] == bursts_sel2.stop).all()
assert bursts_sel2 == bursts_sel
# Test round-trip
bursts_allph3 = bursts_sel2.recompute_index_expand(mask_sel)
assert bursts_allph3 == bursts_allph2
assert (times_allph[bursts_allph3.istart] == bursts_allph3.start).all()
assert (times_allph[bursts_allph3.istop] == bursts_allph3.stop).all()
## This test is only used to develop alternative implementations of
## Bursts.recompute_index_reduce() and is normally disabled as it is very slow.
#def test_burst_recompute_index_reduce(data):
# """Test different versions of Bursts.recompute_index_reduce methods.
#
# This test is very slow so it's normally disabled.
# """
# d = data
# ph_sel = Ph_sel(Dex='Aem')
# d.burst_search(ph_sel=ph_sel)
# d_sel = d.copy()
# d_sel.burst_search(ph_sel=ph_sel, index_allph=False)
# for times_sel, bursts_sel, times_allph, bursts_allph in zip(
# d.iter_ph_times(ph_sel=ph_sel),
# d_sel.mburst,
# d.iter_ph_times(),
# d.mburst):
# assert (times_allph[bursts_allph.istart] == bursts_allph.start).all()
# assert (times_allph[bursts_allph.istop] == bursts_allph.stop).all()
#
# bursts_sel1 = bursts_allph.recompute_index_reduce(times_sel)
# bursts_sel2 = bursts_allph.recompute_index_reduce2(times_sel)
# assert bursts_sel1 == bursts_sel2
# assert bursts_sel == bursts_sel1
def test_phrates_mtuple(data):
d = data
m = 10
max_num_ph = 20001
for ph in d.iter_ph_times():
phc = ph[:max_num_ph]
rates = phrates.mtuple_rates(phc, m)
delays = phrates.mtuple_delays(phc, m)
t_rates = 0.5 * (phc[m-1:] + phc[:-m+1])
assert phrates.mtuple_rates_max(phc, m) == rates.max()
assert phrates.mtuple_delays_min(phc, m) == delays.min()
assert phrates.default_c == 1
assert (rates == (m - 1 - phrates.default_c) / delays).all()
assert (phrates.mtuple_rates_t(phc, m) == t_rates).all()
if has_numba:
def test_phrates_kde(data):
d = data
tau = 5000 # 5000 * 12.5ns = 6.25 us
for ph in d.iter_ph_times():
# Test consistency of kde_laplace_nph and (kde_laplace, kde_rect)
rates = phrates.kde_laplace(ph, tau)
nrect = phrates.kde_rect(ph, tau*10)
ratesl, nph = phrates.nb.kde_laplace_nph(ph, tau)
assert (rates == ratesl).all()
assert (nph == nrect).all()
# Test consistency of kde_laplace and _kde_laplace_self_numba
ratesl2, nph2 = phrates.nb.kde_laplace_self_numba(ph, tau)
assert (nph2 == nrect).all()
assert (ratesl2 == rates).all()
# Smoke test laplace, gaussian, rect with time_axis
ratesl = phrates.kde_laplace(ph, tau, time_axis=ph+1)
assert ((ratesl >= 0) * (ratesl < 5e6)).all()
ratesg = phrates.kde_gaussian(ph, tau, time_axis=ph+1)
assert ((ratesg >= 0) * (ratesg < 5e6)).all()
ratesr = phrates.kde_rect(ph, tau, time_axis=ph+1)
assert ((ratesr >= 0) * (ratesr < 5e6)).all()
def test_phrates_kde_cy(data):
d = data
tau = 5000 # 5000 * 12.5ns = 6.25 us
for ph in d.iter_ph_times():
# Test consistency of kde_laplace_nph and (kde_laplace, kde_rect)
ratesg = phrates.nb.kde_gaussian_numba(ph, tau)
ratesl = phrates.nb.kde_laplace_numba(ph, tau)
ratesr = phrates.nb.kde_rect_numba(ph, tau)
ratesgc = phrates.cy.kde_gaussian_cy(ph, tau)
rateslc = phrates.cy.kde_laplace_cy(ph, tau)
ratesrc = phrates.cy.kde_rect_cy(ph, tau)
assert (ratesg == ratesgc).all()
assert (ratesl == rateslc).all()
assert (ratesr == ratesrc).all()
def test_burst_ph_data_functions(data):
"""Tests the functions that iterate or operate on per-burst "ph-data".
"""
d = data
for bursts, ph, mask in zip(d.mburst, d.iter_ph_times(),
d.iter_ph_masks(Ph_sel(Dex='Dem'))):
bstart = bursts.start
bend = bursts.stop
for i, (start, stop) in enumerate(bl.iter_bursts_start_stop(bursts)):
assert ph[start] == bstart[i]
assert ph[stop-1] == bend[i]
for i, burst_ph in enumerate(bl.iter_bursts_ph(ph, bursts)):
assert burst_ph[0] == bstart[i]
assert burst_ph[-1] == bend[i]
for i, burst_ph in enumerate(bl.iter_bursts_ph(ph, bursts, mask=mask)):
if burst_ph.size > 0:
assert burst_ph[0] >= bstart[i]
assert burst_ph[-1] <= bend[i]
stats = bl.burst_ph_stats(ph, bursts, mask=mask)
assert (stats[~np.isnan(stats)] >= bstart[~np.isnan(stats)]).all()
assert (stats[~np.isnan(stats)] <= bend[~np.isnan(stats)]).all()
bistart = bursts.istart
biend = bursts.istop
bursts_mask = bl.ph_in_bursts_mask(ph.size, bursts)
for i, (start, stop) in enumerate(bl.iter_bursts_start_stop(bursts)):
assert bursts_mask[start:stop].all()
if start > 0:
if i > 0 and biend[i-1] < bistart[i] - 1:
assert not bursts_mask[start - 1]
if stop < ph.size:
if i < bistart.size-1 and bistart[i+1] > biend[i] + 1:
assert not bursts_mask[stop]
def test_ph_in_bursts_ich(data):
"""Tests the ph_in_bursts_ich method.
"""
d = data
for ich in range(d.nch):
ph_in_bursts = d.ph_in_bursts_ich(ich)
ph_in_bursts_dd = d.ph_in_bursts_ich(ich, ph_sel=Ph_sel(Dex='Dem'))
assert ph_in_bursts_dd.size < ph_in_bursts.size
def test_burst_fuse(data):
"""Test 2 independent implementations of fuse_bursts for consistency.
"""
d = data
for bursts in d.mburst:
new_mbursti = bl.fuse_bursts_iter(bursts, ms=1)
new_mburstd = bl.fuse_bursts_direct(bursts, ms=1)
assert new_mbursti == new_mburstd
def test_burst_fuse_0ms(data):
"""Test that after fusing with ms=0 the sum of bursts sizes is that same
as the number of ph in bursts (via burst selection).
"""
d = data
if d.nch == 8:
d.burst_search(L=10, m=10, F=7, computefret=False)
d.mburst[1] = bl.bslib.Bursts.empty() # Make one channel with no bursts
d._calc_burst_period()
d.calc_fret(count_ph=True)
df = d.fuse_bursts(ms=0)
for ich, bursts in enumerate(df.mburst):
mask = bl.ph_in_bursts_mask(df.ph_data_sizes[ich], bursts)
assert mask.sum() == bursts.counts.sum()
df.calc_fret(count_ph=True)
assert len(df.mburst) == len(d.mburst)
assert len(df.mburst) == d.nch
def test_burst_fuse_separation(data):
"""Test that after fusing bursts the minimum separation is equal
to the threshold used during fusing.
"""
d = data
fuse_ms = 2
df = d.fuse_bursts(ms=fuse_ms)
for bursts in df.mburst:
separation = bursts.separation * df.clk_p
if bursts.num_bursts > 0:
assert separation.min() >= fuse_ms * 1e-3
def test_calc_sbr(data):
"""Smoke test Data.calc_sbr()"""
data.calc_sbr()
def test_calc_max_rate(data):
"""Smoke test for Data.calc_max_rate()"""
data.calc_max_rate(m=10)
if data.alternated:
data.calc_max_rate(m=10, ph_sel=Ph_sel(Dex='DAem'), compact=True)
def test_burst_data(data):
"""Test for bext.burst_data()"""
bext.burst_data(data, include_bg=True, include_ph_index=True)
bext.burst_data(data, include_bg=False, include_ph_index=True)
bext.burst_data(data, include_bg=True, include_ph_index=False)
bext.burst_data(data, include_bg=False, include_ph_index=False)
def test_print_burst_stats(data):
"""Smoke test for burstlib.print_burst_stats()"""
bl.print_burst_stats(data)
def test_expand(data):
"""Test method `expand()` for `Data()`."""
d = data
for ich, bursts in enumerate(d.mburst):
if bursts.num_bursts == 0:
continue # if no bursts skip this ch
nd, na, bg_d, bg_a, width = d.expand(ich, width=True)
width2 = bursts.width * d.clk_p
period = d.bp[ich]
bg_d2 = d.bg_from(Ph_sel(Dex='Dem'))[ich][period] * width2
bg_a2 = d.bg_from(Ph_sel(Dex='Aem'))[ich][period] * width2
assert (width == width2).all()
assert (nd == d.nd[ich]).all() and (na == d.na[ich]).all()
assert (bg_d == bg_d2).all() and (bg_a == bg_a2).all()
def test_burst_data_ich(data):
"""Test method `Data.burst_data_ich()`."""
d = data
for ich, bursts in enumerate(d.mburst):
if bursts.num_bursts == 0:
continue # if no bursts skip this ch
burst_dict = d.burst_data_ich(ich=ich)
assert (burst_dict['size_raw'] == bursts.counts).all()
assert (burst_dict['t_start'] == bursts.start * d.clk_p).all()
assert (burst_dict['t_stop'] == bursts.stop * d.clk_p).all()
assert (burst_dict['i_start'] == bursts.istart).all()
assert (burst_dict['i_stop'] == bursts.istop).all()
assert (burst_dict['bg_period'] == d.bp[ich]).all()
nd, na, bg_d, bg_a, width = d.expand(ich, width=True)
width_ms = width * 1e3
assert (width_ms == burst_dict['width_ms']).all()
assert (nd == burst_dict['nd']).all()
assert (na == burst_dict['na']).all()
assert (bg_d == burst_dict['bg_dd']).all()
assert (bg_a == burst_dict['bg_ad']).all()
if d.alternated:
period = d.bp[ich]
bg_da = d.bg_from(Ph_sel(Aex='Dem'))[ich][period] * width
bg_aa = d.bg_from(Ph_sel(Aex='Aem'))[ich][period] * width
assert (bg_da == burst_dict['bg_da']).all()
assert (bg_aa == burst_dict['bg_aa']).all()
def test_burst_corrections(data):
"""Test background and bleed-through corrections."""
d = data
d.calc_ph_num(alex_all=True)
d.corrections()
leakage = d.get_leakage_array()
for ich, bursts in enumerate(d.mburst):
if bursts.num_bursts == 0: continue # if no bursts skip this ch
nd, na, bg_d, bg_a, width = d.expand(ich, width=True)
burst_size_raw = bursts.counts
lk = leakage[ich]
if d.alternated:
nda, naa = d.nda[ich], d.naa[ich]
period = d.bp[ich]
bg_da = d.bg_from(Ph_sel(Aex='Dem'))[ich][period]*width
bg_aa = d.bg_from(Ph_sel(Aex='Aem'))[ich][period]*width
burst_size_raw2 = (nd + na + bg_d + bg_a + lk*nd + nda + naa +
bg_da + bg_aa)
assert np.allclose(burst_size_raw, burst_size_raw2)
else:
burst_size_raw2 = nd + na + bg_d + bg_a + lk*nd
assert np.allclose(burst_size_raw, burst_size_raw2)
def test_burst_search_consistency(data):
"""Test consistency of burst data array
"""
d = data
for mb, ph in zip(d.mburst, d.iter_ph_times()):
tot_size = mb.counts
istart, istop = mb.istart, mb.istop
assert np.all(tot_size == istop - istart + 1)
start, stop, width = mb.start, mb.stop, mb.width
assert np.all(width == stop - start)
df = d.fuse_bursts(ms=0)
for mb, ph in zip(df.mburst, df.iter_ph_times()):
tot_size = mb.counts
istart, istop = mb.istart, mb.istop
assert np.all(tot_size == istop - istart + 1)
start, stop, width = mb.start, mb.stop, mb.width
assert np.all(width == stop - start)
df = d.fuse_bursts(ms=1)
for mb, ph in zip(df.mburst, df.iter_ph_times()):
tot_size = mb.counts
istart, istop = mb.istart, mb.istop
assert np.all(tot_size <= istop - istart + 1)
start, stop, width = mb.start, mb.stop, mb.width
assert np.all(width <= stop - start)
def test_E_and_S_with_corrections(data):
d = data
gamma = 0.5
beta = 0.7
d.gamma = gamma
d.beta = beta
for i, (E, nd, na) in enumerate(zip(d.E, d.nd, d.na)):
assert (E == na / (nd * gamma + na)).all()
if d.alternated:
naa = d.naa[i]
if 'PAX' in data.meas_type:
naa = d.naa[i] - d.nar[i]
assert (d.S[i] == (gamma * nd + na) /
(gamma * nd + na + naa / beta)).all()
def test_burst_size_da(data):
"""Test that nd + na with no corrections is equal to b_size(mburst).
"""
d = data
d.calc_ph_num(alex_all=True)
if d.alternated:
for mb, nd, na, naa, nda in zip(d.mburst, d.nd, d.na, d.naa, d.nda):
tot_size = mb.counts
tot_size2 = nd + na + naa + nda
assert np.allclose(tot_size, tot_size2)
else:
for mb, nd, na in zip(d.mburst, d.nd, d.na):
tot_size = mb.counts
assert (tot_size == nd + na).all()
def test_burst_selection(data):
"""Smoke test for burst selection methods.
"""
d = data
d.select_bursts(select_bursts.size, th1=20, th2=100, add_naa=True)
d.select_bursts(select_bursts.size, th1=20, th2=100, gamma=0.5)
M1 = d.select_bursts_mask(select_bursts.consecutive, th1=1e-3, th2=1e4,
kind='first')
M2 = d.select_bursts_mask(select_bursts.consecutive, th1=1e-3, th2=1e4,
kind='second')
Mb = d.select_bursts_mask(select_bursts.consecutive, th1=1e-3, th2=1e4,
kind='both')
Mb2 = [m1 + m2 for m1, m2 in zip(M1, M2)]
assert list_array_equal(Mb, Mb2)
def test_burst_selection_nocorrections(data):
"""Test burst selection with uncorrected bursts.
"""
d = data
d.burst_search(computefret=False)
d.calc_fret(count_ph=True, corrections=False)
ds1 = d.select_bursts(select_bursts.size, th1=20, th2=100,
computefret=False)
ds2 = d.select_bursts(select_bursts.size, th1=20, th2=100)
ds2.calc_ph_num()
ds2.calc_fret(corrections=False)
assert list_array_equal(ds1.nd, ds2.nd)
assert list_array_equal(ds1.na, ds2.na)
assert list_array_equal(ds1.E, ds2.E)
if d.alternated:
assert list_array_equal(ds1.naa, ds2.naa)
assert list_array_equal(ds1.E, ds2.E)
def test_burst_selection_ranges(data):
"""Test selection functions having a min-max range.
"""
d = data
d.burst_search()
d.calc_max_rate(m=10, ph_sel=Ph_sel(Dex='DAem'))
Range = namedtuple('Range', ['min', 'max', 'getter'])
sel_functions = dict(
E=Range(0.5, 1, None), nd=Range(30, 40, None), na=Range(30, 40, None),
time=Range(1, 61, lambda d, ich: d.mburst[ich].start * d.clk_p),
width=Range(0.5, 1.5, lambda d, ich: d.mburst[ich].width * d.clk_p*1e3),
peak_phrate=Range(50e3, 150e3, lambda d, ich: d.max_rate[ich]))
if d.alternated:
sel_functions.update(naa=Range(30, 40, None), S=Range(0.3, 0.7, None))
for func_name, range_ in sel_functions.items():
func = getattr(select_bursts, func_name)
getter = range_.getter
if getter is None:
getter = lambda d, ich: d[func_name][ich]
ds = d.select_bursts(func, args=(range_.min, range_.max))
for ich in range(d.nch):
selected = getter(ds, ich)
assert ((selected >= range_.min) * (selected <= range_.max)).all()
def test_join_data(data):
"""Smoke test for bext.join_data() function.
"""
d = data
dj = bext.join_data([d, d.copy()])
assert (dj.num_bursts == 2 * d.num_bursts).all()
for bursts in dj.mburst:
assert (np.diff(bursts.start) > 0).all()
def test_collapse(data_8ch):
"""Test the .collapse() method that joins the ch.
"""
d = data_8ch
dc1 = d.collapse()
bursts1 = dc1.mburst[0]
bursts2 = bl.bslib.Bursts.merge(d.mburst, sort=True)
assert bursts1 == bursts2
bursts2 = bl.bslib.Bursts.merge(d.mburst, sort=False)
indexsort_stop = bursts2.stop.argsort()
bursts3 = bursts2[indexsort_stop]
indexsort_start = bursts3.start.argsort()
bursts4 = bursts3[indexsort_start]
assert bursts1 == bursts4
indexsort = np.lexsort((bursts2.stop, bursts2.start))
for name in d.burst_fields:
if name not in d or name == 'mburst':
continue
newfield = np.hstack(d[name])[indexsort]
assert np.allclose(dc1[name][0], newfield)
dc2 = d.collapse(update_gamma=False)
for name in d.burst_fields:
if name not in d: continue
if name == 'mburst':
assert dc1.mburst[0] == dc2.mburst[0]
else:
assert np.allclose(dc1[name][0], dc2[name][0])
if __name__ == '__main__':
pytest.main("-x -v fretbursts/tests/test_burstlib.py")
| gpl-2.0 |
gerddie/nipype | nipype/algorithms/rapidart.py | 9 | 30137 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The rapidart module provides routines for artifact detection and region of
interest analysis.
These functions include:
* ArtifactDetect: performs artifact detection on functional images
* StimulusCorrelation: determines correlation between stimuli
schedule and movement/intensity parameters
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
import os
from copy import deepcopy
from warnings import warn
from nibabel import load, funcs, Nifti1Image
import numpy as np
from scipy import signal
import scipy.io as sio
from nipype.external import six
from ..interfaces.base import (BaseInterface, traits, InputMultiPath,
OutputMultiPath, TraitedSpec, File,
BaseInterfaceInputSpec, isdefined)
from ..utils.filemanip import filename_to_list, save_json, split_filename
from ..utils.misc import find_indices
from .. import logging, config
iflogger = logging.getLogger('interface')
def _get_affine_matrix(params, source):
"""Return affine matrix given a set of translation and rotation parameters
params : np.array (upto 12 long) in native package format
source : the package that generated the parameters
supports SPM, AFNI, FSFAST, FSL, NIPY
"""
if source == 'FSL':
params = params[[3, 4, 5, 0, 1, 2]]
elif source in ('AFNI', 'FSFAST'):
params = params[np.asarray([4, 5, 3, 1, 2, 0]) + (len(params) > 6)]
params[3:] = params[3:] * np.pi / 180.
if source == 'NIPY':
# nipy does not store typical euler angles, use nipy to convert
from nipy.algorithms.registration import to_matrix44
return to_matrix44(params)
#process for FSL, SPM, AFNI and FSFAST
rotfunc = lambda x: np.array([[np.cos(x), np.sin(x)],
[-np.sin(x), np.cos(x)]])
q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0])
if len(params) < 12:
params = np.hstack((params, q[len(params):]))
params.shape = (len(params),)
# Translation
T = np.eye(4)
T[0:3, -1] = params[0:3]
# Rotation
Rx = np.eye(4)
Rx[1:3, 1:3] = rotfunc(params[3])
Ry = np.eye(4)
Ry[(0, 0, 2, 2), (0, 2, 0, 2)] = rotfunc(params[4]).ravel()
Rz = np.eye(4)
Rz[0:2, 0:2] = rotfunc(params[5])
# Scaling
S = np.eye(4)
S[0:3, 0:3] = np.diag(params[6:9])
# Shear
Sh = np.eye(4)
Sh[(0, 0, 1), (1, 2, 2)] = params[9:12]
if source in ('AFNI', 'FSFAST'):
return np.dot(T, np.dot(Ry, np.dot(Rx, np.dot(Rz, np.dot(S, Sh)))))
return np.dot(T, np.dot(Rx, np.dot(Ry, np.dot(Rz, np.dot(S, Sh)))))
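# A minimal usage sketch (the helper below is illustrative only and is not used
# elsewhere in this module): building a rigid-body affine from six SPM-style
# parameters, i.e. three translations in mm followed by three rotations in rad.
def _example_affine_from_spm_params():
    """Affine for a 2 mm x-translation combined with a small z-rotation."""
    params = np.array([2., 0., 0., 0., 0., 0.1])
    affine = _get_affine_matrix(params, source='SPM')
    # the translations end up in the last column, the rotation in the 3x3 block
    return affine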
def _calc_norm(mc, use_differences, source, brain_pts=None):
"""Calculates the maximum overall displacement of the midpoints
of the faces of a cube due to translation and rotation.
Parameters
----------
mc : motion parameter estimates
[3 translation, 3 rotation (radians)]
use_differences : boolean
brain_pts : [4 x n_points] of coordinates
Returns
-------
norm : at each time point
displacement : euclidean distance (mm) of displacement at each coordinate
"""
if brain_pts is None:
respos = np.diag([70, 70, 75])
resneg = np.diag([-70, -110, -45])
all_pts = np.vstack((np.hstack((respos, resneg)), np.ones((1, 6))))
displacement = None
else:
all_pts = brain_pts
n_pts = all_pts.size - all_pts.shape[1]
newpos = np.zeros((mc.shape[0], n_pts))
if brain_pts is not None:
displacement = np.zeros((mc.shape[0], n_pts / 3))
for i in range(mc.shape[0]):
affine = _get_affine_matrix(mc[i, :], source)
newpos[i, :] = np.dot(affine,
all_pts)[0:3, :].ravel()
if brain_pts is not None:
displacement[i, :] = \
np.sqrt(np.sum(np.power(np.reshape(newpos[i, :],
(3, all_pts.shape[1])) -
all_pts[0:3, :],
2),
axis=0))
# np.savez('displacement.npz', newpos=newpos, pts=all_pts)
normdata = np.zeros(mc.shape[0])
if use_differences:
newpos = np.concatenate((np.zeros((1, n_pts)),
np.diff(newpos, n=1, axis=0)), axis=0)
for i in range(newpos.shape[0]):
normdata[i] = \
np.max(np.sqrt(np.sum(np.reshape(np.power(np.abs(newpos[i, :]), 2),
(3, all_pts.shape[1])), axis=0)))
else:
newpos = np.abs(signal.detrend(newpos, axis=0, type='constant'))
normdata = np.sqrt(np.mean(np.power(newpos, 2), axis=1))
return normdata, displacement
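# A minimal usage sketch (illustrative helper, not used elsewhere): composite
# motion norm for a tiny set of FSL-style estimates.  Each row of `mc` holds
# three rotations (rad) followed by three translations (mm), the FSL ordering
# handled by _get_affine_matrix above.
def _example_composite_norm():
    mc = np.zeros((4, 6))
    mc[:, 3] = [0., 0.5, 1.0, 1.5]  # growing x-translation in mm
    normval, _ = _calc_norm(mc, use_differences=True, source='FSL')
    return normval  # one composite displacement value per time point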
def _nanmean(a, axis=None):
"""Return the mean excluding items that are nan
>>> a = [1, 2, np.nan]
>>> _nanmean(a)
1.5
"""
    if axis is not None:  # check against None explicitly so that axis=0 works
        return np.nansum(a, axis) / np.sum(1 - np.isnan(a), axis)
    else:
        return np.nansum(a) / np.sum(1 - np.isnan(a))
class ArtifactDetectInputSpec(BaseInterfaceInputSpec):
realigned_files = InputMultiPath(File(exists=True),
desc="Names of realigned functional data files",
mandatory=True)
realignment_parameters = InputMultiPath(File(exists=True), mandatory=True,
desc=("Names of realignment parameters"
"corresponding to the functional data files"))
parameter_source = traits.Enum("SPM", "FSL", "AFNI", "NiPy", "FSFAST",
desc="Source of movement parameters",
mandatory=True)
use_differences = traits.ListBool([True, False], minlen=2, maxlen=2,
usedefault=True,
desc=("Use differences between successive motion (first element)"
"and intensity paramter (second element) estimates in order"
"to determine outliers. (default is [True, False])"))
use_norm = traits.Bool(True, requires=['norm_threshold'],
desc=("Uses a composite of the motion parameters in "
"order to determine outliers."),
usedefault=True)
norm_threshold = traits.Float(desc=("Threshold to use to detect motion-rela"
"ted outliers when composite motion is "
"being used"), mandatory=True,
xor=['rotation_threshold',
'translation_threshold'])
rotation_threshold = traits.Float(mandatory=True, xor=['norm_threshold'],
desc=("Threshold (in radians) to use to detect rotation-related "
"outliers"))
translation_threshold = traits.Float(mandatory=True, xor=['norm_threshold'],
desc=("Threshold (in mm) to use to detect translation-related "
"outliers"))
zintensity_threshold = traits.Float(mandatory=True,
desc=("Intensity Z-threshold use to detection images that deviate "
"from the mean"))
mask_type = traits.Enum('spm_global', 'file', 'thresh',
desc=("Type of mask that should be used to mask the functional "
"data. *spm_global* uses an spm_global like calculation to "
"determine the brain mask. *file* specifies a brain mask "
"file (should be an image file consisting of 0s and 1s). "
"*thresh* specifies a threshold to use. By default all voxels"
"are used, unless one of these mask types are defined."),
mandatory=True)
mask_file = File(exists=True,
desc="Mask file to be used if mask_type is 'file'.")
mask_threshold = traits.Float(desc=("Mask threshold to be used if mask_type"
" is 'thresh'."))
intersect_mask = traits.Bool(True,
desc=("Intersect the masks when computed from "
"spm_global."))
save_plot = traits.Bool(True, desc="save plots containing outliers",
usedefault=True)
plot_type = traits.Enum('png', 'svg', 'eps', 'pdf',
desc="file type of the outlier plot",
usedefault=True)
bound_by_brainmask = traits.Bool(False, desc=("use the brain mask to "
"determine bounding box"
"for composite norm (works"
"for SPM and Nipy - currently"
"inaccurate for FSL, AFNI"),
usedefault=True)
global_threshold = traits.Float(8.0, desc=("use this threshold when mask "
"type equal's spm_global"),
usedefault=True)
class ArtifactDetectOutputSpec(TraitedSpec):
outlier_files = OutputMultiPath(File(exists=True),
desc=("One file for each functional run containing a list of "
"0-based indices corresponding to outlier volumes"))
intensity_files = OutputMultiPath(File(exists=True),
desc=("One file for each functional run containing the global "
"intensity values determined from the brainmask"))
norm_files = OutputMultiPath(File,
desc=("One file for each functional run containing the composite "
"norm"))
statistic_files = OutputMultiPath(File(exists=True),
desc=("One file for each functional run containing information "
"about the different types of artifacts and if design info is"
" provided then details of stimulus correlated motion and a "
"listing or artifacts by event type."))
plot_files = OutputMultiPath(File,
desc=("One image file for each functional run containing the "
"detected outliers"))
mask_files = OutputMultiPath(File,
desc=("One image file for each functional run containing the mask"
"used for global signal calculation"))
displacement_files = OutputMultiPath(File,
desc=("One image file for each functional run containing the voxel"
"displacement timeseries"))
class ArtifactDetect(BaseInterface):
"""Detects outliers in a functional imaging series
Uses intensity and motion parameters to infer outliers. If `use_norm` is
    True, it computes the movement of the center of each face of a cuboid centered
around the head and returns the maximal movement across the centers.
Examples
--------
>>> ad = ArtifactDetect()
>>> ad.inputs.realigned_files = 'functional.nii'
>>> ad.inputs.realignment_parameters = 'functional.par'
>>> ad.inputs.parameter_source = 'FSL'
>>> ad.inputs.norm_threshold = 1
>>> ad.inputs.use_differences = [True, False]
>>> ad.inputs.zintensity_threshold = 3
>>> ad.run() # doctest: +SKIP
"""
input_spec = ArtifactDetectInputSpec
output_spec = ArtifactDetectOutputSpec
def __init__(self, **inputs):
super(ArtifactDetect, self).__init__(**inputs)
def _get_output_filenames(self, motionfile, output_dir):
"""Generate output files based on motion filenames
Parameters
----------
motionfile: file/string
Filename for motion parameter file
output_dir: string
output directory in which the files will be generated
"""
if isinstance(motionfile, six.string_types):
infile = motionfile
elif isinstance(motionfile, list):
infile = motionfile[0]
else:
raise Exception("Unknown type of file")
_, filename, ext = split_filename(infile)
artifactfile = os.path.join(output_dir, ''.join(('art.', filename,
'_outliers.txt')))
intensityfile = os.path.join(output_dir, ''.join(('global_intensity.',
filename, '.txt')))
statsfile = os.path.join(output_dir, ''.join(('stats.', filename,
'.txt')))
normfile = os.path.join(output_dir, ''.join(('norm.', filename,
'.txt')))
plotfile = os.path.join(output_dir, ''.join(('plot.', filename, '.',
self.inputs.plot_type)))
displacementfile = os.path.join(output_dir, ''.join(('disp.',
filename, ext)))
maskfile = os.path.join(output_dir, ''.join(('mask.', filename, ext)))
return (artifactfile, intensityfile, statsfile, normfile, plotfile,
displacementfile, maskfile)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['outlier_files'] = []
outputs['intensity_files'] = []
outputs['statistic_files'] = []
outputs['mask_files'] = []
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
outputs['norm_files'] = []
if self.inputs.bound_by_brainmask:
outputs['displacement_files'] = []
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
outputs['plot_files'] = []
for i, f in enumerate(filename_to_list(self.inputs.realigned_files)):
(outlierfile, intensityfile, statsfile, normfile, plotfile,
displacementfile, maskfile) = \
self._get_output_filenames(f, os.getcwd())
outputs['outlier_files'].insert(i, outlierfile)
outputs['intensity_files'].insert(i, intensityfile)
outputs['statistic_files'].insert(i, statsfile)
outputs['mask_files'].insert(i, maskfile)
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
outputs['norm_files'].insert(i, normfile)
if self.inputs.bound_by_brainmask:
outputs['displacement_files'].insert(i, displacementfile)
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
outputs['plot_files'].insert(i, plotfile)
return outputs
def _plot_outliers_with_wave(self, wave, outliers, name):
import matplotlib.pyplot as plt
plt.plot(wave)
plt.ylim([wave.min(), wave.max()])
plt.xlim([0, len(wave) - 1])
if len(outliers):
plt.plot(np.tile(outliers[:, None], (1, 2)).T,
np.tile([wave.min(), wave.max()], (len(outliers), 1)).T,
'r')
plt.xlabel('Scans - 0-based')
plt.ylabel(name)
def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None):
"""
Core routine for detecting outliers
"""
if not cwd:
cwd = os.getcwd()
# read in functional image
if isinstance(imgfile, six.string_types):
nim = load(imgfile)
elif isinstance(imgfile, list):
if len(imgfile) == 1:
nim = load(imgfile[0])
else:
images = [load(f) for f in imgfile]
nim = funcs.concat_images(images)
# compute global intensity signal
(x, y, z, timepoints) = nim.get_shape()
data = nim.get_data()
affine = nim.get_affine()
g = np.zeros((timepoints, 1))
masktype = self.inputs.mask_type
if masktype == 'spm_global': # spm_global like calculation
iflogger.debug('art: using spm global')
intersect_mask = self.inputs.intersect_mask
if intersect_mask:
mask = np.ones((x, y, z), dtype=bool)
for t0 in range(timepoints):
vol = data[:, :, :, t0]
# Use an SPM like approach
mask_tmp = vol > \
(_nanmean(vol) / self.inputs.global_threshold)
mask = mask * mask_tmp
for t0 in range(timepoints):
vol = data[:, :, :, t0]
g[t0] = _nanmean(vol[mask])
if len(find_indices(mask)) < (np.prod((x, y, z)) / 10):
intersect_mask = False
g = np.zeros((timepoints, 1))
if not intersect_mask:
iflogger.info('not intersect_mask is True')
mask = np.zeros((x, y, z, timepoints))
for t0 in range(timepoints):
vol = data[:, :, :, t0]
mask_tmp = vol > \
(_nanmean(vol) / self.inputs.global_threshold)
mask[:, :, :, t0] = mask_tmp
g[t0] = np.nansum(vol * mask_tmp)/np.nansum(mask_tmp)
elif masktype == 'file': # uses a mask image to determine intensity
maskimg = load(self.inputs.mask_file)
mask = maskimg.get_data()
affine = maskimg.get_affine()
mask = mask > 0.5
for t0 in range(timepoints):
vol = data[:, :, :, t0]
g[t0] = _nanmean(vol[mask])
elif masktype == 'thresh': # uses a fixed signal threshold
for t0 in range(timepoints):
vol = data[:, :, :, t0]
mask = vol > self.inputs.mask_threshold
g[t0] = _nanmean(vol[mask])
else:
mask = np.ones((x, y, z))
g = _nanmean(data[mask > 0, :], 1)
# compute normalized intensity values
gz = signal.detrend(g, axis=0) # detrend the signal
if self.inputs.use_differences[1]:
gz = np.concatenate((np.zeros((1, 1)), np.diff(gz, n=1, axis=0)),
axis=0)
gz = (gz - np.mean(gz)) / np.std(gz) # normalize the detrended signal
iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold)
# read in motion parameters
mc_in = np.loadtxt(motionfile)
mc = deepcopy(mc_in)
(artifactfile, intensityfile, statsfile, normfile, plotfile,
displacementfile, maskfile) = self._get_output_filenames(imgfile, cwd)
mask_img = Nifti1Image(mask.astype(np.uint8), affine)
mask_img.to_filename(maskfile)
if self.inputs.use_norm:
brain_pts = None
if self.inputs.bound_by_brainmask:
voxel_coords = np.nonzero(mask)
coords = np.vstack((voxel_coords[0],
np.vstack((voxel_coords[1],
voxel_coords[2])))).T
brain_pts = np.dot(affine,
np.hstack((coords,
np.ones((coords.shape[0], 1)))).T)
# calculate the norm of the motion parameters
normval, displacement = _calc_norm(mc,
self.inputs.use_differences[0],
self.inputs.parameter_source,
brain_pts=brain_pts)
tidx = find_indices(normval > self.inputs.norm_threshold)
ridx = find_indices(normval < 0)
if displacement is not None:
dmap = np.zeros((x, y, z, timepoints), dtype=np.float)
for i in range(timepoints):
dmap[voxel_coords[0],
voxel_coords[1],
voxel_coords[2], i] = displacement[i, :]
dimg = Nifti1Image(dmap, affine)
dimg.to_filename(displacementfile)
else:
if self.inputs.use_differences[0]:
mc = np.concatenate((np.zeros((1, 6)),
np.diff(mc_in, n=1, axis=0)),
axis=0)
traval = mc[:, 0:3] # translation parameters (mm)
rotval = mc[:, 3:6] # rotation parameters (rad)
tidx = find_indices(np.sum(abs(traval) >
self.inputs.translation_threshold, 1)
> 0)
ridx = find_indices(np.sum(abs(rotval) >
self.inputs.rotation_threshold, 1) > 0)
outliers = np.unique(np.union1d(iidx, np.union1d(tidx, ridx)))
# write output to outputfile
np.savetxt(artifactfile, outliers, fmt='%d', delimiter=' ')
np.savetxt(intensityfile, g, fmt='%.2f', delimiter=' ')
if self.inputs.use_norm:
np.savetxt(normfile, normval, fmt='%.4f', delimiter=' ')
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
import matplotlib
matplotlib.use(config.get("execution", "matplotlib_backend"))
import matplotlib.pyplot as plt
fig = plt.figure()
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
plt.subplot(211)
else:
plt.subplot(311)
self._plot_outliers_with_wave(gz, iidx, 'Intensity')
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
plt.subplot(212)
self._plot_outliers_with_wave(normval, np.union1d(tidx, ridx),
'Norm (mm)')
else:
diff = ''
if self.inputs.use_differences[0]:
diff = 'diff'
plt.subplot(312)
self._plot_outliers_with_wave(traval, tidx,
'Translation (mm)' + diff)
plt.subplot(313)
self._plot_outliers_with_wave(rotval, ridx,
'Rotation (rad)' + diff)
plt.savefig(plotfile)
plt.close(fig)
motion_outliers = np.union1d(tidx, ridx)
stats = [{'motion_file': motionfile,
'functional_file': imgfile},
{'common_outliers': len(np.intersect1d(iidx, motion_outliers)),
'intensity_outliers': len(np.setdiff1d(iidx,
motion_outliers)),
'motion_outliers': len(np.setdiff1d(motion_outliers, iidx)),
},
{'motion': [{'using differences': self.inputs.use_differences[0]},
{'mean': np.mean(mc_in, axis=0).tolist(),
'min': np.min(mc_in, axis=0).tolist(),
'max': np.max(mc_in, axis=0).tolist(),
'std': np.std(mc_in, axis=0).tolist()},
]},
{'intensity': [{'using differences': self.inputs.use_differences[1]},
{'mean': np.mean(gz, axis=0).tolist(),
'min': np.min(gz, axis=0).tolist(),
'max': np.max(gz, axis=0).tolist(),
'std': np.std(gz, axis=0).tolist()},
]},
]
if self.inputs.use_norm:
stats.insert(3, {'motion_norm':
{'mean': np.mean(normval, axis=0).tolist(),
'min': np.min(normval, axis=0).tolist(),
'max': np.max(normval, axis=0).tolist(),
'std': np.std(normval, axis=0).tolist(),
}})
save_json(statsfile, stats)
def _run_interface(self, runtime):
"""Execute this module.
"""
funcfilelist = filename_to_list(self.inputs.realigned_files)
motparamlist = filename_to_list(self.inputs.realignment_parameters)
for i, imgf in enumerate(funcfilelist):
self._detect_outliers_core(imgf, motparamlist[i], i,
cwd=os.getcwd())
return runtime
class StimCorrInputSpec(BaseInterfaceInputSpec):
realignment_parameters = InputMultiPath(File(exists=True), mandatory=True,
desc=('Names of realignment parameters corresponding to the functional '
'data files'))
intensity_values = InputMultiPath(File(exists=True), mandatory=True,
desc='Name of file containing intensity values')
spm_mat_file = File(exists=True, mandatory=True,
desc='SPM mat file (use pre-estimate SPM.mat file)')
concatenated_design = traits.Bool(mandatory=True,
desc='state if the design matrix contains concatenated sessions')
class StimCorrOutputSpec(TraitedSpec):
stimcorr_files = OutputMultiPath(File(exists=True),
desc='List of files containing correlation values')
class StimulusCorrelation(BaseInterface):
"""Determines if stimuli are correlated with motion or intensity
parameters.
Currently this class supports an SPM generated design matrix and requires
intensity parameters. This implies that one must run
:ref:`ArtifactDetect <nipype.algorithms.rapidart.ArtifactDetect>`
and :ref:`Level1Design <nipype.interfaces.spm.model.Level1Design>` prior to running this or
provide an SPM.mat file and intensity parameters through some other means.
Examples
--------
>>> sc = StimulusCorrelation()
>>> sc.inputs.realignment_parameters = 'functional.par'
>>> sc.inputs.intensity_values = 'functional.rms'
>>> sc.inputs.spm_mat_file = 'SPM.mat'
>>> sc.inputs.concatenated_design = False
>>> sc.run() # doctest: +SKIP
"""
input_spec = StimCorrInputSpec
output_spec = StimCorrOutputSpec
def _get_output_filenames(self, motionfile, output_dir):
"""Generate output files based on motion filenames
Parameters
----------
motionfile: file/string
Filename for motion parameter file
output_dir: string
output directory in which the files will be generated
"""
(_, filename) = os.path.split(motionfile)
(filename, _) = os.path.splitext(filename)
corrfile = os.path.join(output_dir, ''.join(('qa.', filename,
'_stimcorr.txt')))
return corrfile
def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None):
"""
Core routine for determining stimulus correlation
"""
if not cwd:
cwd = os.getcwd()
# read in motion parameters
mc_in = np.loadtxt(motionfile)
g_in = np.loadtxt(intensityfile)
g_in.shape = g_in.shape[0], 1
dcol = designmatrix.shape[1]
mccol = mc_in.shape[1]
concat_matrix = np.hstack((np.hstack((designmatrix, mc_in)), g_in))
cm = np.corrcoef(concat_matrix, rowvar=0)
corrfile = self._get_output_filenames(motionfile, cwd)
# write output to outputfile
file = open(corrfile, 'w')
file.write("Stats for:\n")
file.write("Stimulus correlated motion:\n%s\n" % motionfile)
for i in range(dcol):
file.write("SCM.%d:" % i)
for v in cm[i, dcol + np.arange(mccol)]:
file.write(" %.2f" % v)
file.write('\n')
file.write("Stimulus correlated intensity:\n%s\n" % intensityfile)
for i in range(dcol):
file.write("SCI.%d: %.2f\n" % (i, cm[i, -1]))
file.close()
def _get_spm_submatrix(self, spmmat, sessidx, rows=None):
"""
Parameters
----------
spmmat: scipy matlab object
full SPM.mat file loaded into a scipy object
sessidx: int
index to session that needs to be extracted.
"""
designmatrix = spmmat['SPM'][0][0].xX[0][0].X
U = spmmat['SPM'][0][0].Sess[0][sessidx].U[0]
if rows is None:
rows = spmmat['SPM'][0][0].Sess[0][sessidx].row[0] - 1
cols = spmmat['SPM'][0][0].Sess[0][sessidx].col[0][range(len(U))] - 1
outmatrix = designmatrix.take(rows.tolist(), axis=0).take(cols.tolist(),
axis=1)
return outmatrix
def _run_interface(self, runtime):
"""Execute this module.
"""
motparamlist = self.inputs.realignment_parameters
intensityfiles = self.inputs.intensity_values
spmmat = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)
nrows = []
for i in range(len(motparamlist)):
sessidx = i
rows = None
if self.inputs.concatenated_design:
sessidx = 0
mc_in = np.loadtxt(motparamlist[i])
rows = np.sum(nrows) + np.arange(mc_in.shape[0])
nrows.append(mc_in.shape[0])
matrix = self._get_spm_submatrix(spmmat, sessidx, rows)
self._stimcorr_core(motparamlist[i], intensityfiles[i],
matrix, os.getcwd())
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
files = []
for i, f in enumerate(self.inputs.realignment_parameters):
files.insert(i, self._get_output_filenames(f, os.getcwd()))
if files:
outputs['stimcorr_files'] = files
return outputs
| bsd-3-clause |
alesaccoia/TF_SoundClassification | as_sound/exec/train_vad_ann_5FCL_classifier.py | 1 | 1585 | import numpy as np
import as_classification.ann_models
import as_sound.features.extractFeatures as ef
import as_classification.utilities
import os
import matplotlib.pyplot as plt
# -------------------------------
# CREATE MODEL
# -------------------------------
model = as_classification.ann_models.ANN_5FCL()
model.initialize(15,2)
# -------------------------------
# READ AUDIO FILES
# -------------------------------
speech_data = ef.computeSupervectorForFile(os.path.dirname(os.path.realpath(__file__)) + '/data/Speech.wav', 8000, 2048, 2049)
noise_data = ef.computeSupervectorForFile(os.path.dirname(os.path.realpath(__file__)) + '/data/Noise.wav', 8000, 2048, 2049)
whole_data = np.hstack((speech_data, noise_data))
whole_data = np.swapaxes(whole_data,0,1)
whole_labels = np.zeros((whole_data.shape[0], 2))
whole_labels[:speech_data.shape[1],0] = 1
whole_labels[speech_data.shape[1]:,1] = 1
training_data = {"labels": whole_labels,
"data": whole_data}
training_data, test_data = as_classification.utilities.divideTrainingData(training_data, 0.6)
# -------------------------------
# TRAIN
# -------------------------------
model.train(training_data, test_data, 10, 20, 100)
model.saveCheckpoint(os.path.dirname(os.path.realpath(__file__)) + '/data/vadModel_ANN_5FCL.chkp')
#xp = np.arange(0,prediction.shape[0])
#plt.plot(xp, test_data[:,14], '-b', label='RMS')
#plt.plot(xp, prediction[:,0], '-r', label='ANN Output')
#plt.legend(loc='upper left')
#plt.show()
#feat = as_sound.features.extractFeatures.computeSupervector(normalized_data)
| mit |
JsNoNo/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be casted as a binary text classification problem,
that is fitting a linear classifier on features extracted from the text
of the user messages so as to guess wether the opinion of the author is
positive or negative.
In this examples we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
    # TASK: print the cross-validated scores for each parameter set
# explored by the grid search
print(grid_search.grid_scores_)
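    # The fitted grid search also exposes the single best parameter combination
    # and its mean cross-validated score directly:
    print(grid_search.best_params_)
    print(grid_search.best_score_)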
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
trachelr/mne-python | mne/time_frequency/tfr.py | 2 | 48373 | """A module which implements the time frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
#
# License : BSD (3-clause)
import warnings
from math import sqrt
from copy import deepcopy
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from ..fixes import partial
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose, _time_mask
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..io.pick import pick_info, pick_types
from ..utils import check_fname
from .multitaper import dpss_windows
from .._hdf5 import write_hdf5, read_hdf5
def _get_data(inst, return_itc):
"""Get data from Epochs or Evoked instance as epochs x ch x time"""
from ..epochs import _BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (_BaseEpochs, Evoked)):
raise TypeError('inst must be Epochs or Evoked')
if isinstance(inst, _BaseEpochs):
data = inst.get_data()
else:
if return_itc:
raise ValueError('return_itc must be False for evoked data')
data = inst.data[np.newaxis, ...].copy()
return data
def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency
freqs : array
frequency range of interest (1 x Frequencies)
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
sigma : float, (optional)
        It controls the width of the wavelet, i.e., its temporal
        resolution. If sigma is None, the temporal resolution
        is adapted to the frequency, as for any wavelet transform:
        the higher the frequency, the shorter the wavelet.
        If sigma is fixed, the temporal resolution is fixed,
        as for the short-time Fourier transform, and the number
        of oscillations increases with the frequency.
zero_mean : bool
Make sure the wavelet is zero mean
Returns
-------
Ws : list of array
Wavelets time series
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
Ws = list()
n_cycles = np.atleast_1d(n_cycles)
if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
# fixed or scale-dependent window
if sigma is None:
sigma_t = this_n_cycles / (2.0 * np.pi * f)
else:
sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
# this scaling factor is proportional to (Tallon-Baudry 98):
# (sigma_t*sqrt(pi))^(-1/2);
t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
t = np.r_[-t[::-1], t[1:]]
oscillation = np.exp(2.0 * 1j * np.pi * f * t)
gaussian_enveloppe = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
if zero_mean: # to make it zero mean
real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
oscillation -= real_offset
W = oscillation * gaussian_enveloppe
W /= sqrt(0.5) * linalg.norm(W.ravel())
Ws.append(W)
return Ws
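# A minimal usage sketch (illustrative helper, not part of the public API):
# build Morlet wavelets for a few frequencies and inspect their lengths; lower
# frequencies yield longer wavelets, i.e. coarser temporal resolution.
def _example_morlet_lengths(sfreq=1000., freqs=(5., 10., 20.), n_cycles=7.):
    Ws = morlet(sfreq, np.asarray(freqs), n_cycles=n_cycles)
    return [len(W) for W in Ws]  # lengths decrease as frequency increases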
def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
Defaults to 7.
time_bandwidth : float, (optional)
Time x Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1).
Default is 4.0, giving 3 good tapers.
Returns
-------
Ws : list of array
Wavelets time series
"""
Ws = list()
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for m in range(n_taps):
Wm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0., t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
# Get dpss tapers
tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
n_taps)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
Wm.append(Wk)
Ws.append(Wm)
return Ws
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt_fft(X, Ws, mode="same"):
"""Compute cwt with fft based convolutions
Return a generator over signals.
"""
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
n_signals, n_times = X.shape
n_freqs = len(Ws)
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
fsize = 2 ** int(np.ceil(np.log2(size)))
# precompute FFTs of Ws
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
for i, W in enumerate(Ws):
if len(W) > n_times:
raise ValueError('Wavelet is too long for such a short signal. '
'Reduce the number of cycles.')
fft_Ws[i] = fftn(W, [fsize])
for k, x in enumerate(X):
if mode == "full":
tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
elif mode == "same" or mode == "valid":
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
fft_x = fftn(x, [fsize])
for i, W in enumerate(Ws):
ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = _centered(ret, sz)
else:
tfr[i, :] = _centered(ret, n_times)
yield tfr
def _cwt_convolve(X, Ws, mode='same'):
"""Compute time freq decomposition with temporal convolutions
Return a generator over signals.
"""
X = np.asarray(X)
n_signals, n_times = X.shape
n_freqs = len(Ws)
# Compute convolutions
for x in X:
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
for i, W in enumerate(Ws):
ret = np.convolve(x, W, mode=mode)
if len(W) > len(x):
raise ValueError('Wavelet is too long for such a short '
'signal. Reduce the number of cycles.')
if mode == "valid":
sz = abs(W.size - n_times) + 1
offset = (n_times - sz) / 2
tfr[i, offset:(offset + sz)] = ret
else:
tfr[i] = ret
yield tfr
def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
"""Compute time freq decomposition with Morlet wavelets
This function operates directly on numpy arrays. Consider using
`tfr_morlet` to process `Epochs` or `Evoked` instances.
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
sfreq : float
sampling Frequency
freqs : array
Array of frequencies of interest
use_fft : bool
        Compute convolution with FFT or temporal convolution.
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
tfr.cwt : Compute time-frequency decomposition with user-provided wavelets
"""
mode = 'same'
# mode = "valid"
n_signals, n_times = X.shape
n_frequencies = len(freqs)
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
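# A minimal usage sketch (illustrative helper, assumed typical usage): compute
# time-frequency power for two noisy channels with cwt_morlet.
def _example_cwt_morlet_power(sfreq=200., n_times=400):
    rng = np.random.RandomState(0)
    X = rng.randn(2, n_times)  # 2 signals
    freqs = np.arange(5., 30., 5.)
    tfr = cwt_morlet(X, sfreq, freqs, n_cycles=4.)  # complex, (2, n_freqs, n_times)
    return (tfr * tfr.conj()).real  # squared magnitude, i.e. power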
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
"""Compute time freq decomposition with continuous wavelet transform
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
Ws : list of array
Wavelets time series
use_fft : bool
Use FFT for convolutions
mode : 'same' | 'valid' | 'full'
Convention for convolution
decim : int
Temporal decimation factor
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
n_signals, n_times = X[:, ::decim].shape
n_frequencies = len(Ws)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr[..., ::decim]
return tfrs
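# A minimal usage sketch (illustrative helper): cwt with user-provided wavelets,
# here built with `morlet` above, decimating the output in time by a factor of 2.
def _example_cwt_with_custom_wavelets(sfreq=200., n_times=600):
    rng = np.random.RandomState(3)
    X = rng.randn(1, n_times)
    Ws = morlet(sfreq, np.array([10., 20.]), n_cycles=5.)
    return cwt(X, Ws, use_fft=True, decim=2)  # shape (1, 2, n_times // 2)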
def _time_frequency(X, Ws, use_fft, decim):
"""Aux of time_frequency for parallel computing over channels
"""
n_epochs, n_times = X.shape
n_times = n_times // decim + bool(n_times % decim)
n_frequencies = len(Ws)
psd = np.zeros((n_frequencies, n_times)) # PSD
plf = np.zeros((n_frequencies, n_times), np.complex) # phase lock
mode = 'same'
if use_fft:
tfrs = _cwt_fft(X, Ws, mode)
else:
tfrs = _cwt_convolve(X, Ws, mode)
for tfr in tfrs:
tfr = tfr[:, ::decim]
tfr_abs = np.abs(tfr)
psd += tfr_abs ** 2
plf += tfr / tfr_abs
psd /= n_epochs
plf = np.abs(plf) / n_epochs
return psd, plf
@verbose
def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
baseline=None, baseline_mode='ratio', times=None,
decim=1, n_jobs=1, zero_mean=False, verbose=None):
"""Compute time-frequency power on single epochs
Parameters
----------
data : array of shape [n_epochs, n_channels, n_times]
The epochs
sfreq : float
Sampling rate
frequencies : array-like
The frequencies
use_fft : bool
Use the FFT for convolutions or not.
n_cycles : float | array of float
Number of cycles in the Morlet wavelet. Fixed number
or one per frequency.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None), the entire time
        interval is used.
baseline_mode : None | 'ratio' | 'zscore'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
times : array
Required to define baseline
decim : int
Temporal decimation factor
n_jobs : int
The number of epochs to process at the same time
zero_mean : bool
Make sure the wavelets are zero mean.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : 4D array
Power estimate (Epochs x Channels x Frequencies x Timepoints).
"""
mode = 'same'
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
logger.info("Computing time-frequency power on single epochs...")
power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
dtype=np.float)
# Package arguments for `cwt` here to minimize omissions where only one of
# the two calls below is updated with new function arguments.
cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
if n_jobs == 1:
for k, e in enumerate(data):
x = cwt(e, **cwt_kw)
power[k] = (x * x.conj()).real
else:
# Precompute tf decompositions in parallel
tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
for k, tfr in enumerate(tfrs):
power[k] = (tfr * tfr.conj()).real
# Run baseline correction. Be sure to decimate the times array as well if
# needed.
if times is not None:
times = times[::decim]
power = rescale(power, times, baseline, baseline_mode, copy=False)
return power
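# A minimal usage sketch (illustrative helper, assumed typical usage): per-epoch
# power on synthetic data with a ratio baseline taken from the first 100 ms.
def _example_single_trial_power(sfreq=200., n_epochs=3, n_channels=2,
                                n_times=400):
    rng = np.random.RandomState(42)
    data = rng.randn(n_epochs, n_channels, n_times)
    times = np.arange(n_times) / sfreq
    freqs = np.arange(6., 20., 2.)
    power = single_trial_power(data, sfreq, freqs, n_cycles=4,
                               baseline=(None, 0.1), baseline_mode='ratio',
                               times=times)
    return power  # shape (n_epochs, n_channels, n_freqs, n_times)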
def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7,
decim=1, n_jobs=1, zero_mean=False):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with Morlet wavelets
Parameters
----------
data : array
3D array of shape [n_epochs, n_channels, n_times]
sfreq : float
sampling Frequency
frequencies : array
Array of frequencies of interest
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
decim: int
Temporal decimation factor
n_jobs : int
The number of CPUs used in parallel. All CPUs are used in -1.
Requires joblib package.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
    power : 3D array
        Induced power (Channels x Frequencies x Timepoints).
        Squared amplitude of time-frequency coefficients.
    phase_lock : 3D array
        Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
"""
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
psd = np.empty((n_channels, n_frequencies, n_times))
plf = np.empty((n_channels, n_frequencies, n_times))
# Separate to save memory for n_jobs=1
parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim)
for c in range(n_channels))
for c, (psd_c, plf_c) in enumerate(psd_plf):
psd[c, :, :], plf[c, :, :] = psd_c, plf_c
return psd, plf
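# A minimal usage sketch (illustrative helper for this private function):
# induced power and inter-trial phase locking from synthetic epochs.
def _example_induced_power(sfreq=200., n_epochs=5, n_channels=2, n_times=400):
    rng = np.random.RandomState(1)
    data = rng.randn(n_epochs, n_channels, n_times)
    freqs = np.arange(8., 16., 2.)
    power, itc = _induced_power_cwt(data, sfreq, freqs, n_cycles=4)
    return power, itc  # each (n_channels, n_freqs, n_times); itc within [0, 1]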
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB):
"""Aux Function to prepare tfr computation"""
from ..viz.utils import _setup_vmin_vmax
if mode is not None and baseline is not None:
logger.info("Applying baseline correction '%s' during %s" %
(mode, baseline))
data = rescale(data.copy(), times, baseline, mode)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
times = times[itmin:itmax]
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
freqs = freqs[ifmin:ifmax]
# crop data
data = data[:, ifmin:ifmax, itmin:itmax]
times *= 1e3
if dB:
data = 10 * np.log10((data * data.conj()).real)
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
return data, times, freqs, vmin, vmax
class AverageTFR(ContainsMixin, UpdateChannelsMixin):
"""Container for Time-Frequency data
Can for example store induced power at sensor level or intertrial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None
Comment on the data, e.g., the experimental condition.
Defaults to None.
method : str | None
Comment on the method used to compute the data, e.g., morlet wavelet.
Defaults to None.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
ch_names : list
The names of the channels.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None):
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = times
self.freqs = freqs
self.nave = nave
self.comment = comment
self.method = method
@property
def ch_names(self):
return self.info['ch_names']
def crop(self, tmin=None, tmax=None, copy=False):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
copy : bool
If False epochs is cropped in place.
"""
inst = self if not copy else self.copy()
mask = _time_mask(inst.times, tmin, tmax)
inst.times = inst.times[mask]
inst.data = inst.data[..., mask]
return inst
@verbose
def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True,
title=None, axes=None, verbose=None):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None), the entire time
            interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value on the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value on the color scale. If vmax is None, the data
            maximum value is used.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
data = self.data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB)
tmin, tmax = times[0], times[-1]
if isinstance(axes, plt.Axes):
axes = [axes]
if isinstance(axes, list) and len(axes) != len(picks):
raise RuntimeError('There must be an axes for each picked '
'channel.')
if colorbar:
logger.warning('Cannot draw colorbar for user defined axes.')
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
_imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, ylim=None,
tfr=data[idx: idx + 1], freq=freqs,
x_label='Time (ms)', y_label='Frequency (Hz)',
colorbar=False, picker=False, cmap=cmap)
if title:
fig.suptitle(title)
if show:
plt.show()
return fig
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True,
border='none', fig_facecolor='k', font_color='w'):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None), the entire time
            interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value on the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value on the color scale. If vmax is None, the data
            maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color: str | obj
The color of tick labels in the colorbar. Defaults to white.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr, _plot_topo
import matplotlib.pyplot as plt
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
if picks is not None:
data = data[picks]
info = pick_info(info, picks)
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB)
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap)
fig = _plot_topo(info=info, times=times,
show_func=imshow, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border=border,
x_label='Time (ms)', y_label='Frequency (Hz)',
fig_facecolor=fig_facecolor,
font_color=font_color)
if show:
plt.show()
return fig
def _check_compat(self, tfr):
"""checks that self and tfr have the same time-frequency ranges"""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
def __iadd__(self, tfr):
self._check_compat(tfr)
self.data += tfr.data
return self
def __sub__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
def __isub__(self, tfr):
self._check_compat(tfr)
self.data -= tfr.data
return self
def copy(self):
"""Return a copy of the instance."""
return deepcopy(self)
def __repr__(self):
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
return "<AverageTFR | %s>" % s
def apply_baseline(self, baseline, mode='mean'):
"""Baseline correct the data
Parameters
----------
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None), the entire time
            interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
"""
self.data = rescale(self.data, self.times, baseline, mode, copy=False)
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
layout=None, vmin=None, vmax=None, cmap='RdBu_r',
sensors=True, colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head', head_pos=None):
"""Plot topographic maps of time-frequency intervals of TFR data
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
            power = [power - mean(power_baseline)] / std(power_baseline)).
            If None, no baseline correction is performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable
            The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
            The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | dict | None
The outlines to be drawn. If 'head', a head scheme will be drawn.
If dict, each key refers to a tuple of x and y positions.
The values in 'mask_pos' will serve as image mask. If None, nothing
will be drawn. Defaults to 'head'. If dict, the 'autoshrink' (bool)
field will trigger automated shrinking of the positions due to
points outside the outline. Moreover, a matplotlib patch object can
be passed for advanced masking options, either directly or as a
function that returns patches (required for multi-axis plots).
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size,
cbar_fmt=cbar_fmt, show_names=show_names,
title=title, axes=axes, show=show,
outlines=outlines, head_pos=head_pos)
def save(self, fname, overwrite=False):
"""Save TFR object to hdf5 file
Parameters
----------
fname : str
            The file name, which should end with -tfr.h5.
overwrite : bool
            If True, overwrite file (if it exists). Defaults to False.
"""
write_tfrs(fname, self, overwrite=overwrite)
def _prepare_write_tfr(tfr, condition):
"""Aux function"""
return (condition, dict(times=tfr.times, freqs=tfr.freqs,
data=tfr.data, info=tfr.info, nave=tfr.nave,
comment=tfr.comment, method=tfr.method))
def write_tfrs(fname, tfr, overwrite=False):
"""Write a TFR dataset to hdf5.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5
tfr : AverageTFR instance, or list of AverageTFR instances
The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed.
overwrite : bool
If True, overwrite file (if it exists). Defaults to False.
See Also
--------
read_tfrs
Notes
-----
.. versionadded:: 0.9.0
"""
out = []
if not isinstance(tfr, (list, tuple)):
tfr = [tfr]
for ii, tfr_ in enumerate(tfr):
comment = ii if tfr_.comment is None else tfr_.comment
out.append(_prepare_write_tfr(tfr_, condition=comment))
write_hdf5(fname, out, overwrite=overwrite)
def read_tfrs(fname, condition=None):
"""
Read TFR datasets from hdf5 file.
Parameters
----------
fname : string
        The file name, which should end with -tfr.h5.
condition : int or str | list of int or str | None
The condition to load. If None, all conditions will be returned.
Defaults to None.
See Also
--------
write_tfrs
Returns
-------
tfrs : list of instances of AverageTFR | instance of AverageTFR
Depending on `condition` either the TFR object or a list of multiple
TFR objects.
Notes
-----
.. versionadded:: 0.9.0
"""
check_fname(fname, 'tfr', ('-tfr.h5',))
logger.info('Reading %s ...' % fname)
tfr_data = read_hdf5(fname)
if condition is not None:
tfr_dict = dict(tfr_data)
if condition not in tfr_dict:
keys = ['%s' % k for k in tfr_dict]
raise ValueError('Cannot find condition ("{0}") in this file. '
                             'I can give you "{1}"'
.format(condition, " or ".join(keys)))
out = AverageTFR(**tfr_dict[condition])
else:
out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
return out
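# Hypothetical round-trip sketch for the two functions above (assumes an
# AverageTFR instance ``power`` whose ``comment`` attribute is 'faces' and a
# writable working directory; the file name is an illustrative choice):
#
#     write_tfrs('faces-tfr.h5', power, overwrite=True)
#     power_read = read_tfrs('faces-tfr.h5', condition='faces')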
def tfr_morlet(inst, freqs, n_cycles, use_fft=False,
return_itc=True, decim=1, n_jobs=1):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
use_fft : bool
        Use FFT-based convolution or not.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Must be ``False`` for evoked data.
decim : int
The decimation factor on the time axis. To reduce memory usage.
n_jobs : int
The number of jobs to run in parallel.
Returns
-------
power : instance of AverageTFR
The averaged power.
itc : instance of AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
tfr_multitaper, tfr_stockwell
"""
data = _get_data(inst, return_itc)
picks = pick_types(inst.info, meg=True, eeg=True)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
power, itc = _induced_power_cwt(data, sfreq=info['sfreq'],
frequencies=freqs,
n_cycles=n_cycles, n_jobs=n_jobs,
use_fft=use_fft, decim=decim,
zero_mean=True)
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave, method='morlet-power')
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
method='morlet-itc'))
return out
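# Hypothetical usage sketch for tfr_morlet (assumes ``epochs`` is an existing
# mne.Epochs instance; the frequency grid, n_cycles and decim values are
# illustrative assumptions, not recommendations):
#
#     freqs = np.arange(6., 30., 2.)
#     power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=freqs / 2.,
#                             return_itc=True, decim=3, n_jobs=1)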
@verbose
def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
use_fft=True, n_cycles=7, decim=1, n_jobs=1,
zero_mean=True, verbose=None):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with DPSS wavelets
Parameters
----------
data : np.ndarray, shape (n_epochs, n_channels, n_times)
The input data.
sfreq : float
        The sampling frequency.
frequencies : np.ndarray, shape (n_frequencies,)
Array of frequencies of interest
time_bandwidth : float
Time x (Full) Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions. Defaults to True.
n_cycles : float | np.ndarray shape (n_frequencies,)
Number of cycles. Fixed number or one per frequency. Defaults to 7.
    decim : int
Temporal decimation factor. Defaults to 1.
n_jobs : int
        The number of CPUs used in parallel. All CPUs are used if set to -1.
Requires joblib package. Defaults to 1.
zero_mean : bool
Make sure the wavelets are zero mean. Defaults to True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : np.ndarray, shape (n_channels, n_frequencies, n_times)
Induced power. Squared amplitude of time-frequency coefficients.
itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
Phase locking value.
"""
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
n_frequencies = len(frequencies)
logger.info('Multitaper time-frequency analysis for %d frequencies',
n_frequencies)
# Precompute wavelets for given frequency range to save time
Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, zero_mean=zero_mean)
n_taps = len(Ws)
logger.info('Using %d tapers', n_taps)
n_times_wavelets = Ws[0][0].shape[0]
if n_times <= n_times_wavelets:
warnings.warn("Time windows are as long or longer than the epoch. "
"Consider reducing n_cycles.")
psd = np.zeros((n_channels, n_frequencies, n_times))
itc = np.zeros((n_channels, n_frequencies, n_times))
parallel, my_time_frequency, _ = parallel_func(_time_frequency,
n_jobs)
for m in range(n_taps):
psd_itc = parallel(my_time_frequency(data[:, c, :],
Ws[m], use_fft, decim)
for c in range(n_channels))
for c, (psd_c, itc_c) in enumerate(psd_itc):
psd[c, :, :] += psd_c
itc[c, :, :] += itc_c
psd /= n_taps
itc /= n_taps
return psd, itc
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0, use_fft=True,
return_itc=True, decim=1, n_jobs=1):
"""Compute Time-Frequency Representation (TFR) using DPSS wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
The time-window length is thus T = n_cycles / freq.
time_bandwidth : float, (optional)
Time x (Full) Bandwidth product. Should be >= 2.0.
Choose this along with n_cycles to get desired frequency resolution.
The number of good tapers (least leakage from far away frequencies)
is chosen automatically based on this to floor(time_bandwidth - 1).
Default is 4.0 (3 good tapers).
E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
use_fft : bool
        Use FFT-based convolution or not.
Defaults to True.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Defaults to True.
decim : int
The decimation factor on the time axis. To reduce memory usage.
        Note that this is brute-force decimation; no anti-aliasing is done.
Defaults to 1.
n_jobs : int
The number of jobs to run in parallel. Defaults to 1.
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
    tfr_morlet, tfr_stockwell
Notes
-----
.. versionadded:: 0.9.0
"""
data = _get_data(inst, return_itc)
picks = pick_types(inst.info, meg=True, eeg=True)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
frequencies=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth,
use_fft=use_fft, decim=decim,
n_jobs=n_jobs, zero_mean=True,
verbose='INFO')
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave,
                     method='multitaper-power')
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
                               method='multitaper-itc'))
return out
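# Hypothetical usage sketch for tfr_multitaper (assumes ``epochs`` is an
# existing mne.Epochs instance; time_bandwidth=4.0 keeps the default three
# tapers, the remaining values are illustrative assumptions):
#
#     freqs = np.arange(10., 50., 2.)
#     power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
#                                 time_bandwidth=4.0, return_itc=True)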
| bsd-3-clause |
Peratham/tensorlib | doc/sphinxext/gen_rst.py | 11 | 38957 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import warnings
import glob
import sys
import gzip
import posixpath
import subprocess
from textwrap import dedent
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
            # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_single_localization_001.png': (1, 250),
'plot_multiple_localization_001.png': (1, 250),
'plot_overfeat_layer1_filters_001.png': (1, 250),
'plot_mnist_generator_001.png': (1, 250),
'plot_asirra_dataset_001.png': (1, 250),
}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
lines = open(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery",
"Please check your example's layout",
" and make sure it's correct")
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
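# Minimal sketch of how extract_docstring is consumed elsewhere in this script
# (the example file name is an assumption, not a file shipped with the repo):
#
#     docstring, first_par, end_row = extract_docstring('plot_example.py')
#     # end_row is the first line after the module docstring and is later used
#     # as the starting line of the literalinclude in the generated rst.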
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
width: 0px;
overflow: hidden;
}
</style>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = open(example_file).readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif ((tok_type == 'STRING') and check_docstring):
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
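# Minimal usage sketch (assuming 'examples' is the gallery source directory):
# returns the .py file names ordered by code length excluding the docstring.
#
#     ordered = line_count_sort(os.listdir('examples'), 'examples')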
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer">
<div class="docstringWrapper">
""")
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
<p>%s
</p></div>
</div>
""" % (ref_name, snippet))
return ''.join(out)
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(dir, 'images', 'thumb')):
os.makedirs(os.path.join(dir, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(dir, dir, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (dir, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', dir)
ex_file.write(_thumbnail_div(dir, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
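# Minimal sketch: for 'numpy.core.numeric' and 'asarray' the loop above keeps
# shortening the path while the import still succeeds, so this is expected to
# return 'numpy' (behaviour assumed for a standard numpy install):
#
#     short = get_short_module_name('numpy.core.numeric', 'asarray')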
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
time_m = 0
time_s = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_mngr.num)
plt.savefig(image_path % fig_mngr.num)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/dev/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to
# _build/html/dev/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
example_code_obj = identify_names(open(example_file).read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
try:
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
'http://matplotlib.org')
doc_resolvers['numpy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/numpy-1.6.0')
doc_resolvers['scipy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/scipy-0.11.0/reference')
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding due to a URL Error: \n")
print(e.args)
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e129.py | 2 | 5359 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
"""
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[20, 20, 20, 20, 20],
max_input_power=None,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': BLSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
],
layer_changes={
2001: {
'remove_from': -3,
'new_layers':
[
{
'type': BLSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
},
4001: {
'remove_from': -3,
'new_layers':
[
{
'type': BLSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
}
}
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=5000)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
| mit |
zak-k/iris | lib/iris/tests/test_quickplot.py | 4 | 7721 | # (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Tests the high-level plotting interface.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import iris.tests.test_plot as test_plot
import iris
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
import iris.quickplot as qplt
# Caches _load_theta so subsequent calls are faster
def cache(fn, cache={}):
def inner(*args, **kwargs):
key = "result"
if not cache:
cache[key] = fn(*args, **kwargs)
return cache[key]
return inner
@cache
def _load_theta():
path = tests.get_data_path(('PP', 'COLPEX', 'theta_and_orog_subset.pp'))
theta = iris.load_cube(path, 'air_potential_temperature')
# Improve the unit
theta.units = 'K'
return theta
@tests.skip_data
@tests.skip_plot
class TestQuickplotCoordinatesGiven(test_plot.TestPlotCoordinatesGiven):
def setUp(self):
tests.GraphicsTest.setUp(self)
filename = tests.get_data_path(('PP', 'COLPEX', 'theta_and_orog_subset.pp'))
self.cube = test_plot.load_cube_once(filename, 'air_potential_temperature')
self.draw_module = iris.quickplot
self.contourf = test_plot.LambdaStr('iris.quickplot.contourf', lambda cube, *args, **kwargs:
iris.quickplot.contourf(cube, *args, **kwargs))
self.contour = test_plot.LambdaStr('iris.quickplot.contour', lambda cube, *args, **kwargs:
iris.quickplot.contour(cube, *args, **kwargs))
self.points = test_plot.LambdaStr('iris.quickplot.points', lambda cube, *args, **kwargs:
iris.quickplot.points(cube, c=cube.data, *args, **kwargs))
self.plot = test_plot.LambdaStr('iris.quickplot.plot', lambda cube, *args, **kwargs:
iris.quickplot.plot(cube, *args, **kwargs))
self.results = {'yx': (
[self.contourf, ['grid_latitude', 'grid_longitude']],
[self.contourf, ['grid_longitude', 'grid_latitude']],
[self.contour, ['grid_latitude', 'grid_longitude']],
[self.contour, ['grid_longitude', 'grid_latitude']],
[self.points, ['grid_latitude', 'grid_longitude']],
[self.points, ['grid_longitude', 'grid_latitude']],
),
'zx': (
[self.contourf, ['model_level_number', 'grid_longitude']],
[self.contourf, ['grid_longitude', 'model_level_number']],
[self.contour, ['model_level_number', 'grid_longitude']],
[self.contour, ['grid_longitude', 'model_level_number']],
[self.points, ['model_level_number', 'grid_longitude']],
[self.points, ['grid_longitude', 'model_level_number']],
),
'tx': (
[self.contourf, ['time', 'grid_longitude']],
[self.contourf, ['grid_longitude', 'time']],
[self.contour, ['time', 'grid_longitude']],
[self.contour, ['grid_longitude', 'time']],
[self.points, ['time', 'grid_longitude']],
[self.points, ['grid_longitude', 'time']],
),
'x': (
[self.plot, ['grid_longitude']],
),
'y': (
[self.plot, ['grid_latitude']],
),
}
@tests.skip_data
@tests.skip_plot
class TestLabels(tests.GraphicsTest):
def setUp(self):
super(TestLabels, self).setUp()
self.theta = _load_theta()
def _slice(self, coords):
"""Returns the first cube containing the requested coordinates."""
for cube in self.theta.slices(coords):
break
return cube
def _small(self):
# Use a restricted size so we can make out the detail
cube = self._slice(['model_level_number', 'grid_longitude'])
return cube[:5, :5]
def test_contour(self):
qplt.contour(self._small())
self.check_graphic()
qplt.contourf(self._small(), coords=['model_level_number', 'grid_longitude'])
self.check_graphic()
def test_contourf(self):
qplt.contourf(self._small())
cube = self._small()
iplt.orography_at_points(cube)
self.check_graphic()
qplt.contourf(self._small(), coords=['model_level_number', 'grid_longitude'])
self.check_graphic()
qplt.contourf(self._small(), coords=['grid_longitude', 'model_level_number'])
self.check_graphic()
def test_contourf_nameless(self):
cube = self._small()
cube.standard_name = None
qplt.contourf(cube, coords=['grid_longitude', 'model_level_number'])
self.check_graphic()
def test_pcolor(self):
qplt.pcolor(self._small())
self.check_graphic()
def test_pcolormesh(self):
qplt.pcolormesh(self._small())
#cube = self._small()
#iplt.orography_at_bounds(cube)
self.check_graphic()
def test_map(self):
cube = self._slice(['grid_latitude', 'grid_longitude'])
qplt.contour(cube)
self.check_graphic()
        # check that the result of adding 360 to the data is almost identical
lon = cube.coord('grid_longitude')
lon.points = lon.points + 360
qplt.contour(cube)
self.check_graphic()
def test_alignment(self):
cube = self._small()
qplt.contourf(cube)
#qplt.outline(cube)
qplt.points(cube)
self.check_graphic()
@tests.skip_data
@tests.skip_plot
class TestTimeReferenceUnitsLabels(tests.GraphicsTest):
def setUp(self):
super(TestTimeReferenceUnitsLabels, self).setUp()
path = tests.get_data_path(('PP', 'aPProt1', 'rotatedMHtimecube.pp'))
self.cube = iris.load_cube(path)[:, 0, 0]
def test_reference_time_units(self):
# units should not be displayed for a reference time
qplt.plot(self.cube.coord('time'), self.cube)
plt.gcf().autofmt_xdate()
self.check_graphic()
def test_not_reference_time_units(self):
# units should be displayed for other time coordinates
qplt.plot(self.cube.coord('forecast_period'), self.cube)
self.check_graphic()
if __name__ == "__main__":
tests.main()
| gpl-3.0 |
waynenilsen/statsmodels | statsmodels/sandbox/examples/ex_kaplan_meier.py | 33 | 2838 | #An example for the Kaplan-Meier estimator
from __future__ import print_function
from statsmodels.compat.python import lrange
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.sandbox.survival2 import KaplanMeier
#Getting the strike data as an array
dta = sm.datasets.strikes.load()
print('basic data')
print('\n')
dta = list(dta.values()[-1])
print(dta[lrange(5),:])
print('\n')
#Create the KaplanMeier object and fit the model
km = KaplanMeier(dta,0)
km.fit()
#show the results
km.plot()
print('basic model')
print('\n')
km.summary()
print('\n')
#Multiple survival curves
km2 = KaplanMeier(dta,0,exog=1)
km2.fit()
print('more than one curve')
print('\n')
km2.summary()
print('\n')
km2.plot()
#with censoring
censoring = np.ones_like(dta[:,0])
censoring[dta[:,0] > 80] = 0
dta = np.c_[dta,censoring]
print('with censoring')
print('\n')
print(dta[lrange(5),:])
print('\n')
km3 = KaplanMeier(dta,0,exog=1,censoring=2)
km3.fit()
km3.summary()
print('\n')
km3.plot()
#Test for difference of survival curves
log_rank = km3.test_diff([0.0645,-0.03957])
print('log rank test')
print('\n')
print(log_rank)
print('\n')
#The zeroth element of log_rank is the chi-square test statistic
#for the difference between the survival curves for exog = 0.0645
#and exog = -0.03957, the index one element is the degrees of freedom for
#the test, and the index two element is the p-value for the test
wilcoxon = km3.test_diff([0.0645,-0.03957], rho=1)
print('Wilcoxon')
print('\n')
print(wilcoxon)
print('\n')
#Same info as log_rank, but for Peto and Peto modification to the
#Gehan-Wilcoxon test
#User specified functions for tests
#A wider range of weights can be accessed by using the 'weight' parameter
#for the test_diff method
#For example, if the desired weights are S(t)*(1-S(t)), where S(t) is a pooled
#estimate for the survival function, this could be computed by doing
def weights(t):
    #must accept one argument, even though it is not used here
s = KaplanMeier(dta,0,censoring=2)
s.fit()
s = s.results[0][0]
s = s * (1 - s)
return s
#KaplanMeier provides an array of times to the weighting function
#internally, so the weighting function must accept one argument
test = km3.test_diff([0.0645,-0.03957], weight=weights)
print('user specified weights')
print('\n')
print(test)
print('\n')
#Groups with nan names
#These can be handled by passing the data to KaplanMeier as an array of strings
groups = np.ones_like(dta[:,1])
groups = groups.astype('S4')
groups[dta[:,1] > 0] = 'high'
groups[dta[:,1] <= 0] = 'low'
dta = dta.astype('S4')
dta[:,1] = groups
print('with nan group names')
print('\n')
print(dta[lrange(5),:])
print('\n')
km4 = KaplanMeier(dta,0,exog=1,censoring=2)
km4.fit()
km4.summary()
print('\n')
km4.plot()
#show all the plots
plt.show()
| bsd-3-clause |
rubennj/pvlib-python | docs/sphinx/sphinxext/numpydoc/docscrape_sphinx.py | 41 | 9437 | from __future__ import division, absolute_import, print_function
import sys, re, inspect, textwrap, pydoc
import sphinx
import collections
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
        # Return an empty signature unconditionally; the remaining lines are
        # never reached, so no signature is emitted from the docstring here.
        return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self):
out = []
if self['Returns']:
out += self._str_field_list('Returns')
out += ['']
for param, param_type, desc in self['Returns']:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
Generate a member listing, autosummary:: table where possible,
and a table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns()
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
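# Minimal usage sketch (added; not part of the original module): render the
# numpydoc-style docstring of an arbitrary callable as Sphinx reST. Assumes
# numpy is importable; any object with a numpydoc docstring would do.
if __name__ == '__main__':
    import numpy as np
    print(get_doc_object(np.mean, config={'use_plots': False}))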
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/matplotlib/patches.py | 6 | 148732 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map, zip
import math
import matplotlib as mpl
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.colors as colors
from matplotlib import docstring
import matplotlib.transforms as transforms
from matplotlib.path import Path
import matplotlib.lines as mlines
from matplotlib.bezier import split_bezier_intersecting_with_closedpath
from matplotlib.bezier import get_intersection, inside_circle, get_parallels
from matplotlib.bezier import make_wedged_bezier2
from matplotlib.bezier import split_path_inout, get_cos_sin
from matplotlib.bezier import make_path_regular, concatenate_paths
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object definition
docstring.interpd.update(Patch="""
================= ==============================================
Property Description
================= ==============================================
alpha float
animated [True | False]
antialiased or aa [True | False]
capstyle ['butt' | 'round' | 'projecting']
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
edgecolor or ec any matplotlib color
facecolor or fc any matplotlib color
figure a matplotlib.figure.Figure instance
fill [True | False]
hatch unknown
joinstyle ['miter' | 'round' | 'bevel']
label any string
linewidth or lw float
lod [True | False]
transform a matplotlib.transform transformation instance
visible [True | False]
zorder any number
================= ==============================================
""")
_patch_alias_map = {
'antialiased': ['aa'],
'edgecolor': ['ec'],
'facecolor': ['fc'],
'linewidth': ['lw'],
'linestyle': ['ls']
}
class Patch(artist.Artist):
"""
A patch is a 2D artist with a face color and an edge color.
If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
are *None*, they default to their rc params setting.
"""
zorder = 1
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
# Whether to draw an edge by default. Set on a
# subclass-by-subclass basis.
_edge_default = False
def __str__(self):
return str(self.__class__).split('.')[-1]
def __init__(self,
edgecolor=None,
facecolor=None,
color=None,
linewidth=None,
linestyle=None,
antialiased=None,
hatch=None,
fill=True,
capstyle=None,
joinstyle=None,
**kwargs):
"""
The following kwarg properties are supported
%(Patch)s
"""
artist.Artist.__init__(self)
if linewidth is None:
linewidth = mpl.rcParams['patch.linewidth']
if linestyle is None:
linestyle = "solid"
if capstyle is None:
capstyle = 'butt'
if joinstyle is None:
joinstyle = 'miter'
if antialiased is None:
antialiased = mpl.rcParams['patch.antialiased']
self._fill = True # needed for set_facecolor call
if color is not None:
if (edgecolor is not None or facecolor is not None):
import warnings
                warnings.warn("Setting the 'color' property will override "
"the edgecolor or facecolor properties. ")
self.set_color(color)
else:
self.set_edgecolor(edgecolor)
self.set_facecolor(facecolor)
# unscaled dashes. Needed to scale dash patterns by lw
self._us_dashes = None
self._linewidth = 0
self.set_fill(fill)
self.set_linestyle(linestyle)
self.set_linewidth(linewidth)
self.set_antialiased(antialiased)
self.set_hatch(hatch)
self.set_capstyle(capstyle)
self.set_joinstyle(joinstyle)
self._combined_transform = transforms.IdentityTransform()
if len(kwargs):
self.update(kwargs)
def get_verts(self):
"""
Return a copy of the vertices used in this patch
If the patch contains Bezier curves, the curves will be
interpolated by line segments. To access the curves as
curves, use :meth:`get_path`.
"""
trans = self.get_transform()
path = self.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
return []
def _process_radius(self, radius):
if radius is not None:
return radius
if cbook.is_numlike(self._picker):
_radius = self._picker
else:
if self.get_edgecolor()[3] == 0:
_radius = 0
else:
_radius = self.get_linewidth()
return _radius
def contains(self, mouseevent, radius=None):
"""Test whether the mouse event occurred in the patch.
Returns T/F, {}
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
radius = self._process_radius(radius)
inside = self.get_path().contains_point(
(mouseevent.x, mouseevent.y), self.get_transform(), radius)
return inside, {}
def contains_point(self, point, radius=None):
"""
Returns *True* if the given point is inside the path
(transformed with its transform attribute).
"""
radius = self._process_radius(radius)
return self.get_path().contains_point(point,
self.get_transform(),
radius)
def update_from(self, other):
"""
Updates this :class:`Patch` from the properties of *other*.
"""
artist.Artist.update_from(self, other)
# For some properties we don't need or don't want to go through the
# getters/setters, so we just copy them directly.
self._edgecolor = other._edgecolor
self._facecolor = other._facecolor
self._fill = other._fill
self._hatch = other._hatch
# copy the unscaled dash pattern
self._us_dashes = other._us_dashes
self.set_linewidth(other._linewidth) # also sets dash properties
self.set_transform(other.get_data_transform())
def get_extents(self):
"""
Return a :class:`~matplotlib.transforms.Bbox` object defining
the axis-aligned extents of the :class:`Patch`.
"""
return self.get_path().get_extents(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the :class:`Patch`.
"""
return self.get_patch_transform() + artist.Artist.get_transform(self)
def get_data_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
maps data coordinates to physical coordinates.
"""
return artist.Artist.get_transform(self)
def get_patch_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
takes patch coordinates to data coordinates.
For example, one may define a patch of a circle which represents a
radius of 5 by providing coordinates for a unit circle, and a
transform which scales the coordinates (the patch coordinate) by 5.
"""
return transforms.IdentityTransform()
def get_antialiased(self):
"""
Returns True if the :class:`Patch` is to be drawn with antialiasing.
"""
return self._antialiased
get_aa = get_antialiased
def get_edgecolor(self):
"""
Return the edge color of the :class:`Patch`.
"""
return self._edgecolor
get_ec = get_edgecolor
def get_facecolor(self):
"""
Return the face color of the :class:`Patch`.
"""
return self._facecolor
get_fc = get_facecolor
def get_linewidth(self):
"""
Return the line width in points.
"""
return self._linewidth
get_lw = get_linewidth
def get_linestyle(self):
"""
Return the linestyle. Will be one of ['solid' | 'dashed' |
'dashdot' | 'dotted']
"""
return self._linestyle
get_ls = get_linestyle
def set_antialiased(self, aa):
"""
Set whether to use antialiased rendering
ACCEPTS: [True | False] or None for default
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiased = aa
self.stale = True
def set_aa(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def _set_edgecolor(self, color):
if color is None:
if (mpl.rcParams['patch.force_edgecolor'] or
not self._fill or self._edge_default):
color = mpl.rcParams['patch.edgecolor']
else:
color = 'none'
self._edgecolor = colors.to_rgba(color, self._alpha)
self.stale = True
def set_edgecolor(self, color):
"""
Set the patch edge color
ACCEPTS: mpl color spec, None, 'none', or 'auto'
"""
self._original_edgecolor = color
self._set_edgecolor(color)
def set_ec(self, color):
"""alias for set_edgecolor"""
return self.set_edgecolor(color)
def _set_facecolor(self, color):
if color is None:
color = mpl.rcParams['patch.facecolor']
alpha = self._alpha if self._fill else 0
self._facecolor = colors.to_rgba(color, alpha)
self.stale = True
def set_facecolor(self, color):
"""
Set the patch face color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
self._original_facecolor = color
self._set_facecolor(color)
def set_fc(self, color):
"""alias for set_facecolor"""
return self.set_facecolor(color)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color spec
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
        Set the alpha transparency of the patch.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
self._set_facecolor(self._original_facecolor)
self._set_edgecolor(self._original_edgecolor)
# stale is already True
def set_linewidth(self, w):
"""
Set the patch linewidth in points
ACCEPTS: float or None for default
"""
if w is None:
w = mpl.rcParams['patch.linewidth']
if w is None:
w = mpl.rcParams['axes.linewidth']
self._linewidth = float(w)
# scale the dash pattern by the linewidth
offset, ls = self._us_dashes
self._dashoffset, self._dashes = mlines._scale_dashes(
offset, ls, self._linewidth)
self.stale = True
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the patch linestyle
=========================== =================
linestyle description
=========================== =================
``'-'`` or ``'solid'`` solid line
``'--'`` or ``'dashed'`` dashed line
``'-.'`` or ``'dashdot'`` dash-dotted line
``':'`` or ``'dotted'`` dotted line
=========================== =================
Alternatively a dash tuple of the following form can be provided::
(offset, onoffseq),
where ``onoffseq`` is an even length tuple of on and off ink
in points.
ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |
(offset, on-off-dash-seq) |
``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` |
``' '`` | ``''``]
Parameters
----------
        ls : {'-', '--', '-.', ':'} and more; see description above
The line style.
"""
if ls is None:
ls = "solid"
self._linestyle = ls
        # get the unscaled dash pattern
offset, ls = self._us_dashes = mlines._get_dash_pattern(ls)
# scale the dash pattern by the linewidth
self._dashoffset, self._dashes = mlines._scale_dashes(
offset, ls, self._linewidth)
self.stale = True
def set_ls(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_fill(self, b):
"""
Set whether to fill the patch
ACCEPTS: [True | False]
"""
self._fill = bool(b)
self._set_facecolor(self._original_facecolor)
self._set_edgecolor(self._original_edgecolor)
self.stale = True
def get_fill(self):
'return whether fill is set'
return self._fill
# Make fill a property so as to preserve the long-standing
# but somewhat inconsistent behavior in which fill was an
# attribute.
fill = property(get_fill, set_fill)
def set_capstyle(self, s):
"""
Set the patch capstyle
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_capstyle passed "%s";\n' % (s,) +
'valid capstyles are %s' % (self.validCap,))
self._capstyle = s
self.stale = True
def get_capstyle(self):
"Return the current capstyle"
return self._capstyle
def set_joinstyle(self, s):
"""
Set the patch joinstyle
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_joinstyle passed "%s";\n' % (s,) +
'valid joinstyles are %s' % (self.validJoin,))
self._joinstyle = s
self.stale = True
def get_joinstyle(self):
"Return the current joinstyle"
return self._joinstyle
def set_hatch(self, hatch):
"""
Set the hatching pattern
*hatch* can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
x - crossed diagonal
o - small circle
O - large circle
. - dots
* - stars
Letters can be combined, in which case all the specified
hatchings are done. If same letter repeats, it increases the
density of hatching of that pattern.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
ACCEPTS: ['/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*']
"""
self._hatch = hatch
self.stale = True
def get_hatch(self):
'Return the current hatching pattern'
return self._hatch
@allow_rasterization
def draw(self, renderer):
'Draw the :class:`Patch` to the given *renderer*.'
if not self.get_visible():
return
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_dashes(0, self._dashes)
gc.set_capstyle(self._capstyle)
gc.set_joinstyle(self._joinstyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_url(self._url)
gc.set_snap(self.get_snap())
rgbFace = self._facecolor
if rgbFace[3] == 0:
rgbFace = None # (some?) renderers expect this as no-fill signal
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
path = self.get_path()
transform = self.get_transform()
tpath = transform.transform_path_non_affine(path)
affine = transform.get_affine()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
renderer.draw_path(gc, tpath, affine, rgbFace)
gc.restore()
renderer.close_group('patch')
self.stale = False
def get_path(self):
"""
Return the path of this patch
"""
raise NotImplementedError('Derived must override')
def get_window_extent(self, renderer=None):
return self.get_path().get_extents(self.get_transform())
patchdoc = artist.kwdoc(Patch)
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
'FancyBboxPatch', 'Patch'):
docstring.interpd.update({k: patchdoc})
# define Patch.__init__ docstring after the class has been added to interpd
docstring.dedent_interpd(Patch.__init__)
class Shadow(Patch):
def __str__(self):
return "Shadow(%s)" % (str(self.patch))
@docstring.dedent_interpd
def __init__(self, patch, ox, oy, props=None, **kwargs):
"""
Create a shadow of the given *patch* offset by *ox*, *oy*.
*props*, if not *None*, is a patch property update dictionary.
If *None*, the shadow will have have the same color as the face,
but darkened.
kwargs are
%(Patch)s
"""
Patch.__init__(self)
self.patch = patch
self.props = props
self._ox, self._oy = ox, oy
self._shadow_transform = transforms.Affine2D()
self._update()
def _update(self):
self.update_from(self.patch)
if self.props is not None:
self.update(self.props)
else:
r, g, b, a = colors.to_rgba(self.patch.get_facecolor())
rho = 0.3
r = rho * r
g = rho * g
b = rho * b
self.set_facecolor((r, g, b, 0.5))
self.set_edgecolor((r, g, b, 0.5))
self.set_alpha(0.5)
def _update_transform(self, renderer):
ox = renderer.points_to_pixels(self._ox)
oy = renderer.points_to_pixels(self._oy)
self._shadow_transform.clear().translate(ox, oy)
def _get_ox(self):
return self._ox
def _set_ox(self, ox):
self._ox = ox
def _get_oy(self):
return self._oy
def _set_oy(self, oy):
self._oy = oy
def get_path(self):
return self.patch.get_path()
def get_patch_transform(self):
return self.patch.get_patch_transform() + self._shadow_transform
def draw(self, renderer):
self._update_transform(renderer)
Patch.draw(self, renderer)
class Rectangle(Patch):
"""
Draw a rectangle with lower left at *xy* = (*x*, *y*) with
specified *width* and *height*.
"""
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*angle*
rotation in degrees (anti-clockwise)
*fill* is a boolean indicating whether to fill the rectangle
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = float(xy[0])
self._y = float(xy[1])
self._width = float(width)
self._height = float(height)
self._angle = float(angle)
# Note: This cannot be calculated until this is added to an Axes
self._rect_transform = transforms.IdentityTransform()
def get_path(self):
"""
Return the vertices of the rectangle
"""
return Path.unit_rectangle()
def _update_patch_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
x = self.convert_xunits(self._x)
y = self.convert_yunits(self._y)
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
bbox = transforms.Bbox.from_bounds(x, y, width, height)
rot_trans = transforms.Affine2D()
rot_trans.rotate_deg_around(x, y, self._angle)
self._rect_transform = transforms.BboxTransformTo(bbox)
self._rect_transform += rot_trans
def get_patch_transform(self):
self._update_patch_transform()
return self._rect_transform
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_xy(self):
"Return the left and bottom coords of the rectangle"
return self._x, self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
self.stale = True
def set_xy(self, xy):
"""
Set the left and bottom coords of the rectangle
ACCEPTS: 2-item sequence
"""
self._x, self._y = xy
self.stale = True
def set_width(self, w):
"""
        Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
self.stale = True
def set_height(self, h):
"""
        Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
self.stale = True
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
        if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
self.stale = True
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
xy = property(get_xy, set_xy)
class RegularPolygon(Patch):
"""
A regular polygon patch.
"""
def __str__(self):
return "Poly%d(%g,%g)" % (self._numVertices, self._xy[0], self._xy[1])
@docstring.dedent_interpd
def __init__(self, xy, numVertices, radius=5, orientation=0,
**kwargs):
"""
Constructor arguments:
*xy*
A length 2 tuple (*x*, *y*) of the center.
*numVertices*
the number of vertices.
*radius*
The distance from the center to each of the vertices.
*orientation*
rotates the polygon (in radians).
Valid kwargs are:
%(Patch)s
"""
self._xy = xy
self._numVertices = numVertices
self._orientation = orientation
self._radius = radius
self._path = Path.unit_regular_polygon(numVertices)
self._poly_transform = transforms.Affine2D()
self._update_transform()
Patch.__init__(self, **kwargs)
def _update_transform(self):
self._poly_transform.clear() \
.scale(self.radius) \
.rotate(self.orientation) \
.translate(*self.xy)
def _get_xy(self):
return self._xy
def _set_xy(self, xy):
self._xy = xy
self._update_transform()
xy = property(_get_xy, _set_xy)
def _get_orientation(self):
return self._orientation
def _set_orientation(self, orientation):
self._orientation = orientation
self._update_transform()
orientation = property(_get_orientation, _set_orientation)
def _get_radius(self):
return self._radius
def _set_radius(self, radius):
self._radius = radius
self._update_transform()
radius = property(_get_radius, _set_radius)
def _get_numvertices(self):
return self._numVertices
def _set_numvertices(self, numVertices):
self._numVertices = numVertices
numvertices = property(_get_numvertices, _set_numvertices)
def get_path(self):
return self._path
def get_patch_transform(self):
self._update_transform()
return self._poly_transform
class PathPatch(Patch):
"""
A general polycurve path patch.
"""
_edge_default = True
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
@docstring.dedent_interpd
def __init__(self, path, **kwargs):
"""
*path* is a :class:`matplotlib.path.Path` object.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
"""
Patch.__init__(self, **kwargs)
self._path = path
def get_path(self):
return self._path
class Polygon(Patch):
"""
A general polygon patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
@docstring.dedent_interpd
def __init__(self, xy, closed=True, **kwargs):
"""
*xy* is a numpy array with shape Nx2.
If *closed* is *True*, the polygon will be closed so the
starting and ending points are the same.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
"""
Patch.__init__(self, **kwargs)
self._closed = closed
self.set_xy(xy)
def get_path(self):
"""
Get the path of the polygon
Returns
-------
path : Path
The :class:`~matplotlib.path.Path` object for
the polygon
"""
return self._path
def get_closed(self):
"""
Returns if the polygon is closed
Returns
-------
closed : bool
If the path is closed
"""
return self._closed
def set_closed(self, closed):
"""
Set if the polygon is closed
Parameters
----------
closed : bool
True if the polygon is closed
"""
if self._closed == bool(closed):
return
self._closed = bool(closed)
self.set_xy(self.get_xy())
self.stale = True
def get_xy(self):
"""
Get the vertices of the path
Returns
-------
vertices : numpy array
The coordinates of the vertices as a Nx2
ndarray.
"""
return self._path.vertices
def set_xy(self, xy):
"""
Set the vertices of the polygon
Parameters
----------
xy : numpy array or iterable of pairs
The coordinates of the vertices as a Nx2
ndarray or iterable of pairs.
"""
xy = np.asarray(xy)
if self._closed:
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
else:
if len(xy) > 2 and (xy[0] == xy[-1]).all():
xy = xy[:-1]
self._path = Path(xy, closed=self._closed)
self.stale = True
_get_xy = get_xy
_set_xy = set_xy
xy = property(
get_xy, set_xy, None,
"""Set/get the vertices of the polygon. This property is
provided for backward compatibility with matplotlib 0.91.x
only. New code should use
:meth:`~matplotlib.patches.Polygon.get_xy` and
:meth:`~matplotlib.patches.Polygon.set_xy` instead.""")
class Wedge(Patch):
"""
Wedge shaped patch.
"""
def __str__(self):
return "Wedge(%g,%g)" % (self.theta1, self.theta2)
@docstring.dedent_interpd
def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
"""
Draw a wedge centered at *x*, *y* center with radius *r* that
sweeps *theta1* to *theta2* (in degrees). If *width* is given,
then a partial wedge is drawn from inner radius *r* - *width*
to outer radius *r*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = center
self.r, self.width = r, width
self.theta1, self.theta2 = theta1, theta2
self._patch_transform = transforms.IdentityTransform()
self._recompute_path()
def _recompute_path(self):
# Inner and outer rings are connected unless the annulus is complete
if abs((self.theta2 - self.theta1) - 360) <= 1e-12:
theta1, theta2 = 0, 360
connector = Path.MOVETO
else:
theta1, theta2 = self.theta1, self.theta2
connector = Path.LINETO
# Form the outer ring
arc = Path.arc(theta1, theta2)
if self.width is not None:
# Partial annulus needs to draw the outer ring
# followed by a reversed and scaled inner ring
v1 = arc.vertices
v2 = arc.vertices[::-1] * float(self.r - self.width) / self.r
v = np.vstack([v1, v2, v1[0, :], (0, 0)])
c = np.hstack([arc.codes, arc.codes, connector, Path.CLOSEPOLY])
c[len(arc.codes)] = connector
else:
# Wedge doesn't need an inner ring
v = np.vstack([arc.vertices, [(0, 0), arc.vertices[0, :], (0, 0)]])
c = np.hstack([arc.codes, [connector, connector, Path.CLOSEPOLY]])
# Shift and scale the wedge to the final location.
v *= self.r
v += np.asarray(self.center)
self._path = Path(v, c)
def set_center(self, center):
self._path = None
self.center = center
self.stale = True
def set_radius(self, radius):
self._path = None
self.r = radius
self.stale = True
def set_theta1(self, theta1):
self._path = None
self.theta1 = theta1
self.stale = True
def set_theta2(self, theta2):
self._path = None
self.theta2 = theta2
self.stale = True
def set_width(self, width):
self._path = None
self.width = width
self.stale = True
def get_path(self):
if self._path is None:
self._recompute_path()
return self._path
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
"""
An arrow patch.
"""
def __str__(self):
return "Arrow()"
_path = Path([
[0.0, 0.1], [0.0, -0.1],
[0.8, -0.1], [0.8, -0.3],
[1.0, 0.0], [0.8, 0.3],
[0.8, 0.1], [0.0, 0.1]],
closed=True)
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=1.0, **kwargs):
"""
Draws an arrow, starting at (*x*, *y*), direction and length
given by (*dx*, *dy*) the width of the arrow is scaled by *width*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
L = np.hypot(dx, dy)
if L != 0:
cx = float(dx) / L
sx = float(dy) / L
else:
# Account for division by zero
cx, sx = 0, 1
trans1 = transforms.Affine2D().scale(L, width)
trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, 0.0, 0.0)
trans3 = transforms.Affine2D().translate(x, y)
trans = trans1 + trans2 + trans3
self._patch_transform = trans.frozen()
def get_path(self):
return self._path
def get_patch_transform(self):
return self._patch_transform
class FancyArrow(Polygon):
"""
Like Arrow, but lets you set head width and head height independently.
"""
_edge_default = True
def __str__(self):
return "FancyArrow()"
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False,
head_width=None, head_length=None, shape='full', overhang=0,
head_starts_at_zero=False, **kwargs):
"""
Constructor arguments
*width*: float (default: 0.001)
width of full arrow tail
*length_includes_head*: [True | False] (default: False)
True if head is to be counted in calculating the length.
*head_width*: float or None (default: 3*width)
total width of the full arrow head
*head_length*: float or None (default: 1.5 * head_width)
length of arrow head
*shape*: ['full', 'left', 'right'] (default: 'full')
draw the left-half, right-half, or full arrow
*overhang*: float (default: 0)
fraction that the arrow is swept back (0 overhang means
triangular shape). Can be negative or greater than one.
*head_starts_at_zero*: [True | False] (default: False)
if True, the head starts being drawn at coordinate 0
instead of ending at coordinate 0.
Other valid kwargs (inherited from :class:`Patch`) are:
%(Patch)s
"""
if head_width is None:
head_width = 3 * width
if head_length is None:
head_length = 1.5 * head_width
distance = np.hypot(dx, dy)
if length_includes_head:
length = distance
else:
length = distance + head_length
if not length:
verts = [] # display nothing if empty
else:
# start by drawing horizontal arrow, point at (0,0)
hw, hl, hs, lw = head_width, head_length, overhang, width
left_half_arrow = np.array([
[0.0, 0.0], # tip
[-hl, -hw / 2.0], # leftmost
[-hl * (1 - hs), -lw / 2.0], # meets stem
[-length, -lw / 2.0], # bottom left
[-length, 0],
])
# if we're not including the head, shift up by head length
if not length_includes_head:
left_half_arrow += [head_length, 0]
            # if the head starts at 0, shift by another half head length
if head_starts_at_zero:
left_half_arrow += [head_length / 2.0, 0]
# figure out the shape, and complete accordingly
if shape == 'left':
coords = left_half_arrow
else:
right_half_arrow = left_half_arrow * [1, -1]
if shape == 'right':
coords = right_half_arrow
elif shape == 'full':
# The half-arrows contain the midpoint of the stem,
# which we can omit from the full arrow. Including it
# twice caused a problem with xpdf.
coords = np.concatenate([left_half_arrow[:-2],
right_half_arrow[-2::-1]])
else:
raise ValueError("Got unknown shape: %s" % shape)
if distance != 0:
cx = float(dx) / distance
sx = float(dy) / distance
else:
#Account for division by zero
cx, sx = 0, 1
M = np.array([[cx, sx], [-sx, cx]])
verts = np.dot(coords, M) + (x + dx, y + dy)
Polygon.__init__(self, list(map(tuple, verts)), closed=True, **kwargs)
docstring.interpd.update({"FancyArrow": FancyArrow.__init__.__doc__})
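# Added usage sketch (not in the original source): an arrow whose stated length
# includes the head could be created as, e.g.,
#     FancyArrow(0, 0, 1, 0.5, width=0.05, head_width=0.15,
#                head_length=0.2, length_includes_head=True)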
class YAArrow(Patch):
"""
Yet another arrow class.
This is an arrow that is defined in display space and has a tip at
*x1*, *y1* and a base at *x2*, *y2*.
"""
def __str__(self):
return "YAArrow()"
@docstring.dedent_interpd
def __init__(self, figure, xytip, xybase,
width=4, frac=0.1, headwidth=12, **kwargs):
"""
Constructor arguments:
*xytip*
(*x*, *y*) location of arrow tip
*xybase*
(*x*, *y*) location the arrow base mid point
*figure*
The :class:`~matplotlib.figure.Figure` instance
(fig.dpi)
*width*
The width of the arrow in points
*frac*
The fraction of the arrow length occupied by the head
*headwidth*
The width of the base of the arrow head in points
Valid kwargs are:
%(Patch)s
"""
self.xytip = xytip
self.xybase = xybase
self.width = width
self.frac = frac
self.headwidth = headwidth
Patch.__init__(self, **kwargs)
# Set self.figure after Patch.__init__, since it sets self.figure to
# None
self.figure = figure
def get_path(self):
# Since this is dpi dependent, we need to recompute the path
# every time.
# the base vertices
x1, y1 = self.xytip
x2, y2 = self.xybase
k1 = self.width * self.figure.dpi / 72. / 2.
k2 = self.headwidth * self.figure.dpi / 72. / 2.
xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)
# a point on the segment 20% of the distance from the tip to the base
theta = math.atan2(y2 - y1, x2 - x1)
r = math.sqrt((y2 - y1) ** 2. + (x2 - x1) ** 2.)
xm = x1 + self.frac * r * math.cos(theta)
ym = y1 + self.frac * r * math.sin(theta)
xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)
xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])
return Path(list(zip(xs, ys)), closed=True)
def get_patch_transform(self):
return transforms.IdentityTransform()
def getpoints(self, x1, y1, x2, y2, k):
"""
For line segment defined by (*x1*, *y1*) and (*x2*, *y2*)
return the points on the line that is perpendicular to the
line and intersects (*x2*, *y2*) and the distance from (*x2*,
*y2*) of the returned points is *k*.
"""
x1, y1, x2, y2, k = list(map(float, (x1, y1, x2, y2, k)))
if y2 - y1 == 0:
return x2, y2 + k, x2, y2 - k
elif x2 - x1 == 0:
return x2 + k, y2, x2 - k, y2
m = (y2 - y1) / (x2 - x1)
pm = -1. / m
a = 1
b = -2 * y2
c = y2 ** 2. - k ** 2. * pm ** 2. / (1. + pm ** 2.)
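        # Derivation note (added for clarity): the sought points satisfy
        # (y3 - y2)**2 * (1 + pm**2) == k**2 * pm**2, i.e. they are the roots
        # of a*y**2 + b*y + c == 0 with the coefficients above; each x3 is then
        # recovered from the perpendicular line as x3 = (y3 - y2)/pm + x2.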
y3a = (-b + math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
x3a = (y3a - y2) / pm + x2
y3b = (-b - math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
x3b = (y3b - y2) / pm + x2
return x3a, y3a, x3b, y3b
class CirclePolygon(RegularPolygon):
"""
A polygon-approximation of a circle patch.
"""
def __str__(self):
return "CirclePolygon(%d,%d)" % self.center
@docstring.dedent_interpd
def __init__(self, xy, radius=5,
resolution=20, # the number of vertices
** kwargs):
"""
Create a circle at *xy* = (*x*, *y*) with given *radius*.
This circle is approximated by a regular polygon with
*resolution* sides. For a smoother circle drawn with splines,
see :class:`~matplotlib.patches.Circle`.
Valid kwargs are:
%(Patch)s
"""
RegularPolygon.__init__(self, xy,
resolution,
radius,
orientation=0,
**kwargs)
class Ellipse(Patch):
"""
A scale-free ellipse.
"""
def __str__(self):
return "Ellipse(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*xy*
center of ellipse
*width*
total length (diameter) of horizontal axis
*height*
total length (diameter) of vertical axis
*angle*
rotation in degrees (anti-clockwise)
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self.center = xy
self.width, self.height = width, height
self.angle = angle
self._path = Path.unit_circle()
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = transforms.IdentityTransform()
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
        makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
center = (self.convert_xunits(self.center[0]),
self.convert_yunits(self.center[1]))
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self.angle) \
.translate(*center)
def get_path(self):
"""
Return the vertices of the rectangle
"""
return self._path
def get_patch_transform(self):
self._recompute_transform()
return self._patch_transform
class Circle(Ellipse):
"""
A circle patch.
"""
def __str__(self):
return "Circle((%g,%g),r=%g)" % (self.center[0],
self.center[1],
self.radius)
@docstring.dedent_interpd
def __init__(self, xy, radius=5, **kwargs):
"""
Create true circle at center *xy* = (*x*, *y*) with given
*radius*. Unlike :class:`~matplotlib.patches.CirclePolygon`
which is a polygonal approximation, this uses Bézier splines
and is much closer to a scale-free circle.
Valid kwargs are:
%(Patch)s
"""
Ellipse.__init__(self, xy, radius * 2, radius * 2, **kwargs)
self.radius = radius
def set_radius(self, radius):
"""
Set the radius of the circle
ACCEPTS: float
"""
self.width = self.height = 2 * radius
self.stale = True
def get_radius(self):
'return the radius of the circle'
return self.width / 2.
radius = property(get_radius, set_radius)
class Arc(Ellipse):
"""
An elliptical arc. Because it performs various optimizations, it
can not be filled.
The arc must be used in an :class:`~matplotlib.axes.Axes`
instance---it can not be added directly to a
:class:`~matplotlib.figure.Figure`---because it is optimized to
only render the segments that are inside the axes bounding box
with high resolution.
"""
def __str__(self):
return "Arc(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0,
theta1=0.0, theta2=360.0, **kwargs):
"""
The following args are supported:
*xy*
center of ellipse
*width*
length of horizontal axis
*height*
length of vertical axis
*angle*
rotation in degrees (anti-clockwise)
*theta1*
starting angle of the arc in degrees
*theta2*
ending angle of the arc in degrees
If *theta1* and *theta2* are not provided, the arc will form a
complete ellipse.
Valid kwargs are:
%(Patch)s
"""
fill = kwargs.setdefault('fill', False)
if fill:
raise ValueError("Arc objects can not be filled")
Ellipse.__init__(self, xy, width, height, angle, **kwargs)
self.theta1 = theta1
self.theta2 = theta2
self._path = Path.arc(self.theta1, self.theta2)
@allow_rasterization
def draw(self, renderer):
"""
Ellipses are normally drawn using an approximation that uses
eight cubic bezier splines. The error of this approximation
is 1.89818e-6, according to this unverified source:
Lancaster, Don. Approximating a Circle or an Ellipse Using
Four Bezier Cubic Splines.
http://www.tinaja.com/glib/ellipse4.pdf
There is a use case where very large ellipses must be drawn
with very high accuracy, and it is too expensive to render the
entire ellipse with enough segments (either splines or line
segments). Therefore, in the case where either radius of the
ellipse is large enough that the error of the spline
approximation will be visible (greater than one pixel offset
from the ideal), a different technique is used.
In that case, only the visible parts of the ellipse are drawn,
with each visible arc using a fixed number of spline segments
(8). The algorithm proceeds as follows:
1. The points where the ellipse intersects the axes bounding
           box are located. (This is done by performing an inverse
transformation on the axes bbox such that it is relative
to the unit circle -- this makes the intersection
calculation much easier than doing rotated ellipse
intersection directly).
This uses the "line intersecting a circle" algorithm
from:
Vince, John. Geometry for Computer Graphics: Formulae,
Examples & Proofs. London: Springer-Verlag, 2005.
2. The angles of each of the intersection points are
calculated.
3. Proceeding counterclockwise starting in the positive
x-direction, each of the visible arc-segments between the
pairs of vertices are drawn using the bezier arc
approximation technique implemented in
:meth:`matplotlib.path.Path.arc`.
"""
if not hasattr(self, 'axes'):
raise RuntimeError('Arcs can only be used in Axes instances')
self._recompute_transform()
# Get the width and height in pixels
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
width, height = self.get_transform().transform_point(
(width, height))
inv_error = (1.0 / 1.89818e-6) * 0.5
if width < inv_error and height < inv_error:
# self._path = Path.arc(self.theta1, self.theta2)
return Patch.draw(self, renderer)
def iter_circle_intersect_on_line(x0, y0, x1, y1):
dx = x1 - x0
dy = y1 - y0
dr2 = dx * dx + dy * dy
D = x0 * y1 - x1 * y0
D2 = D * D
discrim = dr2 - D2
# Single (tangential) intersection
if discrim == 0.0:
x = (D * dy) / dr2
y = (-D * dx) / dr2
yield x, y
elif discrim > 0.0:
# The definition of "sign" here is different from
# np.sign: we never want to get 0.0
if dy < 0.0:
sign_dy = -1.0
else:
sign_dy = 1.0
sqrt_discrim = np.sqrt(discrim)
for sign in (1., -1.):
x = (D * dy + sign * sign_dy * dx * sqrt_discrim) / dr2
y = (-D * dx + sign * np.abs(dy) * sqrt_discrim) / dr2
yield x, y
def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
epsilon = 1e-9
if x1 < x0:
x0e, x1e = x1, x0
else:
x0e, x1e = x0, x1
if y1 < y0:
y0e, y1e = y1, y0
else:
y0e, y1e = y0, y1
x0e -= epsilon
y0e -= epsilon
x1e += epsilon
y1e += epsilon
for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):
if x >= x0e and x <= x1e and y >= y0e and y <= y1e:
yield x, y
# Transforms the axes box_path so that it is relative to the unit
# circle in the same way that it is relative to the desired
# ellipse.
box_path = Path.unit_rectangle()
box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \
self.get_transform().inverted()
box_path = box_path.transformed(box_path_transform)
PI = np.pi
TWOPI = PI * 2.0
RAD2DEG = 180.0 / PI
DEG2RAD = PI / 180.0
theta1 = self.theta1
theta2 = self.theta2
thetas = {}
# For each of the point pairs, there is a line segment
for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
x0, y0 = p0
x1, y1 = p1
for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
theta = np.arccos(x)
if y < 0:
theta = TWOPI - theta
                # Convert radians to degrees
theta *= RAD2DEG
if theta > theta1 and theta < theta2:
thetas[theta] = None
thetas = list(six.iterkeys(thetas))
thetas.sort()
thetas.append(theta2)
last_theta = theta1
theta1_rad = theta1 * DEG2RAD
inside = box_path.contains_point((np.cos(theta1_rad),
np.sin(theta1_rad)))
# save original path
path_original = self._path
for theta in thetas:
if inside:
                self._path = Path.arc(last_theta, theta, 8)
Patch.draw(self, renderer)
inside = False
else:
inside = True
last_theta = theta
# restore original path
self._path = path_original
def bbox_artist(artist, renderer, props=None, fill=True):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
*props* is a dict of rectangle props with the additional property
'pad' that sets the padding around the bbox in points.
"""
if props is None:
props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = artist.get_window_extent(renderer)
l, b, w, h = bbox.bounds
l -= pad / 2.
b -= pad / 2.
w += pad
h += pad
r = Rectangle(xy=(l, b),
width=w,
height=h,
fill=fill,
)
r.set_transform(transforms.IdentityTransform())
r.set_clip_on(False)
r.update(props)
r.draw(renderer)
def draw_bbox(bbox, renderer, color='k', trans=None):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
"""
l, b, w, h = bbox.bounds
r = Rectangle(xy=(l, b),
width=w,
height=h,
edgecolor=color,
fill=False,
)
if trans is not None:
r.set_transform(trans)
r.set_clip_on(False)
r.draw(renderer)
def _pprint_table(_table, leadingspace=2):
"""
Given the list of list of strings, return a string of REST table format.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
columns = [[] for cell in _table[0]]
for row in _table:
for column, cell in zip(columns, row):
column.append(cell)
col_len = [max([len(cell) for cell in column]) for column in columns]
lines = []
table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len])
lines.append('')
lines.append(table_formatstr)
lines.append(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(_table[0], col_len)]))
lines.append(table_formatstr)
lines.extend([(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(row, col_len)]))
for row in _table[1:]])
lines.append(table_formatstr)
lines.append('')
return "\n".join(lines)
def _pprint_styles(_styles):
"""
A helper function for the _Style class. Given the dictionary of
(stylename : styleclass), return a formatted string listing all the
styles. Used to update the documentation.
"""
names, attrss, clss = [], [], []
import inspect
_table = [["Class", "Name", "Attrs"]]
for name, cls in sorted(_styles.items()):
if six.PY2:
args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)
else:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefs,
annotations) = inspect.getfullargspec(cls.__init__)
if defaults:
args = [(argname, argdefault)
for argname, argdefault in zip(args[1:], defaults)]
else:
args = None
if args is None:
argstr = 'None'
else:
argstr = ",".join([("%s=%s" % (an, av))
for an, av
in args])
# adding ``quotes`` since - and | have special meaning in reST
_table.append([cls.__name__, "``%s``" % name, argstr])
return _pprint_table(_table)
def _simpleprint_styles(_styles):
"""
A helper function for the _Style class. Given the dictionary of
(stylename : styleclass), return a string rep of the list of keys.
Used to update the documentation.
"""
styles = "[ \'"
styles += "\' | \'".join(str(i) for i in sorted(_styles.keys()))
styles += "\' ]"
return styles
class _Style(object):
"""
A base class for the Styles. It is meant to be a container class,
where actual styles are declared as subclass of it, and it
provides some helper functions.
"""
def __new__(self, stylename, **kw):
"""
return the instance of the subclass with the given style name.
"""
# the "class" should have the _style_list attribute, which is
        # a dictionary of {stylename: styleclass} pairs.
_list = stylename.replace(" ", "").split(",")
_name = _list[0].lower()
try:
_cls = self._style_list[_name]
except KeyError:
raise ValueError("Unknown style : %s" % stylename)
try:
_args_pair = [cs.split("=") for cs in _list[1:]]
_args = dict([(k, float(v)) for k, v in _args_pair])
except ValueError:
raise ValueError("Incorrect style argument : %s" % stylename)
_args.update(kw)
return _cls(**_args)
@classmethod
def get_styles(klass):
"""
A class method which returns a dictionary of available styles.
"""
return klass._style_list
@classmethod
def pprint_styles(klass):
"""
A class method which returns a string of the available styles.
"""
return _pprint_styles(klass._style_list)
@classmethod
def register(klass, name, style):
"""
Register a new style.
"""
if not issubclass(style, klass._Base):
raise ValueError("%s must be a subclass of %s" % (style,
klass._Base))
klass._style_list[name] = style
class BoxStyle(_Style):
"""
:class:`BoxStyle` is a container class which defines several
boxstyle classes, which are used for :class:`FancyBboxPatch`.
A style object can be created as::
BoxStyle.Round(pad=0.2)
or::
BoxStyle("Round", pad=0.2)
or::
BoxStyle("Round, pad=0.2")
    The following boxstyle classes are defined.
%(AvailableBoxstyles)s
    An instance of any boxstyle class is a callable object,
whose call signature is::
__call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)
and returns a :class:`Path` instance. *x0*, *y0*, *width* and
*height* specify the location and size of the box to be
    drawn. *mutation_size* determines the overall size of the
    mutation (by which I mean the transformation of the rectangle to
    the fancy box). *aspect_ratio* determines the aspect-ratio of
the mutation.
.. plot:: mpl_examples/pylab_examples/fancybox_demo2.py
"""
_style_list = {}
class _Base(object):
"""
:class:`BBoxTransmuterBase` and its derivatives are used to make a
fancy box around a given rectangle. The :meth:`__call__` method
returns the :class:`~matplotlib.path.Path` of the fancy box. This
class is not an artist and actual drawing of the fancy box is done
by the :class:`FancyBboxPatch` class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
"""
            initialization.
"""
super(BoxStyle._Base, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
"""
            The transmute method is the core of the
            :class:`BboxTransmuter` class and must be overridden in the
subclasses. It receives the location and size of the
rectangle, and the mutation_size, with which the amount of
            padding etc. will be scaled. It returns a
:class:`~matplotlib.path.Path` instance.
"""
raise NotImplementedError('Derived must override')
def __call__(self, x0, y0, width, height, mutation_size,
aspect_ratio=1.):
"""
Given the location and size of the box, return the path of
the box around it.
- *x0*, *y0*, *width*, *height* : location and size of the box
- *mutation_size* : a reference scale for the mutation.
            - *aspect_ratio* : aspect-ratio for the mutation.
"""
# The __call__ method is a thin wrapper around the transmute method
            # and takes care of the aspect ratio.
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
y0, height = y0 / aspect_ratio, height / aspect_ratio
# call transmute method with squeezed height.
path = self.transmute(x0, y0, width, height, mutation_size)
vertices, codes = path.vertices, path.codes
# Restore the height
vertices[:, 1] = vertices[:, 1] * aspect_ratio
return Path(vertices, codes)
else:
return self.transmute(x0, y0, width, height, mutation_size)
def __reduce__(self):
            # because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(BoxStyle, self.__class__.__name__),
self.__dict__
)
class Square(_Base):
"""
A simple square box.
"""
def __init__(self, pad=0.3):
"""
*pad*
amount of padding
"""
self.pad = pad
super(BoxStyle.Square, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2*pad, height + 2*pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
vertices = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]
codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]
return Path(vertices, codes)
_style_list["square"] = Square
class Circle(_Base):
"""A simple circle box."""
def __init__(self, pad=0.3):
"""
Parameters
----------
pad : float
The amount of padding around the original box.
"""
self.pad = pad
super(BoxStyle.Circle, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
width, height = width + 2 * pad, height + 2 * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
return Path.circle((x0 + width/2., y0 + height/2.),
(max([width, height]) / 2.))
_style_list["circle"] = Circle
class LArrow(_Base):
"""
(left) Arrow Box
"""
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.LArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2. * pad, height + 2. * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0) / 2.
dxx = dx * .5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # arrow
(x0 + dxx, y0), (x0 + dxx, y0)]
com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["larrow"] = LArrow
class RArrow(LArrow):
"""
(right) Arrow Box
"""
def __init__(self, pad=0.3):
super(BoxStyle.RArrow, self).__init__(pad)
def transmute(self, x0, y0, width, height, mutation_size):
p = BoxStyle.LArrow.transmute(self, x0, y0,
width, height, mutation_size)
p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]
return p
_style_list["rarrow"] = RArrow
class DArrow(_Base):
"""
(Double) Arrow Box
"""
# This source is copied from LArrow,
# modified to add a right arrow to the bbox.
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.DArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
# The width is padded by the arrows, so we don't need to pad it.
height = height + 2. * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0)/2.
dxx = dx * .5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0 + dxx, y0), (x1, y0), # bot-segment
(x1, y0 - dxx), (x1 + dx + dxx, y0 + dx),
(x1, y1 + dxx), # right-arrow
(x1, y1), (x0 + dxx, y1), # top-segment
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # left-arrow
(x0 + dxx, y0), (x0 + dxx, y0)] # close-poly
com = [Path.MOVETO, Path.LINETO,
Path.LINETO, Path.LINETO,
Path.LINETO,
Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO,
Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list['darrow'] = DArrow
class Round(_Base):
"""
A box with round corners.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding radius of corners. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of the rounding corner
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad
width, height = width + 2. * pad, height + 2. * pad
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
# Round corners are implemented as quadratic bezier. e.g.,
# [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
cp = [(x0 + dr, y0),
(x1 - dr, y0),
(x1, y0), (x1, y0 + dr),
(x1, y1 - dr),
(x1, y1), (x1 - dr, y1),
(x0 + dr, y1),
(x0, y1), (x0, y1 - dr),
(x0, y0 + dr),
(x0, y0), (x0 + dr, y0),
(x0 + dr, y0)]
com = [Path.MOVETO,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round"] = Round
class Round4(_Base):
"""
Another box with round edges.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding size of edges. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round4, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# rounding size. Use half of the pad if not set.
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad / 2.
width, height = (width + 2. * pad - 2 * dr,
height + 2. * pad - 2 * dr)
x0, y0 = x0 - pad + dr, y0 - pad + dr,
x1, y1 = x0 + width, y0 + height
cp = [(x0, y0),
(x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),
(x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),
(x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),
(x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),
(x0, y0)]
com = [Path.MOVETO,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round4"] = Round4
class Sawtooth(_Base):
"""
A sawtooth box.
"""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
size of the sawtooth. *pad* if None
"""
self.pad = pad
self.tooth_size = tooth_size
super(BoxStyle.Sawtooth, self).__init__()
def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of sawtooth
if self.tooth_size is None:
tooth_size = self.pad * .5 * mutation_size
else:
tooth_size = self.tooth_size * mutation_size
tooth_size2 = tooth_size / 2.
width, height = (width + 2. * pad - tooth_size,
height + 2. * pad - tooth_size)
# the sizes of the vertical and horizontal sawtooth are
# separately adjusted to fit the given box size.
dsx_n = int(np.round((width - tooth_size) / (tooth_size * 2))) * 2
dsx = (width - tooth_size) / dsx_n
dsy_n = int(np.round((height - tooth_size) / (tooth_size * 2))) * 2
dsy = (height - tooth_size) / dsy_n
x0, y0 = x0 - pad + tooth_size2, y0 - pad + tooth_size2
x1, y1 = x0 + width, y0 + height
bottom_saw_x = [x0] + \
[x0 + tooth_size2 + dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x1 - tooth_size2]
bottom_saw_y = [y0] + \
[y0 - tooth_size2, y0,
y0 + tooth_size2, y0] * dsx_n + \
[y0 - tooth_size2]
right_saw_x = [x1] + \
[x1 + tooth_size2,
x1,
x1 - tooth_size2,
x1] * dsx_n + \
[x1 + tooth_size2]
right_saw_y = [y0] + \
[y0 + tooth_size2 + dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y1 - tooth_size2]
top_saw_x = [x1] + \
[x1 - tooth_size2 - dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x0 + tooth_size2]
top_saw_y = [y1] + \
[y1 + tooth_size2,
y1,
y1 - tooth_size2,
y1] * dsx_n + \
[y1 + tooth_size2]
left_saw_x = [x0] + \
[x0 - tooth_size2,
x0,
x0 + tooth_size2,
x0] * dsy_n + \
[x0 - tooth_size2]
left_saw_y = [y1] + \
[y1 - tooth_size2 - dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y0 + tooth_size2]
saw_vertices = (list(zip(bottom_saw_x, bottom_saw_y)) +
list(zip(right_saw_x, right_saw_y)) +
list(zip(top_saw_x, top_saw_y)) +
list(zip(left_saw_x, left_saw_y)) +
[(bottom_saw_x[0], bottom_saw_y[0])])
return saw_vertices
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0, width,
height, mutation_size)
path = Path(saw_vertices, closed=True)
return path
_style_list["sawtooth"] = Sawtooth
class Roundtooth(Sawtooth):
"""A rounded tooth box."""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
size of the sawtooth. *pad* if None
"""
super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0,
width, height,
mutation_size)
# Add a trailing vertex to allow us to close the polygon correctly
saw_vertices = np.concatenate([np.array(saw_vertices),
[saw_vertices[0]]], axis=0)
codes = ([Path.MOVETO] +
[Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1)//2) +
[Path.CLOSEPOLY])
return Path(saw_vertices, codes)
_style_list["roundtooth"] = Roundtooth
if __doc__: # __doc__ could be None if -OO optimization is enabled
__doc__ = cbook.dedent(__doc__) % \
{"AvailableBoxstyles": _pprint_styles(_style_list)}
docstring.interpd.update(
AvailableBoxstyles=_pprint_styles(BoxStyle._style_list),
ListBoxstyles=_simpleprint_styles(BoxStyle._style_list))
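# A short usage sketch (values are arbitrary and assumed): a BoxStyle can be
# built from a style-name string or from the nested class, and calling it
# mutates a rectangle into the styled Path.
#     style = BoxStyle("round", pad=0.2)          # or BoxStyle.Round(pad=0.2)
#     path = style(0, 0, 10, 5, mutation_size=2.0)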
class FancyBboxPatch(Patch):
"""
Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
*y*) with specified width and height.
The :class:`FancyBboxPatch` class is similar to the :class:`Rectangle`
class, but it draws a fancy box around the rectangle. The
transformation of the rectangle box to the fancy box is delegated
to the :class:`BoxTransmuterBase` and its derived classes.
"""
_edge_default = True
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y,
self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height,
boxstyle="round",
bbox_transmuter=None,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
*xy* = lower left corner
*width*, *height*
*boxstyle* determines what kind of fancy box will be drawn. It
can be a string of the style name with comma-separated
attributes, or an instance of :class:`BoxStyle`. The following box
styles are available.
%(AvailableBoxstyles)s
*mutation_scale* : a value with which attributes of boxstyle
(e.g., pad) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
if boxstyle == "custom":
if bbox_transmuter is None:
raise ValueError("bbox_transmuter argument is needed with "
"custom boxstyle")
self._bbox_transmuter = bbox_transmuter
else:
self.set_boxstyle(boxstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self.stale = True
@docstring.dedent_interpd
def set_boxstyle(self, boxstyle=None, **kw):
"""
Set the box style.
*boxstyle* can be a string with boxstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords::
set_boxstyle("round,pad=0.2")
set_boxstyle("round", pad=0.2)
Old attrs simply are forgotten.
Without argument (or with *boxstyle* = None), it returns
available box styles.
The following boxstyles are available:
%(AvailableBoxstyles)s
ACCEPTS: %(ListBoxstyles)s
"""
if boxstyle is None:
return BoxStyle.pprint_styles()
if isinstance(boxstyle, BoxStyle._Base):
self._bbox_transmuter = boxstyle
elif six.callable(boxstyle):
self._bbox_transmuter = boxstyle
else:
self._bbox_transmuter = BoxStyle(boxstyle, **kw)
self.stale = True
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale = scale
self.stale = True
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect = aspect
self.stale = True
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_boxstyle(self):
"Return the boxstyle object"
return self._bbox_transmuter
def get_path(self):
"""
Return the mutated path of the rectangle
"""
_path = self.get_boxstyle()(self._x, self._y,
self._width, self._height,
self.get_mutation_scale(),
self.get_mutation_aspect())
return _path
# Following methods are borrowed from the Rectangle class.
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
self.stale = True
def set_width(self, w):
"""
Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
self.stale = True
def set_height(self, h):
"""
Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
self.stale = True
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
self.stale = True
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
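# A hedged usage sketch for FancyBboxPatch ('ax' is an assumed Axes
# instance created elsewhere): the box style string carries its attributes,
# and mutation_scale scales pad and related attributes.
#     fancy = FancyBboxPatch((0.2, 0.2), 0.6, 0.3,
#                            boxstyle="round,pad=0.1", mutation_scale=2.0)
#     ax.add_patch(fancy)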
class ConnectionStyle(_Style):
"""
:class:`ConnectionStyle` is a container class which defines
several connectionstyle classes, which are used to create a path
between two points. These are mainly used with
:class:`FancyArrowPatch`.
A connectionstyle object can be created either as::
ConnectionStyle.Arc3(rad=0.2)
or::
ConnectionStyle("Arc3", rad=0.2)
or::
ConnectionStyle("Arc3, rad=0.2")
The following classes are defined
%(AvailableConnectorstyles)s
An instance of any connection style class is a callable object,
whose call signature is::
__call__(self, posA, posB,
patchA=None, patchB=None,
shrinkA=2., shrinkB=2.)
and it returns a :class:`Path` instance. *posA* and *posB* are
tuples of x,y coordinates of the two points to be
connected. If *patchA* (or *patchB*) is given, the returned path is
clipped so that it starts (or ends) at the boundary of the
patch. The path is further shrunk by *shrinkA* (or *shrinkB*),
which are given in points.
"""
_style_list = {}
class _Base(object):
"""
A base class for connectionstyle classes. The subclass needs
to implement a *connect* method whose call signature is::
connect(posA, posB)
where posA and posB are tuples of x, y coordinates to be
connected. The method needs to return a path connecting two
points. This base class defines a __call__ method, and a few
helper methods.
"""
class SimpleEvent:
def __init__(self, xy):
self.x, self.y = xy
def _clip(self, path, patchA, patchB):
"""
Clip the path to the boundaries of patchA and patchB.
The starting point of the path needs to be inside
patchA and the end point inside patchB. The *contains*
method of each patch object is used to test whether the point
is inside the patch.
"""
if patchA:
def insideA(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchA.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideA)
except ValueError:
right = path
path = right
if patchB:
def insideB(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchB.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideB)
except ValueError:
left = path
path = left
return path
def _shrink(self, path, shrinkA, shrinkB):
"""
Shrink the path by a fixed size (in points), given by shrinkA and shrinkB
"""
if shrinkA:
x, y = path.vertices[0]
insideA = inside_circle(x, y, shrinkA)
try:
left, right = split_path_inout(path, insideA)
path = right
except ValueError:
pass
if shrinkB:
x, y = path.vertices[-1]
insideB = inside_circle(x, y, shrinkB)
try:
left, right = split_path_inout(path, insideB)
path = left
except ValueError:
pass
return path
def __call__(self, posA, posB,
shrinkA=2., shrinkB=2., patchA=None, patchB=None):
"""
Calls the *connect* method to create a path between *posA*
and *posB*. The path is then clipped and shrunk.
"""
path = self.connect(posA, posB)
clipped_path = self._clip(path, patchA, patchB)
shrunk_path = self._shrink(clipped_path, shrinkA, shrinkB)
return shrunk_path
def __reduce__(self):
# because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ConnectionStyle, self.__class__.__name__),
self.__dict__
)
class Arc3(_Base):
"""
Creates a simple quadratic bezier curve between two
points. The curve is created so that the middle control point
(C1) is located at the same distance from the start (C0) and
end (C2) points, and the distance of C1 to the line
connecting C0-C2 is *rad* times the distance of C0-C2.
"""
def __init__(self, rad=0.):
"""
*rad*
curvature of the curve.
"""
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
dx, dy = x2 - x1, y2 - y1
f = self.rad
cx, cy = x12 + f * dy, y12 - f * dx
vertices = [(x1, y1),
(cx, cy),
(x2, y2)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
return Path(vertices, codes)
_style_list["arc3"] = Arc3
class Angle3(_Base):
"""
Creates a simple quadratic bezier curve between two
points. The middle control point is placed at the
intersecting point of two lines which cross the start (or
end) point and have an angle of angleA (or angleB).
"""
def __init__(self, angleA=90, angleB=0):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
"""
self.angleA = angleA
self.angleB = angleB
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = (math.cos(self.angleA / 180. * math.pi),
math.sin(self.angleA / 180. * math.pi))
cosB, sinB = (math.cos(self.angleB / 180. * math.pi),
math.sin(self.angleB / 180. * math.pi))
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1), (cx, cy), (x2, y2)]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
return Path(vertices, codes)
_style_list["angle3"] = Angle3
class Angle(_Base):
"""
Creates a piecewise continuous quadratic bezier path between
two points. The path has one passing-through point placed at
the intersecting point of two lines which cross the start
(or end) point and have an angle of angleA (or angleB). The
connecting edges are rounded with *rad*.
"""
def __init__(self, angleA=90, angleB=0, rad=0.):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
*rad*
rounding radius of the edge
"""
self.angleA = angleA
self.angleB = angleB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = (math.cos(self.angleA / 180. * math.pi),
math.sin(self.angleA / 180. * math.pi))
cosB, sinB = (math.cos(self.angleB / 180. * math.pi),
math.sin(self.angleB / 180. * math.pi))
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1)]
codes = [Path.MOVETO]
if self.rad == 0.:
vertices.append((cx, cy))
codes.append(Path.LINETO)
else:
dx1, dy1 = x1 - cx, y1 - cy
d1 = (dx1 ** 2 + dy1 ** 2) ** .5
f1 = self.rad / d1
dx2, dy2 = x2 - cx, y2 - cy
d2 = (dx2 ** 2 + dy2 ** 2) ** .5
f2 = self.rad / d2
vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),
(cx, cy),
(cx + dx2 * f2, cy + dy2 * f2)])
codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["angle"] = Angle
class Arc(_Base):
"""
Creates a piecewise continuous quadratic bezier path between
two points. The path can have two passing-through points: a
point placed at the distance of armA and angle of angleA from
point A, and another point placed likewise with respect to
point B. The edges are rounded with *rad*.
"""
def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
"""
*angleA* :
starting angle of the path
*angleB* :
ending angle of the path
*armA* :
length of the starting arm
*armB* :
length of the ending arm
*rad* :
rounding radius of the edges
"""
self.angleA = angleA
self.angleB = angleB
self.armA = armA
self.armB = armB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
vertices = [(x1, y1)]
rounded = []
codes = [Path.MOVETO]
if self.armA:
cosA = math.cos(self.angleA / 180. * math.pi)
sinA = math.sin(self.angleA / 180. * math.pi)
# x_armA, y_armA
d = self.armA - self.rad
rounded.append((x1 + d * cosA, y1 + d * sinA))
d = self.armA
rounded.append((x1 + d * cosA, y1 + d * sinA))
if self.armB:
cosB = math.cos(self.angleB / 180. * math.pi)
sinB = math.sin(self.angleB / 180. * math.pi)
x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB
if rounded:
xp, yp = rounded[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
else:
xp, yp = vertices[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
d = dd - self.rad
rounded = [(xp + d * dx / dd, yp + d * dy / dd),
(x_armB, y_armB)]
if rounded:
xp, yp = rounded[-1]
dx, dy = x2 - xp, y2 - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["arc"] = Arc
class Bar(_Base):
"""
A line with *angle* between A and B, with *armA* and
*armB*. One of the arms is extended so that they are connected at
a right angle. The length of armA is determined by (*armA*
+ *fraction* x AB distance). Same for armB.
"""
def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):
"""
Parameters
----------
armA : float
minimum length of armA
armB : float
minimum length of armB
fraction : float
a fraction of the distance between two points that
will be added to armA and armB.
angle : float or None
angle of the connecting line (if None, parallel
to A and B)
"""
self.armA = armA
self.armB = armB
self.fraction = fraction
self.angle = angle
def connect(self, posA, posB):
x1, y1 = posA
x20, y20 = x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
theta1 = math.atan2(y2 - y1, x2 - x1)
dx, dy = x2 - x1, y2 - y1
dd = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd, dy / dd
armA, armB = self.armA, self.armB
if self.angle is not None:
#angle = self.angle % 180.
#if angle < 0. or angle > 180.:
# angle
#theta0 = (self.angle%180.)/180.*math.pi
theta0 = self.angle / 180. * math.pi
#theta0 = (((self.angle+90)%180.) - 90.)/180.*math.pi
dtheta = theta1 - theta0
dl = dd * math.sin(dtheta)
dL = dd * math.cos(dtheta)
#x2, y2 = x2 + dl*ddy, y2 - dl*ddx
x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)
armB = armB - dl
# update
dx, dy = x2 - x1, y2 - y1
dd2 = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd2, dy / dd2
else:
dl = 0.
#if armA > armB:
# armB = armA + dl
#else:
# armA = armB - dl
arm = max(armA, armB)
f = self.fraction * dd + arm
#fB = self.fraction*dd + armB
cx1, cy1 = x1 + f * ddy, y1 - f * ddx
cx2, cy2 = x2 + f * ddy, y2 - f * ddx
vertices = [(x1, y1),
(cx1, cy1),
(cx2, cy2),
(x20, y20)]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return Path(vertices, codes)
_style_list["bar"] = Bar
if __doc__:
__doc__ = cbook.dedent(__doc__) % \
{"AvailableConnectorstyles": _pprint_styles(_style_list)}
def _point_along_a_line(x0, y0, x1, y1, d):
"""
find a point along a line connecting (x0, y0) -- (x1, y1) whose
distance from (x0, y0) is d.
"""
dx, dy = x0 - x1, y0 - y1
ff = d / (dx * dx + dy * dy) ** .5
x2, y2 = x0 - ff * dx, y0 - ff * dy
return x2, y2
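# A quick check of _point_along_a_line with easy numbers: starting at
# (0, 0) towards (3, 4) (a 3-4-5 triangle), a distance of 5 lands exactly
# on the far end point:
#     _point_along_a_line(0, 0, 3, 4, 5)   # -> (3.0, 4.0)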
class ArrowStyle(_Style):
"""
:class:`ArrowStyle` is a container class which defines several
arrowstyle classes, which are used to create an arrow path along a
given path. These are mainly used with :class:`FancyArrowPatch`.
An arrowstyle object can be created either as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(AvailableArrowstyles)s
An instance of any arrow style class is a callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a :class:`Path` instance and a boolean
value. *path* is a :class:`Path` instance along which the arrow
will be drawn. *mutation_size* and *aspect_ratio* have the same
meaning as in :class:`BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
.. plot:: mpl_examples/pylab_examples/fancyarrow_demo.py
"""
_style_list = {}
class _Base(object):
"""
Arrow Transmuter Base class
ArrowTransmuterBase and its derivatives are used to make a fancy
arrow around a given path. The __call__ method returns a path
(which will be used to create a PathPatch instance) and a boolean
value indicating whether the path is open and therefore not fillable. This
class is not an artist and actual drawing of the fancy arrow is
done by the FancyArrowPatch class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all of its arguments (except self) must have
# default values.
def __init__(self):
super(ArrowStyle._Base, self).__init__()
@staticmethod
def ensure_quadratic_bezier(path):
""" Some ArrowStyle class only wokrs with a simple
quaratic bezier curve (created with Arc3Connetion or
Angle3Connector). This static method is to check if the
provided path is a simple quadratic bezier curve and returns
its control points if true.
"""
segments = list(path.iter_segments())
if ((len(segments) != 2) or (segments[0][1] != Path.MOVETO) or
(segments[1][1] != Path.CURVE3)):
msg = "'path' it's not a valid quadratic bezier curve"
raise ValueError(msg)
return list(segments[0][0]) + list(segments[1][0])
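# For example (values assumed), a path with vertices
# [(0, 0), (1, 1), (2, 0)] and codes [MOVETO, CURVE3, CURVE3] yields the
# flat control-point list [0, 0, 1, 1, 2, 0], i.e. (x0, y0, x1, y1, x2, y2).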
def transmute(self, path, mutation_size, linewidth):
"""
The transmute method is the very core of the ArrowStyle
class and must be overridden in the subclasses. It receives
the path object along which the arrow will be drawn, and
the mutation_size, with which the arrow head etc.
will be scaled. The linewidth may be used to adjust
the path so that it does not pass beyond the given
points. It returns a tuple of a Path instance and a
boolean. The boolean value indicates whether the path can
be filled or not. The return value can also be a list of paths
and a list of booleans of the same length.
"""
raise NotImplementedError('Derived must override')
def __call__(self, path, mutation_size, linewidth,
aspect_ratio=1.):
"""
The __call__ method is a thin wrapper around the transmute method
and takes care of the aspect ratio.
"""
path = make_path_regular(path)
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
vertices, codes = path.vertices[:], path.codes[:]
# Squeeze the height
vertices[:, 1] = vertices[:, 1] / aspect_ratio
path_shrunk = Path(vertices, codes)
# call transmute method with squeezed height.
path_mutated, fillable = self.transmute(path_shrunk,
mutation_size,
linewidth)
if cbook.iterable(fillable):
path_list = []
for p in path_mutated:
v, c = p.vertices, p.codes
# Restore the height
v[:, 1] = v[:, 1] * aspect_ratio
path_list.append(Path(v, c))
return path_list, fillable
else:
return path_mutated, fillable
else:
return self.transmute(path, mutation_size, linewidth)
def __reduce__(self):
# because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ArrowStyle, self.__class__.__name__),
self.__dict__
)
class _Curve(_Base):
"""
A simple arrow which will work with any path instance. The
returned path is simply a concatenation of the original path and at
most two paths representing the arrow heads at the begin point and
at the end point. The arrow heads can be either open or closed.
"""
def __init__(self, beginarrow=None, endarrow=None,
fillbegin=False, fillend=False,
head_length=.2, head_width=.1):
"""
The arrows are drawn if *beginarrow* and/or *endarrow* are
true. *head_length* and *head_width* determine the size
of the arrow relative to the *mutation scale*. The
arrowhead at the begin (or end) is closed if fillbegin (or
fillend) is True.
"""
self.beginarrow, self.endarrow = beginarrow, endarrow
self.head_length, self.head_width = head_length, head_width
self.fillbegin, self.fillend = fillbegin, fillend
super(ArrowStyle._Curve, self).__init__()
def _get_arrow_wedge(self, x0, y0, x1, y1,
head_dist, cos_t, sin_t, linewidth
):
"""
Return the paths for arrow heads. Since arrow lines are
drawn with capstyle=projected, the arrow goes beyond the
desired point. This method also returns the amount of the path
to be shrunk so that it does not overshoot.
"""
# arrow from x0, y0 to x1, y1
dx, dy = x0 - x1, y0 - y1
cp_distance = np.hypot(dx, dy)
# pad_projected : amount of pad to account for the
# overshooting of the projection of the wedge
pad_projected = (.5 * linewidth / sin_t)
# Account for division by zero
if cp_distance == 0:
cp_distance = 1
# apply pad for projected edge
ddx = pad_projected * dx / cp_distance
ddy = pad_projected * dy / cp_distance
# offset for arrow wedge
dx = dx / cp_distance * head_dist
dy = dy / cp_distance * head_dist
dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy
vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),
(x1 + ddx, y1 + ddy),
(x1 + ddx + dx2, y1 + ddy + dy2)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow, ddx, ddy
def transmute(self, path, mutation_size, linewidth):
head_length, head_width = self.head_length * mutation_size, \
self.head_width * mutation_size
head_dist = math.sqrt(head_length ** 2 + head_width ** 2)
cos_t, sin_t = head_length / head_dist, head_width / head_dist
# begin arrow
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
# If there is no room for an arrow and a line, then skip the arrow
has_begin_arrow = (self.beginarrow and
not ((x0 == x1) and (y0 == y1)))
if has_begin_arrow:
verticesA, codesA, ddxA, ddyA = \
self._get_arrow_wedge(x1, y1, x0, y0,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesA, codesA = [], []
ddxA, ddyA = 0., 0.
# end arrow
x2, y2 = path.vertices[-2]
x3, y3 = path.vertices[-1]
# If there is no room for an arrow and a line, then skip the arrow
has_end_arrow = (self.endarrow and not ((x2 == x3) and (y2 == y3)))
if has_end_arrow:
verticesB, codesB, ddxB, ddyB = \
self._get_arrow_wedge(x2, y2, x3, y3,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesB, codesB = [], []
ddxB, ddyB = 0., 0.
# this simple code will not work if ddx, ddy is greater than the
# separation between vertices.
_path = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],
path.vertices[1:-1],
[(x3 + ddxB, y3 + ddyB)]]),
path.codes)]
_fillable = [False]
if has_begin_arrow:
if self.fillbegin:
p = np.concatenate([verticesA, [verticesA[0],
verticesA[0]], ])
c = np.concatenate([codesA, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
_fillable.append(True)
else:
_path.append(Path(verticesA, codesA))
_fillable.append(False)
if has_end_arrow:
if self.fillend:
_fillable.append(True)
p = np.concatenate([verticesB, [verticesB[0],
verticesB[0]], ])
c = np.concatenate([codesB, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
else:
_fillable.append(False)
_path.append(Path(verticesB, codesB))
return _path, _fillable
class Curve(_Curve):
"""
A simple curve without any arrow head.
"""
def __init__(self):
super(ArrowStyle.Curve, self).__init__(
beginarrow=False, endarrow=False)
_style_list["-"] = Curve
class CurveA(_Curve):
"""
An arrow with a head at its begin point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveA, self).__init__(
beginarrow=True, endarrow=False,
head_length=head_length, head_width=head_width)
_style_list["<-"] = CurveA
class CurveB(_Curve):
"""
An arrow with a head at its end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveB, self).__init__(
beginarrow=False, endarrow=True,
head_length=head_length, head_width=head_width)
_style_list["->"] = CurveB
class CurveAB(_Curve):
"""
An arrow with heads both at the begin and the end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveAB, self).__init__(
beginarrow=True, endarrow=True,
head_length=head_length, head_width=head_width)
_style_list["<->"] = CurveAB
class CurveFilledA(_Curve):
"""
An arrow with filled triangle head at the begin.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledA, self).__init__(
beginarrow=True, endarrow=False,
fillbegin=True, fillend=False,
head_length=head_length, head_width=head_width)
_style_list["<|-"] = CurveFilledA
class CurveFilledB(_Curve):
"""
An arrow with filled triangle head at the end.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledB, self).__init__(
beginarrow=False, endarrow=True,
fillbegin=False, fillend=True,
head_length=head_length, head_width=head_width)
_style_list["-|>"] = CurveFilledB
class CurveFilledAB(_Curve):
"""
An arrow with filled triangle heads both at the begin and the end
point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledAB, self).__init__(
beginarrow=True, endarrow=True,
fillbegin=True, fillend=True,
head_length=head_length, head_width=head_width)
_style_list["<|-|>"] = CurveFilledAB
class _Bracket(_Base):
def __init__(self, bracketA=None, bracketB=None,
widthA=1., widthB=1.,
lengthA=0.2, lengthB=0.2,
angleA=None, angleB=None,
scaleA=None, scaleB=None):
self.bracketA, self.bracketB = bracketA, bracketB
self.widthA, self.widthB = widthA, widthB
self.lengthA, self.lengthB = lengthA, lengthB
self.angleA, self.angleB = angleA, angleB
self.scaleA, self.scaleB = scaleA, scaleB
def _get_bracket(self, x0, y0,
cos_t, sin_t, width, length):
# arrow from x0, y0 to x1, y1
from matplotlib.bezier import get_normal_points
x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)
dx, dy = length * cos_t, length * sin_t
vertices_arrow = [(x1 + dx, y1 + dy),
(x1, y1),
(x2, y2),
(x2 + dx, y2 + dy)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow
def transmute(self, path, mutation_size, linewidth):
if self.scaleA is None:
scaleA = mutation_size
else:
scaleA = self.scaleA
if self.scaleB is None:
scaleB = mutation_size
else:
scaleB = self.scaleB
vertices_list, codes_list = [], []
if self.bracketA:
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthA * scaleA,
self.lengthA * scaleA)
vertices_list.append(verticesA)
codes_list.append(codesA)
vertices_list.append(path.vertices)
codes_list.append(path.codes)
if self.bracketB:
x0, y0 = path.vertices[-1]
x1, y1 = path.vertices[-2]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthB * scaleB,
self.lengthB * scaleB)
vertices_list.append(verticesB)
codes_list.append(codesB)
vertices = np.concatenate(vertices_list)
codes = np.concatenate(codes_list)
p = Path(vertices, codes)
return p, False
class BracketAB(_Bracket):
"""
An arrow with a bracket(]) at both ends.
"""
def __init__(self,
widthA=1., lengthA=0.2, angleA=None,
widthB=1., lengthB=0.2, angleB=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketAB, self).__init__(
True, True, widthA=widthA, lengthA=lengthA,
angleA=angleA, widthB=widthB, lengthB=lengthB,
angleB=angleB)
_style_list["]-["] = BracketAB
class BracketA(_Bracket):
"""
An arrow with a bracket(]) at its end.
"""
def __init__(self, widthA=1., lengthA=0.2, angleA=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
"""
super(ArrowStyle.BracketA, self).__init__(True, None,
widthA=widthA,
lengthA=lengthA,
angleA=angleA)
_style_list["]-"] = BracketA
class BracketB(_Bracket):
"""
An arrow with a bracket([) at its end.
"""
def __init__(self, widthB=1., lengthB=0.2, angleB=None):
"""
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketB, self).__init__(None, True,
widthB=widthB,
lengthB=lengthB,
angleB=angleB)
_style_list["-["] = BracketB
class BarAB(_Bracket):
"""
An arrow with a bar(|) at both ends.
"""
def __init__(self,
widthA=1., angleA=None,
widthB=1., angleB=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BarAB, self).__init__(
True, True, widthA=widthA, lengthA=0, angleA=angleA,
widthB=widthB, lengthB=0, angleB=angleB)
_style_list["|-|"] = BarAB
class Simple(_Base):
"""
A simple arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Simple, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
in_f = inside_circle(x2, y2, head_length)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
from .bezier import NonIntersectingPathException
try:
arrow_out, arrow_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
except NonIntersectingPathException:
# if this happens, make a straight line of the head_length
# long.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]
arrow_out = None
# head
head_width = self.head_width * mutation_size
head_left, head_right = make_wedged_bezier2(arrow_in,
head_width / 2., wm=.5)
# tail
if arrow_out is not None:
tail_width = self.tail_width * mutation_size
tail_left, tail_right = get_parallels(arrow_out,
tail_width / 2.)
patch_path = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
else:
patch_path = [(Path.MOVETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.CLOSEPOLY, head_left[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["simple"] = Simple
class Fancy(_Base):
"""
A fancy arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Fancy, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
from .bezier import NonIntersectingPathException
# path for head
in_f = inside_circle(x2, y2, head_length)
try:
path_out, path_in = \
split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01)
except NonIntersectingPathException:
# if this happens, make a straight line of the head_length
# long.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]
path_head = arrow_path
else:
path_head = path_in
# path for head
in_f = inside_circle(x2, y2, head_length * .8)
path_out, path_in = split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01
)
path_tail = path_out
# head
head_width = self.head_width * mutation_size
head_l, head_r = make_wedged_bezier2(path_head,
head_width / 2.,
wm=.6)
# tail
tail_width = self.tail_width * mutation_size
tail_left, tail_right = make_wedged_bezier2(path_tail,
tail_width * .5,
w1=1., wm=0.6, w2=0.3)
# path for head
in_f = inside_circle(x0, y0, tail_width * .3)
path_in, path_out = split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01
)
tail_start = path_in[-1]
head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_start),
(Path.LINETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_start),
(Path.CLOSEPOLY, tail_start),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["fancy"] = Fancy
class Wedge(_Base):
"""
Wedge(?) shape. Only works with a quadratic bezier curve. The
begin point has a width of the tail_width and the end point has a
width of 0. At the middle, the width is shrink_factor*tail_width.
"""
def __init__(self, tail_width=.3, shrink_factor=0.5):
"""
*tail_width*
width of the tail
*shrink_factor*
fraction of the arrow width at the middle point
"""
self.tail_width = tail_width
self.shrink_factor = shrink_factor
super(ArrowStyle.Wedge, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
b_plus, b_minus = make_wedged_bezier2(
arrow_path,
self.tail_width * mutation_size / 2.,
wm=self.shrink_factor)
patch_path = [(Path.MOVETO, b_plus[0]),
(Path.CURVE3, b_plus[1]),
(Path.CURVE3, b_plus[2]),
(Path.LINETO, b_minus[2]),
(Path.CURVE3, b_minus[1]),
(Path.CURVE3, b_minus[0]),
(Path.CLOSEPOLY, b_minus[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["wedge"] = Wedge
if __doc__:
__doc__ = cbook.dedent(__doc__) % \
{"AvailableArrowstyles": _pprint_styles(_style_list)}
docstring.interpd.update(
AvailableArrowstyles=_pprint_styles(ArrowStyle._style_list),
AvailableConnectorstyles=_pprint_styles(ConnectionStyle._style_list),
)
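# A minimal sketch of using an arrow style on its own (the path and sizes
# are arbitrary; in practice FancyArrowPatch drives this call). The style is
# called with a Path, a mutation size and a line width, and returns the
# mutated path(s) plus fillable flag(s). 'quad_bezier_path' is an assumed
# quadratic bezier Path, which the "simple" style requires:
#     style = ArrowStyle("simple", head_length=0.5, head_width=0.5)
#     arrow_path, fillable = style(quad_bezier_path, 20.0, 1.0)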
class FancyArrowPatch(Patch):
"""
A fancy arrow patch. It draws an arrow using the :class:ArrowStyle.
"""
_edge_default = True
def __str__(self):
if self._posA_posB is not None:
(x1, y1), (x2, y2) = self._posA_posB
return self.__class__.__name__ \
+ "(%g,%g->%g,%g)" % (x1, y1, x2, y2)
else:
return self.__class__.__name__ \
+ "(%s)" % (str(self._path_original),)
@docstring.dedent_interpd
def __init__(self, posA=None, posB=None,
path=None,
arrowstyle="simple",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=2.,
shrinkB=2.,
mutation_scale=1.,
mutation_aspect=None,
dpi_cor=1.,
**kwargs):
"""
If *posA* and *posB* are given, a path connecting the two points is
created according to the connectionstyle. The path will be
clipped with *patchA* and *patchB* and further shrunk by
*shrinkA* and *shrinkB*. An arrow is drawn along this
resulting path using the *arrowstyle* parameter. If *path* is
provided, an arrow is drawn along this path and *patchA*,
*patchB*, *shrinkA*, and *shrinkB* are ignored.
The *connectionstyle* describes how *posA* and *posB* are
connected. It can be an instance of the ConnectionStyle class
(matplotlib.patches.ConnectionStyle) or a string of the
connectionstyle name, with optional comma-separated
attributes. The following connection styles are available.
%(AvailableConnectorstyles)s
The *arrowstyle* describes how the fancy arrow will be
drawn. It can be a string of the available arrowstyle names,
with optional comma-separated attributes, or an
ArrowStyle instance. The optional attributes are meant to be
scaled with the *mutation_scale*. The following arrow styles are
available.
%(AvailableArrowstyles)s
*mutation_scale* : a value with which attributes of arrowstyle
(e.g., head_length) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
if posA is not None and posB is not None and path is None:
self._posA_posB = [posA, posB]
if connectionstyle is None:
connectionstyle = "arc3"
self.set_connectionstyle(connectionstyle)
elif posA is None and posB is None and path is not None:
self._posA_posB = None
self._connetors = None
else:
raise ValueError("either posA and posB, or path need to provided")
self.patchA = patchA
self.patchB = patchB
self.shrinkA = shrinkA
self.shrinkB = shrinkB
self._path_original = path
self.set_arrowstyle(arrowstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self.set_dpi_cor(dpi_cor)
#self._draw_in_display_coordinate = True
def set_dpi_cor(self, dpi_cor):
"""
dpi_cor is currently used for linewidth-related things and
shrink factor. Mutation scale is affected by this.
"""
self._dpi_cor = dpi_cor
self.stale = True
def get_dpi_cor(self):
"""
dpi_cor is currently used for linewidth-related things and
shrink factor. Mutation scale is affected by this.
"""
return self._dpi_cor
def set_positions(self, posA, posB):
""" set the begin and end positions of the connecting
path. Use current value if None.
"""
if posA is not None:
self._posA_posB[0] = posA
if posB is not None:
self._posA_posB[1] = posB
self.stale = True
def set_patchA(self, patchA):
""" set the begin patch.
"""
self.patchA = patchA
self.stale = True
def set_patchB(self, patchB):
""" set the begin patch
"""
self.patchB = patchB
self.stale = True
def set_connectionstyle(self, connectionstyle, **kw):
"""
Set the connection style.
*connectionstyle* can be a string with connectionstyle name with
optional comma-separated attributes. Alternatively, the attrs can be
provided as keywords.
set_connectionstyle("arc,angleA=0,armA=30,rad=10")
set_connectionstyle("arc", angleA=0,armA=30,rad=10)
Old attrs simply are forgotten.
Without argument (or with connectionstyle=None), return
available styles as a list of strings.
"""
if connectionstyle is None:
return ConnectionStyle.pprint_styles()
if isinstance(connectionstyle, ConnectionStyle._Base):
self._connector = connectionstyle
elif six.callable(connectionstyle):
# we may need to check the calling convention of the given function
self._connector = connectionstyle
else:
self._connector = ConnectionStyle(connectionstyle, **kw)
self.stale = True
def get_connectionstyle(self):
"""
Return the ConnectionStyle instance
"""
return self._connector
def set_arrowstyle(self, arrowstyle=None, **kw):
"""
Set the arrow style.
*arrowstyle* can be a string with arrowstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords.
set_arrowstyle("Fancy,head_length=0.2")
set_arrowstyle("fancy", head_length=0.2)
Old attrs simply are forgotten.
Without argument (or with arrowstyle=None), return
available box styles as a list of strings.
"""
if arrowstyle is None:
return ArrowStyle.pprint_styles()
if isinstance(arrowstyle, ArrowStyle._Base):
self._arrow_transmuter = arrowstyle
else:
self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)
self.stale = True
def get_arrowstyle(self):
"""
Return the arrowstyle object
"""
return self._arrow_transmuter
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale = scale
self.stale = True
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect = aspect
self.stale = True
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_path(self):
"""
Return the path of the arrow in data coordinates. Use the
get_path_in_displaycoord() method to retrieve the arrow path
in display coordinates.
"""
_path, fillable = self.get_path_in_displaycoord()
if cbook.iterable(fillable):
_path = concatenate_paths(_path)
return self.get_transform().inverted().transform_path(_path)
def get_path_in_displaycoord(self):
"""
Return the mutated path of the arrow in the display coord
"""
dpi_cor = self.get_dpi_cor()
if self._posA_posB is not None:
posA = self.get_transform().transform_point(self._posA_posB[0])
posB = self.get_transform().transform_point(self._posA_posB[1])
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
else:
_path = self.get_transform().transform_path(self._path_original)
_path, fillable = self.get_arrowstyle()(
_path,
self.get_mutation_scale() * dpi_cor,
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
#if not fillable:
# self._fill = False
return _path, fillable
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_dashes(self._dashoffset, self._dashes)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_capstyle('round')
gc.set_snap(self.get_snap())
rgbFace = self._facecolor
if rgbFace[3] == 0:
rgbFace = None # (some?) renderers expect this as no-fill signal
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
# FIXME : dpi_cor is for the dpi-dependency of the
# linewidth. There could be room for improvement.
#
#dpi_cor = renderer.points_to_pixels(1.)
self.set_dpi_cor(renderer.points_to_pixels(1.))
path, fillable = self.get_path_in_displaycoord()
if not cbook.iterable(fillable):
path = [path]
fillable = [fillable]
affine = transforms.IdentityTransform()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
for p, f in zip(path, fillable):
if f:
renderer.draw_path(gc, p, affine, rgbFace)
else:
renderer.draw_path(gc, p, affine, None)
gc.restore()
renderer.close_group('patch')
self.stale = False
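# A hedged usage sketch for FancyArrowPatch ('ax' and the end points are
# assumed): an arrow from posA to posB along an arc3 connection, drawn with
# a filled head scaled by mutation_scale.
#     arrow = FancyArrowPatch((0.2, 0.2), (0.8, 0.8),
#                             arrowstyle="-|>", mutation_scale=20,
#                             connectionstyle="arc3,rad=0.2")
#     ax.add_patch(arrow)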
class ConnectionPatch(FancyArrowPatch):
"""
The :class:`~matplotlib.patches.ConnectionPatch` class is used to make
connecting lines between two points (possibly in different axes).
"""
def __str__(self):
return "ConnectionPatch((%g,%g),(%g,%g))" % \
(self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])
@docstring.dedent_interpd
def __init__(self, xyA, xyB, coordsA, coordsB=None,
axesA=None, axesB=None,
arrowstyle="-",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=10.,
mutation_aspect=None,
clip_on=False,
dpi_cor=1.,
**kwargs):
"""
Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*
Valid keys are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is None
patchB default is None
shrinkA default is 0 points
shrinkB default is 0 points
mutation_scale default is 10 points
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*coordsA* and *coordsB* are strings that indicate the
coordinates of *xyA* and *xyB*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
'figure fraction' 0,0 is lower left of figure and 1,1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
'axes fraction' 0,0 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
"""
if coordsB is None:
coordsB = coordsA
# we'll draw ourself after the artist we annotate by default
self.xy1 = xyA
self.xy2 = xyB
self.coords1 = coordsA
self.coords2 = coordsB
self.axesA = axesA
self.axesB = axesB
FancyArrowPatch.__init__(self,
posA=(0, 0), posB=(1, 1),
arrowstyle=arrowstyle,
arrow_transmuter=arrow_transmuter,
connectionstyle=connectionstyle,
connector=connector,
patchA=patchA,
patchB=patchB,
shrinkA=shrinkA,
shrinkB=shrinkB,
mutation_scale=mutation_scale,
mutation_aspect=mutation_aspect,
clip_on=clip_on,
dpi_cor=dpi_cor,
**kwargs)
# if True, draw annotation only if self.xy is inside the axes
self._annotation_clip = None
def _get_xy(self, x, y, s, axes=None):
"""
calculate the pixel position of the given point
"""
if axes is None:
axes = self.axes
if s == 'data':
trans = axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s == 'offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi / 72.
y *= dpi / 72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s == 'polar':
theta, r = x, y
x = r * np.cos(theta)
y = r * np.sin(theta)
trans = axes.transData
return trans.transform_point((x, y))
elif s == 'figure points':
# points from the lower left corner of the figure
dpi = self.figure.dpi
l, b, w, h = self.figure.bbox.bounds
r = l + w
t = b + h
x *= dpi / 72.
y *= dpi / 72.
if x < 0:
x = r + x
if y < 0:
y = t + y
return x, y
elif s == 'figure pixels':
# pixels from the lower left corner of the figure
l, b, w, h = self.figure.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x
if y < 0:
y = t + y
return x, y
elif s == 'figure fraction':
# (0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x, y))
elif s == 'axes points':
# points from the lower left corner of the axes
dpi = self.figure.dpi
l, b, w, h = axes.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x * dpi / 72.
else:
x = l + x * dpi / 72.
if y < 0:
y = t + y * dpi / 72.
else:
y = b + y * dpi / 72.
return x, y
elif s == 'axes pixels':
#pixels from the lower left corner of the axes
l, b, w, h = axes.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x
else:
x = l + x
if y < 0:
y = t + y
else:
y = b + y
return x, y
elif s == 'axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = axes.transAxes
return trans.transform_point((x, y))
def set_annotation_clip(self, b):
"""
set *annotation_clip* attribute.
* True: the annotation will only be drawn when self.xy is inside the
axes.
* False: the annotation will always be drawn regardless of its
position.
* None: the self.xy will be checked only if *xycoords* is "data"
"""
self._annotation_clip = b
self.stale = True
def get_annotation_clip(self):
"""
Return *annotation_clip* attribute.
See :meth:`set_annotation_clip` for the meaning of return values.
"""
return self._annotation_clip
def get_path_in_displaycoord(self):
"""
Return the mutated path of the arrow in the display coord
"""
dpi_cor = self.get_dpi_cor()
x, y = self.xy1
posA = self._get_xy(x, y, self.coords1, self.axesA)
x, y = self.xy2
posB = self._get_xy(x, y, self.coords2, self.axesB)
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
_path, fillable = self.get_arrowstyle()(
_path,
self.get_mutation_scale() * dpi_cor,
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
return _path, fillable
def _check_xy(self, renderer):
"""
Check whether the annotation needs to
be drawn.
"""
b = self.get_annotation_clip()
if b or (b is None and self.coords1 == "data"):
x, y = self.xy1
xy_pixel = self._get_xy(x, y, self.coords1, self.axesA)
if not self.axes.contains_point(xy_pixel):
return False
if b or (b is None and self.coords2 == "data"):
x, y = self.xy2
xy_pixel = self._get_xy(x, y, self.coords2, self.axesB)
if self.axesB is None:
axes = self.axes
else:
axes = self.axesB
if not axes.contains_point(xy_pixel):
return False
return True
def draw(self, renderer):
"""
Draw.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
if not self._check_xy(renderer):
return
FancyArrowPatch.draw(self, renderer)
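# A minimal sketch of connecting points across two axes with ConnectionPatch
# ('ax1' and 'ax2' are assumed Axes instances created elsewhere):
#     con = ConnectionPatch(xyA=(0.8, 0.5), xyB=(0.2, 0.5),
#                           coordsA="data", coordsB="data",
#                           axesA=ax1, axesB=ax2, arrowstyle="->")
#     ax2.add_artist(con)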
| gpl-3.0 |
kcavagnolo/astroML | examples/learning/plot_neighbors_photoz.py | 3 | 2115 | """
K-Neighbors for Photometric Redshifts
-------------------------------------
Estimate redshifts from the colors of SDSS galaxies and quasars.
This uses colors from a sample of 50,000 objects with SDSS photometry
and ugriz magnitudes. The example shows how far one can get with an
extremely simple machine learning approach to the photometric redshift
problem.
The function :func:`fetch_sdss_galaxy_colors` used below actually queries
the SDSS CASjobs server for the colors of the 50,000 galaxies.
"""
# Author: Jake VanderPlas <vanderplas@astro.washington.edu>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from astroML.datasets import fetch_sdss_galaxy_colors
from astroML.plotting import scatter_contour
n_neighbors = 1
data = fetch_sdss_galaxy_colors()
N = len(data)
# shuffle data
np.random.seed(0)
np.random.shuffle(data)
# put colors in a matrix
X = np.zeros((N, 4))
X[:, 0] = data['u'] - data['g']
X[:, 1] = data['g'] - data['r']
X[:, 2] = data['r'] - data['i']
X[:, 3] = data['i'] - data['z']
z = data['redshift']
# divide into training and testing data
Ntrain = N // 2
Xtrain = X[:Ntrain]
ztrain = z[:Ntrain]
Xtest = X[Ntrain:]
ztest = z[Ntrain:]
knn = KNeighborsRegressor(n_neighbors, weights='uniform')
zpred = knn.fit(Xtrain, ztrain).predict(Xtest)
axis_lim = np.array([-0.1, 2.5])
rms = np.sqrt(np.mean((ztest - zpred) ** 2))
print("RMS error = %.2g" % rms)
ax = plt.axes()
plt.scatter(ztest, zpred, c='k', lw=0, s=4)
plt.plot(axis_lim, axis_lim, '--k')
plt.plot(axis_lim, axis_lim + rms, ':k')
plt.plot(axis_lim, axis_lim - rms, ':k')
plt.xlim(axis_lim)
plt.ylim(axis_lim)
plt.text(0.99, 0.02, "RMS error = %.2g" % rms,
ha='right', va='bottom', transform=ax.transAxes,
bbox=dict(ec='w', fc='w'), fontsize=16)
plt.title('Photo-z: Nearest Neighbor Regression')
plt.xlabel(r'$\mathrm{z_{spec}}$', fontsize=14)
plt.ylabel(r'$\mathrm{z_{phot}}$', fontsize=14)
plt.show()
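# Follow-up sketch (added; not part of the original example): with more
# neighbors and distance weighting, only the regressor construction changes.
knn_w = KNeighborsRegressor(n_neighbors=10, weights='distance')
zpred_w = knn_w.fit(Xtrain, ztrain).predict(Xtest)
print("RMS error (k=10, distance weights) = %.2g"
      % np.sqrt(np.mean((ztest - zpred_w) ** 2)))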
| bsd-2-clause |
nelson-liu/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) is compared with a single decision tree
regressor. As the number of boosts is increased the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
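# Illustrative extension (added; not in the original example): AdaBoostRegressor
# exposes staged_predict, which shows how the training error evolves as the
# boosting rounds accumulate.
staged_mse = [np.mean((y - y_stage) ** 2)
              for y_stage in regr_2.staged_predict(X)]
plt.figure()
plt.plot(np.arange(1, len(staged_mse) + 1), staged_mse, c="r")
plt.xlabel("boosting iterations")
plt.ylabel("training MSE")
plt.show()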
| bsd-3-clause |
allrod5/extra-trees | benchmarks/classification/decision_surface.py | 1 | 4826 | print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for MCZA015-13 class project by Rodrigo Martins de Oliveira
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
from sklearn.datasets import make_circles
from sklearn.datasets import make_classification
from sklearn.datasets import make_moons
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier as SKExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from extra_trees.ensemble.forest import ExtraTreesClassifier
h = .02 # step size in the mesh
names = [
"Nearest Neighbors",
"Linear SVM",
"RBF SVM",
"Gaussian Process",
"Neural Net",
"Naive Bayes",
"QDA",
"AdaBoost",
"Decision Tree",
"Random Forest",
"ExtraTrees (SciKit)",
"ExtraTrees",
]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
MLPClassifier(alpha=1),
GaussianNB(),
QuadraticDiscriminantAnalysis(),
AdaBoostClassifier(),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=2),
SKExtraTreesClassifier(n_estimators=10, max_features=2),
ExtraTreesClassifier(n_estimators=10, max_features=2),
]
X, y = make_classification(
n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [
make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(33, 11))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(
np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(
X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(
X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(
X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(
X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(
xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
| mit |
Remi-C/LOD_ordering_for_patches_of_points | script/test_octree_LOD.py | 1 | 7481 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 2 22:08:22 2014
@author: remi
"""
#trying to order points by octree with python
import numpy as np
from numpy import random, sqrt
from sklearn import preprocessing
import matplotlib.pyplot as plt
#defining a dummy entry: a random 2D point cloud of 16*16 points
pointcloud = random.rand(16*16,2);
index = np.arange(1,16*16+1)
#parameters
tot_level = 3 ;
#centering data so that the leftmost point is at x=0 and the bottom-most point is at y=0
pointcloud[:,0] = pointcloud[:,0]- np.amin(pointcloud[:,0]);
pointcloud[:,1] = pointcloud[:,1]- np.amin(pointcloud[:,1]);
#finding the max scaling, in X, Y or Z
max_r = max(np.amax(pointcloud[:,0])-np.amin(pointcloud[:,0]), np.amax(pointcloud[:,1])-np.amin(pointcloud[:,1]))
#dividing so the max extent is 1. Now the point cloud lies within [0,1] x [0,1]
pointcloud = pointcloud/ max_r ;
#we have to cheat a little, so that for level 3 for instance, all values are between 0 and 7 inclusive, never reaching 8.
pointcloud_int = np.trunc(abs((pointcloud*pow(2,tot_level)-0.0001))).astype(int)
plt.plot(pointcloud[:,0],pointcloud[:,1], 'ro') ;
plt.plot(pointcloud_int[:,0],pointcloud_int[:,1], 'ro') ;
plt.axis([-1, 8, -1, 8]) ;
plt.show() ;
plt.close('all');
result_point = pointcloud_int[rec_ar[:,0]]
plt.plot(result_point[:,0],result_point[:,1], 'ro') ;
rec_ar = np.array(rec)
piv_ar = np.array(piv)
plt.plot(piv_ar[:,0], piv_ar[:,1], 'ro') ;
np.binary_repr(1)
def bin(s):
return str(s) if s<=1 else bin(s>>1) + str(s&1)
def testBit(int_type, offset):
mask = 1 << offset
return( (int_type & mask)>0 )
testBit(8,1)
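# Illustrative check (added for clarity): with tot_level = 3 the integer
# coordinates lie in [0, 7], so bit 2 (value 4) is the first split, separating
# the low half [0, 3] from the high half [4, 7] of each axis.
print testBit(np.array([1, 3, 5, 7]), 2) # -> [False False  True  True]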
pointcloud_bin = np.binary_repr(pointcloud_int)
pointcloud_int >> (tot_level-1) ;
#np.binary_repr(8)
( ((pointcloud_int >> 1 ) << 1) ) >> (tot_level-1) ;
testBit(pointcloud_int[:,1],3)
#cut the input point cloud into sub-cells (8 for an octree, 4 in this 2D demo) based on the l-th bit value, starting from right to left
point_cloud_0_0_mask = np.logical_and((testBit(pointcloud_int[:,0],2)==0) , (testBit(pointcloud_int[:,1],2)==0) ) ;
pivot = np.array([pow(2,tot_level-1),pow(2,tot_level-1)])
pointcloud_centered = pointcloud_int - pivot
#coordinate to work :
toto = np.array([1,2,3])
testBit(toto,1)
(pointcloud_int >>1 )>>5
pow(2,4)
1<<4
#
# level 0
result = list() ;
pointcloud_int ;
index
pivot
cur_lev = 0
rec = [];
#find the 0 level point
min_point = np.argmin(np.sum(np.abs(pointcloud_int - pivot ),axis=1))
result.append(list((index[min_point],cur_lev)))
#compute the 4 sub parts
for b_x in list((0,1)) :
for b_y in list((0,1)) :
#looping on all 4 sub parts
print b_x, b_y
rec.append (np.logical_and(
(testBit(pointcloud_int[:,0],2)>0)==b_x
,(testBit(pointcloud_int[:,1],2)>0)==b_y
)
)
testBit(pointcloud_int[:,0],2)
print (testBit(pointcloud_int[:,0],2)>0==b_x) ;
print (testBit(pointcloud_int[:,1],2)>0==b_y) ;
rec[b_x,b_y] = np.logical_and((testBit(pointcloud_int[:,0],2)>0==b_x)
,(testBit(pointcloud_int[:,1],2)>0==b_y) )
print rec
np.binary_repr(pointcloud_int[:,0] )
#given a point cloud
#compute the closest to center
def recursive_octree_ordering(point_array,index_array, center_point, level,tot_level, result,piv):
#importing necessary lib
import numpy as np;
#print for debug
# print '\n\n working on level : '+str(level);
# print 'input points: \n\t',point_array ;
# print 'index_array : \n\t',index_array;
# print 'center_point : \n\t',center_point;
# print 'level : \n\t',level;
# print 'tot_level : \n\t',tot_level;
# print 'result : \n\t',result;
#stopping condition : no points:
    if len(point_array) == 0:
return;
#updatig level;
sub_part_level = level+1 ;
print 'level ',level,' , points remaining : ',len(point_array) ;
print center_point;
piv.append(center_point);
#find the closest point to pivot
min_point = np.argmin(np.sum(np.abs(point_array - center_point ),axis=1))
result.append(list((index_array[min_point],level))) ;
#removing the found point from the array of points
#np.delete(point_array, min_point, axis=0) ;
#np.delete(index_array, min_point, axis=0) ;
    #stop if only one point remains: we won't divide further; same if we have reached max depth
if (len(point_array) ==1 )|(level >= tot_level):
return;
#compute the 4 sub parts
for b_x in list((0,1)) :
for b_y in list((0,1)) :
#looping on all 4 sub parts
print (b_x*2-1), (b_y*2-1) ;
udpate_to_pivot = np.asarray([ (b_x*2-1)*(pow(2,tot_level - level -2 ))
,(b_y*2-1)*(pow(2,tot_level - level -2 ))
]);
sub_part_center_point = center_point +udpate_to_pivot;
            # we want to iterate on each sub part,
            # so we need to update: point_array, index_array, center_point and level
            #update point_array and index_array: find the points that fall inside this sub part
            #update center point: add/subtract 2^(tot_level - level - 2) to the previous pivot
#find the points concerned :
point_in_subpart_mask = np.logical_and(
testBit(point_array[:,0],tot_level - level-1) ==b_x
, testBit(point_array[:,1],tot_level - level -1) ==b_y ) ;
sub_part_points= point_array[point_in_subpart_mask];
sub_part_index = index_array[point_in_subpart_mask];
sub_part_center_point = center_point + np.asarray([
(b_x*2-1)*(pow(2,tot_level - level -2 ))
,(b_y*2-1)*(pow(2,tot_level - level -2 ))
]);
if len(sub_part_points)>=1:
recursive_octree_ordering(sub_part_points
,sub_part_index
, sub_part_center_point
, sub_part_level
, tot_level
, result
, piv);
continue;
else:
                print 'at level ',level,'bx by:',b_x,' ',b_y,' refusing to go on, ', len(sub_part_points), ' points remaining for this'
continue;
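# Net effect (added note): each call appends one (point index, level) pair to
# "result", visiting cells coarse-to-fine, which is the level-of-detail
# ordering this script is exploring.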
rec = [] ;
piv = [] ;
recursive_octree_ordering(pointcloud_int,index,pivot,0,3,rec, piv );
#recursive_octree_ordering(pointcloud_int,index, np.array([2,2]),1,3,rec, piv );
piv_ar = np.array(piv)
plt.plot(piv_ar[:,0], piv_ar[:,1], 'ro') ;
plot(x=pointcloud_int[:,0].T,y=pointcloud_int[:,1].T, marker='o', color='r', ls='' )
plt.plot(pointcloud_int.T, marker='o', color='r', ls='')
plt.imsave('/')
from mpl_toolkits.mplot3d import Axes3D
plt.scatter(pointcloud[:,0], pointcloud[:,1],c='red');
plt.scatter(pointcloud_int[:,0], pointcloud_int[:,1],c='green');
plt.plot(pointcloud[:,0],pointcloud[:,1], 'ro')
plt.plot(pointcloud_int[:,0],pointcloud_int[:,1], 'ro')
plt.axis([-1, 8, -1, 8])
plt.show();
fig = plt.figure()
ax = fig.add_subplot(111)
ax.scatter(pointcloud_int[:,0], pointcloud_int[:,1]);
ax.scatter(pointcloud_int[:,0], pointcloud_int[:,1], pointcloud_int[:,0], zdir='z', c= 'red')
fig.show()
fig, axes = plt.subplots(1, 2, figsize=(12,3))
axes[0].scatter(pointcloud[:,0], pointcloud[:,1],c='red');
axes[1].scatter(pointcloud_int[:,0], pointcloud_int[:,1],c='green');
fig.show();
for f in list((0,1)):
(f*2-1)
import octree_ordering | lgpl-3.0 |
jor-/scipy | scipy/optimize/minpack.py | 1 | 34280 | from __future__ import division, print_function, absolute_import
import threading
import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
transpose, zeros, prod, greater, array,
all, where, isscalar, asarray, inf, abs,
finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
from scipy._lib._util import _asarray_validated, _lazywhere
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res))
raise TypeError(msg)
if issubdtype(res.dtype, inexact):
dt = res.dtype
else:
dt = dtype(float)
return shape(res), dt
def fsolve(func, x0, args=(), fprime=None, full_output=0,
col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
epsfcn=None, factor=100, diag=None):
"""
Find the roots of a function.
Return the roots of the (non-linear) equations defined by
``func(x) = 0`` given a starting estimate.
Parameters
----------
func : callable ``f(x, *args)``
A function that takes at least one (possibly vector) argument,
and returns a value of the same length.
x0 : ndarray
The starting estimate for the roots of ``func(x) = 0``.
args : tuple, optional
Any extra arguments to `func`.
fprime : callable ``f(x, *args)``, optional
A function to compute the Jacobian of `func` with derivatives
across the rows. By default, the Jacobian will be estimated.
full_output : bool, optional
If True, return optional outputs.
col_deriv : bool, optional
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float, optional
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int, optional
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple, optional
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
epsfcn : float, optional
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`epsfcn` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence, optional
N positive entries that serve as a scale factors for the
variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for
an unsuccessful call).
infodict : dict
A dictionary of optional outputs with the keys:
``nfev``
number of function calls
``njev``
number of Jacobian calls
``fvec``
function evaluated at the output
``fjac``
the orthogonal matrix, q, produced by the QR
factorization of the final approximate Jacobian
matrix, stored column wise
``r``
upper triangular matrix produced by QR factorization
of the same matrix
``qtf``
the vector ``(transpose(q) * fvec)``
ier : int
An integer flag. Set to 1 if a solution was found, otherwise refer
to `mesg` for more information.
mesg : str
If no solution is found, `mesg` details the cause of failure.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See the ``method=='hybr'`` in particular.
Notes
-----
``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
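    Examples
    --------
    A minimal sketch, added here for illustration (two equations in two
    unknowns; any starting point reasonably close to a root will do):
    >>> import numpy as np
    >>> from scipy.optimize import fsolve
    >>> def equations(p):
    ...     x, y = p
    ...     return [x + 2 * y - 2, x ** 2 + y ** 2 - 1]
    >>> x, y = fsolve(equations, (1, 1))
    >>> np.allclose(equations((x, y)), [0.0, 0.0])
    True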
"""
options = {'col_deriv': col_deriv,
'xtol': xtol,
'maxfev': maxfev,
'band': band,
'eps': epsfcn,
'factor': factor,
'diag': diag}
res = _root_hybr(func, x0, args, jac=fprime, **options)
if full_output:
x = res['x']
info = dict((k, res.get(k))
for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
info['fvec'] = res['fun']
return x, info, res['status'], res['message']
else:
status = res['status']
msg = res['message']
if status == 0:
raise TypeError(msg)
elif status == 1:
pass
elif status in [2, 3, 4, 5]:
warnings.warn(msg, RuntimeWarning)
else:
raise TypeError(msg)
return res['x']
def _root_hybr(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
factor=100, diag=None, **unknown_options):
"""
Find the roots of a multivariate function using MINPACK's hybrd and
hybrj routines (modified Powell method).
Options
-------
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
eps : float
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`eps` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence
N positive entries that serve as a scale factors for the
variables.
"""
_check_unknown_options(unknown_options)
epsfcn = eps
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
if epsfcn is None:
epsfcn = finfo(dtype).eps
Dfun = jac
if Dfun is None:
if band is None:
ml, mu = -10, -10
else:
ml, mu = band[:2]
if maxfev == 0:
maxfev = 200 * (n + 1)
retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
ml, mu, epsfcn, factor, diag)
else:
_check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
if (maxfev == 0):
maxfev = 100 * (n + 1)
retval = _minpack._hybrj(func, Dfun, x0, args, 1,
col_deriv, xtol, maxfev, factor, diag)
x, status = retval[0], retval[-1]
errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d." % maxfev,
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible." % xtol,
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
info = retval[1]
info['fun'] = info.pop('fvec')
sol = OptimizeResult(x=x, success=(status == 1), status=status)
sol.update(info)
try:
sol['message'] = errors[status]
except KeyError:
sol['message'] = errors['unknown']
return sol
LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
"""
Minimize the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
should take at least one (possibly length N vector) argument and
returns M floating point numbers. It must not return NaNs or
fitting might fail.
x0 : ndarray
The starting estimate for the minimization.
args : tuple, optional
Any extra arguments to func are placed in this tuple.
Dfun : callable, optional
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool, optional
non-zero to return all optional outputs.
col_deriv : bool, optional
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float, optional
Relative error desired in the sum of squares.
xtol : float, optional
Relative error desired in the approximate solution.
gtol : float, optional
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int, optional
The maximum number of calls to the function. If `Dfun` is provided
then the default `maxfev` is 100*(N+1) where N is the number of elements
in x0, otherwise the default `maxfev` is 200*(N+1).
epsfcn : float, optional
A variable used in determining a suitable step length for the forward-
difference approximation of the Jacobian (for Dfun=None).
Normally the actual step length will be sqrt(epsfcn)*x
If epsfcn is less than the machine precision, it is assumed that the
relative errors are of the order of the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence, optional
N positive entries that serve as a scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
estimate of the Hessian. A value of None indicates a singular matrix,
which means the curvature in parameters `x` is numerically flat. To
obtain the covariance matrix of the parameters `x`, `cov_x` must be
multiplied by the variance of the residuals -- see curve_fit.
infodict : dict
a dictionary of optional outputs with the keys:
``nfev``
The number of function calls
``fvec``
The function evaluated at the output
``fjac``
A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
``ipvt``
An integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
``qtf``
The vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
See Also
--------
least_squares : Newer interface to solve nonlinear least-squares problems
with bounds on the variables. See ``method=='lm'`` in particular.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
The solution, `x`, is always a 1D array, regardless of the shape of `x0`,
or whether `x0` is a scalar.
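    Examples
    --------
    An illustrative sketch (added for clarity): fit ``y = a*x + b`` by handing
    the residual function to ``leastsq``.
    >>> import numpy as np
    >>> from scipy.optimize import leastsq
    >>> x = np.linspace(0, 1, 20)
    >>> y = 3.0 * x + 1.0
    >>> def residuals(p):
    ...     a, b = p
    ...     return y - (a * x + b)
    >>> params, ier = leastsq(residuals, (1.0, 0.0))
    >>> np.allclose(params, [3.0, 1.0])
    True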
"""
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
m = shape[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
if epsfcn is None:
epsfcn = finfo(dtype).eps
if Dfun is None:
if maxfev == 0:
maxfev = 200*(n + 1)
retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if maxfev == 0:
maxfev = 100 * (n + 1)
retval = _minpack._lmder(func, Dfun, x0, args, full_output,
col_deriv, ftol, xtol, gtol, maxfev,
factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible." % ftol,
ValueError],
7: ["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol,
ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError]}
# The FORTRAN return value (possible return values are >= 0 and <= 8)
info = retval[-1]
if full_output:
cov_x = None
if info in LEASTSQ_SUCCESS:
from numpy.dual import inv
perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
r = triu(transpose(retval[1]['fjac'])[:n, :])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R), R))
except (LinAlgError, ValueError):
pass
return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
else:
if info in LEASTSQ_FAILURE:
warnings.warn(errors[info][0], RuntimeWarning)
elif info == 0:
raise errors[info][1](errors[info][0])
return retval[0], info
def _wrap_func(func, xdata, ydata, transform):
if transform is None:
def func_wrapped(params):
return func(xdata, *params) - ydata
elif transform.ndim == 1:
def func_wrapped(params):
return transform * (func(xdata, *params) - ydata)
else:
# Chisq = (y - yd)^T C^{-1} (y-yd)
# transform = L such that C = L L^T
# C^{-1} = L^{-T} L^{-1}
# Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
# Define (y-yd)' = L^{-1} (y-yd)
# by solving
# L (y-yd)' = (y-yd)
# and minimize (y-yd)'^T (y-yd)'
def func_wrapped(params):
return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
return func_wrapped
def _wrap_jac(jac, xdata, transform):
if transform is None:
def jac_wrapped(params):
return jac(xdata, *params)
elif transform.ndim == 1:
def jac_wrapped(params):
return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
else:
def jac_wrapped(params):
return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True)
return jac_wrapped
def _initialize_feasible(lb, ub):
p0 = np.ones_like(lb)
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
mask = lb_finite & ub_finite
p0[mask] = 0.5 * (lb[mask] + ub[mask])
mask = lb_finite & ~ub_finite
p0[mask] = lb[mask] + 1
mask = ~lb_finite & ub_finite
p0[mask] = ub[mask] - 1
return p0
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
check_finite=True, bounds=(-np.inf, np.inf), method=None,
jac=None, **kwargs):
"""
Use non-linear least squares to fit a function, f, to data.
Assumes ``ydata = f(xdata, *params) + eps``
Parameters
----------
f : callable
The model function, f(x, ...). It must take the independent
variable as the first argument and the parameters to fit as
separate remaining arguments.
xdata : array_like or object
The independent variable where the data is measured.
Should usually be an M-length sequence or an (k,M)-shaped array for
functions with k predictors, but can actually be any object.
ydata : array_like
The dependent data, a length M array - nominally ``f(xdata, ...)``.
p0 : array_like, optional
Initial guess for the parameters (length N). If None, then the
initial values will all be 1 (if the number of parameters for the
function can be determined using introspection, otherwise a
ValueError is raised).
sigma : None or M-length sequence or MxM array, optional
Determines the uncertainty in `ydata`. If we define residuals as
``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
depends on its number of dimensions:
- A 1-d `sigma` should contain values of standard deviations of
errors in `ydata`. In this case, the optimized function is
``chisq = sum((r / sigma) ** 2)``.
- A 2-d `sigma` should contain the covariance matrix of
errors in `ydata`. In this case, the optimized function is
``chisq = r.T @ inv(sigma) @ r``.
.. versionadded:: 0.19
None (default) is equivalent of 1-d `sigma` filled with ones.
absolute_sigma : bool, optional
If True, `sigma` is used in an absolute sense and the estimated parameter
covariance `pcov` reflects these absolute values.
If False, only the relative magnitudes of the `sigma` values matter.
The returned parameter covariance matrix `pcov` is based on scaling
`sigma` by a constant factor. This constant is set by demanding that the
reduced `chisq` for the optimal parameters `popt` when using the
*scaled* `sigma` equals unity. In other words, `sigma` is scaled to
match the sample variance of the residuals after the fit.
Mathematically,
``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
check_finite : bool, optional
        If True, check that the input arrays do not contain nans or infs,
and raise a ValueError if they do. Setting this parameter to
False may silently produce nonsensical results if the input arrays
do contain nans. Default is True.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on parameters. Defaults to no bounds.
Each element of the tuple must be either an array with the length equal
to the number of parameters, or a scalar (in which case the bound is
taken to be the same for all parameters.) Use ``np.inf`` with an
appropriate sign to disable bounds on all or some parameters.
.. versionadded:: 0.17
method : {'lm', 'trf', 'dogbox'}, optional
Method to use for optimization. See `least_squares` for more details.
Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
provided. The method 'lm' won't work when the number of observations
is less than the number of variables, use 'trf' or 'dogbox' in this
case.
.. versionadded:: 0.17
jac : callable, string or None, optional
Function with signature ``jac(x, ...)`` which computes the Jacobian
matrix of the model function with respect to parameters as a dense
array_like structure. It will be scaled according to provided `sigma`.
If None (default), the Jacobian will be estimated numerically.
String keywords for 'trf' and 'dogbox' methods can be used to select
a finite difference scheme, see `least_squares`.
.. versionadded:: 0.18
kwargs
Keyword arguments passed to `leastsq` for ``method='lm'`` or
`least_squares` otherwise.
Returns
-------
popt : array
Optimal values for the parameters so that the sum of the squared
residuals of ``f(xdata, *popt) - ydata`` is minimized
pcov : 2d array
The estimated covariance of popt. The diagonals provide the variance
of the parameter estimate. To compute one standard deviation errors
on the parameters use ``perr = np.sqrt(np.diag(pcov))``.
How the `sigma` parameter affects the estimated covariance
depends on `absolute_sigma` argument, as described above.
If the Jacobian matrix at the solution doesn't have a full rank, then
'lm' method returns a matrix filled with ``np.inf``, on the other hand
'trf' and 'dogbox' methods use Moore-Penrose pseudoinverse to compute
the covariance matrix.
Raises
------
ValueError
if either `ydata` or `xdata` contain NaNs, or if incompatible options
are used.
RuntimeError
if the least-squares minimization fails.
OptimizeWarning
if covariance of the parameters can not be estimated.
See Also
--------
least_squares : Minimize the sum of squares of nonlinear functions.
scipy.stats.linregress : Calculate a linear least squares regression for
two sets of measurements.
Notes
-----
With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
through `leastsq`. Note that this algorithm can only deal with
unconstrained problems.
Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
the docstring of `least_squares` for more information.
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy.optimize import curve_fit
>>> def func(x, a, b, c):
... return a * np.exp(-b * x) + c
Define the data to be fit with some noise:
>>> xdata = np.linspace(0, 4, 50)
>>> y = func(xdata, 2.5, 1.3, 0.5)
>>> np.random.seed(1729)
>>> y_noise = 0.2 * np.random.normal(size=xdata.size)
>>> ydata = y + y_noise
>>> plt.plot(xdata, ydata, 'b-', label='data')
Fit for the parameters a, b, c of the function `func`:
>>> popt, pcov = curve_fit(func, xdata, ydata)
>>> popt
array([ 2.55423706, 1.35190947, 0.47450618])
>>> plt.plot(xdata, func(xdata, *popt), 'r-',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
Constrain the optimization to the region of ``0 <= a <= 3``,
``0 <= b <= 1`` and ``0 <= c <= 0.5``:
>>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
>>> popt
array([ 2.43708906, 1. , 0.35015434])
>>> plt.plot(xdata, func(xdata, *popt), 'g--',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
>>> plt.xlabel('x')
>>> plt.ylabel('y')
>>> plt.legend()
>>> plt.show()
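    A further sketch (added for illustration): per-point uncertainties can be
    passed through the 1-d `sigma` argument and one standard deviation errors
    recovered from the diagonal of `pcov`:
    >>> yerr = 0.2 * np.ones_like(ydata)
    >>> popt, pcov = curve_fit(func, xdata, ydata, sigma=yerr,
    ...                        absolute_sigma=True)
    >>> perr = np.sqrt(np.diag(pcov))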
"""
if p0 is None:
# determine number of parameters by inspecting the function
from scipy._lib._util import getargspec_no_self as _getargspec
args, varargs, varkw, defaults = _getargspec(f)
if len(args) < 2:
raise ValueError("Unable to determine number of fit parameters.")
n = len(args) - 1
else:
p0 = np.atleast_1d(p0)
n = p0.size
lb, ub = prepare_bounds(bounds, n)
if p0 is None:
p0 = _initialize_feasible(lb, ub)
bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
if method is None:
if bounded_problem:
method = 'trf'
else:
method = 'lm'
if method == 'lm' and bounded_problem:
raise ValueError("Method 'lm' only works for unconstrained problems. "
"Use 'trf' or 'dogbox' instead.")
# optimization may produce garbage for float32 inputs, cast them to float64
# NaNs can not be handled
if check_finite:
ydata = np.asarray_chkfinite(ydata, float)
else:
ydata = np.asarray(ydata, float)
if isinstance(xdata, (list, tuple, np.ndarray)):
# `xdata` is passed straight to the user-defined `f`, so allow
# non-array_like `xdata`.
if check_finite:
xdata = np.asarray_chkfinite(xdata, float)
else:
xdata = np.asarray(xdata, float)
if ydata.size == 0:
raise ValueError("`ydata` must not be empty!")
# Determine type of sigma
if sigma is not None:
sigma = np.asarray(sigma)
# if 1-d, sigma are errors, define transform = 1/sigma
if sigma.shape == (ydata.size, ):
transform = 1.0 / sigma
# if 2-d, sigma is the covariance matrix,
# define transform = L such that L L^T = C
elif sigma.shape == (ydata.size, ydata.size):
try:
# scipy.linalg.cholesky requires lower=True to return L L^T = A
transform = cholesky(sigma, lower=True)
except LinAlgError:
raise ValueError("`sigma` must be positive definite.")
else:
raise ValueError("`sigma` has incorrect shape.")
else:
transform = None
func = _wrap_func(f, xdata, ydata, transform)
if callable(jac):
jac = _wrap_jac(jac, xdata, transform)
elif jac is None and method != 'lm':
jac = '2-point'
if 'args' in kwargs:
# The specification for the model function `f` does not support
# additional arguments. Refer to the `curve_fit` docstring for
# acceptable call signatures of `f`.
raise ValueError("'args' is not a supported keyword argument.")
if method == 'lm':
# Remove full_output from kwargs, otherwise we're passing it in twice.
return_full = kwargs.pop('full_output', False)
res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
popt, pcov, infodict, errmsg, ier = res
ysize = len(infodict['fvec'])
cost = np.sum(infodict['fvec'] ** 2)
if ier not in [1, 2, 3, 4]:
raise RuntimeError("Optimal parameters not found: " + errmsg)
else:
# Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
if 'max_nfev' not in kwargs:
kwargs['max_nfev'] = kwargs.pop('maxfev', None)
res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
**kwargs)
if not res.success:
raise RuntimeError("Optimal parameters not found: " + res.message)
ysize = len(res.fun)
cost = 2 * res.cost # res.cost is half sum of squares!
popt = res.x
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(res.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s**2, VT)
return_full = False
warn_cov = False
if pcov is None:
# indeterminate covariance
pcov = zeros((len(popt), len(popt)), dtype=float)
pcov.fill(inf)
warn_cov = True
elif not absolute_sigma:
if ysize > p0.size:
s_sq = cost / (ysize - p0.size)
pcov = pcov * s_sq
else:
pcov.fill(inf)
warn_cov = True
if warn_cov:
warnings.warn('Covariance of the parameters could not be estimated',
category=OptimizeWarning)
if return_full:
return popt, pcov, infodict, errmsg, ier
else:
return popt, pcov
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
"""Perform a simple check on the gradient for correctness.
"""
x = atleast_1d(x0)
n = len(x)
x = x.reshape((n,))
fvec = atleast_1d(fcn(x, *args))
m = len(fvec)
fvec = fvec.reshape((m,))
ldfjac = m
fjac = atleast_1d(Dfcn(x, *args))
fjac = fjac.reshape((m, n))
if col_deriv == 0:
fjac = transpose(fjac)
xp = zeros((n,), float)
err = zeros((m,), float)
fvecp = None
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
fvecp = atleast_1d(fcn(xp, *args))
fvecp = fvecp.reshape((m,))
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
good = (prod(greater(err, 0.5), axis=0))
return (good, err)
def _del2(p0, p1, d):
return p0 - np.square(p1 - p0) / d
def _relerr(actual, desired):
return (actual - desired) / desired
def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
p0 = x0
for i in range(maxiter):
p1 = func(p0, *args)
if use_accel:
p2 = func(p1, *args)
d = p2 - 2.0 * p1 + p0
p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
else:
p = p1
relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
if np.all(np.abs(relerr) < xtol):
return p
p0 = p
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg)
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
"""
Find a fixed point of the function.
Given a function of one or more variables and a starting point, find a
fixed-point of the function: i.e. where ``func(x0) == x0``.
Parameters
----------
func : function
Function to evaluate.
x0 : array_like
Fixed point of function.
args : tuple, optional
Extra arguments to `func`.
xtol : float, optional
Convergence tolerance, defaults to 1e-08.
maxiter : int, optional
Maximum number of iterations, defaults to 500.
method : {"del2", "iteration"}, optional
Method of finding the fixed-point, defaults to "del2"
which uses Steffensen's Method with Aitken's ``Del^2``
convergence acceleration [1]_. The "iteration" method simply iterates
the function until convergence is detected, without attempting to
accelerate the convergence.
References
----------
.. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
Examples
--------
>>> from scipy import optimize
>>> def func(x, c1, c2):
... return np.sqrt(c1/(x+c2))
>>> c1 = np.array([10,12.])
>>> c2 = np.array([3, 5.])
>>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
array([ 1.4920333 , 1.37228132])
"""
use_accel = {'del2': True, 'iteration': False}[method]
x0 = _asarray_validated(x0, as_inexact=True)
return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
| bsd-3-clause |
sugiare418/tensorfx | build/tensorflow/scripts/003_lstm_keras_171203/model.py | 2 | 5565 | # -*- coding: utf-8 -*-
# Python 2.7.6
# tensorflow (0.7.1)
import numpy as np
import random
from param import *
import matplotlib
matplotlib.use('Agg') # disable the GUI backend
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
# =================================================================
class Model:
    '''Class that manages the machine-learning model definition.
    Also intended to hold TensorFlow session handling and save/load of trained models.
    '''
def __init__(self):
        ''' Initialization (quasi-constructor) '''
self.__setup_model()
def __enter__(self):
        ''' Context manager (called when entering a with block) '''
return self
def __exit__(self, exc_type, exc_value, traceback):
        ''' Context manager (called when leaving a with block) '''
return False
def save(self, str_path):
"""TensorFlowのモデルを保存する
Args:
str_path: モデル保存先のパス
Returns:
-
"""
def restore(self, str_path):
"""TensorFlowのモデルを読み込むする
Args:
str_path: モデル保存先のパス
Returns:
-
"""
def __setup_model(self):
        ''' Define the NN model '''
def weight_variable(shape, name=None):
return np.random.normal(scale=.01, size=shape)
model = Sequential()
model.add(BatchNormalization(input_shape=(Param.IN_CYCLE_SIZE, Param.IN_COLUMN_SIZE)))
model.add(
LSTM(Param.HIDDEN_UNIT_SIZE,
kernel_initializer=weight_variable,
input_shape=(Param.IN_CYCLE_SIZE, Param.IN_COLUMN_SIZE), # seq_length, dim
dropout=Param.DROPOUT_KEEP_PROB,
recurrent_dropout=Param.DROPOUT_KEEP_PROB))
model.add(Dropout(Param.DROPOUT_KEEP_PROB))
model.add(Dense(Param.OUT_NODES_SIZE, kernel_initializer=weight_variable))
        # classification (commented-out alternative)
# model.add(Activation('softmax'))
# optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
# model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=["accuracy"])
        # regression
model.add(Activation('linear'))
model.compile(loss='mean_squared_error', optimizer="rmsprop")
self.model = model
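        # Shape sketch (added note, based on the Param fields referenced above):
        #   input : (batch, IN_CYCLE_SIZE, IN_COLUMN_SIZE)
        #   LSTM  : (batch, HIDDEN_UNIT_SIZE)
        #   Dense : (batch, OUT_NODES_SIZE), linear activation for regression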
# =================================================================
class Trainer(Model):
'''
    Training class.
    Runs TensorFlow-backed training on jiji trade data.
'''
def train(self, steps, data):
'''
        Train the model
'''
(ary_in, ary_actual, df_jiji) = data.train_data()
hist = self.model.fit(
ary_in,
ary_actual,
batch_size=Param.IN_BATCH_SIZE,
epochs=Param.TRANING_EPOCH_SIZE,
)
'''
        Visualize the training results
'''
loss = hist.history['loss']
plt.rc('font', family='serif')
fig = plt.figure()
plt.plot(range(len(loss)), loss, label='loss', color='black')
plt.xlabel('epochs')
plt.show()
plt.savefig(__file__ + '.eps')
'''
        Check against the test data
'''
(ary_in, ary_actual, df_jiji) = data.test_data()
acc_count = 0
for i in range(1000):
k = random.randrange(len(ary_in))
x = ary_in[k]
y = self.model.predict(np.array([x]), batch_size=1)
l = ary_actual[k]
close = df_jiji.loc[k]['n03_close']
sub_y = close - y
sub_l = close - l
if sub_y > 0 and sub_l > 0:
acc_count = acc_count + 1
if sub_y < 0 and sub_l < 0:
acc_count = acc_count + 1
print 'y : {} , l : {}, sub_y:{}, sub_l:{}'.format(y, l, sub_y ,sub_l)
print '=============================================='
print 'acc_count:{}'.format(acc_count)
# # score = self.model.evaluate(ary_in, ary_actual, batch_size=Param.IN_BATCH_SIZE)
        # # print '\n evaluate : {} '.format(score)
# score = self.model.predict(ary_in, batch_size=Param.IN_BATCH_SIZE)
# print '\n predict : {} '.format(score)
# score = self.model.predict_classes(ary_in, batch_size=Param.IN_BATCH_SIZE)
# print '\n predict_classes : {} '.format(score)
# score = self.model.predict_proba(ary_in, batch_size=Param.IN_BATCH_SIZE)
# print '\n predict_proba : {} '.format(score)
# =================================================================
class Estimator(Model):
def estimate( self, data ):
output = self.session.run(self.out_softmax, feed_dict=self.estimate_feed_dict(data))
#loss = self.session.run(self.loss, feed_dict=self.estimate_feed_dict(data))
#print output
return 'up' if output[0][0] > output[0][1] else 'down'
#return self.session.run(tf.argmax(self.output,1), feed_dict=self.estimate_feed_dict(data))
def estimate_feed_dict(self, data):
return {
self.ph_trade_data: data,
self.ph_state_size: 1,
self.ph_dropout_keep_prob: 1.0,
} | mit |
vighneshbirodkar/scikit-image | skimage/viewer/viewers/core.py | 33 | 13265 | """
ImageViewer class for viewing and interacting with images.
"""
import numpy as np
from ... import io, img_as_float
from ...util.dtype import dtype_range
from ...exposure import rescale_intensity
from ..qt import QtWidgets, Qt, Signal
from ..widgets import Slider
from ..utils import (dialogs, init_qtapp, figimage, start_qtapp,
update_axes_image)
from ..utils.canvas import BlitManager, EventManager
from ..plugins.base import Plugin
__all__ = ['ImageViewer', 'CollectionViewer']
def mpl_image_to_rgba(mpl_image):
"""Return RGB image from the given matplotlib image object.
Each image in a matplotlib figure has its own colormap and normalization
function. Return RGBA (RGB + alpha channel) image with float dtype.
Parameters
----------
mpl_image : matplotlib.image.AxesImage object
The image being converted.
Returns
-------
img : array of float, shape (M, N, 4)
An image of float values in [0, 1].
"""
image = mpl_image.get_array()
if image.ndim == 2:
input_range = (mpl_image.norm.vmin, mpl_image.norm.vmax)
image = rescale_intensity(image, in_range=input_range)
# cmap complains on bool arrays
image = mpl_image.cmap(img_as_float(image))
elif image.ndim == 3 and image.shape[2] == 3:
# add alpha channel if it's missing
image = np.dstack((image, np.ones_like(image)))
return img_as_float(image)
class ImageViewer(QtWidgets.QMainWindow):
"""Viewer for displaying images.
This viewer is a simple container object that holds a Matplotlib axes
for showing images. `ImageViewer` doesn't subclass the Matplotlib axes (or
figure) because of the high probability of name collisions.
Subclasses and plugins will likely extend the `update_image` method to add
custom overlays or filter the displayed image.
Parameters
----------
image : array
Image being viewed.
Attributes
----------
canvas, fig, ax : Matplotlib canvas, figure, and axes
Matplotlib canvas, figure, and axes used to display image.
image : array
Image being viewed. Setting this value will update the displayed frame.
original_image : array
Plugins typically operate on (but don't change) the *original* image.
plugins : list
List of attached plugins.
Examples
--------
>>> from skimage import data
>>> image = data.coins()
>>> viewer = ImageViewer(image) # doctest: +SKIP
>>> viewer.show() # doctest: +SKIP
"""
dock_areas = {'top': Qt.TopDockWidgetArea,
'bottom': Qt.BottomDockWidgetArea,
'left': Qt.LeftDockWidgetArea,
'right': Qt.RightDockWidgetArea}
# Signal that the original image has been changed
original_image_changed = Signal(np.ndarray)
def __init__(self, image, useblit=True):
# Start main loop
init_qtapp()
super(ImageViewer, self).__init__()
#TODO: Add ImageViewer to skimage.io window manager
self.setAttribute(Qt.WA_DeleteOnClose)
self.setWindowTitle("Image Viewer")
self.file_menu = QtWidgets.QMenu('&File', self)
self.file_menu.addAction('Open file', self.open_file,
Qt.CTRL + Qt.Key_O)
self.file_menu.addAction('Save to file', self.save_to_file,
Qt.CTRL + Qt.Key_S)
self.file_menu.addAction('Quit', self.close,
Qt.CTRL + Qt.Key_Q)
self.menuBar().addMenu(self.file_menu)
self.main_widget = QtWidgets.QWidget()
self.setCentralWidget(self.main_widget)
if isinstance(image, Plugin):
plugin = image
image = plugin.filtered_image
plugin.image_changed.connect(self._update_original_image)
# When plugin is started, start
plugin._started.connect(self._show)
self.fig, self.ax = figimage(image)
self.canvas = self.fig.canvas
self.canvas.setParent(self)
self.ax.autoscale(enable=False)
self._tools = []
self.useblit = useblit
if useblit:
self._blit_manager = BlitManager(self.ax)
self._event_manager = EventManager(self.ax)
self._image_plot = self.ax.images[0]
self._update_original_image(image)
self.plugins = []
self.layout = QtWidgets.QVBoxLayout(self.main_widget)
self.layout.addWidget(self.canvas)
status_bar = self.statusBar()
self.status_message = status_bar.showMessage
sb_size = status_bar.sizeHint()
cs_size = self.canvas.sizeHint()
self.resize(cs_size.width(), cs_size.height() + sb_size.height())
self.connect_event('motion_notify_event', self._update_status_bar)
def __add__(self, plugin):
"""Add plugin to ImageViewer"""
plugin.attach(self)
self.original_image_changed.connect(plugin._update_original_image)
if plugin.dock:
location = self.dock_areas[plugin.dock]
dock_location = Qt.DockWidgetArea(location)
dock = QtWidgets.QDockWidget()
dock.setWidget(plugin)
dock.setWindowTitle(plugin.name)
self.addDockWidget(dock_location, dock)
horiz = (self.dock_areas['left'], self.dock_areas['right'])
dimension = 'width' if location in horiz else 'height'
self._add_widget_size(plugin, dimension=dimension)
return self
def _add_widget_size(self, widget, dimension='width'):
widget_size = widget.sizeHint()
viewer_size = self.frameGeometry()
dx = dy = 0
if dimension == 'width':
dx = widget_size.width()
elif dimension == 'height':
dy = widget_size.height()
w = viewer_size.width()
h = viewer_size.height()
self.resize(w + dx, h + dy)
def open_file(self, filename=None):
"""Open image file and display in viewer."""
if filename is None:
filename = dialogs.open_file_dialog()
if filename is None:
return
image = io.imread(filename)
self._update_original_image(image)
def update_image(self, image):
"""Update displayed image.
This method can be overridden or extended in subclasses and plugins to
react to image changes.
"""
self._update_original_image(image)
def _update_original_image(self, image):
self.original_image = image # update saved image
self.image = image.copy() # update displayed image
self.original_image_changed.emit(image)
def save_to_file(self, filename=None):
"""Save current image to file.
The current behavior is not ideal: It saves the image displayed on
screen, so all images will be converted to RGB, and the image size is
not preserved (resizing the viewer window will alter the size of the
saved image).
"""
if filename is None:
filename = dialogs.save_file_dialog()
if filename is None:
return
if len(self.ax.images) == 1:
io.imsave(filename, self.image)
else:
underlay = mpl_image_to_rgba(self.ax.images[0])
overlay = mpl_image_to_rgba(self.ax.images[1])
alpha = overlay[:, :, 3]
# alpha can be set by channel of array or by a scalar value.
# Prefer the alpha channel, but fall back to scalar value.
if np.all(alpha == 1):
alpha = np.ones_like(alpha) * self.ax.images[1].get_alpha()
alpha = alpha[:, :, np.newaxis]
composite = (overlay[:, :, :3] * alpha +
underlay[:, :, :3] * (1 - alpha))
io.imsave(filename, composite)
def closeEvent(self, event):
self.close()
def _show(self, x=0):
self.move(x, 0)
for p in self.plugins:
p.show()
super(ImageViewer, self).show()
self.activateWindow()
self.raise_()
def show(self, main_window=True):
"""Show ImageViewer and attached plugins.
This behaves much like `matplotlib.pyplot.show` and `QWidget.show`.
"""
self._show()
if main_window:
start_qtapp()
return [p.output() for p in self.plugins]
def redraw(self):
if self.useblit:
self._blit_manager.redraw()
else:
self.canvas.draw_idle()
@property
def image(self):
return self._img
@image.setter
def image(self, image):
self._img = image
update_axes_image(self._image_plot, image)
# update display (otherwise image doesn't fill the canvas)
h, w = image.shape[:2]
self.ax.set_xlim(0, w)
self.ax.set_ylim(h, 0)
# update color range
clim = dtype_range[image.dtype.type]
if clim[0] < 0 and image.min() >= 0:
clim = (0, clim[1])
self._image_plot.set_clim(clim)
if self.useblit:
self._blit_manager.background = None
self.redraw()
def reset_image(self):
self.image = self.original_image.copy()
def connect_event(self, event, callback):
"""Connect callback function to matplotlib event and return id."""
cid = self.canvas.mpl_connect(event, callback)
return cid
def disconnect_event(self, callback_id):
"""Disconnect callback by its id (returned by `connect_event`)."""
self.canvas.mpl_disconnect(callback_id)
def _update_status_bar(self, event):
if event.inaxes and event.inaxes.get_navigate():
self.status_message(self._format_coord(event.xdata, event.ydata))
else:
self.status_message('')
def add_tool(self, tool):
if self.useblit:
self._blit_manager.add_artists(tool.artists)
self._tools.append(tool)
self._event_manager.attach(tool)
def remove_tool(self, tool):
if tool not in self._tools:
return
if self.useblit:
self._blit_manager.remove_artists(tool.artists)
self._tools.remove(tool)
self._event_manager.detach(tool)
def _format_coord(self, x, y):
# callback function to format coordinate display in status bar
x = int(x + 0.5)
y = int(y + 0.5)
try:
return "%4s @ [%4s, %4s]" % (self.image[y, x], x, y)
except IndexError:
return ""
class CollectionViewer(ImageViewer):
"""Viewer for displaying image collections.
Select the displayed frame of the image collection using the slider or
with the following keyboard shortcuts:
left/right arrows
Previous/next image in collection.
number keys, 0--9
0% to 90% of collection. For example, "5" goes to the image in the
middle (i.e. 50%) of the collection.
home/end keys
First/last image in collection.
Parameters
----------
image_collection : list of images
List of images to be displayed.
update_on : {'move' | 'release'}
Control whether image is updated on slide or release of the image
slider. Using 'on_release' will give smoother behavior when displaying
large images or when writing a plugin/subclass that requires heavy
computation.
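    Examples
    --------
    A short illustrative sketch (added here; it needs a Qt backend, so it is
    skipped under doctest):
    >>> from skimage import data # doctest: +SKIP
    >>> viewer = CollectionViewer([data.coins(), data.camera()]) # doctest: +SKIP
    >>> viewer.show() # doctest: +SKIP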
"""
def __init__(self, image_collection, update_on='move', **kwargs):
self.image_collection = image_collection
self.index = 0
self.num_images = len(self.image_collection)
first_image = image_collection[0]
super(CollectionViewer, self).__init__(first_image)
slider_kws = dict(value=0, low=0, high=self.num_images - 1)
slider_kws['update_on'] = update_on
slider_kws['callback'] = self.update_index
slider_kws['value_type'] = 'int'
self.slider = Slider('frame', **slider_kws)
self.layout.addWidget(self.slider)
        #TODO: Adjust height to accommodate slider; the following doesn't work
# s_size = self.slider.sizeHint()
# cs_size = self.canvas.sizeHint()
# self.resize(cs_size.width(), cs_size.height() + s_size.height())
def update_index(self, name, index):
"""Select image on display using index into image collection."""
index = int(round(index))
if index == self.index:
return
# clip index value to collection limits
index = max(index, 0)
index = min(index, self.num_images - 1)
self.index = index
self.slider.val = index
self.update_image(self.image_collection[index])
def keyPressEvent(self, event):
if type(event) == QtWidgets.QKeyEvent:
key = event.key()
# Number keys (code: 0 = key 48, 9 = key 57) move to deciles
if 48 <= key < 58:
index = 0.1 * int(key - 48) * self.num_images
self.update_index('', index)
event.accept()
else:
event.ignore()
else:
event.ignore()
| bsd-3-clause |
jia-kai/hearv | disp_freq.py | 1 | 1942 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# $File: disp_freq.py
# $Date: Sun Nov 23 12:45:25 2014 +0800
# $Author: jiakai <jia.kai66@gmail.com>
import matplotlib.pyplot as plt
import numpy as np
import argparse
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument('fpath', help='array json fpath')
parser.add_argument('--sample_rate', type=float, default=59.940)
parser.add_argument('--fl', type=float, default=5,
help='low cutoff')
parser.add_argument('--dmin', type=int, help='min index of data')
parser.add_argument('--dnr', type=int, help='number of data points used')
parser.add_argument('--no_shift_mean', action='store_true',
help='do not shift mean value to zero')
parser.add_argument('--clip', type=float,
help='clip all samples to be within range [-x, x]')
parser.add_argument('-o', '--output',
help='output the plot')
args = parser.parse_args()
with open(args.fpath) as fin:
vals = np.array(json.load(fin))
if not args.no_shift_mean:
vals -= np.mean(vals)
if args.clip:
vals = np.clip(vals, -args.clip, args.clip)
if args.dmin:
vals = vals[args.dmin:]
if args.dnr:
vals = vals[:args.dnr]
fig = plt.figure()
ax = fig.add_subplot(2, 1, 1)
ax.set_xlabel('sample number')
ax.set_ylabel('displacement')
ax.plot(vals)
fft = np.fft.fft(vals)[:len(vals) / 2]
freq = args.sample_rate / len(vals) * np.arange(1, len(fft) + 1)
if args.fl > 0:
fl = min(np.nonzero(freq >= args.fl)[0])
fft = fft[fl:]
freq = freq[fl:]
ax = fig.add_subplot(2, 1, 2)
ax.set_xlabel('freq')
ax.set_ylabel('amplitude')
ax.plot(freq, np.abs(fft))
if args.output:
fig.savefig(args.output)
plt.show()
if __name__ == '__main__':
main()
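# Illustrative usage sketch (added for clarity, not part of the original
# script); the input JSON is assumed to be a flat list of displacement samples:
#
# import json, math
# json.dump([math.sin(0.5 * i) for i in range(600)], open('samples.json', 'w'))
# then: python2 disp_freq.py samples.json --sample_rate 59.94 -o spectrum.png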
| unlicense |
gbugaisky/bimm_185_conotoxin | wip-scripts_data/kNNProc.py | 1 | 1321 | #!/usr/bin/env python
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import neighbors
def kNNGen(trainfile, testfile):
features = np.genfromtxt(trainfile, delimiter=' ', usecols=(0, 1, 2))
labels = np.genfromtxt(trainfile, delimiter=' ', usecols=(-1))
tests = np.genfromtxt(testfile, delimiter=' ', usecols=(0, 1, 2))
testlabels = np.genfromtxt(testfile, delimiter=' ', usecols=(-1))
n_neighbors = 10
h = 0.02
accuracyScores = []
for weights in ['uniform', 'distance']:
clf = neighbors.KNeighborsClassifier(n_neighbors, leaf_size=20, weights=weights)
clf.fit(features, labels)
accuracyScores.append(clf.score(tests, testlabels))
return accuracyScores
if __name__ == "__main__":
FILEPATH = ".\\SeparatedTrainTest\\"
accuracyVals = []
for i in range(0, 10):
accuracyVals.append(kNNGen(FILEPATH + "trainDataSet" + str(i) + ".csv", FILEPATH + "testDataSet" + str(i) + ".csv"))
uniformScore = 0
distanceScore = 0
with open("kNNAverageAccuracy.txt", 'w') as results:
for element in accuracyVals:
results.write(str(element) + '\n')
uniformScore += element[0]
distanceScore += element[1]
results.write("Uniform kNN Accuracy: " + str(uniformScore / 10.0) + '\n')
results.write("Distance kNN Accuracy: " + str(distanceScore / 10.0) + '\n') | gpl-2.0 |
kyleabeauchamp/vcfnp | example.py | 2 | 1198 | from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import vcfnp
vcfnp.__version__
filename = 'fixture/sample.vcf'
# load data from fixed fields (including INFO)
v = vcfnp.variants(filename, cache=True).view(np.recarray)
# print some simple variant metrics
print('found %s variants (%s SNPs)' % (v.size, np.count_nonzero(v.is_snp)))
print('QUAL mean (std): %s (%s)' % (np.mean(v.QUAL), np.std(v.QUAL)))
# plot a histogram of variant depth
fig = plt.figure(1)
ax = fig.add_subplot(111)
ax.hist(v.DP)
ax.set_title('DP histogram')
ax.set_xlabel('DP')
plt.show()
# load data from sample columns
c = vcfnp.calldata_2d(filename, cache=True).view(np.recarray)
# print some simple genotype metrics
count_phased = np.count_nonzero(c.is_phased)
count_variant = np.count_nonzero(np.any(c.genotype > 0, axis=2))
count_missing = np.count_nonzero(~c.is_called)
print('calls (phased, variant, missing): %s (%s, %s, %s)'
% (c.flatten().size, count_phased, count_variant, count_missing))
# plot a histogram of genotype quality
fig = plt.figure(2)
ax = fig.add_subplot(111)
ax.hist(c.GQ.flatten())
ax.set_title('GQ histogram')
ax.set_xlabel('GQ')
plt.show()
| mit |
sumitsourabh/opencog | opencog/python/utility/functions.py | 34 | 11056 | from math import fabs, isnan
from datetime import datetime
from spatiotemporal.unix_time import UnixTime
from utility.generic import convert_dict_to_sorted_lists
from utility.numeric.globals import EPSILON
from numpy import NINF as NEGATIVE_INFINITY, PINF as POSITIVE_INFINITY
from scipy.integrate import quad
__author__ = 'keyvan'
def integral(function, start, end):
if hasattr(function, 'integral'):
return function.integral(start, end)
area, error = quad(function, start, end)
return area
def almost_equals(a, b, epsilon=EPSILON):
if fabs(a - b) < epsilon:
return True
return False
def invoke_method_on(method, sequence_or_point):
if method is None:
return None
if not callable(method):
raise TypeError("'method' is not callable")
result = []
try:
for point in sequence_or_point:
if type(point) is datetime:
point = UnixTime(point)
result.append(method(point))
except TypeError:
if type(sequence_or_point) is datetime:
sequence_or_point = UnixTime(sequence_or_point)
return method(sequence_or_point)
return result
def index_of_first_local_maximum(sequence):
first_time = True
index = 0
for element in sequence:
if first_time:
previous = element
first_time = False
continue
if element <= previous:
return index
previous = element
index += 1
return None
class Function(object):
_domain = None
_range = None
_function_undefined = None
def __init__(self, function_undefined=None, domain=None):
if function_undefined is not None:
self.function_undefined = function_undefined
if domain is not None:
if not hasattr(domain, '__iter__') or not hasattr(domain, '__getitem__'):
raise TypeError("'domain' should be iterable and support indexing")
self._domain = domain
def call_on_single_point(self, x):
"""
to override, __call__ invokes this to handle both points and sequences
"""
return 0
def derivative(self, point):
return None
def _check_domain_for(self, feature_name):
if self.domain is None:
raise TypeError("'{0}' object does not support {1}, 'domain' should be specified".format(
self.__class__.__name__, feature_name))
def plot(self, plt=None):
self._check_domain_for('plotting')
if plt is None:
import matplotlib.pyplot as plt
plt.plot(self.domain, self.range)
return plt
@property
def function_undefined(self):
return self._function_undefined
@function_undefined.setter
def function_undefined(self, value):
if value is not None and not isinstance(value, Function):
raise TypeError("'function_undefined' should be of type 'Function'")
self._function_undefined = value
@property
def domain(self):
return self._domain
@property
def range(self):
return self()
def __call__(self, x=None):
if x is None:
self._check_domain_for("call with 'None'")
x = self.domain
return invoke_method_on(self.call_on_single_point, x)
def __getitem__(self, index):
self._check_domain_for('indexing')
return self.range[index]
def __len__(self):
self._check_domain_for('len()')
return len(self.range)
def __iter__(self):
self._check_domain_for('iter()')
return iter(self.range)
def __reversed__(self):
self._check_domain_for('reversed()')
return reversed(self.range)
class FunctionLinear(Function):
def __init__(self, a=None, b=None, x_0=None, y_0=None, x_1=None, y_1=None):
#(x_0, y_0), (x_1, y_1) = sorted([(x_0, y_0), (x_1, y_1)])
if (a, b) == (None, None):
a = (float(y_1) - y_0) / (x_1 - x_0)
b = y_0 - a * x_0
if isnan(a) or isnan(b):
pass
self.a = a
self.b = b
def call_on_single_point(self, x):
return float(self.a * x + self.b)
def intersect(self, other):
if almost_equals(self.a, other.a):
return None
x = (float(other.b) - self.b) / (self.a - other.a)
return x, self(x)
def integral(self, start, end):
if start >= end:
return 0
if self.a == 0:
return self.b * (end - start)
x_intercept = self.x_intercept
if start > x_intercept or end < x_intercept or almost_equals(end, x_intercept) or almost_equals(start, x_intercept):
return (self(start) + self(end)) * (end - start) / 2.0
minus_triangle = (x_intercept - start) * self(start)
plus_triangle = (end - x_intercept) * self(end)
return minus_triangle + plus_triangle
def derivative(self, point):
return self.a
@property
def x_intercept(self):
return - float(self.b) / self.a
@property
def y_intercept(self):
return self(0)
class FunctionHorizontalLinear(FunctionLinear):
def __init__(self, y_intercept):
FunctionLinear.__init__(self, a=0, b=y_intercept)
def call_on_single_point(self, x):
return self.b
def integral(self, start, end):
if start >= end:
return 0
if almost_equals(self.b, 0):
return 0
return float(self.b) * (end - start)
def derivative(self, point):
return 0
FUNCTION_ZERO = FunctionHorizontalLinear(0)
FUNCTION_ONE = FunctionHorizontalLinear(1)
class FunctionComposite(Function):
is_normalised = False
def __init__(self, dictionary_bounds_function, function_undefined=None, domain=None, is_normalised=False):
if is_normalised is not False:
self.is_normalised = True
Function.__init__(self, function_undefined=function_undefined, domain=domain)
if not isinstance(dictionary_bounds_function, dict):
raise TypeError("'dictionary_bounds_function' should be a dictionary with (lower_bound, higher_bound) "
"tuple keys and values of type 'Function'")
self._dictionary_bounds_function = dictionary_bounds_function
def call_on_single_point(self, x):
for function_bounds in self.dictionary_bounds_function:
(a, b) = function_bounds
if a <= x:
if b >= x:
if self.dictionary_bounds_function[function_bounds] is None:
return None
return self.dictionary_bounds_function[function_bounds](x)
return self.function_undefined(x)
def integral(self, start, end):
if self.is_normalised and self.domain is not None:
if (start < self.domain[0] or almost_equals(start, self.domain[0])) and (
end > self.domain[-1] or almost_equals(end, self.domain[-1])):
return 1.0
if start >= end:
return 0
result = 0
for function_bounds in self.dictionary_bounds_function:
(a, b) = function_bounds
if a <= start:
if b >= end:
return self.dictionary_bounds_function[function_bounds].integral(start, end)
not_ordered = {
(start, 0): 's', (end, 0): 'e',
(a, 1): 'a', (b, 1): 'b'
}
order = ''.join([not_ordered[i] for i in sorted(not_ordered)])
if (a == start or a == end) and order == 'saeb' or (b == start or b == end) and order == 'asbe':
continue
if order in 'seab abse':
continue
if order == 'saeb':
b = end
elif order == 'asbe':
a = start
result += self.dictionary_bounds_function[function_bounds].integral(a, b)
return result
def find_bounds_for(self, point):
for bounds in self.dictionary_bounds_function:
(a, b) = bounds
if a <= point and b >= point:
return bounds
def derivative(self, point):
return self.dictionary_bounds_function[self.find_bounds_for(point)].derivative(point)
def function_in_point(self, point):
for bounds in self.dictionary_bounds_function:
a, b = bounds
if a <= point <= b:
return self.dictionary_bounds_function[bounds]
return None
# def functions_in_interval(self, interval_start, interval_end):
# dictionary_bounds_function = {}
# for bounds in self.dictionary_bounds_function:
# a, b = bounds
# if (interval_start < a or almost_equals(interval_start, a)) and (
#
# ):
@property
def dictionary_bounds_function(self):
return self._dictionary_bounds_function
class FunctionPiecewiseLinear(FunctionComposite):
def __init__(self, dictionary_input_output, function_undefined=None, is_normalised=False):
self.input_list, self.output_list = convert_dict_to_sorted_lists(dictionary_input_output)
dictionary_bounds_function = {}
for i in xrange(1, len(self.input_list)):
x_0, x_1 = self.input_list[i - 1], self.input_list[i]
y_0, y_1 = self.output_list[i - 1], self.output_list[i]
dictionary_bounds_function[(x_0, x_1)] = FunctionLinear(x_0=x_0, x_1=x_1, y_0=y_0, y_1=y_1)
if NEGATIVE_INFINITY not in self.input_list:
dictionary_bounds_function[(NEGATIVE_INFINITY, self.input_list[0])] = function_undefined
if POSITIVE_INFINITY not in self.input_list:
dictionary_bounds_function[(self.input_list[-1], POSITIVE_INFINITY)] = function_undefined
FunctionComposite.__init__(self, dictionary_bounds_function,
function_undefined=function_undefined,
domain=self.input_list,
is_normalised=is_normalised)
def normalised(self):
area = self.integral(NEGATIVE_INFINITY, POSITIVE_INFINITY)
if almost_equals(area, 0):
area = self.integral(NEGATIVE_INFINITY, POSITIVE_INFINITY)
dictionary_input_output = {}
output_list = [y / area for y in self.output_list]
for i in xrange(len(self.input_list)):
dictionary_input_output[self.input_list[i]] = output_list[i]
result = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=self.function_undefined)
result.is_normalised = True
return result
def __and__(self, other):
for bounds in self.dictionary_bounds_function:
a, b = bounds
linear_function = self.dictionary_bounds_function[bounds]
if __name__ == '__main__':
a = FunctionLinear(1, 0)
b = FunctionLinear(-1, 1)
print a.intersect(b)
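# Further illustrative check (added for clarity, not part of the original
# module): a triangular piecewise-linear profile over [0, 2] integrates to 1.
# f = FunctionPiecewiseLinear({0: 0.0, 1: 1.0, 2: 0.0},
#                             function_undefined=FUNCTION_ZERO)
# print f.integral(0, 2)  # expected: 1.0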
| agpl-3.0 |
theandygross/Figures | src/Figures/Boxplots.py | 1 | 11851 | """
Created on Apr 24, 2013
@author: agross
"""
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import Stats.Scipy as Stats
from Figures.FigureHelpers import latex_float, init_ax
from Figures.FigureHelpers import prettify_ax
from Helpers.Pandas import match_series, true_index
colors = plt.rcParams['axes.color_cycle'] * 10
def _violin_plot(ax, data, pos=[], bp=False):
"""
http://pyinsci.blogspot.com/2009/09/violin-plot-with-matplotlib.html
Create violin plots on an axis. Internal to module as it does not
use Pandas data-structures. This is split off due to its being a
reuse of the code from the blog-post linked above, and I wanted to keep
the original code untouched.
"""
from scipy.stats import gaussian_kde
from numpy import arange
# dist = max(pos)-min(pos)
dist = len(pos)
w = min(0.25 * max(dist, 1.0), 0.5)
for p, d in enumerate(data):
try:
k = gaussian_kde(d) # calculates the kernel density
m = k.dataset.min() # lower bound of violin
M = k.dataset.max() # upper bound of violin
x = arange(m, M, (M - m) / 100.) # support for violin
v = k.evaluate(x) # violin profile (density curve)
v = v / v.max() * w # scaling the violin to the available space
ax.fill_betweenx(x, p, v + p, facecolor='y', alpha=0.1)
ax.fill_betweenx(x, p, -v + p, facecolor='y', alpha=0.1)
except:
pass
if bp:
box_plot = ax.boxplot(data, notch=1, positions=range(len(pos)), vert=1,
widths=.25)
return box_plot
def box_plot_pandas(bin_vec, real_vec, ax=None, order=None):
"""
Wrapper around matplotlib's boxplot function.
Inputs
bin_vec: Series of labels
real_vec: Series of measurements to be grouped according to bin_vec
"""
_, ax = init_ax(ax)
bin_vec, real_vec = match_series(bin_vec, real_vec)
if order is not None:
categories = order
else:
categories = bin_vec.value_counts().index
data = [real_vec[bin_vec == num] for num in categories]
bp = ax.boxplot(data, positions=range(len(categories)), widths=.3,
patch_artist=True)
if real_vec.name:
ax.set_ylabel(real_vec.name)
if bin_vec.name:
ax.set_xlabel(bin_vec.name)
ax.set_xticklabels(categories)
[p.set_visible(False) for p in bp['fliers']]
[p.set_visible(False) for p in bp['caps']]
[p.set_visible(False) for p in bp['whiskers']]
for p in bp['medians']:
p.set_color(colors[0])
p.set_lw(3)
p.set_alpha(.8)
for i, p in enumerate(bp['boxes']):
p.set_color('grey')
p.set_lw(3)
p.set_alpha(.7)
if len(data[i]) < 3:
p.set_alpha(0)
def violin_plot_pandas(bin_vec, real_vec, ann='p', order=None, ax=None,
filename=None):
"""
http://pyinsci.blogspot.com/2009/09/violin-plot-with-matplotlib.html
Wrapper around matplotlib's boxplot function to add violin profile.
Inputs
bin_vec: Series of labels
real_vec: Series of measurements to be grouped according to bin_vec
"""
fig, ax = init_ax(ax)
ax.set_ylabel(real_vec.name)
ax.set_xlabel(bin_vec.name)
bin_vec, real_vec = match_series(bin_vec, real_vec)
try:
if order is None:
categories = bin_vec.value_counts().index
else:
categories = order
_violin_plot(ax, [real_vec[bin_vec == num] for num in categories],
pos=categories, bp=True)
ax.set_xticklabels([str(c) + '\n(n=%i)' % sum(bin_vec == c)
for c in categories])
except:
box_plot_pandas(bin_vec, real_vec, ax=ax)
#if type(bin_vec.name) == str:
# ax.set_title(str(bin_vec.name) + ' x ' + str(real_vec.name))
p_value = Stats.kruskal_pandas(bin_vec, real_vec)['p']
if ann == 'p_fancy':
ax.annotate('$p = {}$'.format(latex_float(p_value)), (.95, -.02),
xycoords='axes fraction', ha='right', va='bottom', size=14)
if ann == 'p':
ax.annotate('p = {0:.1e}'.format(p_value), (.95, .02),
xycoords='axes fraction', ha='right', va='bottom', size=12)
elif ann is not None:
ax.annotate(ann, (.95, .02), xycoords='axes fraction', ha='right',
va='bottom', size=12)
if filename is not None:
fig.savefig(filename)
return
def violin_plot_series(s, **kw_args):
"""
Wrapper for drawing a violin plot on a series with a multi-index.
The second level of the index is used as the binning variable.
"""
assert s.index.levshape[1] > 1
violin_plot_pandas(pd.Series(s.index.get_level_values(1), s.index), s,
**kw_args)
def paired_boxplot_o(boxes):
"""
Wrapper around plt.boxplot to draw paired boxplots
for a set of boxes.
Input is the same as plt.boxplot:
Array or a sequence of vectors.
"""
fig = plt.figure(figsize=(len(boxes) / 2.5, 4))
ax1 = fig.add_subplot(111)
plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
bp = ax1.boxplot(boxes, notch=0, positions=np.arange(len(boxes)) +
1.5 * (np.arange(len(boxes)) / 2), patch_artist=True)
[p.set_color(colors[0]) for p in bp['boxes'][::2]]
[p.set_color('black') for p in bp['whiskers']]
[p.set_color('black') for p in bp['fliers']]
[p.set_alpha(.4) for p in bp['fliers']]
[p.set_alpha(.6) for p in bp['boxes']]
[p.set_edgecolor('black') for p in bp['boxes']]
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# Hide the grid behind plot objects
ax1.set_axisbelow(True)
ax1.set_ylabel('$Log_{2}$ RNA Expression')
ax1.set_xticks(3.5 * np.arange(len(boxes) / 2) + .5)
return ax1, bp
def paired_boxplot(boxes, ax1=None):
if not ax1:
fig = plt.figure(figsize=(len(boxes) / 2.5, 4))
ax1 = fig.add_subplot(111)
plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
bp = ax1.boxplot(boxes, notch=0, positions=np.arange(len(boxes)) +
1.5 * (np.arange(len(boxes)) / 2), patch_artist=True)
[p.set_color(colors[0]) for p in bp['boxes'][::2]]
[p.set_color(colors[1]) for p in bp['boxes'][1::2]]
[p.set_color('black') for p in bp['whiskers']]
[p.set_color('black') for p in bp['fliers']]
[p.set_alpha(.4) for p in bp['fliers']]
[p.set_alpha(.8) for p in bp['boxes']]
[p.set_edgecolor('black') for p in bp['boxes']]
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# Hide the grid behind plot objects
ax1.set_axisbelow(True)
ax1.set_ylabel('$Log_{2}$ RNA Expression')
ax1.set_xticks(3.5 * np.arange(len(boxes) / 2) + .5)
return ax1, bp
def paired_boxplot_tumor_normal(df, sig=True, cutoffs=[.01, .00001],
order=None, ax=None):
"""
Draws a paired boxplot given a DataFrame with both tumor and normal
samples on the index. '01' and '11' are hard-coded as the ids for
tumor/normal.
"""
n = df.groupby(level=0).size() == 2
df = df.ix[n[n].index]
if order is None:
o = df.xs('11', level=1).median().order().index
df = df[o[::-1]]
else:
df = df[order]
l1 = list(df.xs('01', level=1).as_matrix().T)
l2 = list(df.xs('11', level=1).as_matrix().T)
boxes = [x for t in zip(l1, l2) for x in t]
ax1, bp = paired_boxplot(boxes, ax)
test = lambda v: Stats.ttest_rel(v.unstack()['01'], v.unstack()['11'])
res = df.apply(test).T
p = res.p
if sig:
pts = [(i * 3.5 + .5, 18) for i, n in enumerate(p) if n < cutoffs[1]]
if len(pts) > 0:
s1 = ax1.scatter(*zip(*pts), marker='$**$', label='$p<10^{-5}$', s=200)
else:
s1 = None
pts = [(i * 3.5 + .5, 18) for i, n in enumerate(p)
if (n < cutoffs[0]) and (n > cutoffs[1])]
if len(pts) > 0:
s2 = ax1.scatter(*zip(*pts), marker='$*$', label='$p<10^{-2}$', s=30)
else:
s2 = None
ax1.legend(bp['boxes'][:2] + [s2, s1],
('Tumor', 'Normal', '$p<10^{-2}$', '$p<10^{-5}$'),
loc='best', scatterpoints=1)
else:
ax1.legend(bp['boxes'][:2], ('Tumor', 'Normal'), loc='best')
ax1.set_xticklabels(df.columns)
def boxplot_panel(hit_vec, response_df):
"""
Draws a series of paired boxplots with the rows of the response_df
split according to hit_vec.
"""
b = response_df.copy()
b.columns = pd.MultiIndex.from_arrays([b.columns, hit_vec.ix[b.columns]])
b = b.T
v1, v2 = hit_vec.unique()
test = lambda v: Stats.anova(v.reset_index(level=1)[v.index.names[1]],
v.reset_index(level=1)[v.name])
res = b.apply(test).T
p = res.p.order()
b = b.ix[:, p.index]
l1 = list(b.xs(v1, level=1).as_matrix().T)
l2 = list(b.xs(v2, level=1).as_matrix().T)
boxes = [x for t in zip(l1, l2) for x in t]
ax1, bp = paired_boxplot(boxes)
y_lim = (response_df.T.quantile(.9).max()) * 1.2
pts = [(i * 3.5 + .5, y_lim) for i, n in enumerate(p) if n < .00001]
if len(pts) > 0:
s1 = ax1.scatter(*zip(*pts), marker='$**$', label='$p<10^{-5}$', s=200)
else:
s1 = None
pts = [(i * 3.5 + .5, y_lim) for i, n in enumerate(p) if (n < .01)
and (n > .00001)]
if len(pts) > 0:
s2 = ax1.scatter(*zip(*pts), marker='$*$', label='$p<10^{-2}$', s=30)
else:
s2 = None
ax1.set_xticklabels(b.columns)
ax1.legend(bp['boxes'][:2] + [s2, s1],
(v1, v2, '$p<10^{-2}$', '$p<10^{-5}$'),
loc='best', scatterpoints=1)
def paired_bp_tn_split(vec, assignment, ax=None, split_vals=('01', '11'),
data_type='gene expression'):
"""
Paired boxplot for a single Series, with splitting on the index,
grouped by assignment. I.E. Tumor-Normal gene expression split by
cancer.
vec:
vector of values to plot.
assignment:
vector mapping keys to group assignment
ax (None):
matplotlib axis to plot on or None
split_vals ('01','11'):
Values to split the boxplot pairing on. The default of
('01','11') indicates tumor vs. normal in the standard
TCGA barcode nomenclature. This should correspond to values
on the second level of the index for vec and assignment.
**both vec and assignment should have an overlapping index with
multiple levels**
"""
_, ax = init_ax(ax, figsize=(8, 3))
if vec.name is not None:
label = vec.name # lose label in manipulation
else:
label = ''
g1 = split_vals[0]
g2 = split_vals[1]
vec = pd.concat([vec[:, g1], vec[:, g2]], keys=[g1, g2],
axis=1)
vec = vec.dropna().stack()
counts = vec.unstack().groupby(assignment).size()
groups = list(true_index(counts > 5))
groups = vec.unstack().groupby(assignment).median()[g1].ix[groups]
groups = groups.order().index[::-1]
l1 = [np.array(vec[:, g1].ix[true_index(assignment == c)].dropna())
for c in groups]
l2 = [np.array(vec[:, g2].ix[true_index(assignment == c)].dropna())
for c in groups]
boxes = [x for t in zip(l1, l2) for x in t if len(t[1]) > 5]
ax, bp = paired_boxplot(boxes, ax)
labels = ['{}\n({})'.format(c, counts[c]) for c in groups]
ax.set_xticklabels(labels)
prettify_ax(ax)
ax.set_ylabel('{} {}'.format(label, data_type))
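# Illustrative usage sketch (added for clarity, not part of the original
# module); 'group' and 'value' are made-up example Series:
#
# import numpy as np
# import pandas as pd
# group = pd.Series(np.repeat(['wt', 'mut'], 100), name='genotype')
# value = pd.Series(np.random.randn(200), name='expression')
# violin_plot_pandas(group, value)
# plt.show()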
| mit |
toastedcornflakes/scikit-learn | benchmarks/bench_plot_ward.py | 117 | 1283 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
plt.figure("scikit-learn Ward's method benchmark results")
plt.imshow(np.log(ratio), aspect='auto', origin="lower")
plt.colorbar()
plt.contour(ratio, levels=[1, ], colors='k')
plt.yticks(range(len(n_features)), n_features.astype(np.int))
plt.ylabel('N features')
plt.xticks(range(len(n_samples)), n_samples.astype(np.int))
plt.xlabel('N samples')
plt.title("Scikit's time, in units of scipy time (log)")
plt.show()
| bsd-3-clause |
redarmy30/Eurobot-2017 | old year/RESET-master/Machine_vision/get_position.py | 2 | 3426 | #!/usr/bin/env python2
import numpy as np
import cv2
from matplotlib import pyplot as plt
from math import sin, cos, tan, sqrt, pi, atan
from operator import itemgetter
import timeit
#start = timeit.timeit()
h = 0.37 #the vertical distance from the ground to camera [in meters]
alpha = pi*(28.3)/180.0 #the inclination angle in degrees
F = 0.25 #the focal distance [in meters], alternative value 0.00367
Nx = 640.0 #number of pixels along x axis on the focal plane
Ny = 480.0 #number of pixels along the y axis on the focal plane
psi = 78.0*pi/180.0 # maximum angular resolution in diagonal
Tetha = 2.0*atan((tan(psi/2.0))*3.0/5.0) # maximum resolution angle for vertical view
Fi = 2.0*atan((tan(psi/2.0))*4.0/5.0) # maximum resolution angle for horizontal view
#Initial calculations
gamma = pi/2.0 - alpha #calculate the inclination of focal plane
YM = F/cos(alpha) - h*tan(alpha)
YA = F*cos(alpha)
ZA = h - F*sin(alpha)
ksim = 2.0*F*tan(Tetha/2.0)
etham = 2.0*F*tan(Tetha/2.0)
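# Geometry note (added for clarity, not part of the original script): in
# get_position() below, a pixel (cx, cy) is mapped to metric offsets
# (ksiA, ethaA) on the tilted focal plane, and the ray through that point is
# intersected with the ground plane using the similar-triangles factor
# t = h / (h - ZA1), so that X = XA1*t, Y = YA1*t and
# R0 = sqrt(X**2 + Y**2) is the horizontal distance to the detected object.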
# camera initialisation
#DEFINE CLASSIFICATION OF OBJECTS
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 1
params.maxThreshold = 2000
# Filter by Area.
params.filterByArea = 1
params.minArea = 1000
params.maxArea = 100000
"""# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.1"""
"""# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.1
params.maxConvexity = 1"""
"""# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0"""
"""#Filter by color
params.filterByColor = 1
params.blobColor = 0;0;0"""
#detector = cv2.SimpleBlobDetector_create(params)
detector = cv2.SimpleBlobDetector(params) #- use this if line 57 returns error!!!
class GetObjectPosition(object):
def get_position(self):
cap = cv2.VideoCapture(0)
#cap.set(7, 15)
_, frame = cap.read()
im = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
keypoints = detector.detect(im)
im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
a = len (keypoints)
points = []
screenpoints = []
for keypoint in keypoints:
x0 = keypoint.pt[0]
y0 = keypoint.pt[1]
cx = x0
cy = y0
ksiE = cx*ksim/Nx
ethaE = cy*etham/Ny
Nx1 = ksim
Ny1 = etham
ksiA = ksiE - Nx1/2.0
ethaA = -(ethaE - Ny1/2.0)
YA1 = YA + ethaA*cos(gamma)
ZA1 = (YA1 - YM)*tan(gamma)
XA1 = ksiA
t = h/(h-ZA1)
X = XA1*t
Y = YA1*t
R0 = sqrt(X**2.0+Y**2.0)
X = int(X*1000.0)
Y = int(Y*1000.0)
R0 = int(R0*1000.0)
points.append((X, Y, R0))
screenpoints.append((x0,y0))
points1 = str(points)
cv2.imwrite('result.png',im_with_keypoints)
if not points:
return
z = sorted(points, key=itemgetter(2))
z1 = str(z)
b = z[0]
points = str(points)
file = open("result.txt", "w")
file.write("unsorted list")
file.write(points)#unsorted
file.write("\n")
file.write("sorted list")
file.write(z1)#sorted
file.write("\n")
file.write("The nearest objest is:")
file.write(str(b))
file.close()
#img1 = cv2.imread('result.png')
#img2 = cv2.putText(img = img1,text = points,org = (0,Ny),fontFace = cv2.FONT_HERSHEY_DUPLEX,fontScale = 0.5,
#color = (1,1,255))
#cv2.imwrite('result1.png',img2)  # disabled: img2 is defined only in the commented-out block above
return(b)
del(cap)
a = GetObjectPosition()
coordinates = a.get_position()
print coordinates
#end = timeit.timeit()
#print end - start | mit |
calispac/digicampipe | digicampipe/scripts/spe.py | 1 | 14553 | #!/usr/bin/env python
"""
Do the Single Photoelectron analysis
Usage:
digicam-spe [options] [--] <INPUT>...
Options:
-h --help Show this screen.
--max_events=N Maximum number of events to analyse.
--max_histo_filename=FILE File path of the max histogram.
[Default: ./max_histo.pk]
--charge_histo_filename=FILE File path of the charge histogram
[Default: ./charge_histo.pk]
--raw_histo_filename=FILE File path of the raw histogram
[Default: ./raw_histo.pk]
-o OUTPUT --output=OUTPUT Output file path to store the results.
[Default: ./results.npz]
-c --compute Compute the data.
-f --fit Fit.
-d --display Display.
-v --debug Enter the debug mode.
-p --pixel=<PIXEL> Give a list of pixel IDs.
--shift=N Number of bins to shift before integrating
[default: 0].
--integral_width=N Number of bins to integrate over
[default: 7].
--pulse_finder_threshold=F Threshold of pulse finder in arbitrary units
[default: 2.0].
--save_figures=PATH Save the plots to the indicated folder.
Figures are not saved if set to none
[default: none]
--ncall=N Number of calls for the fit [default: 10000]
--n_samples=N Number of samples per waveform
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from docopt import docopt
from histogram.histogram import Histogram1D
from tqdm import tqdm
from digicampipe.calib.baseline import fill_baseline, subtract_baseline
from digicampipe.calib.charge import compute_charge
from digicampipe.calib.peak import find_pulse_with_max, \
find_pulse_fast
from digicampipe.io.event_stream import calibration_event_stream
from digicampipe.scripts import raw
from digicampipe.scripts.fmpe import FMPEFitter
from digicampipe.utils.docopt import convert_pixel_args, \
convert_int, convert_text
from digicampipe.utils.pdf import fmpe_pdf_10
class MaxHistoFitter(FMPEFitter):
def __init__(self, histogram, estimated_gain, **kwargs):
n_peaks = 2
super(MaxHistoFitter, self).__init__(histogram, estimated_gain,
n_peaks, **kwargs)
self.parameters_plot_name = {'baseline': '$B$', 'gain': 'G',
'sigma_e': '$\sigma_e$',
'sigma_s': '$\sigma_s$',
'a_0': None, 'a_1': None}
def pdf(self, x, baseline, gain, sigma_e, sigma_s, a_0, a_1):
params = {'baseline': baseline, 'gain': gain, 'sigma_e': sigma_e,
'sigma_s': sigma_s, 'a_0': a_0, 'a_1': a_1, 'bin_width': 0}
return fmpe_pdf_10(x, **params)
class SPEFitter(FMPEFitter):
def __init__(self, histogram, estimated_gain, **kwargs):
n_peaks = 4
super(SPEFitter, self).__init__(histogram, estimated_gain, n_peaks,
**kwargs)
self.parameters_plot_name = {'baseline': '$B$', 'gain': 'G',
'sigma_e': '$\sigma_e$',
'sigma_s': '$\sigma_s$',
'a_1': None, 'a_2': None, 'a_3': None,
'a_4': None}
def pdf(self, x, baseline, gain, sigma_e, sigma_s, a_1, a_2, a_3, a_4):
params = {'baseline': baseline, 'gain': gain, 'sigma_e': sigma_e,
'sigma_s': sigma_s, 'a_0': 0, 'a_1': a_1, 'a_2': a_2,
'a_3': a_3, 'a_4': a_4, 'bin_width': 0}
return fmpe_pdf_10(x, **params)
def initialize_fit(self):
init_params = super(SPEFitter, self).initialize_fit()
init_params['a_4'] = init_params['a_3']
init_params['a_3'] = init_params['a_2']
init_params['a_2'] = init_params['a_1']
init_params['a_1'] = init_params['a_0']
init_params['baseline'] = init_params['baseline'] - init_params['gain']
del init_params['a_0']
self.initial_parameters = init_params
return init_params
def compute_dark_rate(number_of_zeros, total_number_of_events, time):
p_0 = number_of_zeros / total_number_of_events
rate = - np.log(p_0)
rate /= time
return rate
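# Worked example (added for clarity, not part of the original script): if 200
# of 1000 events have an empty window, p_0 = 0.2 and the dark count rate is
# -ln(0.2) / time ~ 1.609 / time, expressed in the inverse units of 'time'.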
def compute_max_histo(files, histo_filename, pixel_id, max_events,
integral_width, shift, baseline):
n_pixels = len(pixel_id)
if not os.path.exists(histo_filename):
events = calibration_event_stream(files, pixel_id=pixel_id,
max_events=max_events)
# events = compute_baseline_with_min(events)
events = fill_baseline(events, baseline)
events = subtract_baseline(events)
events = find_pulse_with_max(events)
events = compute_charge(events, integral_width, shift)
max_histo = Histogram1D(
data_shape=(n_pixels,),
bin_edges=np.arange(-4095 * integral_width,
4095 * integral_width),
)
for event in events:
max_histo.fill(event.data.reconstructed_charge)
max_histo.save(histo_filename)
return max_histo
else:
max_histo = Histogram1D.load(histo_filename)
return max_histo
def compute_spe(files, histo_filename, pixel_id, baseline, max_events,
integral_width, shift, pulse_finder_threshold, debug=False):
if not os.path.exists(histo_filename):
n_pixels = len(pixel_id)
events = calibration_event_stream(files,
max_events=max_events,
pixel_id=pixel_id)
events = fill_baseline(events, baseline)
events = subtract_baseline(events)
# events = find_pulse_1(events, 0.5, 20)
# events = find_pulse_2(events, widths=[5, 6], threshold_sigma=2)
events = find_pulse_fast(events, threshold=pulse_finder_threshold)
# events = find_pulse_fast_2(events, threshold=pulse_finder_threshold,
# min_dist=3)
# events = find_pulse_correlate(events,
# threshold=pulse_finder_threshold)
# events = find_pulse_gaussian_filter(events,
# threshold=pulse_finder_threshold)
# events = find_pulse_wavelets(events, widths=[4, 5, 6],
# threshold_sigma=2)
events = compute_charge(events, integral_width=integral_width,
shift=shift)
# events = compute_amplitude(events)
# events = fit_template(events)
# events = compute_full_waveform_charge(events)
spe_histo = Histogram1D(
data_shape=(n_pixels,),
bin_edges=np.arange(-4095 * 50, 4095 * 50)
)
for event in events:
spe_histo.fill(event.data.reconstructed_charge)
spe_histo.save(histo_filename)
return spe_histo
else:
spe_histo = Histogram1D.load(histo_filename)
return spe_histo
def entry():
args = docopt(__doc__)
files = args['<INPUT>']
debug = args['--debug']
max_events = convert_int(args['--max_events'])
raw_histo_filename = args['--raw_histo_filename']
charge_histo_filename = args['--charge_histo_filename']
max_histo_filename = args['--max_histo_filename']
results_filename = args['--output']
pixel_id = convert_pixel_args(args['--pixel'])
n_pixels = len(pixel_id)
integral_width = int(args['--integral_width'])
shift = int(args['--shift'])
pulse_finder_threshold = float(args['--pulse_finder_threshold'])
n_samples = int(args['--n_samples']) # TODO access this in a better way !
estimated_gain = 20
ncall = int(args['--ncall'])
if args['--compute']:
raw_histo = raw.compute(files, max_events=max_events,
pixel_id=pixel_id, filename=raw_histo_filename)
baseline = raw_histo.mode()
compute_max_histo(files, max_histo_filename, pixel_id, max_events,
integral_width, shift, baseline)
compute_spe(files, charge_histo_filename, pixel_id, baseline,
max_events, integral_width, shift, pulse_finder_threshold,
debug=debug)
if args['--fit']:
spe_histo = Histogram1D.load(charge_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
dark_count_rate = np.zeros(n_pixels) * np.nan
electronic_noise = np.zeros(n_pixels) * np.nan
crosstalk = np.zeros(n_pixels) * np.nan
gain = np.zeros(n_pixels) * np.nan
for i, pixel in tqdm(enumerate(pixel_id), total=n_pixels,
desc='Pixel'):
histo = max_histo[i]
fitter = MaxHistoFitter(histo, estimated_gain, throw_nan=True)
try:
fitter.fit(ncall=100)
fitter.fit(ncall=ncall)
n_entries = histo.data.sum()
number_of_zeros = fitter.parameters['a_0']
window_length = 4 * n_samples
rate = compute_dark_rate(number_of_zeros,
n_entries,
window_length)
electronic_noise[i] = fitter.parameters['sigma_e']
dark_count_rate[i] = rate
if debug:
fitter.draw()
fitter.draw_init(x_label='[LSB]')
fitter.draw_fit(x_label='[LSB]')
plt.show()
except Exception as e:
print('Could not compute dark count rate'
' in pixel {}'.format(pixel))
print(e)
np.savez(results_filename, dcr=dark_count_rate,
sigma_e=electronic_noise, pixel_id=pixel_id)
for i, pixel in tqdm(enumerate(pixel_id), total=n_pixels,
desc='Pixel'):
histo = spe_histo[i]
fitter = SPEFitter(histo, estimated_gain, throw_nan=True)
try:
fitter.fit(ncall=100)
fitter.fit(ncall=ncall)
params = fitter.parameters
n_entries = params['a_1']
n_entries += params['a_2']
n_entries += params['a_3']
n_entries += params['a_4']
crosstalk[i] = (n_entries - params['a_1']) / n_entries
gain[i] = params['gain']
if debug:
fitter.draw()
fitter.draw_init(x_label='[LSB]')
fitter.draw_fit(x_label='[LSB]')
plt.show()
except Exception as e:
print('Could not compute gain and crosstalk'
' in pixel {}'.format(pixel))
print(e)
data = dict(np.load(results_filename))
data['crosstalk'] = crosstalk
data['gain'] = gain
np.savez(results_filename, **data)
save_figure = convert_text(args['--save_figures'])
if save_figure is not None:
output_path = save_figure
spe_histo = Histogram1D.load(charge_histo_filename)
spe_amplitude = Histogram1D.load(charge_histo_filename)
raw_histo = Histogram1D.load(raw_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
figure_directory = output_path + 'figures/'
if not os.path.exists(figure_directory):
os.makedirs(figure_directory)
histograms = [spe_histo, spe_amplitude, raw_histo, max_histo]
names = ['histogram_charge/', 'histogram_amplitude/', 'histogram_raw/',
'histo_max/']
for i, histo in enumerate(histograms):
figure = plt.figure()
histogram_figure_directory = figure_directory + names[i]
if not os.path.exists(histogram_figure_directory):
os.makedirs(histogram_figure_directory)
for j, pixel in enumerate(pixel_id):
axis = figure.add_subplot(111)
figure_path = histogram_figure_directory + 'pixel_{}'. \
format(pixel)
try:
histo.draw(index=(j,), axis=axis, log=True, legend=False)
figure.savefig(figure_path)
except Exception as e:
print('Could not save pixel {} to : {} \n'.
format(pixel, figure_path))
print(e)
axis.remove()
if args['--display']:
spe_histo = Histogram1D.load(charge_histo_filename)
raw_histo = Histogram1D.load(raw_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
spe_histo.draw(index=(0,), log=True, legend=False)
raw_histo.draw(index=(0,), log=True, legend=False)
max_histo.draw(index=(0,), log=True, legend=False)
try:
data = np.load(results_filename)
dark_count_rate = data['dcr']
electronic_noise = data['sigma_e']
crosstalk = data['crosstalk']
gain = data['gain']
except IOError as e:
print(e)
print('Could not find the analysis files !')
plt.figure()
plt.hist(dark_count_rate[np.isfinite(dark_count_rate)],
bins='auto')
plt.xlabel('dark count rate [GHz]')
plt.legend(loc='best')
plt.figure()
plt.hist(crosstalk[np.isfinite(crosstalk)],
bins='auto')
plt.xlabel('Crosstalk []')
plt.legend(loc='best')
plt.figure()
plt.hist(gain[np.isfinite(gain)],
bins='auto')
plt.xlabel('Gain [LSB/p.e.]')
plt.legend(loc='best')
plt.figure()
plt.hist(electronic_noise[np.isfinite(electronic_noise)],
bins='auto')
plt.xlabel('$\sigma_e$ [LSB]')
plt.legend(loc='best')
plt.show()
return
if __name__ == '__main__':
entry()
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/user_interfaces/gtk_spreadsheet.py | 13 | 2463 | #!/usr/bin/env python
"""
Example of embedding matplotlib in an application and interacting with
a treeview to store data. Double click on an entry to update plot
data
"""
import pygtk
pygtk.require('2.0')
import gtk
from gtk import gdk
import matplotlib
matplotlib.use('GTKAgg') # or 'GTK'
from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas
from numpy.random import random
from matplotlib.figure import Figure
class DataManager(gtk.Window):
numRows, numCols = 20,10
data = random((numRows, numCols))
def __init__(self):
gtk.Window.__init__(self)
self.set_default_size(600, 600)
self.connect('destroy', lambda win: gtk.main_quit())
self.set_title('GtkListStore demo')
self.set_border_width(8)
vbox = gtk.VBox(False, 8)
self.add(vbox)
label = gtk.Label('Double click a row to plot the data')
vbox.pack_start(label, False, False)
sw = gtk.ScrolledWindow()
sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
sw.set_policy(gtk.POLICY_NEVER,
gtk.POLICY_AUTOMATIC)
vbox.pack_start(sw, True, True)
model = self.create_model()
self.treeview = gtk.TreeView(model)
self.treeview.set_rules_hint(True)
# matplotlib stuff
fig = Figure(figsize=(6,4))
self.canvas = FigureCanvas(fig) # a gtk.DrawingArea
vbox.pack_start(self.canvas, True, True)
ax = fig.add_subplot(111)
self.line, = ax.plot(self.data[0,:], 'go') # plot the first row
self.treeview.connect('row-activated', self.plot_row)
sw.add(self.treeview)
self.add_columns()
self.add_events(gdk.BUTTON_PRESS_MASK |
gdk.KEY_PRESS_MASK|
gdk.KEY_RELEASE_MASK)
def plot_row(self, treeview, path, view_column):
ind, = path # get the index into data
points = self.data[ind,:]
self.line.set_ydata(points)
self.canvas.draw()
def add_columns(self):
for i in range(self.numCols):
column = gtk.TreeViewColumn('%d'%i, gtk.CellRendererText(), text=i)
self.treeview.append_column(column)
def create_model(self):
types = [float]*self.numCols
store = gtk.ListStore(*types)
for row in self.data:
store.append(row)
return store
manager = DataManager()
manager.show_all()
gtk.main()
| mit |
pingpan2013/sensor-box-project | sensor_project/genGraphs.py | 1 | 4044 | #!/usr/bin/python
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import numpy as np
import pandas as pd # used to convert datetime64 to datetime
import csv
import sys
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
class Gen_Graph:
def __init__(self, _filename):
self.filename = _filename
self.data = []
self.dtype = []
def readData(self):
'''Read the data from .csv file'''
with open(self.filename, 'r') as file:
reader = csv.reader(file)
for row in reader:
self.data.append(tuple(row))
return self.data
def genDtype(self):
'''Get the data type, always put DATE in the last '''
for i in xrange(len(self.data[0])):
if i != len(self.data[0]) - 1:
self.dtype.append((str(self.data[0][i]), '<f8'))
else:
self.dtype.append((self.data[0][i], '<M8[s]'))
print "Data Type: " + str(self.dtype)
print '=============================================================='
def uniqueish_color(self):
'''Randomly select a color'''
return plt.cm.gist_ncar(np.random.random())
def genGraph(self):
'''Generate the graph with unique y axis'''
self.genDtype()
x = np.array(self.data[1:], dtype=self.dtype)
np.save('./graph_data/data', x)
t = np.load('./graph_data/data.npy').view(np.recarray)
fig, ax = plt.subplots(1)
'''Drawing multiple lines in one graph'''
for label in self.data[0]:
if label != 'Time':
dtype = t['{0}'.format(label)]
ax.plot(pd.to_datetime(t.Time), dtype)
ax.set_xlabel(' date ')
'''Formatting the date'''
fig.autofmt_xdate()
ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
plt.title('Sensor Data Flow')
'''Create the labels for different lines'''
labels = list(self.data[0][:-1])
plt.legend(labels, loc='lower left')
plt.show()
def genGraph_m(self):
'''Generate the graph with multiple y axises'''
self.genDtype()
x = np.array(self.data[1:], dtype=self.dtype)
np.save('./graph_data/data', x)
t = np.load('./graph_data/data.npy').view(np.recarray)
fig = plt.figure()
fig.subplots_adjust(right=0.75)
ax = fig.add_subplot(111)
'''Drawing multiple lines with different y axises in one graph'''
lines = []
labels = list(self.data[0][:-1])
for num in xrange(len(self.data[0]) - 1):
label = labels[num]
if num == 0:
dtype = t['{0}'.format(label)]
line1, = ax.plot(pd.to_datetime(t.Time), dtype, color=self.uniqueish_color())
lines.append(line1)
ax.set_ylabel(label)
ax.set_xlabel('Date')
elif label != 'Time':
dtype = t['{0}'.format(label)]
par = ax.twinx()
line2, = par.plot(pd.to_datetime(t.Time), dtype, color=self.uniqueish_color())
lines.append(line2)
par.set_ylabel(label)
'''Formatting the date'''
fig.autofmt_xdate()
ax.fmt_xdata = mdates.DateFormatter('%Y-%m-%d')
plt.title('Sensor Data Flow')
'''Create the labels for different lines'''
ax.legend(lines, labels, loc='lower left')
plt.draw()
plt.show()
def main():
if len(sys.argv) != 2:
print "Error with the parameters! Please specify the file path!"
sys.exit(2)
filename = sys.argv[1]
gg = Gen_Graph(filename)
data = gg.readData()
print "Original Data: "
for i in data:
print i
print '=============================================================='
gg.genGraph_m()
print "Finished Drawing!"
if __name__ == "__main__":
main()
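# Illustrative input sketch (added for clarity, not part of the original
# script): the CSV is expected to carry a header row whose *last* column is
# the timestamp, e.g.
#
# Temperature,Humidity,Time
# 21.5,40.2,2014-06-01T12:00:00
# 21.7,40.0,2014-06-01T12:10:00
#
# invoked as: python genGraphs.py path/to/sensors.csv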
| gpl-3.0 |
Open-Power-System-Data/conventional_power_plants | download_and_process_DE_functions.py | 1 | 14757 | # -*- coding: utf-8 -*-
import urllib.parse
import urllib.request
import posixpath
import datetime
import os
import logging
import filecmp
import difflib
import json
import sqlite3
import hashlib
import yaml
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
#from bokeh.io import output_notebook
# output_notebook()
# Logging Setup
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
datefmt='%d %b %Y %H:%M:%S')
logger = logging.getLogger()
# create download and output folders if they do not exist
os.makedirs(os.path.join('download'), exist_ok=True)
os.makedirs(os.path.join('output'), exist_ok=True)
os.makedirs(os.path.join('output', 'original_data'), exist_ok=True)
def get_sha_hash(path, blocksize=65536):
sha_hasher = hashlib.sha256()
with open(path, 'rb') as f:
buffer = f.read(blocksize)
while len(buffer) > 0:
sha_hasher.update(buffer)
buffer = f.read(blocksize)
return sha_hasher.hexdigest()
def downloadandcache(url):
"""
Download a file into a folder called "download".
The file is prefixed with the download date YYYY-M-D-.
Returns the download folder name, the date string, and the filename.
Parameters
----------
url : str
Url of a file to be downloaded
"""
path = urllib.parse.urlsplit(url).path
filename = posixpath.basename(path)
now = datetime.datetime.now()
datestring = str(now.year) + "-" + str(now.month) + "-" + str(now.day)
filepath = os.path.join('download', datestring + "-" + filename)
filepath_original_data = os.path.join('output',
'original_data',
filename)
# check if file exists, otherwise download it
if not os.path.exists(filepath):
logger.info('Downloading file %s', filename)
urllib.request.urlretrieve(url, filepath)
urllib.request.urlretrieve(url, filepath_original_data)
else:
logger.info('Using local file from %s', filepath)
foldername = 'download'
return foldername, datestring, filename
def decrementday(year, month, day):
"""
Given values for year, month, and day, the values of the previous day are
returned. At the moment, the function assumes that every month has 31 days,
so that it will return February 31st when given values for March 1.
Parameters
----------
year : integer
Integer year
month : integer
Integer month
day : integer
Integer day
"""
if day > 1:
day = day - 1
else:
day = 31
if month > 1:
month = month - 1
else:
month = 12
year = year - 1
return year, month, day
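# Worked examples (added for clarity, not part of the original module),
# reflecting the simplified 31-day months described above:
# decrementday(2014, 3, 1)  -> (2014, 2, 31)
# decrementday(2014, 1, 1)  -> (2013, 12, 31)
# decrementday(2014, 6, 15) -> (2014, 6, 14)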
def getolderfilenameandcleanup(foldername, datestring, filename):
"""
Given a set of foldername and filename as returned by
the downloadandcache function, an older non-identical file with
the same file structure is searched for in the folder. Files identical to the
one given are deleted.
Parameters
----------
foldername : str
folder where files are located
datestring : str
string of original file date YYYY-M-D
filename : str
filename of original file
"""
originalfilepath = os.path.join(foldername, datestring + "-" + filename)
now = datetime.datetime.now()
year = now.year
month = now.month
day = now.day
# loop through older possible files
i = 0
while i < 2000:
i = i + 1
year, month, day = decrementday(year, month, day)
datestring = str(year) + "-" + str(month) + "-" + str(day)
filepath = os.path.join(foldername, datestring + "-" + filename)
# Does the file exist?
if os.path.isfile(filepath):
# Check if file is identical to original file. If yes delete this
# file and continue
if filecmp.cmp(originalfilepath, filepath):
# print('files are identical, deleting', filepath)
os.remove(filepath)
else:
# print('files are not identical:', filepath)
return filepath
raise ValueError('no older file found')
def getmatchinglist():
"""
This function returns the matchinglist located under
/input/matching_bnetza_uba.csv
Parameters
----------
none
"""
# read matching list
result = pd.read_csv(
os.path.join('input/data/DE', 'matching_bnetza_uba.csv'),
skiprows=0,
sep=',', # CSV field separator, default is ','
thousands=',', # Thousands separator, default is ','
decimal='.', # Decimal separator, default is '.')
encoding='cp1252')
result['uba_id_string'] = (result['uba_match_name'] + '_'
+ result['uba_match_fuel'])
return result
def getbnetzalist(url_bnetza, previous=False):
"""
This function returns the dataframe of the plantlist by the
Bundesnetzagentur. if previous == True, the next-oldest different plantlist
in the folder is returned as determined by the function
getolderfilenameandcleanup.
Parameters
----------
url_bnetza : str
URL of plant list
previous : boolean
Should previous plant list be returned?
"""
foldername, datestring, filename = downloadandcache(url_bnetza)
if not previous:
plantlist = pd.read_csv(os.path.join(foldername, datestring + "-" + filename),
skiprows=9,
sep=';', # CSV field separator, default is ','
thousands='.', # Thousands separator, default is ','
decimal=',', # Decimal separator, default is '.'
encoding='cp1252')
return plantlist
elif previous:
oldfilename = getolderfilenameandcleanup(foldername, datestring, filename)
oldplantlist = pd.read_csv(oldfilename,
skiprows=9,
sep=';', # CSV field separator, default is ','
thousands='.', # Thousands separator, default is ','
decimal=',', # Decimal separator, default is '.'
encoding='cp1252')
return oldplantlist
def getubalist(url_uba, previous=False):
"""
This function returns the dataframe of the plantlist by the
Umweltbundesamt. if previous == True, the next-oldest different plantlist
in the folder is returned as determined by the function
getolderfilenameandcleanup.
Parameters
----------
url_uba : str
URL of plant list
previous : boolean
Should previous plant list be returned?
"""
foldername, datestring, filename = downloadandcache(url_uba)
if not previous:
plantlist = pd.read_excel(os.path.join(foldername, datestring + "-" + filename), skiprows=9)
return plantlist
elif previous:
oldfilename = getolderfilenameandcleanup(foldername, datestring, filename)
oldplantlist = pd.read_excel(oldfilename, skiprows=9)
return oldplantlist
def getlistdifferences(oldplantlist, newplantlist):
"""
This function returns the difference between two plantlists, and only takes
into account the columns specified within the function.
Parameters
----------
oldplantlist : DataFrame
Old Plantlist
newplantlist : DataFrame
New Plantlist
"""
oldplantlist['source'] = 'old'
newplantlist['source'] = 'new'
comparisonplantlist = pd.concat([oldplantlist, newplantlist])
# Only include some columns in comparison
includecolumns = ['Kraftwerksnummer Bundesnetzagentur',
'Kraftwerksname',
'Blockname',
'Kraftwerksname / Standort',
'Kraftwerksstandort',
'Primärenergieträger',
]
cols = [col for col in comparisonplantlist.columns if col in includecolumns]
comparisonplantlist = comparisonplantlist.drop_duplicates(keep=False, subset=cols)
# Sort by first column
comparisonplantlist = comparisonplantlist.sort_values(comparisonplantlist.columns[0], ascending=True)
return comparisonplantlist
def matchinglistcheck(url_bnetza, url_uba):
"""
This function checks the BNetzA and UBA plantlists against the
matchinglist and prints out errors. For entries from the UBA plantlist a
suggestion for correction with the closest possible match is printed.
Parameters
----------
url_bnetza : str
URL of the BNetzA plant list
url_uba : str
URL of the UBA plant list
"""
logger.info('Starting Matchinglistcheck')
plantlist_uba = getubalist(url_uba)
plantlist_bnetza = getbnetzalist(url_bnetza)
matchinglist = getmatchinglist()
plantlist_uba['uba_id_string'] = (plantlist_uba['Kraftwerksname / Standort']
+ '_' + plantlist_uba['Primärenergieträger'])
# print(plantlist_uba.uba_id_string)
matchinglist.rename(columns={'ID BNetzA': 'bnetza_id'}, inplace=True)
uba_entrylist = [x for x in plantlist_uba.uba_id_string.tolist() if str(x) != 'nan']
errorfound = False
for entry in matchinglist.index:
# print(entry, matchinglist.loc[entry].bnetza_id, matchinglist.loc[entry].uba_id_string)
bnetza_entries = plantlist_bnetza.loc[(plantlist_bnetza['Kraftwerksnummer Bundesnetzagentur'] == matchinglist.loc[entry].bnetza_id)]
# print(entry, len(bnetza_entries))
if len(bnetza_entries) == 0:
logger.error('Entry not in Bnetzalist: ' + str(matchinglist.loc[entry].bnetza_id) + ' ' + str(matchinglist.loc[entry].uba_id_string))
errorfound = True
uba_entries = plantlist_uba.loc[(plantlist_uba['uba_id_string'] == matchinglist.loc[entry].uba_id_string)]
# print(entry, len(uba_entries))
if len(uba_entries) == 0:
alternatives = difflib.get_close_matches(matchinglist.loc[entry].uba_id_string, uba_entrylist, n=3, cutoff=0.6)
logger.error('Not in ubalist: ' + matchinglist.loc[entry].uba_id_string + ' ' + matchinglist.loc[entry].bnetza_id + ' Possible alternatives: ' + ', '.join(alternatives))
# raise ValueError('Value in Ubalist missing')
errorfound = True
if errorfound == False:
logger.info('No obvious errors in Matchinglist check found')
else:
logger.error('Errors in Matchinglist exist')
def potentialmatching(url_bnetza, url_uba):
"""
This function looks for power plants from the UBA list not contained in the
matching list. It looks up possible matches based on name similarity.
It returns a list of tuples with the plants name of the UBA List, augmented
with possible matches.
Parameters
----------
url_bnetza : string
Link to BNetzA List
url_uba: string
Link to UBA List
"""
plantlist_uba = getubalist(url_uba)
plantlist_bnetza = getbnetzalist(url_bnetza)
matchinglist = getmatchinglist()
plantlist_bnetza.rename(columns={'Kraftwerksnummer Bundesnetzagentur':'id'}, inplace=True)
plantlist_bnetza_reduced = plantlist_bnetza[plantlist_bnetza['id'].isin(matchinglist['ID BNetzA']) == False]
plantlist_bnetza_reduced = plantlist_bnetza_reduced[plantlist_bnetza_reduced['Energieträger'] != 'Solare Strahlungsenergie']
plantlist_bnetza_reduced = plantlist_bnetza_reduced[plantlist_bnetza_reduced['Energieträger'] != 'Windenergie (Onshore-Anlage)']
plantlist_bnetza_reduced = plantlist_bnetza_reduced[plantlist_bnetza_reduced['Energieträger'] != 'Windenergie (Offshore-Anlage)']
plantlist_bnetza_reduced['name_and_block'] = plantlist_bnetza_reduced['Kraftwerksname'] + ' ' + plantlist_bnetza_reduced['Blockname'] + '_' + plantlist_bnetza_reduced['Energieträger']
plantlist_uba.rename(columns={'Kraftwerksname / Standort' : 'name',
'Primärenergieträger': 'fuel',
'Anlagenart': 'type'}, inplace=True)
# print(plantlist_uba.columns)
plantlist_uba['uba_id_string'] = (plantlist_uba['name']
+ '_' + plantlist_uba['fuel'])
# Reduce uba list
plantlist_uba_reduced = plantlist_uba[plantlist_uba['uba_id_string'].isin(matchinglist['uba_id_string']) == False]
plantlist_uba_reduced = plantlist_uba_reduced[plantlist_uba_reduced['type'] != 'WEA']
plantlist_uba_reduced = plantlist_uba_reduced[plantlist_uba_reduced['type'] != 'PV']
plantlist_uba_reduced = plantlist_uba_reduced[plantlist_uba_reduced['type'].isnull() == False]
possiblematcheslist = []
for entry in plantlist_uba_reduced.index:
# print(entry)
moin = str(plantlist_uba_reduced.loc[entry].uba_id_string)
moin2 = [x for x in plantlist_bnetza_reduced.name_and_block.tolist() if str(x) != 'nan']# plantlist_bnetza_reduced['name_and_block'].tolist()
# print(moin)
# print(moin2)
possiblealternative = difflib.get_close_matches(moin, moin2, n=2, cutoff=0.3)
# print(moin, possiblealternative)
logger.info('Plant ' + moin + ' not in Matchinglist. Possible Matches from BNetzA List: ' + str(possiblealternative))
possiblematcheslist.append((moin, possiblealternative))
# return possiblematcheslist
return plantlist_bnetza_reduced
# Testing this file
if __name__ == "__main__":
# BNetzA Power plant list
url_bnetza = ('http://www.bundesnetzagentur.de/SharedDocs/Downloads/DE/'
'Sachgebiete/Energie/Unternehmen_Institutionen/Versorgungssicherheit/'
'Erzeugungskapazitaeten/Kraftwerksliste/Kraftwerksliste_CSV.csv'
'?__blob=publicationFile&v=10')
# UBA Power plant list
url_uba = ('https://www.umweltbundesamt.de/sites/default/files/medien/372/dokumente/kraftwerke-de-ab-100-mw.xls')
matchinglist = getmatchinglist()
plantlist_bnetza = getbnetzalist(url_bnetza, previous=False)
# plantlist_bnetza_previous = getbnetzalist(url_bnetza, previous=True)
# plantlist_bnetza_differences = getlistdifferences(plantlist_bnetza_previous, plantlist_bnetza)
# plantlist_uba = getubalist(url_uba, previous=False)
# plantlist_uba_previous = getubalist(url_uba, previous=True)
# plantlist_uba_differences = getlistdifferences(plantlist_uba_previous, plantlist_uba)
matchinglistcheck(url_bnetza, url_uba)
res = potentialmatching(url_bnetza, url_uba)
| mit |
q1ang/seaborn | seaborn/tests/test_distributions.py | 14 | 8102 | import numpy as np
import pandas as pd
import matplotlib as mpl
import matplotlib.pyplot as plt
import nose.tools as nt
import numpy.testing as npt
from numpy.testing.decorators import skipif
from . import PlotTestCase
from .. import distributions as dist
try:
import statsmodels.nonparametric.api
assert statsmodels.nonparametric.api
_no_statsmodels = False
except ImportError:
_no_statsmodels = True
class TestKDE(PlotTestCase):
rs = np.random.RandomState(0)
x = rs.randn(50)
y = rs.randn(50)
kernel = "gau"
bw = "scott"
gridsize = 128
clip = (-np.inf, np.inf)
cut = 3
def test_scipy_univariate_kde(self):
"""Test the univariate KDE estimation with scipy."""
grid, y = dist._scipy_univariate_kde(self.x, self.bw, self.gridsize,
self.cut, self.clip)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
for bw in ["silverman", .2]:
dist._scipy_univariate_kde(self.x, bw, self.gridsize,
self.cut, self.clip)
@skipif(_no_statsmodels)
def test_statsmodels_univariate_kde(self):
"""Test the univariate KDE estimation with statsmodels."""
grid, y = dist._statsmodels_univariate_kde(self.x, self.kernel,
self.bw, self.gridsize,
self.cut, self.clip)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
for bw in ["silverman", .2]:
dist._statsmodels_univariate_kde(self.x, self.kernel, bw,
self.gridsize, self.cut,
self.clip)
def test_scipy_bivariate_kde(self):
"""Test the bivariate KDE estimation with scipy."""
clip = [self.clip, self.clip]
x, y, z = dist._scipy_bivariate_kde(self.x, self.y, self.bw,
self.gridsize, self.cut, clip)
nt.assert_equal(x.shape, (self.gridsize, self.gridsize))
nt.assert_equal(y.shape, (self.gridsize, self.gridsize))
nt.assert_equal(len(z), self.gridsize)
# Test a specific bandwidth
clip = [self.clip, self.clip]
x, y, z = dist._scipy_bivariate_kde(self.x, self.y, 1,
self.gridsize, self.cut, clip)
# Test that we get an error with an invalid bandwidth
with nt.assert_raises(ValueError):
dist._scipy_bivariate_kde(self.x, self.y, (1, 2),
self.gridsize, self.cut, clip)
@skipif(_no_statsmodels)
def test_statsmodels_bivariate_kde(self):
"""Test the bivariate KDE estimation with statsmodels."""
clip = [self.clip, self.clip]
x, y, z = dist._statsmodels_bivariate_kde(self.x, self.y, self.bw,
self.gridsize,
self.cut, clip)
nt.assert_equal(x.shape, (self.gridsize, self.gridsize))
nt.assert_equal(y.shape, (self.gridsize, self.gridsize))
nt.assert_equal(len(z), self.gridsize)
@skipif(_no_statsmodels)
def test_statsmodels_kde_cumulative(self):
"""Test computation of cumulative KDE."""
grid, y = dist._statsmodels_univariate_kde(self.x, self.kernel,
self.bw, self.gridsize,
self.cut, self.clip,
cumulative=True)
nt.assert_equal(len(grid), self.gridsize)
nt.assert_equal(len(y), self.gridsize)
# make sure y is monotonically increasing
npt.assert_((np.diff(y) > 0).all())
def test_kde_cummulative_2d(self):
"""Check error if args indicate bivariate KDE and cumulative."""
with npt.assert_raises(TypeError):
dist.kdeplot(self.x, data2=self.y, cumulative=True)
def test_bivariate_kde_series(self):
df = pd.DataFrame({'x': self.x, 'y': self.y})
ax_series = dist.kdeplot(df.x, df.y)
ax_values = dist.kdeplot(df.x.values, df.y.values)
nt.assert_equal(len(ax_series.collections),
len(ax_values.collections))
nt.assert_equal(ax_series.collections[0].get_paths(),
ax_values.collections[0].get_paths())
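# Illustrative sketch (not part of the seaborn test suite): the TestKDE cases
# above exercise the private KDE helpers that back the public kdeplot entry
# point. A minimal usage sketch of that public API on toy data:
def _kdeplot_usage_sketch():
    """Draw the univariate and bivariate KDEs the tests above rely on."""
    rs = np.random.RandomState(0)
    x, y = rs.randn(50), rs.randn(50)
    ax = dist.kdeplot(x)     # univariate density curve
    ax = dist.kdeplot(x, y)  # bivariate contour plot on the same axes
    return ax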
class TestJointPlot(PlotTestCase):
rs = np.random.RandomState(sum(map(ord, "jointplot")))
x = rs.randn(100)
y = rs.randn(100)
data = pd.DataFrame(dict(x=x, y=y))
def test_scatter(self):
g = dist.jointplot("x", "y", self.data)
nt.assert_equal(len(g.ax_joint.collections), 1)
x, y = g.ax_joint.collections[0].get_offsets().T
npt.assert_array_equal(self.x, x)
npt.assert_array_equal(self.y, y)
x_bins = dist._freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = dist._freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
def test_reg(self):
g = dist.jointplot("x", "y", self.data, kind="reg")
nt.assert_equal(len(g.ax_joint.collections), 2)
x, y = g.ax_joint.collections[0].get_offsets().T
npt.assert_array_equal(self.x, x)
npt.assert_array_equal(self.y, y)
x_bins = dist._freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = dist._freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
nt.assert_equal(len(g.ax_joint.lines), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 1)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_resid(self):
g = dist.jointplot("x", "y", self.data, kind="resid")
nt.assert_equal(len(g.ax_joint.collections), 1)
nt.assert_equal(len(g.ax_joint.lines), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 0)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_hex(self):
g = dist.jointplot("x", "y", self.data, kind="hex")
nt.assert_equal(len(g.ax_joint.collections), 1)
x_bins = dist._freedman_diaconis_bins(self.x)
nt.assert_equal(len(g.ax_marg_x.patches), x_bins)
y_bins = dist._freedman_diaconis_bins(self.y)
nt.assert_equal(len(g.ax_marg_y.patches), y_bins)
def test_kde(self):
g = dist.jointplot("x", "y", self.data, kind="kde")
nt.assert_true(len(g.ax_joint.collections) > 0)
nt.assert_equal(len(g.ax_marg_x.collections), 1)
nt.assert_equal(len(g.ax_marg_y.collections), 1)
nt.assert_equal(len(g.ax_marg_x.lines), 1)
nt.assert_equal(len(g.ax_marg_y.lines), 1)
def test_color(self):
g = dist.jointplot("x", "y", self.data, color="purple")
purple = mpl.colors.colorConverter.to_rgb("purple")
scatter_color = g.ax_joint.collections[0].get_facecolor()[0, :3]
nt.assert_equal(tuple(scatter_color), purple)
hist_color = g.ax_marg_x.patches[0].get_facecolor()[:3]
nt.assert_equal(hist_color, purple)
def test_annotation(self):
g = dist.jointplot("x", "y", self.data)
nt.assert_equal(len(g.ax_joint.legend_.get_texts()), 1)
g = dist.jointplot("x", "y", self.data, stat_func=None)
nt.assert_is(g.ax_joint.legend_, None)
def test_hex_customise(self):
# test that default gridsize can be overridden
g = dist.jointplot("x", "y", self.data, kind="hex",
joint_kws=dict(gridsize=5))
nt.assert_equal(len(g.ax_joint.collections), 1)
a = g.ax_joint.collections[0].get_array()
nt.assert_equal(28, a.shape[0]) # 28 hexagons expected for gridsize 5
def test_bad_kind(self):
with nt.assert_raises(ValueError):
dist.jointplot("x", "y", self.data, kind="not_a_kind")
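# Illustrative sketch (not part of the seaborn test suite): the TestJointPlot
# cases above walk through the plot kinds jointplot accepts. A compact usage
# sketch of those same kinds on one toy DataFrame:
def _jointplot_kinds_sketch():
    """Build one JointGrid per kind exercised by the tests above."""
    rs = np.random.RandomState(0)
    data = pd.DataFrame(dict(x=rs.randn(100), y=rs.randn(100)))
    return [dist.jointplot("x", "y", data, kind=kind)
            for kind in ("scatter", "reg", "resid", "hex", "kde")]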
| bsd-3-clause |
tuanvu216/udacity-course | intro_to_machine_learning/lesson/lesson_4_choose_your_own_algorithm/your_algorithm.py | 1 | 2628 | #!/usr/bin/python
import matplotlib.pyplot as plt
from prep_terrain_data import makeTerrainData
from class_vis import prettyPicture
from time import time
features_train, labels_train, features_test, labels_test = makeTerrainData()
### the training data (features_train, labels_train) have both "fast" and "slow" points mixed
### in together--separate them so we can give them different colors in the scatterplot,
### and visually identify them
grade_fast = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==0]
bumpy_fast = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==0]
grade_slow = [features_train[ii][0] for ii in range(0, len(features_train)) if labels_train[ii]==1]
bumpy_slow = [features_train[ii][1] for ii in range(0, len(features_train)) if labels_train[ii]==1]
#### initial visualization
plt.xlim(0.0, 1.0)
plt.ylim(0.0, 1.0)
plt.scatter(bumpy_fast, grade_fast, color = "b", label="fast")
plt.scatter(bumpy_slow, grade_slow, color = "r", label="slow")
plt.legend()
plt.xlabel("bumpiness")
plt.ylabel("grade")
plt.show()
#################################################################################
### your code here! name your classifier object clf if you want the
### visualization code (prettyPicture) to show you the decision boundary
# K Nearest Neighbor
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
clf = KNeighborsClassifier(n_neighbors=1)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# Random Forest
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score
clf = RandomForestClassifier(n_estimators=10)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
# AdaBoost
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
clf = AdaBoostClassifier(n_estimators=100)
t0 = time()
clf.fit(features_train, labels_train)
print "training time:", round(time()-t0, 3), "s"
t0 = time()
pred = clf.predict(features_test)
print "predicting time:", round(time()-t0, 3), "s"
acc = accuracy_score(pred, labels_test)
print acc
try:
prettyPicture(clf, features_test, labels_test)
except NameError:
pass
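# Illustrative sketch (not part of the original exercise): the three blocks
# above fit and score each classifier separately; a compact way to compare
# them side by side on the same terrain data could look like this:
def compare_classifiers(features_train, labels_train, features_test, labels_test):
    """Return a dict mapping classifier name to test-set accuracy."""
    candidates = {"knn": KNeighborsClassifier(n_neighbors=1),
                  "random_forest": RandomForestClassifier(n_estimators=10),
                  "adaboost": AdaBoostClassifier(n_estimators=100)}
    scores = {}
    for name, model in candidates.items():
        model.fit(features_train, labels_train)
        scores[name] = accuracy_score(model.predict(features_test), labels_test)
    return scores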
| mit |
LumPenPacK/NetworkExtractionFromImages | osx_build/nefi2_osx_amd64_xcode_2015/site-packages/numpy_1.11/numpy/lib/npyio.py | 35 | 71412 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
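# Illustrative sketch (not part of numpy): as the checks above show, load()
# dispatches on the first bytes of the stream - a zip local-file header means
# an .npz archive, the npy magic string means a single array, and anything
# else is handed to pickle. A minimal restatement of that sniffing logic:
def _sniff_numpy_file(fid):
    """Classify an open binary file the same way np.load does."""
    magic = fid.read(len(format.MAGIC_PREFIX))
    fid.seek(-len(magic), 1)  # rewind so the real reader sees the whole stream
    if magic.startswith(asbytes('PK\x03\x04')):
        return 'npz'
    elif magic == format.MAGIC_PREFIX:
        return 'npy'
    return 'pickle'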
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
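# Illustrative sketch (not part of numpy): _getconv maps a dtype to the
# callable loadtxt applies to each raw text field. Integer dtypes go through
# float() first so that values such as b"3.0" still parse, while float dtypes
# get floatconv, which also understands hexadecimal literals:
def _getconv_example():
    """Show the converters picked for two common dtypes."""
    int_conv = _getconv(np.dtype(np.int32))      # lambda x: int(float(x))
    float_conv = _getconv(np.dtype(np.float64))  # floatconv
    return int_conv(b"3.0"), float_conv(b"0x1p-1")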
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
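    # Illustrative note (not in the original source): for a structured dtype
    # such as [('x', float), ('pos', float, 2)], flatten_dtype returns three
    # float base dtypes plus packing info, and pack_items then turns a parsed
    # row like [1.0, 2.0, 3.0] back into the nested shape (1.0, [2.0, 3.0]).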
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
    # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, list of str, generator
File, filename, list, or generator to read. If the filename
        extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
If `names` is a sequence or a single-string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
['return','file','print']. Excluded names are appended an underscore:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
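    # Illustrative note (not in the original source): with names=('a', 'b')
    # and missing_values={'a': 'N/A', None: '???'}, the per-column lists built
    # above end up holding b'', 'N/A' and '???' for column 'a' and b'' plus
    # '???' for column 'b'; the None key acts as a default for every column.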
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
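    Examples
    --------
    A minimal usage sketch; the records are passed as an iterable of lines,
    which `genfromtxt` accepts in place of a file name.
    >>> rec = np.recfromtxt([b"name value", b"alpha 1", b"beta 2"], names=True)
    >>> rec.dtype.names
    ('name', 'value')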
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
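    Examples
    --------
    A minimal usage sketch; field names are lower-cased because
    `case_sensitive` defaults to "lower" here.
    >>> rec = np.recfromcsv([b"Name,Value", b"alpha,1", b"beta,2"])
    >>> rec.dtype.names
    ('name', 'value')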
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
| bsd-2-clause |
amozie/amozie | studzie/keras_gym/mountain_car_v0.py | 1 | 2577 | import numpy as np
import matplotlib.pyplot as plt
import gym
import time
import copy
from keras.models import Sequential, Model
from keras.layers import Dense, Activation, Flatten, Lambda, Input, Reshape, concatenate, Merge
from keras.optimizers import Adam, RMSprop
from keras.callbacks import History
from keras import backend as K
import tensorflow as tf
from gym import Env, Space, spaces
from gym.utils import seeding
from rl.agents.dqn import DQNAgent
from rl.policy import BoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory, EpisodeParameterMemory
from rl.agents.cem import CEMAgent
from rl.agents import SARSAAgent
from rl.callbacks import TrainEpisodeLogger, CallbackList
class MountainCarEnv(Env):
metadata = {'render.modes': ['human', 'rgb_array']}
def __init__(self) -> None:
self.env = gym.make('MountainCar-v0')
self.action_space = self.env.action_space
self.observation_space = self.env.observation_space
def _step(self, action):
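        # Reward shaping: replace MountainCar's constant -1 step reward with
        # |velocity| - 0.05 (observation[1] is the cart velocity), so the agent
        # is rewarded for building up momentum rather than only for reaching the goal.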
step = self.env.step(action)
step = list(step)
step[1] = np.abs(step[0][1]) - 0.05
return tuple(step)
def _reset(self):
return self.env.reset()
def _seed(self, seed=None):
return self.env.seed(seed)
def _render(self, mode='human', close=False):
return self.env.render(mode, close)
def _close(self):
return self.env.close()
env = MountainCarEnv()
env.seed()
nb_actions = env.action_space.n
x = Input((1,) + env.observation_space.shape)
y = Flatten()(x)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(16)(y)
y = Activation('relu')(y)
y = Dense(nb_actions)(y)
y = Activation('linear')(y)
model = Model(x, y)
memory = SequentialMemory(limit=10000, window_length=1)
# policy = BoltzmannQPolicy()
policy = EpsGreedyQPolicy()
dqn = DQNAgent(model=model, nb_actions=nb_actions, memory=memory, nb_steps_warmup=1000, gamma=.9, batch_size=32,
enable_dueling_network=False, dueling_type='avg', target_model_update=.1, policy=policy)
dqn.compile(Adam(), metrics=['mae'])
hist = dqn.fit(env, nb_steps=10000, visualize=False, verbose=2, callbacks=None)
state = env.reset()
action = env.action_space.sample()
print(action)
reward_list = []
for i in range(500):
action = np.argmax(dqn.model.predict(np.expand_dims(np.expand_dims(state, 0), 0))[0])
    state, reward, done, _ = env.step(action)
    reward_list.append(reward)
env.render()
env.render(close=True)
dqn.test(env, nb_episodes=5, visualize=True)
env.render(close=True) | apache-2.0 |
jeffersonfparil/GTWAS_POOL_RADseq_SIM | BACKUP_SCRIPTS_20170930/assignPheno.py | 1 | 4355 | #!/usr/bin/env python
import os, subprocess, sys, math
import numpy as np
import matplotlib.pyplot as plt
work_DIR = sys.argv[1]
genoFile = sys.argv[2]
nQTL = int(sys.argv[3])
heritability = float(sys.argv[4])
model = int(sys.argv[5])
os.chdir(work_DIR)
if model == 1:
#################################################
# MODEL 1: additive effects alone
    # y = Xb + e; e~N(0, Ve); Ve = Vg*(1/h^2 - 1); Vg = sum(cor(Xq)*bq*bq')/4
#################################################
GEN = np.genfromtxt(genoFile, delimiter='\t', skip_header=1)
nLOCI = GEN.shape[0]
nIND = GEN.shape[1]
QTL_locations = np.random.choice(range(0, nLOCI), replace=False, size=nQTL)
QTL_locations.sort()
# DEFINING THE DISTRIBUTIONS OF THE EFFECTS:
mean_QTL = 100/(2*nQTL); var_QTL = 2 #normal QTL effects
#QTL effects:
QTL_effects = np.random.normal(mean_QTL, np.sqrt(var_QTL), size=nQTL) #for a mean QTL effect of ~5 and ~mean phenotypic value of 50
QTL_OUT = np.column_stack((QTL_locations, QTL_effects)) #for writing out
##########################################
X=np.transpose(GEN)
GFX=QTL_effects
nFX=nQTL
h2=heritability
    #partitioning the variance taking into account the linkage among associated SNPs
AssoX=X[:,QTL_locations]
Rho=np.corrcoef(AssoX,rowvar=0)
XtX=GFX.reshape(1,nFX)*GFX.reshape(nFX,1) #GFX * GFX' (nFXxnFX dimensions)
Vg=np.sum(Rho*XtX)/4
Ve=Vg*(1/h2-1)
#Generating the phenotypes based on the variance components
Xb=np.matmul(AssoX, GFX) #alpha
e=np.random.normal(0,Ve**(0.5),nIND)
Y_model1=Xb+e
#OUTPUT
np.savetxt("Simulated_Lolium_perenne_QTL.out", QTL_OUT, fmt='%s' ,delimiter="\t")
np.savetxt("Simulated_Lolium_perenne_PHENOTYPES.data", Y_model1, fmt='%s' ,delimiter="\t")
elif model == 2:
    #################################################
    # MODEL 2: additive genetic effects + transcript levels
    # y = Xg + Ba + Zt + e; e~N(0,Ve); Ve = (Vg+Vt)*(1/h^2 - 1)
#################################################
transBase = sys.argv[6]
transGeno = sys.argv[7]
GEN = np.genfromtxt(genoFile, delimiter='\t', skip_header=1)
T_base = np.genfromtxt(transBase, delimiter='\t', dtype=int)
T_geno = np.genfromtxt(transGeno, delimiter='\t', dtype=int)
nLOCI = GEN.shape[0]
nIND = GEN.shape[1]
nTRANS = len(T_base)
QTL_locations = np.random.choice(range(0, nLOCI), replace=False, size=nQTL)
QTL_locations.sort()
# DEFINING THE DISTRIBUTIONS OF THE EFFECTS:
mean_QTL = 100/(2*nQTL); var_QTL = 2 #normal QTL effects
mean_bT = (mean_QTL /4); var_bT = 1 #normal base trancript level effects
mean_gT = (mean_QTL /2); var_gT = 1 #normal genotype-specific trancript level effecs
#QTL effects:
QTL_effects = np.random.normal(mean_QTL, np.sqrt(var_QTL), size=nQTL) #for a mean QTL effect of ~5 and ~mean phenotypic value of 50
QTL_OUT = np.column_stack((QTL_locations, QTL_effects)) #for writing out
#Transcript Base-levels effects:
nCausalTrans = int(np.ceil(np.random.normal(nQTL, 1, size=1))[0]) #number of transcripts that affect the phenotype
locCausalTrans = np.random.choice(nTRANS, size=nCausalTrans, replace=False)
# (i.)base-level effects:
T_base_effects = np.random.normal(mean_bT, np.sqrt(var_bT), size=nCausalTrans)
# (ii.)genotype-specific level effects:
T_geno_effects = np.random.normal(mean_gT, np.sqrt(var_gT), size=nCausalTrans)
##########################################
X=np.transpose(GEN)
GFX=QTL_effects
nFX=nQTL
h2=heritability
T0 = T_base
T1 = T_geno
t0FX = np.zeros((nTRANS,1)); t0FX[locCausalTrans,0] = T_base_effects
t1FX = np.zeros((nTRANS,1)); t1FX[locCausalTrans,0] = T_geno_effects
    # Variance partitioning: Vg (additive genetic variance, computed as in model 1)
    # plus Vt, the variation due to genotype-specific transcript abundance
    AssoX=X[:,QTL_locations]
    RhoX=np.corrcoef(AssoX,rowvar=0)
    XtX=GFX.reshape(1,nFX)*GFX.reshape(nFX,1)
    Vg=np.sum(RhoX*XtX)/4
    AssoZ=T1[:,locCausalTrans]
    Rho=np.corrcoef(AssoZ,rowvar=0)
    ZtZ=T_geno_effects.reshape(1,nCausalTrans)*T_geno_effects.reshape(nCausalTrans,1)
    Vt=np.sum(Rho*ZtZ)/4
    Vet=(Vg+Vt)*(1/h2-1)
#generating the phenotypes using the new residual distribution et:Vet
Xg = np.matmul(X[:,QTL_locations], GFX)
Ba = np.sum(T0[locCausalTrans]*T_base_effects)
Zt = np.matmul(T1, t1FX)
et=np.random.normal(0,Vet**(0.5),nIND)
Y_model2 = Xg + Ba + Zt[:,0] + et
#OUTPUT
np.savetxt("Simulated_Lolium_perenne_QTL.out", QTL_OUT, fmt='%s' ,delimiter="\t")
np.savetxt("Simulated_Lolium_perenne_PHENOTYPES.data", Y_model1, fmt='%s' ,delimiter="\t") | gpl-3.0 |
suyashbire1/pyhton_scripts_mom6 | plot_twapv_budget_complete.py | 1 | 18150 | import sys
import readParams_moreoptions as rdp1
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mom_plot1 import m6plot, xdegtokm
import numpy as np
from netCDF4 import MFDataset as mfdset, Dataset as dset
import time
from plot_twamomx_budget_complete_direct_newest import extract_twamomx_terms
from plot_twamomy_budget_complete_direct_newest import extract_twamomy_terms
import pyximport
pyximport.install()
from getvaratzc import getvaratzc5, getvaratzc
def getutwa(fhgeo, fh, fh2, sl):
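    """Return the time-mean thickness-weighted zonal velocity at u points,
    utwa = <uh> / (h_Cu * dyCu), together with the mean thickness h_Cu
    (masked where thinner than 1e-3)."""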
dt = fh.variables['average_DT'][:]
dt = dt[:,np.newaxis,np.newaxis,np.newaxis]
uh = (fh.variables['uh_masked'][sl].filled(0)*dt).sum(axis=0,keepdims=True)/np.sum(dt)
h_cu = fh.variables['h_Cu'][sl].filled(0).mean(axis=0,keepdims=True)
h_cu = np.ma.masked_array(h_cu,mask=(h_cu<1e-3))
dycu = fhgeo.variables['dyCu'][sl[2:]]
utwa = uh/h_cu/dycu
return utwa, h_cu
def getvtwa(fhgeo, fh, fh2, sl):
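    """Return the time-mean thickness-weighted meridional velocity at v points,
    vtwa = <vh> / (dxCv * h_Cv), and the mean thickness h_Cv, with an extra
    (sign-reversed) column appended at the eastern edge so the arrays match the
    q-point grid used downstream."""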
dt = fh.variables['average_DT'][:]
dt = dt[:,np.newaxis,np.newaxis,np.newaxis]
vh = (fh.variables['vh_masked'][sl]*dt).sum(axis=0,keepdims=True)/np.sum(dt)
h_cv = fh.variables['h_Cv'][sl].mean(axis=0,keepdims=True)
h_cv = np.ma.masked_array(h_cv,mask=(h_cv<1e-3))
dxcv = fhgeo.variables['dxCv'][sl[2:]]
vtwa = vh/dxcv/h_cv
vtwa = np.concatenate((vtwa,-vtwa[:,:,:,-1:]),axis=3)
h_cv = np.concatenate((h_cv,h_cv[:,:,:,-1:]),axis=3)
return vtwa, h_cv
def getpv(fhgeo, fh, fh2, xs, xe, ys, ye, zs=0, ze=None):
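    """Return the PV-like quantity (f - du/dy + dv/dx) / h_q at q (corner)
    points, where the velocities are thickness-weighted averages and h_q is the
    corner-point thickness averaged from the four neighbouring u/v-point
    thicknesses."""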
sl = np.s_[:,zs:ze,ys:ye,xs:xe]
slpy = np.s_[:,zs:ze,ys:ye+1,xs:xe]
utwa,h_cu = getutwa(fhgeo, fh, fh2, slpy)
dybu = fhgeo.variables['dyBu'][sl[2:]]
utway = np.diff(utwa,axis=2)/dybu
vtwa,h_cv = getvtwa(fhgeo, fh, fh2, sl)
dxbu = fhgeo.variables['dxBu'][sl[2:]]
vtwax = np.diff(vtwa,axis=3)/dxbu
h_q = 0.25*(h_cu[:,:,:-1,:] + h_cu[:,:,1:,:] +
h_cv[:,:,:,:-1] + h_cv[:,:,:,1:])
f = fhgeo.variables['f'][sl[2:]]
pvhash = (f - utway + vtwax)/h_q
return pvhash, h_q
def extract_twapv_terms(geofil,vgeofil,fil,fil2,xstart,xend,ystart,yend,zs,ze,meanax,fil3=None,
alreadysaved=False):
if not alreadysaved:
keepax = ()
for i in range(4):
if i not in meanax:
keepax += (i,)
fhgeo = dset(geofil)
fh = mfdset(fil)
fh2 = mfdset(fil2)
zi = rdp1.getdims(fh)[2][0]
dbl = -np.diff(zi)*9.8/1031
(xs,xe),(ys,ye),dimq = rdp1.getlatlonindx(fh,wlon=xstart,elon=xend,
slat=ystart, nlat=yend,zs=zs,ze=ze,xhxq='xq',yhyq='yq')
dxbu = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[2][4]
dybu = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[2][5]
aq = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[1][1]
f = rdp1.getgeombyindx(fhgeo,xs,xe,ys,ye)[-1]
nt_const = dimq[0].size
pvhash,hq = getpv(fhgeo, fh, fh2, xs, xe, ys, ye)
sl = np.s_[:,zs:ze,ys:ye,xs:xe]
slpy = np.s_[:,zs:ze,ys:ye+1,xs:xe]
dxcu = fhgeo.variables['dxCu'][slpy[2:]]
dycv = fhgeo.variables['dyCv'][sl[2:]]
dycv = np.concatenate((dycv,dycv[:,-1:]),axis=1)
if fil3:
fh3 = mfdset(fil3)
sltn = np.s_[-1:,zs:ze,ys:ye,xs:xe]
islayerdeep0 = fh3.variables['islayerdeep'][-1:,0,0,0]
islayerdeep = (fh3.variables['islayerdeep'][sltn].filled(np.nan))
swash = (islayerdeep0 - islayerdeep)/islayerdeep0*100
fh3.close()
else:
swash = None
xmom = extract_twamomx_terms(geofil,vgeofil,fil,fil2,xs,xe,ys,ye+1,zs,ze,(0,),
alreadysaved=False,xyasindices=True,calledfrompv=True)[2]
ymom = extract_twamomy_terms(geofil,vgeofil,fil,fil2,xs,xe,ys,ye,zs,ze,(0,),
alreadysaved=False,xyasindices=True,calledfrompv=True)[2]
xmom = xmom[np.newaxis,:,:,:,:]
ymom = ymom[np.newaxis,:,:,:,:]
ymom = np.concatenate((ymom,ymom[:,:,:,-1:]),axis=3)
bxppvflx = np.sum(xmom[:,:,:,:,[0,1,3,4]],axis=4)
pvhash1,_ = getpv(fhgeo, fh, fh2, xs, xe, ys-1, ye+1)
sl1 = np.s_[:,zs:ze,ys-1:ye+1,xs:xe]
vtwa1, h_cv1 = getvtwa(fhgeo,fh,fh2,sl1)
vtwa1 = 0.5*(vtwa1[:,:,:,:-1] + vtwa1[:,:,:,1:])
pvhashvtwa = pvhash1*vtwa1
sl1 = np.s_[:,zs:ze,ys:ye+1,xs:xe]
h_cu1 = fh.variables['h_Cu'][sl1].filled(np.nan).mean(axis=0,keepdims=True)
h_cu1 = np.ma.masked_array(h_cu1,mask=(h_cu1<1e-3))
pvflxx = h_cu1*(pvhashvtwa[:,:,:-1,:]+pvhashvtwa[:,:,1:,:])/2
byppvflx = np.sum(ymom[:,:,:,:,[0,1,3,4]],axis=4)
pvhash1,_ = getpv(fhgeo, fh, fh2, xs-1, xe, ys, ye)
sl1 = np.s_[:,zs:ze,ys:ye+1,xs-1:xe]
utwa1, h_cu1 = getutwa(fhgeo,fh,fh2,sl1)
utwa1 = 0.5*(utwa1[:,:,:-1,:]+utwa1[:,:,1:,:])
pvhashutwa = pvhash1*utwa1
pvhashutwa[:,:,:,-1] = 0.0
sl1 = np.s_[:,zs:ze,ys:ye,xs:xe]
h_cv1 = fh.variables['h_Cv'][sl1].mean(axis=0,keepdims=True)
h_cv1 = np.ma.masked_array(h_cv1,mask=(h_cv1<1e-3))
pvflxy = h_cv1*(pvhashutwa[:,:,:,:-1]+pvhashutwa[:,:,:,1:])/2
pvflxy = np.concatenate((pvflxy,pvflxy[:,:,:,-1:]),axis=3)
bx = bxppvflx - pvflxx
by = byppvflx + pvflxy
xmom1 = xmom[:,:,:,:,[2,5,6,7,8,9]]
xmom1 = np.concatenate((+pvflxx[:,:,:,:,np.newaxis], xmom1, bx[:,:,:,:,np.newaxis]), axis = 4)
ymom1 = ymom[:,:,:,:,[2,5,6,7,8,9]]
ymom1 = np.concatenate((-pvflxy[:,:,:,:,np.newaxis], ymom1, by[:,:,:,:,np.newaxis]), axis = 4)
#pv = (-np.diff(xmom*dxcu[:,:,np.newaxis],axis=2) + np.diff(ymom*dycv[:,:,np.newaxis],axis=3))/aq[:,:,np.newaxis]
pv = -np.diff(xmom,axis=2)/dybu[:,:,np.newaxis] + np.diff(ymom,axis=3)/dxbu[:,:,np.newaxis]
pv1x = -np.diff(xmom1*dxcu[:,:,np.newaxis],axis=2)/aq[:,:,np.newaxis]
pv1y = np.diff(ymom1*dycv[:,:,np.newaxis],axis=3)/aq[:,:,np.newaxis]
slyp = np.s_[:,:,ys:ye+1,xs:xe]
ah1 = fhgeo.variables['Ah'][slyp[2:]]
slxmyp = np.s_[:,:,ys:ye+1,xs-1:xe]
uh = fh2.variables['uh'][slxmyp].filled(0).mean(axis=0,keepdims=True)
uhx = np.diff(uh,axis=3)/ah1
uhx = np.concatenate((uhx,uhx[:,:,:,-1:]),axis=3)
uhx = 0.25*(uhx[:,:,:-1,:-1] + uhx[:,:,:-1,1:] + uhx[:,:,1:,:-1] +
uhx[:,:,1:,1:])
pv1y[:,:,:,:,0] += pvhash*uhx
slymp = np.s_[:,:,ys-1:ye+1,xs:xe]
vh = fh2.variables['vh'][slymp].mean(axis=0,keepdims=True)
vhy = np.diff(vh,axis=2)/ah1
vhy = np.concatenate((vhy,vhy[:,:,:,-1:]),axis=3)
vhy = 0.25*(vhy[:,:,:-1,:-1] + vhy[:,:,:-1,1:] + vhy[:,:,1:,:-1] +
vhy[:,:,1:,1:])
pv1x[:,:,:,:,0] += pvhash*vhy
wd = fh2.variables['wd'][slyp].mean(axis=0,keepdims=True)
wdb = np.diff(wd,axis=1)
wdb = np.concatenate((wdb,wdb[:,:,:,-1:]),axis=3)
wdb = 0.25*(wdb[:,:,:-1,:-1] + wdb[:,:,:-1,1:] + wdb[:,:,1:,:-1] +
wdb[:,:,1:,1:])
pv3 = pvhash*wdb
pv3 = pv3[:,:,:,:,np.newaxis]
#hq[hq<1] = np.nan
pvnew = np.concatenate((pv1y[:,:,:,:,:1],
pv1x[:,:,:,:,:1],
pv3,
pv1x[:,:,:,:,1:-1],
pv1y[:,:,:,:,1:-1],
pv1x[:,:,:,:,-1:]+pv1y[:,:,:,:,-1:]),axis=4)/hq[:,:,:,:,np.newaxis]
pv = np.ma.filled(pv.astype(np.float64), np.nan)
pvnew = np.ma.filled(pvnew.astype(np.float64), np.nan)
pvhash = np.ma.filled(pvhash.astype(np.float64), np.nan)
pv = np.nanmean(pv,meanax,keepdims=True)
pvnew = np.nanmean(pvnew,meanax,keepdims=True)
pvhash = np.nanmean(pvhash,meanax,keepdims=True)
X = dimq[keepax[1]]
Y = dimq[keepax[0]]
if 1 in keepax:
dt = fh.variables['average_DT'][:]
dt = dt[:,np.newaxis,np.newaxis,np.newaxis]
em = (fh2.variables['e'][0:,zs:ze,ys:ye,xs:xe]*dt).sum(axis=0,keepdims=True)/np.sum(dt)
em = np.nanmean(em, meanax,keepdims=True)
z = np.linspace(-3000,0,100)
Y = z
P = getvaratzc5(pv.astype(np.float32),
z.astype(np.float32),
em.astype(np.float32)).squeeze()
Pnew = getvaratzc5(pvnew.astype(np.float32),
z.astype(np.float32),
em.astype(np.float32)).squeeze()
pvhash = getvaratzc(pvhash.astype(np.float32),
z.astype(np.float32),
em.astype(np.float32)).squeeze()
if fil3:
swash = np.nanmean(swash, meanax,keepdims=True)
swash = getvaratzc(swash.astype(np.float32),
z.astype(np.float32),
em.astype(np.float32)).squeeze()
else:
P = pv.squeeze()
Pnew = pvnew.squeeze()
pvhash = pvhash.squeeze()
swash = swash.squeeze()
np.savez('twapv_complete_terms', X=X,Y=Y,P=P)
else:
npzfile = np.load('twapv_complete_terms.npz')
X = npzfile['X']
Y = npzfile['Y']
P = npzfile['P']
fhgeo.close()
fh.close()
fh2.close()
return (X,Y,P,pvhash,Pnew,swash)
def plot_twapv(geofil,vgeofil,fil,fil2,xstart,xend,ystart,yend,zs,ze,meanax,
fil3=None,plotterms = [0,1,10,11,12,13], swashperc = 1,
cmaxpercfactor = 1,cmaxpercfactorpvhash=15,cmaxpercfactorPnew=15, savfil=None,savfilep=None,alreadysaved=False):
X,Y,P,pvhash,Pnew,swash = extract_twapv_terms(geofil,vgeofil,fil,fil2,
xstart,xend,ystart,yend,zs,ze,meanax,alreadysaved=alreadysaved,fil3=fil3)
cmax = np.nanpercentile(P,[cmaxpercfactor,100-cmaxpercfactor])
cmax = np.max(np.fabs(cmax))
fig,ax = plt.subplots(np.int8(np.ceil(P.shape[-1]/2)),2,
sharex=True,sharey=True,figsize=(12, 9))
ti = ['(a)','(b)','(c)','(d)','(e)','(f)','(g)','(h)',
'(i)','(j)','(k)','(l)','(m)','(n)','(o)','(p)','(q)','(r)']
labx = [ r'$(\hat{u}\hat{u}_{\tilde{x}})_{\tilde{y}}$',
r'$(\hat{v}\hat{u}_{\tilde{y}})_{\tilde{y}}$',
r'$(\hat{\varpi}\hat{u}_{\tilde{b}})_{\tilde{y}}$',
r'$(-f\hat{v})_{\tilde{y}}$',
r'$(\overline{m_{\tilde{x}}})_{\tilde{y}}$',
r"""$(\frac{1}{\overline{h}}(\overline{h}\widehat{u^{\prime \prime}u^{\prime \prime}}+\frac{1}{2}\overline{\zeta^{\prime 2}})_{\tilde{x}})_{\tilde{y}}$""",
r"""$(\frac{1}{\overline{h}}(\overline{h}\widehat{u^{\prime \prime}v^{\prime \prime}})_{\tilde{y}}$""",
r"""$(\frac{1}{\overline{h}}(\overline{h}\widehat{u^{\prime \prime}\varpi^{\prime \prime}} + \overline{\zeta^{\prime}m_{\tilde{x}}^{\prime}})_{\tilde{b}})_{\tilde{y}}$""",
r'$(-\widehat{X^H})_{\tilde{y}}$',
r'$(-\widehat{X^V})_{\tilde{y}}$']
laby = [ r'$(-\hat{u}\hat{v}_{\tilde{x}})_{\tilde{x}}$',
r'$(-\hat{v}\hat{v}_{\tilde{y}})_{\tilde{x}}$',
r'$(-\hat{\varpi}\hat{v}_{\tilde{b}})_{\tilde{x}}$',
r'$(-f\hat{u})_{\tilde{x}}$',
r'$(-\overline{m_{\tilde{y}}})_{\tilde{x}}$',
r"""$(-\frac{1}{\overline{h}}(\overline{h}\widehat{u^{\prime \prime}v^{\prime \prime}})_{\tilde{x}})_{\tilde{x}}$""",
r"""$(-\frac{1}{\overline{h}}(\overline{h}\widehat{v^{\prime \prime}v^{\prime \prime}}+\frac{1}{2}\overline{\zeta^{\prime 2}})_{\tilde{y}})_{\tilde{x}}$""",
r"""$(-\frac{1}{\overline{h}}(\overline{h}\widehat{v^{\prime \prime}\varpi^{\prime \prime}} + \overline{\zeta^{\prime}m_{\tilde{y}}^{\prime}})_{\tilde{b}})_{\tilde{x}}$""",
r'$(\widehat{Y^H})_{\tilde{x}}$',
r'$(\widehat{Y^V})_{\tilde{x}}$']
for i in range(P.shape[-1]):
axc = ax.ravel()[i]
im = m6plot((X,Y,P[:,:,i]),axc,vmax=cmax,vmin=-cmax,ptype='imshow',
txt=labx[i]+' + '+laby[i], ylim=(-2500,0),
cmap='RdBu_r', cbar=False)
if fil3:
cs = axc.contour(X,Y,swash,np.array([swashperc]), colors='k')
if i % 2 == 0:
axc.set_ylabel('z (m)')
if i > np.size(ax)-3:
xdegtokm(axc,0.5*(ystart+yend))
fig.tight_layout()
cb = fig.colorbar(im, ax=ax.ravel().tolist())
cb.formatter.set_powerlimits((0, 0))
cb.update_ticks()
if savfil:
plt.savefig(savfil+'.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
im = m6plot((X,Y,np.sum(P,axis=2)),vmax=cmax,vmin=-cmax,ptype='imshow',cmap='RdBu_r',ylim=(-2500,0))
if savfil:
plt.savefig(savfil+'res.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
fig,ax = plt.subplots(np.int8(np.ceil(len(plotterms)/2)),2,
sharex=True,sharey=True,figsize=(12,7))
cmaxpvhash = np.nanpercentile(pvhash,
[cmaxpercfactorpvhash,100-cmaxpercfactorpvhash])
cmaxpvhash = np.max(np.fabs(cmaxpvhash))
cmax = np.nanpercentile(Pnew,
[cmaxpercfactorPnew,100-cmaxpercfactorPnew])
cmax = np.max(np.fabs(cmax))
lab = [ r"$-\hat{u}\Pi^{\#}_{\tilde{x}}$",
r"$-\hat{v}\Pi^{\#}_{\tilde{y}}$",
r"$\Pi^{\#}(\bar{h} \hat{\varpi})_{\tilde{b}}$",
r"$\frac{(\hat{\varpi}\hat{u}_{\tilde{b}})_{\tilde{y}}}{\bar{h}}$",
r"""$\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{u^{\prime \prime}u^{\prime \prime}}+\frac{1}{2}\overline{\zeta^{\prime 2}})_{\tilde{x}})_{\tilde{y}}$""",
r"""$\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{u^{\prime \prime}v^{\prime \prime}})_{\tilde{y}})_{\tilde{y}}$""",
r"""$\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{u^{\prime \prime}\varpi^{\prime \prime}} + \overline{\zeta^{\prime}m_{\tilde{x}}^{\prime}})_{\tilde{b}})_{\tilde{y}}$""",
r'$-\frac{1}{\bar{h}}(\widehat{X^H})_{\tilde{y}}$',
r'$-\frac{1}{\bar{h}}(\widehat{X^V})_{\tilde{y}}$',
r'$-\frac{(\hat{\varpi}\hat{v}_{\tilde{b}})_{\tilde{x}}}{\bar{h}}$',
r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{u^{\prime \prime}v^{\prime \prime}})_{\tilde{x}})_{\tilde{x}}$""",
#r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{v^{\prime \prime}v^{\prime \prime}}+\frac{1}{2}\overline{\zeta^{\prime 2}})_{\tilde{y}})_{\tilde{x}}$""",
r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{v^{\prime \prime}v^{\prime \prime}})_{\tilde{y}})_{\tilde{x}}$""",
#r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{h}}(\bar{h}\widehat{v^{\prime \prime}\varpi^{\prime \prime}} + \overline{\zeta^{\prime}m_{\tilde{y}}^{\prime}})_{\tilde{b}})_{\tilde{x}}$""",
r"""$-\frac{1}{\bar{h}}(\frac{1}{\bar{\zeta_{\tilde{b}}}}(\overline{\zeta^{\prime}m_{\tilde{y}}^{\prime}})_{\tilde{b}})_{\tilde{x}}$""",
r'$\frac{1}{\bar{h}}(\widehat{Y^H})_{\tilde{x}}$',
r'$\frac{1}{\bar{h}}(\widehat{Y^V})_{\tilde{x}}$',
r'$B_{\tilde{x} \tilde{y}} - B_{\tilde{y} \tilde{x}}$']
matplotlib.rcParams['contour.negative_linestyle'] = 'solid'
for i,p in enumerate(plotterms):
axc = ax.ravel()[i]
im = m6plot((X,Y,Pnew[:,:,p]),axc,vmax=cmax,vmin=-cmax,ptype='imshow',
ylim=(-1200,0), txt=lab[p],
cmap='RdBu_r', cbar=False)
im2 = axc.contour(X,Y,pvhash,np.logspace(-6,-5.5,5),colors='grey',linewidths=2)
im2.clabel(inline=True,fmt="%.1e")
if fil3:
cs = axc.contour(X,Y,swash,np.array([swashperc]), colors='k')
if i % 2 == 0:
axc.set_ylabel('z (m)')
if i > np.size(ax)-3:
xdegtokm(axc,0.5*(ystart+yend))
fig.tight_layout()
cb = fig.colorbar(im, ax=ax.ravel().tolist())
cb.formatter.set_powerlimits((0, 0))
cb.update_ticks()
if savfil:
plt.savefig(savfil+'Pnew.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
im = m6plot((X,Y,np.sum(Pnew,axis=2)),ptype='imshow',vmax=cmax,vmin=-cmax,cmap='RdBu_r',ylim=(-2500,0))
if savfil:
plt.savefig(savfil+'Pnewres.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
fig,ax = plt.subplots(1,2,sharex=True,sharey=True,figsize=(10, 3))
im = m6plot((X,Y,np.nansum(Pnew[:,:,:2],axis=2)),
ax[0],vmax=cmax,vmin=-cmax,ptype='imshow',
ylim=(-1200,0), txt=lab[0]+lab[1],
cmap='RdBu_r', cbar=False)
im = m6plot((X,Y,np.nansum(Pnew[:,:,12:13],axis=2)),
ax[1],vmax=cmax,vmin=-cmax,ptype='imshow',
ylim=(-1200,0), txt=lab[12],
cmap='RdBu_r', cbar=False)
ax[0].set_ylabel('z (m)')
for axc in ax:
xdegtokm(axc,0.5*(ystart+yend))
im2 = axc.contour(X,Y,pvhash,np.logspace(-6,-5.5,5),colors='grey',linewidths=2)
im2.clabel(inline=True,fmt="%.1e")
axc.set_ylim(-1200,0)
fig.tight_layout()
cb = fig.colorbar(im, ax=ax.ravel().tolist())
cb.formatter.set_powerlimits((0, 0))
cb.update_ticks()
if savfil:
plt.savefig(savfil+'Pnewnew.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
cmax = np.nanpercentile(pvhash,
[cmaxpercfactorpvhash,100-cmaxpercfactorpvhash])
cmax = np.max(np.fabs(cmax))
im = m6plot((X,Y,pvhash),ptype='imshow',vmax=cmax,vmin=-cmax,cmap='RdBu_r',ylim=(-2500,0))
if fil3:
cs = axc.contour(X,Y,swash,np.array([swashperc]), colors='k')
if savfil:
plt.savefig(savfil+'pvhash.eps', dpi=300, facecolor='w', edgecolor='w',
format='eps', transparent=False, bbox_inches='tight')
else:
plt.show()
| gpl-3.0 |
doutib/lobpredict | lobpredictrst/jupyter/simple_model/create_simple_model_predict.py | 1 | 6870 |
# coding: utf-8
# # The best model parameters are given by
# ```
# author : SHAMINDRA
# data_source_dir : SC_shuffle
# test_type : validation
# model_type : RF
# RF:
# n_estimators : 100
# criterion : 'gini'
# max_features : 'auto'
# max_depth : 20
# n_jobs : 1
# SVM:
# kernel : 'rbf'
# degree : 3
# gamma : 'auto'
# tol : 0.001
# NNET:
# method1 : 'Tanh'
# neurons1 : 24
# method2 : 'Tanh'
# neurons2 : 39
# decay : 0.0001
# learning_rate : 0.001
# n_iter : 25
# random_state : 1
# ```
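# The RF block above maps onto a scikit-learn estimator roughly as follows.
# This is only an illustrative sketch -- the actual random forest was trained
# elsewhere in the project, and the keyword values are copied from the YAML
# above (random_state belongs to the NNET block, so it is omitted here).
# In[ ]:
from sklearn.ensemble import RandomForestClassifier
best_rf = RandomForestClassifier(n_estimators=100, criterion='gini',
                                 max_features='auto', max_depth=20, n_jobs=1)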
# In[66]:
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import imp
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model, datasets
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import log_loss
from sklearn.metrics import accuracy_score
import pandas as pd
# We looked at the top features from the best performing random forest. They are as below:
# In[48]:
# The top variables are:
var_importance = [(1, 'P_1_bid', 0.020001165389254737)
, (2, 'V_1_bid', 0.018358575666246449)
, (3, 'P_1_ask', 0.017058479215839299)
, (4, 'V_1_ask', 0.016953559068869958)
, (5, 'P_2_bid', 0.016908649059514971)
, (6, 'V_2_bid', 0.016219220215427665)
, (7, 'P_2_ask', 0.015039647893425838)
, (8, 'V_2_ask', 0.014497773408233052)
, (9, 'P_3_bid', 0.014321084019596746)
, (10, 'V_3_bid', 0.014158850118003859)
, (11, 'P_3_ask', 0.014101386932514923)
, (12, 'V_3_ask', 0.013911823640617986)
, (13, 'P_4_bid', 0.013838322603744435)
, (14, 'V_4_bid', 0.013668619218980316)
, (15, 'P_4_ask', 0.013413471959983998)]
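# A ranking like the one above is typically extracted from a fitted forest along
# these lines (sketch only -- `rf` and `feature_names` come from the training
# step, which is not part of this notebook):
#     ranked = sorted(zip(feature_names, rf.feature_importances_),
#                     key=lambda t: t[1], reverse=True)
#     var_importance = [(i + 1, name, imp) for i, (name, imp) in enumerate(ranked)]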
# In[33]:
# Open test and train sets
# NOTE: train_ds_ref and test_ds_ref are not defined in this notebook; the
# placeholder paths below are assumptions and should point to the project's
# gzipped CSV exports.
train_ds_ref = "train.csv.gz"  # placeholder path (assumption)
test_ds_ref = "test.csv.gz"    # placeholder path (assumption)
df_train = pd.read_csv(train_ds_ref
, compression='gzip', index_col = None)
df_test = pd.read_csv(test_ds_ref
, compression='gzip', index_col = None)
# Drop the first columns - they are not useful
df_train_clean = df_train.iloc[:,1:]
df_test_clean = df_test.iloc[:,1:]
# In[34]:
X_train_cols = list(df_train_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']].columns.values)
X_train = np.array(df_train_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']])
Y_train = np.array(df_train_clean[['labels']])[:,0]
X_test = np.array(df_test_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']])
Y_test = np.array(df_test_clean[['labels']])[:,0]
# In[38]:
# Define the labels
labels = np.unique(Y_train)
## # Scale Data
scaler = MinMaxScaler()
X_test = scaler.fit_transform(X_test)
X_train = scaler.fit_transform(X_train)
# Set up the data
logreg = linear_model.LogisticRegression(C=1e5)
# Fit
logreg.fit(X_train, Y_train)
# Predict
Y_hat = logreg.predict(X_test)
Y_probs = logreg.predict_proba(X_test)
## # Misclassification error rate
miss_err = 1 - accuracy_score(Y_test, Y_hat)
## # Log Loss
eps = 1e-15
logloss = log_loss(Y_test, Y_probs, eps = eps)
##confusion_matrix
confusion_matrix1 = confusion_matrix(y_true=Y_test, y_pred=Y_hat
, labels=labels)
# classification_report
classification_report1 = classification_report(y_true=Y_test, y_pred=Y_hat)
# Output results in a list format
result = []
result.append("confusion_matrix")
result.append(confusion_matrix1)
result.append("classification_report")
result.append(classification_report1)
result.append("logloss")
result.append(logloss)
result.append("miss_err")
result.append(miss_err)
result.append("Y_hat")
result.append(Y_hat)
# In[46]:
print(result[3])
print(Y_hat)
print(Y_probs)
# #### The predicted output for our most successful RF model is as follows
# ```
# classification_report
#
# precision recall f1-score support
#
# -1 0.99 0.98 0.98 18373
# 0 0.97 0.98 0.97 16950
# 1 0.99 0.98 0.98 15265
#
# avg / total 0.98 0.98 0.98 50588
# ```
# In[49]:
def predict_simple_linear(df_train_clean, df_test_clean):
X_train_cols = list(df_train_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']].columns.values)
X_train = np.array(df_train_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']])
Y_train = np.array(df_train_clean[['labels']])[:,0]
X_test = np.array(df_test_clean[['P_1_bid', 'V_1_bid', 'P_1_ask', 'V_1_ask', 'P_2_bid', 'V_2_bid', 'P_2_ask'
, 'V_2_ask']])
Y_test = np.array(df_test_clean[['labels']])[:,0]
# Define the labels
labels = np.unique(Y_train)
## # Scale Data
scaler = MinMaxScaler()
X_test = scaler.fit_transform(X_test)
X_train = scaler.fit_transform(X_train)
# Set up the data
logreg = linear_model.LogisticRegression(C=1e5)
# Fit
logreg.fit(X_train, Y_train)
# Predict
Y_hat = logreg.predict(X_test)
Y_probs = logreg.predict_proba(X_test)
## # Misclassification error rate
    miss_err = 1 - accuracy_score(Y_test, Y_hat)
## # Log Loss
    eps = 1e-15
logloss = log_loss(Y_test, Y_probs, eps = eps)
##confusion_matrix
confusion_matrix1 = confusion_matrix(y_true=Y_test, y_pred=Y_hat
, labels=labels)
# classification_report
classification_report1 = classification_report(y_true=Y_test, y_pred=Y_hat)
# Output results in a list format
result = []
result.append("confusion_matrix")
result.append(confusion_matrix1)
result.append("classification_report")
result.append(classification_report1)
result.append("logloss")
result.append(logloss)
result.append("miss_err")
result.append(miss_err)
result.append("Y_hat")
result.append(Y_hat)
return result
# In[62]:
linear_simple_predict = predict_simple_linear(df_train_clean = df_train_clean
, df_test_clean = df_train_clean)
# In[64]:
# Get the predicted outcomes
linear_simple_predict_vals = linear_simple_predict[len(linear_simple_predict) -1]
len(list(linear_simple_predict_vals))
# In[67]:
modl = imp.load_source('execute_model', '../../execute_model.py')
# In[ ]:
| isc |
rfoxfa/python-utils | utils/plotting.py | 1 | 1798 | """
Plotting functions.
"""
from __future__ import absolute_import
import matplotlib.pyplot as plt
import numpy as np
def hhist(items, title=None, axislabel=None, color=None, height=None, width=None, reverse=False):
"""
Plots a horizontal histogram of values and frequencies.
Arguments:
items (iterable[any]) => A list of objects.
title (Optional[str]) => A title for the resulting histogram.
axislabel (Optional[str]) => A label for the y-axis that lists the unique items in
the parameter list.
color (Optional[str]) => A matplotlib color value for coloring the histogram
(default: matplotlib's default plot color, a royal blue)
height (Optional[int]) => A height for the plot (default: 10)
width (Optional[int]) => A width for the plot (default: 20)
reverse (Optional[bool]) => Whether or not the histogram should plot from top to bottom in
order of decreasing frequency or the reverse of that.
Returns:
        None; a matplotlib figure is displayed as a side effect.
"""
# Parse the unique items and their counts.
unique_items, item_counts = np.unique(items, return_counts=True)
# Sort the items by frequency.
item_counts, unique_items = zip(*sorted(zip(item_counts, unique_items), reverse=reverse))
# Plot the frequencies.
pos = np.arange(len(unique_items)) + 0.5
plt.figure(figsize=((width or 20), (height or 10)))
plt.barh(pos, item_counts, align='center', color=color)
plt.yticks(pos, unique_items)
plt.xlabel('Frequency')
if axislabel:
plt.ylabel(axislabel)
if title:
plt.title(title)
plt.show()
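if __name__ == "__main__":
    # Minimal self-contained demo of hhist(); the sample data below are made up
    # purely for illustration.
    sample = ["red", "blue", "red", "green", "blue", "red"]
    hhist(sample, title="Colour frequencies", axislabel="Colour", height=4, width=8)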
| gpl-2.0 |
rexshihaoren/scikit-learn | examples/model_selection/plot_underfitting_overfitting.py | 230 | 2649 | """
============================
Underfitting vs. Overfitting
============================
This example demonstrates the problems of underfitting and overfitting and
how we can use linear regression with polynomial features to approximate
nonlinear functions. The plot shows the function that we want to approximate,
which is a part of the cosine function. In addition, the samples from the
real function and the approximations of different models are displayed. The
models have polynomial features of different degrees. We can see that a
linear function (polynomial with degree 1) is not sufficient to fit the
training samples. This is called **underfitting**. A polynomial of degree 4
approximates the true function almost perfectly. However, for higher degrees
the model will **overfit** the training data, i.e. it learns the noise of the
training data.
We evaluate quantitatively **overfitting** / **underfitting** by using
cross-validation. We calculate the mean squared error (MSE) on the validation
set, the higher, the less likely the model generalizes correctly from the
training data.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import PolynomialFeatures
from sklearn.linear_model import LinearRegression
from sklearn import cross_validation
np.random.seed(0)
n_samples = 30
degrees = [1, 4, 15]
true_fun = lambda X: np.cos(1.5 * np.pi * X)
X = np.sort(np.random.rand(n_samples))
y = true_fun(X) + np.random.randn(n_samples) * 0.1
plt.figure(figsize=(14, 5))
for i in range(len(degrees)):
ax = plt.subplot(1, len(degrees), i + 1)
plt.setp(ax, xticks=(), yticks=())
polynomial_features = PolynomialFeatures(degree=degrees[i],
include_bias=False)
linear_regression = LinearRegression()
pipeline = Pipeline([("polynomial_features", polynomial_features),
("linear_regression", linear_regression)])
pipeline.fit(X[:, np.newaxis], y)
# Evaluate the models using crossvalidation
scores = cross_validation.cross_val_score(pipeline,
X[:, np.newaxis], y, scoring="mean_squared_error", cv=10)
X_test = np.linspace(0, 1, 100)
plt.plot(X_test, pipeline.predict(X_test[:, np.newaxis]), label="Model")
plt.plot(X_test, true_fun(X_test), label="True function")
plt.scatter(X, y, label="Samples")
plt.xlabel("x")
plt.ylabel("y")
plt.xlim((0, 1))
plt.ylim((-2, 2))
plt.legend(loc="best")
plt.title("Degree {}\nMSE = {:.2e}(+/- {:.2e})".format(
degrees[i], -scores.mean(), scores.std()))
plt.show()
| bsd-3-clause |
levelrf/level_basestation | gr-filter/examples/fir_filter_ccc.py | 13 | 3154 | #!/usr/bin/env python
import sys
from gnuradio import gr, filter
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
try:
import scipy
except ImportError:
print "Error: could not import scipy (http://www.scipy.org/)"
sys.exit(1)
try:
import pylab
except ImportError:
print "Error: could not import pylab (http://matplotlib.sourceforge.net/)"
sys.exit(1)
class example_fir_filter_ccc(gr.top_block):
def __init__(self, N, fs, bw, tw, atten, D):
gr.top_block.__init__(self)
self._nsamps = N
self._fs = fs
self._bw = bw
self._tw = tw
self._at = atten
self._decim = D
taps = filter.firdes.low_pass_2(1, self._fs, self._bw, self._tw, self._at)
print "Num. Taps: ", len(taps)
self.src = gr.noise_source_c(gr.GR_GAUSSIAN, 1)
self.head = gr.head(gr.sizeof_gr_complex, self._nsamps)
self.filt0 = filter.fir_filter_ccc(self._decim, taps)
self.vsnk_src = gr.vector_sink_c()
self.vsnk_out = gr.vector_sink_c()
self.connect(self.src, self.head, self.vsnk_src)
self.connect(self.head, self.filt0, self.vsnk_out)
def main():
parser = OptionParser(option_class=eng_option, conflict_handler="resolve")
parser.add_option("-N", "--nsamples", type="int", default=10000,
help="Number of samples to process [default=%default]")
parser.add_option("-s", "--samplerate", type="eng_float", default=8000,
help="System sample rate [default=%default]")
parser.add_option("-B", "--bandwidth", type="eng_float", default=1000,
help="Filter bandwidth [default=%default]")
parser.add_option("-T", "--transition", type="eng_float", default=100,
help="Transition band [default=%default]")
parser.add_option("-A", "--attenuation", type="eng_float", default=80,
help="Stopband attenuation [default=%default]")
parser.add_option("-D", "--decimation", type="int", default=1,
help="Decmation factor [default=%default]")
    (options, args) = parser.parse_args()
put = example_fir_filter_ccc(options.nsamples,
options.samplerate,
options.bandwidth,
options.transition,
options.attenuation,
options.decimation)
put.run()
data_src = scipy.array(put.vsnk_src.data())
data_snk = scipy.array(put.vsnk_out.data())
# Plot the signals PSDs
nfft = 1024
f1 = pylab.figure(1, figsize=(12,10))
s1 = f1.add_subplot(1,1,1)
s1.psd(data_src, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
s1.psd(data_snk, NFFT=nfft, noverlap=nfft/4,
Fs=options.samplerate)
f2 = pylab.figure(2, figsize=(12,10))
s2 = f2.add_subplot(1,1,1)
s2.plot(data_src)
s2.plot(data_snk.real, 'g')
pylab.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
etkirsch/scikit-learn | examples/decomposition/plot_sparse_coding.py | 247 | 3846 | """
===========================================
Sparse coding with a precomputed dictionary
===========================================
Transform a signal as a sparse combination of Ricker wavelets. This example
visually compares different sparse coding methods using the
:class:`sklearn.decomposition.SparseCoder` estimator. The Ricker (also known
as Mexican hat or the second derivative of a Gaussian) is not a particularly
good kernel to represent piecewise constant signals like this one. It can
therefore be seen how much adding different widths of atoms matters and it
therefore motivates learning the dictionary to best fit your type of signals.
The richer dictionary on the right is not larger in size, heavier subsampling
is performed in order to stay on the same order of magnitude.
"""
print(__doc__)
import numpy as np
import matplotlib.pylab as pl
from sklearn.decomposition import SparseCoder
def ricker_function(resolution, center, width):
"""Discrete sub-sampled Ricker (Mexican hat) wavelet"""
x = np.linspace(0, resolution - 1, resolution)
x = ((2 / ((np.sqrt(3 * width) * np.pi ** 1 / 4)))
* (1 - ((x - center) ** 2 / width ** 2))
* np.exp((-(x - center) ** 2) / (2 * width ** 2)))
return x
def ricker_matrix(width, resolution, n_components):
"""Dictionary of Ricker (Mexican hat) wavelets"""
centers = np.linspace(0, resolution - 1, n_components)
D = np.empty((n_components, resolution))
for i, center in enumerate(centers):
D[i] = ricker_function(resolution, center, width)
D /= np.sqrt(np.sum(D ** 2, axis=1))[:, np.newaxis]
return D
resolution = 1024
subsampling = 3 # subsampling factor
width = 100
n_components = resolution // subsampling
# Compute a wavelet dictionary
D_fixed = ricker_matrix(width=width, resolution=resolution,
n_components=n_components)
D_multi = np.r_[tuple(ricker_matrix(width=w, resolution=resolution,
                      n_components=n_components // 5)
for w in (10, 50, 100, 500, 1000))]
# Generate a signal
y = np.linspace(0, resolution - 1, resolution)
first_quarter = y < resolution / 4
y[first_quarter] = 3.
y[np.logical_not(first_quarter)] = -1.
# List the different sparse coding methods in the following format:
# (title, transform_algorithm, transform_alpha, transform_n_nonzero_coefs)
estimators = [('OMP', 'omp', None, 15), ('Lasso', 'lasso_cd', 2, None), ]
pl.figure(figsize=(13, 6))
for subplot, (D, title) in enumerate(zip((D_fixed, D_multi),
('fixed width', 'multiple widths'))):
pl.subplot(1, 2, subplot + 1)
pl.title('Sparse coding against %s dictionary' % title)
pl.plot(y, ls='dotted', label='Original signal')
# Do a wavelet approximation
for title, algo, alpha, n_nonzero in estimators:
coder = SparseCoder(dictionary=D, transform_n_nonzero_coefs=n_nonzero,
transform_alpha=alpha, transform_algorithm=algo)
x = coder.transform(y)
density = len(np.flatnonzero(x))
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x, label='%s: %s nonzero coefs,\n%.2f error'
% (title, density, squared_error))
# Soft thresholding debiasing
coder = SparseCoder(dictionary=D, transform_algorithm='threshold',
transform_alpha=20)
x = coder.transform(y)
_, idx = np.where(x != 0)
x[0, idx], _, _, _ = np.linalg.lstsq(D[idx, :].T, y)
x = np.ravel(np.dot(x, D))
squared_error = np.sum((y - x) ** 2)
pl.plot(x,
label='Thresholding w/ debiasing:\n%d nonzero coefs, %.2f error' %
(len(idx), squared_error))
pl.axis('tight')
pl.legend()
pl.subplots_adjust(.04, .07, .97, .90, .09, .2)
pl.show()
| bsd-3-clause |
Unidata/MetPy | v0.9/_downloads/8591910a2b42dadcf3b05658ddd9c600/isentropic_example.py | 2 | 7222 | # Copyright (c) 2017,2018 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
===================
Isentropic Analysis
===================
The MetPy function `mpcalc.isentropic_interpolation` allows for isentropic analysis from model
analysis data in isobaric coordinates.
"""
########################################
import cartopy.crs as ccrs
import cartopy.feature as cfeature
import matplotlib.pyplot as plt
import numpy as np
import xarray as xr
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, add_timestamp
from metpy.units import units
#######################################
# **Getting the data**
#
# In this example, NARR reanalysis data for 18 UTC 04 April 1987 from the National Centers
# for Environmental Information (https://www.ncdc.noaa.gov/data-access/model-data)
# will be used.
data = xr.open_dataset(get_test_data('narr_example.nc', False))
##########################
print(list(data.variables))
#############################
# We will reduce the dimensionality of the data as it is pulled in to remove an empty time
# dimension.
# Assign data to variable names
lat = data['lat']
lon = data['lon']
lev = data['isobaric']
times = data['time']
tmp = data['Temperature'][0]
uwnd = data['u_wind'][0]
vwnd = data['v_wind'][0]
spech = data['Specific_humidity'][0]
# pint doesn't understand gpm
data['Geopotential_height'].attrs['units'] = 'meter'
hgt = data['Geopotential_height'][0]
#############################
# To properly interpolate to isentropic coordinates, the function must know the desired output
# isentropic levels. An array with these levels will be created below.
isentlevs = [296.] * units.kelvin
####################################
# **Conversion to Isentropic Coordinates**
#
# Once three dimensional data in isobaric coordinates has been pulled and the desired
# isentropic levels created, the conversion to isentropic coordinates can begin. Data will be
# passed to the function as below. The function requires that isentropic levels, isobaric
# levels, and temperature be input. Any additional inputs (in this case relative humidity, u,
# and v wind components) will be linearly interpolated to isentropic space.
isent_anal = mpcalc.isentropic_interpolation(isentlevs,
lev,
tmp,
spech,
uwnd,
vwnd,
hgt,
tmpk_out=True)
#####################################
# The output is a list, so now we will separate the variables to different names before
# plotting.
isentprs, isenttmp, isentspech, isentu, isentv, isenthgt = isent_anal
isentu.ito('kt')
isentv.ito('kt')
########################################
# A quick look at the shape of these variables will show that the data is now in isentropic
# coordinates, with the number of vertical levels as specified above.
print(isentprs.shape)
print(isentspech.shape)
print(isentu.shape)
print(isentv.shape)
print(isenttmp.shape)
print(isenthgt.shape)
#################################
# **Converting to Relative Humidity**
#
# The NARR only gives specific humidity on isobaric vertical levels, so relative humidity will
# have to be calculated after the interpolation to isentropic space.
isentrh = 100 * mpcalc.relative_humidity_from_specific_humidity(isentspech, isenttmp, isentprs)
#######################################
# **Plotting the Isentropic Analysis**
# Set up our projection
crs = ccrs.LambertConformal(central_longitude=-100.0, central_latitude=45.0)
# Coordinates to limit map area
bounds = [(-122., -75., 25., 50.)]
# Choose a level to plot, in this case 296 K
level = 0
fig = plt.figure(figsize=(17., 12.))
add_metpy_logo(fig, 120, 245, size='large')
ax = fig.add_subplot(1, 1, 1, projection=crs)
ax.set_extent(*bounds, crs=ccrs.PlateCarree())
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES, linewidth=0.5)
# Plot the surface
clevisent = np.arange(0, 1000, 25)
cs = ax.contour(lon, lat, isentprs[level, :, :], clevisent,
colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree())
ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7,
fmt='%i', rightside_up=True, use_clabeltext=True)
# Plot RH
cf = ax.contourf(lon, lat, isentrh[level, :, :], range(10, 106, 5),
cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0.05,
extendrect='True')
cb.set_label('Relative Humidity', size='x-large')
# Plot wind barbs
ax.barbs(lon.values, lat.values, isentu[level, :, :].m, isentv[level, :, :].m, length=6,
regrid_shape=20, transform=ccrs.PlateCarree())
# Make some titles
ax.set_title('{:.0f} K Isentropic Pressure (hPa), Wind (kt), Relative Humidity (percent)'
.format(isentlevs[level].m), loc='left')
add_timestamp(ax, times[0].dt, y=0.02, high_contrast=True)
fig.tight_layout()
######################################
# **Montgomery Streamfunction**
#
# The Montgomery Streamfunction, :math:`{\psi} = gdz + CpT`, is often desired because its
# gradient is proportional to the geostrophic wind in isentropic space. This can be easily
# calculated with `mpcalc.montgomery_streamfunction`.
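# As a cross-check, the streamfunction can also be formed directly from its
# definition, psi = g*z + Cp*T. The constant values below are nominal and used
# only for illustration; `mpcalc.montgomery_streamfunction` relies on MetPy's
# own constants internally.
g0 = 9.80665 * units.meter / units.second**2
cp_d = 1005. * units.joule / (units.kilogram * units.kelvin)
msf_manual = (g0 * isenthgt + cp_d * isenttmp) / 100.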
# Calculate Montgomery Streamfunction and scale by 10^-2 for plotting
msf = mpcalc.montgomery_streamfunction(isenthgt, isenttmp) / 100.
# Choose a level to plot, in this case 296 K
level = 0
fig = plt.figure(figsize=(17., 12.))
add_metpy_logo(fig, 120, 250, size='large')
ax = plt.subplot(111, projection=crs)
ax.set_extent(*bounds, crs=ccrs.PlateCarree())
ax.add_feature(cfeature.COASTLINE.with_scale('50m'), linewidth=0.75)
ax.add_feature(cfeature.STATES.with_scale('50m'), linewidth=0.5)
# Plot the surface
clevmsf = np.arange(0, 4000, 5)
cs = ax.contour(lon, lat, msf[level, :, :], clevmsf,
colors='k', linewidths=1.0, linestyles='solid', transform=ccrs.PlateCarree())
ax.clabel(cs, fontsize=10, inline=1, inline_spacing=7,
fmt='%i', rightside_up=True, use_clabeltext=True)
# Plot RH
cf = ax.contourf(lon, lat, isentrh[level, :, :], range(10, 106, 5),
cmap=plt.cm.gist_earth_r, transform=ccrs.PlateCarree())
cb = fig.colorbar(cf, orientation='horizontal', extend='max', aspect=65, shrink=0.5, pad=0.05,
extendrect='True')
cb.set_label('Relative Humidity', size='x-large')
# Plot wind barbs.
ax.barbs(lon.values, lat.values, isentu[level, :, :].m, isentv[level, :, :].m, length=6,
regrid_shape=20, transform=ccrs.PlateCarree())
# Make some titles
ax.set_title('{:.0f} K Montgomery Streamfunction '.format(isentlevs[level].m) +
r'($10^{-2} m^2 s^{-2}$), ' +
'Wind (kt), Relative Humidity (percent)', loc='left')
add_timestamp(ax, times[0].dt, y=0.02, pretext='Valid: ', high_contrast=True)
fig.tight_layout()
plt.show()
| bsd-3-clause |
del680202/MachineLearning-memo | src/tensorflow/autocoder.py | 1 | 6809 | # View more python learning tutorial on my Youtube and Youku channel!!!
# My tutorial website: https://morvanzhou.github.io/tutorials/
from __future__ import division, print_function, absolute_import
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Import MNIST data
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data/", one_hot=False)
# Visualize decoder setting
# Parameters
learning_rate = 0.01
training_epochs = 5
batch_size = 256
display_step = 1
examples_to_show = 10
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
# hidden layer settings
n_hidden_1 = 256 # 1st layer num features
n_hidden_2 = 128 # 2nd layer num features
weights = {
'encoder_h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
'encoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),
'decoder_h1': tf.Variable(tf.random_normal([n_hidden_2, n_hidden_1])),
'decoder_h2': tf.Variable(tf.random_normal([n_hidden_1, n_input])),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b2': tf.Variable(tf.random_normal([n_input])),
}
# Building the encoder
def encoder(x):
# Encoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
    # Encoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
return layer_2
# Building the decoder
def decoder(x):
    # Decoder Hidden layer with sigmoid activation #1
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
biases['decoder_b1']))
# Decoder Hidden layer with sigmoid activation #2
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
return layer_2
"""
# Visualize encoder setting
# Parameters
learning_rate = 0.01 # 0.01 this learning rate will be better! Tested
training_epochs = 10
batch_size = 256
display_step = 1
# Network Parameters
n_input = 784 # MNIST data input (img shape: 28*28)
# tf Graph input (only pictures)
X = tf.placeholder("float", [None, n_input])
# hidden layer settings
n_hidden_1 = 128
n_hidden_2 = 64
n_hidden_3 = 10
n_hidden_4 = 2
weights = {
'encoder_h1': tf.Variable(tf.truncated_normal([n_input, n_hidden_1],)),
'encoder_h2': tf.Variable(tf.truncated_normal([n_hidden_1, n_hidden_2],)),
'encoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_3],)),
'encoder_h4': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_4],)),
'decoder_h1': tf.Variable(tf.truncated_normal([n_hidden_4, n_hidden_3],)),
'decoder_h2': tf.Variable(tf.truncated_normal([n_hidden_3, n_hidden_2],)),
'decoder_h3': tf.Variable(tf.truncated_normal([n_hidden_2, n_hidden_1],)),
'decoder_h4': tf.Variable(tf.truncated_normal([n_hidden_1, n_input],)),
}
biases = {
'encoder_b1': tf.Variable(tf.random_normal([n_hidden_1])),
'encoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'encoder_b3': tf.Variable(tf.random_normal([n_hidden_3])),
'encoder_b4': tf.Variable(tf.random_normal([n_hidden_4])),
'decoder_b1': tf.Variable(tf.random_normal([n_hidden_3])),
'decoder_b2': tf.Variable(tf.random_normal([n_hidden_2])),
'decoder_b3': tf.Variable(tf.random_normal([n_hidden_1])),
'decoder_b4': tf.Variable(tf.random_normal([n_input])),
}
def encoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['encoder_h1']),
biases['encoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['encoder_h2']),
biases['encoder_b2']))
layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['encoder_h3']),
biases['encoder_b3']))
layer_4 = tf.add(tf.matmul(layer_3, weights['encoder_h4']),
biases['encoder_b4'])
return layer_4
def decoder(x):
layer_1 = tf.nn.sigmoid(tf.add(tf.matmul(x, weights['decoder_h1']),
biases['decoder_b1']))
layer_2 = tf.nn.sigmoid(tf.add(tf.matmul(layer_1, weights['decoder_h2']),
biases['decoder_b2']))
layer_3 = tf.nn.sigmoid(tf.add(tf.matmul(layer_2, weights['decoder_h3']),
biases['decoder_b3']))
layer_4 = tf.nn.sigmoid(tf.add(tf.matmul(layer_3, weights['decoder_h4']),
biases['decoder_b4']))
return layer_4
"""
# Construct model
encoder_op = encoder(X)
decoder_op = decoder(encoder_op)
# Prediction
y_pred = decoder_op
# Targets (Labels) are the input data.
y_true = X
# Define loss and optimizer, minimize the squared error
cost = tf.reduce_mean(tf.pow(y_true - y_pred, 2))
optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)
# Launch the graph
with tf.Session() as sess:
# tf.initialize_all_variables() is no longer valid as of
# 2017-03-02 when using tensorflow >= 0.12; prefer tf.global_variables_initializer()
init = tf.global_variables_initializer() if hasattr(tf, 'global_variables_initializer') else tf.initialize_all_variables()
sess.run(init)
total_batch = int(mnist.train.num_examples/batch_size)
# Training cycle
for epoch in range(training_epochs):
# Loop over all batches
for i in range(total_batch):
batch_xs, batch_ys = mnist.train.next_batch(batch_size) # max(x) = 1, min(x) = 0
# Run optimization op (backprop) and cost op (to get loss value)
_, c = sess.run([optimizer, cost], feed_dict={X: batch_xs})
# Display logs per epoch step
if epoch % display_step == 0:
print("Epoch:", '%04d' % (epoch+1),
"cost=", "{:.9f}".format(c))
print("Optimization Finished!")
# Applying encode and decode over test set
encode_decode = sess.run(
y_pred, feed_dict={X: mnist.test.images[:examples_to_show]})
# Compare original images with their reconstructions
f, a = plt.subplots(2, 10, figsize=(10, 2))
for i in range(examples_to_show):
a[0][i].imshow(np.reshape(mnist.test.images[i], (28, 28))) #Raw image
a[1][i].imshow(np.reshape(encode_decode[i], (28, 28))) # encode -> decode image
plt.show()
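# The commented-out lines below would scatter the 2-D encoder output coloured
# by digit label; they are only meaningful with the deeper, 2-unit encoder
# from the disabled configuration block above.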
# encoder_result = sess.run(encoder_op, feed_dict={X: mnist.test.images})
# plt.scatter(encoder_result[:, 0], encoder_result[:, 1], c=mnist.test.labels)
# plt.colorbar()
# plt.show()
| apache-2.0 |
Lightmatter/django-inlineformfield | .tox/py27/lib/python2.7/site-packages/IPython/html/notebookapp.py | 5 | 34560 | # coding: utf-8
"""A tornado based IPython notebook server.
Authors:
* Brian Granger
"""
from __future__ import print_function
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The IPython Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file COPYING, distributed as part of this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# stdlib
import errno
import io
import json
import logging
import os
import random
import re
import select
import signal
import socket
import sys
import threading
import time
import webbrowser
# Third party
# check for pyzmq 2.1.11
from IPython.utils.zmqrelated import check_for_zmq
check_for_zmq('2.1.11', 'IPython.html')
from jinja2 import Environment, FileSystemLoader
# Install the pyzmq ioloop. This has to be done before anything else from
# tornado is imported.
from zmq.eventloop import ioloop
ioloop.install()
# check for tornado 3.1.0
msg = "The IPython Notebook requires tornado >= 3.1.0"
try:
import tornado
except ImportError:
raise ImportError(msg)
try:
version_info = tornado.version_info
except AttributeError:
raise ImportError(msg + ", but you have < 1.1.0")
if version_info < (3,1,0):
raise ImportError(msg + ", but you have %s" % tornado.version)
from tornado import httpserver
from tornado import web
# Our own libraries
from IPython.html import DEFAULT_STATIC_FILES_PATH
from .base.handlers import Template404
from .log import log_request
from .services.kernels.kernelmanager import MappingKernelManager
from .services.notebooks.nbmanager import NotebookManager
from .services.notebooks.filenbmanager import FileNotebookManager
from .services.clusters.clustermanager import ClusterManager
from .services.sessions.sessionmanager import SessionManager
from .base.handlers import AuthenticatedFileHandler, FileFindHandler
from IPython.config import Config
from IPython.config.application import catch_config_error, boolean_flag
from IPython.core.application import BaseIPythonApplication
from IPython.core.profiledir import ProfileDir
from IPython.consoleapp import IPythonConsoleApp
from IPython.kernel import swallow_argv
from IPython.kernel.zmq.session import default_secure
from IPython.kernel.zmq.kernelapp import (
kernel_flags,
kernel_aliases,
)
from IPython.nbformat.sign import NotebookNotary
from IPython.utils.importstring import import_item
from IPython.utils import submodule
from IPython.utils.traitlets import (
Dict, Unicode, Integer, List, Bool, Bytes,
DottedObjectName, TraitError,
)
from IPython.utils import py3compat
from IPython.utils.path import filefind, get_ipython_dir
from .utils import url_path_join
#-----------------------------------------------------------------------------
# Module globals
#-----------------------------------------------------------------------------
_examples = """
ipython notebook # start the notebook
ipython notebook --profile=sympy # use the sympy profile
ipython notebook --certfile=mycert.pem # use SSL/TLS certificate
"""
#-----------------------------------------------------------------------------
# Helper functions
#-----------------------------------------------------------------------------
def random_ports(port, n):
"""Generate a list of n random ports near the given port.
The first 5 ports will be sequential, and the remaining n-5 will be
randomly selected in the range [port-2*n, port+2*n].
"""
for i in range(min(5, n)):
yield port + i
for i in range(n-5):
yield max(1, port + random.randint(-2*n, 2*n))
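# Illustrative usage (values are hypothetical): the first five candidates are
# sequential, the remainder are random ports near the requested one.
#     >>> ports = list(random_ports(8888, 10))
#     >>> ports[:5]
#     [8888, 8889, 8890, 8891, 8892]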
def load_handlers(name):
"""Load the (URL pattern, handler) tuples for each component."""
name = 'IPython.html.' + name
mod = __import__(name, fromlist=['default_handlers'])
return mod.default_handlers
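# For example, load_handlers('tree.handlers') imports IPython.html.tree.handlers
# and returns its default_handlers list of (URL pattern, handler class) tuples.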
#-----------------------------------------------------------------------------
# The Tornado web application
#-----------------------------------------------------------------------------
class NotebookWebApplication(web.Application):
def __init__(self, ipython_app, kernel_manager, notebook_manager,
cluster_manager, session_manager, log, base_url,
settings_overrides, jinja_env_options):
settings = self.init_settings(
ipython_app, kernel_manager, notebook_manager, cluster_manager,
session_manager, log, base_url, settings_overrides, jinja_env_options)
handlers = self.init_handlers(settings)
super(NotebookWebApplication, self).__init__(handlers, **settings)
def init_settings(self, ipython_app, kernel_manager, notebook_manager,
cluster_manager, session_manager, log, base_url,
settings_overrides, jinja_env_options=None):
# Python < 2.6.5 doesn't accept unicode keys in f(**kwargs), and
# base_url will always be unicode, which will in turn
# make the patterns unicode, and ultimately result in unicode
# keys in kwargs to handler._execute(**kwargs) in tornado.
# This enforces that base_url be ascii in that situation.
#
# Note that the URLs these patterns check against are escaped,
# and thus guaranteed to be ASCII: 'héllo' is really 'h%C3%A9llo'.
base_url = py3compat.unicode_to_str(base_url, 'ascii')
template_path = settings_overrides.get("template_path", os.path.join(os.path.dirname(__file__), "templates"))
jenv_opt = jinja_env_options if jinja_env_options else {}
env = Environment(loader=FileSystemLoader(template_path), **jenv_opt)
settings = dict(
# basics
log_function=log_request,
base_url=base_url,
template_path=template_path,
static_path=ipython_app.static_file_path,
static_handler_class = FileFindHandler,
static_url_prefix = url_path_join(base_url,'/static/'),
# authentication
cookie_secret=ipython_app.cookie_secret,
login_url=url_path_join(base_url,'/login'),
password=ipython_app.password,
# managers
kernel_manager=kernel_manager,
notebook_manager=notebook_manager,
cluster_manager=cluster_manager,
session_manager=session_manager,
# IPython stuff
nbextensions_path = ipython_app.nbextensions_path,
mathjax_url=ipython_app.mathjax_url,
config=ipython_app.config,
jinja2_env=env,
)
# allow custom overrides for the tornado web app.
settings.update(settings_overrides)
return settings
def init_handlers(self, settings):
# Load the (URL pattern, handler) tuples for each component.
handlers = []
handlers.extend(load_handlers('base.handlers'))
handlers.extend(load_handlers('tree.handlers'))
handlers.extend(load_handlers('auth.login'))
handlers.extend(load_handlers('auth.logout'))
handlers.extend(load_handlers('notebook.handlers'))
handlers.extend(load_handlers('nbconvert.handlers'))
handlers.extend(load_handlers('services.kernels.handlers'))
handlers.extend(load_handlers('services.notebooks.handlers'))
handlers.extend(load_handlers('services.clusters.handlers'))
handlers.extend(load_handlers('services.sessions.handlers'))
handlers.extend(load_handlers('services.nbconvert.handlers'))
# FIXME: /files/ should be handled by the Contents service when it exists
nbm = settings['notebook_manager']
if hasattr(nbm, 'notebook_dir'):
handlers.extend([
(r"/files/(.*)", AuthenticatedFileHandler, {'path' : nbm.notebook_dir}),
(r"/nbextensions/(.*)", FileFindHandler, {'path' : settings['nbextensions_path']}),
])
# prepend base_url onto the patterns that we match
new_handlers = []
for handler in handlers:
pattern = url_path_join(settings['base_url'], handler[0])
new_handler = tuple([pattern] + list(handler[1:]))
new_handlers.append(new_handler)
# add 404 on the end, which will catch everything that falls through
new_handlers.append((r'(.*)', Template404))
return new_handlers
class NbserverListApp(BaseIPythonApplication):
description="List currently running notebook servers in this profile."
flags = dict(
json=({'NbserverListApp': {'json': True}},
"Produce machine-readable JSON output."),
)
json = Bool(False, config=True,
help="If True, each line of output will be a JSON object with the "
"details from the server info file.")
def start(self):
if not self.json:
print("Currently running servers:")
for serverinfo in list_running_servers(self.profile):
if self.json:
print(json.dumps(serverinfo))
else:
print(serverinfo['url'], "::", serverinfo['notebook_dir'])
#-----------------------------------------------------------------------------
# Aliases and Flags
#-----------------------------------------------------------------------------
flags = dict(kernel_flags)
flags['no-browser']=(
{'NotebookApp' : {'open_browser' : False}},
"Don't open the notebook in a browser after startup."
)
flags['no-mathjax']=(
{'NotebookApp' : {'enable_mathjax' : False}},
"""Disable MathJax
MathJax is the javascript library IPython uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
# Add notebook manager flags
flags.update(boolean_flag('script', 'FileNotebookManager.save_script',
'Auto-save a .py script every time the .ipynb notebook is saved',
'Do not auto-save .py scripts for every notebook'))
# the flags that are specific to the frontend
# these must be scrubbed before being passed to the kernel,
# or it will raise an error on unrecognized flags
notebook_flags = ['no-browser', 'no-mathjax', 'script', 'no-script']
aliases = dict(kernel_aliases)
aliases.update({
'ip': 'NotebookApp.ip',
'port': 'NotebookApp.port',
'port-retries': 'NotebookApp.port_retries',
'transport': 'KernelManager.transport',
'keyfile': 'NotebookApp.keyfile',
'certfile': 'NotebookApp.certfile',
'notebook-dir': 'NotebookApp.notebook_dir',
'browser': 'NotebookApp.browser',
})
# remove ipkernel flags that are singletons, and don't make sense in
# multi-kernel environment:
aliases.pop('f', None)
notebook_aliases = [u'port', u'port-retries', u'ip', u'keyfile', u'certfile',
u'notebook-dir', u'profile', u'profile-dir', 'browser']
#-----------------------------------------------------------------------------
# NotebookApp
#-----------------------------------------------------------------------------
class NotebookApp(BaseIPythonApplication):
name = 'ipython-notebook'
description = """
The IPython HTML Notebook.
This launches a Tornado based HTML Notebook Server that serves up an
HTML5/Javascript Notebook client.
"""
examples = _examples
classes = IPythonConsoleApp.classes + [MappingKernelManager, NotebookManager,
FileNotebookManager, NotebookNotary]
flags = Dict(flags)
aliases = Dict(aliases)
subcommands = dict(
list=(NbserverListApp, NbserverListApp.description.splitlines()[0]),
)
kernel_argv = List(Unicode)
def _log_level_default(self):
return logging.INFO
def _log_format_default(self):
"""override default log format to include time"""
return u"%(asctime)s.%(msecs).03d [%(name)s]%(highlevel)s %(message)s"
# create requested profiles by default, if they don't exist:
auto_create = Bool(True)
# file to be opened in the notebook server
file_to_run = Unicode('', config=True)
def _file_to_run_changed(self, name, old, new):
path, base = os.path.split(new)
if path:
self.file_to_run = base
self.notebook_dir = path
# Network related information
allow_origin = Unicode('', config=True,
help="""Set the Access-Control-Allow-Origin header
Use '*' to allow any origin to access your server.
Takes precedence over allow_origin_pat.
"""
)
allow_origin_pat = Unicode('', config=True,
help="""Use a regular expression for the Access-Control-Allow-Origin header
Requests from an origin matching the expression will get replies with:
Access-Control-Allow-Origin: origin
where `origin` is the origin of the request.
Ignored if allow_origin is set.
"""
)
allow_credentials = Bool(False, config=True,
help="Set the Access-Control-Allow-Credentials: true header"
)
ip = Unicode('localhost', config=True,
help="The IP address the notebook server will listen on."
)
def _ip_changed(self, name, old, new):
if new == u'*': self.ip = u''
port = Integer(8888, config=True,
help="The port the notebook server will listen on."
)
port_retries = Integer(50, config=True,
help="The number of additional ports to try if the specified port is not available."
)
certfile = Unicode(u'', config=True,
help="""The full path to an SSL/TLS certificate file."""
)
keyfile = Unicode(u'', config=True,
help="""The full path to a private key file for usage with SSL/TLS."""
)
cookie_secret = Bytes(b'', config=True,
help="""The random bytes used to secure cookies.
By default this is a new random number every time you start the Notebook.
Set it to a value in a config file to enable logins to persist across server sessions.
Note: Cookie secrets should be kept private, do not share config files with
cookie_secret stored in plaintext (you can read the value from a file).
"""
)
def _cookie_secret_default(self):
return os.urandom(1024)
password = Unicode(u'', config=True,
help="""Hashed password to use for web authentication.
To generate, type in a python/IPython shell:
from IPython.lib import passwd; passwd()
The string should be of the form type:salt:hashed-password.
"""
)
open_browser = Bool(True, config=True,
help="""Whether to open in a browser after starting.
The specific browser used is platform dependent and
determined by the python standard library `webbrowser`
module, unless it is overridden using the --browser
(NotebookApp.browser) configuration option.
""")
browser = Unicode(u'', config=True,
help="""Specify what command to use to invoke a web
browser when opening the notebook. If not specified, the
default browser will be determined by the `webbrowser`
standard library module, which allows setting of the
BROWSER environment variable to override it.
""")
webapp_settings = Dict(config=True,
help="Supply overrides for the tornado.web.Application that the "
"IPython notebook uses.")
jinja_environment_options = Dict(config=True,
help="Supply extra arguments that will be passed to Jinja environment.")
enable_mathjax = Bool(True, config=True,
help="""Whether to enable MathJax for typesetting math/TeX
MathJax is the javascript library IPython uses to render math/LaTeX. It is
very large, so you may want to disable it if you have a slow internet
connection, or for offline use of the notebook.
When disabled, equations etc. will appear as their untransformed TeX source.
"""
)
def _enable_mathjax_changed(self, name, old, new):
"""set mathjax url to empty if mathjax is disabled"""
if not new:
self.mathjax_url = u''
base_url = Unicode('/', config=True,
help='''The base URL for the notebook server.
Leading and trailing slashes can be omitted,
and will automatically be added.
''')
def _base_url_changed(self, name, old, new):
if not new.startswith('/'):
self.base_url = '/'+new
elif not new.endswith('/'):
self.base_url = new+'/'
base_project_url = Unicode('/', config=True, help="""DEPRECATED use base_url""")
def _base_project_url_changed(self, name, old, new):
self.log.warn("base_project_url is deprecated, use base_url")
self.base_url = new
extra_static_paths = List(Unicode, config=True,
help="""Extra paths to search for serving static files.
This allows adding javascript/css to be available from the notebook server machine,
or overriding individual files in the IPython"""
)
def _extra_static_paths_default(self):
return [os.path.join(self.profile_dir.location, 'static')]
@property
def static_file_path(self):
"""return extra paths + the default location"""
return self.extra_static_paths + [DEFAULT_STATIC_FILES_PATH]
nbextensions_path = List(Unicode, config=True,
help="""paths for Javascript extensions. By default, this is just IPYTHONDIR/nbextensions"""
)
def _nbextensions_path_default(self):
return [os.path.join(get_ipython_dir(), 'nbextensions')]
mathjax_url = Unicode("", config=True,
help="""The url for MathJax.js."""
)
def _mathjax_url_default(self):
if not self.enable_mathjax:
return u''
static_url_prefix = self.webapp_settings.get("static_url_prefix",
url_path_join(self.base_url, "static")
)
# try local mathjax, either in nbextensions/mathjax or static/mathjax
for (url_prefix, search_path) in [
(url_path_join(self.base_url, "nbextensions"), self.nbextensions_path),
(static_url_prefix, self.static_file_path),
]:
self.log.debug("searching for local mathjax in %s", search_path)
try:
mathjax = filefind(os.path.join('mathjax', 'MathJax.js'), search_path)
except IOError:
continue
else:
url = url_path_join(url_prefix, u"mathjax/MathJax.js")
self.log.info("Serving local MathJax from %s at %s", mathjax, url)
return url
# no local mathjax, serve from CDN
url = u"https://cdn.mathjax.org/mathjax/latest/MathJax.js"
self.log.info("Using MathJax from CDN: %s", url)
return url
def _mathjax_url_changed(self, name, old, new):
if new and not self.enable_mathjax:
# enable_mathjax=False overrides mathjax_url
self.mathjax_url = u''
else:
self.log.info("Using MathJax: %s", new)
notebook_manager_class = DottedObjectName('IPython.html.services.notebooks.filenbmanager.FileNotebookManager',
config=True,
help='The notebook manager class to use.')
trust_xheaders = Bool(False, config=True,
help=("Whether to trust or not X-Scheme/X-Forwarded-Proto and X-Real-Ip/X-Forwarded-For headers"
"sent by the upstream reverse proxy. Necessary if the proxy handles SSL")
)
info_file = Unicode()
def _info_file_default(self):
info_file = "nbserver-%s.json"%os.getpid()
return os.path.join(self.profile_dir.security_dir, info_file)
notebook_dir = Unicode(py3compat.getcwd(), config=True,
help="The directory to use for notebooks and kernels."
)
def _notebook_dir_changed(self, name, old, new):
"""Do a bit of validation of the notebook dir."""
if not os.path.isabs(new):
# If we receive a non-absolute path, make it absolute.
self.notebook_dir = os.path.abspath(new)
return
if not os.path.isdir(new):
raise TraitError("No such notebook dir: %r" % new)
# setting App.notebook_dir implies setting notebook and kernel dirs as well
self.config.FileNotebookManager.notebook_dir = new
self.config.MappingKernelManager.root_dir = new
def parse_command_line(self, argv=None):
super(NotebookApp, self).parse_command_line(argv)
if self.extra_args:
arg0 = self.extra_args[0]
f = os.path.abspath(arg0)
self.argv.remove(arg0)
if not os.path.exists(f):
self.log.critical("No such file or directory: %s", f)
self.exit(1)
# Use config here, to ensure that it takes higher priority than
# anything that comes from the profile.
c = Config()
if os.path.isdir(f):
c.NotebookApp.notebook_dir = f
elif os.path.isfile(f):
c.NotebookApp.file_to_run = f
self.update_config(c)
def init_kernel_argv(self):
"""construct the kernel arguments"""
# Scrub frontend-specific flags
self.kernel_argv = swallow_argv(self.argv, notebook_aliases, notebook_flags)
if any(arg.startswith(u'--pylab') for arg in self.kernel_argv):
self.log.warn('\n '.join([
"Starting all kernels in pylab mode is not recommended,",
"and will be disabled in a future release.",
"Please use the %matplotlib magic to enable matplotlib instead.",
"pylab implies many imports, which can have confusing side effects",
"and harm the reproducibility of your notebooks.",
]))
# Kernel should inherit default config file from frontend
self.kernel_argv.append("--IPKernelApp.parent_appname='%s'" % self.name)
# Kernel should get *absolute* path to profile directory
self.kernel_argv.extend(["--profile-dir", self.profile_dir.location])
def init_configurables(self):
# force Session default to be secure
default_secure(self.config)
self.kernel_manager = MappingKernelManager(
parent=self, log=self.log, kernel_argv=self.kernel_argv,
connection_dir = self.profile_dir.security_dir,
)
kls = import_item(self.notebook_manager_class)
self.notebook_manager = kls(parent=self, log=self.log)
self.session_manager = SessionManager(parent=self, log=self.log)
self.cluster_manager = ClusterManager(parent=self, log=self.log)
self.cluster_manager.update_profiles()
def init_logging(self):
# This prevents double log messages because tornado uses a root logger that
# self.log is a child of. The logging module dispatches log messages to a log
# and all of its ancestors until propagate is set to False.
self.log.propagate = False
# hook up tornado 3's loggers to our app handlers
for name in ('access', 'application', 'general'):
logger = logging.getLogger('tornado.%s' % name)
logger.parent = self.log
logger.setLevel(self.log.level)
def init_webapp(self):
"""initialize tornado webapp and httpserver"""
self.webapp_settings['allow_origin'] = self.allow_origin
if self.allow_origin_pat:
self.webapp_settings['allow_origin_pat'] = re.compile(self.allow_origin_pat)
self.webapp_settings['allow_credentials'] = self.allow_credentials
self.web_app = NotebookWebApplication(
self, self.kernel_manager, self.notebook_manager,
self.cluster_manager, self.session_manager,
self.log, self.base_url, self.webapp_settings,
self.jinja_environment_options
)
if self.certfile:
ssl_options = dict(certfile=self.certfile)
if self.keyfile:
ssl_options['keyfile'] = self.keyfile
else:
ssl_options = None
self.web_app.password = self.password
self.http_server = httpserver.HTTPServer(self.web_app, ssl_options=ssl_options,
xheaders=self.trust_xheaders)
if not self.ip:
warning = "WARNING: The notebook server is listening on all IP addresses"
if ssl_options is None:
self.log.critical(warning + " and not using encryption. This "
"is not recommended.")
if not self.password:
self.log.critical(warning + " and not using authentication. "
"This is highly insecure and not recommended.")
success = None
for port in random_ports(self.port, self.port_retries+1):
try:
self.http_server.listen(port, self.ip)
except socket.error as e:
if e.errno == errno.EADDRINUSE:
self.log.info('The port %i is already in use, trying another random port.' % port)
continue
elif e.errno in (errno.EACCES, getattr(errno, 'WSAEACCES', errno.EACCES)):
self.log.warn("Permission to listen on port %i denied" % port)
continue
else:
raise
else:
self.port = port
success = True
break
if not success:
self.log.critical('ERROR: the notebook server could not be started because '
'no available port could be found.')
self.exit(1)
@property
def display_url(self):
ip = self.ip if self.ip else '[all ip addresses on your system]'
return self._url(ip)
@property
def connection_url(self):
ip = self.ip if self.ip else 'localhost'
return self._url(ip)
def _url(self, ip):
proto = 'https' if self.certfile else 'http'
return "%s://%s:%i%s" % (proto, ip, self.port, self.base_url)
def init_signal(self):
if not sys.platform.startswith('win'):
signal.signal(signal.SIGINT, self._handle_sigint)
signal.signal(signal.SIGTERM, self._signal_stop)
if hasattr(signal, 'SIGUSR1'):
# Windows doesn't support SIGUSR1
signal.signal(signal.SIGUSR1, self._signal_info)
if hasattr(signal, 'SIGINFO'):
# only on BSD-based systems
signal.signal(signal.SIGINFO, self._signal_info)
def _handle_sigint(self, sig, frame):
"""SIGINT handler spawns confirmation dialog"""
# register more forceful signal handler for ^C^C case
signal.signal(signal.SIGINT, self._signal_stop)
# request confirmation dialog in bg thread, to avoid
# blocking the App
thread = threading.Thread(target=self._confirm_exit)
thread.daemon = True
thread.start()
def _restore_sigint_handler(self):
"""callback for restoring original SIGINT handler"""
signal.signal(signal.SIGINT, self._handle_sigint)
def _confirm_exit(self):
"""confirm shutdown on ^C
A second ^C, or answering 'y' within 5s will cause shutdown,
otherwise original SIGINT handler will be restored.
This doesn't work on Windows.
"""
# FIXME: remove this delay when pyzmq dependency is >= 2.1.11
time.sleep(0.1)
info = self.log.info
info('interrupted')
print(self.notebook_info())
sys.stdout.write("Shutdown this notebook server (y/[n])? ")
sys.stdout.flush()
r,w,x = select.select([sys.stdin], [], [], 5)
if r:
line = sys.stdin.readline()
if line.lower().startswith('y') and 'n' not in line.lower():
self.log.critical("Shutdown confirmed")
ioloop.IOLoop.instance().stop()
return
else:
print("No answer for 5s:", end=' ')
print("resuming operation...")
# no answer, or answer is no:
# set it back to original SIGINT handler
# use IOLoop.add_callback because signal.signal must be called
# from main thread
ioloop.IOLoop.instance().add_callback(self._restore_sigint_handler)
def _signal_stop(self, sig, frame):
self.log.critical("received signal %s, stopping", sig)
ioloop.IOLoop.instance().stop()
def _signal_info(self, sig, frame):
print(self.notebook_info())
def init_components(self):
"""Check the components submodule, and warn if it's unclean"""
status = submodule.check_submodule_status()
if status == 'missing':
self.log.warn("components submodule missing, running `git submodule update`")
submodule.update_submodules(submodule.ipython_parent())
elif status == 'unclean':
self.log.warn("components submodule unclean, you may see 404s on static/components")
self.log.warn("run `setup.py submodule` or `git submodule update` to update")
@catch_config_error
def initialize(self, argv=None):
super(NotebookApp, self).initialize(argv)
self.init_logging()
self.init_kernel_argv()
self.init_configurables()
self.init_components()
self.init_webapp()
self.init_signal()
def cleanup_kernels(self):
"""Shutdown all kernels.
The kernels will shut down themselves when this process no longer exists,
but explicit shutdown allows the KernelManagers to cleanup the connection files.
"""
self.log.info('Shutting down kernels')
self.kernel_manager.shutdown_all()
def notebook_info(self):
"Return the current working directory and the server url information"
info = self.notebook_manager.info_string() + "\n"
info += "%d active kernels \n" % len(self.kernel_manager._kernels)
return info + "The IPython Notebook is running at: %s" % self.display_url
def server_info(self):
"""Return a JSONable dict of information about this server."""
return {'url': self.connection_url,
'hostname': self.ip if self.ip else 'localhost',
'port': self.port,
'secure': bool(self.certfile),
'base_url': self.base_url,
'notebook_dir': os.path.abspath(self.notebook_dir),
}
def write_server_info_file(self):
"""Write the result of server_info() to the JSON file info_file."""
with open(self.info_file, 'w') as f:
json.dump(self.server_info(), f, indent=2)
def remove_server_info_file(self):
"""Remove the nbserver-<pid>.json file created for this server.
Ignores the error raised when the file has already been removed.
"""
try:
os.unlink(self.info_file)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def start(self):
""" Start the IPython Notebook server app, after initialization
This method takes no arguments so all configuration and initialization
must be done prior to calling this method."""
if self.subapp is not None:
return self.subapp.start()
info = self.log.info
for line in self.notebook_info().split("\n"):
info(line)
info("Use Control-C to stop this server and shut down all kernels (twice to skip confirmation).")
self.write_server_info_file()
if self.open_browser or self.file_to_run:
try:
browser = webbrowser.get(self.browser or None)
except webbrowser.Error as e:
self.log.warn('No web browser found: %s.' % e)
browser = None
if self.file_to_run:
fullpath = os.path.join(self.notebook_dir, self.file_to_run)
if not os.path.exists(fullpath):
self.log.critical("%s does not exist" % fullpath)
self.exit(1)
uri = url_path_join('notebooks', self.file_to_run)
else:
uri = 'tree'
if browser:
b = lambda : browser.open(url_path_join(self.connection_url, uri),
new=2)
threading.Thread(target=b).start()
try:
ioloop.IOLoop.instance().start()
except KeyboardInterrupt:
info("Interrupted...")
finally:
self.cleanup_kernels()
self.remove_server_info_file()
def list_running_servers(profile='default'):
"""Iterate over the server info files of running notebook servers.
Given a profile name, find nbserver-* files in the security directory of
that profile, and yield dicts of their information, each one pertaining to
a currently running notebook server instance.
"""
pd = ProfileDir.find_profile_dir_by_name(get_ipython_dir(), name=profile)
for file in os.listdir(pd.security_dir):
if file.startswith('nbserver-'):
with io.open(os.path.join(pd.security_dir, file), encoding='utf-8') as f:
yield json.load(f)
#-----------------------------------------------------------------------------
# Main entry point
#-----------------------------------------------------------------------------
launch_new_instance = NotebookApp.launch_instance
| mit |
Eric89GXL/scikit-learn | doc/sphinxext/gen_rst.py | 1 | 39133 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from time import time
import os
import re
import shutil
import traceback
import glob
import sys
import warnings  # needed for warnings.warn() in make_thumbnail()
from StringIO import StringIO
import cPickle
import urllib2
import gzip
import posixpath
try:
from PIL import Image
except ImportError:
import Image
import matplotlib
matplotlib.use('Agg')
import token
import tokenize
import numpy as np
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
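# Tee duplicates every write() and flush() to both underlying streams; it is
# used below in generate_file_rst() as Tee(sys.stdout, my_buffer) so example
# output is printed to the console and captured for the generated rst page.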
###############################################################################
# Documentation link resolver objects
def get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
resp = urllib2.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
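# Illustrative sketch (the searchindex path is an assumption about a local
# Sphinx build, not something this module defines):
#     >>> sindex = get_data('_build/html/searchindex.js')
#     >>> filenames, objects = parse_sphinx_searchindex(sindex)
#     >>> len(filenames), len(objects)   # sizes depend on the built docs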
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
'URLs (doc_url cannot start with "http://)"')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[value.keys()[0]]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
for comb_name in comb_names:
if html.find(comb_name) >= 0:
url = link + '#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
# replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
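# Minimal usage sketch (the URL, object names, and page path below are
# illustrative assumptions, not values taken from this module):
#     >>> resolver = SphinxDocLinkResolver('http://scikit-learn.org/stable')
#     >>> cobj = {'name': 'KMeans', 'module': 'sklearn.cluster',
#     ...         'module_short': 'sklearn.cluster'}
#     >>> resolver.resolve(cobj, this_url='auto_examples/cluster/plot_x.html')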
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_classifier_comparison_1.png': (1, 600),
'plot_outlier_detection_1.png': (3, 372),
'plot_gp_regression_1.png': (2, 250),
'plot_adaboost_twoclass_1.png': (1, 372),
'plot_compare_methods_1.png': (1, 349)}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
lines = file(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
tokens = tokenize.generate_tokens(iter(lines).next)
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery",
"Please check your example's layout",
" and make sure it's correct")
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(app.builder.srcdir + '/../' + 'examples')
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
# we create an index.rst with all examples
fhindex = file(os.path.join(root_dir, 'index.rst'), 'w')
#Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
display: none;
}
.figure {
float: left;
margin: 10px;
-webkit-border-radius: 10px; /* Saf3-4, iOS 1-3.2, Android <1.6 */
-moz-border-radius: 10px; /* FF1-3.6 */
border-radius: 10px; /* Opera 10.5, IE9, Saf5, Chrome, FF4, iOS 4, Android 2.1+ */
border: 2px solid #fff;
background-color: white;
/* --> Thumbnail image size */
width: 150px;
height: 100px;
-webkit-background-size: 150px 100px; /* Saf3-4 */
-moz-background-size: 150px 100px; /* FF3.6 */
}
.figure img {
display: inline;
}
div.docstringWrapper p.caption {
display: block;
-webkit-box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.0);
-moz-box-shadow: 0px 0px 20px rgba(0, 0, 0, .0); /* FF3.5 - 3.6 */
box-shadow: 0px 0px 20px rgba(0, 0, 0, 0.0); /* Opera 10.5, IE9, FF4+, Chrome 10+ */
padding: 0px;
border: white;
}
div.docstringWrapper p {
display: none;
background-color: white;
-webkit-box-shadow: 0px 0px 20px rgba(0, 0, 0, 1.00);
-moz-box-shadow: 0px 0px 20px rgba(0, 0, 0, 1.00); /* FF3.5 - 3.6 */
box-shadow: 0px 0px 20px rgba(0, 0, 0, 1.00); /* Opera 10.5, IE9, FF4+, Chrome 10+ */
padding: 13px;
margin-top: 0px;
border-style: solid;
border-width: 1px;
}
</style>
.. raw:: html
<script type="text/javascript">
function animateClone(e){
var position;
position = $(this).position();
var clone = $(this).closest('.thumbnailContainer').find('.clonedItem');
var clone_fig = clone.find('.figure');
clone.css("left", position.left - 70).css("top", position.top - 70).css("position", "absolute").css("z-index", 1000).css("background-color", "white");
var cloneImg = clone_fig.find('img');
clone.show();
clone.animate({
height: "270px",
width: "320px"
}, 0
);
cloneImg.css({
'max-height': "200px",
'max-width': "280px"
});
cloneImg.animate({
height: "200px",
width: "280px"
}, 0
);
clone_fig.css({
'margin-top': '20px',
});
clone_fig.show();
clone.find('p').css("display", "block");
clone_fig.css({
height: "240",
width: "305px"
});
cloneP_height = clone.find('p.caption').height();
clone_fig.animate({
height: (200 + cloneP_height)
}, 0
);
clone.bind("mouseleave", function(e){
clone.animate({
height: "100px",
width: "150px"
}, 10, function(){$(this).hide();});
clone_fig.animate({
height: "100px",
width: "150px"
}, 10, function(){$(this).hide();});
});
} //end animateClone()
$(window).load(function () {
$(".figure").css("z-index", 1);
$(".docstringWrapper").each(function(i, obj){
var clone;
var $obj = $(obj);
clone = $obj.clone();
clone.addClass("clonedItem");
clone.appendTo($obj.closest(".thumbnailContainer"));
clone.hide();
$obj.bind("mouseenter", animateClone);
}); // end each
}); // end
</script>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = file(example_file).readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
tokens = tokenize.generate_tokens(lines.__iter__().next)
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif ((tok_type == 'STRING') and check_docstring):
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = filter(lambda x: x.endswith('.py'), file_list)
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
print 80 * '_'
print ('Example directory %s does not have a README.txt file' %
src_dir)
print 'Skipping this directory'
print 80 * '_'
return
fhindex.write("""
%s
""" % file(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
for fname in sorted_listdir:
if fname.endswith('py'):
generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, fdocstring, _ = extract_docstring(new_fname, True)
thumb = os.path.join(dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(dir, fname).replace(os.path.sep, '_')
fhindex.write("""
.. raw:: html
<div class="thumbnailContainer">
<div class="docstringWrapper">
""")
fhindex.write('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if dir != '.':
fhindex.write(' :target: ./%s/%s.html\n\n' % (dir,
fname[:-3]))
else:
fhindex.write(' :target: ./%s.html\n\n' % link_name[:-3])
fhindex.write(""" :ref:`example_%s`
.. raw:: html
<p>%s
</p></div>
</div>
.. toctree::
:hidden:
%s/%s
""" % (link_name, fdocstring, dir, fname[:-3]))
fhindex.write("""
.. raw:: html
<div style="clear: both"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) / 2, (height - height_sc) / 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
os.system("optipng -quiet -o 9 '{0}'".format(out_fname))
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
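# Illustrative call (file names are assumptions): make_thumbnail('plot_foo_1.png',
# 'thumb/plot_foo.png', 400, 280) scales the image down preserving its aspect
# ratio, then pastes it centered on a 400x280 white background before saving.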
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
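# For instance, get_short_module_name('sklearn.cluster.k_means_', 'KMeans')
# should return 'sklearn.cluster', the shortest prefix from which KMeans can
# still be imported (the exact result depends on the installed package layout).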
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%s.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print 'plotting %s' % fname
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example gets created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
# get variables so we can later add links to the documentation
example_code_obj = {}
for var_name, var in my_globals.iteritems():
if not hasattr(var, '__module__'):
continue
if not isinstance(var.__module__, basestring):
continue
if var.__module__.split('.')[0] not in DOCMODULES:
continue
# get the type as a string with other things stripped
tstr = str(type(var))
tstr = (tstr[tstr.find('\'')
+ 1:tstr.rfind('\'')].split('.')[-1])
# get shortened module name
module_short = get_short_module_name(var.__module__,
tstr)
cobj = {'name': tstr, 'module': var.__module__,
'module_short': module_short,
'obj_type': 'object'}
example_code_obj[var_name] = cobj
# find functions so we can later add links to the documentation
funregex = re.compile('[\w.]+\(')
with open(src_file, 'rt') as fid:
for line in fid.readlines():
if line.startswith('#'):
continue
for match in funregex.findall(line):
fun_name = match[:-1]
try:
exec('this_fun = %s' % fun_name, my_globals)
except Exception as err:
# Here, we were not able to execute the
# previous statement, either because the
# fun_name was not a function but a statement
# (print), or because the regexp didn't
# catch the whole function name :
# eg:
# X = something().blah()
# will work for something, but not blah.
continue
this_fun = my_globals['this_fun']
if not callable(this_fun):
continue
if not hasattr(this_fun, '__module__'):
continue
if not isinstance(this_fun.__module__, basestring):
continue
if (this_fun.__module__.split('.')[0]
not in DOCMODULES):
continue
# get shortened module name
fun_name_short = fun_name.split('.')[-1]
module_short = get_short_module_name(
this_fun.__module__, fun_name_short)
cobj = {'name': fun_name_short,
'module': this_fun.__module__,
'module_short': module_short,
'obj_type': 'function'}
example_code_obj[fun_name] = cobj
fid.close()
if len(example_code_obj) > 0:
# save the dictionary, so we can later add hyperlinks
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
cPickle.dump(example_code_obj, fid,
cPickle.HIGHEST_PROTOCOL)
fid.close()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
# don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
for fig_num in (fig_mngr.num for fig_mngr in
matplotlib._pylab_helpers.Gcf.get_all_fig_managers()):
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_num)
plt.savefig(image_path % fig_num)
figure_list.append(image_fname % fig_num)
except:
print 80 * '_'
print '%s is not compiling:' % fname
traceback.print_exc()
print 80 * '_'
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print " - time elapsed : %.2g sec" % time_elapsed
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path % '[1-9]')]
#for f in glob.glob(image_path % '*')]
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/stable/_images/')
# Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
# which is within `auto_examples/../images/thumbs` depending on the example.
# Because the carousel has different dimensions than those of the examples gallery,
# I did not simply reuse them all as some contained whitespace due to their default gallery
# thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
# just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
# The special carousel thumbnails are written directly to _build/html/stable/_images/,
# as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
# auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
# have it happen with the rest. Ideally these should be written to 'thumb_file' as well, and then
# copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
try:
if exception is not None:
return
print 'Embedding documentation hyperlinks in examples..'
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
'http://matplotlib.org')
doc_resolvers['numpy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/numpy-1.6.0')
doc_resolvers['scipy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/scipy-0.11.0/reference')
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print '\tprocessing: %s' % fname
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = cPickle.load(fid)
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.iteritems():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
if link is not None:
parts = name.split('.')
name_html = orig_pattern % parts[0]
for part in parts[1:]:
name_html += period + orig_pattern % part
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
for name, link in str_repl.iteritems():
line = line.replace(name, link)
fid.write(line.encode('utf-8'))
except urllib2.HTTPError, e:
print ("The following HTTP Error has occurred:\n")
print e.code
except urllib2.URLError, e:
print ("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding due to a URL Error: \n")
print e.args
print '[done]'
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
| bsd-3-clause |
davidgbe/scikit-learn | examples/cluster/plot_mini_batch_kmeans.py | 265 | 4081 | """
====================================================================
Comparison of the K-Means and MiniBatchKMeans clustering algorithms
====================================================================
We want to compare the performance of the MiniBatchKMeans and KMeans:
the MiniBatchKMeans is faster, but gives slightly different results (see
:ref:`mini_batch_kmeans`).
We will cluster a set of data, first with KMeans and then with
MiniBatchKMeans, and plot the results.
We will also plot the points that are labelled differently between the two
algorithms.
"""
print(__doc__)
import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cluster import MiniBatchKMeans, KMeans
from sklearn.metrics.pairwise import pairwise_distances_argmin
from sklearn.datasets.samples_generator import make_blobs
##############################################################################
# Generate sample data
np.random.seed(0)
batch_size = 45
centers = [[1, 1], [-1, -1], [1, -1]]
n_clusters = len(centers)
X, labels_true = make_blobs(n_samples=3000, centers=centers, cluster_std=0.7)
##############################################################################
# Compute clustering with KMeans
k_means = KMeans(init='k-means++', n_clusters=3, n_init=10)
t0 = time.time()
k_means.fit(X)
t_batch = time.time() - t0
k_means_labels = k_means.labels_
k_means_cluster_centers = k_means.cluster_centers_
k_means_labels_unique = np.unique(k_means_labels)
##############################################################################
# Compute clustering with MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=3, batch_size=batch_size,
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(X)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
##############################################################################
# Plot result
fig = plt.figure(figsize=(8, 3))
fig.subplots_adjust(left=0.02, right=0.98, bottom=0.05, top=0.9)
colors = ['#4EACC5', '#FF9C34', '#4E9A06']
# We want the same color for the same cluster in both the MiniBatchKMeans and
# the KMeans plots, so pair each KMeans cluster center with its closest
# MiniBatchKMeans center.
order = pairwise_distances_argmin(k_means_cluster_centers,
mbk_means_cluster_centers)
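# order[k] is the index of the MiniBatchKMeans center closest to the k-th
# KMeans center, so corresponding clusters are drawn with the same color below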
# KMeans
ax = fig.add_subplot(1, 3, 1)
for k, col in zip(range(n_clusters), colors):
my_members = k_means_labels == k
cluster_center = k_means_cluster_centers[k]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('KMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' % (
t_batch, k_means.inertia_))
# MiniBatchKMeans
ax = fig.add_subplot(1, 3, 2)
for k, col in zip(range(n_clusters), colors):
my_members = mbk_means_labels == order[k]
cluster_center = mbk_means_cluster_centers[order[k]]
ax.plot(X[my_members, 0], X[my_members, 1], 'w',
markerfacecolor=col, marker='.')
ax.plot(cluster_center[0], cluster_center[1], 'o', markerfacecolor=col,
markeredgecolor='k', markersize=6)
ax.set_title('MiniBatchKMeans')
ax.set_xticks(())
ax.set_yticks(())
plt.text(-3.5, 1.8, 'train time: %.2fs\ninertia: %f' %
(t_mini_batch, mbk.inertia_))
# Initialise the `different` array to all False (the labels only take values
# 0 to n_clusters - 1, so comparing against 4 can never be True)
different = (mbk_means_labels == 4)
ax = fig.add_subplot(1, 3, 3)
for k in range(n_clusters):
different += ((k_means_labels == k) != (mbk_means_labels == order[k]))
identic = np.logical_not(different)
ax.plot(X[identic, 0], X[identic, 1], 'w',
markerfacecolor='#bbbbbb', marker='.')
ax.plot(X[different, 0], X[different, 1], 'w',
markerfacecolor='m', marker='.')
ax.set_title('Difference')
ax.set_xticks(())
ax.set_yticks(())
plt.show()
| bsd-3-clause |
minhlongdo/scipy | scipy/integrate/odepack.py | 62 | 9420 | # Author: Travis Oliphant
from __future__ import division, print_function, absolute_import
__all__ = ['odeint']
from . import _odepack
from copy import copy
import warnings
class ODEintWarning(Warning):
pass
_msgs = {2: "Integration successful.",
1: "Nothing was done; the integration time was 0.",
-1: "Excess work done on this call (perhaps wrong Dfun type).",
-2: "Excess accuracy requested (tolerances too small).",
-3: "Illegal input detected (internal error).",
-4: "Repeated error test failures (internal error).",
-5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
-6: "Error weight became zero during problem.",
-7: "Internal workspace insufficient to finish (internal error)."
}
def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0,
ml=None, mu=None, rtol=None, atol=None, tcrit=None, h0=0.0,
hmax=0.0, hmin=0.0, ixpr=0, mxstep=0, mxhnil=0, mxordn=12,
mxords=5, printmessg=0):
"""
Integrate a system of ordinary differential equations.
Solve a system of ordinary differential equations using lsoda from the
FORTRAN library odepack.
Solves the initial value problem for stiff or non-stiff systems
of first order ode-s::
dy/dt = func(y, t0, ...)
where y can be a vector.
*Note*: The first two arguments of ``func(y, t0, ...)`` are in the
opposite order of the arguments in the system definition function used
by the `scipy.integrate.ode` class.
Parameters
----------
func : callable(y, t0, ...)
Computes the derivative of y at t0.
y0 : array
Initial condition on y (can be a vector).
t : array
A sequence of time points for which to solve for y. The initial
value point should be the first element of this sequence.
args : tuple, optional
Extra arguments to pass to function.
Dfun : callable(y, t0, ...)
Gradient (Jacobian) of `func`.
col_deriv : bool, optional
True if `Dfun` defines derivatives down columns (faster),
otherwise `Dfun` should define derivatives across rows.
full_output : bool, optional
If True, return a dictionary of optional outputs as the second output.
printmessg : bool, optional
Whether to print the convergence message
Returns
-------
y : array, shape (len(t), len(y0))
Array containing the value of y for each desired time in t,
with the initial value `y0` in the first row.
infodict : dict, only returned if full_output == True
Dictionary containing additional output information
======= ============================================================
key meaning
======= ============================================================
'hu' vector of step sizes successfully used for each time step.
'tcur' vector with the value of t reached for each time step.
(will always be at least as large as the input times).
'tolsf' vector of tolerance scale factors, greater than 1.0,
computed when a request for too much accuracy was detected.
'tsw' value of t at the time of the last method switch
(given for each time step)
'nst' cumulative number of time steps
'nfe' cumulative number of function evaluations for each time step
'nje' cumulative number of jacobian evaluations for each time step
'nqu' a vector of method orders for each successful step.
'imxer' index of the component of largest magnitude in the
weighted local error vector (e / ewt) on an error return, -1
otherwise.
'lenrw' the length of the double work array required.
'leniw' the length of integer work array required.
'mused' a vector of method indicators for each successful time step:
1: adams (nonstiff), 2: bdf (stiff)
======= ============================================================
Other Parameters
----------------
ml, mu : int, optional
If either of these is not None and is non-negative, then the
Jacobian is assumed to be banded. These give the number of
lower and upper non-zero diagonals in this banded matrix.
For the banded case, `Dfun` should return a matrix whose
rows contain the non-zero bands (starting with the lowest diagonal).
Thus, the return matrix `jac` from `Dfun` should have shape
``(ml + mu + 1, len(y0))`` when ``ml >=0`` or ``mu >=0``.
The data in `jac` must be stored such that ``jac[i - j + mu, j]``
holds the derivative of the `i`th equation with respect to the `j`th
state variable. If `col_deriv` is True, the transpose of this
`jac` must be returned.
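For example (an illustrative case derived from the storage rule above, not an
extra requirement): with ``ml = 1``, ``mu = 1`` and three equations, `Dfun`
should return a ``(3, 3)`` array in which row 0 holds the superdiagonal
entries ``df[i]/dy[i+1]``, row 1 the main diagonal ``df[i]/dy[i]``, and
row 2 the subdiagonal ``df[i]/dy[i-1]``, with the entry for the `j`th state
variable always placed in column `j`.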
rtol, atol : float, optional
The input parameters `rtol` and `atol` determine the error
control performed by the solver. The solver will control the
vector, e, of estimated local errors in y, according to an
inequality of the form ``max-norm of (e / ewt) <= 1``,
where ewt is a vector of positive error weights computed as
``ewt = rtol * abs(y) + atol``.
rtol and atol can be either vectors the same length as y or scalars.
Defaults to 1.49012e-8.
tcrit : ndarray, optional
Vector of critical points (e.g. singularities) where integration
care should be taken.
h0 : float, (0: solver-determined), optional
The step size to be attempted on the first step.
hmax : float, (0: solver-determined), optional
The maximum absolute step size allowed.
hmin : float, (0: solver-determined), optional
The minimum absolute step size allowed.
ixpr : bool, optional
Whether to generate extra printing at method switches.
mxstep : int, (0: solver-determined), optional
Maximum number of (internally defined) steps allowed for each
integration point in t.
mxhnil : int, (0: solver-determined), optional
Maximum number of messages printed.
mxordn : int, (0: solver-determined), optional
Maximum order to be allowed for the non-stiff (Adams) method.
mxords : int, (0: solver-determined), optional
Maximum order to be allowed for the stiff (BDF) method.
See Also
--------
ode : a more object-oriented integrator based on VODE.
quad : for finding the area under a curve.
Examples
--------
The second order differential equation for the angle `theta` of a
pendulum acted on by gravity with friction can be written::
theta''(t) + b*theta'(t) + c*sin(theta(t)) = 0
where `b` and `c` are positive constants, and a prime (') denotes a
derivative. To solve this equation with `odeint`, we must first convert
it to a system of first order equations. By defining the angular
velocity ``omega(t) = theta'(t)``, we obtain the system::
theta'(t) = omega(t)
omega'(t) = -b*omega(t) - c*sin(theta(t))
Let `y` be the vector [`theta`, `omega`]. We implement this system
in python as:
>>> def pend(y, t, b, c):
... theta, omega = y
... dydt = [omega, -b*omega - c*np.sin(theta)]
... return dydt
...
We assume the constants are `b` = 0.25 and `c` = 5.0:
>>> b = 0.25
>>> c = 5.0
For initial conditions, we assume the pendulum is nearly vertical
with `theta(0)` = `pi` - 0.1, and it is initially at rest, so
`omega(0)` = 0. Then the vector of initial conditions is
>>> y0 = [np.pi - 0.1, 0.0]
We generate a solution at 101 evenly spaced samples in the interval
0 <= `t` <= 10. So our array of times is:
>>> t = np.linspace(0, 10, 101)
Call `odeint` to generate the solution. To pass the parameters
`b` and `c` to `pend`, we give them to `odeint` using the `args`
argument.
>>> from scipy.integrate import odeint
>>> sol = odeint(pend, y0, t, args=(b, c))
The solution is an array with shape (101, 2). The first column
is `theta(t)`, and the second is `omega(t)`. The following code
plots both components.
>>> import matplotlib.pyplot as plt
>>> plt.plot(t, sol[:, 0], 'b', label='theta(t)')
>>> plt.plot(t, sol[:, 1], 'g', label='omega(t)')
>>> plt.legend(loc='best')
>>> plt.xlabel('t')
>>> plt.grid()
>>> plt.show()
"""
if ml is None:
ml = -1 # changed to zero inside function call
if mu is None:
mu = -1 # changed to zero inside function call
t = copy(t)
y0 = copy(y0)
output = _odepack.odeint(func, y0, t, args, Dfun, col_deriv, ml, mu,
full_output, rtol, atol, tcrit, h0, hmax, hmin,
ixpr, mxstep, mxhnil, mxordn, mxords)
if output[-1] < 0:
warning_msg = _msgs[output[-1]] + " Run with full_output = 1 to get quantitative information."
warnings.warn(warning_msg, ODEintWarning)
elif printmessg:
warning_msg = _msgs[output[-1]]
warnings.warn(warning_msg, ODEintWarning)
if full_output:
output[1]['message'] = _msgs[output[-1]]
output = output[:-1]
if len(output) == 1:
return output[0]
else:
return output
| bsd-3-clause |
lcy-seso/Paddle | python/paddle/dataset/uci_housing.py | 1 | 3748 | # Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
UCI Housing dataset.
This module will download the dataset from
https://archive.ics.uci.edu/ml/machine-learning-databases/housing/ and
parse the training set and test set into paddle reader creators.
"""
import numpy as np
import os
import paddle.dataset.common
__all__ = ['train', 'test']
URL = 'https://archive.ics.uci.edu/ml/machine-learning-databases/housing/housing.data'
MD5 = 'd4accdce7a25600298819f8e28e8d593'
feature_names = [
'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
'PTRATIO', 'B', 'LSTAT', 'MEDV'
]
UCI_TRAIN_DATA = None
UCI_TEST_DATA = None
URL_MODEL = 'https://github.com/PaddlePaddle/book/raw/develop/01.fit_a_line/fit_a_line.tar'
MD5_MODEL = '52fc3da8ef3937822fcdd87ee05c0c9b'
def feature_range(maximums, minimums):
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
fig, ax = plt.subplots()
feature_num = len(maximums)
ax.bar(range(feature_num), maximums - minimums, color='r', align='center')
ax.set_title('feature scale')
plt.xticks(range(feature_num), feature_names)
plt.xlim([-1, feature_num])
fig.set_figheight(6)
fig.set_figwidth(10)
if not os.path.exists('./image'):
os.makedirs('./image')
fig.savefig('image/ranges.png', dpi=48)
plt.close(fig)
def load_data(filename, feature_num=14, ratio=0.8):
global UCI_TRAIN_DATA, UCI_TEST_DATA
if UCI_TRAIN_DATA is not None and UCI_TEST_DATA is not None:
return
data = np.fromfile(filename, sep=' ')
data = data.reshape(data.shape[0] // feature_num, feature_num)
maximums, minimums, avgs = data.max(axis=0), data.min(axis=0), data.sum(
axis=0) / data.shape[0]
feature_range(maximums[:-1], minimums[:-1])
for i in xrange(feature_num - 1):
data[:, i] = (data[:, i] - avgs[i]) / (maximums[i] - minimums[i])
offset = int(data.shape[0] * ratio)
UCI_TRAIN_DATA = data[:offset]
UCI_TEST_DATA = data[offset:]
def train():
"""
UCI_HOUSING training set creator.
It returns a reader creator; each sample in the reader is the normalized
features followed by the house price.
:return: Training reader creator
:rtype: callable
"""
global UCI_TRAIN_DATA
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TRAIN_DATA:
yield d[:-1], d[-1:]
return reader
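# A minimal usage sketch (illustrative only, assuming the download succeeds):
#
#   import paddle.dataset.uci_housing as uci_housing
#   for features, price in uci_housing.train()():
#       # `features` holds the 13 normalized attributes, `price` the target value
#       pass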
def test():
"""
UCI_HOUSING test set creator.
It returns a reader creator; each sample in the reader is the normalized
features followed by the house price.
:return: Test reader creator
:rtype: callable
"""
global UCI_TEST_DATA
load_data(paddle.dataset.common.download(URL, 'uci_housing', MD5))
def reader():
for d in UCI_TEST_DATA:
yield d[:-1], d[-1:]
return reader
def fetch():
paddle.dataset.common.download(URL, 'uci_housing', MD5)
def convert(path):
"""
Converts dataset to recordio format
"""
paddle.dataset.common.convert(path, train(), 1000, "uci_housing_train")
paddle.dataset.common.convert(path, test(), 1000, "uci_housing_test")
| apache-2.0 |