repo_name | path | copies | size | content | license
---|---|---|---|---|---
justincassidy/ThinkStats2 | code/hinc_soln.py | 67 | 4296 | """This file contains code used in "Think Stats",
by Allen B. Downey, available from greenteapress.com
Copyright 2014 Allen B. Downey
License: GNU GPLv3 http://www.gnu.org/licenses/gpl.html
"""
from __future__ import print_function
import numpy as np
import pandas
import hinc
import thinkplot
import thinkstats2
"""This file contains a solution to an exercise in Think Stats:
The distributions of wealth and income are sometimes modeled using
lognormal and Pareto distributions. To see which is better, let's
look at some data.
The Current Population Survey (CPS) is a joint effort of the Bureau
of Labor Statistics and the Census Bureau to study income and related
variables. Data collected in 2013 is available from
http://www.census.gov/hhes/www/cpstables/032013/hhinc/toc.htm.
I downloaded hinc06.xls, which is an Excel spreadsheet with
information about household income, and converted it to hinc06.csv,
a CSV file you will find in the repository for this book. You
will also find hinc.py, which reads the CSV file.
Extract the distribution of incomes from this dataset. Are any of the
analytic distributions in this chapter a good model of the data? A
solution to this exercise is in hinc_soln.py.
My solution generates three figures:
1) The CDF of income on a linear scale.
2) The CCDF on a log-log scale along with a Pareto model intended
to match the tail behavior.
3) The CDF on a log-x scale along with a lognormal model chosen to
match the median and inter-quartile range.
My conclusions based on these figures are:
1) The Pareto model is probably a reasonable choice for the top
10-20% of incomes.
2) The lognormal model captures the shape of the distribution better,
but the data deviate substantially from the model. With different
choices for sigma, you could match the upper or lower tail, but not
both at the same time.
In summary I would say that neither model captures the whole distribution,
so you might have to
1) look for another analytic model,
2) choose one that captures the part of the distribution that is most
relevant, or
3) avoid using an analytic model altogether.
"""
class SmoothCdf(thinkstats2.Cdf):
"""Represents a CDF based on calculated quantiles.
"""
def Render(self):
"""Because this CDF was not computed from a sample, it
should not be rendered as a step function.
"""
return self.xs, self.ps
def Prob(self, x):
"""Compute CDF(x), interpolating between known values.
"""
return np.interp(x, self.xs, self.ps)
def Value(self, p):
"""Compute inverse CDF(x), interpolating between probabilities.
"""
return np.interp(p, self.ps, self.xs)
def MakeFigures(df):
"""Plots the CDF of income in several forms.
"""
xs, ps = df.income.values, df.ps.values
cdf = SmoothCdf(xs, ps, label='data')
cdf_log = SmoothCdf(np.log10(xs), ps, label='data')
# linear plot
thinkplot.Cdf(cdf)
thinkplot.Save(root='hinc_linear',
xlabel='household income',
ylabel='CDF')
# pareto plot
# for the model I chose parameters by hand to fit the tail
xs, ys = thinkstats2.RenderParetoCdf(xmin=55000, alpha=2.5,
low=0, high=250000)
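# RenderParetoCdf returns CDF values, so 1-ys below is the model CCDF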
thinkplot.Plot(xs, 1-ys, label='model', color='0.8')
thinkplot.Cdf(cdf, complement=True)
thinkplot.Save(root='hinc_pareto',
xlabel='log10 household income',
ylabel='CCDF',
xscale='log',
yscale='log')
# lognormal plot
# for the model I estimate mu and sigma using
# percentile-based statistics
median = cdf_log.Percentile(50)
iqr = cdf_log.Percentile(75) - cdf_log.Percentile(25)
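# the IQR of a standard normal is about 1.349, so iqr/1.349 estimates sigma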
std = iqr / 1.349
# choose std to match the upper tail
std = 0.35
print(median, std)
xs, ps = thinkstats2.RenderNormalCdf(median, std, low=3.5, high=5.5)
thinkplot.Plot(xs, ps, label='model', color='0.8')
thinkplot.Cdf(cdf_log)
thinkplot.Save(root='hinc_normal',
xlabel='log10 household income',
ylabel='CDF')
def main():
df = hinc.ReadData()
MakeFigures(df)
if __name__ == "__main__":
main()
| gpl-3.0 |
yhilpisch/dx | dx/plot.py | 1 | 5805 | #
# DX Analytics
# Helper Function for Plotting
# dx_plot.py
#
# DX Analytics is a financial analytics library, mainly for
# derivatives modeling and pricing by Monte Carlo simulation
#
# (c) Dr. Yves J. Hilpisch
# The Python Quants GmbH
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see http://www.gnu.org/licenses/.
#
import numpy as np
import matplotlib as mpl; mpl.use('agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pylab import cm
def plot_option_stats(s_list, pv, de, ve):
''' Plot option prices, deltas and vegas for a set of
different initial values of the underlying.
Parameters
==========
s_list : array or list
set of initial values of the underlying
pv : array or list
present values
de : array or list
results for deltas
ve : array or list
results for vega
'''
plt.figure(figsize=(9, 7))
sub1 = plt.subplot(311)
plt.plot(s_list, pv, 'ro', label='Present Value')
plt.plot(s_list, pv, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub1.get_xticklabels(), visible=False)
sub2 = plt.subplot(312)
plt.plot(s_list, de, 'go', label='Delta')
plt.plot(s_list, de, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub2.get_xticklabels(), visible=False)
sub3 = plt.subplot(313)
plt.plot(s_list, ve, 'yo', label='Vega')
plt.plot(s_list, ve, 'b')
plt.xlabel('Strike')
plt.grid(True)
plt.legend(loc=0)
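# A minimal usage sketch (the data names below are hypothetical placeholders,
# not provided by this module):
#
#     s_list = range(80, 121, 5)   # initial values of the underlying
#     plot_option_stats(s_list, present_values, deltas, vegas)
#     plt.savefig('option_stats.png')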
def plot_option_stats_full(s_list, pv, de, ve, th, rh, ga):
''' Plot option prices, deltas, gammas, vegas, thetas and rhos for a set of
different initial values of the underlying.
Parameters
==========
s_list : array or list
set of initial values of the underlying
pv : array or list
present values
de : array or list
results for deltas
ve : array or list
results for vega
th : array or list
results for theta
rh : array or list
results for rho
ga : array or list
results for gamma
'''
plt.figure(figsize=(10, 14))
sub1 = plt.subplot(611)
plt.plot(s_list, pv, 'ro', label='Present Value')
plt.plot(s_list, pv, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub1.get_xticklabels(), visible=False)
sub2 = plt.subplot(612)
plt.plot(s_list, de, 'go', label='Delta')
plt.plot(s_list, de, 'b')
plt.grid(True)
plt.legend(loc=0)
plt.setp(sub2.get_xticklabels(), visible=False)
sub3 = plt.subplot(613)
plt.plot(s_list, ve, 'yo', label='Vega')
plt.plot(s_list, ve, 'b')
plt.grid(True)
plt.legend(loc=0)
sub4 = plt.subplot(614)
plt.plot(s_list, th, 'mo', label='Theta')
plt.plot(s_list, th, 'b')
plt.grid(True)
plt.legend(loc=0)
sub5 = plt.subplot(615)
plt.plot(s_list, rh, 'co', label='Rho')
plt.plot(s_list, rh, 'b')
plt.grid(True)
plt.legend(loc=0)
sub6 = plt.subplot(616)
plt.plot(s_list, ga, 'ko', label='Gamma')
plt.plot(s_list, ga, 'b')
plt.xlabel('Strike')
plt.grid(True)
plt.legend(loc=0)
def plot_greeks_3d(inputs, labels):
''' Plot Greeks in 3d.
Parameters
==========
inputs : list of arrays
x, y, z arrays
labels : list of strings
labels for x, y, z
'''
x, y, z = inputs
xl, yl, zl = labels
fig = plt.figure(figsize=(10, 7))
ax = fig.gca(projection='3d')
surf = ax.plot_surface(x, y, z, rstride=1, cstride=1,
cmap=cm.coolwarm, linewidth=0.5, antialiased=True)
ax.set_xlabel(xl)
ax.set_ylabel(yl)
ax.set_zlabel(zl)
fig.colorbar(surf, shrink=0.5, aspect=5)
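# A minimal usage sketch (variable names are hypothetical): x and y are
# typically meshgrids, e.g. of strikes and maturities, and z a Greek surface
# of the same shape:
#
#     K, T = np.meshgrid(np.linspace(80., 120., 25), np.linspace(0.1, 2.0, 25))
#     delta_surface = np.exp(-K / 100.) * T   # placeholder data only
#     plot_greeks_3d([K, T, delta_surface], ['strike', 'maturity', 'delta'])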
def plot_calibration_results(cali, relative=False):
''' Plot calibration results.
Parameters
==========
cali : instance of calibration class
instance has to have opt_parameters
relative : boolean
if True, then relative error reporting
if False, absolute error reporting
'''
cali.update_model_values()
mats = set(cali.option_data[:, 0])
mats = np.sort(list(mats))
fig, axarr = plt.subplots(len(mats), 2, sharex=True)
fig.set_size_inches(8, 12)
fig.subplots_adjust(wspace=0.2, hspace=0.2)
z = 0
for T in mats:
strikes = cali.option_data[cali.option_data[:, 0] == T][:, 1]
market = cali.option_data[cali.option_data[:, 0] == T][:, 2]
model = cali.model_values[cali.model_values[:, 0] == T][:, 2]
axarr[z, 0].set_ylabel('%s' % str(T)[:10])
axarr[z, 0].plot(strikes, market, label='Market Quotes')
axarr[z, 0].plot(strikes, model, 'ro', label='Model Prices')
axarr[z, 0].grid()
if T == mats[0]:
axarr[z, 0].set_title('Option Quotes')
if T == mats[-1]:
axarr[z, 0].set_xlabel('Strike')
wi = 2.
if relative is True:
axarr[z, 1].bar(strikes - wi / 2,
(model - market) / market * 100, width=wi)
else:
axarr[z, 1].bar(strikes - wi / 2, model - market, width=wi)
axarr[z, 1].grid()
if T == mats[0]:
axarr[z, 1].set_title('Differences')
if T == mats[-1]:
axarr[z, 1].set_xlabel('Strike')
z += 1
| agpl-3.0 |
samuel1208/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
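# double-centering -0.5 * D**2 gives the classical MDS kernel used by Isomap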
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
dwavesystems/dimod | dimod/sampleset.py | 1 | 63014 | # Copyright 2018 D-Wave Systems Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import copy
import itertools
import json
import numbers
import collections.abc as abc
from collections import namedtuple
import numpy as np
from numpy.lib import recfunctions
from warnings import warn
from dimod.decorators import lockable_method
from dimod.exceptions import WriteableError
from dimod.serialization.format import Formatter
from dimod.serialization.utils import (pack_samples as _pack_samples,
unpack_samples,
serialize_ndarray,
deserialize_ndarray,
serialize_ndarrays,
deserialize_ndarrays)
from dimod.utilities import LockableDict
from dimod.variables import Variables, iter_deserialize_variables
from dimod.vartypes import as_vartype, Vartype, DISCRETE
from dimod.views.samples import SampleView, SamplesArray
__all__ = ['append_data_vectors', 'append_variables', 'as_samples', 'concatenate', 'SampleSet']
def append_data_vectors(sampleset, **vectors):
"""Create a new :obj:`.SampleSet` with additional fields in
:attr:`SampleSet.record`.
Args:
sampleset (:obj:`.SampleSet`):
:obj:`.SampleSet` to build from.
**vectors (list):
Per-sample data to be appended to :attr:`SampleSet.record`. Each
keyword is a new field name and each keyword parameter should be a
list of scalar values or numpy arrays (lists and tuples will be
converted to numpy arrays).
Returns:
:obj:`.SampleSet`: SampleSet
Examples:
The following example appends a field of lists to :attr:`SampleSet.record`.
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [-1, 1]], energy=[-1.4, -1.4], vartype='SPIN')
>>> print(sampleset)
0 1 energy num_oc.
0 -1 +1 -1.4 1
1 -1 +1 -1.4 1
['SPIN', 2 rows, 2 samples, 2 variables]
>>> sampleset = dimod.append_data_vectors(sampleset, new=[[0, 1], [1, 2]])
>>> print(sampleset)
0 1 energy num_oc. new
0 -1 +1 -1.4 1 [0 1]
1 -1 +1 -1.4 1 [1 2]
['SPIN', 2 rows, 2 samples, 2 variables]
>>> print(sampleset.record.dtype)
(numpy.record, [('sample', 'i1', (2,)), ('energy', '<f8'), ('num_occurrences', '<i8'), ('new', '<i8', (2,))])
"""
record = sampleset.record
for name, vector in vectors.items():
if len(vector) != len(record.energy):
raise ValueError("Length of vector {} must be equal to number of samples.".format(name))
try:
vector = np.asarray(vector)
if vector.ndim == 1:
record = recfunctions.append_fields(record, name, vector, usemask=False, asrecarray=True)
else:
# np's append_fields cannot append a vector with a shape that
# doesn't match the base array's, so appending non-scalar data
# requires a workaround
dtype = np.dtype([(name, vector[0].dtype, vector[0].shape)])
new_arr = recfunctions.unstructured_to_structured(vector, dtype=dtype)
record = recfunctions.merge_arrays((record, new_arr), flatten=True, asrecarray=True)
except (TypeError, AttributeError):
raise ValueError("Field value type not supported.")
return SampleSet(record, sampleset.variables, sampleset.info, sampleset.vartype)
def append_variables(sampleset, samples_like, sort_labels=True):
"""Create a new :obj:`.SampleSet` with the given variables and values.
Not defined for empty sample sets. If `sample_like` is a
:obj:`.SampleSet`, its data vectors and info are ignored.
Args:
sampleset (:obj:`.SampleSet`):
:obj:`.SampleSet` to build from.
samples_like:
Samples to add to the sample set. Either a single
sample or identical in length to the sample set.
'samples_like' is an extension of NumPy's array_like_.
See :func:`.as_samples`.
sort_labels (bool, optional, default=True):
Return :attr:`.SampleSet.variables` in sorted order. For mixed
(unsortable) types, the given order is maintained.
Returns:
:obj:`.SampleSet`: New sample set with the variables/values added.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([{'a': -1, 'b': +1},
... {'a': +1, 'b': +1}],
... dimod.SPIN,
... energy=[-1.0, 1.0])
>>> new = dimod.append_variables(sampleset, {'c': -1})
>>> print(new)
a b c energy num_oc.
0 -1 +1 -1 -1.0 1
1 +1 +1 -1 1.0 1
['SPIN', 2 rows, 2 samples, 3 variables]
Add variables from another sample set to the previous example. Note
that the energies remain unchanged.
>>> another = dimod.SampleSet.from_samples([{'c': -1, 'd': +1},
... {'c': +1, 'd': +1}],
... dimod.SPIN,
... energy=[-2.0, 1.0])
>>> new = dimod.append_variables(sampleset, another)
>>> print(new)
a b c d energy num_oc.
0 -1 +1 -1 +1 -1.0 1
1 +1 +1 +1 +1 1.0 1
['SPIN', 2 rows, 2 samples, 4 variables]
.. _array_like: https://numpy.org/doc/stable/user/basics.creation.html
"""
samples, labels = as_samples(samples_like)
num_samples = len(sampleset)
# we don't handle multiple values
if samples.shape[0] == num_samples:
# we don't need to do anything, it's already the correct shape
pass
elif samples.shape[0] == 1 and num_samples:
samples = np.repeat(samples, num_samples, axis=0)
else:
msg = ("mismatched shape. The samples to append should either be "
"a single sample or should match the length of the sample "
"set. Empty sample sets cannot be appended to.")
raise ValueError(msg)
# append requires the new variables to be unique
variables = sampleset.variables
if any(v in variables for v in labels):
msg = "Appended samples cannot contain variables in sample set"
raise ValueError(msg)
new_variables = list(variables) + labels
new_samples = np.hstack((sampleset.record.sample, samples))
return type(sampleset).from_samples((new_samples, new_variables),
sampleset.vartype,
info=copy.deepcopy(sampleset.info), # make a copy
sort_labels=sort_labels,
**sampleset.data_vectors)
def as_samples(samples_like, dtype=None, copy=False, order='C'):
"""Convert a samples_like object to a NumPy array and list of labels.
Args:
samples_like (samples_like):
A collection of raw samples. `samples_like` is an extension of
NumPy's array_like_ structure. See examples below.
dtype (data-type, optional):
dtype for the returned samples array. If not provided, it is either
derived from `samples_like`, if that object has a dtype, or set to
the smallest dtype that can hold the given values.
copy (bool, optional, default=False):
If true, then samples_like is guaranteed to be copied, otherwise
it is only copied if necessary.
order ({'K', 'A', 'C', 'F'}, optional, default='C'):
Specify the memory layout of the array. See :func:`numpy.array`.
Returns:
tuple: A 2-tuple containing:
:obj:`numpy.ndarray`: Samples.
list: Variable labels
Examples:
The following examples convert a variety of samples_like objects:
NumPy arrays
>>> import numpy as np
...
>>> dimod.as_samples(np.ones(5, dtype='int8'))
(array([[1, 1, 1, 1, 1]], dtype=int8), [0, 1, 2, 3, 4])
>>> dimod.as_samples(np.zeros((5, 2), dtype='int8'))
(array([[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]], dtype=int8), [0, 1])
Lists
>>> dimod.as_samples([-1, +1, -1])
(array([[-1, 1, -1]], dtype=int8), [0, 1, 2])
>>> dimod.as_samples([[-1], [+1], [-1]])
(array([[-1],
[ 1],
[-1]], dtype=int8), [0])
Dicts
>>> dimod.as_samples({'a': 0, 'b': 1, 'c': 0}) # doctest: +SKIP
(array([[0, 1, 0]], dtype=int8), ['a', 'b', 'c'])
>>> dimod.as_samples([{'a': -1, 'b': +1}, {'a': 1, 'b': 1}]) # doctest: +SKIP
(array([[-1, 1],
[ 1, 1]], dtype=int8), ['a', 'b'])
A 2-tuple containing an array_like object and a list of labels
>>> dimod.as_samples(([-1, +1, -1], ['a', 'b', 'c']))
(array([[-1, 1, -1]], dtype=int8), ['a', 'b', 'c'])
>>> dimod.as_samples((np.zeros((5, 2), dtype='int8'), ['in', 'out']))
(array([[0, 0],
[0, 0],
[0, 0],
[0, 0],
[0, 0]], dtype=int8), ['in', 'out'])
.. _array_like: https://numpy.org/doc/stable/user/basics.creation.html
"""
if isinstance(samples_like, SampleSet):
# we implicitly support this by handling an iterable of mappings, but
# it is much faster to just do this here.
labels = list(samples_like.variables)
if dtype is None:
return samples_like.record.sample, labels
else:
return samples_like.record.sample.astype(dtype), labels
if isinstance(samples_like, tuple) and len(samples_like) == 2:
samples_like, labels = samples_like
if not isinstance(labels, list) and labels is not None:
labels = list(labels)
else:
labels = None
if isinstance(samples_like, abc.Iterator):
# if we don't check this case we can get unexpected behaviour where an
# iterator can be depleted
raise TypeError('samples_like cannot be an iterator')
if isinstance(samples_like, abc.Mapping):
return as_samples(([samples_like], labels), dtype=dtype)
if (isinstance(samples_like, list) and samples_like and
isinstance(samples_like[0], numbers.Number)):
# this is not actually necessary but it speeds up the
# samples_like = [1, 0, 1,...] case significantly
return as_samples(([samples_like], labels), dtype=dtype)
if not isinstance(samples_like, np.ndarray):
if any(isinstance(sample, abc.Mapping) for sample in samples_like):
# go through samples-like, turning the dicts into lists
samples_like, old = list(samples_like), samples_like
if labels is None:
first = samples_like[0]
if isinstance(first, abc.Mapping):
labels = list(first)
else:
labels = list(range(len(first)))
for idx, sample in enumerate(old):
if isinstance(sample, abc.Mapping):
try:
samples_like[idx] = [sample[v] for v in labels]
except KeyError:
raise ValueError("samples_like and labels do not match")
if dtype is None:
if not hasattr(samples_like, 'dtype'):
# we want to use the smallest dtype available, not yet doing any
# copying or whatever, although we do make a new array to speed
# this up
samples_like = np.asarray(samples_like)
max_ = max(-samples_like.min(initial=0),
+samples_like.max(initial=0))
if max_ <= np.iinfo(np.int8).max:
dtype = np.int8
elif max_ <= np.iinfo(np.int16).max:
dtype = np.int16
elif max_ < np.iinfo(np.int32).max:
dtype = np.int32
elif max_ < np.iinfo(np.int64).max:
dtype = np.int64
else:
raise RuntimeError
else:
dtype = samples_like.dtype
# samples-like should now be array-like
arr = np.array(samples_like, dtype=dtype, copy=copy, order=order)
if arr.ndim > 2:
raise ValueError("expected samples_like to be <= 2 dimensions")
if arr.ndim < 2:
if arr.size:
arr = np.atleast_2d(arr)
elif labels: # is not None and len > 0
arr = arr.reshape((0, len(labels)))
else:
arr = arr.reshape((0, 0))
# ok we're basically done, just need to check against the labels
if labels is None:
return arr, list(range(arr.shape[1]))
elif len(labels) != arr.shape[1]:
raise ValueError("samples_like and labels dimensions do not match")
else:
return arr, labels
def concatenate(samplesets, defaults=None):
"""Combine sample sets.
Args:
samplesets (iterable[:obj:`.SampleSet`):
Iterable of sample sets.
defaults (dict, optional):
Dictionary mapping data vector names to the corresponding default values.
Returns:
:obj:`.SampleSet`: A sample set with the same vartype and variable order as the first
given in `samplesets`.
Examples:
>>> a = dimod.SampleSet.from_samples(([-1, +1], 'ab'), dimod.SPIN, energy=-1)
>>> b = dimod.SampleSet.from_samples(([-1, +1], 'ba'), dimod.SPIN, energy=-1)
>>> ab = dimod.concatenate((a, b))
>>> ab.record.sample
array([[-1, 1],
[ 1, -1]], dtype=int8)
"""
itertup = iter(samplesets)
try:
first = next(itertup)
except StopIteration:
raise ValueError("samplesets must contain at least one SampleSet")
vartype = first.vartype
variables = first.variables
records = [first.record]
records.extend(_iter_records(itertup, vartype, variables))
# dev note: I was able to get ~2x performance boost when trying to
# implement the same functionality here by hand (I didn't know that
# this function existed then). However I think it is better to use
# numpy's function and rely on their testing etc. If however this becomes
# a performance bottleneck in the future, it might be worth changing.
record = recfunctions.stack_arrays(records, defaults=defaults,
asrecarray=True, usemask=False)
return SampleSet(record, variables, {}, vartype)
def _iter_records(samplesets, vartype, variables):
# coerce each record into the correct vartype and variable-order
for samples in samplesets:
# coerce vartype
if samples.vartype is not vartype:
samples = samples.change_vartype(vartype, inplace=False)
if samples.variables != variables:
new_record = samples.record.copy()
order = [samples.variables.index(v) for v in variables]
new_record.sample = samples.record.sample[:, order]
yield new_record
else:
# order matches so we're done
yield samples.record
def infer_vartype(samples_like):
"""Infer the vartype of the given samples-like.
Args:
A collection of samples. 'samples_like' is an extension of NumPy's
array_like_. See :func:`.as_samples`.
Returns:
The :class:`.Vartype`, or None in the case that it is ambiguous.
"""
if isinstance(samples_like, SampleSet):
return samples_like.vartype
samples, _ = as_samples(samples_like)
ones_mask = (samples == 1)
if ones_mask.all():
# either empty or all 1s, in either case ambiguous
return None
if (ones_mask ^ (samples == 0)).all():
return Vartype.BINARY
if (ones_mask ^ (samples == -1)).all():
return Vartype.SPIN
raise ValueError("given samples_like is of an unknown vartype")
class SampleSet(abc.Iterable, abc.Sized):
"""Samples and any other data returned by dimod samplers.
Args:
record (:obj:`numpy.recarray`)
A NumPy record array. Must have 'sample', 'energy' and 'num_occurrences' as fields.
The 'sample' field should be a 2D NumPy array where each row is a sample and each
column represents the value of a variable.
variables (iterable):
An iterable of variable labels, corresponding to columns in `record.sample`.
info (dict):
Information about the :class:`SampleSet` as a whole, formatted as a dict.
vartype (:class:`.Vartype`/str/set):
Variable type for the :class:`SampleSet`. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
* :class:`.ExtendedVartype.DISCRETE`, ``'DISCRETE'``
Examples:
This example creates a SampleSet out of a samples_like object (a NumPy array).
>>> import numpy as np
...
>>> sampleset = dimod.SampleSet.from_samples(np.ones(5, dtype='int8'),
... 'BINARY', 0)
>>> sampleset.variables
Variables([0, 1, 2, 3, 4])
"""
_REQUIRED_FIELDS = ['sample', 'energy', 'num_occurrences']
###############################################################################################
# Construction
###############################################################################################
def __init__(self, record, variables, info, vartype):
vartype = as_vartype(vartype, extended=True)
# make sure that record is a numpy recarray and that it has the expected fields
if not isinstance(record, np.recarray):
raise TypeError("input record must be a numpy recarray")
elif not set(self._REQUIRED_FIELDS).issubset(record.dtype.fields):
raise ValueError("input record must have {}, {} and {} as fields".format(*self._REQUIRED_FIELDS))
self._record = record
num_samples, num_variables = record.sample.shape
self._variables = variables = Variables(variables)
if len(variables) != num_variables:
msg = ("mismatch between number of variables in record.sample ({}) "
"and labels ({})").format(num_variables, len(variables))
raise ValueError(msg)
self._info = LockableDict(info)
# vartype is checked by vartype_argument decorator
self._vartype = vartype
@classmethod
def from_samples(cls, samples_like, vartype, energy, info=None,
num_occurrences=None, aggregate_samples=False,
sort_labels=True, **vectors):
"""Build a :class:`SampleSet` from raw samples.
Args:
samples_like:
A collection of raw samples. 'samples_like' is an extension of NumPy's array_like_.
See :func:`.as_samples`.
vartype (:class:`.Vartype`/str/set):
Variable type for the :class:`SampleSet`. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
* :class:`.ExtendedVartype.DISCRETE`, ``'DISCRETE'``
energy (array_like):
Vector of energies.
info (dict, optional):
Information about the :class:`SampleSet` as a whole formatted as a dict.
num_occurrences (array_like, optional):
Number of occurrences for each sample. If not provided, defaults to a vector of 1s.
aggregate_samples (bool, optional, default=False):
If True, all samples in returned :obj:`.SampleSet` are unique,
with `num_occurrences` accounting for any duplicate samples in
`samples_like`.
sort_labels (bool, optional, default=True):
Return :attr:`.SampleSet.variables` in sorted order. For mixed
(unsortable) types, the given order is maintained.
**vectors (array_like):
Other per-sample data.
Returns:
:obj:`.SampleSet`
Examples:
This example creates a SampleSet out of a samples_like object (a dict).
>>> import numpy as np
...
>>> sampleset = dimod.SampleSet.from_samples(
... dimod.as_samples({'a': 0, 'b': 1, 'c': 0}), 'BINARY', 0)
>>> sampleset.variables
Variables(['a', 'b', 'c'])
.. _array_like: https://numpy.org/doc/stable/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays
"""
if aggregate_samples:
return cls.from_samples(samples_like, vartype, energy,
info=info, num_occurrences=num_occurrences,
aggregate_samples=False,
**vectors).aggregate()
# get the samples, variable labels
samples, variables = as_samples(samples_like)
if sort_labels and variables: # need something to sort
try:
reindex, new_variables = zip(*sorted(enumerate(variables),
key=lambda tup: tup[1]))
except TypeError:
# unlike types are not sortable in python3, so we do nothing
pass
else:
if new_variables != variables:
# avoid the copy if possible
samples = samples[:, reindex]
variables = new_variables
num_samples, num_variables = samples.shape
energy = np.asarray(energy)
# num_occurrences
if num_occurrences is None:
num_occurrences = np.ones(num_samples, dtype=int)
else:
num_occurrences = np.asarray(num_occurrences)
# now construct the record
datatypes = [('sample', samples.dtype, (num_variables,)),
('energy', energy.dtype),
('num_occurrences', num_occurrences.dtype)]
for key, vector in vectors.items():
vectors[key] = vector = np.asarray(vector)
datatypes.append((key, vector.dtype, vector.shape[1:]))
record = np.rec.array(np.zeros(num_samples, dtype=datatypes))
record['sample'] = samples
record['energy'] = energy
record['num_occurrences'] = num_occurrences
for key, vector in vectors.items():
record[key] = vector
if info is None:
info = {}
return cls(record, variables, info, vartype)
# todo: this works with DQM/BinaryPolynomial, should change the name and/or
# update the docs.
@classmethod
def from_samples_bqm(cls, samples_like, bqm, **kwargs):
"""Build a sample set from raw samples and a binary quadratic model.
The binary quadratic model is used to calculate energies and set the
:class:`vartype`.
Args:
samples_like:
A collection of raw samples. 'samples_like' is an extension of NumPy's array_like.
See :func:`.as_samples`.
bqm (:obj:`.BinaryQuadraticModel`):
A binary quadratic model.
info (dict, optional):
Information about the :class:`SampleSet` as a whole formatted as a dict.
num_occurrences (array_like, optional):
Number of occurrences for each sample. If not provided, defaults to a vector of 1s.
aggregate_samples (bool, optional, default=False):
If True, all samples in returned :obj:`.SampleSet` are unique,
with `num_occurrences` accounting for any duplicate samples in
`samples_like`.
sort_labels (bool, optional, default=True):
Return :attr:`.SampleSet.variables` in sorted order. For mixed
(unsortable) types, the given order is maintained.
**vectors (array_like):
Other per-sample data.
Returns:
:obj:`.SampleSet`
Examples:
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): -1})
>>> sampleset = dimod.SampleSet.from_samples_bqm({'a': -1, 'b': 1}, bqm)
"""
# more performant to do this once, here rather than again in bqm.energies
# and in cls.from_samples
samples_like = as_samples(samples_like)
energies = bqm.energies(samples_like)
return cls.from_samples(samples_like, energy=energies, vartype=bqm.vartype, **kwargs)
@classmethod
def from_future(cls, future, result_hook=None):
"""Construct a :class:`SampleSet` referencing the result of a future computation.
Args:
future (object):
Object that contains or will contain the information needed to construct a
:class:`SampleSet`. If `future` has a :meth:`~concurrent.futures.Future.done` method,
this determines the value returned by :meth:`.SampleSet.done`.
result_hook (callable, optional):
A function that is called to resolve the future. Must accept the future and return
a :obj:`.SampleSet`. If not provided, set to
.. code-block:: python
def result_hook(future):
return future.result()
Returns:
:obj:`.SampleSet`
Notes:
The future is resolved on the first read of any of the :class:`SampleSet` properties.
Examples:
Run a dimod sampler on a single thread and load the returned future into :class:`SampleSet`.
>>> from concurrent.futures import ThreadPoolExecutor
...
>>> bqm = dimod.BinaryQuadraticModel.from_ising({}, {('a', 'b'): -1})
>>> with ThreadPoolExecutor(max_workers=1) as executor:
... future = executor.submit(dimod.ExactSolver().sample, bqm)
... sampleset = dimod.SampleSet.from_future(future)
>>> sampleset.first.energy # doctest: +SKIP
"""
obj = cls.__new__(cls)
obj._future = future
if result_hook is None:
def result_hook(future):
return future.result()
elif not callable(result_hook):
raise TypeError("expected result_hook to be callable")
obj._result_hook = result_hook
return obj
###############################################################################################
# Special Methods
###############################################################################################
def __len__(self):
"""The number of rows in record."""
return self.record.__len__()
def __iter__(self):
"""Iterate over the samples, low energy to high."""
# need to make it an iterator rather than just an iterable
return iter(self.samples(sorted_by='energy'))
def __eq__(self, other):
"""SampleSet equality."""
if not isinstance(other, SampleSet):
return False
if self.vartype != other.vartype or self.info != other.info:
return False
# check that all the fields match in record, order doesn't matter
if self.record.dtype.fields.keys() != other.record.dtype.fields.keys():
return False
for field in self.record.dtype.fields:
if field == 'sample':
continue
if not (self.record[field] == other.record[field]).all():
return False
# now check the actual samples.
if self.variables == other.variables:
return (self.record.sample == other.record.sample).all()
try:
other_idx = [other.variables.index(v) for v in self.variables]
except ValueError:
# mismatched variables
return False
return (self.record.sample == other.record.sample[:, other_idx]).all()
def __getstate__(self):
# Ensure that any futures are resolved before pickling.
self.resolve()
# we'd prefer to do super().__getstate__ but unfortunately that's not
# present, so instead we recreate the (documented) behaviour
return self.__dict__
def __repr__(self):
return "{}({!r}, {}, {}, {!r})".format(self.__class__.__name__,
self.record,
self.variables,
self.info,
self.vartype.name)
def __str__(self):
return Formatter().format(self)
###############################################################################################
# Properties
###############################################################################################
@property
def data_vectors(self):
"""The per-sample data in a vector.
Returns:
dict: A dict where the keys are the fields in the record and the
values are the corresponding arrays.
Examples:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
...                                          energy=[-1, 1])
>>> sampleset.data_vectors['energy']
array([-1, 1])
Note that this is equivalent to, and less performant than:
>>> sampleset = dimod.SampleSet.from_samples([[-1, 1], [1, 1]], dimod.SPIN,
...                                          energy=[-1, 1])
>>> sampleset.record['energy']
array([-1, 1])
"""
return {field: self.record[field] for field in self.record.dtype.names
if field != 'sample'}
@property
def first(self):
"""Sample with the lowest-energy.
Raises:
ValueError: If empty.
Example:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 1}, {('a', 'b'): 1})
>>> sampleset.first
Sample(sample={'a': -1, 'b': 1}, energy=-2.0, num_occurrences=1)
"""
try:
return next(self.data(sorted_by='energy', name='Sample'))
except StopIteration:
raise ValueError('{} is empty'.format(self.__class__.__name__))
@property
def info(self):
"""Dict of information about the :class:`SampleSet` as a whole.
Examples:
This example shows the type of information that might be returned by
a dimod sampler by submitting a BQM that sets a value on a D-Wave
system's first listed coupler.
>>> from dwave.system import DWaveSampler # doctest: +SKIP
>>> sampler = DWaveSampler() # doctest: +SKIP
>>> bqm = dimod.BQM({}, {sampler.edgelist[0]: -1}, 0, dimod.SPIN) # doctest: +SKIP
>>> sampler.sample(bqm).info # doctest: +SKIP
{'timing': {'qpu_sampling_time': 315,
'qpu_anneal_time_per_sample': 20,
'qpu_readout_time_per_sample': 274,
# Snipped above response for brevity
"""
self.resolve()
return self._info
@property
def record(self):
""":obj:`numpy.recarray` containing the samples, energies, number of occurences, and other sample data.
Examples:
>>> sampler = dimod.ExactSolver()
>>> sampleset = sampler.sample_ising({'a': -0.5, 'b': 1.0}, {('a', 'b'): -1.0})
>>> sampleset.record.sample # doctest: +SKIP
array([[-1, -1],
[ 1, -1],
[ 1, 1],
[-1, 1]], dtype=int8)
>>> len(sampleset.record.energy)
4
"""
self.resolve()
return self._record
@property
def variables(self):
""":class:`~.variables.Variables` of variable labels.
Corresponds to columns of the sample field of :attr:`.SampleSet.record`.
"""
self.resolve()
return self._variables
@property
def vartype(self):
""":class:`.Vartype` of the samples."""
self.resolve()
return self._vartype
@property
def is_writeable(self):
return getattr(self, '_writeable', True)
@is_writeable.setter
def is_writeable(self, b):
b = bool(b) # cast
self._writeable = b
self.record.flags.writeable = b
self.info.is_writeable = b
###############################################################################################
# Views
###############################################################################################
def done(self):
"""Return True if a pending computation is done.
Used when a :class:`SampleSet` is constructed with :meth:`SampleSet.from_future`.
Examples:
This example uses a :class:`~concurrent.futures.Future` object directly. Typically
a :class:`~concurrent.futures.Executor` sets the result of the future
(see documentation for :mod:`concurrent.futures`).
>>> from concurrent.futures import Future
...
>>> future = Future()
>>> sampleset = dimod.SampleSet.from_future(future)
>>> future.done()
False
>>> future.set_result(dimod.ExactSolver().sample_ising({0: -1}, {}))
>>> future.done()
True
>>> sampleset.first.energy
-1.0
"""
return (not hasattr(self, '_future')) or (not hasattr(self._future, 'done')) or self._future.done()
def samples(self, n=None, sorted_by='energy'):
"""Return an iterable over the samples.
Args:
n (int, optional, default=None):
Maximum number of samples to return in the view.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples. If None,
samples are returned in record order.
Returns:
:obj:`.SamplesArray`: A view object mapping variable labels to
values.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 0.1, 'b': 0.0},
... {('a', 'b'): 1})
>>> for sample in sampleset.samples(): # doctest: +SKIP
... print(sample)
{'a': -1, 'b': 1}
{'a': 1, 'b': -1}
{'a': -1, 'b': -1}
{'a': 1, 'b': 1}
>>> sampleset = dimod.ExactSolver().sample_ising({'a': 0.1, 'b': 0.0},
... {('a', 'b'): 1})
>>> samples = sampleset.samples()
>>> samples[0]
{'a': -1, 'b': 1}
>>> samples[0, 'a']
-1
>>> samples[0, ['b', 'a']]
array([ 1, -1], dtype=int8)
>>> samples[1:, ['a', 'b']]
array([[ 1, -1],
[-1, -1],
[ 1, 1]], dtype=int8)
"""
if n is not None:
return self.samples(sorted_by=sorted_by)[:n]
if sorted_by is None:
samples = self.record.sample
else:
order = np.argsort(self.record[sorted_by])
samples = self.record.sample[order]
return SamplesArray(samples, self.variables)
def data(self, fields=None, sorted_by='energy', name='Sample', reverse=False,
sample_dict_cast=True, index=False):
"""Iterate over the data in the :class:`SampleSet`.
Args:
fields (list, optional, default=None):
If specified, only these fields are included in the yielded tuples.
The special field name 'sample' can be used to view the samples.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples. If None, the samples are yielded
in record order.
name (str/None, optional, default='Sample'):
Name of the yielded namedtuples or None to yield regular tuples.
reverse (bool, optional, default=False):
If True, yield in reverse order.
sample_dict_cast (bool, optional, default=True):
Samples are returned as dicts rather than
:class:`.SampleView`, which requires heavy memory
usage. Set to False to reduce load on memory.
index (bool, optional, default=False):
If True, `datum.idx` gives the corresponding index of the
:attr:`.SampleSet.record`.
Yields:
namedtuple/tuple: The data in the :class:`SampleSet`, in the order specified by the input
`fields`.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': -0.5, 'b': 1.0}, {('a', 'b'): -1})
>>> for datum in sampleset.data(fields=['sample', 'energy']): # doctest: +SKIP
... print(datum)
Sample(sample={'a': -1, 'b': -1}, energy=-1.5)
Sample(sample={'a': 1, 'b': -1}, energy=-0.5)
Sample(sample={'a': 1, 'b': 1}, energy=-0.5)
Sample(sample={'a': -1, 'b': 1}, energy=2.5)
>>> for energy, in sampleset.data(fields=['energy'], sorted_by='energy'):
... print(energy)
...
-1.5
-0.5
-0.5
2.5
>>> print(next(sampleset.data(fields=['energy'], name='ExactSolverSample')))
ExactSolverSample(energy=-1.5)
"""
record = self.record
if fields is None:
# make sure that sample, energy is first
fields = self._REQUIRED_FIELDS + [field for field in record.dtype.fields
if field not in self._REQUIRED_FIELDS]
if index:
fields.append('idx')
if sorted_by is None:
order = np.arange(len(self))
elif index:
# we want a stable sort but it can be slower
order = np.argsort(record[sorted_by], kind='stable')
else:
order = np.argsort(record[sorted_by])
if reverse:
order = np.flip(order)
if name is None:
# yielding a tuple
def _pack(values):
return tuple(values)
else:
# yielding a named tuple
SampleTuple = namedtuple(name, fields)
def _pack(values):
return SampleTuple(*values)
def _values(idx):
for field in fields:
if field == 'sample':
sample = SampleView(record.sample[idx, :], self.variables)
if sample_dict_cast:
sample = dict(sample)
yield sample
elif field == 'idx':
yield idx
else:
yield record[field][idx]
for idx in order:
yield _pack(_values(idx))
###############################################################################################
# Methods
###############################################################################################
def copy(self):
"""Create a shallow copy."""
return self.__class__(self.record.copy(),
self.variables, # a new one is made in all cases
self.info.copy(),
self.vartype)
def change_vartype(self, vartype, energy_offset=0.0, inplace=True):
"""Return the :class:`SampleSet` with the given vartype.
Args:
vartype (:class:`.Vartype`/str/set):
Variable type to use for the new :class:`SampleSet`. Accepted input values:
* :class:`.Vartype.SPIN`, ``'SPIN'``, ``{-1, 1}``
* :class:`.Vartype.BINARY`, ``'BINARY'``, ``{0, 1}``
energy_offset (number, optional, default=0.0):
Constant value applied to the 'energy' field of :attr:`SampleSet.record`.
inplace (bool, optional, default=True):
If True, the instantiated :class:`SampleSet` is updated; otherwise, a new
:class:`SampleSet` is returned.
Returns:
:obj:`.SampleSet`: SampleSet with changed vartype. If `inplace` is True, returns itself.
Notes:
This function is non-blocking unless `inplace==True`, in which case
the sample set is resolved.
Examples:
This example creates a binary copy of a spin-valued :class:`SampleSet`.
>>> sampleset = dimod.ExactSolver().sample_ising({'a': -0.5, 'b': 1.0}, {('a', 'b'): -1})
>>> sampleset_binary = sampleset.change_vartype(dimod.BINARY, energy_offset=1.0, inplace=False)
>>> sampleset_binary.vartype is dimod.BINARY
True
>>> sampleset_binary.first.sample
{'a': 0, 'b': 0}
"""
if not inplace:
return self.copy().change_vartype(vartype, energy_offset, inplace=True)
if not self.done():
def hook(sampleset):
sampleset.resolve()
return sampleset.change_vartype(vartype, energy_offset)
return self.from_future(self, hook)
if not self.is_writeable:
raise WriteableError("SampleSet is not writeable")
vartype = as_vartype(vartype, extended=True) # cast to correct vartype
if energy_offset:
self.record.energy = self.record.energy + energy_offset
if vartype is self.vartype:
return self # we're done!
if vartype is Vartype.SPIN and self.vartype is Vartype.BINARY:
self.record.sample = 2 * self.record.sample - 1
self._vartype = vartype
elif vartype is Vartype.BINARY and self.vartype is Vartype.SPIN:
self.record.sample = (self.record.sample + 1) // 2
self._vartype = vartype
else:
raise ValueError("Cannot convert from {} to {}".format(self.vartype, vartype))
return self
@lockable_method
def relabel_variables(self, mapping, inplace=True):
"""Relabel the variables of a :class:`SampleSet` according to the specified mapping.
Args:
mapping (dict):
Mapping from current variable labels to new, as a dict. If incomplete mapping is
specified, unmapped variables keep their current labels.
inplace (bool, optional, default=True):
If True, the current :class:`SampleSet` is updated; otherwise, a new
:class:`SampleSet` is returned.
Returns:
:class:`.SampleSet`: SampleSet with relabeled variables. If `inplace` is True, returns
itself.
Notes:
This function is non-blocking unless `inplace==True`, in which case
the sample set is resolved.
Examples:
This example creates a relabeled copy of a :class:`SampleSet`.
>>> sampleset = dimod.ExactSolver().sample_ising({'a': -0.5, 'b': 1.0}, {('a', 'b'): -1})
>>> new_sampleset = sampleset.relabel_variables({'a': 0, 'b': 1}, inplace=False)
>>> new_sampleset.variables
Variables([0, 1])
"""
if not inplace:
return self.copy().relabel_variables(mapping, inplace=True)
if not self.done():
def hook(sampleset):
sampleset.resolve()
return sampleset.relabel_variables(mapping, inplace=True)
return self.from_future(self, hook)
self.variables._relabel(mapping)
return self
def resolve(self):
"""Ensure that the sampleset is resolved if constructed from a future.
"""
# if it doesn't have the attribute then it is already resolved
if hasattr(self, '_future'):
samples = self._result_hook(self._future)
self.__init__(samples.record, samples.variables, samples.info, samples.vartype)
del self._future
del self._result_hook
def aggregate(self):
"""Create a new SampleSet with repeated samples aggregated.
Returns:
:obj:`.SampleSet`
Note:
:attr:`.SampleSet.record.num_occurrences` are accumulated but no
other fields are.
Examples:
This examples aggregates a sample set with two identical samples
out of three.
>>> sampleset = dimod.SampleSet.from_samples([[0, 0, 1], [0, 0, 1],
... [1, 1, 1]],
... dimod.BINARY,
... [0, 0, 1])
>>> print(sampleset)
0 1 2 energy num_oc.
0 0 0 1 0 1
1 0 0 1 0 1
2 1 1 1 1 1
['BINARY', 3 rows, 3 samples, 3 variables]
>>> print(sampleset.aggregate())
0 1 2 energy num_oc.
0 0 0 1 0 2
1 1 1 1 1 1
['BINARY', 2 rows, 3 samples, 3 variables]
"""
_, indices, inverse = np.unique(self.record.sample, axis=0,
return_index=True, return_inverse=True)
# unique also sorts the array which we don't want, so we undo the sort
order = np.argsort(indices)
indices = indices[order]
# and on the inverse
revorder = np.empty(len(order), dtype=order.dtype)
revorder[order] = np.arange(len(order))
inverse = revorder[inverse]
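# inverse now maps each original row to the position of its unique
# representative in first-occurrence order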
record = self.record[indices]
# fix the number of occurrences
record.num_occurrences = 0
for old_idx, new_idx in enumerate(inverse):
record[new_idx].num_occurrences += self.record[old_idx].num_occurrences
# dev note: we don't check the energies as they should be the same
# for individual samples
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype)
def append_variables(self, samples_like, sort_labels=True):
"""Deprecated in favor of `dimod.append_variables`."""
warn("SampleSet.append_variables is deprecated; please use "
"`dimod.append_variables` instead.", DeprecationWarning)
return append_variables(self, samples_like, sort_labels)
def lowest(self, rtol=1.e-5, atol=1.e-8):
"""Return a sample set containing the lowest-energy samples.
A sample is included if its energy is within tolerance of the lowest
energy in the sample set. The following equation is used to determine
if two values are equivalent:
absolute(`a` - `b`) <= (`atol` + `rtol` * absolute(`b`))
See :func:`numpy.isclose` for additional details and caveats.
Args:
rtol (float, optional, default=1.e-5):
The relative tolerance (see above).
atol (float, optional, default=1.e-8):
The absolute tolerance (see above).
Returns:
:obj:`.SampleSet`: A new sample set containing the lowest energy
samples as delimited by configured tolerances from the lowest energy
sample in the current sample set.
Examples:
>>> sampleset = dimod.ExactSolver().sample_ising({'a': .001},
... {('a', 'b'): -1})
>>> print(sampleset.lowest())
a b energy num_oc.
0 -1 -1 -1.001 1
['SPIN', 1 rows, 1 samples, 2 variables]
>>> print(sampleset.lowest(atol=.1))
a b energy num_oc.
0 -1 -1 -1.001 1
1 +1 +1 -0.999 1
['SPIN', 2 rows, 2 samples, 2 variables]
Note:
"Lowest energy" is the lowest energy in the sample set. This is not
always the "ground energy" which is the lowest energy possible
for a binary quadratic model.
"""
if len(self) == 0:
# empty so all are lowest
return self.copy()
record = self.record
# want all the rows within tolerance of the minimal energy
close = np.isclose(record.energy,
np.min(record.energy),
rtol=rtol, atol=atol)
record = record[close]
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype)
def truncate(self, n, sorted_by='energy'):
"""Create a new sample set with up to n rows.
Args:
n (int):
Maximum number of rows in the returned sample set. Does not return
any rows above this limit in the original sample set.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples before
truncating. Note that this sort order is maintained in the
returned sample set.
Returns:
:obj:`.SampleSet`
Examples:
>>> import numpy as np
...
>>> sampleset = dimod.SampleSet.from_samples(np.ones((5, 5)), dimod.SPIN, energy=5)
>>> print(sampleset)
0 1 2 3 4 energy num_oc.
0 +1 +1 +1 +1 +1 5 1
1 +1 +1 +1 +1 +1 5 1
2 +1 +1 +1 +1 +1 5 1
3 +1 +1 +1 +1 +1 5 1
4 +1 +1 +1 +1 +1 5 1
['SPIN', 5 rows, 5 samples, 5 variables]
>>> print(sampleset.truncate(2))
0 1 2 3 4 energy num_oc.
0 +1 +1 +1 +1 +1 5 1
1 +1 +1 +1 +1 +1 5 1
['SPIN', 2 rows, 2 samples, 5 variables]
See:
:meth:`SampleSet.slice`
"""
return self.slice(n, sorted_by=sorted_by)
def slice(self, *slice_args, **kwargs):
"""Create a new sample set with rows sliced according to standard Python
slicing syntax.
Args:
start (int, optional, default=None):
Start index for `slice`.
stop (int):
Stop index for `slice`.
step (int, optional, default=None):
Step value for `slice`.
sorted_by (str/None, optional, default='energy'):
Selects the record field used to sort the samples before
slicing. Note that `sorted_by` determines the sample order in
the returned sample set.
Returns:
:obj:`.SampleSet`
Examples:
>>> import numpy as np
...
>>> sampleset = dimod.SampleSet.from_samples(np.diag(range(1, 11)),
... dimod.BINARY, energy=range(10))
>>> print(sampleset)
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 1 0 0 0 0 0 0 0 0 0 0 1
1 0 1 0 0 0 0 0 0 0 0 1 1
2 0 0 1 0 0 0 0 0 0 0 2 1
3 0 0 0 1 0 0 0 0 0 0 3 1
4 0 0 0 0 1 0 0 0 0 0 4 1
5 0 0 0 0 0 1 0 0 0 0 5 1
6 0 0 0 0 0 0 1 0 0 0 6 1
7 0 0 0 0 0 0 0 1 0 0 7 1
8 0 0 0 0 0 0 0 0 1 0 8 1
9 0 0 0 0 0 0 0 0 0 1 9 1
['BINARY', 10 rows, 10 samples, 10 variables]
The above example's first 3 samples by energy == truncate(3):
>>> print(sampleset.slice(3))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 1 0 0 0 0 0 0 0 0 0 0 1
1 0 1 0 0 0 0 0 0 0 0 1 1
2 0 0 1 0 0 0 0 0 0 0 2 1
['BINARY', 3 rows, 3 samples, 10 variables]
The last 3 samples by energy:
>>> print(sampleset.slice(-3, None))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 0 0 0 0 0 0 0 1 0 0 7 1
1 0 0 0 0 0 0 0 0 1 0 8 1
2 0 0 0 0 0 0 0 0 0 1 9 1
['BINARY', 3 rows, 3 samples, 10 variables]
Every second sample in between, skipping top and bottom 3:
>>> print(sampleset.slice(3, -3, 2))
0 1 2 3 4 5 6 7 8 9 energy num_oc.
0 0 0 0 1 0 0 0 0 0 0 3 1
1 0 0 0 0 0 1 0 0 0 0 5 1
['BINARY', 2 rows, 2 samples, 10 variables]
"""
# handle `sorted_by` kwarg with a default value in a python2-compatible way
sorted_by = kwargs.pop('sorted_by', 'energy')
if kwargs:
# be strict about allowed kwargs: throw the same error as python3 would
raise TypeError('slice got an unexpected '
'keyword argument {!r}'.format(kwargs.popitem()[0]))
# follow Python's slice syntax
if slice_args:
selector = slice(*slice_args)
else:
selector = slice(None)
if sorted_by is None:
record = self.record[selector]
else:
sort_indices = np.argsort(self.record[sorted_by])
record = self.record[sort_indices[selector]]
return type(self)(record, self.variables, copy.deepcopy(self.info),
self.vartype)
###############################################################################################
# Serialization
###############################################################################################
def to_serializable(self, use_bytes=False, bytes_type=bytes,
pack_samples=True):
"""Convert a :class:`SampleSet` to a serializable object.
Note that the contents of the :attr:`.SampleSet.info` field are assumed
to be serializable.
Args:
use_bytes (bool, optional, default=False):
If True, a compact representation of the biases as bytes is used.
bytes_type (class, optional, default=bytes):
If `use_bytes` is True, this class is used to wrap the bytes
objects in the serialization. Useful for Python 2 using BSON
encoding, which does not accept the raw `bytes` type;
`bson.Binary` can be used instead.
pack_samples (bool, optional, default=True):
Pack the samples using 1 bit per sample. Samples are never
packed when :attr:`SampleSet.vartype` is
`~ExtendedVartype.DISCRETE`.
Returns:
dict: Object that can be serialized.
Examples:
This example encodes using JSON.
>>> import json
...
>>> samples = dimod.SampleSet.from_samples([-1, 1, -1], dimod.SPIN, energy=-.5)
>>> s = json.dumps(samples.to_serializable())
See also:
:meth:`~.SampleSet.from_serializable`
"""
schema_version = "3.1.0"
# developer note: numpy's record array stores the samples, energies,
# num_occ. etc as a struct array. If we dumped that array directly to
# bytes we could avoid a copy when undoing the serialization. However,
# we want to pack the samples, so that means that we're storing the
# arrays individually.
vectors = {name: serialize_ndarray(data, use_bytes=use_bytes,
bytes_type=bytes_type)
for name, data in self.data_vectors.items()}
# we never pack DISCRETE samplesets
pack_samples = pack_samples and self.vartype is not DISCRETE
if pack_samples:
# we could just do self.record.sample > 0 for all of these, but to
# save on the copy if we are already binary and bool/integer we
# check and just pass through in that case
samples = self.record.sample
if (self.vartype is Vartype.BINARY and
(np.issubdtype(samples.dtype, np.integer) or
np.issubdtype(samples.dtype, np.bool_))):
packed = _pack_samples(samples)
else:
packed = _pack_samples(samples > 0)
sample_data = serialize_ndarray(packed,
use_bytes=use_bytes,
bytes_type=bytes_type)
else:
sample_data = serialize_ndarray(self.record.sample,
use_bytes=use_bytes,
bytes_type=bytes_type)
return {
# metadata
"type": type(self).__name__,
"version": {"sampleset_schema": schema_version},
# samples
"num_variables": len(self.variables),
"num_rows": len(self),
"sample_data": sample_data,
"sample_type": self.record.sample.dtype.name,
"sample_packed": bool(pack_samples), # 3.1.0+, default=True
# vectors
"vectors": vectors,
# other
"variable_labels": self.variables.to_serializable(),
"variable_type": self.vartype.name,
"info": serialize_ndarrays(self.info, use_bytes=use_bytes,
bytes_type=bytes_type),
}
def _asdict(self):
# support simplejson encoding
return self.to_serializable()
@classmethod
def from_serializable(cls, obj):
"""Deserialize a :class:`SampleSet`.
Args:
obj (dict):
A :class:`SampleSet` serialized by :meth:`~.SampleSet.to_serializable`.
Returns:
:obj:`.SampleSet`
Examples:
This example encodes and decodes using JSON.
>>> import json
...
>>> samples = dimod.SampleSet.from_samples([-1, 1, -1], dimod.SPIN, energy=-.5)
>>> s = json.dumps(samples.to_serializable())
>>> new_samples = dimod.SampleSet.from_serializable(json.loads(s))
See also:
:meth:`~.SampleSet.to_serializable`
"""
version = obj["version"]["sampleset_schema"]
if version < "3.0.0":
raise ValueError("No longer supported serialization format")
# assume we're working with v3
# other data
vartype = str(obj['variable_type']) # cast to str for python2
num_variables = obj['num_variables']
variables = list(iter_deserialize_variables(obj['variable_labels']))
info = deserialize_ndarrays(obj['info'])
# vectors
vectors = {name: deserialize_ndarray(data)
for name, data in obj['vectors'].items()}
sample = deserialize_ndarray(obj['sample_data'])
if obj.get('sample_packed', True): # 3.1.0
sample = unpack_samples(sample,
n=num_variables,
dtype=obj['sample_type'])
if vartype == 'SPIN':
sample *= 2
sample -= 1
return cls.from_samples((sample, variables), vartype, info=info,
**vectors)
###############################################################################################
# Export to dataframe
###############################################################################################
def to_pandas_dataframe(self, sample_column=False):
"""Convert a sample set to a Pandas DataFrame
Args:
sample_column(bool, optional, default=False): If True, samples are
represented as a column of type dict.
Returns:
:obj:`pandas.DataFrame`
Examples:
>>> samples = dimod.SampleSet.from_samples([{'a': -1, 'b': +1, 'c': -1},
... {'a': -1, 'b': -1, 'c': +1}],
... dimod.SPIN, energy=-.5)
>>> samples.to_pandas_dataframe() # doctest: +SKIP
a b c energy num_occurrences
0 -1 1 -1 -0.5 1
1 -1 -1 1 -0.5 1
>>> samples.to_pandas_dataframe(sample_column=True) # doctest: +SKIP
sample energy num_occurrences
0 {'a': -1, 'b': 1, 'c': -1} -0.5 1
1 {'a': -1, 'b': -1, 'c': 1} -0.5 1
"""
import pandas as pd
if sample_column:
df = pd.DataFrame(self.data(sorted_by=None, sample_dict_cast=True))
else:
# work directly with the record, it's much faster
df = pd.DataFrame(self.record.sample, columns=self.variables)
for field in sorted(self.record.dtype.fields): # sort for consistency
if field == 'sample':
continue
df.loc[:, field] = self.record[field]
return df
| apache-2.0 |
cellular-nanoscience/pyotic | pyoti/modification/modification.py | 1 | 26859 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 12 14:22:31 2016
@author: Tobias Jachowski
"""
import collections.abc
import matplotlib.pyplot as plt
import numpy as np
from abc import ABCMeta, abstractmethod
from .. import gui
from .. import helpers as hp
from .. import traces as tc
from ..evaluate import signal as sn
from ..graph import GraphMember
from ..picklable import InteractiveAttributes
class GraphicalMod(object):
"""
This class's subclasses should implement `_figure()` and `_update_fig()`,
which return and update a matplotlib figure, respectively. The figure can
be accessed by `self.figure`.
Parameters
----------
    modification : Modification
        The `Modification` whose parameters are visualized and adjusted by
        this figure.
    A minimal subclass sketch is given right after this class definition.
"""
def __init__(self, modification=None, **kwargs):
# Register the modification which should be graphically adjusted
self.modification = modification
# Initialize figure to None, which effectively disables
# `self.update_fig()` and Co. and prevent them from throwing an error
self._fig = None
def _set_plot_params(self, plot_params=None):
if plot_params is None:
plot_params = {}
gui.set_plot_params(plot_params=plot_params)
def display(self, plot_params=None):
self.init_fig(plot_params=plot_params)
def init_fig(self, show=True, plot_params=None):
"""
        This method calls `self._figure()` to create an interactive figure,
        lets the user determine the parameters necessary to calculate the
        modification (see `self._recalculate()`), and relies on
        `self._close_fig()` to release all references to the actors of the
        figure.
`self._figure()` and self._close_fig() should be (over)written by
subclasses.
"""
# Only create a figure, if the function `self._figure()` is implemented
if not hasattr(self, '_figure'):
return
# close the figure
# nbagg backend needs to have the figure closed and recreated
# whenever the code of the cell displaying the figure is executed.
# A simple update of the figure would let it disappear. Even a
# self.figure.show() wouldn't work anymore.
        # For other backends this just means a bit of extra calculation.
# Therefore, close the figure first before replotting it.
self.close_fig()
# set default plot parameters, can be recalled / overwritten in
# `self._figure()`
self._set_plot_params(plot_params=plot_params)
# create the figure
self.figure = self._figure()
# update the figure
self.update_fig()
# show the figure
if show:
self.figure.show()
def update(self, **kwargs):
self.update_fig(**kwargs)
def update_fig(self, **kwargs):
if self._fig is not None:
self._update_fig(**kwargs)
self._figure_canvas_draw()
def _update_fig(self, **kwargs):
pass
def close_fig(self):
if self._fig is not None:
self._pre_close_fig()
self._close_fig()
self._post_close_fig()
def _pre_close_fig(self):
"""
Method to be overwritten by subclasses.
"""
pass
def _close_fig(self):
# force redraw of the figure
self._figure_canvas_draw()
# close the figure
plt.close(self.figure)
# release memory
self.figure = None
def _post_close_fig(self):
"""
Method to be overwritten by subclasses.
"""
pass
def _figure_canvas_draw(self):
# Some matplotlib backends will throw an error when trying to draw the
        # canvas. Simply ignoring such an error here prevents the figure
        # from being left open and from blocking the next figure from
        # being drawn. Even though the "except: pass" clause is
# considered bad, here the worst thing that could happen is that the
# figure produced by the matplotlib backend upon closing is not
# updated. Therefore, "except: pass" should be considered as an
# acceptable workaround for this case.
try:
# redraw the figure, before closing it
self.figure.canvas.draw()
except:
pass
@property
def figure(self):
"""
The matplotlib figure that represents and/or adjusts the parameters of
`self.modification`.
"""
# Automatically initialize a figure
if self._fig is None:
self.init_fig(show=False)
# Return a previously initialized figure
return self._fig
@figure.setter
def figure(self, figure):
self._fig = figure
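# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal
# GraphicalMod subclass showing the two hooks `_figure()` and `_update_fig()`
# described above. The trace name 'psdX' and the plotted quantity are
# assumptions made only for this example.
class _SketchGraphicalMod(GraphicalMod):
    def _figure(self):
        # Create the figure once and keep references to the artists that
        # `_update_fig()` refreshes later on.
        fig, self._ax = plt.subplots()
        self._line, = self._ax.plot([], [])
        self._ax.set_xlabel('sample')
        self._ax.set_ylabel('psdX')
        return fig
    def _update_fig(self, **kwargs):
        # Refresh the line with the current (decimated) data of the View the
        # modification is based on.
        data = self.modification._get_data_based(traces=['psdX'],
                                                 decimate=True)
        self._line.set_data(np.arange(len(data)), data[:, 0])
        self._ax.relim()
        self._ax.autoscale_view()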
class Modification(GraphMember, metaclass=ABCMeta):
"""
    Modification is an abstract class that implements methods to modify the
data of a `View` (`view_apply`) and adjust the parameters which control the
behaviour of the modifications applied.
Whenever one of the parameters needed to calculate the modification is
    changed, the view this modification is applied to is informed.
    `self.set_changed()` has to be called upon any change of the modification
that influences the behaviour of `self.modify()`. In essence, these are all
parameters that are used to determine the modification. Therefore, this
should be called by all setters of the parameters/attributes.
Every subclass of Modification has to implement a constructor method
    `self.__init__(self, **kwargs)`, which calls the superclass's constructor
    and sets the traces the modification is applied to via the keyword
    parameter `traces_apply`. An example could be:
        super().__init__(traces_apply=['psdX', 'psdZ'], **kwargs)
    A complete minimal subclass sketch is appended at the end of this module.
"""
# set a graphical modification, which will, per default, do nothing
GRAPHICALMOD = GraphicalMod
def __init__(self, traces_apply=None, view_apply=None, view_based=None,
automatic_switch=False, datapoints=-1, **kwargs):
# Call the constructor of the superclass `GraphMember` and set the
# maximum allowed number of parents (`view_based`) and childs
# (`view_apply`) to one.
super().__init__(max_children=1, max_parents=1, **kwargs)
# A `Modification` has to be applied to a `View`!
if view_apply is None:
raise TypeError("Modification missing required positional argument"
" `view_apply`.")
# Set the view, from where the parameters for the modification are
# calculated from
if view_based is not None:
self.view_based = view_based
# Set the view, whose data is going to be modified
self.view_apply = view_apply
# Set the traces, which are modified by this `Modification`
self.traces_apply = traces_apply
# Initialize InteractiveAttributes object, which will hold all the
# parameters that the user should interact with.
self.iattributes = InteractiveAttributes()
# A checkbox to switch on/off the automatic determination of the
# parameters that are used to calculate the modification in the method
# `self.recalculate()`. The attribute `self.automatic` is checked in
# the method `self.recalculate()`. If `automatic` is True, the
# parameters are recalculated, otherwise the parameters are left
# unchanged. Whenever `automatic` is changed (by the user or
# automatically), `self.evaluate()` is called.
if automatic_switch:
self.add_iattribute('automatic', description='Automatic mode',
value=True, unset_automatic=False,
set_changed=False,
callback_functions=[self.evaluate])
# A checkbox to de-/activate this `Modification`. This attribute gets
# evaluated by `self.modify()`. If the `Modification` is active, it
# modifies data, otherwise not, i.e. modify() returns modified or
# unmodified original data, respectively.
desc = "".join((self.__class__.__name__, " active"))
self.add_iattribute('active', description=desc, value=True,
unset_automatic=False)
# Datapoints is used to calculate and/or present modification. The
# attribute `datapoints` is used to calculate a decimating factor and
# speed up the calculations and/or plot commands.
if datapoints > 0:
desc = "Datapoints to calculate/visualize modification"
self.add_iattribute('datapoints', description=desc,
value=datapoints, unset_automatic=False)
# Add a Button to manually call the method `self.evaluate()`.
self.add_iattribute('evaluate', description='Evaluate',
unset_automatic=False, set_changed=False,
callback_functions=[self.evaluate])
def add_iattribute(self, key, description=None, value=None,
unset_automatic=True, set_changed=True,
callback_functions=None, **kwargs):
"""
Add logic for automatic checkbox.
Register widget with unset_automatic=True
(-> Upon change of widget, unset automatic mode).
Change default behaviour by setting kwarg: unset_automatic = False
Add logic for triggering changed (calling self.set_changed).
Register widget with set_changed=True.
"""
if callback_functions is None:
callback_functions = []
if unset_automatic:
callback_functions.append(self._unset_automatic)
if set_changed:
callback_functions.append(self.set_changed)
self.iattributes.add(key, description=description, value=value,
callback_functions=callback_functions, **kwargs)
def _unset_automatic(self, leave_automatic=False, **kwargs):
"""
Add the logic for the automatic checkbox. If the value of an attribute
is changed and the attribute was created with `unset_automatic=True`,
deactivate the automatic mode (see `self.add_iattribute()`). To
temporarily leave the automatic mode status untouched when changing the
value of an attribute, i.e. not unset the automatic mode, set the value
of the attribute with the keyword argument `leave_automatic=True`
(see method `self.iattributes.set_value()`)
"""
if not leave_automatic:
self.iattributes.set_value('automatic', False, callback=False)
def evaluate(self):
"""
Implement the (re)calculation for the values necessary to calculate the
modification in the subclass and call recalculate() of the superclass
(this class).
"""
if self.updated:
# This method makes sure the modification is calculated with the
# current values of the View this modification is based on. It is
# called by self.modify().
# When a View requests data, it calls modify(), which in turn calls
# recalculate(). Recalculate(), if necessary, calls
# get_data_modified() from the View it is based on, which again
            # triggers a call of modify() and a subsequent recalculate() of all
# modifications associated with this View.
# Modification need update, because view, this mod is based on,
# was changed.
            # self._view_based.evaluate() is not needed, it is called via:
# recalculate() -> get_data_based() -> _view_based.get_data() ->
# get_modified_data() -> super().evaluate()
return
# Recalculate and print info of recalculated values if in automatic
# mode
if self.recalculate():
self.print_info()
# Update figure after recalculation has taken place
self.graphicalmod.update()
def recalculate(self):
# Check if recalculation of parameters is necessary
if self.updated:
return False
# Check the attribute self.automatic, whether the parameters needed for
# the calculation of the modification should be determined
# automatically or not. If values are set manually, no recalculation is
# necessary, and `self` is therefore up to date.
if not self.automatic:
self.updated = True
return True
# Recalculate the parameters, inform the view this `Modification`
# is applied to about the change, and set `self` to be updated.
self._recalculate()
self.set_changed(updated=True)
return True
def _recalculate(self):
"""
This method should be overwritten by subclasses and perform the
recalculation necessary to determine the parameters used by this
Modification to modify the data in `self._modify()`.
"""
pass
def print_info(self):
print("Values for Modification of class %s:"
% self.__class__.__name__)
if not self.automatic:
print(" Parameters set manually!")
for key, widget in self.iattributes._widgets.items():
if hasattr(widget, 'value'):
if isinstance(widget.value, float):
print(" %s: %.5f" % (widget.description, widget.value))
                if isinstance(widget.value, collections.abc.Iterable):
print(" %s: %s" % (widget.description, widget.value))
self._print_info()
def _print_info(self):
"""
This method should be overwritten by subclasses, which want to print
        extra info in addition to the info of the calculated parameters.
"""
pass
def modify(self, data, samples, traces_idx):
"""
Modifies data and returns the modified array.
Parameters
----------
data : 2D numpy.ndarray of type float
`data` holds the data to be modified
samples : index array or slice
`samples` is the index of the samples that was used to get the
`data`
        traces_idx : index array or slice
            `traces_idx` is the index of the traces that was used to get the
            `data`
"""
# Modification is active.
if self.active:
# Check if traces contained in data are modified by this
# modification.
data_traces = self.view_apply.idx_to_traces(traces_idx)
mod_traces = self.traces_apply
# Calculate the indices of traces contained in data and
# modification. First, calculate indices of modification traces.
mod_index = hp.overlap_index(mod_traces, data_traces)
if len(mod_index) > 0:
# At least one trace exists in both data and modification.
# Therefore, the data needs to be modified...
mod_index = hp.slicify(mod_index)
# Calculate indices of traces of the data in such a way that
# `data[:, data_index]` indexes the same traces as
# `self.traces_apply[mod_index]`
data_index = np.array([data_traces.index(trace)
for trace
in np.array(mod_traces)[mod_index]])
data_index = hp.slicify(data_index)
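                # Worked example (assumed traces): with data_traces
                # ['psdX', 'psdY', 'psdZ'] and self.traces_apply
                # ['psdZ', 'psdX'], data_index becomes [2, 0], so that
                # data[:, data_index] is aligned trace-by-trace with
                # np.array(mod_traces)[mod_index].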
# Trigger a recalculation of the parameters for the
# modification (if necessary) before modifying the data.
self.evaluate()
# Modify and return the modified data
return self._modify(data=data,
samples=samples,
data_traces=data_traces,
data_index=data_index,
mod_index=mod_index)
# Return unmodified data
return data
@abstractmethod
def _modify(self, data, samples, data_traces, data_index, mod_index):
"""
Is called by self.modify() whenever data is requested and needs to be
modified.
Parameters
----------
data : 2D numpy.array()
Contains the data, indexed by samples and data_traces
samples : slice or 1D numpy.array()
Is the index of the samples contained in data, which was
given/asked by the user/process who called _get_data().
data_traces : list of str
Contains a list of traces (str) existent in data, which
was given/asked by the user/process who called _get_data().
data_index : slice or 1D numpy.array()
data[:, data_index] gives the data, which is modified by
this modification
mod_index : slice or 1D numpy.array()
            np.array(self.traces_apply)[mod_index] gives the traces
            which exist in data and are also modified by this modification.
Returns
-------
2D numpy.array()
The modified data.
"""
# modify data here, like so:
# data[:,data_index] -= modification[:,mod_index]
return data
@property
def updated(self):
return self._updated
@updated.setter
def updated(self, value):
"""
Gets set to True, after all `Views`, this `Modification` is based on,
have been updated and after this `Modification` has been recalculated.
This is automatically taken care of by `self.evaluate()` ->
`self.recalculate()`.
        Gets called by a `View` this `Modification` is based on, whenever the
        `View` (or a `Modification` of the `View`) has been changed. It
automatically informs its own `View`, that there was a change, by
calling `self.set_changed()`.
"""
self._updated = value
def member_changed(self, ancestor=True, calledfromself=False,
index_shift=None, **kwargs):
# If a change of an ancestor View or a MultiRegion was triggered by an
# index_shift, the modification needs to recalculate itself, i.e.
        # the modification will alter its changing behaviour. Because an
# index_shift change is only transmitted to `level=1`, inform the
# descendants of the change itself. A change of descendants is ignored.
if index_shift is not None and not calledfromself and ancestor:
self.set_changed(includeself=False)
# Update update status
super().member_changed(ancestor=ancestor,
calledfromself=calledfromself, **kwargs)
def _get_data(self, based=True, samples=None, traces=None, window=False,
decimate=False, copy=True):
if based:
view = self.view_based
else:
view = self.view_apply
if not isinstance(window, bool) and isinstance(window, int):
window = window
elif window:
window = self.decimate
else:
window = 1
if not isinstance(decimate, bool) and isinstance(decimate, int):
decimate = decimate
elif decimate:
decimate = self.decimate
else:
decimate = 1
if not based:
old_active = self.iattributes.active
self.iattributes.set_value('active', False, callback=False)
data = view.get_data(traces=traces, samples=samples,
moving_filter='mean', window=window,
decimate=decimate, copy=copy)
if not based:
self.iattributes.set_value('active', old_active, callback=False)
return data
def _get_data_based(self, samples=None, traces=None, window=False,
decimate=False, copy=True):
"""
decimate is False per default. If decimate is True, it only gets used,
if samples are set to None (step information in samples precedes over
decimate).
"""
return self._get_data(based=True, samples=samples, traces=traces,
window=window, decimate=decimate, copy=copy)
def _get_data_apply(self, samples=None, traces=None, window=False,
decimate=False, copy=True):
"""
        Get data of view apply with all modifications applied, except self.
        This is achieved by temporarily setting the 'active' attribute to
        False. The attribute is set via `self.iattributes.set_value()` with
        `callback=False` instead of using the `active` property, to prevent
        firing the `self.set_changed()` method while the data is fetched.
decimate is False per default. If decimate is True, it only gets used,
if samples are set to None (step information in samples precedes over
decimate).
"""
return self._get_data(based=False, samples=samples, traces=traces,
window=window, decimate=decimate, copy=copy)
def calculate_bin_means(self, data=None, traces=None, bins=None,
datapoints_per_bin=None, sorttrace=0):
"""
Calculates binned means based on the data to be fitted. The binned
means are usually used by data fitting routines.
Parameters
----------
data : 2D numpy.ndarray of type float, optional
Defaults to `self._get_data_based(traces=traces, decimate=True)`.
traces : str or list of str, optional
Defaults to `self.traces_apply`.
bins : int, optional
Number of bins that contain the datapoints to be averaged. If
possible, it defaults to (`self.iattributes.datapoints` /
`datapoints_per_bin`), otherwise bins defaults to
(`self.view_based.datapoints` / `datapoints_per_bin`).
datapoints_per_bin : int, optional
Average number of datapoints to be averaged in one bin. Defaults to
25.
sorttrace : int, optional
Trace (column) of `data` that acts as sorting index upon binning
for the rest of the data. Defaults to the first trace of the data.
Returns
-------
1D numpy.ndarray of type float
The averaged bin values.
float
The size of one bin.
"""
# Bin data and average bins to prevent arbitrary weighting of bins with
# more datapoints
if bins is None:
bins = self._bins(datapoints_per_bin=datapoints_per_bin)
# get the traces to retrieve data from
if traces is None:
traces = self.traces_apply
# get the data to bin
if data is None:
data = self._get_data_based(traces=traces, decimate=True)
# create the bins based on one trace of the data
minimum = np.min(data[:, sorttrace])
maximum = np.max(data[:, sorttrace])
edges = np.linspace(minimum, maximum, bins + 1)
# Get the indices of the bins to which each value in input array
# belongs.
bin_idx = np.digitize(data[:, sorttrace], edges)
# Find which points are on the rightmost edge.
on_edge = data[:, sorttrace] == edges[-1]
# Shift these points one bin to the left.
bin_idx[on_edge] -= 1
# fill the bins with the means of the data contained in each bin
bin_means = np.array([data[bin_idx == i].mean(axis=0)
for i in range(1, bins + 1)
if np.any(bin_idx == i)])
bin_width = edges[1] - edges[0]
return bin_means, bin_width
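    # Worked example of the right-edge handling above (assumed values): with
    # edges = [0., 1., 2.] (i.e. bins=2), np.digitize assigns a value of
    # exactly 2.0 to bin 3; shifting points on the rightmost edge one bin to
    # the left keeps them in bin 2, so the last bin is closed on both sides.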
def _bins(self, datapoints_per_bin=None):
# On average 25 datapoints per bin
datapoints_per_bin = datapoints_per_bin or 25
if 'datapoints' in self.iattributes:
bins = self.iattributes.datapoints / datapoints_per_bin
else:
bins = self.view_based.datapoints / datapoints_per_bin
bins = max(1, int(np.round(bins)))
return bins
_NAME = {
'position': ['positionX', 'positionY'],
'psd': ['psdX', 'psdY'],
'axis': ['X', 'Y']
}
def _excited(self, traces=None):
traces = traces or ['positionX', 'positionY']
data = self._get_data_based(traces=traces, copy=False)
return sn.get_excited_signal(data)
def interact(self):
self.recalculate()
self.iattributes.display()
self.graphicalmod.display()
@property
def graphicalmod(self):
# ZODB volatile
if not hasattr(self, '_v_graphicalmod'):
self._v_graphicalmod \
= self.__class__.GRAPHICALMOD(modification=self)
return self._v_graphicalmod
@property
def active(self):
active = False
if 'active' in self.iattributes:
active = self.iattributes.active
return active
@active.setter
def active(self, active=True):
if 'active' in self.iattributes:
self.iattributes.active = active
@property
def automatic(self):
# Does the modification automatically calculate its parameters
automatic = True
if 'automatic' in self.iattributes:
automatic = self.iattributes.automatic
return automatic
@property
def datapoints(self):
if 'datapoints' in self.iattributes:
return self.iattributes.datapoints
else:
return self.view_based.datapoints
@property
def decimate(self):
if 'datapoints' in self.iattributes:
return max(1, int(np.round(self.view_based.datapoints
/ self.datapoints)))
else:
return 1
@property
def view_based(self):
return self.parent
@property
def view_apply(self):
return self.child
@view_based.setter
def view_based(self, view):
self.set_parent(view)
@view_apply.setter
def view_apply(self, view):
self.set_child(view)
def lia(self, trace):
"""
Return the local index of trace in traces_apply
"""
return self.traces_apply.index(trace)
@property
def traces_apply(self):
# return a copy to protect local copy
return self._traces_apply.copy()
@traces_apply.setter
def traces_apply(self, traces):
if traces is None:
traces_apply = []
else:
traces_apply = tc.normalize(traces)
self._traces_apply = traces_apply
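# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): a minimal concrete
# Modification that subtracts a constant offset from its traces. The trace
# names 'psdX'/'psdY' and the number of datapoints are assumptions made only
# for this example.
class _SketchOffsetMod(Modification):
    def __init__(self, **kwargs):
        super().__init__(traces_apply=['psdX', 'psdY'], datapoints=1000,
                         **kwargs)
        self._means = np.zeros(len(self.traces_apply))
    def _recalculate(self):
        # Determine the offsets from the View this modification is based on.
        data = self._get_data_based(traces=self.traces_apply, decimate=True)
        self._means = data.mean(axis=0)
    def _modify(self, data, samples, data_traces, data_index, mod_index):
        # Subtract the offsets of those traces that are present in `data`.
        data[:, data_index] -= self._means[mod_index]
        return data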
| apache-2.0 |
omnirom/android_kernel_htc_flounder | scripts/tracing/dma-api/trace.py | 96 | 12420 | """Main program and stuff"""
#from pprint import pprint
from sys import stdin
import os.path
import re
from argparse import ArgumentParser
import cPickle as pickle
from collections import namedtuple
from plotting import plotseries, disp_pic
import smmu
class TracelineParser(object):
"""Parse the needed information out of an ftrace line"""
# <...>-6 [000] d..2 5.287079: dmadebug_iommu_map_page: device=sdhci-tegra.3, addr=0x01048000, size=4096 page=c13e7214 archdata=ed504640
def __init__(self):
self.pattern = re.compile("device=(?P<dev>.*), addr=(?P<addr>.*), size=(?P<size>.*) page=(?P<page>.*) archdata=(?P<archdata>.*)")
def parse(self, args):
args = self.pattern.match(args)
return (args.group("dev"), int(args.group("addr"), 16),
int(args.group("size")), int(args.group("page"), 16),
int(args.group("archdata"), 16))
def biggest_indices(items, n):
"""Return list of indices of n biggest elements in items"""
with_indices = [(x, i) for i, x in enumerate(items)]
ordered = sorted(with_indices)
return [i for x, i in ordered[-n:]]
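# Worked example (assumed values): biggest_indices([4, 1, 9, 7], 2) keeps the
# two largest elements, 7 and 9, and returns their indices [3, 2] (ordered by
# ascending element value, not by index).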
def by_indices(xs, ids):
"""Get elements from the list xs by their indices"""
return [xs[i] for i in ids]
"""Event represents one input line"""
Event = namedtuple("Event", ["time", "dev", "data", "delta"])
class Trace(object):
def __init__(self, args):
smmu.VERBOSITY = args.verbosity
self._args = args
self.devlist = []
self.events = []
self.metrics = {
"max_peak": self._usage_peak,
"activity_rate": self._usage_activity,
"average_mem": self._usage_avg
}
self.traceliner = TracelineParser()
@staticmethod
def get_metrics():
"""What filter metrics to get max users"""
return ["max_peak", "activity_rate", "average_mem"]
def show(self):
"""Shuffle events around, build plots, and show them"""
if self._args.max_plots:
evs = self.merge_events()
else:
evs = self.events
series, devlist = self.unload(evs)
if not self._args.no_plots:
self.plot(series, devlist)
def _get_usage(self, evs):
"""Return a metric of how active the events in evs are"""
return self.metrics[self._args.max_metric](evs)
def _usage_peak(self, evs):
"""Return the biggest peak"""
return max(e.data for e in evs)
def _usage_activity(self, evs):
"""Return the activity count: simply the length of the event list"""
return len(evs)
def _usage_avg(self, evs):
"""Return the average over all points"""
# FIXME: the data points are not uniform in time, so this might be
# somewhat off.
        return float(sum(e.data for e in evs)) / len(evs)
def merge_events(self):
"""Find out biggest users, keep them and flatten others to a single user"""
sizes = []
dev_evs = []
for i, dev in enumerate(self.devlist):
dev_evs.append([e for e in self.events if e.dev == dev])
sizes.append(self._get_usage(dev_evs[i]))
# indices of the devices
biggestix = biggest_indices(sizes, self._args.max_plots)
print biggestix
is_big = {}
for i, dev in enumerate(self.devlist):
is_big[dev] = i in biggestix
evs = []
for e in self.events:
if not is_big[e.dev]:
e = Event(e.time, "others", e.data, e.delta)
evs.append(e)
self.devlist.append("others")
return evs
def unload(self, events):
"""Prepare the event list for plotting
series ends up as [([time0], [data0]), ([time1], [data1]), ...]
"""
# ([x], [y]) for matplotlib
series = [([], []) for x in self.devlist]
devidx = dict([(d, i) for i, d in enumerate(self.devlist)])
for event in events:
devid = devidx[event.dev]
series[devid][0].append(event.time)
series[devid][1].append(event.data) # self.dev_data(event.dev))
series_out = []
devlist_out = []
for ser, dev in zip(series, self.devlist):
if len(ser[0]) > 0:
series_out.append(ser)
devlist_out.append(dev)
return series_out, devlist_out
def plot(self, series, devlist):
"""Display the plots"""
#series, devlist = flatten_axes(self.series, self.devlist,
# self._args.max_plots)
devinfo = (series, map(str, devlist))
allocfreeinfo = (self.allocsfrees, ["allocd", "freed", "current"])
plotseries(devinfo, allocfreeinfo)
#plotseries(devinfo)
def dev_data(self, dev):
"""what data to plot against time"""
return dev._cur_alloc
def _cache_hash(self, filename):
"""The trace files are probably not of the same size"""
return str(os.path.getsize(filename))
def load_cache(self):
"""Get the trace data from a database file, if one exists"""
has = self._cache_hash(self._args.filename)
try:
cache = open("trace." + has)
except IOError:
pass
else:
self._load_cache(pickle.load(cache))
return True
return False
def save_cache(self):
"""Store the raw trace data to a database"""
data = self._save_cache()
fh = open("trace." + self._cache_hash(self._args.filename), "w")
pickle.dump(data, fh)
def _save_cache(self):
"""Return the internal data that is needed to be pickled"""
return self.events, self.devlist, self.allocsfrees
def _load_cache(self, data):
"""Get the data from an unpickled object"""
self.events, self.devlist, self.allocsfrees = data
def load_events(self):
"""Get the internal data from a trace file or cache"""
if self._args.filename:
if self._args.cache and self.load_cache():
return
fh = open(self._args.filename)
else:
fh = stdin
self.parse(fh)
if self._args.cache and self._args.filename:
self.save_cache()
def parse(self, fh):
"""Parse the trace file in fh, store data to self"""
mems = {}
dev_by_name = {}
devlist = []
buf_owners = {}
events = []
allocsfrees = [([], []), ([], []), ([], [])] # allocs, frees, current
allocs = 0
frees = 0
curbufs = 0
mem_bytes = 1024 * 1024 * 1024
npages = mem_bytes / 4096
ncols = 512
le_pic = [0] * npages
lastupd = 0
for lineidx, line in enumerate(fh):
# no comments
if line.startswith("#"):
continue
taskpid, cpu, flags, timestamp, func, args = line.strip().split(None, 5)
func = func[:-len(":")]
# unneeded events may be there too
if not func.startswith("dmadebug"):
continue
if self._args.verbosity >= 3:
print line.rstrip()
timestamp = float(timestamp[:-1])
if timestamp < self._args.start:
continue
if timestamp >= self._args.end:
break
devname, addr, size, page, archdata = self.traceliner.parse(args)
if self._args.processes:
devname = taskpid.split("-")[0]
mapping = archdata
try:
memmap = mems[mapping]
except KeyError:
memmap = mem(mapping)
mems[mapping] = memmap
try:
dev = dev_by_name[devname]
except KeyError:
dev = smmu.Device(devname, memmap)
dev_by_name[devname] = dev
devlist.append(dev)
allocfuncs = ["dmadebug_map_page", "dmadebug_map_sg", "dmadebug_alloc_coherent"]
freefuncs = ["dmadebug_unmap_page", "dmadebug_unmap_sg", "dmadebug_free_coherent"]
ignfuncs = []
if timestamp-lastupd > 0.1:
# just some debug prints for now
lastupd = timestamp
print lineidx,timestamp
le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
#disp_pic(le_pic2)
# animating the bitmap would be cool
#for row in le_pic:
# for i, a in enumerate(row):
# pass
#row[i] = 0.09 * a
if func in allocfuncs:
pages = dev_by_name[devname].alloc(addr, size)
for p in pages:
le_pic[p] = 1
buf_owners[addr] = dev_by_name[devname]
allocs += 1
curbufs += 1
allocsfrees[0][0].append(timestamp)
allocsfrees[0][1].append(allocs)
elif func in freefuncs:
if addr not in buf_owners:
if self._args.verbosity >= 1:
print "warning: %s unmapping unmapped %s" % (dev, addr)
buf_owners[addr] = dev
# fixme: move this to bitmap handling
# get to know the owners of bits
# allocs/frees calls should be traced separately from maps?
# map_pages is traced per page :(
if buf_owners[addr] != dev and self._args.verbosity >= 2:
print "note: %s unmapping [%d,%d) mapped by %s" % (
dev, addr, addr+size, buf_owners[addr])
pages = buf_owners[addr].free(addr, size)
for p in pages:
le_pic[p] = 0
frees -= 1
curbufs -= 1
allocsfrees[1][0].append(timestamp)
allocsfrees[1][1].append(frees)
elif func not in ignfuncs:
raise ValueError("unhandled %s" % func)
allocsfrees[2][0].append(timestamp)
allocsfrees[2][1].append(curbufs)
events.append(Event(timestamp, dev, self.dev_data(dev), size))
self.events = events
self.devlist = devlist
self.allocsfrees = allocsfrees
le_pic2 = [le_pic[i:i+ncols] for i in range(0, npages, ncols)]
# FIXME: not quite ready yet
disp_pic(le_pic2)
return
def mem(asid):
"""Create a new memory object for the given asid space"""
SZ_2G = 2 * 1024 * 1024 * 1024
SZ_1M = 1 * 1024 * 1024
# arch/arm/mach-tegra/include/mach/iomap.h TEGRA_SMMU_(BASE|SIZE)
base = 0x80000000
size = SZ_2G - SZ_1M
return smmu.Memory(base, size, asid)
def get_args():
"""Eat command line arguments, return argparse namespace for settings"""
parser = ArgumentParser()
parser.add_argument("filename", nargs="?",
help="trace file dump, stdin if not given")
parser.add_argument("-s", "--start", type=float, default=0,
help="start timestamp")
parser.add_argument("-e", "--end", type=float, default=1e9,
help="end timestamp")
parser.add_argument("-v", "--verbosity", action="count", default=0,
help="amount of extra information: once for warns (dup addrs), "
"twice for notices (different client in map/unmap), "
"three for echoing all back")
parser.add_argument("-p", "--processes", action="store_true",
help="use processes as memory clients instead of devices")
parser.add_argument("-n", "--no-plots", action="store_true",
help="Don't draw the plots, only read the trace")
parser.add_argument("-c", "--cache", action="store_true",
help="Pickle the data and make a cache file for fast reloading")
parser.add_argument("-m", "--max-plots", type=int,
help="Maximum number of clients to show; show biggest and sum others")
parser.add_argument("-M", "--max-metric", choices=Trace.get_metrics(),
default=Trace.get_metrics()[0],
help="Metric to use when choosing clients in --max-plots")
return parser.parse_args()
def main():
args = get_args()
trace = Trace(args)
trace.load_events()
trace.show()
if __name__ == "__main__":
main()
| gpl-2.0 |
RayMick/scikit-learn | sklearn/metrics/classification.py | 95 | 67713 | """Metrics to assess performance on classification task given class prediction
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Functions named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# Jatin Shah <jatindshah@gmail.com>
# Saurabh Jha <saurabh.jhaa@gmail.com>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse import csr_matrix
from scipy.spatial.distance import hamming as sp_hamming
from ..preprocessing import LabelBinarizer, label_binarize
from ..preprocessing import LabelEncoder
from ..utils import check_array
from ..utils import check_consistent_length
from ..utils import column_or_1d
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..utils.validation import _num_samples
from ..utils.sparsefuncs import count_nonzero
from ..utils.fixes import bincount
from .base import UndefinedMetricWarning
def _check_targets(y_true, y_pred):
"""Check that y_true and y_pred belong to the same classification task
This converts multiclass or binary types to a common shape, and raises a
ValueError for a mix of multilabel and multiclass targets, a mix of
multilabel formats, for the presence of continuous-valued or multioutput
targets, or for targets of different lengths.
Column vectors are squeezed to 1d, while multilabel formats are returned
as CSR sparse label indicators.
Parameters
----------
y_true : array-like
y_pred : array-like
Returns
-------
type_true : one of {'multilabel-indicator', 'multiclass', 'binary'}
The type of the true target data, as output by
``utils.multiclass.type_of_target``
y_true : array or indicator matrix
y_pred : array or indicator matrix
"""
check_consistent_length(y_true, y_pred)
type_true = type_of_target(y_true)
type_pred = type_of_target(y_pred)
y_type = set([type_true, type_pred])
if y_type == set(["binary", "multiclass"]):
y_type = set(["multiclass"])
if len(y_type) > 1:
raise ValueError("Can't handle mix of {0} and {1}"
"".format(type_true, type_pred))
    # We can't have more than one value in y_type => the set is no longer needed
y_type = y_type.pop()
# No metrics support "multiclass-multioutput" format
if (y_type not in ["binary", "multiclass", "multilabel-indicator"]):
raise ValueError("{0} is not supported".format(y_type))
if y_type in ["binary", "multiclass"]:
y_true = column_or_1d(y_true)
y_pred = column_or_1d(y_pred)
if y_type.startswith('multilabel'):
y_true = csr_matrix(y_true)
y_pred = csr_matrix(y_pred)
y_type = 'multilabel-indicator'
return y_type, y_true, y_pred
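# Worked example (assumed inputs): _check_targets([0, 1, 2], [0, 1, 1]) returns
# ('multiclass', array([0, 1, 2]), array([0, 1, 1])), because a mix of 'binary'
# and 'multiclass' targets is coerced to 'multiclass'.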
def _weighted_sum(sample_score, sample_weight, normalize=False):
if normalize:
return np.average(sample_score, weights=sample_weight)
elif sample_weight is not None:
return np.dot(sample_score, sample_weight)
else:
return sample_score.sum()
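# Worked example (assumed values): for score = np.array([True, False, True])
# and sample_weight=None, _weighted_sum(score, None, normalize=True) returns
# the mean 2/3, while normalize=False returns the raw count 2.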
def accuracy_score(y_true, y_pred, normalize=True, sample_weight=None):
"""Accuracy classification score.
In multilabel classification, this function computes subset accuracy:
the set of labels predicted for a sample must *exactly* match the
corresponding set of labels in y_true.
Read more in the :ref:`User Guide <accuracy_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of correctly classified samples.
Otherwise, return the fraction of correctly classified samples.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the correctly classified samples
(float), else it returns the number of correctly classified samples
(int).
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
jaccard_similarity_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equal
to the ``jaccard_similarity_score`` function.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import accuracy_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> accuracy_score(y_true, y_pred)
0.5
>>> accuracy_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> accuracy_score(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
differing_labels = count_nonzero(y_true - y_pred, axis=1)
score = differing_labels == 0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def confusion_matrix(y_true, y_pred, labels=None):
"""Compute confusion matrix to evaluate the accuracy of a classification
By definition a confusion matrix :math:`C` is such that :math:`C_{i, j}`
is equal to the number of observations known to be in group :math:`i` but
predicted to be in group :math:`j`.
Read more in the :ref:`User Guide <confusion_matrix>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to reorder
or select a subset of labels.
If none is given, those that appear at least once
in ``y_true`` or ``y_pred`` are used in sorted order.
Returns
-------
C : array, shape = [n_classes, n_classes]
Confusion matrix
References
----------
.. [1] `Wikipedia entry for the Confusion matrix
<http://en.wikipedia.org/wiki/Confusion_matrix>`_
Examples
--------
>>> from sklearn.metrics import confusion_matrix
>>> y_true = [2, 0, 2, 2, 0, 1]
>>> y_pred = [0, 0, 2, 2, 0, 2]
>>> confusion_matrix(y_true, y_pred)
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
>>> y_true = ["cat", "ant", "cat", "cat", "ant", "bird"]
>>> y_pred = ["ant", "ant", "cat", "cat", "ant", "cat"]
>>> confusion_matrix(y_true, y_pred, labels=["ant", "bird", "cat"])
array([[2, 0, 0],
[0, 0, 1],
[1, 0, 2]])
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type not in ("binary", "multiclass"):
raise ValueError("%s is not supported" % y_type)
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
n_labels = labels.size
label_to_ind = dict((y, x) for x, y in enumerate(labels))
# convert yt, yp into index
y_pred = np.array([label_to_ind.get(x, n_labels + 1) for x in y_pred])
y_true = np.array([label_to_ind.get(x, n_labels + 1) for x in y_true])
# intersect y_pred, y_true with labels, eliminate items not in labels
ind = np.logical_and(y_pred < n_labels, y_true < n_labels)
y_pred = y_pred[ind]
y_true = y_true[ind]
CM = coo_matrix((np.ones(y_true.shape[0], dtype=np.int), (y_true, y_pred)),
shape=(n_labels, n_labels)
).toarray()
return CM
def cohen_kappa_score(y1, y2, labels=None):
"""Cohen's kappa: a statistic that measures inter-annotator agreement.
This function computes Cohen's kappa [1], a score that expresses the level
of agreement between two annotators on a classification problem. It is
defined as
.. math::
\kappa = (p_o - p_e) / (1 - p_e)
where :math:`p_o` is the empirical probability of agreement on the label
assigned to any sample (the observed agreement ratio), and :math:`p_e` is
the expected agreement when both annotators assign labels randomly.
:math:`p_e` is estimated using a per-annotator empirical prior over the
class labels [2].
Parameters
----------
y1 : array, shape = [n_samples]
Labels assigned by the first annotator.
y2 : array, shape = [n_samples]
Labels assigned by the second annotator. The kappa statistic is
symmetric, so swapping ``y1`` and ``y2`` doesn't change the value.
labels : array, shape = [n_classes], optional
List of labels to index the matrix. This may be used to select a
subset of labels. If None, all labels that appear at least once in
``y1`` or ``y2`` are used.
Returns
-------
kappa : float
The kappa statistic, which is a number between -1 and 1. The maximum
value means complete agreement; zero or lower means chance agreement.
References
----------
.. [1] J. Cohen (1960). "A coefficient of agreement for nominal scales".
Educational and Psychological Measurement 20(1):37-46.
doi:10.1177/001316446002000104.
.. [2] R. Artstein and M. Poesio (2008). "Inter-coder agreement for
computational linguistics". Computational Linguistic 34(4):555-596.
"""
confusion = confusion_matrix(y1, y2, labels=labels)
P = confusion / float(confusion.sum())
p_observed = np.trace(P)
p_expected = np.dot(P.sum(axis=0), P.sum(axis=1))
return (p_observed - p_expected) / (1 - p_expected)
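# Worked example (assumed labels): for y1 = [0, 1, 1, 0] and y2 = [0, 1, 1, 1]
# the observed agreement is p_o = 0.75 and the chance agreement is p_e = 0.5,
# so cohen_kappa_score(y1, y2) == (0.75 - 0.5) / (1 - 0.5) == 0.5.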
def jaccard_similarity_score(y_true, y_pred, normalize=True,
sample_weight=None):
"""Jaccard similarity coefficient score
The Jaccard index [1], or Jaccard similarity coefficient, defined as
the size of the intersection divided by the size of the union of two label
sets, is used to compare set of predicted labels for a sample to the
corresponding set of labels in ``y_true``.
Read more in the :ref:`User Guide <jaccard_similarity_score>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the sum of the Jaccard similarity coefficient
over the sample set. Otherwise, return the average of Jaccard
similarity coefficient.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
score : float
If ``normalize == True``, return the average Jaccard similarity
coefficient, else it returns the sum of the Jaccard similarity
coefficient over the sample set.
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score, hamming_loss, zero_one_loss
Notes
-----
In binary and multiclass classification, this function is equivalent
to the ``accuracy_score``. It differs in the multilabel classification
problem.
References
----------
.. [1] `Wikipedia entry for the Jaccard index
<http://en.wikipedia.org/wiki/Jaccard_index>`_
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import jaccard_similarity_score
>>> y_pred = [0, 2, 1, 3]
>>> y_true = [0, 1, 2, 3]
>>> jaccard_similarity_score(y_true, y_pred)
0.5
>>> jaccard_similarity_score(y_true, y_pred, normalize=False)
2
In the multilabel case with binary label indicators:
>>> jaccard_similarity_score(np.array([[0, 1], [1, 1]]),\
np.ones((2, 2)))
0.75
"""
# Compute accuracy for each possible representation
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type.startswith('multilabel'):
with np.errstate(divide='ignore', invalid='ignore'):
# oddly, we may get an "invalid" rather than a "divide" error here
pred_or_true = count_nonzero(y_true + y_pred, axis=1)
pred_and_true = count_nonzero(y_true.multiply(y_pred), axis=1)
score = pred_and_true / pred_or_true
# If there is no label, it results in a Nan instead, we set
# the jaccard to 1: lim_{x->0} x/x = 1
# Note with py2.6 and np 1.3: we can't check safely for nan.
score[pred_or_true == 0.0] = 1.0
else:
score = y_true == y_pred
return _weighted_sum(score, sample_weight, normalize)
def matthews_corrcoef(y_true, y_pred):
"""Compute the Matthews correlation coefficient (MCC) for binary classes
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary (two-class) classifications. It takes into
account true and false positives and negatives and is generally regarded as
a balanced measure which can be used even if the classes are of very
different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
Only in the binary case does this relate to information about true and
false positives and negatives. See references below.
Read more in the :ref:`User Guide <matthews_corrcoef>`.
Parameters
----------
y_true : array, shape = [n_samples]
Ground truth (correct) target values.
y_pred : array, shape = [n_samples]
Estimated targets as returned by a classifier.
Returns
-------
mcc : float
The Matthews correlation coefficient (+1 represents a perfect
        prediction, 0 an average random prediction and -1 an inverse
prediction).
References
----------
.. [1] `Baldi, Brunak, Chauvin, Andersen and Nielsen, (2000). Assessing the
accuracy of prediction algorithms for classification: an overview
<http://dx.doi.org/10.1093/bioinformatics/16.5.412>`_
.. [2] `Wikipedia entry for the Matthews Correlation Coefficient
<http://en.wikipedia.org/wiki/Matthews_correlation_coefficient>`_
Examples
--------
>>> from sklearn.metrics import matthews_corrcoef
>>> y_true = [+1, +1, +1, -1]
>>> y_pred = [+1, -1, +1, +1]
>>> matthews_corrcoef(y_true, y_pred) # doctest: +ELLIPSIS
-0.33...
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if y_type != "binary":
raise ValueError("%s is not supported" % y_type)
lb = LabelEncoder()
lb.fit(np.hstack([y_true, y_pred]))
y_true = lb.transform(y_true)
y_pred = lb.transform(y_pred)
with np.errstate(invalid='ignore'):
mcc = np.corrcoef(y_true, y_pred)[0, 1]
if np.isnan(mcc):
return 0.
else:
return mcc
def zero_one_loss(y_true, y_pred, normalize=True, sample_weight=None):
"""Zero-one classification loss.
If normalize is ``True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int). The best
performance is 0.
Read more in the :ref:`User Guide <zero_one_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
normalize : bool, optional (default=True)
If ``False``, return the number of misclassifications.
Otherwise, return the fraction of misclassifications.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float or int,
If ``normalize == True``, return the fraction of misclassifications
(float), else it returns the number of misclassifications (int).
Notes
-----
In multilabel classification, the zero_one_loss function corresponds to
the subset zero-one loss: for each sample, the entire set of labels must be
correctly predicted, otherwise the loss for that sample is equal to one.
See also
--------
accuracy_score, hamming_loss, jaccard_similarity_score
Examples
--------
>>> from sklearn.metrics import zero_one_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> zero_one_loss(y_true, y_pred)
0.25
>>> zero_one_loss(y_true, y_pred, normalize=False)
1
In the multilabel case with binary label indicators:
>>> zero_one_loss(np.array([[0, 1], [1, 1]]), np.ones((2, 2)))
0.5
"""
score = accuracy_score(y_true, y_pred,
normalize=normalize,
sample_weight=sample_weight)
if normalize:
return 1 - score
else:
if sample_weight is not None:
n_samples = np.sum(sample_weight)
else:
n_samples = _num_samples(y_true)
return n_samples - score
def f1_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the F1 score, also known as balanced F-score or F-measure
The F1 score can be interpreted as a weighted average of the precision and
recall, where an F1 score reaches its best value at 1 and worst score at 0.
The relative contribution of precision and recall to the F1 score are
equal. The formula for the F1 score is::
F1 = 2 * (precision * recall) / (precision + recall)
In the multi-class and multi-label case, this is the weighted average of
the F1 score of each class.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
f1_score : float or array of float, shape = [n_unique_labels]
F1 score of the positive class in binary classification or weighted
average of the F1 scores of each class for the multiclass task.
References
----------
.. [1] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import f1_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> f1_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> f1_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.26...
>>> f1_score(y_true, y_pred, average=None)
array([ 0.8, 0. , 0. ])
"""
return fbeta_score(y_true, y_pred, 1, labels=labels,
pos_label=pos_label, average=average,
sample_weight=sample_weight)
def fbeta_score(y_true, y_pred, beta, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the F-beta score
The F-beta score is the weighted harmonic mean of precision and recall,
reaching its optimal value at 1 and its worst value at 0.
The `beta` parameter determines the weight of precision in the combined
score. ``beta < 1`` lends more weight to precision, while ``beta > 1``
favors recall (``beta -> 0`` considers only precision, ``beta -> inf``
only recall).
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
    beta : float
        Weight of precision in harmonic mean.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fbeta_score : float (if average is not None) or array of float, shape =\
[n_unique_labels]
F-beta score of the positive class in binary classification or weighted
average of the F-beta score of each class for the multiclass task.
References
----------
.. [1] R. Baeza-Yates and B. Ribeiro-Neto (2011).
Modern Information Retrieval. Addison Wesley, pp. 327-328.
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
Examples
--------
>>> from sklearn.metrics import fbeta_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> fbeta_score(y_true, y_pred, average='macro', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average='micro', beta=0.5)
... # doctest: +ELLIPSIS
0.33...
>>> fbeta_score(y_true, y_pred, average='weighted', beta=0.5)
... # doctest: +ELLIPSIS
0.23...
>>> fbeta_score(y_true, y_pred, average=None, beta=0.5)
... # doctest: +ELLIPSIS
array([ 0.71..., 0. , 0. ])
"""
_, _, f, _ = precision_recall_fscore_support(y_true, y_pred,
beta=beta,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('f-score',),
sample_weight=sample_weight)
return f
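# Illustrative note (not part of the original module): the value returned above
# follows F_beta = (1 + beta**2) * P * R / (beta**2 * P + R), computed in
# precision_recall_fscore_support below. For example, with hypothetical values
# P = 0.5, R = 1.0 and beta = 0.5:
#   (1 + 0.25) * 0.5 * 1.0 / (0.25 * 0.5 + 1.0) = 0.625 / 1.125 ~ 0.556
# which sits closer to the precision than F1 (~0.667) does, showing how
# beta < 1 emphasizes precision.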
def _prf_divide(numerator, denominator, metric, modifier, average, warn_for):
"""Performs division and handles divide-by-zero.
On zero-division, sets the corresponding result elements to zero
and raises a warning.
The metric, modifier and average arguments are used only for determining
an appropriate warning.
"""
result = numerator / denominator
mask = denominator == 0.0
if not np.any(mask):
return result
# remove infs
result[mask] = 0.0
# build appropriate warning
# E.g. "Precision and F-score are ill-defined and being set to 0.0 in
# labels with no predicted samples"
axis0 = 'sample'
axis1 = 'label'
if average == 'samples':
axis0, axis1 = axis1, axis0
if metric in warn_for and 'f-score' in warn_for:
msg_start = '{0} and F-score are'.format(metric.title())
elif metric in warn_for:
msg_start = '{0} is'.format(metric.title())
elif 'f-score' in warn_for:
msg_start = 'F-score is'
else:
return result
msg = ('{0} ill-defined and being set to 0.0 {{0}} '
'no {1} {2}s.'.format(msg_start, modifier, axis0))
if len(mask) == 1:
msg = msg.format('due to')
else:
msg = msg.format('in {0}s with'.format(axis1))
warnings.warn(msg, UndefinedMetricWarning, stacklevel=2)
return result
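# Illustrative sketch (hypothetical inputs, not from the original module):
#   _prf_divide(np.array([2., 0.]), np.array([4., 0.]),
#               'precision', 'predicted', None, ('precision',))
# returns array([0.5, 0.]) and emits an UndefinedMetricWarning for the
# zero-denominator entry instead of propagating inf/nan; it is normally
# called under np.errstate, as in precision_recall_fscore_support below.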
def precision_recall_fscore_support(y_true, y_pred, beta=1.0, labels=None,
pos_label=1, average=None,
warn_for=('precision', 'recall',
'f-score'),
sample_weight=None):
"""Compute precision, recall, F-measure and support for each class
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The F-beta score can be interpreted as a weighted harmonic mean of
the precision and recall, where an F-beta score reaches its best
value at 1 and worst score at 0.
The F-beta score weights recall more than precision by a factor of
``beta``. ``beta == 1.0`` means recall and precision are equally important.
The support is the number of occurrences of each class in ``y_true``.
If ``pos_label is None`` and in binary classification, this function
returns the average precision, recall and F-measure if ``average``
is one of ``'micro'``, ``'macro'``, ``'weighted'`` or ``'samples'``.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
beta : float, 1.0 by default
The strength of recall versus precision in the F-score.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None (default), 'binary', 'micro', 'macro', 'samples', \
'weighted']
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
warn_for : tuple or set, for internal use
This determines which warnings will be made in the case that this
function is being used to return only one of its metrics.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
    precision : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    recall : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    fbeta_score : float (if average is not None) or array of float, shape =\
        [n_unique_labels]
    support : int (if average is not None) or array of int, shape =\
        [n_unique_labels]
The number of occurrences of each label in ``y_true``.
References
----------
.. [1] `Wikipedia entry for the Precision and recall
<http://en.wikipedia.org/wiki/Precision_and_recall>`_
.. [2] `Wikipedia entry for the F1-score
<http://en.wikipedia.org/wiki/F1_score>`_
.. [3] `Discriminative Methods for Multi-labeled Classification Advances
in Knowledge Discovery and Data Mining (2004), pp. 22-30 by Shantanu
Godbole, Sunita Sarawagi
       <http://www.godbole.net/shantanu/pubs/multilabelsvm-pakdd04.pdf>`_
Examples
--------
>>> from sklearn.metrics import precision_recall_fscore_support
>>> y_true = np.array(['cat', 'dog', 'pig', 'cat', 'dog', 'pig'])
>>> y_pred = np.array(['cat', 'pig', 'dog', 'cat', 'cat', 'dog'])
>>> precision_recall_fscore_support(y_true, y_pred, average='macro')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='micro')
... # doctest: +ELLIPSIS
(0.33..., 0.33..., 0.33..., None)
>>> precision_recall_fscore_support(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
(0.22..., 0.33..., 0.26..., None)
It is possible to compute per-label precisions, recalls, F1-scores and
supports instead of averaging:
>>> precision_recall_fscore_support(y_true, y_pred, average=None,
... labels=['pig', 'dog', 'cat'])
... # doctest: +ELLIPSIS,+NORMALIZE_WHITESPACE
(array([ 0. , 0. , 0.66...]),
array([ 0., 0., 1.]),
array([ 0. , 0. , 0.8]),
array([2, 2, 2]))
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options and average != 'binary':
raise ValueError('average has to be one of ' +
str(average_options))
if beta <= 0:
raise ValueError("beta should be >0 in the F-beta score")
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
present_labels = unique_labels(y_true, y_pred)
if average == 'binary' and (y_type != 'binary' or pos_label is None):
warnings.warn('The default `weighted` averaging is deprecated, '
'and from version 0.18, use of precision, recall or '
'F-score with multiclass or multilabel data or '
'pos_label=None will result in an exception. '
'Please set an explicit value for `average`, one of '
'%s. In cross validation use, for instance, '
'scoring="f1_weighted" instead of scoring="f1".'
% str(average_options), DeprecationWarning, stacklevel=2)
average = 'weighted'
if y_type == 'binary' and pos_label is not None and average is not None:
if average != 'binary':
warnings.warn('From version 0.18, binary input will not be '
'handled specially when using averaged '
'precision/recall/F-score. '
'Please use average=\'binary\' to report only the '
'positive class performance.', DeprecationWarning)
if labels is None or len(labels) <= 2:
if pos_label not in present_labels:
if len(present_labels) < 2:
# Only negative labels
return (0., 0., 0., 0)
else:
raise ValueError("pos_label=%r is not a valid label: %r" %
(pos_label, present_labels))
labels = [pos_label]
if labels is None:
labels = present_labels
n_labels = None
else:
n_labels = len(labels)
labels = np.hstack([labels, np.setdiff1d(present_labels, labels,
assume_unique=True)])
### Calculate tp_sum, pred_sum, true_sum ###
if y_type.startswith('multilabel'):
sum_axis = 1 if average == 'samples' else 0
# All labels are index integers for multilabel.
# Select labels:
if not np.all(labels == present_labels):
if np.max(labels) > np.max(present_labels):
raise ValueError('All labels must be in [0, n labels). '
'Got %d > %d' %
(np.max(labels), np.max(present_labels)))
if np.min(labels) < 0:
raise ValueError('All labels must be in [0, n labels). '
'Got %d < 0' % np.min(labels))
y_true = y_true[:, labels[:n_labels]]
y_pred = y_pred[:, labels[:n_labels]]
# calculate weighted counts
true_and_pred = y_true.multiply(y_pred)
tp_sum = count_nonzero(true_and_pred, axis=sum_axis,
sample_weight=sample_weight)
pred_sum = count_nonzero(y_pred, axis=sum_axis,
sample_weight=sample_weight)
true_sum = count_nonzero(y_true, axis=sum_axis,
sample_weight=sample_weight)
elif average == 'samples':
raise ValueError("Sample-based precision, recall, fscore is "
"not meaningful outside multilabel "
"classification. See the accuracy_score instead.")
else:
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
y_pred = le.transform(y_pred)
sorted_labels = le.classes_
# labels are now from 0 to len(labels) - 1 -> use bincount
tp = y_true == y_pred
tp_bins = y_true[tp]
if sample_weight is not None:
tp_bins_weights = np.asarray(sample_weight)[tp]
else:
tp_bins_weights = None
if len(tp_bins):
tp_sum = bincount(tp_bins, weights=tp_bins_weights,
minlength=len(labels))
else:
# Pathological case
true_sum = pred_sum = tp_sum = np.zeros(len(labels))
if len(y_pred):
pred_sum = bincount(y_pred, weights=sample_weight,
minlength=len(labels))
if len(y_true):
true_sum = bincount(y_true, weights=sample_weight,
minlength=len(labels))
# Retain only selected labels
indices = np.searchsorted(sorted_labels, labels[:n_labels])
tp_sum = tp_sum[indices]
true_sum = true_sum[indices]
pred_sum = pred_sum[indices]
if average == 'micro':
tp_sum = np.array([tp_sum.sum()])
pred_sum = np.array([pred_sum.sum()])
true_sum = np.array([true_sum.sum()])
### Finally, we have all our sufficient statistics. Divide! ###
beta2 = beta ** 2
with np.errstate(divide='ignore', invalid='ignore'):
# Divide, and on zero-division, set scores to 0 and warn:
# Oddly, we may get an "invalid" rather than a "divide" error
# here.
precision = _prf_divide(tp_sum, pred_sum,
'precision', 'predicted', average, warn_for)
recall = _prf_divide(tp_sum, true_sum,
'recall', 'true', average, warn_for)
        # Don't need to warn for F: either P or R warned, or tp == 0 where pred
# and true are nonzero, in which case, F is well-defined and zero
f_score = ((1 + beta2) * precision * recall /
(beta2 * precision + recall))
f_score[tp_sum == 0] = 0.0
## Average the results ##
if average == 'weighted':
weights = true_sum
if weights.sum() == 0:
return 0, 0, 0, None
elif average == 'samples':
weights = sample_weight
else:
weights = None
if average is not None:
assert average != 'binary' or len(precision) == 1
precision = np.average(precision, weights=weights)
recall = np.average(recall, weights=weights)
f_score = np.average(f_score, weights=weights)
true_sum = None # return no support
return precision, recall, f_score, true_sum
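# Illustrative note (not part of the original module): for the cat/dog/pig
# doctest above, only 2 of the 6 predictions match the truth, so with
# average='micro' the pooled counts give precision = recall = F = 2 / 6 =
# 0.33..., which is exactly what the micro-averaged doctest reports.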
def precision_score(y_true, y_pred, labels=None, pos_label=1,
average='binary', sample_weight=None):
"""Compute the precision
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Precision of the positive class in binary classification or weighted
average of the precision of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import precision_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> precision_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> precision_score(y_true, y_pred, average='weighted')
... # doctest: +ELLIPSIS
0.22...
>>> precision_score(y_true, y_pred, average=None) # doctest: +ELLIPSIS
array([ 0.66..., 0. , 0. ])
"""
p, _, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('precision',),
sample_weight=sample_weight)
return p
def recall_score(y_true, y_pred, labels=None, pos_label=1, average='binary',
sample_weight=None):
"""Compute the recall
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The best value is 1 and the worst value is 0.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : list, optional
The set of labels to include when ``average != 'binary'``, and their
order if ``average is None``. Labels present in the data can be
excluded, for example to calculate a multiclass average ignoring a
majority negative class, while labels not present in the data will
result in 0 components in a macro average. For multilabel targets,
labels are column indices. By default, all labels in ``y_true`` and
``y_pred`` are used in sorted order.
pos_label : str or int, 1 by default
The class to report if ``average='binary'``. Until version 0.18 it is
necessary to set ``pos_label=None`` if seeking to use another averaging
method over binary targets.
average : string, [None, 'binary' (default), 'micro', 'macro', 'samples', \
'weighted']
This parameter is required for multiclass/multilabel targets.
If ``None``, the scores for each class are returned. Otherwise, this
determines the type of averaging performed on the data:
``'binary'``:
Only report results for the class specified by ``pos_label``.
This is applicable only if targets (``y_{true,pred}``) are binary.
``'micro'``:
Calculate metrics globally by counting the total true positives,
false negatives and false positives.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label). This
alters 'macro' to account for label imbalance; it can result in an
F-score that is not between precision and recall.
``'samples'``:
Calculate metrics for each instance, and find their average (only
meaningful for multilabel classification where this differs from
:func:`accuracy_score`).
Note that if ``pos_label`` is given in binary classification with
`average != 'binary'`, only that positive class is reported. This
behavior is deprecated and will change in version 0.18.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
recall : float (if average is not None) or array of float, shape =\
[n_unique_labels]
Recall of the positive class in binary classification or weighted
average of the recall of each class for the multiclass task.
Examples
--------
>>> from sklearn.metrics import recall_score
>>> y_true = [0, 1, 2, 0, 1, 2]
>>> y_pred = [0, 2, 1, 0, 0, 1]
>>> recall_score(y_true, y_pred, average='macro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='micro') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average='weighted') # doctest: +ELLIPSIS
0.33...
>>> recall_score(y_true, y_pred, average=None)
array([ 1., 0., 0.])
"""
_, r, _, _ = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
pos_label=pos_label,
average=average,
warn_for=('recall',),
sample_weight=sample_weight)
return r
def classification_report(y_true, y_pred, labels=None, target_names=None,
sample_weight=None, digits=2):
"""Build a text report showing the main classification metrics
Read more in the :ref:`User Guide <classification_report>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) target values.
y_pred : 1d array-like, or label indicator array / sparse matrix
Estimated targets as returned by a classifier.
labels : array, shape = [n_labels]
Optional list of label indices to include in the report.
target_names : list of strings
Optional display names matching the labels (same order).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
digits : int
Number of digits for formatting output floating point values
Returns
-------
report : string
Text summary of the precision, recall, F1 score for each class.
Examples
--------
>>> from sklearn.metrics import classification_report
>>> y_true = [0, 1, 2, 2, 2]
>>> y_pred = [0, 0, 2, 2, 1]
>>> target_names = ['class 0', 'class 1', 'class 2']
>>> print(classification_report(y_true, y_pred, target_names=target_names))
precision recall f1-score support
<BLANKLINE>
class 0 0.50 1.00 0.67 1
class 1 0.00 0.00 0.00 1
class 2 1.00 0.67 0.80 3
<BLANKLINE>
avg / total 0.70 0.60 0.61 5
<BLANKLINE>
"""
if labels is None:
labels = unique_labels(y_true, y_pred)
else:
labels = np.asarray(labels)
last_line_heading = 'avg / total'
if target_names is None:
width = len(last_line_heading)
target_names = ['%s' % l for l in labels]
else:
width = max(len(cn) for cn in target_names)
width = max(width, len(last_line_heading), digits)
headers = ["precision", "recall", "f1-score", "support"]
fmt = '%% %ds' % width # first column: class name
fmt += ' '
fmt += ' '.join(['% 9s' for _ in headers])
fmt += '\n'
headers = [""] + headers
report = fmt % tuple(headers)
report += '\n'
p, r, f1, s = precision_recall_fscore_support(y_true, y_pred,
labels=labels,
average=None,
sample_weight=sample_weight)
for i, label in enumerate(labels):
values = [target_names[i]]
for v in (p[i], r[i], f1[i]):
values += ["{0:0.{1}f}".format(v, digits)]
values += ["{0}".format(s[i])]
report += fmt % tuple(values)
report += '\n'
# compute averages
values = [last_line_heading]
for v in (np.average(p, weights=s),
np.average(r, weights=s),
np.average(f1, weights=s)):
values += ["{0:0.{1}f}".format(v, digits)]
values += ['{0}'.format(np.sum(s))]
report += fmt % tuple(values)
return report
def hamming_loss(y_true, y_pred, classes=None):
"""Compute the average Hamming loss.
The Hamming loss is the fraction of labels that are incorrectly predicted.
Read more in the :ref:`User Guide <hamming_loss>`.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : 1d array-like, or label indicator array / sparse matrix
Predicted labels, as returned by a classifier.
classes : array, shape = [n_labels], optional
Integer array of labels.
Returns
-------
loss : float or int,
Return the average Hamming loss between element of ``y_true`` and
``y_pred``.
See Also
--------
accuracy_score, jaccard_similarity_score, zero_one_loss
Notes
-----
    In multiclass classification, the Hamming loss corresponds to the Hamming
    distance between ``y_true`` and ``y_pred``, which is equivalent to the
    subset ``zero_one_loss`` function.
    In multilabel classification, the Hamming loss is different from the
    subset zero-one loss. The zero-one loss considers the entire set of labels
    for a given sample incorrect if it does not entirely match the true set of
    labels. The Hamming loss is more forgiving in that it penalizes only the
    individual labels that are wrong.
    The Hamming loss is upper-bounded by the subset zero-one loss. When
    normalized over samples, the Hamming loss is always between 0 and 1.
References
----------
.. [1] Grigorios Tsoumakas, Ioannis Katakis. Multi-Label Classification:
An Overview. International Journal of Data Warehousing & Mining,
3(3), 1-13, July-September 2007.
.. [2] `Wikipedia entry on the Hamming distance
<http://en.wikipedia.org/wiki/Hamming_distance>`_
Examples
--------
>>> from sklearn.metrics import hamming_loss
>>> y_pred = [1, 2, 3, 4]
>>> y_true = [2, 2, 3, 4]
>>> hamming_loss(y_true, y_pred)
0.25
In the multilabel case with binary label indicators:
>>> hamming_loss(np.array([[0, 1], [1, 1]]), np.zeros((2, 2)))
0.75
"""
y_type, y_true, y_pred = _check_targets(y_true, y_pred)
if classes is None:
classes = unique_labels(y_true, y_pred)
else:
classes = np.asarray(classes)
if y_type.startswith('multilabel'):
n_differences = count_nonzero(y_true - y_pred)
return (n_differences / (y_true.shape[0] * len(classes)))
elif y_type in ["binary", "multiclass"]:
return sp_hamming(y_true, y_pred)
else:
raise ValueError("{0} is not supported".format(y_type))
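# Illustrative note (not part of the original module): in the multilabel
# doctest above, y_true = [[0, 1], [1, 1]] and a prediction of all zeros
# disagree in 3 of the 2 * 2 label slots, so the Hamming loss is 3 / 4 = 0.75.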
def log_loss(y_true, y_pred, eps=1e-15, normalize=True, sample_weight=None):
"""Log loss, aka logistic loss or cross-entropy loss.
This is the loss function used in (multinomial) logistic regression
and extensions of it such as neural networks, defined as the negative
log-likelihood of the true labels given a probabilistic classifier's
predictions. For a single sample with true label yt in {0,1} and
estimated probability yp that yt = 1, the log loss is
-log P(yt|yp) = -(yt log(yp) + (1 - yt) log(1 - yp))
Read more in the :ref:`User Guide <log_loss>`.
Parameters
----------
y_true : array-like or label indicator matrix
Ground truth (correct) labels for n_samples samples.
y_pred : array-like of float, shape = (n_samples, n_classes)
Predicted probabilities, as returned by a classifier's
predict_proba method.
eps : float
Log loss is undefined for p=0 or p=1, so probabilities are
clipped to max(eps, min(1 - eps, p)).
normalize : bool, optional (default=True)
If true, return the mean loss per sample.
Otherwise, return the sum of the per-sample losses.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
Examples
--------
>>> log_loss(["spam", "ham", "ham", "spam"], # doctest: +ELLIPSIS
... [[.1, .9], [.9, .1], [.8, .2], [.35, .65]])
0.21616...
References
----------
C.M. Bishop (2006). Pattern Recognition and Machine Learning. Springer,
p. 209.
Notes
-----
The logarithm used is the natural logarithm (base-e).
"""
lb = LabelBinarizer()
T = lb.fit_transform(y_true)
if T.shape[1] == 1:
T = np.append(1 - T, T, axis=1)
# Clipping
Y = np.clip(y_pred, eps, 1 - eps)
# This happens in cases when elements in y_pred have type "str".
if not isinstance(Y, np.ndarray):
raise ValueError("y_pred should be an array of floats.")
# If y_pred is of single dimension, assume y_true to be binary
# and then check.
if Y.ndim == 1:
Y = Y[:, np.newaxis]
if Y.shape[1] == 1:
Y = np.append(1 - Y, Y, axis=1)
# Check if dimensions are consistent.
check_consistent_length(T, Y)
T = check_array(T)
Y = check_array(Y)
if T.shape[1] != Y.shape[1]:
raise ValueError("y_true and y_pred have different number of classes "
"%d, %d" % (T.shape[1], Y.shape[1]))
# Renormalize
Y /= Y.sum(axis=1)[:, np.newaxis]
loss = -(T * np.log(Y)).sum(axis=1)
return _weighted_sum(loss, sample_weight, normalize)
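# Illustrative note (not part of the original module): for a single sample with
# true label yt = 1 and predicted probability yp = 0.9 for that label, the
# per-sample loss computed above is -(1 * log(0.9) + 0 * log(0.1)) ~ 0.105;
# with normalize=True, log_loss returns the mean of these per-sample losses.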
def hinge_loss(y_true, pred_decision, labels=None, sample_weight=None):
"""Average hinge loss (non-regularized)
In binary class case, assuming labels in y_true are encoded with +1 and -1,
when a prediction mistake is made, ``margin = y_true * pred_decision`` is
always negative (since the signs disagree), implying ``1 - margin`` is
always greater than 1. The cumulated hinge loss is therefore an upper
bound of the number of mistakes made by the classifier.
In multiclass case, the function expects that either all the labels are
included in y_true or an optional labels argument is provided which
contains all the labels. The multilabel margin is calculated according
to Crammer-Singer's method. As in the binary case, the cumulated hinge loss
is an upper bound of the number of mistakes made by the classifier.
Read more in the :ref:`User Guide <hinge_loss>`.
Parameters
----------
y_true : array, shape = [n_samples]
True target, consisting of integers of two values. The positive label
must be greater than the negative label.
pred_decision : array, shape = [n_samples] or [n_samples, n_classes]
Predicted decisions, as output by decision_function (floats).
labels : array, optional, default None
Contains all the labels for the problem. Used in multiclass hinge loss.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] `Wikipedia entry on the Hinge loss
<http://en.wikipedia.org/wiki/Hinge_loss>`_
.. [2] Koby Crammer, Yoram Singer. On the Algorithmic
Implementation of Multiclass Kernel-based Vector
Machines. Journal of Machine Learning Research 2,
(2001), 265-292
.. [3] `L1 AND L2 Regularization for Multiclass Hinge Loss Models
by Robert C. Moore, John DeNero.
<http://www.ttic.edu/sigml/symposium2011/papers/
Moore+DeNero_Regularization.pdf>`_
Examples
--------
>>> from sklearn import svm
>>> from sklearn.metrics import hinge_loss
>>> X = [[0], [1]]
>>> y = [-1, 1]
>>> est = svm.LinearSVC(random_state=0)
>>> est.fit(X, y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=0, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-2], [3], [0.5]])
>>> pred_decision # doctest: +ELLIPSIS
array([-2.18..., 2.36..., 0.09...])
>>> hinge_loss([-1, 1, 1], pred_decision) # doctest: +ELLIPSIS
0.30...
In the multiclass case:
>>> X = np.array([[0], [1], [2], [3]])
>>> Y = np.array([0, 1, 2, 3])
>>> labels = np.array([0, 1, 2, 3])
>>> est = svm.LinearSVC()
>>> est.fit(X, Y)
LinearSVC(C=1.0, class_weight=None, dual=True, fit_intercept=True,
intercept_scaling=1, loss='squared_hinge', max_iter=1000,
multi_class='ovr', penalty='l2', random_state=None, tol=0.0001,
verbose=0)
>>> pred_decision = est.decision_function([[-1], [2], [3]])
>>> y_true = [0, 2, 3]
>>> hinge_loss(y_true, pred_decision, labels) #doctest: +ELLIPSIS
0.56...
"""
check_consistent_length(y_true, pred_decision, sample_weight)
pred_decision = check_array(pred_decision, ensure_2d=False)
y_true = column_or_1d(y_true)
y_true_unique = np.unique(y_true)
if y_true_unique.size > 2:
if (labels is None and pred_decision.ndim > 1 and
(np.size(y_true_unique) != pred_decision.shape[1])):
raise ValueError("Please include all labels in y_true "
"or pass labels as third argument")
if labels is None:
labels = y_true_unique
le = LabelEncoder()
le.fit(labels)
y_true = le.transform(y_true)
mask = np.ones_like(pred_decision, dtype=bool)
mask[np.arange(y_true.shape[0]), y_true] = False
margin = pred_decision[~mask]
margin -= np.max(pred_decision[mask].reshape(y_true.shape[0], -1),
axis=1)
else:
# Handles binary class case
# this code assumes that positive and negative labels
# are encoded as +1 and -1 respectively
pred_decision = column_or_1d(pred_decision)
pred_decision = np.ravel(pred_decision)
lbin = LabelBinarizer(neg_label=-1)
y_true = lbin.fit_transform(y_true)[:, 0]
try:
margin = y_true * pred_decision
except TypeError:
raise TypeError("pred_decision should be an array of floats.")
losses = 1 - margin
# The hinge_loss doesn't penalize good enough predictions.
losses[losses <= 0] = 0
return np.average(losses, weights=sample_weight)
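# Illustrative note (not part of the original module): in the binary doctest
# above, the margins y_true * pred_decision are roughly [2.18, 2.36, 0.09],
# the per-sample losses max(0, 1 - margin) are about [0, 0, 0.91], and their
# mean ~ 0.30 matches the doctest output.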
def _check_binary_probabilistic_predictions(y_true, y_prob):
"""Check that y_true is binary and y_prob contains valid probabilities"""
check_consistent_length(y_true, y_prob)
labels = np.unique(y_true)
if len(labels) != 2:
raise ValueError("Only binary classification is supported. "
"Provided labels %s." % labels)
if y_prob.max() > 1:
raise ValueError("y_prob contains values greater than 1.")
if y_prob.min() < 0:
raise ValueError("y_prob contains values less than 0.")
return label_binarize(y_true, labels)[:, 0]
def brier_score_loss(y_true, y_prob, sample_weight=None, pos_label=None):
"""Compute the Brier score.
The smaller the Brier score, the better, hence the naming with "loss".
    Across all items in a set of N predictions, the Brier score measures the
mean squared difference between (1) the predicted probability assigned
to the possible outcomes for item i, and (2) the actual outcome.
Therefore, the lower the Brier score is for a set of predictions, the
better the predictions are calibrated. Note that the Brier score always
takes on a value between zero and one, since this is the largest
possible difference between a predicted probability (which must be
between zero and one) and the actual outcome (which can take on values
of only 0 and 1).
The Brier score is appropriate for binary and categorical outcomes that
can be structured as true or false, but is inappropriate for ordinal
variables which can take on three or more values (this is because the
Brier score assumes that all possible outcomes are equivalently
"distant" from one another). Which label is considered to be the positive
label is controlled via the parameter pos_label, which defaults to 1.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
pos_label : int (default: None)
Label of the positive class. If None, the maximum label is used as
positive class
Returns
-------
score : float
Brier score
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import brier_score_loss
>>> y_true = np.array([0, 1, 1, 0])
>>> y_true_categorical = np.array(["spam", "ham", "ham", "spam"])
>>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
>>> brier_score_loss(y_true, y_prob) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, 1-y_prob, pos_label=0) # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true_categorical, y_prob, \
pos_label="ham") # doctest: +ELLIPSIS
0.037...
>>> brier_score_loss(y_true, np.array(y_prob) > 0.5)
0.0
References
----------
http://en.wikipedia.org/wiki/Brier_score
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if pos_label is None:
pos_label = y_true.max()
y_true = np.array(y_true == pos_label, int)
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
return np.average((y_true - y_prob) ** 2, weights=sample_weight)
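# Illustrative note (not part of the original module): for the first doctest
# above, y_true = [0, 1, 1, 0] and y_prob = [0.1, 0.9, 0.8, 0.3] give squared
# errors [0.01, 0.01, 0.04, 0.09], whose mean 0.15 / 4 = 0.0375 matches the
# 0.037... doctest output.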
| bsd-3-clause |
phgupta/Building-Analytics | building-analytics/TS_Util_Clean_Data.py | 1 | 15125 | # -*- coding: utf-8 -*-
"""
@author : Armando Casillas <armcasillas@ucdavis.edu>
@author : Marco Pritoni <marco.pritoni@gmail.com>
Created on Wed Jul 26 2017
Update Aug 08 2017
"""
from __future__ import division
import pandas as pd
import os
import sys
import requests as req
import json
import numpy as np
import datetime
import pytz
from matplotlib import style
import matplotlib
class TS_Util(object):
########################################################################
## simple load file section - eventually replace this with CSV_Importer
def _set_TS_index(self, data):
        '''
        Sets a datetime index and coerces all columns to numeric types.
        Parameters
        ----------
        data: Dataframe
            Raw time-series dataframe with timestamps in the index.
        Returns
        -------
        data: Dataframe
            Dataframe with a DatetimeIndex and numeric columns.
        '''
# set index
data.index = pd.to_datetime(data.index)
# format types to numeric
for col in data.columns:
data[col] = pd.to_numeric(data[col], errors="coerce")
return data
def load_TS(self, fileName, folder):
        '''
        Loads a CSV file of time-series data and prepares its index.
        Parameters
        ----------
        fileName: string
            Name of the CSV file to load.
        folder: string
            Path of the folder containing the file.
        Returns
        -------
        data: Dataframe
            Dataframe with a DatetimeIndex and numeric columns.
        '''
path = os.path.join(folder, fileName)
data = pd.read_csv(path, index_col=0)
data = self._set_TS_index(data)
return data
########################################################################
## time correction for time zones - eventually replace this with CSV_Importer
def _utc_to_local(self, data, local_zone="America/Los_Angeles"):
'''
        Function takes in a pandas dataframe and adjusts its index to the local timezone requested by the user
Parameters
----------
data: Dataframe
pandas dataframe of json timeseries response from server
local_zone: string
pytz.timezone string of specified local timezone to change index to
Returns
-------
data: Dataframe
Pandas dataframe with timestamp index adjusted for local timezone
'''
data.index = data.index.tz_localize(pytz.utc).tz_convert(
local_zone) # accounts for localtime shift
# Gets rid of extra offset information so can compare with csv data
data.index = data.index.tz_localize(None)
return data
def _local_to_utc(self, timestamp, local_zone="America/Los_Angeles"):
        '''
        Converts a local timestamp to UTC, so that a data request made in
        local time can be expressed relative to UTC.
        Parameters
        ----------
        timestamp: str or datetime-like
            Timestamp expressed in the local timezone.
        local_zone: string
            pytz.timezone string of the local timezone the timestamp is in.
        Returns
        -------
        timestamp_new: string
            Timestamp converted to UTC, formatted as '%Y-%m-%d %H:%M:%S'.
        '''
timestamp_new = pd.to_datetime(
timestamp, infer_datetime_format=True, errors='coerce')
timestamp_new = timestamp_new.tz_localize(
local_zone).tz_convert(pytz.utc)
timestamp_new = timestamp_new.strftime('%Y-%m-%d %H:%M:%S')
return timestamp_new
########################################################################
## remove start and end NaN: Note issue with multi-column df
def remove_start_NaN(self, data, var=None):
'''
Parameters
----------
Returns
-------
'''
if var: # limit to one or some variables
start_ok_data = data[var].first_valid_index()
else:
start_ok_data = data.first_valid_index()
data = data.loc[start_ok_data:, :]
return data
def remove_end_NaN(self, data, var=None):
'''
Parameters
----------
Returns
-------
'''
if var: # limit to one or some variables
end_ok_data = data[var].last_valid_index()
else:
end_ok_data = data.last_valid_index()
data = data.loc[:end_ok_data, :]
return data
########################################################################
## Missing data section
def _find_missing_return_frame(self, data):
'''
Function takes in pandas dataframe and find missing values in each column
Parameters
----------
data: Dataframe
Returns
-------
data: Dataframe
'''
return data.isnull()
def _find_missing(self, data, return_bool=False):
if return_bool == False: # this returns the full table with True where the condition is true
data = self._find_missing_return_frame(data)
return data
elif return_bool == "any": # this returns a bool selector if any of the column is True
bool_sel = self._find_missing_return_frame(data).any(axis=1)
return bool_sel
elif return_bool == "all": # this returns a bool selector if all of the column are True
bool_sel = self._find_missing_return_frame(data).all(axis=1)
return bool_sel
else:
print("error in multi_col_how input")
return
def display_missing(self, data, return_bool="any"):
'''
Parameters
----------
Returns
-------
'''
if return_bool == "any":
bool_sel = self._find_missing(data,return_bool="any")
elif return_bool == "all":
bool_sel = self._find_missing(data,return_bool="all")
return data[bool_sel]
def count_missing(self, data, output="number"):
        '''
        Counts missing values in each column.
        Parameters
        ----------
        data: Dataframe
        output: string
            "number" returns the count of missing values per column;
            "percent" returns the same count as a percentage of all rows.
        Returns
        -------
        count per column, as a number or a percentage
        '''
count = self._find_missing(data,return_bool=False).sum()
if output == "number":
return count
elif output == "percent":
return ((count / (data.shape[0])) * 100)
def remove_missing(self, data, return_bool="any"):
'''
Parameters
----------
Returns
-------
'''
if return_bool == "any":
bool_sel = self._find_missing(data,return_bool="any")
elif return_bool == "all":
bool_sel = self._find_missing(data,return_bool="all")
return data[~bool_sel]
########################################################################
## Out of Bound section
def _find_outOfBound(self, data, lowBound, highBound):
'''
Parameters
----------
Returns
-------
'''
data = ((data < lowBound) | (data > highBound))
return data
def display_outOfBound(self, data, lowBound, highBound):
'''
Parameters
----------
Returns
-------
'''
data = data[self._find_outOfBound(
data, lowBound, highBound).any(axis=1)]
return data
def count_outOfBound(self, data, lowBound, highBound, output):
'''
Parameters
----------
Returns
-------
'''
count = self._find_outOfBound(data, lowBound, highBound).sum()
if output == "number":
return count
elif output == "percent":
return count / (data.shape[0]) * 1.0 * 100
def remove_outOfBound(self, data, lowBound, highBound):
'''
Parameters
----------
Returns
-------
'''
data = data[~self._find_outOfBound(
data, lowBound, highBound).any(axis=1)]
return data
########################################################################
## Outliers section
def _calc_outliers_bounds(self, data, method, coeff, window):
        '''
        Calculates the low/high bounds used to flag outliers.
        Parameters
        ----------
        data: Dataframe
        method: string
            "std" (mean +/- coeff * std), "rstd" (rolling mean/std),
            "rmedian" (rolling median +/- coeff), "iqr" (quartiles -/+ coeff * IQR)
            or "qtl" (0.5% and 99.5% quantiles).
        coeff: numeric
            Multiplier for the std/IQR methods, or absolute threshold for "rmedian".
        window: int
            Window size used by the rolling methods.
        Returns
        -------
        lowBound, highBound
        '''
if method == "std":
lowBound = (data.mean(axis=0) - coeff * data.std(axis=0)).values[0]
highBound = (data.mean(axis=0) + coeff * data.std(axis=0)).values[0]
elif method == "rstd":
            rl_mean = data.rolling(window=window).mean()
            rl_std = data.rolling(window=window).std().fillna(method='bfill').fillna(method='ffill')
lowBound = rl_mean - coeff * rl_std
highBound = rl_mean + coeff * rl_std
elif method == "rmedian":
rl_med = data.rolling(window=window, center=True).median().fillna(
method='bfill').fillna(method='ffill')
lowBound = rl_med - coeff
highBound = rl_med + coeff
elif method == "iqr": # coeff is multip for std and IQR or threshold for rolling median
Q1 = data.quantile(.25) # coeff is multip for std or % of quartile
Q3 = data.quantile(.75)
IQR = Q3 - Q1
lowBound = Q1 - coeff * IQR
highBound = Q3 + coeff * IQR
elif method == "qtl":
lowBound = data.quantile(.005)
highBound = data.quantile(.995)
else:
print ("method chosen does not exist")
lowBound = None
highBound = None
return lowBound, highBound
def display_outliers(self, data, method, coeff, window=10):
'''
Parameters
----------
Returns
-------
'''
lowBound, highBound = self._calc_outliers_bounds(
data, method, coeff, window)
data = self.display_outOfBound(data, lowBound, highBound)
return data
def count_outliers(self, data, method, coeff, output, window=10):
'''
Parameters
----------
Returns
-------
'''
lowBound, highBound = self._calc_outliers_bounds(
data, method, coeff, window)
count = self.count_outOfBound(data, lowBound, highBound, output=output)
return count
def remove_outliers(self, data, method, coeff, window=10):
'''
Parameters
----------
Returns
-------
'''
lowBound, highBound = self._calc_outliers_bounds(
data, method, coeff, window)
data = self.remove_outOfBound(data, lowBound, highBound)
return data
########################################################################
## If condition section
def _find_equal_to_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
#print(val)
bool_sel = (data == val)
return bool_sel
def _find_greater_than_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = (data > val)
return bool_sel
def _find_less_than_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = (data < val)
return bool_sel
def _find_greater_than_or_equal_to_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = (data >= val)
return bool_sel
def _find_less_than_or_equal_to_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = (data <= val)
return bool_sel
def _find_different_from_values(self, data, val):
'''
Parameters
----------
Returns
-------
'''
bool_sel = ~(data == val)
return bool_sel
def count_if(self, data, condition, val, output="number"):
"""
condition = "equal", "below", "above"
val = value to compare against
how = "number" or "percent"
"""
if condition == "=":
count = self._find_equal_to_values(data,val).sum()
elif condition == ">":
count = self._find_greater_than_values(data,val).sum()
elif condition == "<":
count = self._find_less_than_values(data,val).sum()
elif condition == ">=":
count = self._find_greater_than_or_equal_to_values(data,val).sum()
elif condition == "<=":
count = self._find_less_than_or_equal_to_values(data,val).sum()
elif condition == "!=":
count = self._find_different_from_values(data,val).sum()
if output == "number":
return count
elif output == "percent":
return count/data.shape[0]*1.0*100
return count
########################################################################
## Missing Data Events section
def get_start_events(self, data, var = "T_ctrl [oF]"): # create list of start events
'''
Parameters
----------
Returns
-------
'''
start_event = (data[var].isnull()) & ~(data[var].shift().isnull()) # find NaN start event
start = data[start_event].index.tolist() # selector for these events
if np.isnan(data.loc[data.index[0],var]): # if the first record is NaN
start = [data.index[0]] + start # add first record as starting time for first NaN event
else:
start = start
return start
def get_end_events(self, data, var = "T_ctrl [oF]"): # create list of end events
'''
Parameters
----------
Returns
-------
'''
end_events = ~(data[var].isnull()) & (data[var].shift().isnull()) # find NaN end events
end = data[end_events].index.tolist() # selector for these events
if ~np.isnan(data.loc[data.index[0],var]): # if first record is not NaN
end.remove(end[0]) # remove the endpoint ()
if np.isnan(data.loc[data.index[-1],var]): # if the last record is NaN
end = end + [data.index[-1]] # add last record as ending time for first NaN event
else:
end = end
return end
def create_event_table(self, data, var): # create dataframe of of start-end-length for current house/tstat
'''
Parameters
----------
Returns
-------
'''
# remove initial and final missing data
self.remove_start_NaN(data, var)
self.remove_end_NaN(data, var)
# create list of start events
start = self.get_start_events(data, var)
# create list of end events
end = self.get_end_events(data, var)
# merge lists into dataframe and calc length
        events = pd.DataFrame({"start": start, "end": end}, columns=["start", "end"])
events["length_min"] = (events["end"] - events["start"]).dt.total_seconds()/60 # note: this needs datetime index
#print events
events.set_index("start",inplace=True)
return events
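# ----------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file; the file name and
# folder below are hypothetical):
#
#   tsu = TS_Util()
#   data = tsu.load_TS("meter_data.csv", "sample_data")        # load and index
#   data = tsu.remove_start_NaN(data)                          # trim leading NaN
#   data = tsu.remove_end_NaN(data)                            # trim trailing NaN
#   print(tsu.count_missing(data, output="percent"))           # % missing per column
#   data = tsu.remove_outliers(data, method="iqr", coeff=1.5)  # drop IQR outliers
#   events = tsu.create_event_table(data, var=data.columns[0])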
| mit |
MartinSavc/scikit-learn | sklearn/kernel_approximation.py | 258 | 17973 | """
The :mod:`sklearn.kernel_approximation` module implements several
approximate kernel feature maps based on Fourier transforms.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
import warnings
import numpy as np
import scipy.sparse as sp
from scipy.linalg import svd
from .base import BaseEstimator
from .base import TransformerMixin
from .utils import check_array, check_random_state, as_float_array
from .utils.extmath import safe_sparse_dot
from .utils.validation import check_is_fitted
from .metrics.pairwise import pairwise_kernels
class RBFSampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of an RBF kernel by Monte Carlo approximation
of its Fourier transform.
It implements a variant of Random Kitchen Sinks.[1]
Read more in the :ref:`User Guide <rbf_kernel_approx>`.
Parameters
----------
gamma : float
Parameter of RBF kernel: exp(-gamma * x^2)
n_components : int
Number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Notes
-----
See "Random Features for Large-Scale Kernel Machines" by A. Rahimi and
Benjamin Recht.
[1] "Weighted Sums of Random Kitchen Sinks: Replacing
minimization with randomization in learning" by A. Rahimi and
Benjamin Recht.
(http://www.eecs.berkeley.edu/~brecht/papers/08.rah.rec.nips.pdf)
"""
def __init__(self, gamma=1., n_components=100, random_state=None):
self.gamma = gamma
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X, accept_sparse='csr')
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
self.random_weights_ = (np.sqrt(2 * self.gamma) * random_state.normal(
size=(n_features, self.n_components)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = check_array(X, accept_sparse='csr')
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
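# Illustrative usage sketch (not part of the original module; the data below is
# made up):
#
#   rng = np.random.RandomState(0)
#   X = rng.rand(50, 4)
#   rbf_feature = RBFSampler(gamma=1.0, n_components=500, random_state=0)
#   X_features = rbf_feature.fit_transform(X)     # shape (50, 500)
#   # np.dot(X_features, X_features.T) approximates the exact RBF kernel
#   # pairwise_kernels(X, metric='rbf', gamma=1.0), improving as n_components
#   # grows.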
class SkewedChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximates feature map of the "skewed chi-squared" kernel by Monte
Carlo approximation of its Fourier transform.
Read more in the :ref:`User Guide <skewed_chi_kernel_approx>`.
Parameters
----------
skewedness : float
"skewedness" parameter of the kernel. Needs to be cross-validated.
n_components : int
number of Monte Carlo samples per original feature.
Equals the dimensionality of the computed feature space.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
References
----------
See "Random Fourier Approximations for Skewed Multiplicative Histogram
Kernels" by Fuxin Li, Catalin Ionescu and Cristian Sminchisescu.
See also
--------
AdditiveChi2Sampler : A different approach for approximating an additive
variant of the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
"""
def __init__(self, skewedness=1., n_components=100, random_state=None):
self.skewedness = skewedness
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Samples random projection according to n_features.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the transformer.
"""
X = check_array(X)
random_state = check_random_state(self.random_state)
n_features = X.shape[1]
uniform = random_state.uniform(size=(n_features, self.n_components))
# transform by inverse CDF of sech
self.random_weights_ = (1. / np.pi
* np.log(np.tan(np.pi / 2. * uniform)))
self.random_offset_ = random_state.uniform(0, 2 * np.pi,
size=self.n_components)
return self
def transform(self, X, y=None):
"""Apply the approximate feature map to X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
check_is_fitted(self, 'random_weights_')
X = as_float_array(X, copy=True)
X = check_array(X, copy=False)
if (X < 0).any():
raise ValueError("X may not contain entries smaller than zero.")
X += self.skewedness
np.log(X, X)
projection = safe_sparse_dot(X, self.random_weights_)
projection += self.random_offset_
np.cos(projection, projection)
projection *= np.sqrt(2.) / np.sqrt(self.n_components)
return projection
class AdditiveChi2Sampler(BaseEstimator, TransformerMixin):
"""Approximate feature map for additive chi2 kernel.
Uses sampling the fourier transform of the kernel characteristic
at regular intervals.
Since the kernel that is to be approximated is additive, the components of
the input vectors can be treated separately. Each entry in the original
space is transformed into 2*sample_steps+1 features, where sample_steps is
a parameter of the method. Typical values of sample_steps include 1, 2 and
3.
Optimal choices for the sampling interval for certain data ranges can be
computed (see the reference). The default values should be reasonable.
Read more in the :ref:`User Guide <additive_chi_kernel_approx>`.
Parameters
----------
sample_steps : int, optional
Gives the number of (complex) sampling points.
sample_interval : float, optional
Sampling interval. Must be specified when sample_steps not in {1,2,3}.
Notes
-----
This estimator approximates a slightly different version of the additive
    chi squared kernel than ``metric.additive_chi2`` computes.
See also
--------
SkewedChi2Sampler : A Fourier-approximation to a non-additive variant of
the chi squared kernel.
sklearn.metrics.pairwise.chi2_kernel : The exact chi squared kernel.
sklearn.metrics.pairwise.additive_chi2_kernel : The exact additive chi
squared kernel.
References
----------
See `"Efficient additive kernels via explicit feature maps"
<http://www.robots.ox.ac.uk/~vedaldi/assets/pubs/vedaldi11efficient.pdf>`_
A. Vedaldi and A. Zisserman, Pattern Analysis and Machine Intelligence,
2011
"""
def __init__(self, sample_steps=2, sample_interval=None):
self.sample_steps = sample_steps
self.sample_interval = sample_interval
def fit(self, X, y=None):
"""Set parameters."""
X = check_array(X, accept_sparse='csr')
if self.sample_interval is None:
# See reference, figure 2 c)
if self.sample_steps == 1:
self.sample_interval_ = 0.8
elif self.sample_steps == 2:
self.sample_interval_ = 0.5
elif self.sample_steps == 3:
self.sample_interval_ = 0.4
else:
raise ValueError("If sample_steps is not in [1, 2, 3],"
" you need to provide sample_interval")
else:
self.sample_interval_ = self.sample_interval
return self
def transform(self, X, y=None):
"""Apply approximate feature map to X.
Parameters
----------
X : {array-like, sparse matrix}, shape = (n_samples, n_features)
Returns
-------
X_new : {array, sparse matrix}, \
shape = (n_samples, n_features * (2*sample_steps + 1))
            Whether the return value is an array or sparse matrix depends on
the type of the input X.
"""
msg = ("%(name)s is not fitted. Call fit to set the parameters before"
" calling transform")
check_is_fitted(self, "sample_interval_", msg=msg)
X = check_array(X, accept_sparse='csr')
sparse = sp.issparse(X)
# check if X has negative values. Doesn't play well with np.log.
if ((X.data if sparse else X) < 0).any():
raise ValueError("Entries of X must be non-negative.")
# zeroth component
# 1/cosh = sech
# cosh(0) = 1.0
transf = self._transform_sparse if sparse else self._transform_dense
return transf(X)
def _transform_dense(self, X):
non_zero = (X != 0.0)
X_nz = X[non_zero]
X_step = np.zeros_like(X)
X_step[non_zero] = np.sqrt(X_nz * self.sample_interval_)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X_nz)
step_nz = 2 * X_nz * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.cos(j * log_step_nz)
X_new.append(X_step)
X_step = np.zeros_like(X)
X_step[non_zero] = factor_nz * np.sin(j * log_step_nz)
X_new.append(X_step)
return np.hstack(X_new)
def _transform_sparse(self, X):
indices = X.indices.copy()
indptr = X.indptr.copy()
data_step = np.sqrt(X.data * self.sample_interval_)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new = [X_step]
log_step_nz = self.sample_interval_ * np.log(X.data)
step_nz = 2 * X.data * self.sample_interval_
for j in range(1, self.sample_steps):
factor_nz = np.sqrt(step_nz /
np.cosh(np.pi * j * self.sample_interval_))
data_step = factor_nz * np.cos(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
data_step = factor_nz * np.sin(j * log_step_nz)
X_step = sp.csr_matrix((data_step, indices, indptr),
shape=X.shape, dtype=X.dtype, copy=False)
X_new.append(X_step)
return sp.hstack(X_new)
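# Illustrative usage sketch (not part of the original module; the data below is
# made up):
#
#   rng = np.random.RandomState(0)
#   X = rng.rand(10, 5)                            # entries must be non-negative
#   chi2_sampler = AdditiveChi2Sampler(sample_steps=2)
#   X_mapped = chi2_sampler.fit_transform(X)       # shape (10, 5 * (2*2 + 1)) = (10, 25)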
class Nystroem(BaseEstimator, TransformerMixin):
"""Approximate a kernel map using a subset of the training data.
Constructs an approximate feature map for an arbitrary kernel
using a subset of the data as basis.
Read more in the :ref:`User Guide <nystroem_kernel_approx>`.
Parameters
----------
kernel : string or callable, default="rbf"
Kernel map to be approximated. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
n_components : int
Number of features to construct.
How many data points will be used to construct the mapping.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
random_state : {int, RandomState}, optional
If int, random_state is the seed used by the random number generator;
if RandomState instance, random_state is the random number generator.
Attributes
----------
components_ : array, shape (n_components, n_features)
Subset of training points used to construct the feature map.
component_indices_ : array, shape (n_components)
Indices of ``components_`` in the training set.
normalization_ : array, shape (n_components, n_components)
Normalization matrix needed for embedding.
Square root of the kernel matrix on ``components_``.
References
----------
* Williams, C.K.I. and Seeger, M.
"Using the Nystroem method to speed up kernel machines",
Advances in neural information processing systems 2001
* T. Yang, Y. Li, M. Mahdavi, R. Jin and Z. Zhou
"Nystroem Method vs Random Fourier Features: A Theoretical and Empirical
Comparison",
Advances in Neural Information Processing Systems 2012
See also
--------
RBFSampler : An approximation to the RBF kernel using random Fourier
features.
sklearn.metrics.pairwise.kernel_metrics : List of built-in kernels.
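    Examples
    --------
    A minimal usage sketch; the kernel choice, ``gamma`` and ``n_components``
    below are arbitrary illustrative values, not recommended defaults.
    >>> import numpy as np
    >>> from sklearn.kernel_approximation import Nystroem
    >>> rng = np.random.RandomState(0)
    >>> X = rng.rand(50, 5)
    >>> nys = Nystroem(kernel="rbf", gamma=0.2, n_components=10,
    ...                random_state=0)
    >>> nys.fit_transform(X).shape
    (50, 10)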
"""
def __init__(self, kernel="rbf", gamma=None, coef0=1, degree=3,
kernel_params=None, n_components=100, random_state=None):
self.kernel = kernel
self.gamma = gamma
self.coef0 = coef0
self.degree = degree
self.kernel_params = kernel_params
self.n_components = n_components
self.random_state = random_state
def fit(self, X, y=None):
"""Fit estimator to data.
Samples a subset of training points, computes kernel
on these and computes normalization matrix.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Training data.
"""
X = check_array(X, accept_sparse='csr')
rnd = check_random_state(self.random_state)
n_samples = X.shape[0]
# get basis vectors
if self.n_components > n_samples:
# XXX should we just bail?
n_components = n_samples
warnings.warn("n_components > n_samples. This is not possible.\n"
"n_components was set to n_samples, which results"
" in inefficient evaluation of the full kernel.")
else:
n_components = self.n_components
n_components = min(n_samples, n_components)
inds = rnd.permutation(n_samples)
basis_inds = inds[:n_components]
basis = X[basis_inds]
basis_kernel = pairwise_kernels(basis, metric=self.kernel,
filter_params=True,
**self._get_kernel_params())
# sqrt of kernel matrix on basis vectors
U, S, V = svd(basis_kernel)
S = np.maximum(S, 1e-12)
self.normalization_ = np.dot(U * 1. / np.sqrt(S), V)
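        # normalization_ is (approximately) K^{-1/2} of the basis kernel
        # matrix K, with tiny singular values clipped above for stability, so
        # transform() maps X to kernel(X, components_).dot(normalization_.T).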
self.components_ = basis
self.component_indices_ = inds
return self
def transform(self, X):
"""Apply feature map to X.
Computes an approximate feature map using the kernel
between some training points and X.
Parameters
----------
X : array-like, shape=(n_samples, n_features)
Data to transform.
Returns
-------
X_transformed : array, shape=(n_samples, n_components)
Transformed data.
"""
check_is_fitted(self, 'components_')
X = check_array(X, accept_sparse='csr')
kernel_params = self._get_kernel_params()
embedded = pairwise_kernels(X, self.components_,
metric=self.kernel,
filter_params=True,
**kernel_params)
return np.dot(embedded, self.normalization_.T)
def _get_kernel_params(self):
params = self.kernel_params
if params is None:
params = {}
if not callable(self.kernel):
params['gamma'] = self.gamma
params['degree'] = self.degree
params['coef0'] = self.coef0
return params
| bsd-3-clause |
badjr/pysal | pysal/contrib/spint/tests/test_gravity_stats.py | 8 | 12472 | """
Tests for statistics for gravity-style spatial interaction models
"""
__author__ = 'toshan'
import unittest
import numpy as np
import pandas as pd
import gravity as grav
import mle_stats as stats
class SingleParameter(unittest.TestCase):
"""Unit tests statistics when there is a single parameters estimated"""
def setUp(self):
self.f = np.array([0, 6469, 7629, 20036, 4690,
6194, 11688, 2243, 8857, 7248,
3559, 9221, 10099, 22866, 3388,
9986, 46618, 11639, 1380, 5261,
5985, 6731, 2704, 12250, 16132])
self.o = np.repeat(1, 25)
self.d = np.array(range(1, 26))
self.dij = np.array([0, 576, 946, 597, 373,
559, 707, 1208, 602, 692,
681, 1934, 332, 595, 906,
425, 755, 672, 1587, 526,
484, 2141, 2182, 410, 540])
self.pop = np.array([1596000, 2071000, 3376000, 6978000, 1345000,
2064000, 2378000, 1239000, 4435000, 1999000,
1274000, 7042000, 834000, 1268000, 1965000,
1046000, 12131000, 4824000, 969000, 2401000,
2410000, 2847000, 1425000, 1089000, 2909000])
self.dt = pd.DataFrame({'origins': self.o,
'destinations': self.d,
'pop': self.pop,
'Dij': self.dij,
'flows': self.f})
def test_single_parameter(self):
model = grav.ProductionConstrained(self.dt, 'origins', 'destinations', 'flows',
['pop'], 'Dij', 'pow')
ss = {'obs_mean_trip_len': 736.52834197296534,
'pred_mean_trip_len': 734.40974204773784,
'OD_pairs': 24,
'predicted_flows': 242873.00000000003,
'avg_dist_trav': 737.0,
'num_destinations': 24,
'observed_flows': 242873,
'avg_dist': 851.0,
'num_origins': 1}
ps = {'beta': {'LL_zero_val': -3.057415839736517,
'relative_likelihood_stat': 24833.721614296166,
'standard_error': 0.0052734418614330883},
'all_params': {'zero_vals_LL': -3.1780538303479453,
'mle_vals_LL': -3.0062909275101761},
'pop': {'LL_zero_val': -3.1773474269437778,
'relative_likelihood_stat': 83090.010373874276,
'standard_error': 0.0027673052892085684}}
fs = {'r_squared': 0.60516003720997413,
'srmse': 0.57873206718148507}
es = {'pred_obs_deviance': 0.1327,
'entropy_ratio': 0.5642,
'maximum_entropy': 3.1781,
'max_pred_deviance': 0.1718,
'variance_obs_entropy': 2.55421e-06,
'predicted_entropy': 3.0063,
't_stat_entropy': 66.7614,
'max_obs_deviance': 0.3045,
'observed_entropy': 2.8736,
'variance_pred_entropy': 1.39664e-06}
sys_stats = stats.sys_stats(model)
self.assertAlmostEqual(model.system_stats['obs_mean_trip_len'], ss['obs_mean_trip_len'], 4)
self.assertAlmostEqual(model.system_stats['pred_mean_trip_len'], ss['pred_mean_trip_len'], 4)
self.assertAlmostEqual(model.system_stats['OD_pairs'], ss['OD_pairs'])
self.assertAlmostEqual(model.system_stats['predicted_flows'], ss['predicted_flows'])
self.assertAlmostEqual(model.system_stats['avg_dist_trav'], ss['avg_dist_trav'])
self.assertAlmostEqual(model.system_stats['num_destinations'], ss['num_destinations'])
self.assertAlmostEqual(model.system_stats['observed_flows'], ss['observed_flows'])
self.assertAlmostEqual(model.system_stats['avg_dist'], ss['avg_dist'], 4)
self.assertAlmostEqual(model.system_stats['num_origins'], ss['num_origins'])
param_stats = stats.param_stats(model)
self.assertAlmostEqual(model.parameter_stats['beta']['LL_zero_val'], ps['beta']['LL_zero_val'], 4)
self.assertAlmostEqual(model.parameter_stats['beta']['relative_likelihood_stat'],
ps['beta']['relative_likelihood_stat'], 4)
self.assertAlmostEqual(model.parameter_stats['beta']['standard_error'], ps['beta']['standard_error'], 4)
self.assertAlmostEqual(model.parameter_stats['pop']['LL_zero_val'], ps['pop']['LL_zero_val'], 4)
self.assertAlmostEqual(model.parameter_stats['pop']['relative_likelihood_stat'],
ps['pop']['relative_likelihood_stat'], 4)
self.assertAlmostEqual(model.parameter_stats['pop']['standard_error'], ps['pop']['standard_error'], 4)
self.assertAlmostEqual(model.parameter_stats['all_params']['zero_vals_LL'], ps['all_params']['zero_vals_LL'], 4)
self.assertAlmostEqual(model.parameter_stats['all_params']['mle_vals_LL'], ps['all_params']['mle_vals_LL'], 4)
fit_stats = stats.fit_stats(model)
self.assertAlmostEqual(model.fit_stats['r_squared'], fs['r_squared'], 4)
self.assertAlmostEqual(model.fit_stats['srmse'], fs['srmse'], 4)
ent_stats = stats.ent_stats(model)
self.assertAlmostEqual(model.entropy_stats['pred_obs_deviance'], es['pred_obs_deviance'], 4)
self.assertAlmostEqual(model.entropy_stats['entropy_ratio'], es['entropy_ratio'], 4)
self.assertAlmostEqual(model.entropy_stats['maximum_entropy'], es['maximum_entropy'], 4)
self.assertAlmostEqual(model.entropy_stats['max_pred_deviance'], es['max_pred_deviance'], 4)
self.assertAlmostEqual(model.entropy_stats['variance_obs_entropy'], es['variance_obs_entropy'], 4)
self.assertAlmostEqual(model.entropy_stats['predicted_entropy'], es['predicted_entropy'], 4)
self.assertAlmostEqual(model.entropy_stats['t_stat_entropy'], es['t_stat_entropy'], 4)
self.assertAlmostEqual(model.entropy_stats['max_obs_deviance'], es['max_obs_deviance'], 4)
self.assertAlmostEqual(model.entropy_stats['observed_entropy'], es['observed_entropy'], 4)
self.assertAlmostEqual(model.entropy_stats['variance_pred_entropy'], es['variance_pred_entropy'], 4)
class MultipleParameter(unittest.TestCase):
"""Unit tests statistics when there are multiple parameters estimated"""
def setUp(self):
self.f = np.array([0, 180048, 79223, 26887, 198144, 17995, 35563, 30528, 110792,
283049, 0, 300345, 67280, 718673, 55094, 93434, 87987, 268458,
87267, 237229, 0, 281791, 551483, 230788, 178517, 172711, 394481,
29877, 60681, 286580, 0, 143860, 49892, 185618, 181868, 274629,
130830, 382565, 346407, 92308, 0, 252189, 192223, 89389, 279739,
21434, 53772, 287340, 49828, 316650, 0, 141679, 27409, 87938,
30287, 64645, 161645, 144980, 199466, 121366, 0, 134229, 289880,
21450, 43749, 97808, 113683, 89806, 25574, 158006, 0, 437255,
72114, 133122, 229764, 165405, 266305, 66324, 252039, 342948, 0])
self.o = np.repeat(np.array(range(1, 10)), 9)
self.d = np.tile(np.array(range(1, 10)), 9)
self.dij = np.array([0, 219, 1009, 1514, 974, 1268, 1795, 2420, 3174,
219, 0, 831, 1336, 755, 1049, 1576, 2242, 2996,
1009, 831, 0, 505, 1019, 662, 933, 1451, 2205,
1514, 1336, 505, 0, 1370, 888, 654, 946, 1700,
974, 755, 1019, 1370, 0, 482, 1144, 2278, 2862,
1268, 1049, 662, 888, 482, 0, 662, 1795, 2380,
1795, 1576, 933, 654, 1144, 662, 0, 1287, 1779,
2420, 2242, 1451, 946, 2278, 1795, 1287, 0, 754,
3147, 2996, 2205, 1700, 2862, 2380, 1779, 754, 0])
self.dt = pd.DataFrame({'Origin': self.o,
'Destination': self.d,
'flows': self.f,
'Dij': self.dij})
def test_multiple_parameter(self):
model = grav.DoublyConstrained(self.dt, 'Origin', 'Destination', 'flows', 'Dij', 'exp')
ss = {'obs_mean_trip_len': 1250.9555521611339,
'pred_mean_trip_len': 1250.9555521684863,
'OD_pairs': 72, 'predicted_flows': 12314322.0,
'avg_dist_trav': 1251.0, 'num_destinations': 9,
'observed_flows': 12314322, 'avg_dist': 1414.0,
'num_origins': 9}
ps = {'beta': {'LL_zero_val': -4.1172103581711941,
'relative_likelihood_stat': 2053596.3814015209,
'standard_error': 4.9177433418433932e-07},
'all_params': {'zero_vals_LL': -4.1172102183395936,
'mle_vals_LL': -4.0338279201692675}}
fs = {'r_squared': 0.89682406680906979,
'srmse': 0.24804939821988789}
es = {'pred_obs_deviance': 0.0314,
'entropy_ratio': 0.8855,
'maximum_entropy': 4.2767,
'max_pred_deviance': 0.2429,
'variance_obs_entropy': 3.667e-08,
'predicted_entropy': 4.0338,
't_stat_entropy': 117.1593,
'max_obs_deviance': 0.2743,
'observed_entropy': 4.0024,
'variance_pred_entropy': 3.516e-08}
sys_stats = stats.sys_stats(model)
self.assertAlmostEqual(model.system_stats['obs_mean_trip_len'], ss['obs_mean_trip_len'], 4)
self.assertAlmostEqual(model.system_stats['pred_mean_trip_len'], ss['pred_mean_trip_len'], 4)
self.assertAlmostEqual(model.system_stats['OD_pairs'], ss['OD_pairs'])
self.assertAlmostEqual(model.system_stats['predicted_flows'], ss['predicted_flows'])
self.assertAlmostEqual(model.system_stats['avg_dist_trav'], ss['avg_dist_trav'])
self.assertAlmostEqual(model.system_stats['num_destinations'], ss['num_destinations'])
self.assertAlmostEqual(model.system_stats['observed_flows'], ss['observed_flows'])
self.assertAlmostEqual(model.system_stats['avg_dist'], ss['avg_dist'], 4)
self.assertAlmostEqual(model.system_stats['num_origins'], ss['num_origins'])
param_stats = stats.param_stats(model)
self.assertAlmostEqual(model.parameter_stats['beta']['LL_zero_val'], ps['beta']['LL_zero_val'], 4)
self.assertAlmostEqual(model.parameter_stats['beta']['relative_likelihood_stat'],
ps['beta']['relative_likelihood_stat'], 4)
self.assertAlmostEqual(model.parameter_stats['beta']['standard_error'], ps['beta']['standard_error'], 4)
self.assertAlmostEqual(model.parameter_stats['all_params']['zero_vals_LL'], ps['all_params']['zero_vals_LL'], 4)
self.assertAlmostEqual(model.parameter_stats['all_params']['mle_vals_LL'], ps['all_params']['mle_vals_LL'], 4)
fit_stats = stats.fit_stats(model)
self.assertAlmostEqual(model.fit_stats['r_squared'], fs['r_squared'], 4)
self.assertAlmostEqual(model.fit_stats['srmse'], fs['srmse'], 4)
ent_stats = stats.ent_stats(model)
self.assertAlmostEqual(model.entropy_stats['pred_obs_deviance'], es['pred_obs_deviance'], 4)
self.assertAlmostEqual(model.entropy_stats['entropy_ratio'], es['entropy_ratio'], 4)
self.assertAlmostEqual(model.entropy_stats['maximum_entropy'], es['maximum_entropy'], 4)
self.assertAlmostEqual(model.entropy_stats['max_pred_deviance'], es['max_pred_deviance'], 4)
self.assertAlmostEqual(model.entropy_stats['variance_obs_entropy'], es['variance_obs_entropy'], 4)
self.assertAlmostEqual(model.entropy_stats['predicted_entropy'], es['predicted_entropy'], 4)
self.assertAlmostEqual(model.entropy_stats['t_stat_entropy'], es['t_stat_entropy'], 4)
self.assertAlmostEqual(model.entropy_stats['max_obs_deviance'], es['max_obs_deviance'], 4)
self.assertAlmostEqual(model.entropy_stats['observed_entropy'], es['observed_entropy'], 4)
self.assertAlmostEqual(model.entropy_stats['variance_pred_entropy'], es['variance_pred_entropy'], 4)
if __name__ == '__main__':
unittest.main() | bsd-3-clause |
allanino/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/pylab.py | 70 | 10245 | """
This is a procedural interface to the matplotlib object-oriented
plotting library.
The following plotting commands are provided; the majority have
Matlab(TM) analogs and similar arguments.
_Plotting commands
acorr - plot the autocorrelation function
annotate - annotate something in the figure
arrow - add an arrow to the axes
axes - Create a new axes
axhline - draw a horizontal line across axes
axvline - draw a vertical line across axes
axhspan - draw a horizontal bar across axes
axvspan - draw a vertical bar across axes
axis - Set or return the current axis limits
bar - make a bar chart
barh - a horizontal bar chart
broken_barh - a set of horizontal bars with gaps
box - set the axes frame on/off state
boxplot - make a box and whisker plot
cla - clear current axes
clabel - label a contour plot
clf - clear a figure window
clim - adjust the color limits of the current image
close - close a figure window
colorbar - add a colorbar to the current figure
cohere - make a plot of coherence
contour - make a contour plot
contourf - make a filled contour plot
csd - make a plot of cross spectral density
delaxes - delete an axes from the current figure
draw - Force a redraw of the current figure
errorbar - make an errorbar graph
figlegend - make legend on the figure rather than the axes
figimage - make a figure image
figtext - add text in figure coords
figure - create or change active figure
fill - make filled polygons
findobj - recursively find all objects matching some criteria
gca - return the current axes
gcf - return the current figure
gci - get the current image, or None
getp - get a handle graphics property
grid - set whether gridding is on
hist - make a histogram
hold - set the axes hold state
ioff - turn interaction mode off
ion - turn interaction mode on
isinteractive - return True if interaction mode is on
imread - load image file into array
imshow - plot image data
ishold - return the hold state of the current axes
legend - make an axes legend
loglog - a log log plot
matshow - display a matrix in a new figure preserving aspect
pcolor - make a pseudocolor plot
pcolormesh - make a pseudocolor plot using a quadrilateral mesh
pie - make a pie chart
plot - make a line plot
plot_date - plot dates
plotfile - plot column data from an ASCII tab/space/comma delimited file
pie - pie charts
polar - make a polar plot on a PolarAxes
psd - make a plot of power spectral density
quiver - make a direction field (arrows) plot
rc - control the default params
rgrids - customize the radial grids and labels for polar
savefig - save the current figure
scatter - make a scatter plot
setp - set a handle graphics property
semilogx - log x axis
semilogy - log y axis
show - show the figures
specgram - a spectrogram plot
spy - plot sparsity pattern using markers or image
stem - make a stem plot
subplot - make a subplot (numrows, numcols, axesnum)
subplots_adjust - change the params controlling the subplot positions of current figure
subplot_tool - launch the subplot configuration tool
suptitle - add a figure title
table - add a table to the plot
text - add some text at location x,y to the current axes
thetagrids - customize the radial theta grids and labels for polar
title - add a title to the current axes
  xcorr     - plot the cross correlation of x and y
xlim - set/get the xlimits
ylim - set/get the ylimits
xticks - set/get the xticks
yticks - set/get the yticks
xlabel - add an xlabel to the current axes
ylabel - add a ylabel to the current axes
autumn - set the default colormap to autumn
bone - set the default colormap to bone
cool - set the default colormap to cool
copper - set the default colormap to copper
flag - set the default colormap to flag
gray - set the default colormap to gray
hot - set the default colormap to hot
hsv - set the default colormap to hsv
jet - set the default colormap to jet
pink - set the default colormap to pink
prism - set the default colormap to prism
spring - set the default colormap to spring
summer - set the default colormap to summer
winter - set the default colormap to winter
spectral - set the default colormap to spectral
_Event handling
connect - register an event handler
disconnect - remove a connected event handler
_Matrix commands
cumprod - the cumulative product along a dimension
cumsum - the cumulative sum along a dimension
  detrend   - remove the mean or best fit line from an array
diag - the k-th diagonal of matrix
  diff      - the n-th difference of an array
eig - the eigenvalues and eigen vectors of v
eye - a matrix where the k-th diagonal is ones, else zero
find - return the indices where a condition is nonzero
  fliplr    - flip a matrix left/right (reverse the column order)
  flipud    - flip a matrix up/down (reverse the row order)
linspace - a linear spaced vector of N values from min to max inclusive
logspace - a log spaced vector of N values from min to max inclusive
meshgrid - repeat x and y to make regular matrices
ones - an array of ones
rand - an array from the uniform distribution [0,1]
randn - an array from the normal distribution
  rot90     - rotate matrix k*90 degrees counterclockwise
squeeze - squeeze an array removing any dimensions of length 1
tri - a triangular matrix
tril - a lower triangular matrix
triu - an upper triangular matrix
vander - the Vandermonde matrix of vector x
svd - singular value decomposition
zeros - a matrix of zeros
_Probability
levypdf - The levy probability density function from the char. func.
normpdf - The Gaussian probability density function
rand - random numbers from the uniform distribution
randn - random numbers from the normal distribution
_Statistics
corrcoef - correlation coefficient
cov - covariance matrix
amax - the maximum along dimension m
mean - the mean along dimension m
median - the median along dimension m
amin - the minimum along dimension m
norm - the norm of vector x
prod - the product along dimension m
ptp - the max-min along dimension m
std - the standard deviation along dimension m
asum - the sum along dimension m
_Time series analysis
bartlett - M-point Bartlett window
blackman - M-point Blackman window
  cohere    - the coherence using average periodogram
  csd       - the cross spectral density using average periodogram
fft - the fast Fourier transform of vector x
hamming - M-point Hamming window
hanning - M-point Hanning window
hist - compute the histogram of x
kaiser - M length Kaiser window
  psd       - the power spectral density using average periodogram
sinc - the sinc function of array x
_Dates
date2num - convert python datetimes to numeric representation
drange - create an array of numbers for date plots
num2date - convert numeric type (float days since 0001) to datetime
_Other
angle - the angle of a complex array
griddata - interpolate irregularly distributed data to a regular grid
load - load ASCII data into array
polyfit - fit x, y to an n-th order polynomial
polyval - evaluate an n-th order polynomial
roots - the roots of the polynomial coefficients in p
save - save an array to an ASCII file
trapz - trapezoidal integration
__end
"""
import sys, warnings
from cbook import flatten, is_string_like, exception_to_str, popd, \
silent_list, iterable, dedent
import numpy as np
from numpy import ma
from matplotlib import mpl # pulls in most modules
from matplotlib.dates import date2num, num2date,\
datestr2num, strpdate2num, drange,\
epoch2num, num2epoch, mx2num,\
DateFormatter, IndexDateFormatter, DateLocator,\
RRuleLocator, YearLocator, MonthLocator, WeekdayLocator,\
DayLocator, HourLocator, MinuteLocator, SecondLocator,\
rrule, MO, TU, WE, TH, FR, SA, SU, YEARLY, MONTHLY,\
WEEKLY, DAILY, HOURLY, MINUTELY, SECONDLY, relativedelta
import matplotlib.dates
# bring all the symbols in so folks can import them from
# pylab in one fell swoop
from matplotlib.mlab import window_hanning, window_none,\
conv, detrend, detrend_mean, detrend_none, detrend_linear,\
polyfit, polyval, entropy, normpdf, griddata,\
levypdf, find, trapz, prepca, rem, norm, orth, rank,\
sqrtm, prctile, center_matrix, rk4, exp_safe, amap,\
sum_flat, mean_flat, rms_flat, l1norm, l2norm, norm, frange,\
diagonal_matrix, base_repr, binary_repr, log2, ispower2,\
bivariate_normal, load, save
from matplotlib.mlab import stineman_interp, slopes, \
stineman_interp, inside_poly, poly_below, poly_between, \
is_closed_polygon, path_length, distances_along_curve, vector_lengths
from numpy import *
from numpy.fft import *
from numpy.random import *
from numpy.linalg import *
from matplotlib.mlab import window_hanning, window_none, conv, detrend, demean, \
detrend_mean, detrend_none, detrend_linear, entropy, normpdf, levypdf, \
find, longest_contiguous_ones, longest_ones, prepca, prctile, prctile_rank, \
center_matrix, rk4, bivariate_normal, get_xyz_where, get_sparse_matrix, dist, \
dist_point_to_segment, segments_intersect, fftsurr, liaupunov, movavg, \
save, load, exp_safe, \
amap, rms_flat, l1norm, l2norm, norm_flat, frange, diagonal_matrix, identity, \
base_repr, binary_repr, log2, ispower2, fromfunction_kw, rem, norm, orth, rank, sqrtm,\
mfuncC, approx_real, rec_append_field, rec_drop_fields, rec_join, csv2rec, rec2csv, isvector
from matplotlib.pyplot import *
# provide the recommended module abbrevs in the pylab namespace
import matplotlib.pyplot as plt
import numpy as np
| agpl-3.0 |
fxia22/pointGAN | show_ae.py | 1 | 1680 | from __future__ import print_function
from show3d_balls import *
import argparse
import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
from datasets import PartDataset
from pointnet import PointGen, PointGenC, PointNetAE
import torch.nn.functional as F
import matplotlib.pyplot as plt
#showpoints(np.random.randn(2500,3), c1 = np.random.uniform(0,1,size = (2500)))
parser = argparse.ArgumentParser()
parser.add_argument('--model', type=str, default = '', help='model path')
opt = parser.parse_args()
print (opt)
ae = PointNetAE(num_points = 2048)
ae.load_state_dict(torch.load(opt.model))
dataset = PartDataset(root = 'shapenetcore_partanno_segmentation_benchmark_v0', class_choice = ['Chair'], classification = True, npoints = 2048)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=64,
shuffle=True, num_workers=1)
ae.cuda()
i, data = next(enumerate(dataloader, 0))
points, _ = data
points = Variable(points)
bs = points.size()[0]
points = points.transpose(2,1)
points = points.cuda()
gen = ae(points)
point_np = gen.transpose(2,1).cpu().data.numpy()
#showpoints(points.transpose(2,1).cpu().data.numpy())
showpoints(point_np)
#sim_noise = Variable(torch.randn(1000, 100))
#points = gen(sim_noise)
#point_np = points.transpose(2,1).data.numpy()
#print(point_np.shape)
#np.savez('gan.npz', points = point_np)
| mit |
petosegan/scikit-learn | sklearn/calibration.py | 137 | 18876 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
    it is assumed that base_estimator has been
    fitted already and all data is used for calibration. Note that
    data for fitting the classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples (<<1000) since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer or cross-validation generator or "prefit", optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
The list of calibrated classifiers, one for each crossvalidation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
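    Examples
    --------
    A minimal sketch; the base estimator and the synthetic data below are
    illustrative choices, not recommendations.
    >>> from sklearn.datasets import make_classification
    >>> from sklearn.svm import LinearSVC
    >>> from sklearn.calibration import CalibratedClassifierCV
    >>> X, y = make_classification(n_samples=100, random_state=0)
    >>> clf = CalibratedClassifierCV(LinearSVC(random_state=0),
    ...                              method='sigmoid', cv=3)
    >>> proba = clf.fit(X, y).predict_proba(X)
    >>> proba.shape
    (100, 2)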
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
        # Check that each cross-validation fold can have at least one
        # example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, X, y, classifier=True)
arg_names = inspect.getargspec(base_estimator.fit)[0]
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in arg_names):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv:
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
        # classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
F = df # F follows Platt's notations
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
Whether y_prob needs to be normalized into the bin [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
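    Examples
    --------
    A small illustrative call; the labels and probabilities below are made up.
    >>> import numpy as np
    >>> from sklearn.calibration import calibration_curve
    >>> y_true = np.array([0, 0, 0, 1, 1, 1])
    >>> y_prob = np.array([0.1, 0.2, 0.3, 0.7, 0.8, 0.9])
    >>> prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=2)
    >>> prob_true.shape, prob_pred.shape
    ((2,), (2,))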
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
| bsd-3-clause |
BU-PyCon/Meeting-2 | Programs/PyPlot.py | 1 | 18763 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib.patches import *
import pdb
print("""
MatPlotLib Advanced Tutorial
----------------------------
This is a tutorial covering the features and usage of the matplotlib package
in more detail. In truth, no single tutorial can cover all the features that
exist in the matplotlib package since it is extremely expansive. This tutorial
will cover as much material as possible to let you know of the features that
are available to you when plotting data.
Some Notes:
1) A few parts of this program use pdb to pause the program and allow the
user to try making things for themselves. Use c to continue with the
program.
2) This program uses plt for the reference name of pyplot. All pyplot methods
should be preceded with the qualifier plt such as plt.show().
3) For the best results, run this program with ipython. Regular python may
dislike plotting during program execution.
""")
pause = input("Press [Enter] to continue...")
print('\n'*100)
print("""
###
## pyplot
###
Within the matplotlib package, the main module you want to be using is the
pyplot module. It is generally imported as
import matplotlib.pyplot as plt
The pyplot module provides many useful functions, which we will go over one
by one. For reference, some useful methods are shown below.
>>> plt.close() # Closes the current figure. Optional arguments
include passing in a figure, figure number,
or the string 'all' which closes all figures.
>>> plt.draw() # Forces the figure to be redrawn. Useful if it
was been updated after it was last shown or
drawn.
>>> plt.gca() # Returns the currently active axes object
>>> plt.gcf() # Returns the currently active figure object
>>> plt.show() # Shows the latest figure. By default, matplotlib
pauses and waits for the window to be closed
before continuing. This feature can be turned
off with the keyword block = False.
>>> plt.savefig('title.png') # Saves the figure to a file. The file type is
automatically determined by the extension.
Supported formats include png, pdf, ps, eps,
and svg. This has the keywords dpi which
specifies the resolution of the output and
bbox_inches which, when set to 'tight' reduces
any extra white space in the saved file.
>>> plt.subplots_adjust() # Allows for adjusting parameters of the layout
such as the horizontal space (hspace) or width
space (wspace) between plots, as well as the
left, right, top, and bottom padding.
""")
pause = input("Press [Enter] to continue...")
print('\n'*100)
print("""
###
## Components of a Plot
###
At it's core, matplotlib is nothing more than a graphics package. Every
component of a plot is just a particular "Artist", all drawn on top of
each other to make a nice looking plot.
The beauty of pyplot is the degree of customization it allows: you have
control over every individual component of the plot and can change each one
independently. To do this properly, we will focus on
using the object oriented feature of matplotlib.
Before we talk about how to work with all these features, we need to know
what they are. A window should have just popped up that you can examine.
This window shows all the various components of a figure and the names that
pyplot uses for them. This figure contains the following components
-> Figure The main part of the plot which everything is shown on. This
encompasses the entire area of the window, excluding the toolbar.
-> Axes A single plot, added to the figure. This can have many sets of
data added to it along with other components such as legends.
Axes can even sit on top of other axes, but importantly, they
are still a component of figure, not the axes they may sit inside
of.
-> Axis Note the difference here! This is an axIs not an axEs. This
component is a single axis on the axes and defines how the data
is plotted. An axes, by default has two axises, the x and y
(unless you're plotting in 3D in which case it has a z). You can
add more axises though. Each axis has various components such as
the spine, tick labels, major ticks, and minor ticks.
-> Spine Each axis has various components. One of them is the spine. This
is the actual black line at the border of the plots that the
tick marks are drawn on. Each default axis has 2 spines. For the
x axis, it has the top and bottom spine and likewise the y axis
has the right and left.
-> Legend Each axes can have a legend added to it. The legend can have lines
on it, one for each curve labeled.
""")
x = np.arange(0,4*np.pi+np.pi/8,np.pi/8)
y1 = np.sin(x)
y2 = np.cos(x)
fig, (ax1, ax2) = plt.subplots(2, figsize = (10,7))
fig.canvas.set_window_title('Pyplot Figure Components')
plt.subplots_adjust(hspace = 0.4)
plt.suptitle('Figure title', fontsize = 20)
#Create subplot 1
ax1.plot(x, y1, '-dr', label = '$sin(x)$')
ax1.plot(x, np.array([0]*len(x)), '--k')
ax1.set_xlim([0,4*np.pi])
ax1.set_title('Axes 1 Title')
ax1.set_xlabel('Axes 1 x-axis label')
ax1.set_ylabel('Axes 1 y-axis label')
ax1.legend(loc = 'best')
#Create subplot 2
ax2.plot(x, y2, ':og', label = '$cos(x)$')
ax2.plot(x, np.array([0]*len(x)), '-k')
ax2.set_xlim([0,4*np.pi])
ax2.set_title('Axes 2 Title')
ax2.set_xlabel('Axes 2 x-axis label')
ax2.set_ylabel('Axes 2 y-axis label')
ax2.legend(loc = 'best')
#Add artists
ax = fig.add_axes([0,0,1,1])
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.set_zorder(0)
ax.set_axis_bgcolor((0, 0, 0, 0))
ax.add_patch(Rectangle((0.01,0.01),0.98,0.98, fill = False, lw = 2, ec = 'b', transform=ax.transAxes))
ax.annotate('Figure', (0.02,0.02), textcoords = 'axes fraction',
fontsize = 20, color = 'b', transform=ax.transAxes)
ax.add_patch(Rectangle((0.04,0.5),0.9,0.44, fill = False, lw = 2, ec = 'g', transform=ax.transAxes))
ax.annotate('Axes', (0.05,0.52), textcoords = 'axes fraction',
fontsize = 20, color = 'g', transform=ax.transAxes)
ax.add_patch(Rectangle((0.11,0.08),0.03,0.38, fill = False, lw = 2, ec = 'r', transform=ax.transAxes))
ax.annotate('Axis', (0.045,0.4), textcoords = 'axes fraction',
fontsize = 20, color = 'r', transform=ax.transAxes)
ax.add_patch(Rectangle((0.11,0.08),0.8,0.04, fill = False, lw = 2, ec = 'r', transform=ax.transAxes))
ax.annotate('Axis', (0.85,0.04), textcoords = 'axes fraction',
fontsize = 20, color = 'r')
ax.annotate('Spine', (0.8,0.43), xytext = (0.8,0.35), xycoords = 'axes fraction',
color = (1,0.5,0), fontsize = 20,
textcoords = 'axes fraction', horizontalalignment='left',
arrowprops=dict(arrowstyle = '-|>', fc=(1,0.5,0)))
ax.annotate('', (0.9,0.32), xytext = (0.84,0.34), xycoords = 'axes fraction',
arrowprops=dict(arrowstyle = '-|>', fc=(1,0.5,0)))
plt.show(block = False)
plt.pause(0.01)
pause = input('Press [Enter] to continue...')
print('\n'*100)
print("""
###
## Objects in matplotlib
###
The above mentioned components of a figure (along with a few others) are
all representable as objects. These objects are stored in a variable which
maintains the state of that object and also has functions the object can
call to change its state. Let's look at how we can use these objects to
create a new figure.
""")
pause = input('Press [Enter] to continue...')
print('\n'*100)
print("""
###
## Creating a New Figure
###
There multiple ways to create a new figure. Probably the simplest is
>>> fig = figure(1, figsize = (5,5), tight_layout = True)
The 1 in this case is an ID for the figure (much like the logical unit
number in IDL). The keywords figsize and tight_layout are optional. The
former sets the physical size of the figure and the second tells the layout
manager to make the plots as close as possible. The state of the figure is
stored in the fig variable which knows entirely about this new figure.
Calling this figure method tells matplotlib that any subsequent plotting
commands should apply to this figure. We can switch to plotting on a new
figure by calling the figure command for another figure (or even switch
back to an old figure). Another method for creating figures is the following
>>> fig = plt.subplots()
This method is much more powerful, but these features will be discussed in
the next section. For reference here are a set of methods and their
functionality that the figure object can call
>>> fig.add_subplot(111) # Adds a subplot at the specified position
>>> fig.clear() # Clears the figure's axes
>>> fig.suptitle('Title') # Adds a title to the figure
Many of the methods listed above as pyplot methods, such as subplots_adjust or
draw, can be applied to a specific figure as well.
""")
pause = input('Press [Enter] to continue...')
print('\n'*100)
print("""
###
## Creating a New Axes
###
Once you have a figure, it's time to add some Axeses to it. As mentioned
before, matplotlib supports using objects. If you've properly created your
figure, it will have been stored into an object. You can now call the method
add_subplot.
>>> ax = fig.add_subplot(1,1,1)
The order of these parameters is add_subplot(rows, columns, plotNo), where
plotNo is the number of the plot, starting at 1 in the upper left and counting
left to right then top to bottom. If all values are less than 10, an equivalent
procedure is to do
>>> ax = fig.add_subplot(111)
Note how this function has created and returned an axes object which we have
stored into the variable ax. There is another method which creates the figure
and an axes at the same time
>>> fig, (ax1, ax2) = plt.subplots(nrows = 2, ncols = 1, figsize = (8,8))
The figure is stored into the first variable and the axes are stored into
the second variable, which is a tuple of axes.
You can also call plt.subplot(), which acts like add_subplot() but adds
an axes to the currently active figure (determined by the last one referenced).
For more control over your axes positioning, you can specify the exact position
and extent of an axes with the subplot2grid function.
>>> ax = plt.subplot2grid((2,3),(1,0), colspan = 2, rowspan = 1)
This tells figure that there will be a grid of 2 x 3 plots (2 rows, 3 cols) and
this creates a new plot at the position (1,0) (second row, first column) with a
column span of 2 and a row span of 1. If you really want to specify the exact
location, try the add_axes method.
>>> ax = fig.add_axes([0.5, 0.5, 0.3, 0.3])
This tells the figure to put the lower left corner of the axes at the position
(0.5, 0.5) (as fractions of the figure size) and have it span a width and height
of (0.3, 0.3). This is useful for putting plots inside plots. Try this out for
yourself!
""")
pdb.set_trace()
print('\n'*100)
print("""
###
## Plotting to Axes
###
There are many types of plots that can be put on an axes. Below are some simple
examples.
>>> ax.plot() # Simple scatter/line plot
>>> ax.bar() # Vertical bar plot
>>> ax.barh() # Horizonatal bar plot
>>> ax.boxplot() # Box and whisker plot
>>> ax.contour() # Contour plot of lines
>>> ax.contourf() # Filled contour plot
>>> ax.errorbar() # Scatter/line plot with errorbars
>>> ax.fill() # Scatter/line plot which is filled below the curve
>>> ax.hist() # A histogram of the input data
>>> ax.loglog() # Scatter/line plot that is logarithmic on both axes
>>> ax.pie() # Pie chart
>>> plt.polar() # Polar plot (a pyplot function, not an axes method)
>>> ax.quiver() # 2D field of arrows
>>> ax.semilogx() # Scatter/line plot with logarithmic x and linear y.
>>> ax.semilogy() # Equivalent to semilogx, but now y is logarithmic
>>> ax.streamplot() # Streamline plot of a 2D vector field
>>> ax.step() # A step plot
Feel free to try out some of these. You may have to look up the proper
procedures online.
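As a quick illustration (the names below are placeholders for your own data,
not variables defined in this tutorial):
>>> data = np.random.randn(1000)
>>> ax.hist(data, bins = 30)
>>> ax.errorbar(x, y, yerr = 0.1, fmt = 'o')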
""")
pdb.set_trace()
print('\n'*100)
print("""
###
## Axes Methods
###
Aside from the many plots, there are many useful methods to adjust the
properties of the axes
>>> ax.add_patch() # Adds a 'patch' which is an artist like arrows or circles
>>> ax.annotate() # Adds a string with an arrow to the axes
>>> ax.axhspan() # Adds a horizontal bar across the plot
>>> ax.axvspan() # Adds a vertical bar across the plot
>>> ax.arrow() # Adds an arrow
>>> ax.cla() # Clears the axes
>>> fig.colorbar() # Adds a colorbar (a figure method, not an axes method)
>>> ax.grid() # Turns on grid lines, keywords include which (major or
minor) and axis (both, x, or y).
>>> ax.legend() # Legend added to the plot
>>> ax.minorticks_on() # Turns on minor tick marks
>>> plt.set_cmap() # Sets the default color map (images also have set_cmap)
>>> ax.set_title() # Sets the title of the axes
>>> ax.set_xlabel() # Sets the x label of the axes
>>> ax.set_xlim() # Sets the x limits of the axes
>>> ax.set_xscale() # Sets the scale, either linear, log, or symlog
>>> ax.set_xticklabels() # A list of strings to use for the tick labels
>>> ax.set_xticks() # Sets the tick mark locations from a list
## The above x axis specific functions have analogous y axis functions
>>> ax.text() # Adds a string to the axes
>>> ax.tick_params() # Changes tick and tick label appearances
Try playing with these various features after creating a figure and axes.
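As a quick illustration (all values below are arbitrary):
>>> ax.set_title('Demo')
>>> ax.set_xlim([0, 10])
>>> ax.grid(which = 'major', axis = 'both')
>>> ax.annotate('note', xy = (5, 0.5), xytext = (6, 0.8),
...             arrowprops = dict(arrowstyle = '->'))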
""")
pdb.set_trace()
print('\n'*100)
print("""
###
## Axis and Spines
###
Just as you can work with the specific axes on a figure, you can also work with
specific axis and spines on your axes. These can be extracted and stored in
their own variables, but it is generally easier to refer to them as the
components of the axes object. They are accessed in the following way.
>>> ax.xaxis
>>> ax.yaxis
>>> ax.spines['top'] # spines is a dict with the keys 'top', 'bottom',
'left', and 'right'.
These components of the axes have the following useful methods.
>>> ax.xaxis.set_major_formatter() # Sets how the tick labels are formatted
>>> ax.xaxis.set_major_locator() # Sets location of tick marks (see locators)
## The above major methods have analogous minor methods
>>> ax.xaxis.set_ticklabels() # Set to empty list to turn off labels
>>> ax.xaxis.set_ticks_position() # Change tick position to only 'top', 'left', etc.
## The above xaxis methods have analogous yaxis methods
>>> ax.spines['top'].set_color() # Sets the color of the spine
>>> ax.spines['top'].set_position() # Changes position of the spine
>>> ax.spines['top'].set_visible() # Turns off the spine
## The above spine methods have analogous methods for 'bottom', 'left', and
'right'
Feel free to play with these properties as well.
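For instance, to show ticks only on the bottom and left and hide the top and
right spines (a common cleanup; the exact choices here are just illustrative):
>>> ax.xaxis.set_ticks_position('bottom')
>>> ax.yaxis.set_ticks_position('left')
>>> ax.spines['top'].set_visible(False)
>>> ax.spines['right'].set_visible(False)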
""")
pdb.set_trace()
print('\n'*100)
print("""
###
## Higher Degrees of Customization
###
We could choose to go even further down the ladder than axis and spines. It is
possible to get the tickmark objects from an axis (via get_major_ticks()) and
change properties on a tickmark by tickmark basis. However, it is no longer
instructive to continue showing methods and ways of doing this as it can always
be looked up. For extreme control over every component of plotting, it is sometimes
useful to use the rcParams variable. This should be imported as
from matplotlib import rcParams
You can then refer to any component of the figure by referencing the dict's
keyword, and setting the value. Common examples include
>>> rcParams['lines.linewidth'] = 2 # Sets linewidths to be 2 by default
>>> rcParams['lines.color'] = 'r' # Sets line colors to be red by default
There are hundreds of parameters that can be set, all of which can be seen by
going here http://matplotlib.org/users/customizing.html.
""")
pause = input('Press [Enter] to continue...')
print('\n'*100)
print("""
###
## Animations
###
This will only introduce the idea of animations. To actually produce saved
animations in the form of mp4 or some similar format requires installing third
party programs such as ffmpeg. However, matplotlib comes with an animation
package imported as matplotlib.animation. It has tools to allow you to
continually update a plot such that it is animated, and it can also save the
animation. Below is the code for a very simple animation plot.
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig, ax = plt.subplots()
ax.set_xlim([0,2*np.pi])
x = np.arange(0, 2*np.pi, 0.01) # x-array
line, = ax.plot(x, np.sin(x)) # The comma after line makes it a tuple
#Init only required for blitting to give a clean slate.
def init():
line.set_ydata(np.ma.array(x, mask=True))
return line,
def animate(i):
line.set_ydata(np.sin(x+i/10.0)) # update the data
return line,
#blit=True means only redraw the components which have updated. This is
#faster than redrawing everything.
ani = animation.FuncAnimation(fig, animate, init_func=init,
interval=25, blit=True)
plt.show()
""")
fig, ax = plt.subplots()
ax.set_xlim([0,2*np.pi])
x = np.arange(0, 2*np.pi, 0.01) # x-array
line, = ax.plot(x, np.sin(x)) # The comma after line makes it a tuple
#Init only required for blitting to give a clean slate.
def init():
line.set_ydata(np.ma.array(x, mask=True))
return line,
def animate(i):
line.set_ydata(np.sin(x+i/10.0)) # update the data
return line,
#blit=True means only redraw the components which have updated. This is
#faster than redrawing everything.
ani = animation.FuncAnimation(fig, animate, init_func=init,
interval=25, blit=True)
plt.show(block = False)
print('Done...')
| mit |
ivano666/tensorflow | tensorflow/examples/skflow/text_classification_character_rnn.py | 9 | 2530 | # Copyright 2015-present The Scikit Flow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This is an example of using recurrent neural networks over characters
for the DBpedia dataset to predict the class of an entity from its description.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is loosely an alternative to the Lua code here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import pandas
import tensorflow as tf
from tensorflow.contrib import learn
### Training data
# Downloads, unpacks and reads DBpedia dataset.
dbpedia = learn.datasets.load_dataset('dbpedia')
X_train, y_train = pandas.DataFrame(dbpedia.train.data)[1], pandas.Series(dbpedia.train.target)
X_test, y_test = pandas.DataFrame(dbpedia.test.data)[1], pandas.Series(dbpedia.test.target)
### Process vocabulary
MAX_DOCUMENT_LENGTH = 100
char_processor = learn.preprocessing.ByteProcessor(MAX_DOCUMENT_LENGTH)
X_train = np.array(list(char_processor.fit_transform(X_train)))
X_test = np.array(list(char_processor.transform(X_test)))
### Models
HIDDEN_SIZE = 20
def char_rnn_model(X, y):
byte_list = learn.ops.one_hot_matrix(X, 256)
byte_list = learn.ops.split_squeeze(1, MAX_DOCUMENT_LENGTH, byte_list)
cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
_, encoding = tf.nn.rnn(cell, byte_list, dtype=tf.float32)
return learn.models.logistic_regression(encoding, y)
classifier = learn.TensorFlowEstimator(model_fn=char_rnn_model, n_classes=15,
steps=100, optimizer='Adam', learning_rate=0.01, continue_training=True)
# Continuously train for 1000 steps & predict on test set.
while True:
classifier.fit(X_train, y_train)
score = metrics.accuracy_score(y_test, classifier.predict(X_test))
print("Accuracy: %f" % score)
| apache-2.0 |
edhuckle/statsmodels | statsmodels/sandbox/regression/tests/test_gmm_poisson.py | 31 | 13338 | '''
TestGMMMultTwostepDefault() has lower precision
'''
from statsmodels.compat.python import lmap
import numpy as np
from numpy.testing.decorators import skipif
import pandas
import scipy
from scipy import stats
from statsmodels.regression.linear_model import OLS
from statsmodels.sandbox.regression import gmm
from numpy.testing import assert_allclose, assert_equal
from statsmodels.compat.scipy import NumpyVersion
def get_data():
import os
curdir = os.path.split(__file__)[0]
dt = pandas.read_csv(os.path.join(curdir, 'racd10data_with_transformed.csv'))
# Transformations compared to original data
##dt3['income'] /= 10.
##dt3['aget'] = (dt3['age'] - dt3['age'].min()) / 5.
##dt3['aget2'] = dt3['aget']**2
# How do we do this with pandas
mask = ~((np.asarray(dt['private']) == 1) & (dt['medicaid'] == 1))
mask = mask & (dt['docvis'] <= 70)
dt3 = dt[mask]
dt3['const'] = 1 # add constant
return dt3
DATA = get_data()
#------------- moment conditions for example
def moment_exponential_add(params, exog, exp=True):
if not np.isfinite(params).all():
print("invalid params", params)
# moment condition without instrument
if exp:
predicted = np.exp(np.dot(exog, params))
#if not np.isfinite(predicted).all():
#print "invalid predicted", predicted
#raise RuntimeError('invalid predicted')
predicted = np.clip(predicted, 0, 1e100) # try to avoid inf
else:
predicted = np.dot(exog, params)
return predicted
def moment_exponential_mult(params, data, exp=True):
# multiplicative error model
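    # (added clarification) For the exponential-mean case this is the
    # multiplicative-error model y = exp(x'b) * u with E[u | z] = 1; the
    # residual returned below, u - 1 = y / exp(x'b) - 1, has conditional
    # mean zero, which is the moment condition exploited by the GMM estimator.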
endog = data[:,0]
exog = data[:,1:]
if not np.isfinite(params).all():
print("invalid params", params)
# moment condition without instrument
if exp:
predicted = np.exp(np.dot(exog, params))
predicted = np.clip(predicted, 0, 1e100) # avoid inf
resid = endog / predicted - 1
if not np.isfinite(resid).all():
print("invalid resid", resid)
else:
resid = endog - np.dot(exog, params)
return resid
#------------------- test classes
# copied from test_gmm.py, with changes
class CheckGMM(object):
# default tolerance, overwritten by subclasses
params_tol = [5e-6, 5e-6]
bse_tol = [5e-7, 5e-7]
q_tol = [5e-6, 1e-9]
j_tol = [5e-5, 1e-9]
def test_basic(self):
res1, res2 = self.res1, self.res2
# test both absolute and relative difference
rtol, atol = self.params_tol
assert_allclose(res1.params, res2.params, rtol=rtol, atol=0)
assert_allclose(res1.params, res2.params, rtol=0, atol=atol)
rtol, atol = self.bse_tol
assert_allclose(res1.bse, res2.bse, rtol=rtol, atol=0)
assert_allclose(res1.bse, res2.bse, rtol=0, atol=atol)
def test_other(self):
res1, res2 = self.res1, self.res2
rtol, atol = self.q_tol
assert_allclose(res1.q, res2.Q, rtol=atol, atol=rtol)
rtol, atol = self.j_tol
assert_allclose(res1.jval, res2.J, rtol=atol, atol=rtol)
j, jpval, jdf = res1.jtest()
# j and jval should be the same
assert_allclose(res1.jval, res2.J, rtol=13, atol=13)
#pvalue is not saved in Stata results
pval = stats.chi2.sf(res2.J, res2.J_df)
#assert_allclose(jpval, pval, rtol=1e-4, atol=1e-6)
assert_allclose(jpval, pval, rtol=rtol, atol=atol)
assert_equal(jdf, res2.J_df)
def test_smoke(self):
res1 = self.res1
res1.summary()
class TestGMMAddOnestep(CheckGMM):
@classmethod
def setup_class(self):
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
self.bse_tol = [5e-6, 5e-7]
        self.q_tol = [0.04, 0]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(np.log(endog+1), exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog, exog, instrument, moment_exponential_add)
res0 = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False})
self.res1 = res0
from .results_gmm_poisson import results_addonestep as results
self.res2 = results
class TestGMMAddTwostep(CheckGMM):
@classmethod
def setup_class(self):
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
self.bse_tol = [5e-6, 5e-7]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(np.log(endog+1), exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog, exog, instrument, moment_exponential_add)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res0
from .results_gmm_poisson import results_addtwostep as results
self.res2 = results
class TestGMMMultOnestep(CheckGMM):
#compares has_optimal_weights=True with Stata's has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [5e-6, 5e-7]
self.q_tol = [0.04, 0]
self.j_tol = [0.04, 0]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=0, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res0
from .results_gmm_poisson import results_multonestep as results
self.res2 = results
class TestGMMMultTwostep(CheckGMM):
#compares has_optimal_weights=True with Stata's has_optimal_weights=False
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [5e-6, 5e-7]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':False}, has_optimal_weights=False)
self.res1 = res0
from .results_gmm_poisson import results_multtwostep as results
self.res2 = results
class TestGMMMultTwostepDefault(CheckGMM):
# compares my defaults with the same options in Stata
# agreement is not very high, maybe vce(unadjusted) is different after all
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [0.004, 5e-4]
self.params_tol = [5e-5, 5e-5]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
#wargs={'centered':True}, has_optimal_weights=True
)
self.res1 = res0
from .results_gmm_poisson import results_multtwostepdefault as results
self.res2 = results
class TestGMMMultTwostepCenter(CheckGMM):
#compares my defaults with the same options in Stata
@classmethod
def setup_class(self):
# compare to Stata default options, twostep GMM
XLISTEXOG2 = 'aget aget2 educyr actlim totchr'.split()
endog_name = 'docvis'
exog_names = 'private medicaid'.split() + XLISTEXOG2 + ['const']
instrument_names = 'income medicaid ssiratio'.split() + XLISTEXOG2 + ['const']
endog = DATA[endog_name]
exog = DATA[exog_names]
instrument = DATA[instrument_names]
asarray = lambda x: np.asarray(x, float)
endog, exog, instrument = lmap(asarray, [endog, exog, instrument])
# Need to add all data into exog
endog_ = np.zeros(len(endog))
exog_ = np.column_stack((endog, exog))
self.bse_tol = [5e-4, 5e-5]
self.params_tol = [5e-5, 5e-5]
        self.q_tol = [5e-5, 1e-8]
# compare to Stata default options, iterative GMM
# with const at end
start = OLS(endog, exog).fit().params
nobs, k_instr = instrument.shape
w0inv = np.dot(instrument.T, instrument) / nobs
mod = gmm.NonlinearIVGMM(endog_, exog_, instrument, moment_exponential_mult)
res0 = mod.fit(start, maxiter=2, inv_weights=w0inv,
optim_method='bfgs', optim_args={'gtol':1e-8, 'disp': 0},
wargs={'centered':True}, has_optimal_weights=False
)
self.res1 = res0
from .results_gmm_poisson import results_multtwostepcenter as results
self.res2 = results
def test_more(self):
# from Stata `overid`
J_df = 1
J_p = 0.332254330027383
J = 0.940091427212973
j, jpval, jdf = self.res1.jtest()
assert_allclose(jpval, J_p, rtol=5e-5, atol=0)
if __name__ == '__main__':
tt = TestGMMAddOnestep()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMAddTwostep()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMMultOnestep()
tt.setup_class()
tt.test_basic()
#tt.test_other()
tt = TestGMMMultTwostep()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMMultTwostepDefault()
tt.setup_class()
tt.test_basic()
tt.test_other()
tt = TestGMMMultTwostepCenter()
tt.setup_class()
tt.test_basic()
tt.test_other()
| bsd-3-clause |
cbertinato/pandas | pandas/tests/plotting/test_backend.py | 1 | 1151 | import pytest
import pandas
def test_matplotlib_backend_error():
msg = ('matplotlib is required for plotting when the default backend '
'"matplotlib" is selected.')
try:
import matplotlib # noqa
except ImportError:
with pytest.raises(ImportError, match=msg):
pandas.set_option('plotting.backend', 'matplotlib')
def test_backend_is_not_module():
msg = ('"not_an_existing_module" does not seem to be an installed module. '
'A pandas plotting backend must be a module that can be imported')
with pytest.raises(ValueError, match=msg):
pandas.set_option('plotting.backend', 'not_an_existing_module')
def test_backend_is_correct(monkeypatch):
monkeypatch.setattr('pandas.core.config_init.importlib.import_module',
lambda name: None)
pandas.set_option('plotting.backend', 'correct_backend')
assert pandas.get_option('plotting.backend') == 'correct_backend'
# Restore backend for other tests (matplotlib can be not installed)
try:
pandas.set_option('plotting.backend', 'matplotlib')
except ImportError:
pass
| bsd-3-clause |
leggitta/mne-python | examples/realtime/ftclient_rt_compute_psd.py | 17 | 2460 | """
==============================================================
Compute real-time power spectrum density with FieldTrip client
==============================================================
Please refer to `ftclient_rt_average.py` for instructions on
how to get the FieldTrip connector working in MNE-Python.
This example demonstrates how to use it for continuous
computation of power spectra in real-time using the
get_data_as_epoch function.
"""
# Author: Mainak Jas <mainak@neuro.hut.fi>
#
# License: BSD (3-clause)
import numpy as np
import matplotlib.pyplot as plt
import mne
from mne.realtime import FieldTripClient
from mne.time_frequency import compute_epochs_psd
print(__doc__)
# user must provide list of bad channels because
# FieldTrip header object does not provide that
bads = ['MEG 2443', 'EEG 053']
fig, ax = plt.subplots(1)
with FieldTripClient(host='localhost', port=1972,
tmax=150, wait_max=10) as rt_client:
# get measurement info guessed by MNE-Python
raw_info = rt_client.get_measurement_info()
# select gradiometers
picks = mne.pick_types(raw_info, meg='grad', eeg=False, eog=True,
stim=False, include=[], exclude=bads)
n_fft = 256 # the FFT size. Ideally a power of 2
n_samples = 2048 # time window on which to compute FFT
for ii in range(20):
epoch = rt_client.get_data_as_epoch(n_samples=n_samples, picks=picks)
psd, freqs = compute_epochs_psd(epoch, fmin=2, fmax=200, n_fft=n_fft)
cmap = 'RdBu_r'
freq_mask = freqs < 150
freqs = freqs[freq_mask]
log_psd = 10 * np.log10(psd[0])
tmin = epoch.events[0][0] / raw_info['sfreq']
tmax = (epoch.events[0][0] + n_samples) / raw_info['sfreq']
if ii == 0:
im = ax.imshow(log_psd[:, freq_mask].T, aspect='auto',
origin='lower', cmap=cmap)
ax.set_yticks(np.arange(0, len(freqs), 10))
ax.set_yticklabels(freqs[::10].round(1))
ax.set_xlabel('Frequency (Hz)')
ax.set_xticks(np.arange(0, len(picks), 30))
ax.set_xticklabels(picks[::30])
ax.set_xlabel('MEG channel index')
im.set_clim()
else:
im.set_data(log_psd[:, freq_mask].T)
plt.title('continuous power spectrum (t = %0.2f sec to %0.2f sec)'
% (tmin, tmax), fontsize=10)
plt.pause(0.5)
plt.close()
| bsd-3-clause |
ChanderG/scikit-learn | sklearn/manifold/tests/test_isomap.py | 226 | 3941 | from itertools import product
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from sklearn import datasets
from sklearn import manifold
from sklearn import neighbors
from sklearn import pipeline
from sklearn import preprocessing
from sklearn.utils.testing import assert_less
eigen_solvers = ['auto', 'dense', 'arpack']
path_methods = ['auto', 'FW', 'D']
def test_isomap_simple_grid():
# Isomap should preserve distances when all neighbors are used
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# distances from each point to all others
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
assert_array_almost_equal(G, G_iso)
def test_isomap_reconstruction_error():
# Same setup as in test_isomap_simple_grid, with an added dimension
N_per_side = 5
Npts = N_per_side ** 2
n_neighbors = Npts - 1
# grid of equidistant points in 2D, n_components = n_dim
X = np.array(list(product(range(N_per_side), repeat=2)))
# add noise in a third dimension
rng = np.random.RandomState(0)
noise = 0.1 * rng.randn(Npts, 1)
X = np.concatenate((X, noise), 1)
# compute input kernel
G = neighbors.kneighbors_graph(X, n_neighbors,
mode='distance').toarray()
centerer = preprocessing.KernelCenterer()
K = centerer.fit_transform(-0.5 * G ** 2)
for eigen_solver in eigen_solvers:
for path_method in path_methods:
clf = manifold.Isomap(n_neighbors=n_neighbors, n_components=2,
eigen_solver=eigen_solver,
path_method=path_method)
clf.fit(X)
# compute output kernel
G_iso = neighbors.kneighbors_graph(clf.embedding_,
n_neighbors,
mode='distance').toarray()
K_iso = centerer.fit_transform(-0.5 * G_iso ** 2)
# make sure error agrees
reconstruction_error = np.linalg.norm(K - K_iso) / Npts
assert_almost_equal(reconstruction_error,
clf.reconstruction_error())
def test_transform():
n_samples = 200
n_components = 10
noise_scale = 0.01
# Create S-curve dataset
X, y = datasets.samples_generator.make_s_curve(n_samples, random_state=0)
# Compute isomap embedding
iso = manifold.Isomap(n_components, 2)
X_iso = iso.fit_transform(X)
# Re-embed a noisy version of the points
rng = np.random.RandomState(0)
noise = noise_scale * rng.randn(*X.shape)
X_iso2 = iso.transform(X + noise)
# Make sure the rms error on re-embedding is comparable to noise_scale
assert_less(np.sqrt(np.mean((X_iso - X_iso2) ** 2)), 2 * noise_scale)
def test_pipeline():
# check that Isomap works fine as a transformer in a Pipeline
# only checks that no error is raised.
# TODO check that it actually does something useful
X, y = datasets.make_blobs(random_state=0)
clf = pipeline.Pipeline(
[('isomap', manifold.Isomap()),
('clf', neighbors.KNeighborsClassifier())])
clf.fit(X, y)
assert_less(.9, clf.score(X, y))
| bsd-3-clause |
ovpn-to/oVPN.to-Client-Software | else/python/hooks.py | 1 | 17289 | # -*- coding: utf-8 -*-
#
# Hooks module for py2exe.
# Inspired by cx_freeze's hooks.py, which is:
#
# Copyright © 2007-2013, Anthony Tuininga.
# Copyright © 2001-2006, Computronix (Canada) Ltd., Edmonton, Alberta, Canada.
# All rights reserved.
#
import os, sys
# Exclude modules that the standard library imports (conditionally),
# but which are not present on windows.
#
# _memimporter can be excluded because it is built into the run-stub.
windows_excludes = """
_curses
_dummy_threading
_emx_link
_gestalt
_posixsubprocess
ce
clr
console
fcntl
grp
java
org
os2
posix
pwd
site
termios
vms_lib
_memimporter
""".split()
def init_finder(finder):
# what about renamed functions, like urllib.pathname2url?
#
# We should use ignore() for Python 2 names so that my py2to3
# importhook works. For modules that are not present on Windows,
# we should probably use excludes.append()
finder.excludes.extend(windows_excludes)
# python2 modules are ignored (but not excluded)
finder.ignore("BaseHTTPServer")
finder.ignore("ConfigParser")
finder.ignore("IronPython")
finder.ignore("SimpleHTTPServer")
finder.ignore("StringIO")
finder.ignore("__builtin__")
finder.ignore("_winreg")
finder.ignore("cPickle")
finder.ignore("cStringIO")
finder.ignore("commands")
finder.ignore("compiler")
finder.ignore("copy_reg")
finder.ignore("dummy_thread")
finder.ignore("future_builtins")
finder.ignore("htmlentitydefs")
finder.ignore("httplib")
finder.ignore("md5")
finder.ignore("new")
finder.ignore("thread")
finder.ignore("unittest2")
finder.ignore("urllib2")
finder.ignore("urlparse")
def hook_pycparser(finder, module):
"""pycparser needs lextab.py and yacctab.py which are not picked
up automatically. Make sure the complete package is included;
otherwise the exe-files may create yacctab.py and lextab.py when
they are run.
"""
finder.import_package_later("pycparser")
def hook_pycparser__build_tables(finder, module):
finder.ignore("lextab")
finder.ignore("yacctab")
finder.ignore("_ast_gen")
finder.ignore("c_ast")
def hook_pycparser_ply(finder, module):
finder.ignore("lex")
finder.ignore("ply")
def hook_OpenSSL(finder, module):
"""OpenSSL needs the cryptography package."""
finder.import_package_later("cryptography")
def hook_cffi_cparser(finder, module):
finder.ignore("cffi._pycparser")
def hook_cffi(finder, module):
# We need to patch two methods in the
# cffi.vengine_cpy.VCPythonEngine class so that cffi libraries
# work from within zip-files.
finder.add_bootcode("""
def patch_cffi():
def find_module(self, module_name, path, so_suffixes):
import sys
name = "%s.%s" % (self.verifier.ext_package, module_name)
try:
__import__(name)
except ImportError:
return None
self.__module = mod = sys.modules[name]
return mod.__file__
def load_library(self):
from cffi import ffiplatform
import sys
# XXX review all usages of 'self' here!
# import it as a new extension module
module = self.__module
#
# call loading_cpy_struct() to get the struct layout inferred by
# the C compiler
self._load(module, 'loading')
#
# the C code will need the <ctype> objects. Collect them in
# order in a list.
revmapping = dict([(value, key)
for (key, value) in self._typesdict.items()])
lst = [revmapping[i] for i in range(len(revmapping))]
lst = list(map(self.ffi._get_cached_btype, lst))
#
# build the FFILibrary class and instance and call _cffi_setup().
# this will set up some fields like '_cffi_types', and only then
# it will invoke the chained list of functions that will really
# build (notably) the constant objects, as <cdata> if they are
# pointers, and store them as attributes on the 'library' object.
class FFILibrary(object):
_cffi_python_module = module
_cffi_ffi = self.ffi
_cffi_dir = []
def __dir__(self):
return FFILibrary._cffi_dir + list(self.__dict__)
library = FFILibrary()
if module._cffi_setup(lst, ffiplatform.VerificationError, library):
import warnings
warnings.warn("reimporting %r might overwrite older definitions"
% (self.verifier.get_module_name()))
#
# finally, call the loaded_cpy_xxx() functions. This will perform
# the final adjustments, like copying the Python->C wrapper
# functions from the module to the 'library' object, and setting
# up the FFILibrary class with properties for the global C variables.
self._load(module, 'loaded', library=library)
module._cffi_original_ffi = self.ffi
module._cffi_types_of_builtin_funcs = self._types_of_builtin_functions
return library
from cffi.vengine_cpy import VCPythonEngine
VCPythonEngine.find_module = find_module
VCPythonEngine.load_library = load_library
patch_cffi()
del patch_cffi
""")
def hook_multiprocessing(finder, module):
module.__globalnames__.add("AuthenticationError")
module.__globalnames__.add("BufferTooShort")
module.__globalnames__.add("Manager")
module.__globalnames__.add("TimeoutError")
module.__globalnames__.add("cpu_count")
module.__globalnames__.add("current_process")
module.__globalnames__.add("get_context")
module.__globalnames__.add("get_start_method")
module.__globalnames__.add("set_start_method")
module.__globalnames__.add("JoinableQueue")
module.__globalnames__.add("Lock")
module.__globalnames__.add("Process")
module.__globalnames__.add("Queue")
module.__globalnames__.add("freeze_support")
def import_psutil(finder, module):
"""Exclude stuff for other operating systems."""
finder.excludes.append("_psutil_bsd")
finder.excludes.append("_psutil_linux")
finder.excludes.append("_psutil_osx")
finder.excludes.append("_psutil_posix")
finder.excludes.append("_psutil_sunos")
def hook_PIL(finder, module):
# c:\Python33-64\lib\site-packages\PIL
"""Pillow loads plugins"""
# Exclude python 2 imports
finder.excludes.append("Tkinter")
finder.import_package_later("PIL")
def hook__socket(finder, module):
"""
_socket.pyd uses the 'idna' encoding; and that requires
'unicodedata.pyd'.
"""
finder.import_hook("encodings.idna")
finder.import_hook("unicodedata")
def hook_pyreadline(finder, module):
"""
"""
finder.ignore("IronPythonConsole")
finder.excludes.append("StringIO") # in pyreadline.py3k_compat
finder.ignore("System")
finder.excludes.append("sets")
finder.ignore("startup")
def hook_xml_etree_ElementTree(finder, module):
"""ElementC14N is an optional extension. Ignore if it is not
found.
"""
finder.ignore("ElementC14N")
def hook_urllib_request(finder, module):
"""urllib.request imports _scproxy on darwin
"""
finder.excludes.append("_scproxy")
def hook_pythoncom(finder, module):
"""pythoncom is a Python extension module with .dll extension,
usually in the windows system directory as pythoncom3X.dll.
"""
import pythoncom
finder.add_dll(pythoncom.__file__)
def hook_pywintypes(finder, module):
"""pywintypes is a Python extension module with .dll extension,
usually in the windows system directory as pywintypes3X.dll.
"""
import pywintypes
finder.add_dll(pywintypes.__file__)
def hook_win32com(finder, module):
"""The win32com package extends it's __path__ at runtime.
"""
finder.import_hook("pywintypes")
finder.import_hook("pythoncom")
import win32com
module.__path__ = win32com.__path__
def hook_win32api(finder, module):
"""win32api.FindFiles(...) needs this."""
#finder.import_hook("pywintypes")
finder.import_hook("win32timezone")
def hook_tkinter(finder, module):
"""Recusively copy tcl and tk directories.
"""
# It probably doesn't make sense to exclude tix from the tcl distribution,
# and only copy it when tkinter.tix is imported...
import tkinter._fix as fix
tcl_dir = os.path.normpath(os.path.join(fix.tcldir, ".."))
assert os.path.isdir(tcl_dir)
finder.add_datadirectory("tcl", tcl_dir, recursive=True)
finder.set_min_bundle("tkinter", 2)
def hook_six(finder, module):
"""six.py has an object 'moves'. This allows to import
modules/packages via attribute access under new names.
We install a fake module named 'six.moves' which simulates this
behaviour.
"""
class SixImporter(type(module)):
"""Simulate six.moves.
        Import renamed modules when retrieved as attributes.
"""
__code__ = None
def __init__(self, mf, *args, **kw):
import six
self.__moved_modules = {item.name: item.mod
for item in six._moved_attributes
if isinstance(item, six.MovedModule)}
super().__init__(*args, **kw)
self.__finder = mf
def __getattr__(self, name):
if name in self.__moved_modules:
renamed = self.__moved_modules[name]
self.__finder.safe_import_hook(renamed, caller=self)
mod = self.__finder.modules[renamed]
# add the module again with the renamed name:
self.__finder._add_module("six.moves." + name, mod)
return mod
else:
raise AttributeError(name)
m = SixImporter(finder,
None, "six.moves", finder._optimize)
finder._add_module("six.moves", m)
def hook_matplotlib(finder, module):
"""matplotlib requires data files in a 'mpl-data' subdirectory in
the same directory as the executable.
"""
# c:\Python33\lib\site-packages\matplotlib
mpl_data_path = os.path.join(os.path.dirname(module.__loader__.path),
"mpl-data")
finder.add_datadirectory("mpl-data", mpl_data_path, recursive=True)
finder.excludes.append("wx")
# XXX matplotlib requires tkinter which modulefinder does not
# detect because of the six bug.
def hook_numpy(finder, module):
"""numpy for Python 3 still tries to import some Python 2 modules;
exclude them."""
# I'm not sure if we can safely exclude these:
finder.ignore("Numeric")
finder.ignore("numarray")
finder.ignore("numpy_distutils")
finder.ignore("setuptools")
finder.ignore("Pyrex")
finder.ignore("nose")
finder.ignore("scipy")
def hook_nose(finder, module):
finder.ignore("IronPython")
finder.ignore("cStringIO")
finder.ignore("unittest2")
def hook_sysconfig(finder, module):
finder.ignore("_sysconfigdata")
def hook_numpy_random_mtrand(finder, module):
"""the numpy.random.mtrand module is an extension module and the
numpy.random module imports * from this module; define the list of
global names available to this module in order to avoid spurious
errors about missing modules.
"""
module.__globalnames__.add('RandomState')
module.__globalnames__.add('beta')
module.__globalnames__.add('binomial')
module.__globalnames__.add('bytes')
module.__globalnames__.add('chisquare')
module.__globalnames__.add('choice')
module.__globalnames__.add('dirichlet')
module.__globalnames__.add('exponential')
module.__globalnames__.add('f')
module.__globalnames__.add('gamma')
module.__globalnames__.add('geometric')
module.__globalnames__.add('get_state')
module.__globalnames__.add('gumbel')
module.__globalnames__.add('hypergeometric')
module.__globalnames__.add('laplace')
module.__globalnames__.add('logistic')
module.__globalnames__.add('lognormal')
module.__globalnames__.add('logseries')
module.__globalnames__.add('multinomial')
module.__globalnames__.add('multivariate_normal')
module.__globalnames__.add('negative_binomial')
module.__globalnames__.add('noncentral_chisquare')
module.__globalnames__.add('noncentral_f')
module.__globalnames__.add('normal')
module.__globalnames__.add('np')
module.__globalnames__.add('operator')
module.__globalnames__.add('pareto')
module.__globalnames__.add('permutation')
module.__globalnames__.add('poisson')
module.__globalnames__.add('power')
module.__globalnames__.add('rand')
module.__globalnames__.add('randint')
module.__globalnames__.add('randn')
module.__globalnames__.add('random_integers')
module.__globalnames__.add('random_sample')
module.__globalnames__.add('rayleigh')
module.__globalnames__.add('seed')
module.__globalnames__.add('set_state')
module.__globalnames__.add('shuffle')
module.__globalnames__.add('standard_cauchy')
module.__globalnames__.add('standard_exponential')
module.__globalnames__.add('standard_gamma')
module.__globalnames__.add('standard_normal')
module.__globalnames__.add('standard_t')
module.__globalnames__.add('triangular')
module.__globalnames__.add('uniform')
module.__globalnames__.add('vonmises')
module.__globalnames__.add('wald')
module.__globalnames__.add('weibull')
module.__globalnames__.add('zipf')
def hook_numpy_distutils(finder, module):
"""In a 'if sys.version_info[0] < 3:' block numpy.distutils does
an implicit relative import: 'import __config__'. This will not
work in Python3 so ignore it.
"""
finder.excludes.append("__config__")
def hook_numpy_f2py(finder, module):
""" numpy.f2py tries to import __svn_version__. Ignore when his fails.
"""
finder.excludes.append("__svn_version__")
def hook_numpy_core_umath(finder, module):
"""the numpy.core.umath module is an extension module and the numpy module
imports * from this module; define the list of global names available
to this module in order to avoid spurious errors about missing
modules"""
module.__globalnames__.add("add")
module.__globalnames__.add("absolute")
module.__globalnames__.add("arccos")
module.__globalnames__.add("arccosh")
module.__globalnames__.add("arcsin")
module.__globalnames__.add("arcsinh")
module.__globalnames__.add("arctan")
module.__globalnames__.add("arctanh")
module.__globalnames__.add("bitwise_and")
module.__globalnames__.add("bitwise_or")
module.__globalnames__.add("bitwise_xor")
module.__globalnames__.add("ceil")
module.__globalnames__.add("conjugate")
module.__globalnames__.add("cosh")
module.__globalnames__.add("divide")
module.__globalnames__.add("exp")
module.__globalnames__.add("e")
module.__globalnames__.add("fabs")
module.__globalnames__.add("floor")
module.__globalnames__.add("floor_divide")
module.__globalnames__.add("fmod")
module.__globalnames__.add("geterrobj")
module.__globalnames__.add("greater")
module.__globalnames__.add("hypot")
module.__globalnames__.add("invert")
module.__globalnames__.add("isfinite")
module.__globalnames__.add("isinf")
module.__globalnames__.add("isnan")
module.__globalnames__.add("less")
module.__globalnames__.add("left_shift")
module.__globalnames__.add("log")
module.__globalnames__.add("logical_and")
module.__globalnames__.add("logical_not")
module.__globalnames__.add("logical_or")
module.__globalnames__.add("logical_xor")
module.__globalnames__.add("maximum")
module.__globalnames__.add("minimum")
module.__globalnames__.add("multiply")
module.__globalnames__.add("negative")
module.__globalnames__.add("not_equal")
module.__globalnames__.add("power")
module.__globalnames__.add("remainder")
module.__globalnames__.add("right_shift")
module.__globalnames__.add("sign")
module.__globalnames__.add("signbit")
module.__globalnames__.add("sinh")
module.__globalnames__.add("sqrt")
module.__globalnames__.add("tan")
module.__globalnames__.add("tanh")
module.__globalnames__.add("true_divide")
def hook_numpy_core_numerictypes(finder, module):
"""the numpy.core.numerictypes module adds a number of items to itself
dynamically; define these to avoid spurious errors about missing
modules"""
module.__globalnames__.add("bool_")
module.__globalnames__.add("cdouble")
module.__globalnames__.add("complexfloating")
module.__globalnames__.add("csingle")
module.__globalnames__.add("double")
module.__globalnames__.add("longdouble")
module.__globalnames__.add("float32")
module.__globalnames__.add("float64")
module.__globalnames__.add("float_")
module.__globalnames__.add("inexact")
module.__globalnames__.add("integer")
module.__globalnames__.add("intc")
module.__globalnames__.add("int32")
module.__globalnames__.add("number")
module.__globalnames__.add("single")
def hook_numpy_core(finder, module):
finder.ignore("numpy.core._dotblas")
| gpl-2.0 |
idlead/scikit-learn | examples/neural_networks/plot_mlp_training_curves.py | 56 | 3596 | """
========================================================
Compare Stochastic learning strategies for MLPClassifier
========================================================
This example visualizes some training loss curves for different stochastic
learning strategies, including SGD and Adam. Because of time-constraints, we
use several small datasets, for which L-BFGS might be more suitable. The
general trend shown in these examples seems to carry over to larger datasets,
however.
"""
print(__doc__)
import matplotlib.pyplot as plt
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import MinMaxScaler
from sklearn import datasets
# different learning rate schedules and momentum parameters
params = [{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'constant', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': 0,
'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': True, 'learning_rate_init': 0.2},
{'algorithm': 'sgd', 'learning_rate': 'invscaling', 'momentum': .9,
'nesterovs_momentum': False, 'learning_rate_init': 0.2},
{'algorithm': 'adam'}]
labels = ["constant learning-rate", "constant with momentum",
"constant with Nesterov's momentum",
"inv-scaling learning-rate", "inv-scaling with momentum",
"inv-scaling with Nesterov's momentum", "adam"]
plot_args = [{'c': 'red', 'linestyle': '-'},
{'c': 'green', 'linestyle': '-'},
{'c': 'blue', 'linestyle': '-'},
{'c': 'red', 'linestyle': '--'},
{'c': 'green', 'linestyle': '--'},
{'c': 'blue', 'linestyle': '--'},
{'c': 'black', 'linestyle': '-'}]
def plot_on_dataset(X, y, ax, name):
# for each dataset, plot learning for each learning strategy
print("\nlearning on dataset %s" % name)
ax.set_title(name)
X = MinMaxScaler().fit_transform(X)
mlps = []
if name == "digits":
# digits is larger but converges fairly quickly
max_iter = 15
else:
max_iter = 400
for label, param in zip(labels, params):
print("training: %s" % label)
mlp = MLPClassifier(verbose=0, random_state=0,
max_iter=max_iter, **param)
mlp.fit(X, y)
mlps.append(mlp)
print("Training set score: %f" % mlp.score(X, y))
print("Training set loss: %f" % mlp.loss_)
for mlp, label, args in zip(mlps, labels, plot_args):
ax.plot(mlp.loss_curve_, label=label, **args)
fig, axes = plt.subplots(2, 2, figsize=(15, 10))
# load / generate some toy datasets
iris = datasets.load_iris()
digits = datasets.load_digits()
data_sets = [(iris.data, iris.target),
(digits.data, digits.target),
datasets.make_circles(noise=0.2, factor=0.5, random_state=1),
datasets.make_moons(noise=0.3, random_state=0)]
for ax, data, name in zip(axes.ravel(), data_sets, ['iris', 'digits',
'circles', 'moons']):
plot_on_dataset(*data, ax=ax, name=name)
fig.legend(ax.get_lines(), labels=labels, ncol=3, loc="upper center")
plt.show()
| bsd-3-clause |
musteryu/Data-Mining | assignment-黄煜-3120100937/question_4.py | 1 | 1258 | from mylib import *
import os,sys
import numpy as np
import matplotlib.pyplot as plt
import math
import random
from time import time
if __name__ == '__main__':
DIR_PATH = sys.path[0] + '\\'
# normal distribution vector file
nvctr_file1 = DIR_PATH + 'normal_500_1.txt'
nvctr_file2 = DIR_PATH + 'normal_500_2.txt'
# uniform distribution vector file
uvctr_file1 = DIR_PATH + 'uniform_500_1.txt'
uvctr_file2 = DIR_PATH + 'uniform_500_2.txt'
# normal distribution matrix
nmtrx = fget_mtrx(nvctr_file1) + fget_mtrx(nvctr_file2)
# uniform distribution matrix
umtrx = fget_mtrx(uvctr_file1) + fget_mtrx(uvctr_file2)
    # plist holds the numbers of dimensions kept after DCT compression
    # nplist is for the normal distribution data set
    # uplist is for the uniform distribution data set
nplist = []
uplist = []
for vector in nmtrx:
u, p = my_DCT_compression(vector, 0.01)
nplist.append(p)
for vector in umtrx:
u, p = my_DCT_compression(vector, 0.01)
uplist.append(p)
# draw histogram
plt.figure(1)
plt.subplot(2,1,1)
my_hist(nplist, bucket_size = 1, flat_edge = False, title = "For normal distribution data set")
plt.subplot(2,1,2)
my_hist(uplist, bucket_size = 1, flat_edge = False, title = "For uniform distribution data set")
plt.show()
| gpl-2.0 |
riddlezyc/geolab | src/structure/Z.py | 1 | 1474 | # -*- coding: utf-8 -*-
# from framesplit import trajectory
# too slow using this module
import matplotlib.pyplot as plt
dirName = r"F:\simulations\asphaltenes\na-mont\TMBO-oil\water\373-continue/"
xyzName = 'all.xyz'
hetero = 'O' # 'oh' 'N' 'sp' 'O' 'Np' 'sp'
with open(dirName + xyzName, 'r') as foo:
coords = foo.readlines()
nAtoms = int(coords[0])
nFrames = int(len(coords) / (nAtoms + 2))
pos = []
for i in range(nFrames):
istart = i * (nAtoms + 2)
iend = (i + 1) * (nAtoms + 2)
pos.append(coords[istart:iend])
# for i in range(200):
# print coords[i]
heteroatom = 0
# all of my molecules have fewer than 200 atoms
for i in range(200):
x = pos[0][i].split()[0]
if x == hetero:
heteroatom = i
break
heteroZ = []
for p in pos:
# print p[heteroatom].split()[0]
zx = float(p[heteroatom].split()[3])
if zx < 10:
zx = zx + 80
heteroZ.append(zx)
with open(dirName + 'heteroZ.dat', 'w') as foo:
for i, z in enumerate(heteroZ):
print >> foo, "%3d %8.5f" % (i, z)
# heteroatom z-coordinate plot
plt.figure(0, figsize=(8, 4))
figName = dirName + 'heteroZ.png'
plt.title('z of heteroatom', fontsize=20)
plt.plot(range(len(heteroZ)-1), heteroZ[1:], linewidth=2)
plt.grid(True)
plt.xlabel('steps')
plt.ylabel('Z')
plt.axis([0, len(heteroZ)*1.1, 0, max(heteroZ)*1.1])
plt.savefig(figName, format='png', dpi=300)
plt.close()
| gpl-3.0 |
rraadd88/dms2dfe | dms2dfe/lib/io_mut_class.py | 2 | 9503 | # Copyright 2017, Rohan Dandage <rraadd_8@hotmail.com,rohan@igib.in>
# This program is distributed under General Public License v. 3.
"""
================================
``io_mut_files``
================================
"""
from __future__ import division
import sys
from os.path import splitext,exists,basename,abspath,dirname
from os import makedirs,stat
import pandas as pd
import numpy as np
from glob import glob
import logging
import subprocess
from dms2dfe.lib.io_dfs import set_index
def getcolsmets(d,cs):
"""
Get the total non-na items in column.
:param d: pandas dataframe
:param cs: list of columns
"""
cols=[c for c in d.columns if cs in c]
counts=[len(d[c].dropna()) for c in cols]
return dict(zip(cols,counts))
def get_2SD_cutoffs(d,reps,N=False):
"""
    Get the mu + 2*sigma cutoff from pairwise differences between replicate columns;
    returns (mu + 2*sigma, mu, sigma), with sigma pooled across replicate pairs.
    :param d: pandas dataframe
    :param reps: names of replicate columns
"""
t=d.copy()
mus=[]
sigmas=[]
for r1i,r1 in enumerate(reps):
for r2i,r2 in enumerate(reps):
if r1i<r2i:
t.loc[:,'reps']=t.loc[:,r1]-t.loc[:,r2]
if N and ('mut' in t):
t.loc[(t.loc[:,'mut']==t.loc[:,'ref']),'reps']=np.nan
mus.append(t.loc[:,'reps'].mean())
sigmas.append(t.loc[:,'reps'].std())
mu=np.mean(mus)
sigma=np.sqrt(np.mean([s**2 for s in sigmas]))
return mu+sigma*2,mu,sigma
def get_repli_FiA(d,csel='.NiA_tran.sel',cref='.NiA_tran.ref'):
"""
    Get per-replicate fold-change columns (FCA*_reps = sel - ref) from a data_fit
    table, masking synonymous (mut == ref) positions.
    :param d: pandas dataframe data_fit
"""
sels=np.sort([c for c in d.columns if csel in c])
refs=np.sort([c for c in d.columns if cref in c])
FCAi=1
cols_FCA=[]
for refi,ref in enumerate(refs):
for seli,sel in enumerate(sels):
if refi==seli:
coln='FCA%s_reps' % FCAi
d.loc[:,coln]=d.loc[:,sel]-d.loc[:,ref]
d.loc[(d.loc[:,'mut']==d.loc[:,'ref']),coln]=np.nan
cols_FCA.append(coln)
FCAi+=1
return d.loc[:,cols_FCA]
def data_fit2cutoffs(d,sA,sB,N=True):
"""
Get cutoffs of data fit to assign classes
:param d: pandas dataframe with fold change values
:param sA,sB: columns with two conditions to be compared
"""
refs=[c for c in d.columns if sA in c]
sels=[c for c in d.columns if sB in c]
_,mu1,sigma1=get_2SD_cutoffs(d,refs)
_,mu2,sigma2=get_2SD_cutoffs(d,sels)
    return np.mean([mu1,mu2])+2*np.sqrt(np.mean([sigma1**2,sigma2**2]))
def class_fit(d,col_fit='FiA',FC=True,zscore=False): #column of the data_fit
"""
    This classifies the fitness of mutants into enriched (beneficial), neutral, or depleted (deleterious) categories.
    :param d: dataframe of `data_fit`.
    :returns d: dataframe with fitness classes written to the 'class_fit' column, based on values in the column given by `col_fit` (default 'FiA').
"""
cols_reps=[c for c in d if '.NiA_tran.ref' in c]
if FC and (len(cols_reps)==2):
up,_,_=get_2SD_cutoffs(d,cols_reps,N=True)
dw=-1*up
else:
if zscore:
up,dw=-2,2
else:
up,dw=0,0
d.loc[d.loc[:,col_fit]>+up, 'class_fit']="enriched"
d.loc[((d.loc[:,col_fit]>=dw) & (d.loc[:,col_fit]<=up)),'class_fit']="neutral"
d.loc[d.loc[:,col_fit]<dw, 'class_fit']="depleted"
return d
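# Added illustration (not part of the original module): a minimal, hedged
# sketch of class_fit on a toy table. Real data_fit tables also carry
# replicate read-count columns ('.NiA_tran.ref'); with FC=False the cutoff
# simply falls back to zero, as in the else branch above.
def _class_fit_example():
    toy = pd.DataFrame({'FiA': [1.5, 0.0, -2.3]},
                       index=['A10V', 'A10G', 'A10P'])
    toy.index.name = 'mutids'
    # expected classes: enriched, neutral, depleted
    return class_fit(toy, col_fit='FiA', FC=False)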
def class_comparison(dA,dB):
"""
This classifies differences in fitness i.e. relative fitness into positive, negative or robust categories.
:param dc: dataframe with `dc`.
:returns dc: dataframe with `class__comparison` added according to fitness levels in input and selected samples in `dc`
"""
dA=set_index(dA,'mutids')
dB=set_index(dB,'mutids')
dc=get_repli_FiA(dA).join(get_repli_FiA(dB),lsuffix='_test',rsuffix='_ctrl')
up=data_fit2cutoffs(dc,sA='_reps_test',sB='_reps_ctrl',N=False)
dw=-1*up
diff=dA.loc[:,'FiA']-dB.loc[:,'FiA']
diff.index.name='mutids'
diff=diff.reset_index()
mutids_up=diff.loc[(diff.loc[:,'FiA']>up),'mutids'].tolist()
mutids_dw=diff.loc[(diff.loc[:,'FiA']<dw),'mutids'].tolist()
dc.loc[mutids_up,'class_comparison']="positive"
dc.loc[mutids_dw,'class_comparison']="negative"
return dc.loc[:,'class_comparison']
def get_data_metrics(prj_dh):
"""
    Obtain metrics for fold change values from an experiment
:param prj_dh: path to project directory
"""
data_fit_fns_all=[basename(s) for s in glob('%s/data_fit/aas/*' % prj_dh)]
data_fit_fns_all2labels=dict(zip(data_fit_fns_all,data_fit_fns_all))
data_fit_metrics=pd.DataFrame(index=data_fit_fns_all)
fit=pd.read_csv('%s/cfg/fit' % prj_dh).set_index('unsel')
comparison=pd.read_csv('%s/cfg/comparison' % prj_dh).set_index('ctrl')
for ctrl in comparison.index:
fh_ctrls=glob('%s/data_fit/aas/%s_WRT_*' % (prj_dh,ctrl))
for fh_ctrl in fh_ctrls:
dctrl=pd.read_csv(fh_ctrl).set_index('mutids')
up,_,_=get_2SD_cutoffs(dctrl,[c for c in dctrl if '.NiA_tran.ref' in c],N=True)
dw=-1*up
# print up,dw
ctrl=basename(fh_ctrl)
ctrls={'versus_%s' % ctrl:ctrl}
data_fit_metrics.index.name='fn'
for fni,fn in enumerate(data_fit_fns_all):
fh= '%s/data_fit/aas/%s' % (prj_dh,fn)
d=pd.read_csv(fh).set_index('mutids')
data_fit_metrics.loc[fn,'$\mu+2\sigma$']=up
d=d.reset_index()
mutids_up=d.loc[(d.loc[:,'FiA']>up),'mutids'].tolist()
mutids_dw=d.loc[(d.loc[:,'FiA']<dw),'mutids'].tolist()
mutids_updw=mutids_up+mutids_dw
d=d.set_index('mutids')
data_fit_metrics.loc[fn,'$n$']=len(d.loc[:,'FiA'].dropna())
ref_counts=getcolsmets(d,cs='.NiA_tran.ref')
sel_counts=getcolsmets(d,cs='.NiA_tran.sel')
for i,_ in enumerate(ref_counts.keys()):
data_fit_metrics.loc[fn,'$n$ ref%02d' % i]=ref_counts[ref_counts.keys()[i]]
for i,_ in enumerate(sel_counts.keys()):
data_fit_metrics.loc[fn,'$n$ sel%02d' % i]=sel_counts[sel_counts.keys()[i]]
data_fit_metrics.loc[fn,'$n_{neutral}$']=data_fit_metrics.loc[fn,'$n$']-len(mutids_updw)
data_fit_metrics.loc[fn,'$n_{enriched}$']=len(mutids_up)
data_fit_metrics.loc[fn,'$n_{depleted}$']=len(mutids_dw)
data_fit_metrics.loc[fn,'$F$ mean']=d.loc[:,'FiA'].mean()
if len(data_fit_metrics.columns)>0:
data_fit_metrics['$n_{enriched}$%%']=data_fit_metrics['$n_{enriched}$']\
/(data_fit_metrics['$n_{enriched}$']+data_fit_metrics['$n_{depleted}$'])*100
data_fit_metrics['$n_{depleted}$%%']=data_fit_metrics['$n_{depleted}$']\
/(data_fit_metrics['$n_{enriched}$']+data_fit_metrics['$n_{depleted}$'])*100
data_fit_metrics['$n_{enriched}$%']=data_fit_metrics['$n_{enriched}$']/data_fit_metrics['$n$']*100
data_fit_metrics['$n_{depleted}$%']=data_fit_metrics['$n_{depleted}$']/data_fit_metrics['$n$']*100
data_fit_metrics['$n_{neutral}$%']=data_fit_metrics['$n_{neutral}$']/data_fit_metrics['$n$']*100
data_fit_metrics['$E$']=data_fit_metrics['$n_{enriched}$']/\
(data_fit_metrics['$n_{enriched}$']+data_fit_metrics['$n_{depleted}$'])
# print data_fit_metrics.index
# print data_fit_metrics.columns
data_fit_metrics.loc[:,'labels']=[data_fit_fns_all2labels[i] for i in data_fit_metrics.index]
for c in ctrls:
fh= '%s/data_fit/aas/%s' % (prj_dh,ctrls[c])
dA=pd.read_csv(fh).set_index('mutids')
for fni,fn in enumerate(data_fit_fns_all):
fh= '%s/data_fit/aas/%s' % (prj_dh,fn)
dB=pd.read_csv(fh).set_index('mutids')
dc=get_repli_FiA(dA).join(get_repli_FiA(dB),lsuffix='_ctrl',rsuffix='_test')
up=data_fit2cutoffs(dc,sA='_reps_test',sB='_reps_ctrl',N=False)
dw=-1*up
data_fit_metrics.loc[fn,'$\mu+2\sigma$ comparison %s' % c]=up
diff=dB.loc[:,'FiA']-dA.loc[:,'FiA']
diff=diff.reset_index()
mutids_up=diff.loc[(diff.loc[:,'FiA']>up),'mutids'].tolist()
mutids_dw=diff.loc[(diff.loc[:,'FiA']<dw),'mutids'].tolist()
mutids_updw=mutids_up+mutids_dw
diff=diff.set_index('mutids')
data_fit_metrics.loc[fn,'$n$ %s' % c]=len(diff.dropna())
data_fit_metrics.loc[fn,'$n_{positive}$ %s' % c]=len(mutids_up)
data_fit_metrics.loc[fn,'$n_{negative}$ %s' % c]=len(mutids_dw)
data_fit_metrics['$\Delta n$ %s' % c]=(data_fit_metrics['$n$']-data_fit_metrics.loc[ctrls[c],'$n$'])
data_fit_metrics['$\Delta n_{enriched}$ %s' % c]=(data_fit_metrics['$n_{enriched}$']-data_fit_metrics.loc[ctrls[c],'$n_{enriched}$'])
data_fit_metrics['$\Delta n_{enriched}$%% %s' % c]=(data_fit_metrics['$n_{enriched}$%']-data_fit_metrics.loc[ctrls[c],'$n_{enriched}$%'])
data_fit_metrics['$\Delta F$ %s' % c]=(data_fit_metrics['$F$ mean']-data_fit_metrics.loc[ctrls[c],'$F$ mean'])
data_fit_metrics_fh='%s/data_comparison/data_fit_metrics' % prj_dh
data_fit_metrics.to_csv(data_fit_metrics_fh)
return data_fit_metrics
else:
logging.info('data_metrics has 0 cols') | gpl-3.0 |
leejw51/BumblebeeNet | Test/AddLayer.py | 1 | 1320 | import numpy as np
import matplotlib.pylab as plt
from MulLayer import MulLayer
class AddLayer:
def __init__ (self):
pass
def forward(self, x, y):
out = x + y
return out
def backward(self, dout):
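        # addition routes the upstream gradient dout to both inputs unchanged
        # (d(x + y)/dx = d(x + y)/dy = 1), hence the "* 1" below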
dx = dout * 1
dy = dout * 1
return dx,dy
def test_add_layer():
apple = 100
apple_num = 2
orange = 150
orange_num = 3
tax = 1.1
mul_apple_layer = MulLayer()
mul_orange_layer = MulLayer()
add_apple_orange_layer = AddLayer()
mul_tax_layer = MulLayer()
apple_price = mul_apple_layer.forward( apple, apple_num)
orange_price = mul_orange_layer.forward( orange, orange_num)
all_price = add_apple_orange_layer.forward( apple_price, orange_price)
price = mul_tax_layer.forward( all_price, tax)
dprice = 1
dall_price, dtax = mul_tax_layer.backward( dprice)
dapple_price, dorange_price = add_apple_orange_layer.backward(dall_price)
dorange, dorange_num = mul_orange_layer.backward(dorange_price)
dapple, dapple_num = mul_apple_layer.backward( dapple_price)
print("price=", price)
print(dapple_num, dapple, dorange, dorange_num, dtax)
| mit |
stephenliu1989/HK_DataMiner | hkdataminer/cluster/faiss_dbscan_.py | 1 | 14197 | # -*- coding: utf-8 -*-
"""
DBSCAN Accelerated by Facebook AI Faiss
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
import time
from scipy import sparse
from numba import autojit
import numba
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.utils import check_array, check_consistent_length
#from sklearn.neighbors import NearestNeighbors
from sklearn.cluster._dbscan_inner import dbscan_inner
import faiss
@autojit
def get_neighborhoods(D, I, eps):
neighborhoods = []
for i in range(len(D)):
distances = D[i]
#print(distances)
distances = np.delete(distances, 0)
indices = I[i]
indices = np.delete(indices, 0)
#print(indices)
index = indices[distances <= eps]
neighborhoods.append(index)
#neighborhoods = np.asarray(neighborhoods)
#np.savetxt('faiss_neighborhoods', np.asarray(neighborhoods), fmt='%s')
return np.asarray(neighborhoods)
def cpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=True):
dimension = X.shape[1]
if IVFFlat is True:
quantizer = faiss.IndexFlatL2(dimension)
index_cpu = faiss.IndexIVFFlat(quantizer, dimension, nlist, faiss.METRIC_L2)
# here we specify METRIC_L2, by default it performs inner-product search
assert not index_cpu.is_trained
index_cpu.train(X)
assert index_cpu.is_trained
# here we specify METRIC_L2, by default it performs inner-product search
else:
index_cpu = faiss.IndexFlatL2(dimension)
index_cpu.add(X)
n_samples = 1000
k = min_samples
samples = np.random.choice(len(X), n_samples)
# print(samples)
D, I = index_cpu.search(X[samples], k) # sanity check
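    # (added clarification) adaptively grow k: keep doubling it until, for the
    # sampled points, the k-th nearest neighbour already lies beyond eps, so a
    # k-NN search should cover the full eps-neighbourhood (a heuristic based on
    # the sample). Note that faiss L2 indexes report squared L2 distances.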
while np.min(np.amax(D, axis=1)) < eps:
k = k * 2
# D, I = index_gpu.search(X[samples], k)
#print(np.min(np.amax(D, axis=1)), eps, k)
D, I = index_cpu.search(X[samples], k)
if k > 1024:
k = 1000
#print(np.max(D[:, k - 1]), k, eps)
index_cpu.nprobe = nprobe
D, I = index_cpu.search(X, k) # actual search
return get_neighborhoods(D, I, eps)
def gpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=True):
dimension = X.shape[1]
if IVFFlat is True:
quantizer = faiss.IndexFlatL2(dimension)
index_cpu = faiss.IndexIVFFlat(quantizer, dimension, nlist, faiss.METRIC_L2)
# here we specify METRIC_L2, by default it performs inner-product search
res = faiss.StandardGpuResources() # use a single GPU
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
# make it an IVF GPU index
index_gpu = faiss.index_cpu_to_gpu(res, 0, index_cpu)
assert not index_gpu.is_trained
index_gpu.train(X)
assert index_gpu.is_trained
# here we specify METRIC_L2, by default it performs inner-product search
else:
index_cpu = faiss.IndexFlatL2(dimension)
res = faiss.StandardGpuResources()
flat_config = faiss.GpuIndexFlatConfig()
flat_config.device = 0
index_gpu = faiss.index_cpu_to_gpu(res, 0, index_cpu)
index_gpu.add(X)
n_samples = 1000
k = min_samples
samples = np.random.choice(len(X), n_samples)
# print(samples)
D, I = index_gpu.search(X[samples], k) # sanity check
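    # (added clarification) same adaptive-k heuristic as the CPU variant: k is
    # doubled until the largest k-th-neighbour distance among the sampled
    # points reaches eps (squared L2, as reported by faiss).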
while np.max(D[:, k - 1]) < eps:
k = k * 2
D, I = index_gpu.search(X[samples], k)
#print(np.max(D[:, k - 1]), k, eps)
index_gpu.nprobe = nprobe
D, I = index_gpu.search(X, k) # actual search
return get_neighborhoods(D, I, eps)
def faiss_dbscan(X, eps=0.5, min_samples=5, nlist=100, nprobe=5, metric='l2', metric_params=None,
algorithm='auto', leaf_size=30, p=2, sample_weight=None, n_jobs=1, GPU=False, IVFFlat=True):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
    X : array of shape (n_samples, n_features)
        A dense feature array; Faiss expects float32, C-contiguous input.
        Unlike scikit-learn's dbscan, this Faiss-based variant does not
        accept sparse input or precomputed distance matrices.
    eps : float, optional
        The maximum distance between two samples for them to be considered
        as in the same neighborhood. Note that Faiss L2 indexes report
        squared L2 distances, so ``eps`` is compared against squared
        distances here.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
    metric : string, or callable
        Kept for API compatibility with scikit-learn's dbscan. Neighbor
        search in this Faiss-accelerated variant always uses (squared) L2
        distances, so this argument is effectively ignored; "precomputed"
        distance matrices and sparse input are not supported.
metric_params : dict, optional
Additional keyword arguments for the metric function.
.. versionadded:: 0.19
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run for neighbors search.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While true, it's useless information.)
if GPU is True:
neighborhoods = gpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=IVFFlat)
else:
neighborhoods = cpu_radius_neighbors(X, eps, min_samples, nlist, nprobe, return_distance=False, IVFFlat=IVFFlat)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
class Faiss_DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
.. versionadded:: 0.17
metric *precomputed* to accept precomputed sparse matrix.
    nlist : int, optional (default=100)
        Number of inverted lists (cells) used by the Faiss IVF index for the
        neighborhood search.
    nprobe : int, optional (default=5)
        Number of inverted lists probed at query time by the Faiss IVF index.
    GPU : bool, optional (default=False)
        If True, run the Faiss neighborhood search on the GPU.
    IVFFlat : bool, optional (default=True)
        Passed to the Faiss-based neighborhood search; if True an IVFFlat
        index is used.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, nlist=100, nprobe=5, metric='l2', n_jobs=1, GPU=False, IVFFlat=True):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.n_jobs = n_jobs
self.GPU = GPU
self.IVFFlat = IVFFlat
self.nlist = nlist
self.nprobe = nprobe
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
#if metric is not "rmsd":
# X = check_array(X, accept_sparse='csr')
#t0 = time.time()
clust = faiss_dbscan(X, eps=self.eps, min_samples=self.min_samples, nlist=self.nlist, nprobe=self.nprobe, sample_weight=sample_weight, GPU=self.GPU, IVFFlat=self.IVFFlat)
#t1 = time.time()
#print("Faiss DBSCAN clustering Time Cost:", t1 - t0)
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
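# A minimal usage sketch, assuming Faiss and the neighborhood helpers used above
# are importable in this environment; the data and parameter values below are
# arbitrary placeholders, not recommendations.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.RandomState(0)
    # two well-separated blobs; Faiss indexes expect float32 input
    X_demo = np.vstack([rng.normal(0.0, 0.1, size=(500, 8)),
                        rng.normal(5.0, 0.1, size=(500, 8))]).astype(np.float32)
    est = Faiss_DBSCAN(eps=0.5, min_samples=10, nlist=32, nprobe=8,
                       GPU=False, IVFFlat=True)
    est.fit(X_demo)
    n_clusters = len(set(est.labels_)) - (1 if -1 in est.labels_ else 0)
    print("clusters found:", n_clusters)
    print("core samples:", len(est.core_sample_indices_))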
| apache-2.0 |
joergsimon/gesture-analysis | analysis/feature_selection.py | 1 | 6744 | from analysis.preparation import labelMatrixToArray
from analysis.preparation import normalizeZeroClassArray
from visualise.trace_features import trace_feature_origin
from visualise.confusion_matrix import plot_confusion_matrix
import numpy as np
import sklearn
import sklearn.linear_model
import sklearn.preprocessing as pp
import sklearn.svm as svm
import sklearn.feature_selection as fs
from analysis.classification import fit_classifier
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
# Interesting References:
# RFECV:
# Guyon, I., Weston, J., Barnhill, S., & Vapnik, V. (2002). Gene selection for
# cancer classification using support vector machines. Mach. Learn.. 46(1-3). 389-422.
def feature_selection(train_data, train_labels, const):
train_labels_arr, exclude = labelMatrixToArray(train_labels, const.label_threshold)
train_data_clean = train_data.drop(exclude)
train_labels_arr, train_data_clean, _ = normalizeZeroClassArray(train_labels_arr, train_data_clean)
print "num features before selection: {}".format(train_data_clean.columns.size)
feature_index = variance_threshold(train_data_clean)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:,feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after variance threshold".format(clf_name))
print(classification_report(train_labels_arr,prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index,const)
feature_index = rfe(train_data_clean,train_labels_arr)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after RFE".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
feature_index = k_best_chi2(train_data_clean, train_labels_arr, 700)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after Chi2".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
feature_index = rfe_cv_f1(train_data_clean, train_labels_arr)
clf, clf_name, needs_scaling = fit_classifier(train_data_clean.values[:, feature_index], np.array(train_labels_arr))
prediction = clf.predict(get_values(train_data_clean, feature_index, needs_scaling))
print("report for {} after RFECV".format(clf_name))
print(classification_report(train_labels_arr, prediction))
cnf_matrix = confusion_matrix(train_labels_arr, prediction)
plt.figure()
plot_confusion_matrix(cnf_matrix, classes=['0.0','1.0','2.0','3.0','4.0','5.0','6.0','7.0'],
title="Confusion Matrix for {} after variance threshold".format(clf_name))
trace_feature_origin(feature_index, const)
plt.show()
def get_values(data, feature_index, needs_scaling):
if needs_scaling:
values = data.values[:, feature_index]
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values)
return values
else:
return data.values[:, feature_index]
def variance_threshold(train_data):
# feature selection using VarianceThreshold filter
sel = fs.VarianceThreshold(threshold=(.8 * (1 - .8)))
fit = sel.fit(train_data.values)
col_index = fit.get_support(indices=True)
print "num features selected by VarianceThreshold: {}".format(len(col_index))
return col_index
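# Side note on the threshold above: .8 * (1 - .8) is the variance of a
# Bernoulli(0.8) variable, so the filter removes boolean-like features that
# take the same value in more than roughly 80% of samples. A tiny hypothetical
# sketch of the same idea on toy data (not used by this pipeline):
#
#   import numpy as np
#   import sklearn.feature_selection as fs
#   X_toy = np.array([[0, 1], [0, 0], [0, 1], [0, 0], [0, 1]])
#   fs.VarianceThreshold(threshold=.8 * (1 - .8)).fit_transform(X_toy)
#   # -> only the second column survives; the constant first column is dropped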
def rfe(train_data, train_labels):
    # important todo!
    # todo: I think also for feature selection we should take care that the 0 class is balanced!
    # todo: if you use it that way, scale the features
    print "Recursively eliminate features: "
svc = sklearn.linear_model.Lasso(alpha = 0.1) #svm.SVR(kernel="linear")
print "scale data"
values = train_data.values
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values) # pp.scale(values)
print "test fit."
svc.fit(values, np.array(train_labels))
print "run rfecv.."
rfecv = fs.RFE(estimator=svc, step=0.1, verbose=2)
rfecv.fit(values, np.array(train_labels))
print "get support..."
col_index = rfecv.get_support(indices=True)
print "num features selected by RFE(CV)/Lasso: {}".format(len(col_index))
return col_index
def rfe_cv_f1(train_data, train_labels):
    # important todo!
    # todo: I think also for feature selection we should take care that the 0 class is balanced!
    # todo: if you use it that way, scale the features
    print "Recursively eliminate features: "
svc = svm.SVC(kernel="linear") #sklearn.linear_model.Lasso(alpha = 0.1)
print "scale data"
values = train_data.values
minmax = pp.MinMaxScaler()
values = minmax.fit_transform(values)#pp.scale(values)
print "test fit."
svc.fit(values, np.array(train_labels).astype(int))
print "run rfecv.."
rfecv = fs.RFECV(estimator=svc, step=0.05, verbose=2)
rfecv.fit(values, np.array(train_labels).astype(int))
print "get support..."
col_index = rfecv.get_support(indices=True)
print "num features selected by RFECV/SVR: {}".format(len(col_index))
return col_index
def k_best_chi2(train_data, train_labels, k):
values = train_data.values
if values.min() < 0:
values = values + abs(values.min())
kb = fs.SelectKBest(fs.chi2, k=k)
kb.fit(values, np.array(train_labels))
col_index = kb.get_support(indices=True)
print "num features selected by K-Best using chi2: {}".format(len(col_index))
return col_index | apache-2.0 |
brodoll/sms-tools | lectures/09-Sound-description/plots-code/centroid.py | 23 | 1086 | import numpy as np
import matplotlib.pyplot as plt
import essentia.standard as ess
M = 1024
N = 1024
H = 512
fs = 44100
spectrum = ess.Spectrum(size=N)
window = ess.Windowing(size=M, type='hann')
centroid = ess.Centroid(range=fs/2.0)
x = ess.MonoLoader(filename = '../../../sounds/speech-male.wav', sampleRate = fs)()
centroids = []
for frame in ess.FrameGenerator(x, frameSize=M, hopSize=H, startFromZero=True):
mX = spectrum(window(frame))
centroid_val = centroid(mX)
centroids.append(centroid_val)
centroids = np.array(centroids)
plt.figure(1, figsize=(9.5, 5))
plt.subplot(2,1,1)
plt.plot(np.arange(x.size)/float(fs), x)
plt.axis([0, x.size/float(fs), min(x), max(x)])
plt.ylabel('amplitude')
plt.title('x (speech-male.wav)')
plt.subplot(2,1,2)
frmTime = H*np.arange(centroids.size)/float(fs)
plt.plot(frmTime, centroids, 'g', lw=1.5)
plt.axis([0, x.size/float(fs), min(centroids), max(centroids)])
plt.xlabel('time (sec)')
plt.ylabel('frequency (Hz)')
plt.title('spectral centroid')
plt.tight_layout()
plt.savefig('centroid.png')
plt.show()
| agpl-3.0 |
Rignak/Scripts-Python | DeepLearning/TagPrediction/TagPrediction.py | 1 | 10291 | import numpy as np
import matplotlib.pyplot as plt
import os
from os.path import join
import cv2
from skimage.transform import resize
from tqdm import tqdm
from datetime import datetime
import functools
os.environ['TF_CPP_MIN_VLOG_LEVEL'] = '3'
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
from keras import optimizers
from keras.models import Model, load_model
from keras.layers import Flatten, Dense, Conv2D, Dropout, MaxPooling2D, BatchNormalization, Input
from keras.callbacks import ModelCheckpoint, Callback
import json
import tensorflow as tf
tf.reset_default_graph()
from keras import backend as K
K.image_dim_ordering()
###################### Hyperparameters ######################
# Mode paramaters
INPUT_SHAPE = (256, 256, 3)
IMAGE_NUMBER = 30000
WEIGHT_FILENAME = os.path.join('models', 'tag_prediction.hdf5')
ROOT = 'dress'
VALIDATION_SPLIT = 0.9
TAG_END = "_dress"
FILE_END = 'S'
MIN_TAG_USE = 500
# Training parameters
BATCH_SIZE = 8
EPOCHS = 100
LEARNING_RATE = 1 * 10 ** -3
DROPOUT = 0.5
MOMENTUM = 0.5
WEIGHT_DECAY = 4 * 10 ** -5 # weight decay
ACTIVATION = 'selu'
NEURON_BASIS = 32
class PlotLearning(Callback):
def __init__(self, examples=False):
super().__init__()
self.examples = examples
self.x = []
self.losses, self.val_losses = [], []
self.logs = []
def on_epoch_end(self, epoch, logs={}):
self.logs.append(logs)
self.x.append(epoch)
self.losses.append(logs.get('loss'))
self.val_losses.append(logs.get('val_loss'))
plt.yscale('log')
plt.plot(self.x, self.losses)
plt.plot(self.x, self.val_losses)
plt.xlabel('Epochs')
plt.ylabel('Crossentropy')
plt.legend(['Training', 'Validation'])
plt.tight_layout()
plt.savefig('plot.png')
plt.close()
if self.examples:
z = self.model.predict(self.model.example[0][:6])
plot_example(self.model.example[0][:6], self.model.example[1][:6], z, self.model.labels)
plt.savefig(f"epochs/epoch{self.x[-1]}.png")
plt.close()
def plot_example(xs, ys, zs, labels):
n = xs.shape[0]
plt.figure(figsize=(12, 8))
plt.tight_layout()
for i, (x, y, z) in enumerate(zip(xs, ys, zs)):
if i != 0:
tick_label = [' ' for label in labels]
else:
tick_label = labels
plt.subplot(3, n, i + 1)
plt.imshow(x)
plt.subplot(3, n, i + n + 1)
plt.barh(labels, y, tick_label=tick_label)
plt.xlim(0, 1)
plt.subplot(3, n, i + 2 * n + 1)
plt.barh(labels, z, tick_label=tick_label)
plt.xlim(0, 1)
def import_model(tag_number, input_shape=INPUT_SHAPE):
inputs = Input(input_shape)
layer = Conv2D(NEURON_BASIS, (3, 3), activation=ACTIVATION, padding='same')(inputs)
layer = Conv2D(NEURON_BASIS, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
layer = Conv2D(NEURON_BASIS * 2, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 2, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 2, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
layer = Conv2D(NEURON_BASIS * 4, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 4, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 4, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
layer = Conv2D(NEURON_BASIS * 8, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 8, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 8, (3, 3), activation=ACTIVATION, padding='same')(layer)
layer = Conv2D(NEURON_BASIS * 4, (1, 1), activation=ACTIVATION, padding='same')(layer)
layer = MaxPooling2D(pool_size=(2, 2))(layer)
layer = Flatten()(layer)
layer = BatchNormalization()(layer)
layer = Dense(512, activation=ACTIVATION)(layer)
layer = Dropout(DROPOUT)(layer)
layer = Dense(2048, activation=ACTIVATION)(layer)
layer = Dropout(DROPOUT)(layer)
layer = Dense(tag_number, activation='sigmoid')(layer)
model = Model(inputs=[inputs], outputs=[layer])
sgd = optimizers.SGD(lr=LEARNING_RATE, momentum=MOMENTUM, nesterov=True)
model.compile(optimizer='adam', loss='binary_crossentropy')
model.summary()
return model
def get_tags(root, files, min_tag_use=MIN_TAG_USE, suffix=TAG_END):
with open(join('..', 'imgs', root + '.json'), 'r') as file:
tags = json.load(file)
tag_count = {}
files = [os.path.split(file)[-1] for file in files]
for key, value in tags.items():
if key + f'{FILE_END}.png' not in files:
continue
for tag in value.split():
if tag not in tag_count:
tag_count[tag] = 1
else:
tag_count[tag] += 1
with open(join('..', 'imgs', root + '_count.json'), 'w') as file:
json.dump(tag_count, file, sort_keys=True, indent=4)
print(f'Have {len(list(tag_count.keys()))} tags')
tags_count = {tag: count for tag, count in tag_count.items() if count > min_tag_use and tag.endswith(suffix)}
    print(f'Keep tags with >{min_tag_use} use: {len(tags_count)} tags')
for tag, count in tags_count.items():
print(f'{tag}: {count}')
input('Continue?')
return tags, tags_count
def make_output(files, tags, tags_count):
output = {}
for file in tqdm(files):
i = os.path.splitext(os.path.split(file)[-1])[0]
if FILE_END:
i = i[:-1]
truth = tags[i].split()
output[file] = []
for tag in tags_count.keys():
if tag in truth:
output[file].append(1)
else:
output[file].append(0)
return output
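# Note on the encoding produced above: make_output() maps each image file to a
# multi-hot vector with one slot per kept tag. For hypothetical kept tags
# ['red_dress', 'long_dress', 'white_dress'], an image whose tag string is
# "red_dress long_dress" would be encoded as [1, 1, 0]. These vectors are the
# targets for the sigmoid output layer built in import_model().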
def metrics(model, files, output, tags_count):
true_positives = np.zeros(len(output))
positives = np.zeros(len(output))
truth = np.zeros(len(output))
for file in tqdm(files):
img = image_process(file)
img = np.expand_dims(img, axis=0)
prediction = model.predict(img)[0]
for i, coef in enumerate(prediction):
            f = list(tags_count.values())[i] / len(files)
if coef > f:
positives[i] += 1
if output[file][i] > f:
truth[i] += 1
if output[file][i] > f and coef > f:
true_positives[i] += 1
print('Tag\tPrecision\tRecall')
for i, k, l, key in zip(true_positives, positives, truth, tags_count.keys()):
if k != 0:
precision = int(i / k * 1000) / 100
else:
precision = 0
if l != 0:
recall = int(i / l * 1000) / 100
else:
recall = 0
print(f'{key}\t{precision}%\t{recall}%\t')
# @functools.lru_cache(maxsize=IMAGE_NUMBER)
def image_process(file):
img = cv2.imread(file)
img = img[:, :, [2, 1, 0]]
# img = resize(img, INPUT_SHAPE, mode='reflect', preserve_range=True, anti_aliasing=True)
return img
def generator(files, output, batch_size=BATCH_SIZE):
while True:
batch_files = np.random.choice(files, size=batch_size)
# j += 1
# print(index, j, [(k + j) % n for k in index], [(k + j) for k in index], index+j)
batch_output = np.array([output[file] for file in batch_files])
batch_input = np.zeros([batch_size] + [shape for shape in INPUT_SHAPE])
for i, file in enumerate(batch_files):
batch_input[i] = image_process(file)
yield batch_input / 255, batch_output
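# Quick sanity-check sketch for the generator above (hypothetical, not called
# anywhere in this script): every yielded batch is a pair of arrays, with the
# inputs rescaled to [0, 1].
#
#   batch_x, batch_y = next(generator(files, output))
#   assert batch_x.shape == (BATCH_SIZE,) + INPUT_SHAPE
#   assert 0.0 <= batch_x.min() and batch_x.max() <= 1.0
#   assert batch_y.shape == (BATCH_SIZE, len(tags_count))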
def train(model, files, output, tags_count, weight_filename=WEIGHT_FILENAME,
validation_split=VALIDATION_SPLIT, epochs=EPOCHS, batch_size=BATCH_SIZE):
class_weights = {i: len(files) / count for i, count in enumerate(tags_count.values())}
index = int(len(files) * validation_split)
training_generator = generator(files[:index], output)
validation_generator = generator(files[index:], output)
calls = [ModelCheckpoint(weight_filename, save_best_only=True),
PlotLearning(examples=True)]
model.example = next(validation_generator)
model.labels = list(tags_count.keys())
model.fit_generator(generator=training_generator,
validation_data=validation_generator,
verbose=1,
steps_per_epoch=int(len(files) * validation_split) // batch_size,
validation_steps=int(len(files) * (1 - validation_split)) // batch_size,
epochs=epochs,
callbacks=calls,
class_weight=class_weights
)
def test(files, output, tags_count, weight_filename=WEIGHT_FILENAME):
model = load_model(weight_filename)
metrics(model, files, output, tags_count)
image_generator = generator(files, output, batch_size=1)
fs = [count / len(files) for count in tags_count.values()]
fs = [0.5 for i in fs]
while True:
print('---')
im, truth = next(image_generator)
        truth_string = ' '.join([list(tags_count.keys())[j] for j, v in enumerate(truth[0]) if v > fs[j]])
print('TRUTH:', truth_string)
print(im.shape)
prediction = model.predict(im)[0]
        prediction_string = ' '.join([list(tags_count.keys())[j] for j, v in enumerate(prediction) if v > fs[j]])
print('PREDICTION:', prediction_string)
plt.imshow(im[0])
plt.show()
def main():
root = join('..', 'imgs', ROOT)
files = [join(root, folder, file) for folder in os.listdir(root) for file in os.listdir(join(root, folder))][
:IMAGE_NUMBER]
tags, tags_count = get_tags(ROOT, files)
output = make_output(files, tags, tags_count)
# test(files, output, tags_count)
model = import_model(len(tags_count))
train(model, files, output, tags_count)
print('DONE')
if __name__ == '__main__':
main()
| gpl-3.0 |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/feature_selection/variance_threshold.py | 123 | 2572 | # Author: Lars Buitinck
# License: 3-clause BSD
import numpy as np
from ..base import BaseEstimator
from .base import SelectorMixin
from ..utils import check_array
from ..utils.sparsefuncs import mean_variance_axis
from ..utils.validation import check_is_fitted
class VarianceThreshold(BaseEstimator, SelectorMixin):
"""Feature selector that removes all low-variance features.
This feature selection algorithm looks only at the features (X), not the
desired outputs (y), and can thus be used for unsupervised learning.
Read more in the :ref:`User Guide <variance_threshold>`.
Parameters
----------
threshold : float, optional
Features with a training-set variance lower than this threshold will
be removed. The default is to keep all features with non-zero variance,
i.e. remove the features that have the same value in all samples.
Attributes
----------
variances_ : array, shape (n_features,)
Variances of individual features.
Examples
--------
The following dataset has integer features, two of which are the same
in every sample. These are removed with the default setting for threshold::
>>> X = [[0, 2, 0, 3], [0, 1, 4, 3], [0, 1, 1, 3]]
>>> selector = VarianceThreshold()
>>> selector.fit_transform(X)
array([[2, 0],
[1, 4],
[1, 1]])
"""
def __init__(self, threshold=0.):
self.threshold = threshold
def fit(self, X, y=None):
"""Learn empirical variances from X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Sample vectors from which to compute variances.
y : any
Ignored. This parameter exists only for compatibility with
sklearn.pipeline.Pipeline.
Returns
-------
self
"""
X = check_array(X, ('csr', 'csc'), dtype=np.float64)
if hasattr(X, "toarray"): # sparse matrix
_, self.variances_ = mean_variance_axis(X, axis=0)
else:
self.variances_ = np.var(X, axis=0)
if np.all(self.variances_ <= self.threshold):
msg = "No feature in X meets the variance threshold {0:.5f}"
if X.shape[0] == 1:
msg += " (X contains only one sample)"
raise ValueError(msg.format(self.threshold))
return self
def _get_support_mask(self):
check_is_fitted(self, 'variances_')
return self.variances_ > self.threshold
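# Illustrative follow-up (hypothetical values): with a non-default threshold,
# only features whose training variance strictly exceeds it are kept, e.g.
#
#   X = [[0.0, 2.0, 0.3], [0.1, 1.0, 0.3], [0.1, 1.0, 0.3]]
#   VarianceThreshold(threshold=0.2).fit_transform(X)
#   # -> keeps only the middle column (its variance is ~0.22; the others are
#   #    ~0.002 and 0.0)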
| mit |
cogeorg/BlackRhino | networkx/drawing/nx_pylab.py | 1 | 32861 | # Copyright (C) 2004-2016 by
# Aric Hagberg <hagberg@lanl.gov>
# Dan Schult <dschult@colgate.edu>
# Pieter Swart <swart@lanl.gov>
# All rights reserved.
# BSD license.
#
# Author: Aric Hagberg (hagberg@lanl.gov)
"""
**********
Matplotlib
**********
Draw networks with matplotlib.
See Also
--------
matplotlib: http://matplotlib.org/
pygraphviz: http://pygraphviz.github.io/
"""
import networkx as nx
from networkx.drawing.layout import shell_layout,\
circular_layout,spectral_layout,spring_layout,random_layout
__all__ = ['draw',
'draw_networkx',
'draw_networkx_nodes',
'draw_networkx_edges',
'draw_networkx_labels',
'draw_networkx_edge_labels',
'draw_circular',
'draw_random',
'draw_spectral',
'draw_spring',
'draw_shell']
def draw(G, pos=None, ax=None, **kwds):
"""Draw the graph G with Matplotlib.
Draw the graph as a simple representation with no node
labels or edge labels and using the full Matplotlib figure area
and no axis labels by default. See draw_networkx() for more
full-featured drawing that allows title, axis labels etc.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See :py:mod:`networkx.drawing.layout` for functions that
compute node positions.
ax : Matplotlib Axes object, optional
Draw the graph in specified Matplotlib axes.
kwds : optional keywords
See networkx.draw_networkx() for a description of optional keywords.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
See Also
--------
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
Notes
-----
This function has the same name as pylab.draw and pyplot.draw
so beware when using
>>> from networkx import *
since you might overwrite the pylab.draw function.
With pyplot use
>>> import matplotlib.pyplot as plt
>>> import networkx as nx
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G) # networkx draw()
>>> plt.draw() # pyplot draw()
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
cf = plt.gcf()
else:
cf = ax.get_figure()
cf.set_facecolor('w')
if ax is None:
if cf._axstack() is None:
ax = cf.add_axes((0, 0, 1, 1))
else:
ax = cf.gca()
if 'with_labels' not in kwds:
kwds['with_labels'] = 'labels' in kwds
try:
draw_networkx(G, pos=pos, ax=ax, **kwds)
ax.set_axis_off()
plt.draw_if_interactive()
except:
raise
return
def draw_networkx(G, pos=None, arrows=True, with_labels=True, **kwds):
"""Draw the graph G using Matplotlib.
Draw the graph with Matplotlib with options for node positions,
labeling, titles, and many other drawing features.
See draw() for simple drawing without labels or axes.
Parameters
----------
G : graph
A networkx graph
pos : dictionary, optional
A dictionary with nodes as keys and positions as values.
If not specified a spring layout positioning will be computed.
See :py:mod:`networkx.drawing.layout` for functions that
compute node positions.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
with_labels : bool, optional (default=True)
Set to True to draw labels on the nodes.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional (default G.nodes())
Draw only specified nodes
edgelist : list, optional (default=G.edges())
Draw only specified edges
node_size : scalar or array, optional (default=300)
Size of nodes. If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats, (default='r')
Node color. Can be a single color format string,
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string, optional (default='o')
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8'.
alpha : float, optional (default=1.0)
The node and edge transparency
cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of nodes
vmin,vmax : float, optional (default=None)
Minimum and maximum for node colormap scaling
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
width : float, optional (default=1.0)
Line width of edges
edge_color : color string, or array of floats (default='r')
Edge color. Can be a single color format string,
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
edge_cmap : Matplotlib colormap, optional (default=None)
Colormap for mapping intensities of edges
edge_vmin,edge_vmax : floats, optional (default=None)
Minimum and maximum for edge colormap scaling
style : string, optional (default='solid')
Edge line style (solid|dashed|dotted,dashdot)
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int, optional (default=12)
Font size for text labels
font_color : string, optional (default='k' black)
Font color string
font_weight : string, optional (default='normal')
Font weight
font_family : string, optional (default='sans-serif')
Font family
label : string, optional
Label for graph legend
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nx.draw(G)
>>> nx.draw(G,pos=nx.spring_layout(G)) # use spring layout
>>> import matplotlib.pyplot as plt
>>> limits=plt.axis('off') # turn of axis
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if pos is None:
pos = nx.drawing.spring_layout(G) # default to spring layout
node_collection = draw_networkx_nodes(G, pos, **kwds)
edge_collection = draw_networkx_edges(G, pos, arrows=arrows, **kwds)
if with_labels:
draw_networkx_labels(G, pos, **kwds)
plt.draw_if_interactive()
def draw_networkx_nodes(G, pos,
nodelist=None,
node_size=300,
node_color='r',
node_shape='o',
alpha=1.0,
cmap=None,
vmin=None,
vmax=None,
ax=None,
linewidths=None,
label=None,
**kwds):
"""Draw the nodes of the graph G.
This draws only the nodes of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
nodelist : list, optional
Draw only specified nodes (default G.nodes())
node_size : scalar or array
Size of nodes (default=300). If an array is specified it must be the
same length as nodelist.
node_color : color string, or array of floats
Node color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
node_shape : string
The shape of the node. Specification is as matplotlib.scatter
marker, one of 'so^>v<dph8' (default='o').
alpha : float
The node transparency (default=1.0)
cmap : Matplotlib colormap
Colormap for mapping intensities of nodes (default=None)
vmin,vmax : floats
Minimum and maximum for node colormap scaling (default=None)
linewidths : [None | scalar | sequence]
Line width of symbol border (default =1.0)
label : [None| string]
Label for legend
Returns
-------
matplotlib.collections.PathCollection
`PathCollection` of the nodes.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> nodes=nx.draw_networkx_nodes(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_edges()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
import collections
try:
import matplotlib.pyplot as plt
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if nodelist is None:
nodelist = list(G)
if not nodelist or len(nodelist) == 0: # empty nodelist, no drawing
return None
try:
xy = numpy.asarray([pos[v] for v in nodelist])
except KeyError as e:
raise nx.NetworkXError('Node %s has no position.'%e)
except ValueError:
raise nx.NetworkXError('Bad value in node positions.')
if isinstance(alpha, collections.Iterable):
node_color = apply_alpha(node_color, alpha, nodelist, cmap, vmin, vmax)
alpha = None
node_collection = ax.scatter(xy[:, 0], xy[:, 1],
s=node_size,
c=node_color,
marker=node_shape,
cmap=cmap,
vmin=vmin,
vmax=vmax,
alpha=alpha,
linewidths=linewidths,
label=label)
node_collection.set_zorder(2)
return node_collection
def draw_networkx_edges(G, pos,
edgelist=None,
width=1.0,
edge_color='k',
style='solid',
alpha=1.0,
edge_cmap=None,
edge_vmin=None,
edge_vmax=None,
ax=None,
arrows=True,
label=None,
**kwds):
"""Draw the edges of the graph G.
This draws only the edges of the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
edgelist : collection of edge tuples
Draw only specified edges(default=G.edges())
width : float, or array of floats
Line width of edges (default=1.0)
edge_color : color string, or array of floats
Edge color. Can be a single color format string (default='r'),
or a sequence of colors with the same length as edgelist.
If numeric values are specified they will be mapped to
colors using the edge_cmap and edge_vmin,edge_vmax parameters.
style : string
Edge line style (default='solid') (solid|dashed|dotted,dashdot)
alpha : float
The edge transparency (default=1.0)
edge_ cmap : Matplotlib colormap
Colormap for mapping intensities of edges (default=None)
edge_vmin,edge_vmax : floats
Minimum and maximum for edge colormap scaling (default=None)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
arrows : bool, optional (default=True)
For directed graphs, if True draw arrowheads.
label : [None| string]
Label for legend
Returns
-------
matplotlib.collection.LineCollection
`LineCollection` of the edges
Notes
-----
For directed graphs, "arrows" (actually just thicker stubs) are drawn
at the head end. Arrows can be turned off with keyword arrows=False.
Yes, it is ugly but drawing proper arrows with Matplotlib this
way is tricky.
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edges=nx.draw_networkx_edges(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_labels()
draw_networkx_edge_labels()
"""
try:
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
from matplotlib.colors import colorConverter, Colormap
from matplotlib.collections import LineCollection
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edgelist is None:
edgelist = list(G.edges())
if not edgelist or len(edgelist) == 0: # no edges!
return None
# set edge positions
edge_pos = numpy.asarray([(pos[e[0]], pos[e[1]]) for e in edgelist])
if not cb.iterable(width):
lw = (width,)
else:
lw = width
if not cb.is_string_like(edge_color) \
and cb.iterable(edge_color) \
and len(edge_color) == len(edge_pos):
if numpy.alltrue([cb.is_string_like(c)
for c in edge_color]):
# (should check ALL elements)
# list of color letters such as ['k','r','k',...]
edge_colors = tuple([colorConverter.to_rgba(c, alpha)
for c in edge_color])
elif numpy.alltrue([not cb.is_string_like(c)
for c in edge_color]):
# If color specs are given as (rgb) or (rgba) tuples, we're OK
if numpy.alltrue([cb.iterable(c) and len(c) in (3, 4)
for c in edge_color]):
edge_colors = tuple(edge_color)
else:
# numbers (which are going to be mapped with a colormap)
edge_colors = None
else:
raise ValueError('edge_color must consist of either color names or numbers')
else:
if cb.is_string_like(edge_color) or len(edge_color) == 1:
edge_colors = (colorConverter.to_rgba(edge_color, alpha), )
else:
raise ValueError('edge_color must be a single color or list of exactly m colors where m is the number or edges')
edge_collection = LineCollection(edge_pos,
colors=edge_colors,
linewidths=lw,
antialiaseds=(1,),
linestyle=style,
transOffset = ax.transData,
)
edge_collection.set_zorder(1) # edges go behind nodes
edge_collection.set_label(label)
ax.add_collection(edge_collection)
# Note: there was a bug in mpl regarding the handling of alpha values for
# each line in a LineCollection. It was fixed in matplotlib in r7184 and
# r7189 (June 6 2009). We should then not set the alpha value globally,
# since the user can instead provide per-edge alphas now. Only set it
# globally if provided as a scalar.
if cb.is_numlike(alpha):
edge_collection.set_alpha(alpha)
if edge_colors is None:
if edge_cmap is not None:
assert(isinstance(edge_cmap, Colormap))
edge_collection.set_array(numpy.asarray(edge_color))
edge_collection.set_cmap(edge_cmap)
if edge_vmin is not None or edge_vmax is not None:
edge_collection.set_clim(edge_vmin, edge_vmax)
else:
edge_collection.autoscale()
arrow_collection = None
if G.is_directed() and arrows:
# a directed graph hack
# draw thick line segments at head end of edge
# waiting for someone else to implement arrows that will work
arrow_colors = edge_colors
a_pos = []
p = 1.0-0.25 # make head segment 25 percent of edge length
for src, dst in edge_pos:
x1, y1 = src
x2, y2 = dst
dx = x2-x1 # x offset
dy = y2-y1 # y offset
d = numpy.sqrt(float(dx**2 + dy**2)) # length of edge
if d == 0: # source and target at same position
continue
if dx == 0: # vertical edge
xa = x2
ya = dy*p+y1
if dy == 0: # horizontal edge
ya = y2
xa = dx*p+x1
else:
theta = numpy.arctan2(dy, dx)
xa = p*d*numpy.cos(theta)+x1
ya = p*d*numpy.sin(theta)+y1
a_pos.append(((xa, ya), (x2, y2)))
arrow_collection = LineCollection(a_pos,
colors=arrow_colors,
linewidths=[4*ww for ww in lw],
antialiaseds=(1,),
transOffset = ax.transData,
)
arrow_collection.set_zorder(1) # edges go behind nodes
arrow_collection.set_label(label)
ax.add_collection(arrow_collection)
# update view
minx = numpy.amin(numpy.ravel(edge_pos[:, :, 0]))
maxx = numpy.amax(numpy.ravel(edge_pos[:, :, 0]))
miny = numpy.amin(numpy.ravel(edge_pos[:, :, 1]))
maxy = numpy.amax(numpy.ravel(edge_pos[:, :, 1]))
w = maxx-minx
h = maxy-miny
padx, pady = 0.05*w, 0.05*h
corners = (minx-padx, miny-pady), (maxx+padx, maxy+pady)
ax.update_datalim(corners)
ax.autoscale_view()
# if arrow_collection:
return edge_collection
def draw_networkx_labels(G, pos,
labels=None,
font_size=12,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
**kwds):
"""Draw node labels on the graph G.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
labels : dictionary, optional (default=None)
Node labels in a dictionary keyed by node of text labels
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_family : string
Font family (default='sans-serif')
font_weight : string
Font weight (default='normal')
alpha : float
The text transparency (default=1.0)
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
Returns
-------
dict
`dict` of labels keyed on the nodes
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> labels=nx.draw_networkx_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_edge_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if labels is None:
labels = dict((n, n) for n in G.nodes())
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
text_items = {} # there is no text collection so we'll fake one
for n, label in labels.items():
(x, y) = pos[n]
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
alpha=alpha,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
transform=ax.transData,
bbox=bbox,
clip_on=True,
)
text_items[n] = t
return text_items
def draw_networkx_edge_labels(G, pos,
edge_labels=None,
label_pos=0.5,
font_size=10,
font_color='k',
font_family='sans-serif',
font_weight='normal',
alpha=1.0,
bbox=None,
ax=None,
rotate=True,
**kwds):
"""Draw edge labels.
Parameters
----------
G : graph
A networkx graph
pos : dictionary
A dictionary with nodes as keys and positions as values.
Positions should be sequences of length 2.
ax : Matplotlib Axes object, optional
Draw the graph in the specified Matplotlib axes.
alpha : float
The text transparency (default=1.0)
edge_labels : dictionary
Edge labels in a dictionary keyed by edge two-tuple of text
labels (default=None). Only labels for the keys in the dictionary
are drawn.
label_pos : float
Position of edge label along edge (0=head, 0.5=center, 1=tail)
font_size : int
Font size for text labels (default=12)
font_color : string
Font color string (default='k' black)
font_weight : string
Font weight (default='normal')
font_family : string
Font family (default='sans-serif')
bbox : Matplotlib bbox
Specify text box shape and colors.
clip_on : bool
Turn on clipping at axis boundaries (default=True)
Returns
-------
dict
`dict` of labels keyed on the edges
Examples
--------
>>> G=nx.dodecahedral_graph()
>>> edge_labels=nx.draw_networkx_edge_labels(G,pos=nx.spring_layout(G))
Also see the NetworkX drawing examples at
http://networkx.readthedocs.io/en/latest/gallery.html
See Also
--------
draw()
draw_networkx()
draw_networkx_nodes()
draw_networkx_edges()
draw_networkx_labels()
"""
try:
import matplotlib.pyplot as plt
import matplotlib.cbook as cb
import numpy
except ImportError:
raise ImportError("Matplotlib required for draw()")
except RuntimeError:
print("Matplotlib unable to open display")
raise
if ax is None:
ax = plt.gca()
if edge_labels is None:
labels = dict(((u, v), d) for u, v, d in G.edges(data=True))
else:
labels = edge_labels
text_items = {}
for (n1, n2), label in labels.items():
(x1, y1) = pos[n1]
(x2, y2) = pos[n2]
(x, y) = (x1 * label_pos + x2 * (1.0 - label_pos),
y1 * label_pos + y2 * (1.0 - label_pos))
if rotate:
angle = numpy.arctan2(y2-y1, x2-x1)/(2.0*numpy.pi)*360 # degrees
# make label orientation "right-side-up"
if angle > 90:
angle -= 180
if angle < - 90:
angle += 180
# transform data coordinate angle to screen coordinate angle
xy = numpy.array((x, y))
trans_angle = ax.transData.transform_angles(numpy.array((angle,)),
xy.reshape((1, 2)))[0]
else:
trans_angle = 0.0
# use default box of white with white border
if bbox is None:
bbox = dict(boxstyle='round',
ec=(1.0, 1.0, 1.0),
fc=(1.0, 1.0, 1.0),
)
if not cb.is_string_like(label):
label = str(label) # this will cause "1" and 1 to be labeled the same
# set optional alignment
horizontalalignment = kwds.get('horizontalalignment', 'center')
verticalalignment = kwds.get('verticalalignment', 'center')
t = ax.text(x, y,
label,
size=font_size,
color=font_color,
family=font_family,
weight=font_weight,
alpha=alpha,
horizontalalignment=horizontalalignment,
verticalalignment=verticalalignment,
rotation=trans_angle,
transform=ax.transData,
bbox=bbox,
zorder=1,
clip_on=True,
)
text_items[(n1, n2)] = t
return text_items
def draw_circular(G, **kwargs):
"""Draw the graph G with a circular layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, circular_layout(G), **kwargs)
def draw_random(G, **kwargs):
"""Draw the graph G with a random layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, random_layout(G), **kwargs)
def draw_spectral(G, **kwargs):
"""Draw the graph G with a spectral layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spectral_layout(G), **kwargs)
def draw_spring(G, **kwargs):
"""Draw the graph G with a spring layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
draw(G, spring_layout(G), **kwargs)
def draw_shell(G, **kwargs):
"""Draw networkx graph with shell layout.
Parameters
----------
G : graph
A networkx graph
kwargs : optional keywords
See networkx.draw_networkx() for a description of optional keywords,
with the exception of the pos parameter which is not used by this
function.
"""
nlist = kwargs.get('nlist', None)
if nlist is not None:
del(kwargs['nlist'])
draw(G, shell_layout(G, nlist=nlist), **kwargs)
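# A small usage sketch for draw_shell (illustrative; the graph and shell split
# are arbitrary): ``nlist`` is forwarded to shell_layout, whose first list is
# treated as the innermost shell.
#
#   G = nx.dodecahedral_graph()
#   shells = [list(range(5)), list(range(5, 20))]
#   draw_shell(G, nlist=shells)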
def draw_nx(G, pos, **kwds):
"""For backward compatibility; use draw or draw_networkx."""
draw(G, pos, **kwds)
def apply_alpha(colors, alpha, elem_list, cmap=None, vmin=None, vmax=None):
"""Apply an alpha (or list of alphas) to the colors provided.
Parameters
----------
color : color string, or array of floats
Color of element. Can be a single color format string (default='r'),
or a sequence of colors with the same length as nodelist.
If numeric values are specified they will be mapped to
colors using the cmap and vmin,vmax parameters. See
matplotlib.scatter for more details.
alpha : float or array of floats
Alpha values for elements. This can be a single alpha value, in
which case it will be applied to all the elements of color. Otherwise,
if it is an array, the elements of alpha will be applied to the colors
in order (cycling through alpha multiple times if necessary).
elem_list : array of networkx objects
The list of elements which are being colored. These could be nodes, edges
or labels.
cmap : matplotlib colormap
Color map for use if colors is a list of floats corresponding to points on
a color mapping.
vmin, vmax : float
Minimum and maximum values for normalizing colors if a color mapping is used.
Returns
-------
rgba_colors : numpy ndarray
Array containing RGBA format values for each of the node colours.
"""
import numbers
import itertools
try:
import numpy
from matplotlib.colors import colorConverter
import matplotlib.cm as cm
except ImportError:
raise ImportError("Matplotlib required for draw()")
# If we have been provided with a list of numbers as long as elem_list, apply the color mapping.
if len(colors) == len(elem_list) and isinstance(colors[0], numbers.Number):
mapper = cm.ScalarMappable(cmap=cmap)
mapper.set_clim(vmin, vmax)
rgba_colors = mapper.to_rgba(colors)
# Otherwise, convert colors to matplotlib's RGB using the colorConverter object.
# These are converted to numpy ndarrays to be consistent with the to_rgba method of ScalarMappable.
else:
try:
rgba_colors = numpy.array([colorConverter.to_rgba(colors)])
except ValueError:
rgba_colors = numpy.array([colorConverter.to_rgba(color) for color in colors])
# Set the final column of the rgba_colors to have the relevant alpha values.
try:
# If alpha is longer than the number of colors, resize to the number of elements.
# Also, if rgba_colors.size (the number of elements of rgba_colors) is the same as the number of
# elements, resize the array, to avoid it being interpreted as a colormap by scatter()
if len(alpha) > len(rgba_colors) or rgba_colors.size == len(elem_list):
rgba_colors.resize((len(elem_list), 4))
rgba_colors[1:, 0] = rgba_colors[0, 0]
rgba_colors[1:, 1] = rgba_colors[0, 1]
rgba_colors[1:, 2] = rgba_colors[0, 2]
rgba_colors[:, 3] = list(itertools.islice(itertools.cycle(alpha), len(rgba_colors)))
except TypeError:
rgba_colors[:, -1] = alpha
return rgba_colors
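# Usage sketch for apply_alpha (illustrative, with arbitrary values): a single
# color plus a sequence of alphas yields one RGBA row per element, with the
# alpha column cycling through the sequence.
#
#   rgba = apply_alpha('r', [0.2, 0.8], list_of_nodes)
#   # rgba.shape == (len(list_of_nodes), 4); rgba[:, 3] alternates 0.2, 0.8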
# fixture for nose tests
def setup_module(module):
from nose import SkipTest
try:
import matplotlib as mpl
mpl.use('PS', warn=False)
import matplotlib.pyplot as plt
except:
raise SkipTest("matplotlib not available")
| gpl-3.0 |
yavalvas/yav_com | build/matplotlib/lib/matplotlib/dviread.py | 11 | 33923 | """
An experimental module for reading dvi files output by TeX. Several
limitations make this not (currently) useful as a general-purpose dvi
preprocessor, but it is currently used by the pdf backend for
processing usetex text.
Interface::
dvi = Dvi(filename, 72)
# iterate over pages (but only one page is supported for now):
for page in dvi:
w, h, d = page.width, page.height, page.descent
for x,y,font,glyph,width in page.text:
fontname = font.texname
pointsize = font.size
...
for x,y,height,width in page.boxes:
...
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import xrange
import errno
import matplotlib
import matplotlib.cbook as mpl_cbook
from matplotlib.compat import subprocess
from matplotlib import rcParams
import numpy as np
import struct
import sys
import os
if six.PY3:
def ord(x):
return x
_dvistate = mpl_cbook.Bunch(pre=0, outer=1, inpage=2, post_post=3, finale=4)
class Dvi(object):
"""
A dvi ("device-independent") file, as produced by TeX.
The current implementation only reads the first page and does not
even attempt to verify the postamble.
"""
def __init__(self, filename, dpi):
"""
Initialize the object. This takes the filename as input and
opens the file; actually reading the file happens when
iterating through the pages of the file.
"""
matplotlib.verbose.report('Dvi: ' + filename, 'debug')
self.file = open(filename, 'rb')
self.dpi = dpi
self.fonts = {}
self.state = _dvistate.pre
self.baseline = self._get_baseline(filename)
def _get_baseline(self, filename):
if rcParams['text.latex.preview']:
base, ext = os.path.splitext(filename)
baseline_filename = base + ".baseline"
if os.path.exists(baseline_filename):
with open(baseline_filename, 'rb') as fd:
l = fd.read().split()
height, depth, width = l
return float(depth)
return None
def __iter__(self):
"""
Iterate through the pages of the file.
Returns (text, boxes) pairs, where:
text is a list of (x, y, fontnum, glyphnum, width) tuples
boxes is a list of (x, y, height, width) tuples
The coordinates are transformed into a standard Cartesian
coordinate system at the dpi value given when initializing.
The coordinates are floating point numbers, but otherwise
precision is not lost and coordinate values are not clipped to
integers.
"""
while True:
have_page = self._read()
if have_page:
yield self._output()
else:
break
def close(self):
"""
Close the underlying file if it is open.
"""
if not self.file.closed:
self.file.close()
def _output(self):
"""
Output the text and boxes belonging to the most recent page.
page = dvi._output()
"""
minx, miny, maxx, maxy = np.inf, np.inf, -np.inf, -np.inf
maxy_pure = -np.inf
for elt in self.text + self.boxes:
if len(elt) == 4: # box
x,y,h,w = elt
e = 0 # zero depth
else: # glyph
x,y,font,g,w = elt
h,e = font._height_depth_of(g)
minx = min(minx, x)
miny = min(miny, y - h)
maxx = max(maxx, x + w)
maxy = max(maxy, y + e)
maxy_pure = max(maxy_pure, y)
if self.dpi is None:
# special case for ease of debugging: output raw dvi coordinates
return mpl_cbook.Bunch(text=self.text, boxes=self.boxes,
width=maxx-minx, height=maxy_pure-miny,
                                   descent=maxy - maxy_pure)
d = self.dpi / (72.27 * 2**16) # from TeX's "scaled points" to dpi units
if self.baseline is None:
descent = (maxy - maxy_pure) * d
else:
descent = self.baseline
text = [ ((x-minx)*d, (maxy-y)*d - descent, f, g, w*d)
for (x,y,f,g,w) in self.text ]
boxes = [ ((x-minx)*d, (maxy-y)*d - descent, h*d, w*d) for (x,y,h,w) in self.boxes ]
return mpl_cbook.Bunch(text=text, boxes=boxes,
width=(maxx-minx)*d,
height=(maxy_pure-miny)*d,
descent=descent)
def _read(self):
"""
Read one page from the file. Return True if successful,
False if there were no more pages.
"""
while True:
byte = ord(self.file.read(1)[0])
self._dispatch(byte)
# if self.state == _dvistate.inpage:
# matplotlib.verbose.report(
# 'Dvi._read: after %d at %f,%f' %
# (byte, self.h, self.v),
# 'debug-annoying')
if byte == 140: # end of page
return True
if self.state == _dvistate.post_post: # end of file
self.close()
return False
def _arg(self, nbytes, signed=False):
"""
Read and return an integer argument *nbytes* long.
Signedness is determined by the *signed* keyword.
"""
str = self.file.read(nbytes)
value = ord(str[0])
if signed and value >= 0x80:
value = value - 0x100
for i in range(1, nbytes):
value = 0x100*value + ord(str[i])
return value
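    # Worked example for _arg (illustrative): with nbytes=2 and signed=True,
    # the byte pair 0xff 0xfe decodes as
    #   value = 0xff = 255; 255 >= 0x80, so value = 255 - 0x100 = -1
    #   value = 0x100 * (-1) + 0xfe = -2
    # i.e. big-endian two's complement, which is how the dvi format stores
    # signed quantities.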
def _dispatch(self, byte):
"""
Based on the opcode *byte*, read the correct kinds of
arguments from the dvi file and call the method implementing
that opcode with those arguments.
"""
if 0 <= byte <= 127: self._set_char(byte)
elif byte == 128: self._set_char(self._arg(1))
elif byte == 129: self._set_char(self._arg(2))
elif byte == 130: self._set_char(self._arg(3))
elif byte == 131: self._set_char(self._arg(4, True))
elif byte == 132: self._set_rule(self._arg(4, True), self._arg(4, True))
elif byte == 133: self._put_char(self._arg(1))
elif byte == 134: self._put_char(self._arg(2))
elif byte == 135: self._put_char(self._arg(3))
elif byte == 136: self._put_char(self._arg(4, True))
elif byte == 137: self._put_rule(self._arg(4, True), self._arg(4, True))
elif byte == 138: self._nop()
elif byte == 139: self._bop(*[self._arg(4, True) for i in range(11)])
elif byte == 140: self._eop()
elif byte == 141: self._push()
elif byte == 142: self._pop()
elif byte == 143: self._right(self._arg(1, True))
elif byte == 144: self._right(self._arg(2, True))
elif byte == 145: self._right(self._arg(3, True))
elif byte == 146: self._right(self._arg(4, True))
elif byte == 147: self._right_w(None)
elif byte == 148: self._right_w(self._arg(1, True))
elif byte == 149: self._right_w(self._arg(2, True))
elif byte == 150: self._right_w(self._arg(3, True))
elif byte == 151: self._right_w(self._arg(4, True))
elif byte == 152: self._right_x(None)
elif byte == 153: self._right_x(self._arg(1, True))
elif byte == 154: self._right_x(self._arg(2, True))
elif byte == 155: self._right_x(self._arg(3, True))
elif byte == 156: self._right_x(self._arg(4, True))
elif byte == 157: self._down(self._arg(1, True))
elif byte == 158: self._down(self._arg(2, True))
elif byte == 159: self._down(self._arg(3, True))
elif byte == 160: self._down(self._arg(4, True))
elif byte == 161: self._down_y(None)
elif byte == 162: self._down_y(self._arg(1, True))
elif byte == 163: self._down_y(self._arg(2, True))
elif byte == 164: self._down_y(self._arg(3, True))
elif byte == 165: self._down_y(self._arg(4, True))
elif byte == 166: self._down_z(None)
elif byte == 167: self._down_z(self._arg(1, True))
elif byte == 168: self._down_z(self._arg(2, True))
elif byte == 169: self._down_z(self._arg(3, True))
elif byte == 170: self._down_z(self._arg(4, True))
elif 171 <= byte <= 234: self._fnt_num(byte-171)
elif byte == 235: self._fnt_num(self._arg(1))
elif byte == 236: self._fnt_num(self._arg(2))
elif byte == 237: self._fnt_num(self._arg(3))
elif byte == 238: self._fnt_num(self._arg(4, True))
elif 239 <= byte <= 242:
len = self._arg(byte-238)
special = self.file.read(len)
self._xxx(special)
elif 243 <= byte <= 246:
k = self._arg(byte-242, byte==246)
c, s, d, a, l = [ self._arg(x) for x in (4, 4, 4, 1, 1) ]
n = self.file.read(a+l)
self._fnt_def(k, c, s, d, a, l, n)
elif byte == 247:
i, num, den, mag, k = [ self._arg(x) for x in (1, 4, 4, 4, 1) ]
x = self.file.read(k)
self._pre(i, num, den, mag, x)
elif byte == 248: self._post()
elif byte == 249: self._post_post()
else:
raise ValueError("unknown command: byte %d"%byte)
def _pre(self, i, num, den, mag, comment):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of dvi file")
if i != 2:
raise ValueError("Unknown dvi format %d"%i)
if num != 25400000 or den != 7227 * 2**16:
raise ValueError("nonstandard units in dvi file")
# meaning: TeX always uses those exact values, so it
# should be enough for us to support those
# (There are 72.27 pt to an inch so 7227 pt =
# 7227 * 2**16 sp to 100 in. The numerator is multiplied
# by 10^5 to get units of 10**-7 meters.)
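            # Worked out: num/den = 25400000 / (7227 * 2**16) is the number of
            # 10**-7 m units per DVI unit, since 100 in = 2.54e7 * 10**-7 m.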
if mag != 1000:
raise ValueError("nonstandard magnification in dvi file")
# meaning: LaTeX seems to frown on setting \mag, so
# I think we can assume this is constant
self.state = _dvistate.outer
def _set_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError("misplaced set_char in dvi file")
self._put_char(char)
self.h += self.fonts[self.f]._width_of(char)
def _set_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced set_rule in dvi file")
self._put_rule(a, b)
self.h += b
def _put_char(self, char):
if self.state != _dvistate.inpage:
raise ValueError("misplaced put_char in dvi file")
font = self.fonts[self.f]
if font._vf is None:
self.text.append((self.h, self.v, font, char,
font._width_of(char)))
# matplotlib.verbose.report(
# 'Dvi._put_char: %d,%d %d' %(self.h, self.v, char),
# 'debug-annoying')
else:
scale = font._scale
for x, y, f, g, w in font._vf[char].text:
newf = DviFont(scale=_mul2012(scale, f._scale),
tfm=f._tfm, texname=f.texname, vf=f._vf)
self.text.append((self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
newf, g, newf._width_of(g)))
self.boxes.extend([(self.h + _mul2012(x, scale),
self.v + _mul2012(y, scale),
_mul2012(a, scale), _mul2012(b, scale))
for x, y, a, b in font._vf[char].boxes])
def _put_rule(self, a, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced put_rule in dvi file")
if a > 0 and b > 0:
self.boxes.append((self.h, self.v, a, b))
# matplotlib.verbose.report(
# 'Dvi._put_rule: %d,%d %d,%d' % (self.h, self.v, a, b),
# 'debug-annoying')
def _nop(self):
pass
def _bop(self, c0, c1, c2, c3, c4, c5, c6, c7, c8, c9, p):
if self.state != _dvistate.outer:
raise ValueError("misplaced bop in dvi file (state %d)" % self.state)
self.state = _dvistate.inpage
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack = []
        self.text = []  # list of (x, y, font, glyph, width)
self.boxes = [] # list of (x,y,width,height)
def _eop(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced eop in dvi file")
self.state = _dvistate.outer
del self.h, self.v, self.w, self.x, self.y, self.z, self.stack
def _push(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced push in dvi file")
self.stack.append((self.h, self.v, self.w, self.x, self.y, self.z))
def _pop(self):
if self.state != _dvistate.inpage:
raise ValueError("misplaced pop in dvi file")
self.h, self.v, self.w, self.x, self.y, self.z = self.stack.pop()
def _right(self, b):
if self.state != _dvistate.inpage:
raise ValueError("misplaced right in dvi file")
self.h += b
def _right_w(self, new_w):
if self.state != _dvistate.inpage:
raise ValueError("misplaced w in dvi file")
if new_w is not None:
self.w = new_w
self.h += self.w
def _right_x(self, new_x):
if self.state != _dvistate.inpage:
raise ValueError("misplaced x in dvi file")
if new_x is not None:
self.x = new_x
self.h += self.x
def _down(self, a):
if self.state != _dvistate.inpage:
raise ValueError("misplaced down in dvi file")
self.v += a
def _down_y(self, new_y):
if self.state != _dvistate.inpage:
raise ValueError("misplaced y in dvi file")
if new_y is not None:
self.y = new_y
self.v += self.y
def _down_z(self, new_z):
if self.state != _dvistate.inpage:
raise ValueError("misplaced z in dvi file")
if new_z is not None:
self.z = new_z
self.v += self.z
def _fnt_num(self, k):
if self.state != _dvistate.inpage:
raise ValueError("misplaced fnt_num in dvi file")
self.f = k
def _xxx(self, special):
if six.PY3:
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
                % ''.join([(32 <= ch < 127) and chr(ch)
                           or '<%02x>' % ch
                           for ch in special]),
'debug')
else:
matplotlib.verbose.report(
'Dvi._xxx: encountered special: %s'
% ''.join([(32 <= ord(ch) < 127) and ch
or '<%02x>' % ord(ch)
for ch in special]),
'debug')
def _fnt_def(self, k, c, s, d, a, l, n):
tfm = _tfmfile(n[-l:].decode('ascii'))
if c != 0 and tfm.checksum != 0 and c != tfm.checksum:
raise ValueError('tfm checksum mismatch: %s'%n)
# It seems that the assumption behind the following check is incorrect:
#if d != tfm.design_size:
# raise ValueError, 'tfm design size mismatch: %d in dvi, %d in %s'%\
# (d, tfm.design_size, n)
vf = _vffile(n[-l:].decode('ascii'))
self.fonts[k] = DviFont(scale=s, tfm=tfm, texname=n, vf=vf)
def _post(self):
if self.state != _dvistate.outer:
raise ValueError("misplaced post in dvi file")
self.state = _dvistate.post_post
# TODO: actually read the postamble and finale?
# currently post_post just triggers closing the file
def _post_post(self):
raise NotImplementedError
class DviFont(object):
"""
Object that holds a font's texname and size, supports comparison,
and knows the widths of glyphs in the same units as the AFM file.
There are also internal attributes (for use by dviread.py) that
are *not* used for comparison.
The size is in Adobe points (converted from TeX points).
.. attribute:: texname
Name of the font as used internally by TeX and friends. This
is usually very different from any external font names, and
:class:`dviread.PsfontsMap` can be used to find the external
name of the font.
.. attribute:: size
Size of the font in Adobe points, converted from the slightly
smaller TeX points.
.. attribute:: widths
Widths of glyphs in glyph-space units, typically 1/1000ths of
the point size.
"""
__slots__ = ('texname', 'size', 'widths', '_scale', '_vf', '_tfm')
def __init__(self, scale, tfm, texname, vf):
if six.PY3 and isinstance(texname, bytes):
texname = texname.decode('ascii')
self._scale, self._tfm, self.texname, self._vf = \
scale, tfm, texname, vf
self.size = scale * (72.0 / (72.27 * 2**16))
try:
nchars = max(six.iterkeys(tfm.width)) + 1
except ValueError:
nchars = 0
self.widths = [ (1000*tfm.width.get(char, 0)) >> 20
for char in xrange(nchars) ]
def __eq__(self, other):
return self.__class__ == other.__class__ and \
self.texname == other.texname and self.size == other.size
def __ne__(self, other):
return not self.__eq__(other)
def _width_of(self, char):
"""
Width of char in dvi units. For internal use by dviread.py.
"""
width = self._tfm.width.get(char, None)
if width is not None:
return _mul2012(width, self._scale)
matplotlib.verbose.report(
'No width for char %d in font %s' % (char, self.texname),
'debug')
return 0
def _height_depth_of(self, char):
"""
Height and depth of char in dvi units. For internal use by dviread.py.
"""
result = []
for metric,name in ((self._tfm.height, "height"),
(self._tfm.depth, "depth")):
value = metric.get(char, None)
if value is None:
matplotlib.verbose.report(
'No %s for char %d in font %s' % (name, char, self.texname),
'debug')
result.append(0)
else:
result.append(_mul2012(value, self._scale))
return result
class Vf(Dvi):
"""
A virtual font (\*.vf file) containing subroutines for dvi files.
Usage::
vf = Vf(filename)
glyph = vf[code]
glyph.text, glyph.boxes, glyph.width
"""
def __init__(self, filename):
Dvi.__init__(self, filename, 0)
try:
self._first_font = None
self._chars = {}
self._packet_ends = None
self._read()
finally:
self.close()
def __getitem__(self, code):
return self._chars[code]
def _dispatch(self, byte):
# If we are in a packet, execute the dvi instructions
if self.state == _dvistate.inpage:
byte_at = self.file.tell()-1
if byte_at == self._packet_ends:
self._finalize_packet()
# fall through
elif byte_at > self._packet_ends:
raise ValueError("Packet length mismatch in vf file")
else:
if byte in (139, 140) or byte >= 243:
raise ValueError("Inappropriate opcode %d in vf file" % byte)
Dvi._dispatch(self, byte)
return
# We are outside a packet
if byte < 242: # a short packet (length given by byte)
cc, tfm = self._arg(1), self._arg(3)
self._init_packet(byte, cc, tfm)
elif byte == 242: # a long packet
pl, cc, tfm = [ self._arg(x) for x in (4, 4, 4) ]
self._init_packet(pl, cc, tfm)
elif 243 <= byte <= 246:
Dvi._dispatch(self, byte)
elif byte == 247: # preamble
i, k = self._arg(1), self._arg(1)
x = self.file.read(k)
cs, ds = self._arg(4), self._arg(4)
self._pre(i, x, cs, ds)
elif byte == 248: # postamble (just some number of 248s)
self.state = _dvistate.post_post
else:
raise ValueError("unknown vf opcode %d" % byte)
def _init_packet(self, pl, cc, tfm):
if self.state != _dvistate.outer:
raise ValueError("Misplaced packet in vf file")
self.state = _dvistate.inpage
self._packet_ends = self.file.tell() + pl
self._packet_char = cc
self._packet_width = tfm
self.h, self.v, self.w, self.x, self.y, self.z = 0, 0, 0, 0, 0, 0
self.stack, self.text, self.boxes = [], [], []
self.f = self._first_font
def _finalize_packet(self):
self._chars[self._packet_char] = mpl_cbook.Bunch(
text=self.text, boxes=self.boxes, width = self._packet_width)
self.state = _dvistate.outer
def _pre(self, i, x, cs, ds):
if self.state != _dvistate.pre:
raise ValueError("pre command in middle of vf file")
if i != 202:
raise ValueError("Unknown vf format %d" % i)
if len(x):
matplotlib.verbose.report('vf file comment: ' + x, 'debug')
self.state = _dvistate.outer
# cs = checksum, ds = design size
def _fnt_def(self, k, *args):
Dvi._fnt_def(self, k, *args)
if self._first_font is None:
self._first_font = k
def _fix2comp(num):
"""
Convert from two's complement to negative.
"""
assert 0 <= num < 2**32
if num & 2**31:
return num - 2**32
else:
return num
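# Illustrative values for _fix2comp: _fix2comp(5) == 5,
# _fix2comp(2**32 - 1) == -1, and _fix2comp(2**31) == -2**31.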
def _mul2012(num1, num2):
"""
Multiply two numbers in 20.12 fixed point format.
"""
# Separated into a function because >> has surprising precedence
return (num1*num2) >> 20
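# Illustrative values: the shift treats 1 << 20 as 1.0, so
# _mul2012(1 << 20, 3 << 20) == 3 << 20 and
# _mul2012(1 << 19, 1 << 20) == 1 << 19 (i.e. 0.5 * 1.0 == 0.5).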
class Tfm(object):
"""
A TeX Font Metric file. This implementation covers only the bare
minimum needed by the Dvi class.
.. attribute:: checksum
Used for verifying against the dvi file.
.. attribute:: design_size
Design size of the font (in what units?)
.. attribute:: width
Width of each character, needs to be scaled by the factor
specified in the dvi file. This is a dict because indexing may
not start from 0.
.. attribute:: height
Height of each character.
.. attribute:: depth
Depth of each character.
"""
__slots__ = ('checksum', 'design_size', 'width', 'height', 'depth')
def __init__(self, filename):
matplotlib.verbose.report('opening tfm file ' + filename, 'debug')
with open(filename, 'rb') as file:
header1 = file.read(24)
lh, bc, ec, nw, nh, nd = \
struct.unpack(str('!6H'), header1[2:14])
matplotlib.verbose.report(
'lh=%d, bc=%d, ec=%d, nw=%d, nh=%d, nd=%d' % (
lh, bc, ec, nw, nh, nd), 'debug')
header2 = file.read(4*lh)
self.checksum, self.design_size = \
struct.unpack(str('!2I'), header2[:8])
# there is also encoding information etc.
char_info = file.read(4*(ec-bc+1))
widths = file.read(4*nw)
heights = file.read(4*nh)
depths = file.read(4*nd)
self.width, self.height, self.depth = {}, {}, {}
widths, heights, depths = \
[ struct.unpack(str('!%dI') % (len(x)/4), x)
for x in (widths, heights, depths) ]
for idx, char in enumerate(xrange(bc, ec+1)):
self.width[char] = _fix2comp(widths[ord(char_info[4*idx])])
self.height[char] = _fix2comp(heights[ord(char_info[4*idx+1]) >> 4])
self.depth[char] = _fix2comp(depths[ord(char_info[4*idx+1]) & 0xf])
class PsfontsMap(object):
"""
A psfonts.map formatted file, mapping TeX fonts to PS fonts.
Usage::
>>> map = PsfontsMap(find_tex_file('pdftex.map'))
>>> entry = map['ptmbo8r']
>>> entry.texname
'ptmbo8r'
>>> entry.psname
'Times-Bold'
>>> entry.encoding
'/usr/local/texlive/2008/texmf-dist/fonts/enc/dvips/base/8r.enc'
>>> entry.effects
{'slant': 0.16700000000000001}
>>> entry.filename
For historical reasons, TeX knows many Type-1 fonts by different
names than the outside world. (For one thing, the names have to
fit in eight characters.) Also, TeX's native fonts are not Type-1
but Metafont, which is nontrivial to convert to PostScript except
as a bitmap. While high-quality conversions to Type-1 format exist
and are shipped with modern TeX distributions, we need to know
which Type-1 fonts are the counterparts of which native fonts. For
these reasons a mapping is needed from internal font names to font
file names.
A texmf tree typically includes mapping files called e.g.
psfonts.map, pdftex.map, dvipdfm.map. psfonts.map is used by
dvips, pdftex.map by pdfTeX, and dvipdfm.map by dvipdfm.
psfonts.map might avoid embedding the 35 PostScript fonts (i.e.,
have no filename for them, as in the Times-Bold example above),
while the pdf-related files perhaps only avoid the "Base 14" pdf
fonts. But the user may have configured these files differently.
"""
__slots__ = ('_font',)
def __init__(self, filename):
self._font = {}
with open(filename, 'rt') as file:
self._parse(file)
def __getitem__(self, texname):
try:
result = self._font[texname]
except KeyError:
result = self._font[texname.decode('ascii')]
fn, enc = result.filename, result.encoding
if fn is not None and not fn.startswith('/'):
result.filename = find_tex_file(fn)
if enc is not None and not enc.startswith('/'):
result.encoding = find_tex_file(result.encoding)
return result
def _parse(self, file):
"""Parse each line into words."""
for line in file:
line = line.strip()
if line == '' or line.startswith('%'):
continue
words, pos = [], 0
while pos < len(line):
if line[pos] == '"': # double quoted word
pos += 1
end = line.index('"', pos)
words.append(line[pos:end])
pos = end + 1
else: # ordinary word
end = line.find(' ', pos+1)
if end == -1: end = len(line)
words.append(line[pos:end])
pos = end
                while pos < len(line) and line[pos] == ' ':
                    pos += 1
self._register(words)
def _register(self, words):
"""Register a font described by "words".
The format is, AFAIK: texname fontname [effects and filenames]
Effects are PostScript snippets like ".177 SlantFont",
filenames begin with one or two less-than signs. A filename
ending in enc is an encoding file, other filenames are font
files. This can be overridden with a left bracket: <[foobar
indicates an encoding file named foobar.
There is some difference between <foo.pfb and <<bar.pfb in
subsetting, but I have no example of << in my TeX installation.
"""
# If the map file specifies multiple encodings for a font, we
# follow pdfTeX in choosing the last one specified. Such
# entries are probably mistakes but they have occurred.
# http://tex.stackexchange.com/questions/10826/
# http://article.gmane.org/gmane.comp.tex.pdftex/4914
texname, psname = words[:2]
effects, encoding, filename = '', None, None
for word in words[2:]:
if not word.startswith('<'):
effects = word
else:
word = word.lstrip('<')
if word.startswith('[') or word.endswith('.enc'):
if encoding is not None:
matplotlib.verbose.report(
'Multiple encodings for %s = %s'
% (texname, psname), 'debug')
if word.startswith('['):
encoding = word[1:]
else:
encoding = word
else:
assert filename is None
filename = word
eff = effects.split()
effects = {}
try:
effects['slant'] = float(eff[eff.index('SlantFont')-1])
except ValueError:
pass
try:
effects['extend'] = float(eff[eff.index('ExtendFont')-1])
except ValueError:
pass
self._font[texname] = mpl_cbook.Bunch(
texname=texname, psname=psname, effects=effects,
encoding=encoding, filename=filename)
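    # Illustrative map line for _register (hypothetical entry, not taken from
    # a real map file):
    #   ptmbo8r Times-Bold ".167 SlantFont" <8r.enc <ptmb8a.pfb
    # would be registered with psname='Times-Bold',
    # effects={'slant': 0.167}, encoding='8r.enc', filename='ptmb8a.pfb'.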
class Encoding(object):
"""
Parses a \*.enc file referenced from a psfonts.map style file.
The format this class understands is a very limited subset of
PostScript.
Usage (subject to change)::
for name in Encoding(filename):
whatever(name)
"""
__slots__ = ('encoding',)
def __init__(self, filename):
with open(filename, 'rt') as file:
matplotlib.verbose.report('Parsing TeX encoding ' + filename, 'debug-annoying')
self.encoding = self._parse(file)
matplotlib.verbose.report('Result: ' + repr(self.encoding), 'debug-annoying')
def __iter__(self):
for name in self.encoding:
yield name
def _parse(self, file):
result = []
state = 0
for line in file:
comment_start = line.find('%')
if comment_start > -1:
line = line[:comment_start]
line = line.strip()
if state == 0:
# Expecting something like /FooEncoding [
if '[' in line:
state = 1
line = line[line.index('[')+1:].strip()
if state == 1:
if ']' in line: # ] def
line = line[:line.index(']')]
state = 2
words = line.split()
for w in words:
if w.startswith('/'):
# Allow for /abc/def/ghi
subwords = w.split('/')
result.extend(subwords[1:])
else:
raise ValueError("Broken name in encoding file: " + w)
return result
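# Illustrative input for Encoding._parse (hypothetical encoding file):
#   /MyEncoding [ /grave /acute
#     /circumflex /tilde ] def
# yields ['grave', 'acute', 'circumflex', 'tilde'].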
def find_tex_file(filename, format=None):
"""
Call :program:`kpsewhich` to find a file in the texmf tree. If
*format* is not None, it is used as the value for the
:option:`--format` option.
Apparently most existing TeX distributions on Unix-like systems
use kpathsea. I hear MikTeX (a popular distribution on Windows)
doesn't use kpathsea, so what do we do? (TODO)
.. seealso::
`Kpathsea documentation <http://www.tug.org/kpathsea/>`_
The library that :program:`kpsewhich` is part of.
"""
cmd = ['kpsewhich']
if format is not None:
cmd += ['--format=' + format]
cmd += [filename]
matplotlib.verbose.report('find_tex_file(%s): %s' \
% (filename,cmd), 'debug')
# stderr is unused, but reading it avoids a subprocess optimization
# that breaks EINTR handling in some Python versions:
# http://bugs.python.org/issue12493
# https://github.com/matplotlib/matplotlib/issues/633
pipe = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
result = pipe.communicate()[0].rstrip()
matplotlib.verbose.report('find_tex_file result: %s' % result,
'debug')
return result.decode('ascii')
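# Illustrative usage of find_tex_file (assumes kpsewhich from a local TeX
# installation is on the PATH; the returned absolute path depends on the
# local texmf tree):
#   find_tex_file('cmr10.tfm')
#   find_tex_file('cmr10.pfb', format='type1 fonts')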
# With multiple text objects per figure (e.g., tick labels) we may end
# up reading the same tfm and vf files many times, so we implement a
# simple cache. TODO: is this worth making persistent?
_tfmcache = {}
_vfcache = {}
def _fontfile(texname, class_, suffix, cache):
try:
return cache[texname]
except KeyError:
pass
filename = find_tex_file(texname + suffix)
if filename:
result = class_(filename)
else:
result = None
cache[texname] = result
return result
def _tfmfile(texname):
return _fontfile(texname, Tfm, '.tfm', _tfmcache)
def _vffile(texname):
return _fontfile(texname, Vf, '.vf', _vfcache)
if __name__ == '__main__':
import sys
matplotlib.verbose.set_level('debug-annoying')
fname = sys.argv[1]
try: dpi = float(sys.argv[2])
except IndexError: dpi = None
dvi = Dvi(fname, dpi)
fontmap = PsfontsMap(find_tex_file('pdftex.map'))
for page in dvi:
print('=== new page ===')
fPrev = None
for x,y,f,c,w in page.text:
if f != fPrev:
print('font', f.texname, 'scaled', f._scale/pow(2.0,20))
fPrev = f
print(x,y,c, 32 <= c < 128 and chr(c) or '.', w)
for x,y,w,h in page.boxes:
print(x,y,'BOX',w,h)
| mit |
l11x0m7/Paper | Modulation/code/signal_analysis.py | 1 | 7322 | # -*- encoding:utf-8 -*-
import os
import sys
import logging
from copy import deepcopy
from matplotlib import pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
reload(sys)
def drawModulation(dirpath, rownum=200):
"""信号文件绘图
:param filepath: 需要显示绘图的信号文件路径
:return: None
"""
plt.figure(1)
filepaths = os.listdir(dirpath)
fileorder = 1
useful_filepaths = [f for f in filepaths if f.startswith('parse_mod')]
for filepath in useful_filepaths:
count = np.random.randint(1, rownum + 1)
with open(dirpath + '/' + filepath, 'rb') as fr:
x = list()
vals = list()
name = filepath
for i, line in enumerate(fr):
if i < count:
continue
if i > count:
break
vals = line.strip().split('\t')
vals = map(float, vals)
x = range(len(vals))
plt.subplot(2 * len(useful_filepaths), 1, fileorder * 2 - 1)
        plt.plot(x, vals, color=((fileorder * 20 + 25) % 255 / 255.,
                                 (fileorder * 5 + 35) % 255 / 255.,
                                 (fileorder * 30 + 45) % 255 / 255.))
plt.xlabel('symbol number')
plt.ylabel('signal amplitude')
plt.title(name)
fileorder += 1
plt.show()
def drawMixSignal(filepath, sample=5):
"""信号文件绘图
:param filepath: 需要显示绘图的信号文件路径
:return: None
"""
plt.figure(1)
with open(filepath, 'rb') as fr:
rowNumber = sum(1 for _ in fr)
with open(filepath, 'rb') as fr:
sampleSignals = set(np.random.choice(range(rowNumber), sample, replace=False))
rowOrder = 1
for i, line in enumerate(fr):
if i not in sampleSignals:
continue
vals = line.strip().split('\t')
vals = map(float, vals)
x = range(len(vals))
plt.subplot(sample, 1, rowOrder)
            plt.plot(x, vals, color=((rowOrder * 20 + 25) % 255 / 255.,
                                     (rowOrder * 5 + 35) % 255 / 255.,
                                     (rowOrder * 30 + 45) % 255 / 255.))
rowOrder += 1
plt.show()
def mixSignalAndTagging(dirpath='../data', savepath='../data/mixSignals.txt', modeSize=[]):
"""信号混叠和标注
对已有的信号进行混叠.
1-7分别对应:2ASK、QPSK、2FSK、2ASK+QPSK、2ASK+2FSK、QPSK+2FSK、2ASK+QPSK+2FSK
:param dirpath: signal path
:param modeSize: the sample size in each mode, from `1` to `n`
:return: mixed signal
"""
def tagger(tag):
"""
给样本打标签,目前手动填写标签类型
:param tag: like `1\t2`, `0\t2`, `0\t1\t2`
:return: `int` from 1 to 7 representing label
"""
if tag == '\t'.join(['0', ]):
return 1
elif tag == '\t'.join(['1', ]):
return 2
elif tag == '\t'.join(['2', ]):
return 3
elif tag == '\t'.join(['0', '1']):
return 4
elif tag == '\t'.join(['0', '2']):
return 5
elif tag == '\t'.join(['1', '2']):
return 6
elif tag == '\t'.join(['0', '1', '2']):
return 7
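        # Illustrative: tagger('\t'.join(['0', '2'])) returns 5, which is the
        # 2ASK + 2FSK mixture in the numbering given in the docstring above.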
def C(n, m):
def calcNext(count, point, l, r, res, pre):
if(point > r):
return
if count == 1:
for i in xrange(point, r + 1):
pre.append(i)
res.append(deepcopy(pre))
pre.pop()
else:
for i in xrange(point, r + 1):
pre.append(i)
calcNext(count - 1, i + 1, l, r, res, pre)
pre.pop()
res = list()
calcNext(m, 0, 0, n - 1, res, [])
return res
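        # Illustrative: C(3, 2) returns [[0, 1], [0, 2], [1, 2]], i.e. all
        # index combinations used to pick which modulations to mix.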
files = os.listdir(dirpath)
signals = {}
for filepath in files:
if not filepath.startswith('parse_'):
continue
with open(dirpath + '/' + filepath, 'rb') as fr:
modName = filepath.split('parse_mod_')[1].split('.txt')[0]
signal = list()
for line in fr:
amps = line.strip().split('\t')
amps = map(float, amps)
signal.append(amps)
# signal = zip(*signal)
# signal = np.tile(signal, (20, 1))
signals[modName] = signal
modTypes = np.asarray(signals.keys())
modeNum = len(modTypes)
totalSignals = np.array([])
totalTags = list()
for mixNum in xrange(1, modeNum + 1):
groupIndeces = C(modeNum, mixNum)
groupNum = len(groupIndeces)
sampleEachMod = modeSize[mixNum - 1] // groupNum
groupSignals = np.array([])
for groupInd in groupIndeces:
mixSignals = np.array([])
tag = '\t'.join(map(str, sorted(groupInd)))
tag = str(tagger(tag))
while len(mixSignals) < sampleEachMod:
mixSignal = np.zeros([len(signals[modTypes[0]]), len(signals[modTypes[0]][0])])
for ind in groupInd:
curSignal = np.asarray(signals[modTypes[ind]])
randomIndeces = np.random.choice(len(curSignal), len(curSignal), replace=False)
randSignal = curSignal[randomIndeces]
mixSignal += randSignal
mixSignals = np.concatenate([mixSignals, mixSignal]) if mixSignals.shape[0] != 0 else mixSignal
mixSignals = mixSignals[:sampleEachMod, :]
totalTags.extend([tag] * sampleEachMod)
groupSignals = np.concatenate([groupSignals, mixSignals]) if groupSignals.shape[0] != 0 else mixSignals
totalSignals = np.concatenate([totalSignals, groupSignals]) if totalSignals.shape[0] != 0 else groupSignals
assert len(totalTags) == sum(modeSize)
assert len(totalSignals) == sum(modeSize)
indeces = np.random.choice(len(totalSignals), len(totalSignals), replace=False)
totalSignals = np.asarray(totalSignals)[indeces]
totalTags = np.asarray(totalTags)[indeces]
with open(savepath, 'wb') as fw:
for i in xrange(len(totalTags)):
signal = totalSignals[i]
signal = map(str, signal)
tag = totalTags[i]
fw.write('\t'.join(['\t'.join(signal), tag]) + '\n')
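# Illustrative call (mirrors the commented-out example under __main__ below):
#   mixSignalAndTagging('../data/5dB', '../data/5dB/mixSignals.txt', [600, 1500, 2000])
# draws 600 single-modulation samples, 1500 two-way mixtures and 2000
# three-way mixtures, shuffles them, and writes one tab-separated row of
# amplitudes followed by the label per sample.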
def split(filepath):
with open(filepath, 'rb') as fr:
X = list()
for line in fr:
X.append(line.strip())
X_train, X_test = train_test_split(X, test_size=0.2, random_state=42)
filename = filepath.split('/')[-1]
dirbase = filepath.split('/')[:-1]
with open('/'.join(dirbase + ['train_' + filename]), 'wb') as fw:
for line in X_train:
fw.write(line + '\n')
with open('/'.join(dirbase + ['test_' + filename]), 'wb') as fw:
for line in X_test:
fw.write(line + '\n')
if __name__ == '__main__':
# drawModulation('../data/5dB')
drawMixSignal('../data/50dB/mixSignals.txt')
# mixSignalAndTagging('../data/5dB', '../data/5dB/mixSignals.txt', [600, 1500, 2000])
# split('../data/5dB/mixSignals.txt')
| apache-2.0 |
marcusrehm/serenata-de-amor | research/src/fetch_cnpj_info.py | 2 | 12631 | from concurrent import futures
import json
import argparse
import time
import random
import itertools
import numpy as np
import os.path
import sys
import pandas as pd
import shutil
import requests
import requests.exceptions
import re
import logging
from datetime import datetime, timedelta
LOGGER_NAME = 'fetch_cnpj'
TEMP_DATASET_PATH = os.path.join('data', 'companies-partial.xz')
INFO_DATASET_PATH = os.path.join('data', '{0}-{1}-{2}-companies.xz')
global logger, cnpj_list, num_threads, proxies_list
# source files mapped for extract cnpj
with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
'table_config.json')) as json_file:
json_config = json.load(json_file)
datasets_cols = json_config['cnpj_cpf']
def configure_logger(verbosity):
logger = logging.getLogger(LOGGER_NAME)
logger.setLevel(verbosity)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(verbosity)
logger.addHandler(ch)
return logger
def transform_and_translate_data(json_data):
"""
    Transform the main activity, secondary activities and partners list into
    multiple columns and translate the column names to English.
"""
try:
data = pd.DataFrame(columns=['atividade_principal',
'data_situacao',
'tipo',
'nome',
'telefone',
'atividades_secundarias',
'situacao',
'bairro',
'logradouro',
'numero',
'cep',
'municipio',
'uf',
'abertura',
'natureza_juridica',
'fantasia',
'cnpj',
'ultima_atualizacao',
'status',
'complemento',
'email',
'efr',
'motivo_situacao',
'situacao_especial',
'data_situacao_especial',
'qsa'])
data = data.append(json_data, ignore_index=True)
except Exception as e:
logger.error("Error trying to transform and translate data:")
logger.error(json_data)
raise e
def decompose_main_activity(value):
struct = value
if struct:
return pd.Series(struct[0]). \
rename_axis({'code': 'main_activity_code',
'text': 'main_activity'})
else:
return pd.Series({}, index=['main_activity_code', 'main_activity'])
def decompose_secondary_activities(value):
struct = value
if struct and struct[0].get('text') != 'Não informada':
new_attributes = [pd.Series(activity).
rename_axis({'code': 'secondary_activity_%i_code' % (index + 1),
'text': 'secondary_activity_%i' % (index + 1)})
for index, activity in enumerate(struct)]
return pd.concat(new_attributes)
else:
return pd.Series()
def decompose_partners_list(value):
struct = value
if struct and len(struct) > 0:
new_attributes = [pd.Series(partner).
rename_axis({
'nome_rep_legal': 'partner_%i_legal_representative_name' % (index + 1),
'qual_rep_legal': 'partner_%i_legal_representative_qualification' % (index + 1),
'pais_origem': 'partner_%i_contry_origin' % (index + 1),
'nome': 'partner_%i_name' % (index + 1),
'qual': 'partner_%i_qualification' % (index + 1)})
for index, partner in enumerate(struct)]
return pd.concat(new_attributes)
else:
return pd.Series()
data = data.rename(columns={
'abertura': 'opening',
'atividade_principal': 'main_activity',
'atividades_secundarias': 'secondary_activities',
'bairro': 'neighborhood',
'cep': 'zip_code',
'complemento': 'additional_address_details',
'data_situacao_especial': 'special_situation_date',
'data_situacao': 'situation_date',
'efr': 'responsible_federative_entity',
'fantasia': 'trade_name',
'logradouro': 'address',
'motivo_situacao': 'situation_reason',
'municipio': 'city',
'natureza_juridica': 'legal_entity',
'nome': 'name',
'numero': 'number',
'situacao_especial': 'special_situation',
'situacao': 'situation',
'telefone': 'phone',
'tipo': 'type',
'uf': 'state',
'ultima_atualizacao': 'last_updated',
})
data['main_activity'] = data['main_activity'].fillna('{}')
data['secondary_activities'] = data['secondary_activities'].fillna('{}')
data['qsa'] = data['qsa'].fillna('{}')
data = pd.concat([
data.drop(['main_activity', 'secondary_activities', 'qsa'], axis=1),
data['main_activity'].apply(decompose_main_activity),
data['secondary_activities'].apply(decompose_secondary_activities),
data['qsa'].apply(decompose_partners_list)],
axis=1)
return data
def load_temp_dataset():
if os.path.exists(TEMP_DATASET_PATH):
return pd.read_csv(TEMP_DATASET_PATH, low_memory=False)
else:
return pd.DataFrame(columns=['cnpj'])
def read_cnpj_source_files(cnpj_source_files):
"""
Read the files passed as arguments and extract CNPJs from them.
The file needs to be mapped in datasets_cols.
"""
def extract_file_name_from_args(filepath):
date = re.compile('\d+-\d+-\d+-').findall(os.path.basename(filepath))
if date:
filename_without_date = os.path.basename(
filepath).replace(date[0], '')
else:
filename_without_date = os.path.basename(filepath)
return filename_without_date[:filename_without_date.rfind('.')]
def read_cnpj_list_to_import(filename, column):
cnpj_list = pd.read_csv(filename,
usecols=([column]),
dtype={column: np.str}
)[column]
        cnpj_list = cnpj_list.map(lambda cnpj:
                                  re.sub(r'[./-]', '', str(cnpj))).where(cnpj_list.str.len() == 14).unique()
return list(cnpj_list)
filesNotFound = list(filter(lambda file: not os.path.exists(file) or
datasets_cols.get(
extract_file_name_from_args(file.lower())) is None,
cnpj_source_files))
filesFound = list(filter(lambda file: os.path.exists(file) and
datasets_cols.get(
extract_file_name_from_args(file.lower())),
cnpj_source_files))
cnpj_list_to_import = list(itertools.chain.from_iterable(
map(lambda file: read_cnpj_list_to_import(file, datasets_cols.get(extract_file_name_from_args(file.lower()))),
filesFound)))
return cnpj_list_to_import, filesFound, filesNotFound
def remaining_cnpjs(cnpj_list_to_import, temp_dataset):
cnpj_list = set(cnpj_list_to_import)
already_fetched = set(temp_dataset['cnpj'].str.replace(r'[./-]', ''))
return list(cnpj_list - already_fetched)
def fetch_cnpj_info(cnpj, timeout=60):
url = 'http://receitaws.com.br/v1/cnpj/%s' % cnpj
try:
result = requests.get(url,
timeout=timeout,
proxies={'http': random.choice(proxies_list + [None])})
if result.status_code == 200:
cnpj_list.remove(cnpj)
json_return = json.loads(result.text.replace(
'\n', '').replace('\r', '').replace(';', ''))
return json_return
elif result.status_code == 429:
logger.debug('Sleeping 60 seconds to try again.')
logger.debug(result.text)
time.sleep(60)
logger.debug('Thread starting fetch again. {} CNPJs remaining.'.format(
len(cnpj_list)))
else:
logger.debug(result.text)
except requests.exceptions.Timeout as e:
logger.debug(e)
except requests.exceptions.ConnectionError as e:
logger.debug(e)
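# Illustrative behaviour: fetch_cnpj_info('00000000000000') issues
# GET http://receitaws.com.br/v1/cnpj/00000000000000 through a randomly chosen
# proxy (or none), and on HTTP 200 removes the CNPJ from the shared cnpj_list
# and returns the parsed JSON dict; on HTTP 429 it sleeps 60 seconds so the
# caller can retry (the CNPJ shown is a placeholder, not a real registration).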
parser = argparse.ArgumentParser(epilog="ex: python fetch_cnpj_info.py \
./data/2016-12-10-reimbursements.xz 2016-12-14-amendments.xz \
-p 177.67.84.135:8080 177.67.82.80:8080 -t 10")
parser.add_argument('-v', '--verbosity', default='INFO',
help='level of logging messages.')
parser.add_argument('-t', '--threads', type=int, default=10,
help='number of threads to use in fetch process')
parser.add_argument('-p', '--proxies', nargs='*',
help='proxies list to distribuite requests in fetch process')
parser.add_argument('args', nargs='+',
help='source files from where to get CNPJs to fetch')
args = parser.parse_args()
if args.args:
logger = configure_logger(args.verbosity)
if args.proxies:
proxies_list = ['http://' + proxy for proxy in args.proxies]
else:
proxies_list = []
num_threads = args.threads
cnpj_list_to_import, filesFound, filesNotFound = read_cnpj_source_files(
args.args)
temp_dataset = load_temp_dataset()
cnpj_list = remaining_cnpjs(cnpj_list_to_import, temp_dataset)
print('%i CNPJ\'s to be fetched' % len(cnpj_list))
print('Starting fetch. {0} worker threads and {1} http proxies'.format(
num_threads, len(proxies_list)))
# Try again in case of error during fetch_cnpj_info
while len(cnpj_list) > 0:
with futures.ThreadPoolExecutor(max_workers=num_threads) as executor:
future_to_cnpj_info = dict((executor.submit(fetch_cnpj_info, cnpj), cnpj)
for cnpj in cnpj_list)
last_saving_point = 0
for future in futures.as_completed(future_to_cnpj_info):
cnpj = future_to_cnpj_info[future]
if future.exception() is None and future.result() is not None and future.result()['status'] == 'OK':
result_translated = transform_and_translate_data(
future.result())
temp_dataset = pd.concat([temp_dataset, result_translated])
if last_saving_point < divmod(len(temp_dataset.index), 100)[0]:
last_saving_point = divmod(len(temp_dataset.index), 100)[0]
print('###################################')
print('Saving information already fetched. {0} records'.format(
len(temp_dataset.index)))
temp_dataset.to_csv(TEMP_DATASET_PATH,
compression='xz',
encoding='utf-8',
index=False)
temp_dataset.to_csv(TEMP_DATASET_PATH,
compression='xz',
encoding='utf-8',
index=False)
os.rename(TEMP_DATASET_PATH, INFO_DATASET_PATH.format(
datetime.today().strftime("%Y"),
datetime.today().strftime("%m"),
datetime.today().strftime("%d")))
if len(filesNotFound) > 0:
print('The following files were not found:')
for file in filesNotFound:
print(file)
print('Maybe they were misspelled or the CNPJ\'s columns are not mapped:')
for file in datasets_cols:
print('File: %s | Column: %s' % (file, datasets_cols[file]))
print('%i CNPJ\'s listed in file(s)' % len(set(cnpj_list_to_import)))
cnpj_list = remaining_cnpjs(cnpj_list_to_import, temp_dataset)
print('%i CNPJ\'s remaining' % len(cnpj_list))
else:
print('no files to fetch CNPJ\'s')
print('type python fetch_cnpj_info.py -h for help')
| mit |
decebel/librosa | librosa/filters.py | 2 | 24021 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Filters
=======
Filter bank construction
------------------------
.. autosummary::
:toctree: generated/
dct
mel
chroma
constant_q
Miscellaneous
-------------
.. autosummary::
:toctree: generated/
constant_q_lengths
cq_to_chroma
window_bandwidth
Deprecated
----------
.. autosummary::
:toctree: generated/
logfrequency
"""
import numpy as np
import scipy
import scipy.signal
import warnings
from . import cache
from . import util
from .util.exceptions import ParameterError
from .core.time_frequency import note_to_hz, hz_to_midi, hz_to_octs
from .core.time_frequency import fft_frequencies, mel_frequencies
# Dictionary of window function bandwidths
WINDOW_BANDWIDTHS = dict(hann=0.725)
__all__ = ['dct',
'mel',
'chroma',
'constant_q',
'constant_q_lengths',
'cq_to_chroma',
'window_bandwidth',
# Deprecated
'logfrequency']
@cache
def dct(n_filters, n_input):
"""Discrete cosine transform (DCT type-III) basis.
.. [1] http://en.wikipedia.org/wiki/Discrete_cosine_transform
Parameters
----------
n_filters : int > 0 [scalar]
number of output components (DCT filters)
n_input : int > 0 [scalar]
number of input components (frequency bins)
Returns
-------
dct_basis: np.ndarray [shape=(n_filters, n_input)]
DCT (type-III) basis vectors [1]_
Examples
--------
>>> n_fft = 2048
>>> dct_filters = librosa.filters.dct(13, 1 + n_fft // 2)
>>> dct_filters
array([[ 0.031, 0.031, ..., 0.031, 0.031],
[ 0.044, 0.044, ..., -0.044, -0.044],
...,
[ 0.044, 0.044, ..., -0.044, -0.044],
[ 0.044, 0.044, ..., 0.044, 0.044]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(dct_filters, x_axis='linear')
>>> plt.ylabel('DCT function')
>>> plt.title('DCT filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
basis = np.empty((n_filters, n_input))
basis[0, :] = 1.0 / np.sqrt(n_input)
samples = np.arange(1, 2*n_input, 2) * np.pi / (2.0 * n_input)
for i in range(1, n_filters):
basis[i, :] = np.cos(i*samples) * np.sqrt(2.0/n_input)
return basis
@cache
def mel(sr, n_fft, n_mels=128, fmin=0.0, fmax=None, htk=False):
"""Create a Filterbank matrix to combine FFT bins into Mel-frequency bins
Parameters
----------
sr : int > 0 [scalar]
sampling rate of the incoming signal
n_fft : int > 0 [scalar]
number of FFT components
n_mels : int > 0 [scalar]
number of Mel bands to generate
fmin : float >= 0 [scalar]
lowest frequency (in Hz)
fmax : float >= 0 [scalar]
highest frequency (in Hz).
If `None`, use `fmax = sr / 2.0`
htk : bool [scalar]
use HTK formula instead of Slaney
Returns
-------
M : np.ndarray [shape=(n_mels, 1 + n_fft/2)]
Mel transform matrix
Examples
--------
>>> melfb = librosa.filters.mel(22050, 2048)
>>> melfb
array([[ 0. , 0.016, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
Clip the maximum frequency to 8KHz
>>> librosa.filters.mel(22050, 2048, fmax=8000)
array([[ 0. , 0.02, ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ],
...,
[ 0. , 0. , ..., 0. , 0. ],
[ 0. , 0. , ..., 0. , 0. ]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(melfb, x_axis='linear')
>>> plt.ylabel('Mel filter')
>>> plt.title('Mel filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
if fmax is None:
fmax = float(sr) / 2
# Initialize the weights
n_mels = int(n_mels)
weights = np.zeros((n_mels, int(1 + n_fft // 2)))
# Center freqs of each FFT bin
fftfreqs = fft_frequencies(sr=sr, n_fft=n_fft)
# 'Center freqs' of mel bands - uniformly spaced between limits
freqs = mel_frequencies(n_mels + 2,
fmin=fmin,
fmax=fmax,
htk=htk)
# Slaney-style mel is scaled to be approx constant energy per channel
enorm = 2.0 / (freqs[2:n_mels+2] - freqs[:n_mels])
for i in range(n_mels):
# lower and upper slopes for all bins
lower = (fftfreqs - freqs[i]) / (freqs[i+1] - freqs[i])
upper = (freqs[i+2] - fftfreqs) / (freqs[i+2] - freqs[i+1])
# .. then intersect them with each other and zero
weights[i] = np.maximum(0, np.minimum(lower, upper)) * enorm[i]
return weights
@cache
def chroma(sr, n_fft, n_chroma=12, A440=440.0, ctroct=5.0,
octwidth=2, norm=2, base_c=True):
"""Create a Filterbank matrix to convert STFT to chroma
Parameters
----------
sr : int > 0 [scalar]
audio sampling rate
n_fft : int > 0 [scalar]
number of FFT bins
n_chroma : int > 0 [scalar]
number of chroma bins
A440 : float > 0 [scalar]
Reference frequency for A440
ctroct : float > 0 [scalar]
octwidth : float > 0 or None [scalar]
`ctroct` and `octwidth` specify a dominance window -
a Gaussian weighting centered on `ctroct` (in octs, A0 = 27.5Hz)
and with a gaussian half-width of `octwidth`.
Set `octwidth` to `None` to use a flat weighting.
norm : float > 0 or np.inf
Normalization factor for each filter
base_c : bool
If True, the filter bank will start at 'C'.
If False, the filter bank will start at 'A'.
Returns
-------
wts : ndarray [shape=(n_chroma, 1 + n_fft / 2)]
Chroma filter matrix
See Also
--------
util.normalize
feature.chroma_stft
Examples
--------
Build a simple chroma filter bank
>>> chromafb = librosa.filters.chroma(22050, 4096)
array([[ 1.689e-05, 3.024e-04, ..., 4.639e-17, 5.327e-17],
[ 1.716e-05, 2.652e-04, ..., 2.674e-25, 3.176e-25],
...,
[ 1.578e-05, 3.619e-04, ..., 8.577e-06, 9.205e-06],
[ 1.643e-05, 3.355e-04, ..., 1.474e-10, 1.636e-10]])
Use quarter-tones instead of semitones
>>> librosa.filters.chroma(22050, 4096, n_chroma=24)
array([[ 1.194e-05, 2.138e-04, ..., 6.297e-64, 1.115e-63],
[ 1.206e-05, 2.009e-04, ..., 1.546e-79, 2.929e-79],
...,
[ 1.162e-05, 2.372e-04, ..., 6.417e-38, 9.923e-38],
[ 1.180e-05, 2.260e-04, ..., 4.697e-50, 7.772e-50]])
Equally weight all octaves
>>> librosa.filters.chroma(22050, 4096, octwidth=None)
array([[ 3.036e-01, 2.604e-01, ..., 2.445e-16, 2.809e-16],
[ 3.084e-01, 2.283e-01, ..., 1.409e-24, 1.675e-24],
...,
[ 2.836e-01, 3.116e-01, ..., 4.520e-05, 4.854e-05],
[ 2.953e-01, 2.888e-01, ..., 7.768e-10, 8.629e-10]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(chromafb, x_axis='linear')
>>> plt.ylabel('Chroma filter')
>>> plt.title('Chroma filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
"""
wts = np.zeros((n_chroma, n_fft))
# Get the FFT bins, not counting the DC component
frequencies = np.linspace(0, sr, n_fft, endpoint=False)[1:]
frqbins = n_chroma * hz_to_octs(frequencies, A440)
# make up a value for the 0 Hz bin = 1.5 octaves below bin 1
# (so chroma is 50% rotated from bin 1, and bin width is broad)
frqbins = np.concatenate(([frqbins[0] - 1.5 * n_chroma], frqbins))
binwidthbins = np.concatenate((np.maximum(frqbins[1:] - frqbins[:-1],
1.0), [1]))
D = np.subtract.outer(frqbins, np.arange(0, n_chroma, dtype='d')).T
n_chroma2 = np.round(float(n_chroma) / 2)
# Project into range -n_chroma/2 .. n_chroma/2
# add on fixed offset of 10*n_chroma to ensure all values passed to
# rem are positive
D = np.remainder(D + n_chroma2 + 10*n_chroma, n_chroma) - n_chroma2
# Gaussian bumps - 2*D to make them narrower
wts = np.exp(-0.5 * (2*D / np.tile(binwidthbins, (n_chroma, 1)))**2)
# normalize each column
wts = util.normalize(wts, norm=norm, axis=0)
# Maybe apply scaling for fft bins
if octwidth is not None:
wts *= np.tile(
np.exp(-0.5 * (((frqbins/n_chroma - ctroct)/octwidth)**2)),
(n_chroma, 1))
if base_c:
wts = np.roll(wts, -3, axis=0)
# remove aliasing columns, copy to ensure row-contiguity
return np.ascontiguousarray(wts[:, :int(1 + n_fft/2)])
@util.decorators.deprecated('0.4', '0.5')
def logfrequency(sr, n_fft, n_bins=84, bins_per_octave=12, tuning=0.0,
fmin=None, spread=0.125): # pragma: no cover
'''Approximate a constant-Q filter bank for a fixed-window STFT.
Each filter is a log-normal window centered at the corresponding frequency.
.. warning:: Deprecated in librosa 0.4
Parameters
----------
sr : int > 0 [scalar]
audio sampling rate
n_fft : int > 0 [scalar]
FFT window size
n_bins : int > 0 [scalar]
Number of bins. Defaults to 84 (7 octaves).
bins_per_octave : int > 0 [scalar]
Number of bins per octave. Defaults to 12 (semitones).
tuning : None or float in `[-0.5, +0.5]` [scalar]
Tuning correction parameter, in fractions of a bin.
fmin : float > 0 [scalar]
Minimum frequency bin. Defaults to `C1 ~= 32.70`
spread : float > 0 [scalar]
Spread of each filter, as a fraction of a bin.
Returns
-------
C : np.ndarray [shape=(n_bins, 1 + n_fft/2)]
log-frequency filter bank.
Examples
--------
Simple log frequency filters
>>> logfb = librosa.filters.logfrequency(22050, 4096)
>>> logfb
array([[ 0., 0., ..., 0., 0.],
[ 0., 0., ..., 0., 0.],
...,
[ 0., 0., ..., 0., 0.],
[ 0., 0., ..., 0., 0.]])
Use a narrower frequency range
>>> librosa.filters.logfrequency(22050, 4096, n_bins=48, fmin=110)
array([[ 0., 0., ..., 0., 0.],
[ 0., 0., ..., 0., 0.],
...,
[ 0., 0., ..., 0., 0.],
[ 0., 0., ..., 0., 0.]])
Use narrower filters for sparser response: 5% of a semitone
>>> librosa.filters.logfrequency(22050, 4096, spread=0.05)
Or wider: 50% of a semitone
>>> librosa.filters.logfrequency(22050, 4096, spread=0.5)
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> librosa.display.specshow(logfb, x_axis='linear')
>>> plt.ylabel('Logarithmic filters')
>>> plt.title('Log-frequency filter bank')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
if fmin is None:
fmin = note_to_hz('C1')
# Apply tuning correction
correction = 2.0**(float(tuning) / bins_per_octave)
# What's the shape parameter for our log-normal filters?
sigma = float(spread) / bins_per_octave
# Construct the output matrix
basis = np.zeros((n_bins, int(1 + n_fft/2)))
# Get log frequencies of bins
log_freqs = np.log2(fft_frequencies(sr, n_fft)[1:])
for i in range(n_bins):
# What's the center (median) frequency of this filter?
c_freq = correction * fmin * (2.0**(float(i) / bins_per_octave))
# Place a log-normal window around c_freq
basis[i, 1:] = np.exp(-0.5 * ((log_freqs - np.log2(c_freq)) / sigma)**2
- np.log2(sigma) - log_freqs)
# Normalize the filters
basis = util.normalize(basis, norm=1, axis=1)
return basis
def __float_window(window_function):
'''Decorator function for windows with fractional input.
This function guarantees that for fractional `x`, the following hold:
1. `__float_window(window_function)(x)` has length `np.ceil(x)`
2. all values from `np.floor(x)` are set to 0.
For integer-valued `x`, there should be no change in behavior.
'''
def _wrap(n, *args, **kwargs):
'''The wrapped window'''
n_min, n_max = int(np.floor(n)), int(np.ceil(n))
window = window_function(n, *args, **kwargs)
if len(window) < n_max:
window = np.pad(window, [(0, n_max - len(window))],
mode='constant')
window[n_min:] = 0.0
return window
return _wrap
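# Illustrative consequence of the two guarantees above: wrapping
# scipy.signal.hann gives __float_window(scipy.signal.hann)(5.5) a window of
# length ceil(5.5) == 6 whose entries from index floor(5.5) == 5 onward are 0,
# while integer lengths behave exactly like the unwrapped window.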
@cache
def constant_q(sr, fmin=None, n_bins=84, bins_per_octave=12, tuning=0.0,
window=None, resolution=2, pad_fft=True, norm=1, **kwargs):
r'''Construct a constant-Q basis.
This uses the filter bank described by [1]_.
.. [1] McVicar, Matthew.
"A machine learning approach to automatic chord extraction."
Dissertation, University of Bristol. 2013.
Parameters
----------
sr : int > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin. Defaults to `C1 ~= 32.70`
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : function or `None`
Windowing function to apply to filters.
Default: `scipy.signal.hann`
resolution : float > 0 [scalar]
Resolution of filter windows. Larger values use longer windows.
pad_fft : boolean
Center-pad all filters up to the nearest integral power of 2.
By default, padding is done with zeros, but this can be overridden
by setting the `mode=` field in *kwargs*.
norm : {inf, -inf, 0, float > 0}
Type of norm to use for basis function normalization.
See librosa.util.normalize
kwargs : additional keyword arguments
Arguments to `np.pad()` when `pad==True`.
Returns
-------
filters : np.ndarray, `len(filters) == n_bins`
`filters[i]` is `i`\ th time-domain CQT basis filter
lengths : np.ndarray, `len(lengths) == n_bins`
The (fractional) length of each filter
See Also
--------
constant_q_lengths
librosa.core.cqt
librosa.util.normalize
Examples
--------
Use a longer window for each filter
>>> basis, lengths = librosa.filters.constant_q(22050, resolution=3)
Plot one octave of filters in time and frequency
>>> basis, lengths = librosa.filters.constant_q(22050)
>>> import matplotlib.pyplot as plt
>>> plt.figure(figsize=(10, 6))
>>> plt.subplot(2, 1, 1)
>>> notes = librosa.midi_to_note(np.arange(24, 24 + len(basis)))
>>> for i, (f, n) in enumerate(zip(basis, notes)[:12]):
... f_scale = librosa.util.normalize(f) / 2
... plt.plot(i + f_scale.real)
... plt.plot(i + f_scale.imag, linestyle=':')
>>> plt.axis('tight')
>>> plt.yticks(range(len(notes[:12])), notes[:12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filters (one octave, time domain)')
>>> plt.xlabel('Time (samples at 22050 Hz)')
>>> plt.legend(['Real', 'Imaginary'], frameon=True, framealpha=0.8)
>>> plt.subplot(2, 1, 2)
>>> F = np.abs(np.fft.fftn(basis, axes=[-1]))
>>> # Keep only the positive frequencies
>>> F = F[:, :(1 + F.shape[1] // 2)]
>>> librosa.display.specshow(F, x_axis='linear')
>>> plt.yticks(range(len(notes))[::12], notes[::12])
>>> plt.ylabel('CQ filters')
>>> plt.title('CQ filter magnitudes (frequency domain)')
>>> plt.tight_layout()
'''
if fmin is None:
fmin = note_to_hz('C1')
if window is None:
window = scipy.signal.hann
# Pass-through parameters to get the filter lengths
lengths = constant_q_lengths(sr, fmin,
n_bins=n_bins,
bins_per_octave=bins_per_octave,
tuning=tuning,
window=window,
resolution=resolution)
# Apply tuning correction
correction = 2.0**(float(tuning) / bins_per_octave)
fmin = correction * fmin
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
Q = float(resolution) / (2.0**(1. / bins_per_octave) - 1)
# Convert lengths back to frequencies
freqs = Q * sr / lengths
# Build the filters
filters = []
for ilen, freq in zip(lengths, freqs):
# Build the filter: note, length will be ceil(ilen)
sig = np.exp(np.arange(ilen, dtype=float) * 1j * 2 * np.pi * freq / sr)
# Apply the windowing function
sig = sig * __float_window(window)(ilen)
# Normalize
sig = util.normalize(sig, norm=norm)
filters.append(sig)
# Pad and stack
max_len = max(lengths)
if pad_fft:
max_len = int(2.0**(np.ceil(np.log2(max_len))))
else:
max_len = np.ceil(max_len)
filters = np.asarray([util.pad_center(filt, max_len, **kwargs)
for filt in filters])
return filters, np.asarray(lengths)
@cache
def constant_q_lengths(sr, fmin, n_bins=84, bins_per_octave=12,
tuning=0.0, window='hann', resolution=2):
r'''Return length of each filter in a constant-Q basis.
Parameters
----------
sr : int > 0 [scalar]
Audio sampling rate
fmin : float > 0 [scalar]
Minimum frequency bin.
n_bins : int > 0 [scalar]
Number of frequencies. Defaults to 7 octaves (84 bins).
bins_per_octave : int > 0 [scalar]
Number of bins per octave
tuning : float in `[-0.5, +0.5)` [scalar]
Tuning deviation from A440 in fractions of a bin
window : str or callable
Window function to use on filters
resolution : float > 0 [scalar]
Resolution of filter windows. Larger values use longer windows.
Returns
-------
lengths : np.ndarray
The length of each filter.
See Also
--------
constant_q
librosa.core.cqt
'''
if fmin <= 0:
raise ParameterError('fmin must be positive')
if bins_per_octave <= 0:
raise ParameterError('bins_per_octave must be positive')
if resolution <= 0:
raise ParameterError('resolution must be positive')
if n_bins <= 0 or not isinstance(n_bins, int):
raise ParameterError('n_bins must be a positive integer')
correction = 2.0**(float(tuning) / bins_per_octave)
fmin = correction * fmin
# Q should be capitalized here, so we suppress the name warning
# pylint: disable=invalid-name
Q = float(resolution) / (2.0**(1. / bins_per_octave) - 1)
# Compute the frequencies
freq = fmin * (2.0 ** (np.arange(n_bins, dtype=float) / bins_per_octave))
if np.any(freq * (1 + window_bandwidth(window) / Q) > sr / 2.0):
raise ParameterError('Filter pass-band lies beyond Nyquist')
# Convert frequencies to filter lengths
lengths = Q * sr / freq
return lengths
@cache
def cq_to_chroma(n_input, bins_per_octave=12, n_chroma=12,
fmin=None, window=None, base_c=True):
'''Convert a Constant-Q basis to Chroma.
Parameters
----------
n_input : int > 0 [scalar]
Number of input components (CQT bins)
bins_per_octave : int > 0 [scalar]
How many bins per octave in the CQT
n_chroma : int > 0 [scalar]
Number of output bins (per octave) in the chroma
fmin : None or float > 0
Center frequency of the first constant-Q channel.
Default: 'C1' ~= 32.7 Hz
window : None or np.ndarray
If provided, the cq_to_chroma filter bank will be
convolved with `window`.
base_c : bool
If True, the first chroma bin will start at 'C'
If False, the first chroma bin will start at 'A'
Returns
-------
cq_to_chroma : np.ndarray [shape=(n_chroma, n_input)]
Transformation matrix: `Chroma = np.dot(cq_to_chroma, CQT)`
Raises
------
ParameterError
If `n_input` is not an integer multiple of `n_chroma`
Examples
--------
Get a CQT, and wrap bins to chroma
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> CQT = librosa.cqt(y, sr=sr)
>>> chroma_map = librosa.filters.cq_to_chroma(CQT.shape[0])
>>> chromagram = chroma_map.dot(CQT)
>>> # Max-normalize each time step
>>> chromagram = librosa.util.normalize(chromagram, axis=0)
>>> import matplotlib.pyplot as plt
>>> plt.subplot(3, 1, 1)
>>> librosa.display.specshow(librosa.logamplitude(CQT**2,
... ref_power=np.max),
... y_axis='cqt_note', x_axis='time')
>>> plt.title('CQT Power')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 2)
>>> librosa.display.specshow(chromagram, y_axis='chroma', x_axis='time')
>>> plt.title('Chroma (wrapped CQT)')
>>> plt.colorbar()
>>> plt.subplot(3, 1, 3)
>>> chroma = librosa.feature.chromagram(y=y, sr=sr)
>>> librosa.display.specshow(chroma, y_axis='chroma', x_axis='time')
>>> plt.title('librosa.feature.chroma')
>>> plt.colorbar()
>>> plt.tight_layout()
'''
# How many fractional bins are we merging?
n_merge = float(bins_per_octave) / n_chroma
if fmin is None:
fmin = note_to_hz('C1')
if np.mod(n_merge, 1) != 0:
raise ParameterError('Incompatible CQ merge: '
'input bins must be an '
'integer multiple of output bins.')
# Tile the identity to merge fractional bins
cq_to_ch = np.repeat(np.eye(n_chroma), n_merge, axis=1)
# Roll it left to center on the target bin
cq_to_ch = np.roll(cq_to_ch, - int(n_merge // 2), axis=1)
# How many octaves are we repeating?
n_octaves = np.ceil(np.float(n_input) / bins_per_octave)
# Repeat and trim
cq_to_ch = np.tile(cq_to_ch, int(n_octaves))[:, :n_input]
# What's the note number of the first bin in the CQT?
# midi uses 12 bins per octave here
midi_0 = np.mod(hz_to_midi(fmin), 12)
if base_c:
# rotate to C
roll = midi_0
else:
# rotate to A
roll = midi_0 - 9
# Adjust the roll in terms of how many chroma we want out
# We need to be careful with rounding here
roll = int(np.round(roll * (n_chroma / 12.)))
# Apply the roll
cq_to_ch = np.roll(cq_to_ch, roll, axis=0).astype(float)
if window is not None:
cq_to_ch = scipy.signal.convolve(cq_to_ch,
np.atleast_2d(window),
mode='same')
return cq_to_ch
def window_bandwidth(window, default=1.0):
'''Get the bandwidth of a window function.
If the window function is unknown, return a default value.
Parameters
----------
window : callable or string
A window function, or the name of a window function.
Examples:
- scipy.signal.hann
- 'boxcar'
default : float >= 0
The default value, if `window` is unknown.
Returns
-------
bandwidth : float
The bandwidth of the given window function
See Also
--------
scipy.signal.windows
'''
if hasattr(window, '__name__'):
key = window.__name__
else:
key = window
if key not in WINDOW_BANDWIDTHS:
warnings.warn("Unknown window function '{:s}'.".format(key))
return WINDOW_BANDWIDTHS.get(key, default)
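# Illustrative: window_bandwidth('hann') and window_bandwidth(scipy.signal.hann)
# both return 0.725, while an unrecognized name falls back to `default`
# after emitting a warning.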
| isc |
hyqneuron/pylearn2-maxsom | pylearn2/scripts/datasets/browse_norb.py | 44 | 15741 | #!/usr/bin/env python
"""
A browser for the NORB and small NORB datasets. Navigate the images by
choosing the values for the label vector. Note that for the 'big' NORB
dataset, you can only set the first 5 label dimensions. You can then cycle
through the 3-12 images that fit those labels.
"""
import sys
import os
import argparse
import numpy
import warnings
try:
import matplotlib
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
matplotlib = None
pyplot = None
from pylearn2.datasets.new_norb import NORB
from pylearn2.utils import safe_zip, serial
def _parse_args():
parser = argparse.ArgumentParser(
description="Browser for NORB dataset.")
parser.add_argument('--which_norb',
type=str,
required=False,
choices=('big', 'small'),
help="'Selects the (big) NORB, or the Small NORB.")
parser.add_argument('--which_set',
type=str,
required=False,
choices=('train', 'test', 'both'),
help="'train', or 'test'")
parser.add_argument('--pkl',
type=str,
required=False,
help=".pkl file of NORB dataset")
parser.add_argument('--stereo_viewer',
action='store_true',
help="Swaps left and right stereo images, so you "
"can see them in 3D by crossing your eyes.")
parser.add_argument('--no_norm',
action='store_true',
help="Don't normalize pixel values")
result = parser.parse_args()
if (result.pkl is not None) == (result.which_norb is not None or
result.which_set is not None):
print("Must supply either --pkl, or both --which_norb and "
"--which_set.")
sys.exit(1)
if (result.which_norb is None) != (result.which_set is None):
print("When not supplying --pkl, you must supply both "
"--which_norb and --which_set.")
sys.exit(1)
if result.pkl is not None:
if not result.pkl.endswith('.pkl'):
print("--pkl must be a filename that ends in .pkl")
sys.exit(1)
if not os.path.isfile(result.pkl):
print("couldn't find --pkl file '%s'" % result.pkl)
sys.exit(1)
return result
def _make_grid_to_short_label(dataset):
"""
Returns an array x such that x[a][b] gives label index a's b'th unique
value. In other words, it maps label grid indices a, b to the
corresponding label value.
"""
unique_values = [sorted(list(frozenset(column)))
for column
in dataset.y[:, :5].transpose()]
# If dataset contains blank images, removes the '-1' labels
# corresponding to blank images, since they aren't contained in the
# label grid.
category_index = dataset.label_name_to_index['category']
unique_categories = unique_values[category_index]
category_to_name = dataset.label_to_value_funcs[category_index]
if any(category_to_name(category) == 'blank'
for category in unique_categories):
for d in range(1, len(unique_values)):
assert unique_values[d][0] == -1, ("unique_values: %s" %
str(unique_values))
unique_values[d] = unique_values[d][1:]
return unique_values
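# Illustrative: if the category column of dataset.y contains only the values
# {0, 1, 4}, then _make_grid_to_short_label returns a list with
# result[category_index] == [0, 1, 4], so grid index b maps to the b'th
# smallest label value.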
def _get_blank_label(dataset):
"""
Returns the label vector associated with blank images.
If dataset is a Small NORB (i.e. it has no blank images), this returns
None.
"""
category_index = dataset.label_name_to_index['category']
category_to_name = dataset.label_to_value_funcs[category_index]
blank_label = 5
try:
blank_name = category_to_name(blank_label)
except ValueError:
# Returns None if there is no 'blank' category (e.g. if we're using
        # the small NORB dataset).
return None
assert blank_name == 'blank'
blank_rowmask = dataset.y[:, category_index] == blank_label
blank_labels = dataset.y[blank_rowmask, :]
if not blank_rowmask.any():
return None
if not numpy.all(blank_labels[0, :] == blank_labels[1:, :]):
raise ValueError("Expected all labels of category 'blank' to have "
"the same value, but they differed.")
return blank_labels[0, :].copy()
def _make_label_to_row_indices(labels):
"""
Returns a map from short labels (the first 5 elements of the label
vector) to the list of row indices of rows in the dense design matrix
with that label.
For Small NORB, all unique short labels have exactly one row index.
For big NORB, a short label can have 0-N row indices.
"""
result = {}
for row_index, label in enumerate(labels):
short_label = tuple(label[:5])
if result.get(short_label, None) is None:
result[short_label] = []
result[short_label].append(row_index)
return result
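# Illustrative sketch (not part of the original script): for a toy label
# array the mapping built above groups rows by their first 5 label values,
# e.g.
#
#     labels = numpy.array([[0, 1, 2, 3, 4, 9],
#                           [0, 1, 2, 3, 4, 7]])
#     _make_label_to_row_indices(labels)
#     # -> {(0, 1, 2, 3, 4): [0, 1]}
#
# i.e. both rows share the same 5-element "short label", so both row indices
# end up under one key.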
def main():
"""Top-level function."""
args = _parse_args()
if args.pkl is not None:
dataset = serial.load(args.pkl)
else:
dataset = NORB(args.which_norb, args.which_set)
# Indexes into the first 5 labels, which live on a 5-D grid.
grid_indices = [0, ] * 5
grid_to_short_label = _make_grid_to_short_label(dataset)
# Maps 5-D label vector to a list of row indices for dataset.X, dataset.y
# that have those labels.
label_to_row_indices = _make_label_to_row_indices(dataset.y)
# Indexes into the row index lists returned by label_to_row_indices.
object_image_index = [0, ]
blank_image_index = [0, ]
blank_label = _get_blank_label(dataset)
# Index into grid_indices currently being edited
grid_dimension = [0, ]
dataset_is_stereo = 's' in dataset.view_converter.axes
figure, all_axes = pyplot.subplots(1,
3 if dataset_is_stereo else 2,
squeeze=True,
figsize=(10, 3.5))
set_name = (os.path.split(args.pkl)[1] if args.which_set is None
else "%sing set" % args.which_set)
figure.canvas.set_window_title("NORB dataset (%s)" % set_name)
label_text = figure.suptitle('Up/down arrows choose label, '
'left/right arrows change it',
x=0.1,
horizontalalignment="left")
# Hides axes' tick marks
for axes in all_axes:
axes.get_xaxis().set_visible(False)
axes.get_yaxis().set_visible(False)
text_axes, image_axes = (all_axes[0], all_axes[1:])
image_captions = (('left', 'right') if dataset_is_stereo
else ('mono image', ))
if args.stereo_viewer:
image_captions = tuple(reversed(image_captions))
for image_ax, caption in safe_zip(image_axes, image_captions):
image_ax.set_title(caption)
text_axes.set_frame_on(False) # Hides background of text_axes
def is_blank(grid_indices):
assert len(grid_indices) == 5
assert all(x >= 0 for x in grid_indices)
ci = dataset.label_name_to_index['category'] # category index
category = grid_to_short_label[ci][grid_indices[ci]]
category_name = dataset.label_to_value_funcs[ci](category)
return category_name == 'blank'
def get_short_label(grid_indices):
"""
Returns the first 5 elements of the label vector pointed to by
grid_indices. We use the first 5, since they're the labels used by
both the 'big' and Small NORB datasets.
"""
# Need to special-case the 'blank' category, since it lies outside of
# the grid.
if is_blank(grid_indices): # won't happen with SmallNORB
return tuple(blank_label[:5])
else:
return tuple(grid_to_short_label[i][g]
for i, g in enumerate(grid_indices))
def get_row_indices(grid_indices):
short_label = get_short_label(grid_indices)
return label_to_row_indices.get(short_label, None)
axes_to_pixels = {}
def redraw(redraw_text, redraw_images):
row_indices = get_row_indices(grid_indices)
if row_indices is None:
row_index = None
image_index = 0
num_images = 0
else:
image_index = (blank_image_index
if is_blank(grid_indices)
else object_image_index)[0]
row_index = row_indices[image_index]
num_images = len(row_indices)
def draw_text():
if row_indices is None:
padding_length = dataset.y.shape[1] - len(grid_indices)
current_label = (tuple(get_short_label(grid_indices)) +
(0, ) * padding_length)
else:
current_label = dataset.y[row_index, :]
label_names = dataset.label_index_to_name
label_values = [label_to_value(label) for label_to_value, label
in safe_zip(dataset.label_to_value_funcs,
current_label)]
lines = ['%s: %s' % (t, v)
for t, v
in safe_zip(label_names, label_values)]
if dataset.y.shape[1] > 5:
# Inserts image number & blank line between editable and
# fixed labels.
lines = (lines[:5] +
['No such image' if num_images == 0
else 'image: %d of %d' % (image_index + 1,
num_images),
'\n'] +
lines[5:])
# prepends the current index's line with an arrow.
lines[grid_dimension[0]] = '==> ' + lines[grid_dimension[0]]
text_axes.clear()
# "transAxes": 0, 0 = bottom-left, 1, 1 at upper-right.
text_axes.text(0, 0.5, # coords
'\n'.join(lines),
verticalalignment='center',
transform=text_axes.transAxes)
def draw_images():
if row_indices is None:
for axis in image_axes:
axis.clear()
else:
data_row = dataset.X[row_index:row_index + 1, :]
axes_names = dataset.view_converter.axes
assert len(axes_names) in (4, 5)
assert axes_names[0] == 'b'
assert axes_names[-3] == 0
assert axes_names[-2] == 1
assert axes_names[-1] == 'c'
def draw_image(image, axes):
assert len(image.shape) == 2
norm = matplotlib.colors.NoNorm() if args.no_norm else None
axes_to_pixels[axes] = image
axes.imshow(image, norm=norm, cmap='gray')
if 's' in axes_names:
image_pair = \
dataset.get_topological_view(mat=data_row,
single_tensor=True)
# Shaves off the singleton dimensions
# (batch # and channel #), leaving just 's', 0, and 1.
image_pair = tuple(image_pair[0, :, :, :, 0])
if args.stereo_viewer:
image_pair = tuple(reversed(image_pair))
for axis, image in safe_zip(image_axes, image_pair):
draw_image(image, axis)
else:
image = dataset.get_topological_view(mat=data_row)
image = image[0, :, :, 0]
draw_image(image, image_axes[0])
if redraw_text:
draw_text()
if redraw_images:
draw_images()
figure.canvas.draw()
default_status_text = ("mouseover image%s for pixel values" %
("" if len(image_axes) == 1 else "s"))
status_text = figure.text(0.5, 0.1, default_status_text)
def on_mouse_motion(event):
original_text = status_text.get_text()
if event.inaxes not in image_axes:
status_text.set_text(default_status_text)
else:
pixels = axes_to_pixels[event.inaxes]
row = int(event.ydata + .5)
col = int(event.xdata + .5)
status_text.set_text("Pixel value: %g" % pixels[row, col])
        if status_text.get_text() != original_text:
figure.canvas.draw()
def on_key_press(event):
def add_mod(arg, step, size):
return (arg + size + step) % size
def incr_index_type(step):
num_dimensions = len(grid_indices)
if dataset.y.shape[1] > 5:
# If dataset is big NORB, add one for the image index
num_dimensions += 1
grid_dimension[0] = add_mod(grid_dimension[0],
step,
num_dimensions)
def incr_index(step):
assert step in (0, -1, 1), ("Step was %d" % step)
image_index = (blank_image_index
if is_blank(grid_indices)
else object_image_index)
if grid_dimension[0] == 5: # i.e. the image index
row_indices = get_row_indices(grid_indices)
if row_indices is None:
image_index[0] = 0
else:
# increment the image index
image_index[0] = add_mod(image_index[0],
step,
len(row_indices))
else:
# increment one of the grid indices
gd = grid_dimension[0]
grid_indices[gd] = add_mod(grid_indices[gd],
step,
len(grid_to_short_label[gd]))
row_indices = get_row_indices(grid_indices)
if row_indices is None:
image_index[0] = 0
else:
# some grid indices have 2 images instead of 3.
                    image_index[0] = min(image_index[0],
                                         len(row_indices) - 1)
# Disables left/right key if we're currently showing a blank,
# and the current index type is neither 'category' (0) nor
# 'image number' (5)
disable_left_right = (is_blank(grid_indices) and
not (grid_dimension[0] in (0, 5)))
if event.key == 'up':
incr_index_type(-1)
redraw(True, False)
elif event.key == 'down':
incr_index_type(1)
redraw(True, False)
elif event.key == 'q':
sys.exit(0)
elif not disable_left_right:
if event.key == 'left':
incr_index(-1)
redraw(True, True)
elif event.key == 'right':
incr_index(1)
redraw(True, True)
figure.canvas.mpl_connect('key_press_event', on_key_press)
figure.canvas.mpl_connect('motion_notify_event', on_mouse_motion)
redraw(True, True)
pyplot.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
jaidevd/scikit-learn | examples/cluster/plot_kmeans_silhouette_analysis.py | 83 | 5888 | """
===============================================================================
Selecting the number of clusters with silhouette analysis on KMeans clustering
===============================================================================
Silhouette analysis can be used to study the separation distance between the
resulting clusters. The silhouette plot displays a measure of how close each
point in one cluster is to points in the neighboring clusters and thus provides
a way to assess parameters like number of clusters visually. This measure has a
range of [-1, 1].
Silhouette coefficients (as these values are referred to) near +1 indicate
that the sample is far away from the neighboring clusters. A value of 0
indicates that the sample is on or very close to the decision boundary between
two neighboring clusters and negative values indicate that those samples might
have been assigned to the wrong cluster.
In this example the silhouette analysis is used to choose an optimal value for
``n_clusters``. The silhouette plot shows that the ``n_clusters`` values of 3, 5
and 6 are bad picks for the given data due to the presence of clusters with
below average silhouette scores and also due to wide fluctuations in the size
of the silhouette plots. Silhouette analysis is more ambivalent in deciding
between 2 and 4.
Also from the thickness of the silhouette plot the cluster size can be
visualized. The silhouette plot for cluster 0 when ``n_clusters`` is equal to
2, is bigger in size owing to the grouping of the 3 sub clusters into one big
cluster. However, when ``n_clusters`` is equal to 4, all the plots are more
or less of similar thickness and hence of similar sizes, as can also be
verified from the labelled scatter plot on the right.
"""
from __future__ import print_function
from sklearn.datasets import make_blobs
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import numpy as np
print(__doc__)
# Generating the sample data from make_blobs
# This particular setting has one distinct cluster and 3 clusters placed close
# together.
X, y = make_blobs(n_samples=500,
n_features=2,
centers=4,
cluster_std=1,
center_box=(-10.0, 10.0),
shuffle=True,
random_state=1) # For reproducibility
range_n_clusters = [2, 3, 4, 5, 6]
for n_clusters in range_n_clusters:
# Create a subplot with 1 row and 2 columns
fig, (ax1, ax2) = plt.subplots(1, 2)
fig.set_size_inches(18, 7)
# The 1st subplot is the silhouette plot
# The silhouette coefficient can range from -1, 1 but in this example all
# lie within [-0.1, 1]
ax1.set_xlim([-0.1, 1])
# The (n_clusters+1)*10 is for inserting blank space between silhouette
# plots of individual clusters, to demarcate them clearly.
ax1.set_ylim([0, len(X) + (n_clusters + 1) * 10])
# Initialize the clusterer with n_clusters value and a random generator
# seed of 10 for reproducibility.
clusterer = KMeans(n_clusters=n_clusters, random_state=10)
cluster_labels = clusterer.fit_predict(X)
# The silhouette_score gives the average value for all the samples.
# This gives a perspective into the density and separation of the formed
# clusters
silhouette_avg = silhouette_score(X, cluster_labels)
print("For n_clusters =", n_clusters,
"The average silhouette_score is :", silhouette_avg)
# Compute the silhouette scores for each sample
sample_silhouette_values = silhouette_samples(X, cluster_labels)
y_lower = 10
for i in range(n_clusters):
# Aggregate the silhouette scores for samples belonging to
# cluster i, and sort them
ith_cluster_silhouette_values = \
sample_silhouette_values[cluster_labels == i]
ith_cluster_silhouette_values.sort()
size_cluster_i = ith_cluster_silhouette_values.shape[0]
y_upper = y_lower + size_cluster_i
color = cm.spectral(float(i) / n_clusters)
ax1.fill_betweenx(np.arange(y_lower, y_upper),
0, ith_cluster_silhouette_values,
facecolor=color, edgecolor=color, alpha=0.7)
# Label the silhouette plots with their cluster numbers at the middle
ax1.text(-0.05, y_lower + 0.5 * size_cluster_i, str(i))
# Compute the new y_lower for next plot
y_lower = y_upper + 10 # 10 for the 0 samples
ax1.set_title("The silhouette plot for the various clusters.")
ax1.set_xlabel("The silhouette coefficient values")
ax1.set_ylabel("Cluster label")
# The vertical line for average silhouette score of all the values
ax1.axvline(x=silhouette_avg, color="red", linestyle="--")
ax1.set_yticks([]) # Clear the yaxis labels / ticks
ax1.set_xticks([-0.1, 0, 0.2, 0.4, 0.6, 0.8, 1])
# 2nd Plot showing the actual clusters formed
colors = cm.spectral(cluster_labels.astype(float) / n_clusters)
ax2.scatter(X[:, 0], X[:, 1], marker='.', s=30, lw=0, alpha=0.7,
c=colors)
# Labeling the clusters
centers = clusterer.cluster_centers_
# Draw white circles at cluster centers
ax2.scatter(centers[:, 0], centers[:, 1],
marker='o', c="white", alpha=1, s=200)
for i, c in enumerate(centers):
ax2.scatter(c[0], c[1], marker='$%d$' % i, alpha=1, s=50)
ax2.set_title("The visualization of the clustered data.")
ax2.set_xlabel("Feature space for the 1st feature")
ax2.set_ylabel("Feature space for the 2nd feature")
plt.suptitle(("Silhouette analysis for KMeans clustering on sample data "
"with n_clusters = %d" % n_clusters),
fontsize=14, fontweight='bold')
plt.show()
| bsd-3-clause |
billy-inn/scikit-learn | examples/linear_model/plot_ransac.py | 250 | 1673 | """
===========================================
Robust linear model estimation using RANSAC
===========================================
In this example we see how to robustly fit a linear model to faulty data using
the RANSAC algorithm.
"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn import linear_model, datasets
n_samples = 1000
n_outliers = 50
X, y, coef = datasets.make_regression(n_samples=n_samples, n_features=1,
n_informative=1, noise=10,
coef=True, random_state=0)
# Add outlier data
np.random.seed(0)
X[:n_outliers] = 3 + 0.5 * np.random.normal(size=(n_outliers, 1))
y[:n_outliers] = -3 + 10 * np.random.normal(size=n_outliers)
# Fit line using all data
model = linear_model.LinearRegression()
model.fit(X, y)
# Robustly fit linear model with RANSAC algorithm
model_ransac = linear_model.RANSACRegressor(linear_model.LinearRegression())
model_ransac.fit(X, y)
inlier_mask = model_ransac.inlier_mask_
outlier_mask = np.logical_not(inlier_mask)
# Predict data of estimated models
line_X = np.arange(-5, 5)
line_y = model.predict(line_X[:, np.newaxis])
line_y_ransac = model_ransac.predict(line_X[:, np.newaxis])
# Compare estimated coefficients
print("Estimated coefficients (true, normal, RANSAC):")
print(coef, model.coef_, model_ransac.estimator_.coef_)
plt.plot(X[inlier_mask], y[inlier_mask], '.g', label='Inliers')
plt.plot(X[outlier_mask], y[outlier_mask], '.r', label='Outliers')
plt.plot(line_X, line_y, '-k', label='Linear regressor')
plt.plot(line_X, line_y_ransac, '-b', label='RANSAC regressor')
plt.legend(loc='lower right')
plt.show()
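# Rough sketch of what RANSACRegressor does internally (illustrative comment
# only, not the actual scikit-learn implementation; the helper names below are
# hypothetical):
#
#     best_inliers = None
#     for trial in range(max_trials):
#         subset = draw_random_minimal_sample(X, y)   # hypothetical helper
#         candidate = fit_base_estimator(subset)      # hypothetical helper
#         inliers = residuals(candidate, X, y) < residual_threshold
#         if best_inliers is None or inliers.sum() > best_inliers.sum():
#             best_inliers = inliers
#     # the final model is refit on the best inlier set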
| bsd-3-clause |
alexsavio/scikit-learn | examples/gaussian_process/plot_gpc_iris.py | 81 | 2231 | """
=====================================================
Gaussian process classification (GPC) on iris dataset
=====================================================
This example illustrates the predicted probability of GPC for an isotropic
and anisotropic RBF kernel on a two-dimensional version of the iris dataset.
The anisotropic RBF kernel obtains slightly higher log-marginal-likelihood by
assigning different length-scales to the two feature dimensions.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2] # we only take the first two features.
y = np.array(iris.target, dtype=int)
h = .02 # step size in the mesh
kernel = 1.0 * RBF([1.0])
gpc_rbf_isotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
kernel = 1.0 * RBF([1.0, 1.0])
gpc_rbf_anisotropic = GaussianProcessClassifier(kernel=kernel).fit(X, y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
titles = ["Isotropic RBF", "Anisotropic RBF"]
plt.figure(figsize=(10, 5))
for i, clf in enumerate((gpc_rbf_isotropic, gpc_rbf_anisotropic)):
# Plot the predicted probabilities. For that, we will assign a color to
# each point in the mesh [x_min, m_max]x[y_min, y_max].
plt.subplot(1, 2, i + 1)
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape((xx.shape[0], xx.shape[1], 3))
plt.imshow(Z, extent=(x_min, x_max, y_min, y_max), origin="lower")
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=np.array(["r", "g", "b"])[y])
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.ylim(yy.min(), yy.max())
plt.xticks(())
plt.yticks(())
plt.title("%s, LML: %.3f" %
(titles[i], clf.log_marginal_likelihood(clf.kernel_.theta)))
plt.tight_layout()
plt.show()
| bsd-3-clause |
bouhlelma/smt | smt/sampling_methods/tests/test_sampling_method_examples.py | 3 | 1403 | import unittest
import matplotlib
matplotlib.use("Agg")
class Test(unittest.TestCase):
def test_random(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.sampling_methods import Random
xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
sampling = Random(xlimits=xlimits)
num = 50
x = sampling(num)
print(x.shape)
plt.plot(x[:, 0], x[:, 1], "o")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_lhs(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.sampling_methods import LHS
xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
sampling = LHS(xlimits=xlimits)
num = 50
x = sampling(num)
print(x.shape)
plt.plot(x[:, 0], x[:, 1], "o")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
def test_full_factorial(self):
import numpy as np
import matplotlib.pyplot as plt
from smt.sampling_methods import FullFactorial
xlimits = np.array([[0.0, 4.0], [0.0, 3.0]])
sampling = FullFactorial(xlimits=xlimits)
num = 50
x = sampling(num)
print(x.shape)
plt.plot(x[:, 0], x[:, 1], "o")
plt.xlabel("x")
plt.ylabel("y")
plt.show()
if __name__ == "__main__":
unittest.main()
| bsd-3-clause |
vortex-ape/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 278 | 3402 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
  N      Y      Y    Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
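# The label combination (y1, y2, y3) is mapped to a color index by treating
# the labels as bits: index = 1*y1 + 2*y2 + 4*y3, giving values 1-7 that pick
# one of the seven colors above ('!' only pads index 0, which is never
# selected because unlabeled samples are disallowed below). For example,
# (1, 1, 0) -> 3, the purple entry.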
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
jefffohl/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/__init__.py | 72 | 2225 |
import matplotlib
import inspect
import warnings
# ipython relies on interactive_bk being defined here
from matplotlib.rcsetup import interactive_bk
__all__ = ['backend','show','draw_if_interactive',
'new_figure_manager', 'backend_version']
backend = matplotlib.get_backend() # validates, to match all_backends
def pylab_setup():
'return new_figure_manager, draw_if_interactive and show for pylab'
# Import the requested backend into a generic module object
if backend.startswith('module://'):
backend_name = backend[9:]
else:
backend_name = 'backend_'+backend
backend_name = backend_name.lower() # until we banish mixed case
backend_name = 'matplotlib.backends.%s'%backend_name.lower()
backend_mod = __import__(backend_name,
globals(),locals(),[backend_name])
# Things we pull in from all backends
new_figure_manager = backend_mod.new_figure_manager
# image backends like pdf, agg or svg do not need to do anything
# for "show" or "draw_if_interactive", so if they are not defined
# by the backend, just do nothing
def do_nothing_show(*args, **kwargs):
frame = inspect.currentframe()
fname = frame.f_back.f_code.co_filename
if fname in ('<stdin>', '<ipython console>'):
warnings.warn("""
Your currently selected backend, '%s' does not support show().
Please select a GUI backend in your matplotlibrc file ('%s')
or with matplotlib.use()""" %
(backend, matplotlib.matplotlib_fname()))
def do_nothing(*args, **kwargs): pass
backend_version = getattr(backend_mod,'backend_version', 'unknown')
show = getattr(backend_mod, 'show', do_nothing_show)
draw_if_interactive = getattr(backend_mod, 'draw_if_interactive', do_nothing)
# Additional imports which only happen for certain backends. This section
# should probably disappear once all backends are uniform.
if backend.lower() in ['wx','wxagg']:
Toolbar = backend_mod.Toolbar
__all__.append('Toolbar')
matplotlib.verbose.report('backend %s version %s' % (backend,backend_version))
return new_figure_manager, draw_if_interactive, show
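# Typical use (illustrative sketch, mirroring how pylab consumes this module):
#
#     new_figure_manager, draw_if_interactive, show = pylab_setup()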
| gpl-3.0 |
sohyongsheng/kaggle-carvana | plot_learning_curves.py | 1 | 2337 | import numpy
import matplotlib.pyplot
import pylab
import sys
def plot_learning_curves(experiment, epochs, train_losses, cross_validation_losses, dice_scores, x_limits = None, y_limits = None):
axes = matplotlib.pyplot.figure().gca()
x_axis = axes.get_xaxis()
x_axis.set_major_locator(pylab.MaxNLocator(integer = True))
matplotlib.pyplot.plot(epochs, train_losses)
matplotlib.pyplot.plot(epochs, cross_validation_losses)
matplotlib.pyplot.plot(epochs, dice_scores)
matplotlib.pyplot.legend(['Training loss', 'Cross validation loss', 'Dice scores'])
matplotlib.pyplot.xlabel('Epochs')
matplotlib.pyplot.ylabel('Loss or Dice score')
matplotlib.pyplot.title(experiment)
if x_limits is not None: matplotlib.pyplot.xlim(x_limits)
if y_limits is not None: matplotlib.pyplot.ylim(y_limits)
output_directory = './results/' + experiment + '/learningCurves/'
image_file = output_directory + 'learning_curves.png'
matplotlib.pyplot.tight_layout()
matplotlib.pyplot.savefig(image_file)
def process_results(experiment, x_limits, y_limits):
output_directory = './results/' + experiment + '/learningCurves/'
train_losses = numpy.load(output_directory + 'train_losses.npy')
cross_validation_losses = numpy.load(output_directory + 'cross_validation_losses.npy')
dice_scores = numpy.load(output_directory + 'dice_scores.npy')
epochs = numpy.arange(1, len(train_losses) + 1)
plot_learning_curves(experiment, epochs, train_losses, cross_validation_losses, dice_scores, x_limits, y_limits)
training_curves = numpy.column_stack((epochs, train_losses, cross_validation_losses, dice_scores))
numpy.savetxt(
output_directory + 'training_curves.csv',
training_curves,
fmt = '%d, %.5f, %.5f, %.5f',
header = 'Epochs, Train loss, Cross validation loss, Dice scores'
)
if __name__ == '__main__':
dice_score_limits = [0.995, 0.997]
loss_limits = [0.02, 0.08]
x_limits = [1, 150]
# Assign either dice_score_limits or loss_limits depending on what you want to focus on.
y_limits = loss_limits
# experiments = ['experiment' + str(i) for i in [53, 60, 61]]
experiments = ['my_solution']
for experiment in experiments:
process_results(experiment, x_limits, y_limits)
| gpl-3.0 |
JesseLivezey/plankton | pylearn2/packaged_dependencies/theano_linear/unshared_conv/localdot.py | 5 | 4839 | """
WRITEME
"""
import logging
from ..linear import LinearTransform
from .unshared_conv import FilterActs, ImgActs
from theano.compat.six.moves import xrange
from theano.sandbox import cuda
if cuda.cuda_available:
import gpu_unshared_conv # register optimizations
import numpy as np
try:
import matplotlib.pyplot as plt
except ImportError:
pass
logger = logging.getLogger(__name__)
class LocalDot(LinearTransform):
"""
    LocalDot is a linear operation computationally similar to
    convolution in the spatial domain, except that whereas convolution
    applies a single filter or set of filters across an image, the
    LocalDot has different filterbanks for different points in the image.
Mathematically, this is a general linear transform except for a
restriction that filters are 0 outside of a spatially localized patch
within the image.
Image shape is 5-tuple:
color_groups
colors_per_group
rows
cols
images
Filterbank shape is 7-tuple (!)
0 row_positions
1 col_positions
2 colors_per_group
3 height
4 width
5 color_groups
6 filters_per_group
    The result of left-multiplication is a 5-tuple with shape:
filter_groups
filters_per_group
row_positions
col_positions
images
Parameters
----------
filters : WRITEME
irows : WRITEME
Image rows
icols : WRITEME
Image columns
subsample : WRITEME
padding_start : WRITEME
filters_shape : WRITEME
message : WRITEME
"""
def __init__(self, filters, irows, icols=None,
subsample=(1, 1),
padding_start=None,
filters_shape=None,
message=""):
LinearTransform.__init__(self, [filters])
self._filters = filters
if filters_shape is None:
self._filters_shape = tuple(filters.get_value(borrow=True).shape)
else:
self._filters_shape = tuple(filters_shape)
self._irows = irows
if icols is None:
self._icols = irows
else:
self._icols = icols
if self._icols != self._irows:
raise NotImplementedError('GPU code at least needs square imgs')
self._subsample = tuple(subsample)
self._padding_start = padding_start
if len(self._filters_shape) != 7:
raise TypeError('need 7-tuple filter shape', self._filters_shape)
if self._subsample[0] != self._subsample[1]:
raise ValueError('subsampling must be same in rows and cols')
self._filter_acts = FilterActs(self._subsample[0])
self._img_acts = ImgActs(module_stride=self._subsample[0])
if message:
self._message = message
else:
self._message = filters.name
def rmul(self, x):
"""
.. todo::
WRITEME
"""
assert x.ndim == 5
return self._filter_acts(x, self._filters)
def rmul_T(self, x):
"""
.. todo::
WRITEME
"""
return self._img_acts(self._filters, x, self._irows, self._icols)
def col_shape(self):
"""
.. todo::
WRITEME
"""
ishape = self.row_shape() + (-99,)
fshape = self._filters_shape
hshape, = self._filter_acts.infer_shape(None, (ishape, fshape))
assert hshape[-1] == -99
return hshape[:-1]
def row_shape(self):
"""
.. todo::
WRITEME
"""
fshape = self._filters_shape
fmodulesR, fmodulesC, fcolors, frows, fcols = fshape[:-2]
fgroups, filters_per_group = fshape[-2:]
return fgroups, fcolors, self._irows, self._icols
def print_status(self):
"""
.. todo::
WRITEME
"""
raise NotImplementedError("TODO: fix dependence on non-existent "
"ndarray_status function")
"""print ndarray_status(
self._filters.get_value(borrow=True),
msg='%s{%s}'% (self.__class__.__name__,
self._message))
"""
def imshow_gray(self):
"""
.. todo::
WRITEME
"""
filters = self._filters.get_value()
modR, modC, colors, rows, cols, grps, fs_per_grp = filters.shape
logger.info(filters.shape)
rval = np.zeros((
modR * (rows + 1) - 1,
modC * (cols + 1) - 1,
))
for rr, modr in enumerate(xrange(0, rval.shape[0], rows + 1)):
for cc, modc in enumerate(xrange(0, rval.shape[1], cols + 1)):
rval[modr:modr + rows, modc:modc + cols] = filters[rr, cc, 0, :, :, 0, 0]
plt.imshow(rval, cmap='gray')
return rval
| bsd-3-clause |
vivekmishra1991/scikit-learn | sklearn/decomposition/dict_learning.py | 104 | 44632 | """ Dictionary learning
"""
from __future__ import print_function
# Author: Vlad Niculae, Gael Varoquaux, Alexandre Gramfort
# License: BSD 3 clause
import time
import sys
import itertools
from math import sqrt, ceil
import numpy as np
from scipy import linalg
from numpy.lib.stride_tricks import as_strided
from ..base import BaseEstimator, TransformerMixin
from ..externals.joblib import Parallel, delayed, cpu_count
from ..externals.six.moves import zip
from ..utils import (check_array, check_random_state, gen_even_slices,
gen_batches, _get_n_jobs)
from ..utils.extmath import randomized_svd, row_norms
from ..utils.validation import check_is_fitted
from ..linear_model import Lasso, orthogonal_mp_gram, LassoLars, Lars
def _sparse_encode(X, dictionary, gram, cov=None, algorithm='lasso_lars',
regularization=None, copy_cov=True,
init=None, max_iter=1000):
"""Generic sparse coding
Each column of the result is the solution to a Lasso problem.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows.
gram: None | array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
gram can be None if method is 'threshold'.
cov: array, shape=(n_components, n_samples)
Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than regularization
from the projection dictionary * data'
regularization : int | float
The regularization parameter. It corresponds to alpha when
algorithm is 'lasso_lars', 'lasso_cd' or 'threshold'.
Otherwise it corresponds to n_nonzero_coefs.
init: array of shape (n_samples, n_components)
Initialization value of the sparse code. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
Returns
-------
code: array of shape (n_components, n_features)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
if X.ndim == 1:
X = X[:, np.newaxis]
n_samples, n_features = X.shape
if cov is None and algorithm != 'lasso_cd':
# overwriting cov is safe
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm == 'lasso_lars':
alpha = float(regularization) / n_features # account for scaling
try:
err_mgt = np.seterr(all='ignore')
lasso_lars = LassoLars(alpha=alpha, fit_intercept=False,
verbose=False, normalize=False,
precompute=gram, fit_path=False)
lasso_lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lasso_lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'lasso_cd':
alpha = float(regularization) / n_features # account for scaling
clf = Lasso(alpha=alpha, fit_intercept=False, normalize=False,
precompute=gram, max_iter=max_iter, warm_start=True)
clf.coef_ = init
clf.fit(dictionary.T, X.T, check_input=False)
new_code = clf.coef_
elif algorithm == 'lars':
try:
err_mgt = np.seterr(all='ignore')
lars = Lars(fit_intercept=False, verbose=False, normalize=False,
precompute=gram, n_nonzero_coefs=int(regularization),
fit_path=False)
lars.fit(dictionary.T, X.T, Xy=cov)
new_code = lars.coef_
finally:
np.seterr(**err_mgt)
elif algorithm == 'threshold':
new_code = ((np.sign(cov) *
np.maximum(np.abs(cov) - regularization, 0)).T)
elif algorithm == 'omp':
new_code = orthogonal_mp_gram(gram, cov, regularization, None,
row_norms(X, squared=True),
copy_Xy=copy_cov).T
else:
        raise ValueError('Sparse coding method must be "lasso_lars", '
                         '"lasso_cd", "lars", "threshold" or "omp", got %s.'
% algorithm)
return new_code
# XXX : could be moved to the linear_model module
def sparse_encode(X, dictionary, gram=None, cov=None, algorithm='lasso_lars',
n_nonzero_coefs=None, alpha=None, copy_cov=True, init=None,
max_iter=1000, n_jobs=1):
"""Sparse coding
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix
dictionary: array of shape (n_components, n_features)
The dictionary matrix against which to solve the sparse coding of
the data. Some of the algorithms assume normalized rows for meaningful
output.
gram: array, shape=(n_components, n_components)
Precomputed Gram matrix, dictionary * dictionary'
cov: array, shape=(n_components, n_samples)
        Precomputed covariance, dictionary * X'
algorithm: {'lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'}
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
n_nonzero_coefs: int, 0.1 * n_features by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
alpha: float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
        If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
init: array of shape (n_samples, n_components)
Initialization value of the sparse codes. Only used if
`algorithm='lasso_cd'`.
max_iter: int, 1000 by default
Maximum number of iterations to perform if `algorithm='lasso_cd'`.
copy_cov: boolean, optional
Whether to copy the precomputed covariance matrix; if False, it may be
overwritten.
n_jobs: int, optional
Number of parallel jobs to run.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse codes
See also
--------
sklearn.linear_model.lars_path
sklearn.linear_model.orthogonal_mp
sklearn.linear_model.Lasso
SparseCoder
"""
dictionary = check_array(dictionary)
X = check_array(X)
n_samples, n_features = X.shape
n_components = dictionary.shape[0]
if gram is None and algorithm != 'threshold':
# Transposing product to ensure Fortran ordering
gram = np.dot(dictionary, dictionary.T).T
if cov is None and algorithm != 'lasso_cd':
copy_cov = False
cov = np.dot(dictionary, X.T)
if algorithm in ('lars', 'omp'):
regularization = n_nonzero_coefs
if regularization is None:
regularization = min(max(n_features / 10, 1), n_components)
else:
regularization = alpha
if regularization is None:
regularization = 1.
if n_jobs == 1 or algorithm == 'threshold':
code = _sparse_encode(X,
dictionary, gram, cov=cov,
algorithm=algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init,
max_iter=max_iter)
        # This ensures that the dimensionality of code is always 2,
        # consistent with the case n_jobs > 1
if code.ndim == 1:
code = code[np.newaxis, :]
return code
# Enter parallel code block
code = np.empty((n_samples, n_components))
slices = list(gen_even_slices(n_samples, _get_n_jobs(n_jobs)))
code_views = Parallel(n_jobs=n_jobs)(
delayed(_sparse_encode)(
X[this_slice], dictionary, gram,
cov[:, this_slice] if cov is not None else None,
algorithm,
regularization=regularization, copy_cov=copy_cov,
init=init[this_slice] if init is not None else None,
max_iter=max_iter)
for this_slice in slices)
for this_slice, this_view in zip(slices, code_views):
code[this_slice] = this_view
return code
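# Illustrative use of sparse_encode (a sketch for exposition, not part of the
# library itself): recover a sparse code for X against a fixed dictionary D so
# that X ~= code * D.
#
#     rng = np.random.RandomState(0)
#     D = rng.randn(8, 20)                                 # 8 atoms, 20 features
#     D /= np.sqrt((D ** 2).sum(axis=1))[:, np.newaxis]    # unit-norm atoms
#     X = np.dot(rng.randn(5, 8), D)                       # 5 samples in span of D
#     code = sparse_encode(X, D, algorithm='lasso_lars', alpha=0.1)
#     code.shape                                           # (5, 8)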
def _update_dict(dictionary, Y, code, verbose=False, return_r2=False,
random_state=None):
"""Update the dense dictionary factor in place.
Parameters
----------
dictionary: array of shape (n_features, n_components)
Value of the dictionary at the previous iteration.
Y: array of shape (n_features, n_samples)
Data matrix.
code: array of shape (n_components, n_samples)
Sparse coding of the data against which to optimize the dictionary.
verbose:
Degree of output the procedure will print.
return_r2: bool
Whether to compute and return the residual sum of squares corresponding
to the computed solution.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
Returns
-------
dictionary: array of shape (n_features, n_components)
Updated dictionary.
"""
n_components = len(code)
n_samples = Y.shape[0]
random_state = check_random_state(random_state)
# Residuals, computed 'in-place' for efficiency
R = -np.dot(dictionary, code)
R += Y
R = np.asfortranarray(R)
ger, = linalg.get_blas_funcs(('ger',), (dictionary, code))
for k in range(n_components):
# R <- 1.0 * U_k * V_k^T + R
R = ger(1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
dictionary[:, k] = np.dot(R, code[k, :].T)
# Scale k'th atom
atom_norm_square = np.dot(dictionary[:, k], dictionary[:, k])
if atom_norm_square < 1e-20:
if verbose == 1:
sys.stdout.write("+")
sys.stdout.flush()
elif verbose:
print("Adding new random atom")
dictionary[:, k] = random_state.randn(n_samples)
# Setting corresponding coefs to 0
code[k, :] = 0.0
dictionary[:, k] /= sqrt(np.dot(dictionary[:, k],
dictionary[:, k]))
else:
dictionary[:, k] /= sqrt(atom_norm_square)
# R <- -1.0 * U_k * V_k^T + R
R = ger(-1.0, dictionary[:, k], code[k, :], a=R, overwrite_a=True)
if return_r2:
R **= 2
# R is fortran-ordered. For numpy version < 1.6, sum does not
# follow the quick striding first, and is thus inefficient on
# fortran ordered data. We take a flat view of the data with no
# striding
R = as_strided(R, shape=(R.size, ), strides=(R.dtype.itemsize,))
R = np.sum(R)
return dictionary, R
return dictionary
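# Note on the update above (descriptive comment): each atom k is refit against
# the residual R with its own contribution excluded, i.e. roughly
#
#     dictionary[:, k] = R @ code[k, :].T
#     dictionary[:, k] /= ||dictionary[:, k]||_2
#
# The rank-1 BLAS `ger` calls first add the old atom's contribution back into
# R and then subtract the refitted atom's contribution, so R is maintained in
# place instead of being recomputed from scratch at every step.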
def dict_learning(X, n_components, alpha, max_iter=100, tol=1e-8,
method='lars', n_jobs=1, dict_init=None, code_init=None,
callback=None, verbose=False, random_state=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components: int,
Number of dictionary atoms to extract.
alpha: int,
Sparsity controlling parameter.
max_iter: int,
Maximum number of iterations to perform.
tol: float,
Tolerance for the stopping condition.
method: {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
n_jobs: int,
Number of parallel jobs to run, or -1 to autodetect.
dict_init: array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
code_init: array of shape (n_samples, n_components),
Initial value for the sparse code for warm restart scenarios.
callback:
Callable that gets invoked every five iterations.
verbose:
Degree of output the procedure will print.
random_state: int or RandomState
Pseudo number generator state used for random sampling.
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code: array of shape (n_samples, n_components)
The sparse code factor in the matrix factorization.
dictionary: array of shape (n_components, n_features),
The dictionary factor in the matrix factorization.
errors: array
Vector of errors at each iteration.
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to True.
See also
--------
dict_learning_online
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if method not in ('lars', 'cd'):
raise ValueError('Coding method %r not supported as a fit algorithm.'
% method)
method = 'lasso_' + method
t0 = time.time()
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init the code and the dictionary with SVD of Y
if code_init is not None and dict_init is not None:
code = np.array(code_init, order='F')
# Don't copy V, it will happen below
dictionary = dict_init
else:
code, S, dictionary = linalg.svd(X, full_matrices=False)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r: # True even if n_components=None
code = code[:, :n_components]
dictionary = dictionary[:n_components, :]
else:
code = np.c_[code, np.zeros((len(code), n_components - r))]
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
# Fortran-order dict, as we are going to access its row vectors
dictionary = np.array(dictionary, order='F')
residuals = 0
errors = []
current_cost = np.nan
if verbose == 1:
print('[dict_learning]', end=' ')
# If max_iter is 0, number of iterations returned should be zero
ii = -1
for ii in range(max_iter):
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
print ("Iteration % 3i "
"(elapsed time: % 3is, % 4.1fmn, current cost % 7.3f)"
% (ii, dt, dt / 60, current_cost))
# Update code
code = sparse_encode(X, dictionary, algorithm=method, alpha=alpha,
init=code, n_jobs=n_jobs)
# Update dictionary
dictionary, residuals = _update_dict(dictionary.T, X.T, code.T,
verbose=verbose, return_r2=True,
random_state=random_state)
dictionary = dictionary.T
# Cost function
current_cost = 0.5 * residuals + alpha * np.sum(np.abs(code))
errors.append(current_cost)
if ii > 0:
dE = errors[-2] - errors[-1]
# assert(dE >= -tol * errors[-1])
if dE < tol * errors[-1]:
if verbose == 1:
# A line return
print("")
elif verbose:
print("--- Convergence reached after %d iterations" % ii)
break
if ii % 5 == 0 and callback is not None:
callback(locals())
if return_n_iter:
return code, dictionary, errors, ii + 1
else:
return code, dictionary, errors
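# Illustrative call (a sketch for exposition, using only parameters documented
# above):
#
#     code, dictionary, errors = dict_learning(X, n_components=10, alpha=1.0,
#                                              max_iter=50, method='lars')
#     # X of shape (n_samples, n_features) is approximated by
#     # np.dot(code, dictionary)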
def dict_learning_online(X, n_components=2, alpha=1, n_iter=100,
return_code=True, dict_init=None, callback=None,
batch_size=3, verbose=False, shuffle=True, n_jobs=1,
method='lars', iter_offset=0, random_state=None,
return_inner_stats=False, inner_stats=None,
return_n_iter=False):
"""Solves a dictionary learning matrix factorization problem online.
Finds the best dictionary and the corresponding sparse code for
approximating the data matrix X by solving::
(U^*, V^*) = argmin 0.5 || X - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
where V is the dictionary and U is the sparse code. This is
accomplished by repeatedly iterating over mini-batches by slicing
the input data.
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
X: array of shape (n_samples, n_features)
Data matrix.
n_components : int,
Number of dictionary atoms to extract.
alpha : float,
Sparsity controlling parameter.
n_iter : int,
Number of iterations to perform.
return_code : boolean,
Whether to also return the code U or just the dictionary V.
dict_init : array of shape (n_components, n_features),
Initial value for the dictionary for warm restart scenarios.
callback :
Callable that gets invoked every five iterations.
batch_size : int,
The number of samples to take in each batch.
verbose :
Degree of output the procedure will print.
shuffle : boolean,
Whether to shuffle the data before splitting it in batches.
n_jobs : int,
Number of parallel jobs to run, or -1 to autodetect.
method : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
iter_offset : int, default 0
Number of previous iterations completed on the dictionary used for
initialization.
random_state : int or RandomState
Pseudo number generator state used for random sampling.
return_inner_stats : boolean, optional
Return the inner statistics A (dictionary covariance) and B
(data approximation). Useful to restart the algorithm in an
online setting. If return_inner_stats is True, return_code is
ignored
inner_stats : tuple of (A, B) ndarrays
Inner sufficient statistics that are kept by the algorithm.
Passing them at initialization is useful in online settings, to
        avoid losing the history of the evolution.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
return_n_iter : bool
Whether or not to return the number of iterations.
Returns
-------
code : array of shape (n_samples, n_components),
the sparse code (only returned if `return_code=True`)
dictionary : array of shape (n_components, n_features),
the solutions to the dictionary learning problem
n_iter : int
Number of iterations run. Returned only if `return_n_iter` is
set to `True`.
See also
--------
dict_learning
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
if n_components is None:
n_components = X.shape[1]
if method not in ('lars', 'cd'):
raise ValueError('Coding method not supported as a fit algorithm.')
method = 'lasso_' + method
t0 = time.time()
n_samples, n_features = X.shape
# Avoid integer division problems
alpha = float(alpha)
random_state = check_random_state(random_state)
if n_jobs == -1:
n_jobs = cpu_count()
# Init V with SVD of X
if dict_init is not None:
dictionary = dict_init
else:
_, S, dictionary = randomized_svd(X, n_components,
random_state=random_state)
dictionary = S[:, np.newaxis] * dictionary
r = len(dictionary)
if n_components <= r:
dictionary = dictionary[:n_components, :]
else:
dictionary = np.r_[dictionary,
np.zeros((n_components - r, dictionary.shape[1]))]
if verbose == 1:
print('[dict_learning]', end=' ')
if shuffle:
X_train = X.copy()
random_state.shuffle(X_train)
else:
X_train = X
dictionary = check_array(dictionary.T, order='F', dtype=np.float64,
copy=False)
X_train = check_array(X_train, order='C', dtype=np.float64, copy=False)
batches = gen_batches(n_samples, batch_size)
batches = itertools.cycle(batches)
# The covariance of the dictionary
if inner_stats is None:
A = np.zeros((n_components, n_components))
# The data approximation
B = np.zeros((n_features, n_components))
else:
A = inner_stats[0].copy()
B = inner_stats[1].copy()
# If n_iter is zero, we need to return zero.
ii = iter_offset - 1
for ii, batch in zip(range(iter_offset, iter_offset + n_iter), batches):
this_X = X_train[batch]
dt = (time.time() - t0)
if verbose == 1:
sys.stdout.write(".")
sys.stdout.flush()
elif verbose:
if verbose > 10 or ii % ceil(100. / verbose) == 0:
print ("Iteration % 3i (elapsed time: % 3is, % 4.1fmn)"
% (ii, dt, dt / 60))
this_code = sparse_encode(this_X, dictionary.T, algorithm=method,
alpha=alpha, n_jobs=n_jobs).T
# Update the auxiliary variables
if ii < batch_size - 1:
theta = float((ii + 1) * batch_size)
else:
theta = float(batch_size ** 2 + ii + 1 - batch_size)
beta = (theta + 1 - batch_size) / (theta + 1)
A *= beta
A += np.dot(this_code, this_code.T)
B *= beta
B += np.dot(this_X.T, this_code.T)
# Update dictionary
dictionary = _update_dict(dictionary, B, A, verbose=verbose,
random_state=random_state)
# XXX: Can the residuals be of any use?
# Maybe we need a stopping criteria based on the amount of
# modification in the dictionary
if callback is not None:
callback(locals())
if return_inner_stats:
if return_n_iter:
return dictionary.T, (A, B), ii - iter_offset + 1
else:
return dictionary.T, (A, B)
if return_code:
if verbose > 1:
print('Learning code...', end=' ')
elif verbose == 1:
print('|', end=' ')
code = sparse_encode(X, dictionary.T, algorithm=method, alpha=alpha,
n_jobs=n_jobs)
if verbose > 1:
dt = (time.time() - t0)
print('done (total time: % 3is, % 4.1fmn)' % (dt, dt / 60))
if return_n_iter:
return code, dictionary.T, ii - iter_offset + 1
else:
return code, dictionary.T
if return_n_iter:
return dictionary.T, ii - iter_offset + 1
else:
return dictionary.T
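# Warm-start sketch (illustrative comment): the online variant can be resumed
# on new data by carrying over the dictionary and the inner statistics (A, B):
#
#     V, (A, B) = dict_learning_online(X1, n_components=10, alpha=1.0,
#                                      n_iter=100, return_inner_stats=True)
#     V, (A, B) = dict_learning_online(X2, n_components=10, alpha=1.0,
#                                      n_iter=100, dict_init=V,
#                                      inner_stats=(A, B), iter_offset=100,
#                                      return_inner_stats=True)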
class SparseCodingMixin(TransformerMixin):
"""Sparse coding mixin"""
def _set_sparse_coding_params(self, n_components,
transform_algorithm='omp',
transform_n_nonzero_coefs=None,
transform_alpha=None, split_sign=False,
n_jobs=1):
self.n_components = n_components
self.transform_algorithm = transform_algorithm
self.transform_n_nonzero_coefs = transform_n_nonzero_coefs
self.transform_alpha = transform_alpha
self.split_sign = split_sign
self.n_jobs = n_jobs
def transform(self, X, y=None):
"""Encode the data as a sparse combination of the dictionary atoms.
Coding method is determined by the object parameter
`transform_algorithm`.
Parameters
----------
X : array of shape (n_samples, n_features)
Test data to be transformed, must have the same number of
features as the data used to train the model.
Returns
-------
X_new : array, shape (n_samples, n_components)
Transformed data
"""
check_is_fitted(self, 'components_')
# XXX : kwargs is not documented
X = check_array(X)
n_samples, n_features = X.shape
code = sparse_encode(
X, self.components_, algorithm=self.transform_algorithm,
n_nonzero_coefs=self.transform_n_nonzero_coefs,
alpha=self.transform_alpha, n_jobs=self.n_jobs)
if self.split_sign:
# feature vector is split into a positive and negative side
n_samples, n_features = code.shape
split_code = np.empty((n_samples, 2 * n_features))
split_code[:, :n_features] = np.maximum(code, 0)
split_code[:, n_features:] = -np.minimum(code, 0)
code = split_code
return code
class SparseCoder(BaseEstimator, SparseCodingMixin):
"""Sparse coding
Finds a sparse representation of data against a fixed, precomputed
dictionary.
Each row of the result is the solution to a sparse coding problem.
The goal is to find a sparse array `code` such that::
X ~= code * dictionary
Read more in the :ref:`User Guide <SparseCoder>`.
Parameters
----------
dictionary : array, [n_components, n_features]
The dictionary atoms used for sparse coding. Lines are assumed to be
normalized to unit norm.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data:
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
Attributes
----------
components_ : array, [n_components, n_features]
The unchanged dictionary atoms
See also
--------
DictionaryLearning
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
sparse_encode
"""
def __init__(self, dictionary, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
split_sign=False, n_jobs=1):
self._set_sparse_coding_params(dictionary.shape[0],
transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.components_ = dictionary
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
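# Illustrative use of SparseCoder (a sketch for exposition, assuming a
# precomputed dictionary D with unit-norm rows):
#
#     coder = SparseCoder(dictionary=D, transform_algorithm='lasso_lars',
#                         transform_alpha=0.1)
#     code = coder.transform(X)        # shape (n_samples, n_components)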
class DictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
max_iter : int,
maximum number of iterations to perform
tol : float,
tolerance for numerical error
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection ``dictionary * X'``
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
code_init : array of shape (n_samples, n_components),
initial value for the code, for warm restart
dict_init : array of shape (n_components, n_features),
initial values for the dictionary, for warm restart
    verbose : bool,
degree of verbosity of the printed output
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
dictionary atoms extracted from the data
error_ : array
vector of errors at each iteration
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
MiniBatchDictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, max_iter=1000, tol=1e-8,
fit_algorithm='lars', transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
n_jobs=1, code_init=None, dict_init=None, verbose=False,
split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.max_iter = max_iter
self.tol = tol
self.fit_algorithm = fit_algorithm
self.code_init = code_init
self.dict_init = dict_init
self.verbose = verbose
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self: object
Returns the object itself
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
V, U, E, self.n_iter_ = dict_learning(
X, n_components, self.alpha,
tol=self.tol, max_iter=self.max_iter,
method=self.fit_algorithm,
n_jobs=self.n_jobs,
code_init=self.code_init,
dict_init=self.dict_init,
verbose=self.verbose,
random_state=random_state,
return_n_iter=True)
self.components_ = U
self.error_ = E
return self
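# A minimal usage sketch for DictionaryLearning (illustrative only; the data
# shape, n_components and alpha below are assumptions):
#
#     import numpy as np
#     from sklearn.decomposition import DictionaryLearning
#     X = np.random.RandomState(0).randn(100, 20)
#     dico = DictionaryLearning(n_components=8, alpha=1.0, max_iter=100,
#                               transform_algorithm='omp')
#     code = dico.fit(X).transform(X)   # code: (100, 8), components_: (8, 20)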
class MiniBatchDictionaryLearning(BaseEstimator, SparseCodingMixin):
"""Mini-batch dictionary learning
Finds a dictionary (a set of atoms) that can best be used to represent data
using a sparse code.
Solves the optimization problem::
(U^*,V^*) = argmin 0.5 || Y - U V ||_2^2 + alpha * || U ||_1
(U,V)
with || V_k ||_2 = 1 for all 0 <= k < n_components
Read more in the :ref:`User Guide <DictionaryLearning>`.
Parameters
----------
n_components : int,
number of dictionary elements to extract
alpha : float,
sparsity controlling parameter
n_iter : int,
total number of iterations to perform
fit_algorithm : {'lars', 'cd'}
lars: uses the least angle regression method to solve the lasso problem
(linear_model.lars_path)
cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). Lars will be faster if
the estimated components are sparse.
transform_algorithm : {'lasso_lars', 'lasso_cd', 'lars', 'omp', \
'threshold'}
Algorithm used to transform the data.
lars: uses the least angle regression method (linear_model.lars_path)
lasso_lars: uses Lars to compute the Lasso solution
lasso_cd: uses the coordinate descent method to compute the
Lasso solution (linear_model.Lasso). lasso_lars will be faster if
the estimated components are sparse.
omp: uses orthogonal matching pursuit to estimate the sparse solution
threshold: squashes to zero all coefficients less than alpha from
the projection dictionary * X'
transform_n_nonzero_coefs : int, ``0.1 * n_features`` by default
Number of nonzero coefficients to target in each column of the
solution. This is only used by `algorithm='lars'` and `algorithm='omp'`
and is overridden by `alpha` in the `omp` case.
transform_alpha : float, 1. by default
If `algorithm='lasso_lars'` or `algorithm='lasso_cd'`, `alpha` is the
penalty applied to the L1 norm.
If `algorithm='threshold'`, `alpha` is the absolute value of the
threshold below which coefficients will be squashed to zero.
If `algorithm='omp'`, `alpha` is the tolerance parameter: the value of
the reconstruction error targeted. In this case, it overrides
`n_nonzero_coefs`.
split_sign : bool, False by default
Whether to split the sparse feature vector into the concatenation of
its negative part and its positive part. This can improve the
performance of downstream classifiers.
n_jobs : int,
number of parallel jobs to run
dict_init : array of shape (n_components, n_features),
initial value of the dictionary for warm restart scenarios
    verbose : bool,
degree of verbosity of the printed output
batch_size : int,
number of samples in each mini-batch
shuffle : bool,
whether to shuffle the samples before forming batches
random_state : int or RandomState
        Pseudo-random number generator state used for random sampling.
Attributes
----------
components_ : array, [n_components, n_features]
components extracted from the data
inner_stats_ : tuple of (A, B) ndarrays
Internal sufficient statistics that are kept by the algorithm.
        Keeping them is useful in online settings, to avoid losing the
history of the evolution, but they shouldn't have any use for the
end user.
A (n_components, n_components) is the dictionary covariance matrix.
B (n_features, n_components) is the data approximation matrix
n_iter_ : int
Number of iterations run.
Notes
-----
**References:**
J. Mairal, F. Bach, J. Ponce, G. Sapiro, 2009: Online dictionary learning
for sparse coding (http://www.di.ens.fr/sierra/pdfs/icml09.pdf)
See also
--------
SparseCoder
DictionaryLearning
SparsePCA
MiniBatchSparsePCA
"""
def __init__(self, n_components=None, alpha=1, n_iter=1000,
fit_algorithm='lars', n_jobs=1, batch_size=3,
shuffle=True, dict_init=None, transform_algorithm='omp',
transform_n_nonzero_coefs=None, transform_alpha=None,
verbose=False, split_sign=False, random_state=None):
self._set_sparse_coding_params(n_components, transform_algorithm,
transform_n_nonzero_coefs,
transform_alpha, split_sign, n_jobs)
self.alpha = alpha
self.n_iter = n_iter
self.fit_algorithm = fit_algorithm
self.dict_init = dict_init
self.verbose = verbose
self.shuffle = shuffle
self.batch_size = batch_size
self.split_sign = split_sign
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model from data in X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
random_state = check_random_state(self.random_state)
X = check_array(X)
U, (A, B), self.n_iter_ = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, return_code=False,
method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=self.dict_init,
batch_size=self.batch_size, shuffle=self.shuffle,
verbose=self.verbose, random_state=random_state,
return_inner_stats=True,
return_n_iter=True)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = self.n_iter
return self
def partial_fit(self, X, y=None, iter_offset=None):
"""Updates the model using the data in X as a mini-batch.
Parameters
----------
X: array-like, shape (n_samples, n_features)
            Training vector, where n_samples is the number of samples
and n_features is the number of features.
iter_offset: integer, optional
            The number of iterations on data batches that have been
performed before this call to partial_fit. This is optional:
if no number is passed, the memory of the object is
used.
Returns
-------
self : object
Returns the instance itself.
"""
if not hasattr(self, 'random_state_'):
self.random_state_ = check_random_state(self.random_state)
X = check_array(X)
if hasattr(self, 'components_'):
dict_init = self.components_
else:
dict_init = self.dict_init
inner_stats = getattr(self, 'inner_stats_', None)
if iter_offset is None:
iter_offset = getattr(self, 'iter_offset_', 0)
U, (A, B) = dict_learning_online(
X, self.n_components, self.alpha,
n_iter=self.n_iter, method=self.fit_algorithm,
n_jobs=self.n_jobs, dict_init=dict_init,
batch_size=len(X), shuffle=False,
verbose=self.verbose, return_code=False,
iter_offset=iter_offset, random_state=self.random_state_,
return_inner_stats=True, inner_stats=inner_stats)
self.components_ = U
# Keep track of the state of the algorithm to be able to do
# some online fitting (partial_fit)
self.inner_stats_ = (A, B)
self.iter_offset_ = iter_offset + self.n_iter
return self
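# A minimal online-learning sketch for MiniBatchDictionaryLearning
# (illustrative only; batch sizes and shapes are assumptions). As the code
# above shows, partial_fit keeps `inner_stats_` and `iter_offset_` between
# calls, so the dictionary is refined as new mini-batches arrive:
#
#     import numpy as np
#     from sklearn.decomposition import MiniBatchDictionaryLearning
#     rng = np.random.RandomState(0)
#     dico = MiniBatchDictionaryLearning(n_components=8, alpha=1.0, n_iter=50)
#     for _ in range(10):                      # stream of mini-batches
#         batch = rng.randn(32, 20)
#         dico.partial_fit(batch)
#     code = dico.transform(rng.randn(5, 20))  # sparse codes for new data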
| bsd-3-clause |
snario/geopandas | geopandas/plotting.py | 2 | 9645 | from __future__ import print_function
import numpy as np
from six import next
from six.moves import xrange
def plot_polygon(ax, poly, facecolor='red', edgecolor='black', alpha=0.5):
""" Plot a single Polygon geometry """
from descartes.patch import PolygonPatch
a = np.asarray(poly.exterior)
# without Descartes, we could make a Patch of exterior
ax.add_patch(PolygonPatch(poly, facecolor=facecolor, alpha=alpha))
ax.plot(a[:, 0], a[:, 1], color=edgecolor)
for p in poly.interiors:
x, y = zip(*p.coords)
ax.plot(x, y, color=edgecolor)
def plot_multipolygon(ax, geom, facecolor='red', edgecolor='black', alpha=0.5):
""" Can safely call with either Polygon or Multipolygon geometry
"""
if geom.type == 'Polygon':
plot_polygon(ax, geom, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha)
elif geom.type == 'MultiPolygon':
for poly in geom.geoms:
plot_polygon(ax, poly, facecolor=facecolor, edgecolor=edgecolor, alpha=alpha)
def plot_linestring(ax, geom, color='black', linewidth=1):
""" Plot a single LineString geometry """
a = np.array(geom)
ax.plot(a[:, 0], a[:, 1], color=color, linewidth=linewidth)
def plot_multilinestring(ax, geom, color='red', linewidth=1):
""" Can safely call with either LineString or MultiLineString geometry
"""
if geom.type == 'LineString':
plot_linestring(ax, geom, color=color, linewidth=linewidth)
elif geom.type == 'MultiLineString':
for line in geom.geoms:
plot_linestring(ax, line, color=color, linewidth=linewidth)
def plot_point(ax, pt, marker='o', markersize=2):
""" Plot a single Point geometry """
ax.plot(pt.x, pt.y, marker=marker, markersize=markersize, linewidth=0)
def gencolor(N, colormap='Set1'):
"""
Color generator intended to work with one of the ColorBrewer
qualitative color scales.
Suggested values of colormap are the following:
Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
(although any matplotlib colormap will work).
"""
from matplotlib import cm
# don't use more than 9 discrete colors
n_colors = min(N, 9)
cmap = cm.get_cmap(colormap, n_colors)
colors = cmap(range(n_colors))
for i in xrange(N):
yield colors[i % n_colors]
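# A small usage sketch for gencolor (illustrative; 'Set1' is one of the
# suggested ColorBrewer scales):
#
#     colors = gencolor(5, colormap='Set1')
#     first = next(colors)   # RGBA value for the first of 5 geometries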
def plot_series(s, colormap='Set1', axes=None, **color_kwds):
""" Plot a GeoSeries
Generate a plot of a GeoSeries geometry with matplotlib.
Parameters
----------
    s : Series
The GeoSeries to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString and Point
geometries can be plotted.
colormap : str (default 'Set1')
The name of a colormap recognized by matplotlib. Any
colormap will work, but categorical colormaps are
generally recommended. Examples of useful discrete
colormaps include:
Accent, Dark2, Paired, Pastel1, Pastel2, Set1, Set2, Set3
axes : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
**color_kwds : dict
Color options to be passed on to plot_polygon
Returns
-------
matplotlib axes instance
"""
import matplotlib.pyplot as plt
if axes is None:
fig = plt.gcf()
fig.add_subplot(111, aspect='equal')
ax = plt.gca()
else:
ax = axes
color = gencolor(len(s), colormap=colormap)
for geom in s:
if geom.type == 'Polygon' or geom.type == 'MultiPolygon':
plot_multipolygon(ax, geom, facecolor=next(color), **color_kwds)
elif geom.type == 'LineString' or geom.type == 'MultiLineString':
plot_multilinestring(ax, geom, color=next(color))
elif geom.type == 'Point':
plot_point(ax, geom)
plt.draw()
return ax
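# A minimal usage sketch for plot_series (illustrative; the geometries below
# are assumptions and require shapely):
#
#     from shapely.geometry import Point, Polygon
#     from geopandas import GeoSeries
#     s = GeoSeries([Point(0, 0), Point(1, 1),
#                    Polygon([(2, 0), (3, 0), (3, 1)])])
#     ax = plot_series(s, colormap='Set2')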
def plot_dataframe(s, column=None, colormap=None,
categorical=False, legend=False, axes=None,
scheme=None, k=5,
**color_kwds
):
""" Plot a GeoDataFrame
Generate a plot of a GeoDataFrame with matplotlib. If a
column is specified, the plot coloring will be based on values
in that column. Otherwise, a categorical plot of the
geometries in the `geometry` column will be generated.
Parameters
----------
    s : GeoDataFrame
The GeoDataFrame to be plotted. Currently Polygon,
MultiPolygon, LineString, MultiLineString and Point
geometries can be plotted.
column : str (default None)
The name of the column to be plotted.
categorical : bool (default False)
If False, colormap will reflect numerical values of the
column being plotted. For non-numerical columns (or if
column=None), this will be set to True.
colormap : str (default 'Set1')
The name of a colormap recognized by matplotlib.
legend : bool (default False)
Plot a legend (Experimental; currently for categorical
plots only)
axes : matplotlib.pyplot.Artist (default None)
axes on which to draw the plot
scheme : pysal.esda.mapclassify.Map_Classifier
Choropleth classification schemes
k : int (default 5)
Number of classes (ignored if scheme is None)
**color_kwds : dict
Color options to be passed on to plot_polygon
Returns
-------
matplotlib axes instance
"""
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
from matplotlib.colors import Normalize
from matplotlib import cm
if column is None:
return plot_series(s.geometry, colormap=colormap, axes=axes, **color_kwds)
else:
if s[column].dtype is np.dtype('O'):
categorical = True
if categorical:
if colormap is None:
colormap = 'Set1'
categories = list(set(s[column].values))
categories.sort()
valuemap = dict([(k, v) for (v, k) in enumerate(categories)])
values = [valuemap[k] for k in s[column]]
else:
values = s[column]
if scheme is not None:
values = __pysal_choro(values, scheme, k=k)
cmap = norm_cmap(values, colormap, Normalize, cm)
if axes is None:
fig = plt.gcf()
fig.add_subplot(111, aspect='equal')
ax = plt.gca()
else:
ax = axes
for geom, value in zip(s.geometry, values):
if geom.type == 'Polygon' or geom.type == 'MultiPolygon':
plot_multipolygon(ax, geom, facecolor=cmap.to_rgba(value), **color_kwds)
elif geom.type == 'LineString' or geom.type == 'MultiLineString':
plot_multilinestring(ax, geom, color=cmap.to_rgba(value))
# TODO: color point geometries
elif geom.type == 'Point':
plot_point(ax, geom)
if legend:
if categorical:
patches = []
for value, cat in enumerate(categories):
patches.append(Line2D([0], [0], linestyle="none",
marker="o", alpha=color_kwds.get('alpha', 0.5),
markersize=10, markerfacecolor=cmap.to_rgba(value)))
ax.legend(patches, categories, numpoints=1, loc='best')
else:
# TODO: show a colorbar
raise NotImplementedError
plt.draw()
return ax
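# A minimal usage sketch for plot_dataframe (illustrative; the geometries,
# column name and colormap are assumptions, not values used elsewhere in this
# module). A numeric column gives a continuous color scale via norm_cmap:
#
#     from shapely.geometry import Polygon
#     from geopandas import GeoDataFrame
#     squares = [Polygon([(i, 0), (i + 1, 0), (i + 1, 1), (i, 1)])
#                for i in range(3)]
#     gdf = GeoDataFrame({'geometry': squares, 'value': [1.0, 2.5, 4.0]})
#     ax = plot_dataframe(gdf, column='value', colormap='OrRd')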
def __pysal_choro(values, scheme, k=5):
""" Wrapper for choropleth schemes from PySAL for use with plot_dataframe
Parameters
----------
values
Series to be plotted
scheme
        pysal.esda.mapclassify classification scheme ['Equal_interval'|'Quantiles'|'Fisher_Jenks']
    k
        number of classes (2 <= k <= 9)
Returns
-------
values
Series with values replaced with class identifier if PySAL is available, otherwise the original values are used
"""
try:
from pysal.esda.mapclassify import Quantiles, Equal_Interval, Fisher_Jenks
schemes = {}
schemes['equal_interval'] = Equal_Interval
schemes['quantiles'] = Quantiles
schemes['fisher_jenks'] = Fisher_Jenks
s0 = scheme
scheme = scheme.lower()
if scheme not in schemes:
scheme = 'quantiles'
print('Unrecognized scheme: ', s0)
print('Using Quantiles instead')
if k < 2 or k > 9:
print('Invalid k: ', k)
print('2<=k<=9, setting k=5 (default)')
k = 5
binning = schemes[scheme](values, k)
values = binning.yb
except ImportError:
print('PySAL not installed, setting map to default')
return values
def norm_cmap(values, cmap, normalize, cm):
""" Normalize and set colormap
Parameters
----------
values
Series or array to be normalized
cmap
matplotlib Colormap
normalize
matplotlib.colors.Normalize
cm
matplotlib.cm
Returns
-------
n_cmap
mapping of normalized values to colormap (cmap)
"""
mn, mx = min(values), max(values)
norm = normalize(vmin=mn, vmax=mx)
n_cmap = cm.ScalarMappable(norm=norm, cmap=cmap)
return n_cmap
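# A small usage sketch for norm_cmap (illustrative values only):
#
#     from matplotlib.colors import Normalize
#     from matplotlib import cm
#     mapper = norm_cmap([1.0, 2.5, 4.0], 'OrRd', Normalize, cm)
#     rgba = mapper.to_rgba(2.5)   # color for the mid value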
| bsd-3-clause |
shirtsgroup/pygo | analysis/MBAR_foldingcurve_umbrella.py | 1 | 6397 | #!/usr/bin/python2.4
import sys
import numpy
import pymbar # for MBAR analysis
import timeseries # for timeseries analysis
import os
import os.path
import pdb # for debugging
from optparse import OptionParser
import MBAR_pmfQz
import wham
import MBAR_pmfQ
import cPickle
def parse_args():
parser=OptionParser()
#parser.add_option("-t", "--temprange", nargs=2, default=[300.0,450.0], type="float", dest="temprange", help="temperature range of replicas")
parser.add_option("-r", "--replicas", default=24, type="int",dest="replicas", help="number of replicas (default: 24)")
parser.add_option("-n", "--N_max", default=100000, type="int",dest="N_max", help="number of data points to read in (default: 100k)")
parser.add_option("-s", "--skip", default=1, type="int",dest="skip", help="skip every n data points")
parser.add_option("--direc", dest="direc", help="Qtraj_singleprot.txt file location")
parser.add_option("--tfile", dest="tfile", default="/home/edz3fz/proteinmontecarlo/T.txt", help="file of temperatures (default: T.txt)")
parser.add_option('--cpt', action="store_true", default=False, help="use checkpoint files, if they exist")
(options,args) = parser.parse_args()
return options
def get_ukln(args,N_max,K,Z,beta_k,spring_constant,U_kn,z_kn,N_k):
print 'Computing reduced potential energies...'
u_kln = numpy.zeros([K,K,N_max], numpy.float32)
for k in range(K):
for l in range(K):
#z_index = l/(len(T)) # z is outer dimension
#T_index = l%(len(T)) # T is inner dimension
dz = z_kn[k,0:N_k[k]] - Z[l]
u_kln[k,l,0:N_k[k]] = beta_k[l] * (U_kn[k,0:N_k[k]] + spring_constant[l]*(dz)**2)
return u_kln
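# Restating the reduced potential computed in the loop above: configuration n
# drawn in state k, re-evaluated in umbrella state l with inverse temperature
# beta_l, spring constant K_l and restraint center Z_l, gives
#
#     u_kln[k, l, n] = beta_l * ( U_kn[k, n] + K_l * (z_kn[k, n] - Z_l)**2 )
#
# e.g. (illustrative numbers only) beta_l = 0.4 mol/kcal, U = 10 kcal/mol,
# K_l = 1 kcal/mol/A^2 and a displacement of 2 A give 0.4 * (10 + 4) = 5.6.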
def get_mbar(args, beta_k, Z, U_kn, N_k, u_kln):
if args.cpt:
if os.path.exists('%s/f_k_foldingcurve.npy' % args.direc):
            print 'Reading in free energies from %s/f_k_foldingcurve.npy' % args.direc
            f_k = numpy.load('%s/f_k_foldingcurve.npy' % args.direc)
mbar = pymbar.MBAR(u_kln,N_k,initial_f_k = f_k, maximum_iterations=0,verbose=True,use_optimized=1)
return mbar
    print 'Using WHAM to generate histogram-based initial guess of dimensionless free energies f_k...'
#beta_k = numpy.array(beta_k.tolist()*len(Z))
#f_k = wham.histogram_wham(beta_k, U_kn, N_k)
print 'Initializing MBAR...'
mbar = pymbar.MBAR(u_kln, N_k, #initial_f_k = f_k,
use_optimized='', verbose=True)
mbar_file = '%s/f_k_foldingcurve.npy' % args.direc
print 'Saving free energies to %s' % mbar_file
saving = True
if saving:
numpy.save(mbar_file, mbar.f_k)
return mbar
def main():
options = parse_args()
kB = 0.00831447/4.184 #Boltzmann constant
T = numpy.loadtxt(options.tfile)
Z = numpy.arange(9,31.5,1.5)
print 'Initial temperature states are', T
print 'Distance states are', Z
K = len(T)*len(Z)
spring_constant = numpy.ones(K)
# read in data
U_kn, Q_kn, z_kn, N_max = MBAR_pmfQz.read_data(options, K, Z, T, spring_constant[0])
# subsample the data
U_kn, Q_kn, z_kn, N_k = MBAR_pmfQz.subsample(U_kn,Q_kn,z_kn,K,N_max)
# insert unweighted states
T_new = numpy.arange(200,410,10)
T_new = numpy.array([200,210,220,230,235,240,245,250,255,260,265,270,275,280,285,290,295,300,305,310,315,320,325,330,335,340,345,350,375,400])
Z_new = numpy.zeros(len(T_new))
K_new = len(T_new)
print 'inserting unweighted temperature states', T_new
# update states
print 'Inserting blank states'
Z = Z.tolist()
Z = [x for x in Z for _ in range(len(T))]
Z = numpy.concatenate((numpy.array(Z),Z_new))
T = numpy.array(T.tolist()*(K/len(T)))
T = numpy.concatenate((T,T_new))
K += K_new
spring_constant = numpy.concatenate((spring_constant,numpy.zeros(K_new)))
print 'all temperature states are ', T
print 'all surface states are ', Z
print 'there are a total of %i states' % K
N_k = numpy.concatenate((N_k,numpy.zeros(K_new)))
U_kn = numpy.concatenate((U_kn,numpy.zeros([K_new,N_max])))
Q_kn = numpy.concatenate((Q_kn,numpy.zeros([K_new,N_max])))
z_kn = numpy.concatenate((z_kn,numpy.zeros([K_new,N_max])))
beta_k = 1/(kB*T)
u_kln = get_ukln(options, N_max, K, Z, beta_k, spring_constant, U_kn, z_kn, N_k)
print "Initializing MBAR..."
# Use Adaptive Method (Both Newton-Raphson and Self-Consistent, testing which is better)
mbar = get_mbar(options,beta_k,Z,U_kn,N_k,u_kln)
print "Computing Expectations for E..."
(E_expect, dE_expect) = mbar.computeExpectations(u_kln)*(beta_k)**(-1)
print "Computing Expectations for E^2..."
(E2_expect,dE2_expect) = mbar.computeExpectations(u_kln*u_kln)*(beta_k)**(-2)
print "Computing Expectations for Q..."
(Q,dQ) = mbar.computeExpectations(Q_kn)
print "Computing Heat Capacity as ( <E^2> - <E>^2 ) / ( R*T^2 )..."
Cv = numpy.zeros([K], numpy.float64)
dCv = numpy.zeros([K], numpy.float64)
for i in range(K):
Cv[i] = (E2_expect[i] - (E_expect[i]*E_expect[i])) / ( kB * T[i] * T[i])
dCv[i] = 2*dE_expect[i]**2 / (kB *T[i]*T[i]) # from propagation of error
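    # Fluctuation formula used in the loop above: Cv(T) = (<E^2> - <E>^2) / (kB * T^2).
    # Illustrative check: a variance of 50 (kcal/mol)^2 at T = 300 K with
    # kB ~ 0.0019872 kcal/(mol K) gives Cv ~ 50 / (0.0019872 * 300**2) ~ 0.28 kcal/(mol K).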
numpy.save(options.direc+'/foldingcurve_umbrella',numpy.array([T, Q, dQ]))
numpy.save(options.direc+'/heatcap_umbrella',numpy.array([T, Cv, dCv]))
# pdb.set_trace()
#
# print 'Computing PMF(Q) at 325 K'
# nbins = 25
# target_temperature = 325
# target_beta = 1.0/(kB*target_temperature)
# nbins, bin_centers, bin_counts, bin_kn = get_bins(nbins,K,N_max,Q_kn)
# u_kn = target_beta*U_kn
# f_i, d2f_i = mbar.computePMF_states(u_kn, bin_kn, nbins)
# pmf_file = '%s/pmfQ_umbrella_%i.pkl' % (options.direc, target_temperature)
# f = file(pmf_file, 'wb')
# print 'Saving target temperature, bin centers, f_i, df_i to %s' % pmf_file
# cPickle.dump(target_temperature,f)
# cPickle.dump(bin_centers,f)
# cPickle.dump(f_i,f)
# cPickle.dump(d2f_i,f)
# f.close()
#
# try:
# import matplotlib.pyplot as plt
# plt.figure(1)
# plt.plot(T,Q,'k')
# plt.errorbar(T, Q, yerr=dQ)
# plt.xlabel('Temperature (K)')
# plt.ylabel('Q fraction native contacts')
# plt.savefig(options.direc+'/foldingcurve_umbrella.png')
# plt.show()
# except:
# pass
#
if __name__ == '__main__':
main()
| gpl-2.0 |
rahul-c1/scikit-learn | benchmarks/bench_multilabel_metrics.py | 11 | 7258 | #!/usr/bin/env python
"""
A comparison of multilabel target formats and metrics over them
"""
from __future__ import division
from __future__ import print_function
from timeit import timeit
from functools import partial
import itertools
import argparse
import sys
import matplotlib.pyplot as plt
import scipy.sparse as sp
import numpy as np
from sklearn.datasets import make_multilabel_classification
from sklearn.metrics import (f1_score, accuracy_score, hamming_loss,
jaccard_similarity_score)
from sklearn.utils.testing import ignore_warnings
METRICS = {
'f1': f1_score,
'f1-by-sample': partial(f1_score, average='samples'),
'accuracy': accuracy_score,
'hamming': hamming_loss,
'jaccard': jaccard_similarity_score,
}
FORMATS = {
'sequences': lambda y: [list(np.flatnonzero(s)) for s in y],
'dense': lambda y: y,
'csr': lambda y: sp.csr_matrix(y),
'csc': lambda y: sp.csc_matrix(y),
}
@ignore_warnings
def benchmark(metrics=tuple(v for k, v in sorted(METRICS.items())),
formats=tuple(v for k, v in sorted(FORMATS.items())),
samples=1000, classes=4, density=.2,
n_times=5):
"""Times metric calculations for a number of inputs
Parameters
----------
metrics : array-like of callables (1d or 0d)
The metric functions to time.
formats : array-like of callables (1d or 0d)
These may transform a dense indicator matrix into multilabel
representation.
samples : array-like of ints (1d or 0d)
The number of samples to generate as input.
classes : array-like of ints (1d or 0d)
The number of classes in the input.
    density : array-like of floats (1d or 0d)
The density of positive labels in the input.
n_times : int
Time calling the metric n_times times.
Returns
-------
array of floats shaped like (metrics, formats, samples, classes, density)
Time in seconds.
"""
metrics = np.atleast_1d(metrics)
samples = np.atleast_1d(samples)
classes = np.atleast_1d(classes)
density = np.atleast_1d(density)
formats = np.atleast_1d(formats)
out = np.zeros((len(metrics), len(formats), len(samples), len(classes),
len(density)), dtype=float)
it = itertools.product(samples, classes, density)
for i, (s, c, d) in enumerate(it):
_, y_true = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=42)
_, y_pred = make_multilabel_classification(n_samples=s, n_features=1,
n_classes=c, n_labels=d * c,
return_indicator=True,
random_state=84)
for j, f in enumerate(formats):
f_true = f(y_true)
f_pred = f(y_pred)
for k, metric in enumerate(metrics):
t = timeit(partial(metric, f_true, f_pred), number=n_times)
out[k, j].flat[i] = t
return out
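# A minimal usage sketch for benchmark() outside the CLI defined below
# (illustrative; the sizes are kept small so it runs quickly):
#
#     res = benchmark(metrics=[METRICS['hamming'], METRICS['accuracy']],
#                     formats=[FORMATS['dense'], FORMATS['csr']],
#                     samples=100, classes=5, density=.2, n_times=2)
#     _tabulate(res, ['hamming', 'accuracy'], ['dense', 'csr'])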
def _tabulate(results, metrics, formats):
"""Prints results by metric and format
Uses the last ([-1]) value of other fields
"""
column_width = max(max(len(k) for k in formats) + 1, 8)
first_width = max(len(k) for k in metrics)
head_fmt = ('{:<{fw}s}' + '{:>{cw}s}' * len(formats))
row_fmt = ('{:<{fw}s}' + '{:>{cw}.3f}' * len(formats))
print(head_fmt.format('Metric', *formats,
cw=column_width, fw=first_width))
for metric, row in zip(metrics, results[:, :, -1, -1, -1]):
print(row_fmt.format(metric, *row,
cw=column_width, fw=first_width))
def _plot(results, metrics, formats, title, x_ticks, x_label,
format_markers=('x', '|', 'o', '+'),
metric_colors=('c', 'm', 'y', 'k', 'g', 'r', 'b')):
"""
Plot the results by metric, format and some other variable given by
x_label
"""
fig = plt.figure('scikit-learn multilabel metrics benchmarks')
plt.title(title)
ax = fig.add_subplot(111)
for i, metric in enumerate(metrics):
for j, format in enumerate(formats):
ax.plot(x_ticks, results[i, j].flat,
label='{}, {}'.format(metric, format),
marker=format_markers[j],
color=metric_colors[i % len(metric_colors)])
ax.set_xlabel(x_label)
ax.set_ylabel('Time (s)')
ax.legend()
plt.show()
if __name__ == "__main__":
ap = argparse.ArgumentParser()
ap.add_argument('metrics', nargs='*', default=sorted(METRICS),
help='Specifies metrics to benchmark, defaults to all. '
                         'Choices are: {}'.format(sorted(METRICS)))
ap.add_argument('--formats', nargs='+', choices=sorted(FORMATS),
help='Specifies multilabel formats to benchmark '
'(defaults to all).')
ap.add_argument('--samples', type=int, default=1000,
help='The number of samples to generate')
ap.add_argument('--classes', type=int, default=10,
help='The number of classes')
ap.add_argument('--density', type=float, default=.2,
help='The average density of labels per sample')
ap.add_argument('--plot', choices=['classes', 'density', 'samples'],
default=None,
help='Plot time with respect to this parameter varying '
'up to the specified value')
ap.add_argument('--n-steps', default=10, type=int,
help='Plot this many points for each metric')
ap.add_argument('--n-times',
default=5, type=int,
help="Time performance over n_times trials")
args = ap.parse_args()
if args.plot is not None:
max_val = getattr(args, args.plot)
if args.plot in ('classes', 'samples'):
min_val = 2
else:
min_val = 0
steps = np.linspace(min_val, max_val, num=args.n_steps + 1)[1:]
if args.plot in ('classes', 'samples'):
steps = np.unique(np.round(steps).astype(int))
setattr(args, args.plot, steps)
if args.metrics is None:
args.metrics = sorted(METRICS)
if args.formats is None:
args.formats = sorted(FORMATS)
results = benchmark([METRICS[k] for k in args.metrics],
[FORMATS[k] for k in args.formats],
args.samples, args.classes, args.density,
args.n_times)
_tabulate(results, args.metrics, args.formats)
if args.plot is not None:
print('Displaying plot', file=sys.stderr)
title = ('Multilabel metrics with %s' %
', '.join('{0}={1}'.format(field, getattr(args, field))
for field in ['samples', 'classes', 'density']
if args.plot != field))
_plot(results, args.metrics, args.formats, title, steps, args.plot)
| bsd-3-clause |
plotly/python-api | packages/python/plotly/plotly/graph_objs/scatter/marker/_colorbar.py | 1 | 69628 | from plotly.basedatatypes import BaseTraceHierarchyType as _BaseTraceHierarchyType
import copy as _copy
class ColorBar(_BaseTraceHierarchyType):
# class properties
# --------------------
_parent_path_str = "scatter.marker"
_path_str = "scatter.marker.colorbar"
_valid_props = {
"bgcolor",
"bordercolor",
"borderwidth",
"dtick",
"exponentformat",
"len",
"lenmode",
"nticks",
"outlinecolor",
"outlinewidth",
"separatethousands",
"showexponent",
"showticklabels",
"showtickprefix",
"showticksuffix",
"thickness",
"thicknessmode",
"tick0",
"tickangle",
"tickcolor",
"tickfont",
"tickformat",
"tickformatstopdefaults",
"tickformatstops",
"ticklen",
"tickmode",
"tickprefix",
"ticks",
"ticksuffix",
"ticktext",
"ticktextsrc",
"tickvals",
"tickvalssrc",
"tickwidth",
"title",
"titlefont",
"titleside",
"x",
"xanchor",
"xpad",
"y",
"yanchor",
"ypad",
}
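    # A minimal usage sketch (illustrative only; the trace data and styling
    # values below are assumptions, not defaults of this class):
    #
    #     import plotly.graph_objects as go
    #     fig = go.Figure(go.Scatter(
    #         y=[1, 3, 2], mode='markers',
    #         marker=dict(color=[10, 20, 30], showscale=True,
    #                     colorbar=dict(title=dict(text='value'),
    #                                   thickness=15, ticks='outside'))))
    #     fig.show()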
# bgcolor
# -------
@property
def bgcolor(self):
"""
Sets the color of padded area.
The 'bgcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bgcolor"]
@bgcolor.setter
def bgcolor(self, val):
self["bgcolor"] = val
# bordercolor
# -----------
@property
def bordercolor(self):
"""
Sets the axis line color.
The 'bordercolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["bordercolor"]
@bordercolor.setter
def bordercolor(self, val):
self["bordercolor"] = val
# borderwidth
# -----------
@property
def borderwidth(self):
"""
Sets the width (in px) or the border enclosing this color bar.
The 'borderwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["borderwidth"]
@borderwidth.setter
def borderwidth(self, val):
self["borderwidth"] = val
# dtick
# -----
@property
def dtick(self):
"""
Sets the step in-between ticks on this axis. Use with `tick0`.
Must be a positive number, or special strings available to
"log" and "date" axes. If the axis `type` is "log", then ticks
are set every 10^(n*dtick) where n is the tick number. For
example, to set a tick mark at 1, 10, 100, 1000, ... set dtick
to 1. To set tick marks at 1, 100, 10000, ... set dtick to 2.
To set tick marks at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special values;
"L<f>", where `f` is a positive number, gives ticks linearly
spaced in value (but not position). For example `tick0` = 0.1,
`dtick` = "L0.5" will put ticks at 0.1, 0.6, 1.1, 1.6 etc. To
show powers of 10 plus small digits between, use "D1" (all
digits) or "D2" (only 2 and 5). `tick0` is ignored for "D1" and
"D2". If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval between
ticks to one day, set `dtick` to 86400000.0. "date" also has
special values "M<n>" gives ticks spaced by a number of months.
`n` must be a positive integer. To set ticks on the 15th of
every third month, set `tick0` to "2000-01-15" and `dtick` to
"M3". To set ticks every 4 years, set `dtick` to "M48"
The 'dtick' property accepts values of any type
Returns
-------
Any
"""
return self["dtick"]
@dtick.setter
def dtick(self, val):
self["dtick"] = val
# exponentformat
# --------------
@property
def exponentformat(self):
"""
Determines a formatting rule for the tick exponents. For
example, consider the number 1,000,000,000. If "none", it
appears as 1,000,000,000. If "e", 1e+9. If "E", 1E+9. If
"power", 1x10^9 (with 9 in a super script). If "SI", 1G. If
"B", 1B.
The 'exponentformat' property is an enumeration that may be specified as:
- One of the following enumeration values:
['none', 'e', 'E', 'power', 'SI', 'B']
Returns
-------
Any
"""
return self["exponentformat"]
@exponentformat.setter
def exponentformat(self, val):
self["exponentformat"] = val
# len
# ---
@property
def len(self):
"""
Sets the length of the color bar This measure excludes the
padding of both ends. That is, the color bar length is this
length minus the padding on both ends.
The 'len' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["len"]
@len.setter
def len(self, val):
self["len"] = val
# lenmode
# -------
@property
def lenmode(self):
"""
Determines whether this color bar's length (i.e. the measure in
the color variation direction) is set in units of plot
"fraction" or in *pixels. Use `len` to set the value.
The 'lenmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["lenmode"]
@lenmode.setter
def lenmode(self, val):
self["lenmode"] = val
# nticks
# ------
@property
def nticks(self):
"""
Specifies the maximum number of ticks for the particular axis.
The actual number of ticks will be chosen automatically to be
less than or equal to `nticks`. Has an effect only if
`tickmode` is set to "auto".
The 'nticks' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [0, 9223372036854775807]
Returns
-------
int
"""
return self["nticks"]
@nticks.setter
def nticks(self, val):
self["nticks"] = val
# outlinecolor
# ------------
@property
def outlinecolor(self):
"""
Sets the axis line color.
The 'outlinecolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["outlinecolor"]
@outlinecolor.setter
def outlinecolor(self, val):
self["outlinecolor"] = val
# outlinewidth
# ------------
@property
def outlinewidth(self):
"""
Sets the width (in px) of the axis line.
The 'outlinewidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["outlinewidth"]
@outlinewidth.setter
def outlinewidth(self, val):
self["outlinewidth"] = val
# separatethousands
# -----------------
@property
def separatethousands(self):
"""
If "true", even 4-digit integers are separated
The 'separatethousands' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["separatethousands"]
@separatethousands.setter
def separatethousands(self, val):
self["separatethousands"] = val
# showexponent
# ------------
@property
def showexponent(self):
"""
If "all", all exponents are shown besides their significands.
If "first", only the exponent of the first tick is shown. If
"last", only the exponent of the last tick is shown. If "none",
no exponents appear.
The 'showexponent' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showexponent"]
@showexponent.setter
def showexponent(self, val):
self["showexponent"] = val
# showticklabels
# --------------
@property
def showticklabels(self):
"""
Determines whether or not the tick labels are drawn.
The 'showticklabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showticklabels"]
@showticklabels.setter
def showticklabels(self, val):
self["showticklabels"] = val
# showtickprefix
# --------------
@property
def showtickprefix(self):
"""
If "all", all tick labels are displayed with a prefix. If
"first", only the first tick is displayed with a prefix. If
"last", only the last tick is displayed with a suffix. If
"none", tick prefixes are hidden.
The 'showtickprefix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showtickprefix"]
@showtickprefix.setter
def showtickprefix(self, val):
self["showtickprefix"] = val
# showticksuffix
# --------------
@property
def showticksuffix(self):
"""
Same as `showtickprefix` but for tick suffixes.
The 'showticksuffix' property is an enumeration that may be specified as:
- One of the following enumeration values:
['all', 'first', 'last', 'none']
Returns
-------
Any
"""
return self["showticksuffix"]
@showticksuffix.setter
def showticksuffix(self, val):
self["showticksuffix"] = val
# thickness
# ---------
@property
def thickness(self):
"""
Sets the thickness of the color bar This measure excludes the
size of the padding, ticks and labels.
The 'thickness' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["thickness"]
@thickness.setter
def thickness(self, val):
self["thickness"] = val
# thicknessmode
# -------------
@property
def thicknessmode(self):
"""
Determines whether this color bar's thickness (i.e. the measure
in the constant color direction) is set in units of plot
"fraction" or in "pixels". Use `thickness` to set the value.
The 'thicknessmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fraction', 'pixels']
Returns
-------
Any
"""
return self["thicknessmode"]
@thicknessmode.setter
def thicknessmode(self, val):
self["thicknessmode"] = val
# tick0
# -----
@property
def tick0(self):
"""
Sets the placement of the first tick on this axis. Use with
`dtick`. If the axis `type` is "log", then you must take the
log of your starting tick (e.g. to set the starting tick to
100, set the `tick0` to 2) except when `dtick`=*L<f>* (see
`dtick` for more info). If the axis `type` is "date", it should
be a date string, like date data. If the axis `type` is
"category", it should be a number, using the scale where each
category is assigned a serial number from zero in the order it
appears.
The 'tick0' property accepts values of any type
Returns
-------
Any
"""
return self["tick0"]
@tick0.setter
def tick0(self, val):
self["tick0"] = val
# tickangle
# ---------
@property
def tickangle(self):
"""
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the tick
labels vertically.
The 'tickangle' property is a angle (in degrees) that may be
specified as a number between -180 and 180. Numeric values outside this
range are converted to the equivalent value
(e.g. 270 is converted to -90).
Returns
-------
int|float
"""
return self["tickangle"]
@tickangle.setter
def tickangle(self, val):
self["tickangle"] = val
# tickcolor
# ---------
@property
def tickcolor(self):
"""
Sets the tick color.
The 'tickcolor' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color:
aliceblue, antiquewhite, aqua, aquamarine, azure,
beige, bisque, black, blanchedalmond, blue,
blueviolet, brown, burlywood, cadetblue,
chartreuse, chocolate, coral, cornflowerblue,
cornsilk, crimson, cyan, darkblue, darkcyan,
darkgoldenrod, darkgray, darkgrey, darkgreen,
darkkhaki, darkmagenta, darkolivegreen, darkorange,
darkorchid, darkred, darksalmon, darkseagreen,
darkslateblue, darkslategray, darkslategrey,
darkturquoise, darkviolet, deeppink, deepskyblue,
dimgray, dimgrey, dodgerblue, firebrick,
floralwhite, forestgreen, fuchsia, gainsboro,
ghostwhite, gold, goldenrod, gray, grey, green,
greenyellow, honeydew, hotpink, indianred, indigo,
ivory, khaki, lavender, lavenderblush, lawngreen,
lemonchiffon, lightblue, lightcoral, lightcyan,
lightgoldenrodyellow, lightgray, lightgrey,
lightgreen, lightpink, lightsalmon, lightseagreen,
lightskyblue, lightslategray, lightslategrey,
lightsteelblue, lightyellow, lime, limegreen,
linen, magenta, maroon, mediumaquamarine,
mediumblue, mediumorchid, mediumpurple,
mediumseagreen, mediumslateblue, mediumspringgreen,
mediumturquoise, mediumvioletred, midnightblue,
mintcream, mistyrose, moccasin, navajowhite, navy,
oldlace, olive, olivedrab, orange, orangered,
orchid, palegoldenrod, palegreen, paleturquoise,
palevioletred, papayawhip, peachpuff, peru, pink,
plum, powderblue, purple, red, rosybrown,
royalblue, rebeccapurple, saddlebrown, salmon,
sandybrown, seagreen, seashell, sienna, silver,
skyblue, slateblue, slategray, slategrey, snow,
springgreen, steelblue, tan, teal, thistle, tomato,
turquoise, violet, wheat, white, whitesmoke,
yellow, yellowgreen
Returns
-------
str
"""
return self["tickcolor"]
@tickcolor.setter
def tickcolor(self, val):
self["tickcolor"] = val
# tickfont
# --------
@property
def tickfont(self):
"""
Sets the color bar's tick label font
The 'tickfont' property is an instance of Tickfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter.marker.colorbar.Tickfont`
- A dict of string/value properties that will be passed
to the Tickfont constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
plotly.graph_objs.scatter.marker.colorbar.Tickfont
"""
return self["tickfont"]
@tickfont.setter
def tickfont(self, val):
self["tickfont"] = val
# tickformat
# ----------
@property
def tickformat(self):
"""
Sets the tick label formatting rule using d3 formatting mini-
languages which are very similar to those in Python. For
numbers, see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for dates
see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add one item
to d3's date formatter: "%{n}f" for fractional seconds with n
digits. For example, *2016-10-13 09:15:23.456* with tickformat
"%H~%M~%S.%2f" would display "09~15~23.46"
The 'tickformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickformat"]
@tickformat.setter
def tickformat(self, val):
self["tickformat"] = val
# tickformatstops
# ---------------
@property
def tickformatstops(self):
"""
The 'tickformatstops' property is a tuple of instances of
Tickformatstop that may be specified as:
- A list or tuple of instances of plotly.graph_objs.scatter.marker.colorbar.Tickformatstop
- A list or tuple of dicts of string/value properties that
will be passed to the Tickformatstop constructor
Supported dict properties:
dtickrange
range [*min*, *max*], where "min", "max" -
dtick values which describe some zoom level, it
is possible to omit "min" or "max" value by
passing "null"
enabled
Determines whether or not this stop is used. If
`false`, this stop is ignored even within its
`dtickrange`.
name
When used in a template, named items are
created in the output figure in addition to any
items the figure already has in this array. You
can modify these items in the output figure by
making your own item with `templateitemname`
matching this `name` alongside your
modifications (including `visible: false` or
`enabled: false` to hide it). Has no effect
outside of a template.
templateitemname
Used to refer to a named item in this array in
the template. Named items from the template
will be created even without a matching item in
the input figure, but you can modify one by
making an item with `templateitemname` matching
its `name`, alongside your modifications
(including `visible: false` or `enabled: false`
to hide it). If there is no template or no
matching item, this item will be hidden unless
you explicitly show it with `visible: true`.
value
string - dtickformat for described zoom level,
the same as "tickformat"
Returns
-------
tuple[plotly.graph_objs.scatter.marker.colorbar.Tickformatstop]
"""
return self["tickformatstops"]
@tickformatstops.setter
def tickformatstops(self, val):
self["tickformatstops"] = val
# tickformatstopdefaults
# ----------------------
@property
def tickformatstopdefaults(self):
"""
When used in a template (as layout.template.data.scatter.marker
.colorbar.tickformatstopdefaults), sets the default property
values to use for elements of
scatter.marker.colorbar.tickformatstops
The 'tickformatstopdefaults' property is an instance of Tickformatstop
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter.marker.colorbar.Tickformatstop`
- A dict of string/value properties that will be passed
to the Tickformatstop constructor
Supported dict properties:
Returns
-------
plotly.graph_objs.scatter.marker.colorbar.Tickformatstop
"""
return self["tickformatstopdefaults"]
@tickformatstopdefaults.setter
def tickformatstopdefaults(self, val):
self["tickformatstopdefaults"] = val
# ticklen
# -------
@property
def ticklen(self):
"""
Sets the tick length (in px).
The 'ticklen' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ticklen"]
@ticklen.setter
def ticklen(self, val):
self["ticklen"] = val
# tickmode
# --------
@property
def tickmode(self):
"""
Sets the tick mode for this axis. If "auto", the number of
ticks is set via `nticks`. If "linear", the placement of the
ticks is determined by a starting position `tick0` and a tick
step `dtick` ("linear" is the default value if `tick0` and
`dtick` are provided). If "array", the placement of the ticks
is set via `tickvals` and the tick text is `ticktext`. ("array"
is the default value if `tickvals` is provided).
The 'tickmode' property is an enumeration that may be specified as:
- One of the following enumeration values:
['auto', 'linear', 'array']
Returns
-------
Any
"""
return self["tickmode"]
@tickmode.setter
def tickmode(self, val):
self["tickmode"] = val
# tickprefix
# ----------
@property
def tickprefix(self):
"""
Sets a tick label prefix.
The 'tickprefix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["tickprefix"]
@tickprefix.setter
def tickprefix(self, val):
self["tickprefix"] = val
# ticks
# -----
@property
def ticks(self):
"""
Determines whether ticks are drawn or not. If "", this axis'
ticks are not drawn. If "outside" ("inside"), this axis' are
drawn outside (inside) the axis lines.
The 'ticks' property is an enumeration that may be specified as:
- One of the following enumeration values:
['outside', 'inside', '']
Returns
-------
Any
"""
return self["ticks"]
@ticks.setter
def ticks(self, val):
self["ticks"] = val
# ticksuffix
# ----------
@property
def ticksuffix(self):
"""
Sets a tick label suffix.
The 'ticksuffix' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["ticksuffix"]
@ticksuffix.setter
def ticksuffix(self, val):
self["ticksuffix"] = val
# ticktext
# --------
@property
def ticktext(self):
"""
Sets the text displayed at the ticks position via `tickvals`.
Only has an effect if `tickmode` is set to "array". Used with
`tickvals`.
The 'ticktext' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["ticktext"]
@ticktext.setter
def ticktext(self, val):
self["ticktext"] = val
# ticktextsrc
# -----------
@property
def ticktextsrc(self):
"""
Sets the source reference on Chart Studio Cloud for ticktext .
The 'ticktextsrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["ticktextsrc"]
@ticktextsrc.setter
def ticktextsrc(self, val):
self["ticktextsrc"] = val
# tickvals
# --------
@property
def tickvals(self):
"""
Sets the values at which ticks on this axis appear. Only has an
effect if `tickmode` is set to "array". Used with `ticktext`.
The 'tickvals' property is an array that may be specified as a tuple,
list, numpy array, or pandas Series
Returns
-------
numpy.ndarray
"""
return self["tickvals"]
@tickvals.setter
def tickvals(self, val):
self["tickvals"] = val
# tickvalssrc
# -----------
@property
def tickvalssrc(self):
"""
Sets the source reference on Chart Studio Cloud for tickvals .
The 'tickvalssrc' property must be specified as a string or
as a plotly.grid_objs.Column object
Returns
-------
str
"""
return self["tickvalssrc"]
@tickvalssrc.setter
def tickvalssrc(self, val):
self["tickvalssrc"] = val
# tickwidth
# ---------
@property
def tickwidth(self):
"""
Sets the tick width (in px).
The 'tickwidth' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["tickwidth"]
@tickwidth.setter
def tickwidth(self, val):
self["tickwidth"] = val
# title
# -----
@property
def title(self):
"""
The 'title' property is an instance of Title
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter.marker.colorbar.Title`
- A dict of string/value properties that will be passed
to the Title constructor
Supported dict properties:
font
Sets this color bar's title font. Note that the
title's font used to be set by the now
deprecated `titlefont` attribute.
side
Determines the location of color bar's title
with respect to the color bar. Note that the
title's location used to be set by the now
deprecated `titleside` attribute.
text
Sets the title of the color bar. Note that
before the existence of `title.text`, the
title's contents used to be defined as the
`title` attribute itself. This behavior has
been deprecated.
Returns
-------
plotly.graph_objs.scatter.marker.colorbar.Title
"""
return self["title"]
@title.setter
def title(self, val):
self["title"] = val
# titlefont
# ---------
@property
def titlefont(self):
"""
Deprecated: Please use scatter.marker.colorbar.title.font
instead. Sets this color bar's title font. Note that the
title's font used to be set by the now deprecated `titlefont`
attribute.
The 'font' property is an instance of Font
that may be specified as:
- An instance of :class:`plotly.graph_objs.scatter.marker.colorbar.title.Font`
- A dict of string/value properties that will be passed
to the Font constructor
Supported dict properties:
color
family
HTML font family - the typeface that will be
applied by the web browser. The web browser
will only be able to apply a font if it is
available on the system which it operates.
Provide multiple font families, separated by
commas, to indicate the preference in which to
apply fonts if they aren't available on the
system. The Chart Studio Cloud (at
https://chart-studio.plotly.com or on-premise)
generates images on a server, where only a
select number of fonts are installed and
supported. These include "Arial", "Balto",
"Courier New", "Droid Sans",, "Droid Serif",
"Droid Sans Mono", "Gravitas One", "Old
Standard TT", "Open Sans", "Overpass", "PT Sans
Narrow", "Raleway", "Times New Roman".
size
Returns
-------
"""
return self["titlefont"]
@titlefont.setter
def titlefont(self, val):
self["titlefont"] = val
# titleside
# ---------
@property
def titleside(self):
"""
Deprecated: Please use scatter.marker.colorbar.title.side
instead. Determines the location of color bar's title with
respect to the color bar. Note that the title's location used
to be set by the now deprecated `titleside` attribute.
The 'side' property is an enumeration that may be specified as:
- One of the following enumeration values:
['right', 'top', 'bottom']
Returns
-------
"""
return self["titleside"]
@titleside.setter
def titleside(self, val):
self["titleside"] = val
# x
# -
@property
def x(self):
"""
Sets the x position of the color bar (in plot fraction).
The 'x' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["x"]
@x.setter
def x(self, val):
self["x"] = val
# xanchor
# -------
@property
def xanchor(self):
"""
Sets this color bar's horizontal position anchor. This anchor
binds the `x` position to the "left", "center" or "right" of
the color bar.
The 'xanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['left', 'center', 'right']
Returns
-------
Any
"""
return self["xanchor"]
@xanchor.setter
def xanchor(self, val):
self["xanchor"] = val
# xpad
# ----
@property
def xpad(self):
"""
Sets the amount of padding (in px) along the x direction.
The 'xpad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["xpad"]
@xpad.setter
def xpad(self, val):
self["xpad"] = val
# y
# -
@property
def y(self):
"""
Sets the y position of the color bar (in plot fraction).
The 'y' property is a number and may be specified as:
- An int or float in the interval [-2, 3]
Returns
-------
int|float
"""
return self["y"]
@y.setter
def y(self, val):
self["y"] = val
# yanchor
# -------
@property
def yanchor(self):
"""
Sets this color bar's vertical position anchor This anchor
binds the `y` position to the "top", "middle" or "bottom" of
the color bar.
The 'yanchor' property is an enumeration that may be specified as:
- One of the following enumeration values:
['top', 'middle', 'bottom']
Returns
-------
Any
"""
return self["yanchor"]
@yanchor.setter
def yanchor(self, val):
self["yanchor"] = val
# ypad
# ----
@property
def ypad(self):
"""
Sets the amount of padding (in px) along the y direction.
The 'ypad' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["ypad"]
@ypad.setter
def ypad(self, val):
self["ypad"] = val
# Self properties description
# ---------------------------
@property
def _prop_descriptions(self):
return """\
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
            color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatter.marker.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.scatte
r.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
scatter.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scatter.marker.colorbar.Ti
tle` instance or dict with compatible properties
titlefont
Deprecated: Please use
scatter.marker.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scatter.marker.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
"""
_mapped_properties = {
"titlefont": ("title", "font"),
"titleside": ("title", "side"),
}
def __init__(
self,
arg=None,
bgcolor=None,
bordercolor=None,
borderwidth=None,
dtick=None,
exponentformat=None,
len=None,
lenmode=None,
nticks=None,
outlinecolor=None,
outlinewidth=None,
separatethousands=None,
showexponent=None,
showticklabels=None,
showtickprefix=None,
showticksuffix=None,
thickness=None,
thicknessmode=None,
tick0=None,
tickangle=None,
tickcolor=None,
tickfont=None,
tickformat=None,
tickformatstops=None,
tickformatstopdefaults=None,
ticklen=None,
tickmode=None,
tickprefix=None,
ticks=None,
ticksuffix=None,
ticktext=None,
ticktextsrc=None,
tickvals=None,
tickvalssrc=None,
tickwidth=None,
title=None,
titlefont=None,
titleside=None,
x=None,
xanchor=None,
xpad=None,
y=None,
yanchor=None,
ypad=None,
**kwargs
):
"""
Construct a new ColorBar object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.scatter.marker.ColorBar`
bgcolor
Sets the color of padded area.
bordercolor
Sets the axis line color.
borderwidth
            Sets the width (in px) of the border enclosing this
            color bar.
dtick
Sets the step in-between ticks on this axis. Use with
`tick0`. Must be a positive number, or special strings
available to "log" and "date" axes. If the axis `type`
is "log", then ticks are set every 10^(n*dtick) where n
is the tick number. For example, to set a tick mark at
1, 10, 100, 1000, ... set dtick to 1. To set tick marks
at 1, 100, 10000, ... set dtick to 2. To set tick marks
at 1, 5, 25, 125, 625, 3125, ... set dtick to
log_10(5), or 0.69897000433. "log" has several special
values; "L<f>", where `f` is a positive number, gives
ticks linearly spaced in value (but not position). For
example `tick0` = 0.1, `dtick` = "L0.5" will put ticks
at 0.1, 0.6, 1.1, 1.6 etc. To show powers of 10 plus
small digits between, use "D1" (all digits) or "D2"
(only 2 and 5). `tick0` is ignored for "D1" and "D2".
If the axis `type` is "date", then you must convert the
time to milliseconds. For example, to set the interval
between ticks to one day, set `dtick` to 86400000.0.
"date" also has special values "M<n>" gives ticks
spaced by a number of months. `n` must be a positive
integer. To set ticks on the 15th of every third month,
set `tick0` to "2000-01-15" and `dtick` to "M3". To set
ticks every 4 years, set `dtick` to "M48"
exponentformat
Determines a formatting rule for the tick exponents.
For example, consider the number 1,000,000,000. If
"none", it appears as 1,000,000,000. If "e", 1e+9. If
"E", 1E+9. If "power", 1x10^9 (with 9 in a super
script). If "SI", 1G. If "B", 1B.
len
Sets the length of the color bar This measure excludes
the padding of both ends. That is, the color bar length
is this length minus the padding on both ends.
lenmode
Determines whether this color bar's length (i.e. the
measure in the color variation direction) is set in
units of plot "fraction" or in *pixels. Use `len` to
set the value.
nticks
Specifies the maximum number of ticks for the
particular axis. The actual number of ticks will be
chosen automatically to be less than or equal to
`nticks`. Has an effect only if `tickmode` is set to
"auto".
outlinecolor
Sets the axis line color.
outlinewidth
Sets the width (in px) of the axis line.
separatethousands
If "true", even 4-digit integers are separated
showexponent
If "all", all exponents are shown besides their
significands. If "first", only the exponent of the
first tick is shown. If "last", only the exponent of
the last tick is shown. If "none", no exponents appear.
showticklabels
Determines whether or not the tick labels are drawn.
showtickprefix
If "all", all tick labels are displayed with a prefix.
If "first", only the first tick is displayed with a
prefix. If "last", only the last tick is displayed with
a suffix. If "none", tick prefixes are hidden.
showticksuffix
Same as `showtickprefix` but for tick suffixes.
thickness
Sets the thickness of the color bar This measure
excludes the size of the padding, ticks and labels.
thicknessmode
Determines whether this color bar's thickness (i.e. the
measure in the constant color direction) is set in
units of plot "fraction" or in "pixels". Use
`thickness` to set the value.
tick0
Sets the placement of the first tick on this axis. Use
with `dtick`. If the axis `type` is "log", then you
must take the log of your starting tick (e.g. to set
the starting tick to 100, set the `tick0` to 2) except
when `dtick`=*L<f>* (see `dtick` for more info). If the
axis `type` is "date", it should be a date string, like
date data. If the axis `type` is "category", it should
be a number, using the scale where each category is
assigned a serial number from zero in the order it
appears.
tickangle
Sets the angle of the tick labels with respect to the
horizontal. For example, a `tickangle` of -90 draws the
tick labels vertically.
tickcolor
Sets the tick color.
tickfont
Sets the color bar's tick label font
tickformat
Sets the tick label formatting rule using d3 formatting
mini-languages which are very similar to those in
Python. For numbers, see:
https://github.com/d3/d3-3.x-api-
reference/blob/master/Formatting.md#d3_format And for
dates see: https://github.com/d3/d3-3.x-api-
reference/blob/master/Time-Formatting.md#format We add
one item to d3's date formatter: "%{n}f" for fractional
seconds with n digits. For example, *2016-10-13
09:15:23.456* with tickformat "%H~%M~%S.%2f" would
display "09~15~23.46"
tickformatstops
A tuple of :class:`plotly.graph_objects.scatter.marker.
colorbar.Tickformatstop` instances or dicts with
compatible properties
tickformatstopdefaults
When used in a template (as layout.template.data.scatte
r.marker.colorbar.tickformatstopdefaults), sets the
default property values to use for elements of
scatter.marker.colorbar.tickformatstops
ticklen
Sets the tick length (in px).
tickmode
Sets the tick mode for this axis. If "auto", the number
of ticks is set via `nticks`. If "linear", the
placement of the ticks is determined by a starting
position `tick0` and a tick step `dtick` ("linear" is
the default value if `tick0` and `dtick` are provided).
If "array", the placement of the ticks is set via
`tickvals` and the tick text is `ticktext`. ("array" is
the default value if `tickvals` is provided).
tickprefix
Sets a tick label prefix.
ticks
Determines whether ticks are drawn or not. If "", this
axis' ticks are not drawn. If "outside" ("inside"),
this axis' are drawn outside (inside) the axis lines.
ticksuffix
Sets a tick label suffix.
ticktext
Sets the text displayed at the ticks position via
`tickvals`. Only has an effect if `tickmode` is set to
"array". Used with `tickvals`.
ticktextsrc
Sets the source reference on Chart Studio Cloud for
ticktext .
tickvals
Sets the values at which ticks on this axis appear.
Only has an effect if `tickmode` is set to "array".
Used with `ticktext`.
tickvalssrc
Sets the source reference on Chart Studio Cloud for
tickvals .
tickwidth
Sets the tick width (in px).
title
:class:`plotly.graph_objects.scatter.marker.colorbar.Ti
tle` instance or dict with compatible properties
titlefont
Deprecated: Please use
scatter.marker.colorbar.title.font instead. Sets this
color bar's title font. Note that the title's font used
to be set by the now deprecated `titlefont` attribute.
titleside
Deprecated: Please use
scatter.marker.colorbar.title.side instead. Determines
the location of color bar's title with respect to the
color bar. Note that the title's location used to be
set by the now deprecated `titleside` attribute.
x
Sets the x position of the color bar (in plot
fraction).
xanchor
Sets this color bar's horizontal position anchor. This
anchor binds the `x` position to the "left", "center"
or "right" of the color bar.
xpad
Sets the amount of padding (in px) along the x
direction.
y
Sets the y position of the color bar (in plot
fraction).
yanchor
Sets this color bar's vertical position anchor This
anchor binds the `y` position to the "top", "middle" or
"bottom" of the color bar.
ypad
Sets the amount of padding (in px) along the y
direction.
Returns
-------
ColorBar
"""
super(ColorBar, self).__init__("colorbar")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
# Validate arg
# ------------
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError(
"""\
The first argument to the plotly.graph_objs.scatter.marker.ColorBar
constructor must be a dict or
an instance of :class:`plotly.graph_objs.scatter.marker.ColorBar`"""
)
# Handle skip_invalid
# -------------------
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
# Populate data dict with properties
# ----------------------------------
_v = arg.pop("bgcolor", None)
_v = bgcolor if bgcolor is not None else _v
if _v is not None:
self["bgcolor"] = _v
_v = arg.pop("bordercolor", None)
_v = bordercolor if bordercolor is not None else _v
if _v is not None:
self["bordercolor"] = _v
_v = arg.pop("borderwidth", None)
_v = borderwidth if borderwidth is not None else _v
if _v is not None:
self["borderwidth"] = _v
_v = arg.pop("dtick", None)
_v = dtick if dtick is not None else _v
if _v is not None:
self["dtick"] = _v
_v = arg.pop("exponentformat", None)
_v = exponentformat if exponentformat is not None else _v
if _v is not None:
self["exponentformat"] = _v
_v = arg.pop("len", None)
_v = len if len is not None else _v
if _v is not None:
self["len"] = _v
_v = arg.pop("lenmode", None)
_v = lenmode if lenmode is not None else _v
if _v is not None:
self["lenmode"] = _v
_v = arg.pop("nticks", None)
_v = nticks if nticks is not None else _v
if _v is not None:
self["nticks"] = _v
_v = arg.pop("outlinecolor", None)
_v = outlinecolor if outlinecolor is not None else _v
if _v is not None:
self["outlinecolor"] = _v
_v = arg.pop("outlinewidth", None)
_v = outlinewidth if outlinewidth is not None else _v
if _v is not None:
self["outlinewidth"] = _v
_v = arg.pop("separatethousands", None)
_v = separatethousands if separatethousands is not None else _v
if _v is not None:
self["separatethousands"] = _v
_v = arg.pop("showexponent", None)
_v = showexponent if showexponent is not None else _v
if _v is not None:
self["showexponent"] = _v
_v = arg.pop("showticklabels", None)
_v = showticklabels if showticklabels is not None else _v
if _v is not None:
self["showticklabels"] = _v
_v = arg.pop("showtickprefix", None)
_v = showtickprefix if showtickprefix is not None else _v
if _v is not None:
self["showtickprefix"] = _v
_v = arg.pop("showticksuffix", None)
_v = showticksuffix if showticksuffix is not None else _v
if _v is not None:
self["showticksuffix"] = _v
_v = arg.pop("thickness", None)
_v = thickness if thickness is not None else _v
if _v is not None:
self["thickness"] = _v
_v = arg.pop("thicknessmode", None)
_v = thicknessmode if thicknessmode is not None else _v
if _v is not None:
self["thicknessmode"] = _v
_v = arg.pop("tick0", None)
_v = tick0 if tick0 is not None else _v
if _v is not None:
self["tick0"] = _v
_v = arg.pop("tickangle", None)
_v = tickangle if tickangle is not None else _v
if _v is not None:
self["tickangle"] = _v
_v = arg.pop("tickcolor", None)
_v = tickcolor if tickcolor is not None else _v
if _v is not None:
self["tickcolor"] = _v
_v = arg.pop("tickfont", None)
_v = tickfont if tickfont is not None else _v
if _v is not None:
self["tickfont"] = _v
_v = arg.pop("tickformat", None)
_v = tickformat if tickformat is not None else _v
if _v is not None:
self["tickformat"] = _v
_v = arg.pop("tickformatstops", None)
_v = tickformatstops if tickformatstops is not None else _v
if _v is not None:
self["tickformatstops"] = _v
_v = arg.pop("tickformatstopdefaults", None)
_v = tickformatstopdefaults if tickformatstopdefaults is not None else _v
if _v is not None:
self["tickformatstopdefaults"] = _v
_v = arg.pop("ticklen", None)
_v = ticklen if ticklen is not None else _v
if _v is not None:
self["ticklen"] = _v
_v = arg.pop("tickmode", None)
_v = tickmode if tickmode is not None else _v
if _v is not None:
self["tickmode"] = _v
_v = arg.pop("tickprefix", None)
_v = tickprefix if tickprefix is not None else _v
if _v is not None:
self["tickprefix"] = _v
_v = arg.pop("ticks", None)
_v = ticks if ticks is not None else _v
if _v is not None:
self["ticks"] = _v
_v = arg.pop("ticksuffix", None)
_v = ticksuffix if ticksuffix is not None else _v
if _v is not None:
self["ticksuffix"] = _v
_v = arg.pop("ticktext", None)
_v = ticktext if ticktext is not None else _v
if _v is not None:
self["ticktext"] = _v
_v = arg.pop("ticktextsrc", None)
_v = ticktextsrc if ticktextsrc is not None else _v
if _v is not None:
self["ticktextsrc"] = _v
_v = arg.pop("tickvals", None)
_v = tickvals if tickvals is not None else _v
if _v is not None:
self["tickvals"] = _v
_v = arg.pop("tickvalssrc", None)
_v = tickvalssrc if tickvalssrc is not None else _v
if _v is not None:
self["tickvalssrc"] = _v
_v = arg.pop("tickwidth", None)
_v = tickwidth if tickwidth is not None else _v
if _v is not None:
self["tickwidth"] = _v
_v = arg.pop("title", None)
_v = title if title is not None else _v
if _v is not None:
self["title"] = _v
_v = arg.pop("titlefont", None)
_v = titlefont if titlefont is not None else _v
if _v is not None:
self["titlefont"] = _v
_v = arg.pop("titleside", None)
_v = titleside if titleside is not None else _v
if _v is not None:
self["titleside"] = _v
_v = arg.pop("x", None)
_v = x if x is not None else _v
if _v is not None:
self["x"] = _v
_v = arg.pop("xanchor", None)
_v = xanchor if xanchor is not None else _v
if _v is not None:
self["xanchor"] = _v
_v = arg.pop("xpad", None)
_v = xpad if xpad is not None else _v
if _v is not None:
self["xpad"] = _v
_v = arg.pop("y", None)
_v = y if y is not None else _v
if _v is not None:
self["y"] = _v
_v = arg.pop("yanchor", None)
_v = yanchor if yanchor is not None else _v
if _v is not None:
self["yanchor"] = _v
_v = arg.pop("ypad", None)
_v = ypad if ypad is not None else _v
if _v is not None:
self["ypad"] = _v
# Process unknown kwargs
# ----------------------
self._process_kwargs(**dict(arg, **kwargs))
# Reset skip_invalid
# ------------------
self._skip_invalid = False
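# Illustrative usage sketch (not part of the generated module; the trace values
# are made up): a colorbar is normally configured through a scatter trace's
# marker, and every key passed to `colorbar` maps onto a ColorBar property
# defined above.
#
#   import plotly.graph_objects as go
#   fig = go.Figure(go.Scatter(
#       x=[1, 2, 3], y=[2, 4, 8], mode="markers",
#       marker=dict(color=[2, 4, 8], showscale=True,
#                   colorbar=dict(title=dict(text="value"),
#                                 ticks="outside", ticklen=6, tickwidth=2)),
#   ))
#   fig.show()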
| mit |
dominicmeroux/Reading-In-and-Analyzing-Calendar-Data-by-Interfacing-Between-MySQL-and-Python | Utilization-Report-MySQL.py | 1 | 18653 | from __future__ import print_function
from icalendar import *
from datetime import date, datetime, timedelta
import mysql.connector
from mysql.connector import errorcode
import pickle
import csv
import pandas
from pandas.io import sql
import matplotlib.pyplot as plt
import xlsxwriter
import numpy as np
import os
import re
import glob
import pytz
from StringIO import StringIO
#from zipfile import ZipFile
from urllib import urlopen
import calendar_parser as cp
# for calendar_parser, I downloaded the Python file created for this package
# https://github.com/oblique63/Python-GoogleCalendarParser/blob/master/calendar_parser.py
# and saved it in the working directory with my Python file (Jupyter Notebook file).
# In calendar_parser.py, their function _fix_timezone is very crucial for my code to
# display the correct local time.
USER = ""  # enter database username
PASS = ""  # enter database password
HOST = ""  # enter hostname, e.g. '127.0.0.1'
cnx = mysql.connector.connect(user=USER, password=PASS, host=HOST)
cursor = cnx.cursor()
# Approach / Code modified from MySQL Connector web page
DB_NAME = "CalDb"
# 1) Creates database if it doesn't already exist
# 2) Then connects to the database
def create_database(cursor):
try:
cursor.execute(
"CREATE DATABASE {} DEFAULT CHARACTER SET 'utf8'".format(DB_NAME))
except mysql.connector.Error as err:
print("Failed creating database: {}".format(err))
exit(1)
try:
cnx.database = DB_NAME
except mysql.connector.Error as err:
if err.errno == errorcode.ER_BAD_DB_ERROR:
create_database(cursor)
cnx.database = DB_NAME
else:
print(err)
exit(1)
# Create table specifications
TABLES = {}
TABLES['eBike'] = (
"CREATE TABLE IF NOT EXISTS `eBike` ("
" `eBikeName` varchar(10),"
" `Organizer` varchar(100),"
" `Created` datetime NOT NULL,"
" `Start` datetime NOT NULL,"
" `End` datetime NOT NULL"
") ENGINE=InnoDB")
# If table does not already exist, this code will create it based on specifications
for name, ddl in TABLES.iteritems():
try:
print("Creating table {}: ".format(name), end='')
cursor.execute(ddl)
except mysql.connector.Error as err:
if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
print("already exists.")
else:
print(err.msg)
else:
print("OK")
# Obtain current count from each calendar to read in and add additional entries only
cursor.execute("SELECT COUNT(*) FROM eBike WHERE eBikeName = 'Gold'")
GoldExistingCount = cursor.fetchall()
cursor.execute("SELECT COUNT(*) FROM eBike WHERE eBikeName = 'Blue'")
BlueExistingCount = cursor.fetchall()
# Declare lists
eBikeName = []
Organizer = []
DTcreated = []
DTstart = []
DTend = []
Counter = 0
Cal1URL = ""  # Google Calendar URL (from Calendar Settings -> Private Address)
Cal2URL = ""  # URL of the second Google Calendar; this code scales to as many calendars as desired
# At a very large number of calendars (e.g. an entire company), this could be adapted to use parallel processing (e.g. PySpark)
Blue = Cal1URL
Gold = Cal2URL
URL_list = [Blue, Gold]
for i in URL_list:
Counter = 0
b = urlopen(i)
cal = Calendar.from_ical(b.read())
timezones = cal.walk('VTIMEZONE')
if (i == Blue):
BlueLen = len(cal.walk())
elif (i == Gold):
GoldLen = len(cal.walk())
#print (cal)
#print ("Stuff")
#print (cal.subcomponents)
for k in cal.walk():
if k.name == "VEVENT":
Counter += 1
if (i == Blue):
                if BlueLen - Counter > BlueExistingCount[0][0]:  # only append events not already stored for Blue
eBikeName.append('Blue')
Organizer.append( re.sub(r'mailto:', "", str(k.get('ORGANIZER') ) ) )
DTcreated.append( cp._fix_timezone( k.decoded('CREATED'), pytz.timezone(timezones[0]['TZID']) ) )
DTstart.append( cp._fix_timezone( k.decoded('DTSTART'), pytz.timezone(timezones[0]['TZID']) ) )
DTend.append( cp._fix_timezone( k.decoded('DTEND'), pytz.timezone(timezones[0]['TZID']) ) )
#print (k.property_items('ATTENDEE'))
elif (i == Gold):
                if GoldLen - Counter > GoldExistingCount[0][0]:  # only append events not already stored for Gold
eBikeName.append('Gold')
Organizer.append( re.sub(r'mailto:', "", str(k.get('ORGANIZER') ) ) )
DTcreated.append( cp._fix_timezone( k.decoded('CREATED'), pytz.timezone(timezones[0]['TZID']) ) )
DTstart.append( cp._fix_timezone( k.decoded('DTSTART'), pytz.timezone(timezones[0]['TZID']) ) )
DTend.append( cp._fix_timezone( k.decoded('DTEND'), pytz.timezone(timezones[0]['TZID']) ) )
b.close()
# Now that calendar data is fully read in, create a list with data in a format for
# entering into the MySQL database.
#
# At this point, if the MySQL Connector component is not desired, other approaches
# include creating a Pandas dataframe or something else.
# For reference, a Pandas dataframe could be created with the following command:
# df = pandas.DataFrame({'ORGANIZER' : Organizer,'CREATED' : DTcreated, 'DTSTART' : DTstart,'DTEND': DTend})
eBikeData = [] #####################################################
for i in range(len(DTcreated)):
# Add in condition that the organizer email address cannot be 'none' or any other P&T Management email
if (Organizer[i] != 'None' and Organizer[i] != 'lauren.bennett@berkeley.edu' and Organizer[i] != 'dmeroux@berkeley.edu' and Organizer[i] != 'berkeley.edu_534da9tjgdsahifulshf42lfbo@group.calendar.google.com'):
eBikeData.append((eBikeName[i], Organizer[i], DTcreated[i], DTstart[i], DTend[i]))
# Insert calendar data into MySQL table eBike
cursor.executemany("INSERT INTO eBike (eBikeName, Organizer, Created, Start, End) VALUES (%s, %s, %s, %s, %s)",
eBikeData)
cnx.commit()
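# executemany handles quoting/escaping of each row's values (including the
# datetime objects), and the single commit applies the whole batch together.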
# Find emails associated with reservations created at latest 6 days ago
cursor.execute("SELECT DISTINCT Organizer FROM eBike WHERE DATEDIFF(CURDATE(), Start) <= 6 AND DATEDIFF(CURDATE(), Start) >= 0")
WeeklyEmail = cursor.fetchall()
Email = []
for i in range(len(WeeklyEmail)):
Email.append(WeeklyEmail[i][0])
if(Email[i] != 'None'):
print(Email[i])
# https://xlsxwriter.readthedocs.org
# Workbook Document Name
workbook = xlsxwriter.Workbook('E-BikeUpdate' + datetime.strftime(datetime.now(), "%Y-%m-%d") + '.xlsx')
# Define 'bold' format
bold = workbook.add_format({'bold': True})
format1 = workbook.add_format({'bold': 1,
'bg_color': '#3CDAE5',
'font_color': '#092A51'})
format2 = workbook.add_format({'bold': 1,
'bg_color': '#DA7BD0',
'font_color': '#A50202'})
# Add Intro Sheet
worksheet = workbook.add_worksheet('INTRO')
worksheet.write('A1', 'Sheet', bold)
worksheet.write('A2', 'Ebike_Rides_by_User')
worksheet.write('A3', 'Trips_by_Res_Time')
worksheet.write('A4', 'Trips_by_Weekday')
worksheet.write('A5', 'Utilization')
worksheet.write('A6', 'Aggregate_Advance_Reservation')
worksheet.write('A7', 'Time_Series_Advance_Reservation')
worksheet.write('B1', 'Description', bold)
worksheet.write('B2', 'Total E-Bike Rides by User Email')
worksheet.write('B3', 'Total E-Bike Rides by Reservation Hour')
worksheet.write('B4', 'Total E-Bike Rides by Weekday')
worksheet.write('B5', 'Average and Maximum Percent and Hours Utilization')
worksheet.write('B6', 'Number of Days E-Bikes Were Reserved in Advance, by Count of Reservations')
worksheet.write('B7', 'Number of Days E-Bikes Were Reserved in Advance, by Reservation Start Datetime')
### Total e-Bike Rides by User
cursor.execute("SELECT Organizer, COUNT(*) AS Total_Rides FROM eBike GROUP BY Organizer ORDER BY Total_Rides DESC;")
TotalRides_by_User = cursor.fetchall()
# Worksheet Name
worksheet1 = workbook.add_worksheet('Ebike_Rides_by_User')
# Column Names
worksheet1.write('A1', 'User', bold)
worksheet1.write('B1', 'Total Rides', bold)
# Declare Starting Point for row, col
row = 1
col = 0
# Iterate over the data and write it out row by row
for UserEmail, UserRideCount in (TotalRides_by_User):
worksheet1.write(row, col, UserEmail)
worksheet1.write(row, col + 1, UserRideCount)
row += 1
# Conditional Formatting: E-bike Users with 20+ Rides
worksheet1.conditional_format('B1:B9999', {'type': 'cell',
'criteria': '>=',
'value': 20,
'format': format1})
### Total Trips by Reservation Time
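# DATE_FORMAT(Start, '%h %p') produces labels such as '09 AM'; grouping on the
# 24-hour value as well lets ORDER BY sort the labels chronologically.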
cursor.execute("SELECT EXTRACT(HOUR FROM Start) AS Hour_24, DATE_FORMAT(Start, '%h %p') AS Reservation_Time, COUNT(*) AS Total_Rides FROM eBike GROUP BY Reservation_Time, Hour_24 ORDER BY Hour_24 ASC")
Trips_by_Time = cursor.fetchall()
# Worksheet Name
worksheet2 = workbook.add_worksheet('Trips_by_Res_Time') # Data.
# Column Names
worksheet2.write('A1', 'Reservation Start Time', bold)
worksheet2.write('B1', 'Total Rides', bold)
# Declare Starting Point for row, col
row = 1
col = 0
# Iterate over the data and write it out row by row
for Hour_24, Reservation_Time, Total_Rides in (Trips_by_Time):
worksheet2.write(row, col, Reservation_Time)
worksheet2.write(row, col + 1, Total_Rides)
row += 1
# Add Chart
chart = workbook.add_chart({'type': 'line'})
# Add Data to Chart
chart.add_series({
'categories': '=Trips_by_Res_Time!$A$2:$A$16',
'values': '=Trips_by_Res_Time!$B$2:$B$16',
'fill': {'color': '#791484'},
'border': {'color': '#52B7CB'}
})
# Format Chart
chart.set_title({
'name': 'Total Rides by Reservation Start Time',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB',
},
})
chart.set_x_axis({
'name': 'Reservation Start Time',
'empty_cells': 'gaps',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB'
},
'num_font': {
'name': 'Arial',
'color': '#52B7CB',
},
})
chart.set_y_axis({
'name': 'Total Rides',
'empty_cells': 'gaps',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB'
},
'num_font': {
'italic': True,
'color': '#52B7CB',
},
})
# Remove Legend
chart.set_legend({'position': 'none'})
# Insert Chart
worksheet2.insert_chart('E1', chart)
# GO TO END OF DATA
### Total Trips by Weekday
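# ORDER BY FIELD(Weekday, 'MONDAY', ...) forces a Monday-to-Sunday ordering
# rather than the alphabetical order a plain ORDER BY Weekday would give.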
cursor.execute("SELECT DAYNAME(Start) AS Weekday, COUNT(*) AS Total_Rides FROM eBike GROUP BY Weekday ORDER BY FIELD(Weekday, 'MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY', 'SATURDAY', 'SUNDAY')")
Trips_by_Weekday = cursor.fetchall()
# Worksheet Name
worksheet3 = workbook.add_worksheet('Trips_by_Weekday')
# Column Names
worksheet3.write('A1', 'Weekday', bold)
worksheet3.write('B1', 'Total Rides', bold)
# Declare Starting Point for row, col
row = 1
col = 0
# Iterate over the data and write it out row by row
for Weekday, Total_Rides_by_Weekday in (Trips_by_Weekday):
worksheet3.write(row, col, Weekday)
worksheet3.write(row, col + 1, Total_Rides_by_Weekday)
row += 1
# Add Chart
chart = workbook.add_chart({'type': 'line'})
# Add Data to Chart
chart.add_series({
    'categories': '=Trips_by_Weekday!$A$2:$A$8',
    'values': '=Trips_by_Weekday!$B$2:$B$8',
'fill': {'color': '#791484'},
'border': {'color': '#52B7CB'}
})
# Format Chart
chart.set_title({
'name': 'Total Rides by Weekday',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB',
},
})
chart.set_x_axis({
'name': 'Weekday',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB'
},
'num_font': {
'name': 'Arial',
'color': '#52B7CB',
},
})
chart.set_y_axis({
'name': 'Total Rides',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB'
},
'num_font': {
'italic': True,
'color': '#52B7CB',
},
})
# Remove Legend
chart.set_legend({'position': 'none'})
# Insert Chart
worksheet3.insert_chart('E1', chart)
### Average and Maximum Hours and Percent Utilization by Weekday
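# Utilization is expressed against an 8-hour day (reserved hours / 8 * 100);
# reservations above 95% utilization are excluded as outliers, matching the
# note written to the worksheet below.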
cursor.execute("SELECT DAYNAME(Start) AS Weekday, MAX((HOUR(End - Start)*60 + MINUTE(End - Start))/60) AS Max_Hours, (MAX((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 AS Max_PCT_Utilization, AVG((HOUR(End - Start)*60 + MINUTE(End - Start))/60) AS Avg_Hours, (AVG((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 AS Avg_PCT_Utilization FROM eBike WHERE (((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 < 95 GROUP BY Weekday ORDER BY FIELD(Weekday, 'MONDAY', 'TUESDAY', 'WEDNESDAY', 'THURSDAY', 'FRIDAY', 'SATURDAY', 'SUNDAY')")
Avg_Max_Hours_PCTutilization_by_Weekday = cursor.fetchall()
# Worksheet Name
worksheet4 = workbook.add_worksheet('Utilization')
# Column Names
worksheet4.write('A1', 'Weekday', bold)
worksheet4.write('B1', 'Maximum Reservation Duration (hrs)', bold)
worksheet4.write('C1', 'Maximum Percentage Utilization', bold)
worksheet4.write('D1', 'Average Reservation Duration (hrs)', bold)
worksheet4.write('E1', 'Average Percent Utilization', bold)
worksheet4.write('F1', 'NOTE: A small handfull of outliers above 95% utilization are excluded', bold)
# Declare Starting Point for row, col
row = 1
col = 0
# Iterate over the data and write it out row by row
for Weekday_AMH, Max_Hours, Max_PCT_Utilization, Avg_Hours, Avg_PCT_Utilization in (Avg_Max_Hours_PCTutilization_by_Weekday):
worksheet4.write(row, col, Weekday_AMH)
worksheet4.write(row, col + 1, Max_Hours)
worksheet4.write(row, col + 2, Max_PCT_Utilization)
worksheet4.write(row, col + 3, Avg_Hours)
worksheet4.write(row, col + 4, Avg_PCT_Utilization)
row += 1
# Conditional Formatting: Percent Utilization Greater Than 50
worksheet4.conditional_format('E2:E8', {'type': 'cell',
'criteria': '>=',
'value': 30,
'format': format1})
############################################
cursor.execute("SELECT Start, End, DAYNAME(Start) AS Weekday, ((HOUR(End - Start)*60 + MINUTE(End - Start))/60) AS Hours, (((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 AS PCT_Utilization FROM eBike ORDER BY (((HOUR(End - Start)*60 + MINUTE(End - Start))/60)/8)*100 DESC")
Utilization = cursor.fetchall()
worksheet4.write('A11', 'Reservation Start', bold)
worksheet4.write('B11', 'Reservation End', bold)
worksheet4.write('C11', 'Weekday', bold)
worksheet4.write('D11', 'Hours Reserved', bold)
worksheet4.write('E11', 'Percent Utilization', bold)
row += 3
col = 0
count = 12
for Start, End, Day, Hour, PCT_Utilization in (Utilization):
worksheet4.write(row, col, Start) ########################## https://xlsxwriter.readthedocs.io/working_with_dates_and_time.html
worksheet4.write(row, col + 1, End) #####
worksheet4.write(row, col + 2, Day) #####
worksheet4.write(row, col + 3, Hour)
worksheet4.write(row, col + 4, PCT_Utilization)
row += 1
if (PCT_Utilization > 95.0):
count += 1
# Add Chart
chart = workbook.add_chart({'type': 'column'})
# Add Data to Chart
chart.add_series({
'values': '=Utilization!$E$'+str(count)+':$E$'+str(len(Utilization)),
'fill': {'color': '#52B7CB'},
'border': {'color': '#52B7CB'}
})
count = 0
# Format Chart
chart.set_title({
'name': 'Percent Utilization',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB',
},
})
chart.set_x_axis({
'name': 'Reservation',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB'
},
'num_font': {
'name': 'Arial',
'color': '#52B7CB',
},
})
chart.set_y_axis({
'name': 'Percent Utilization',
'name_font': {
'name': 'Calibri',
'color': '#52B7CB'
},
'num_font': {
'italic': True,
'color': '#52B7CB',
},
})
# Remove Legend
chart.set_legend({'position': 'none'})
# Insert Chart
worksheet4.insert_chart('G4', chart)
####
### How far in advance reservations are created
cursor.execute("SELECT DATEDIFF(Start, Created) AS Days_Advance_Reservation, COUNT(*) AS Number_Reserved_Trips FROM eBike WHERE DATEDIFF(Start, Created) >= 0 GROUP BY Days_Advance_Reservation ORDER BY Days_Advance_Reservation DESC")
Advance_Reservation = cursor.fetchall()
# Worksheet Name
worksheet5 = workbook.add_worksheet('Aggregate_Advance_Reservation')
# Column Names
worksheet5.write('A1', 'Days E-Bike was Reserved Ahead of Time', bold)
worksheet5.write('B1', 'Total Reservations', bold)
# Declare Starting Point for row, col
row = 1
col = 0
# Iterate over the data and write it out row by row
for Days_Advance_Reservation, Number_Reserved_Trips in (Advance_Reservation):
worksheet5.write(row, col, Days_Advance_Reservation)
worksheet5.write(row, col + 1, Number_Reserved_Trips)
row += 1
worksheet5.conditional_format('B2:B9999', {'type': 'cell',
'criteria': '>=',
'value': 5,
'format': format2})
# Time series of how far in advance reservations are created
cursor.execute("SELECT Start, DATEDIFF(Start, Created) AS Days_Advance_Reservation FROM eBike WHERE DATEDIFF(Start, Created) > 0 ORDER BY Start ASC")
Time_Series_Advance_Reservation = cursor.fetchall()
Starts = []
for i in range(0, len(Time_Series_Advance_Reservation)):
Starts.append(str(Time_Series_Advance_Reservation[i][0]))
# Worksheet Name
worksheet6 = workbook.add_worksheet('Time_Series_Advance_Reservation')
# Column Names
worksheet6.write('A1', 'Reservation Start Date', bold)
worksheet6.write('B1', 'Days E-Bike was Reserved Ahead of Time', bold)
# Declare Starting Point for row, col
row = 1
col = 0
# Iterate over the data and write it out row by row
for StartVal in Starts:
worksheet6.write(row, col, StartVal)
row += 1
row = 1
for Start, Days_Advance_Reservation in (Time_Series_Advance_Reservation):
worksheet6.write(row, col + 1, Days_Advance_Reservation)
row += 1
# Add Chart
chart = workbook.add_chart({'type': 'line'})
worksheet6.conditional_format('B2:B9999', {'type': 'cell',
'criteria': '>=',
'value': 5,
'format': format2})
workbook.close()
cursor.close()
cnx.close()
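# Note: wrapping the cursor/connection in try/finally (or contextlib.closing)
# would guarantee cleanup if any step above raises; kept simple here.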
| mit |
gviejo/ThalamusPhysio | python/main_pop_pca.py | 1 | 15802 |
import numpy as np
import pandas as pd
# from matplotlib.pyplot import plot,show,draw
import scipy.io
from functions import *
import _pickle as cPickle
import time
import os, sys
import ipyparallel
import neuroseries as nts
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
# to know which neurons to keep
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
tmp2 = theta.index[theta.isnull().any(1)].values
tmp3 = theta.index[(theta['pvalue'] > 0.01).values].values
tmp = np.unique(np.concatenate([tmp2,tmp3]))
theta_modth = theta.drop(tmp, axis = 0)
neurons_index = theta_modth.index.values
bins1 = np.arange(-1005, 1010, 25)*1000
times = np.floor(((bins1[0:-1] + (bins1[1] - bins1[0])/2)/1000)).astype('int')
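# 25 ms bins spanning roughly +/- 1 s around each ripple (bin edges are in us);
# 'times' holds the bin centres expressed in ms.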
premeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}# BAD
posmeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}# BAD
bins2 = np.arange(-1012.5,1025,25)*1000
tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(3)}
clients = ipyparallel.Client()
print(clients.ids)
dview = clients.direct_view()
def compute_pop_pca(session):
data_directory = '/mnt/DataGuillaume/MergedData/'
import numpy as np
import scipy.io
import scipy.stats
import _pickle as cPickle
import time
import os, sys
import neuroseries as nts
from functions import loadShankStructure, loadSpikeData, loadEpoch, loadThetaMod, loadSpeed, loadXML, loadRipples, loadLFP, downsample, getPeaksandTroughs, butter_bandpass_filter
import pandas as pd
# to know which neurons to keep
data_directory = '/mnt/DataGuillaume/MergedData/'
datasets = np.loadtxt(data_directory+'datasets_ThalHpc.list', delimiter = '\n', dtype = str, comments = '#')
theta_mod, theta_ses = loadThetaMod('/mnt/DataGuillaume/MergedData/THETA_THAL_mod.pickle', datasets, return_index=True)
theta = pd.DataFrame( index = theta_ses['rem'],
columns = ['phase', 'pvalue', 'kappa'],
data = theta_mod['rem'])
tmp2 = theta.index[theta.isnull().any(1)].values
tmp3 = theta.index[(theta['pvalue'] > 0.01).values].values
tmp = np.unique(np.concatenate([tmp2,tmp3]))
theta_modth = theta.drop(tmp, axis = 0)
neurons_index = theta_modth.index.values
bins1 = np.arange(-1005, 1010, 25)*1000
times = np.floor(((bins1[0:-1] + (bins1[1] - bins1[0])/2)/1000)).astype('int')
premeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}
posmeanscore = {i:{'rem':pd.DataFrame(index = [], columns = ['mean', 'std']),'rip':pd.DataFrame(index = times, columns = [])} for i in range(3)}
bins2 = np.arange(-1012.5,1025,25)*1000
tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(3)}
# for session in datasets:
# for session in datasets[0:15]:
# for session in ['Mouse12/Mouse12-120815']:
start_time = time.clock()
print(session)
generalinfo = scipy.io.loadmat(data_directory+session+'/Analysis/GeneralInfo.mat')
shankStructure = loadShankStructure(generalinfo)
if len(generalinfo['channelStructure'][0][0][1][0]) == 2:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][1][0][0] - 1
else:
hpc_channel = generalinfo['channelStructure'][0][0][1][0][0][0][0] - 1
spikes,shank = loadSpikeData(data_directory+session+'/Analysis/SpikeData.mat', shankStructure['thalamus'])
wake_ep = loadEpoch(data_directory+session, 'wake')
sleep_ep = loadEpoch(data_directory+session, 'sleep')
sws_ep = loadEpoch(data_directory+session, 'sws')
rem_ep = loadEpoch(data_directory+session, 'rem')
sleep_ep = sleep_ep.merge_close_intervals(threshold=1.e3)
sws_ep = sleep_ep.intersect(sws_ep)
rem_ep = sleep_ep.intersect(rem_ep)
speed = loadSpeed(data_directory+session+'/Analysis/linspeed.mat').restrict(wake_ep)
speed_ep = nts.IntervalSet(speed[speed>2.5].index.values[0:-1], speed[speed>2.5].index.values[1:]).drop_long_intervals(26000).merge_close_intervals(50000)
wake_ep = wake_ep.intersect(speed_ep).drop_short_intervals(3000000)
n_channel,fs, shank_to_channel = loadXML(data_directory+session+"/"+session.split("/")[1]+'.xml')
rip_ep,rip_tsd = loadRipples(data_directory+session)
hd_info = scipy.io.loadmat(data_directory+session+'/Analysis/HDCells.mat')['hdCellStats'][:,-1]
hd_info_neuron = np.array([hd_info[n] for n in spikes.keys()])
all_neurons = np.array(list(spikes.keys()))
mod_neurons = np.array([int(n.split("_")[1]) for n in neurons_index if session.split("/")[1] in n])
if len(sleep_ep) > 1:
store = pd.HDFStore("/mnt/DataGuillaume/population_activity_25ms/"+session.split("/")[1]+".h5")
# all_pop = store['allwake']
pre_pop = store['presleep']
pos_pop = store['postsleep']
store.close()
store = pd.HDFStore("/mnt/DataGuillaume/population_activity_100ms/"+session.split("/")[1]+".h5")
all_pop = store['allwake']
# pre_pop = store['presleep']
# pos_pop = store['postsleep']
store.close()
def compute_eigen(popwak):
popwak = popwak - popwak.mean(0)
popwak = popwak / (popwak.std(0)+1e-8)
from sklearn.decomposition import PCA
pca = PCA(n_components = popwak.shape[1])
xy = pca.fit_transform(popwak.values)
pc = pca.explained_variance_ > (1 + np.sqrt(1/(popwak.shape[0]/popwak.shape[1])))**2.0
eigen = pca.components_[pc]
lambdaa = pca.explained_variance_[pc]
return eigen, lambdaa
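        # compute_eigen keeps components whose eigenvalue exceeds the
        # Marchenko-Pastur upper bound (1 + sqrt(n_neurons/n_bins))**2, i.e.
        # more variance than a random matrix of the same shape would produce.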
def compute_score(ep_pop, eigen, lambdaa, thr):
ep_pop = ep_pop - ep_pop.mean(0)
ep_pop = ep_pop / (ep_pop.std(0)+1e-8)
a = ep_pop.values
score = np.zeros(len(ep_pop))
for i in range(len(eigen)):
if lambdaa[i] >= thr:
score += (np.dot(a, eigen[i])**2.0 - np.dot(a**2.0, eigen[i]**2.0))
score = nts.Tsd(t = ep_pop.index.values, d = score)
return score
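        # compute_score(t) = sum_i [ (a(t).v_i)**2 - sum_j a_j(t)**2 * v_ij**2 ]
        # over the significant components: the squared projection minus its
        # diagonal term, a standard population reactivation-strength measure.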
def compute_rip_score(tsd, score, bins):
times = np.floor(((bins[0:-1] + (bins[1] - bins[0])/2)/1000)).astype('int')
rip_score = pd.DataFrame(index = times, columns = [])
for r,i in zip(tsd.index.values,range(len(tsd))):
xbins = (bins + r).astype('int')
y = score.groupby(pd.cut(score.index.values, bins=xbins, labels = times)).mean()
if ~y.isnull().any():
rip_score[r] = y
return rip_score
def get_xmin(ep, minutes):
duree = (ep['end'] - ep['start'])/1000/1000/60
tmp = ep.iloc[np.where(np.ceil(duree.cumsum()) <= minutes + 1)[0]]
return nts.IntervalSet(tmp['start'], tmp['end'])
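        # get_xmin keeps epochs until their cumulative duration reaches ~`minutes` min;
        # it is called on the reversed pre-sleep SWS (last ~30 min before wake) and on
        # the post-sleep SWS (first ~30 min after), so comparable amounts of sleep are used.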
pre_ep = nts.IntervalSet(sleep_ep['start'][0], sleep_ep['end'][0])
post_ep = nts.IntervalSet(sleep_ep['start'][1], sleep_ep['end'][1])
pre_sws_ep = sws_ep.intersect(pre_ep)
pos_sws_ep = sws_ep.intersect(post_ep)
pre_sws_ep = get_xmin(pre_sws_ep.iloc[::-1], 30)
pos_sws_ep = get_xmin(pos_sws_ep, 30)
if pre_sws_ep.tot_length('s')/60 > 5.0 and pos_sws_ep.tot_length('s')/60 > 5.0:
for hd in range(3):
if hd == 0 or hd == 2:
index = np.where(hd_info_neuron == 0)[0]
elif hd == 1:
index = np.where(hd_info_neuron == 1)[0]
if hd == 0:
index = np.intersect1d(index, mod_neurons)
elif hd == 2:
index = np.intersect1d(index, np.setdiff1d(all_neurons, mod_neurons))
allpop = all_pop[index].copy()
prepop = nts.TsdFrame(pre_pop[index].copy())
pospop = nts.TsdFrame(pos_pop[index].copy())
# prepop25ms = nts.TsdFrame(pre_pop_25ms[index].copy())
# pospop25ms = nts.TsdFrame(pos_pop_25ms[index].copy())
if allpop.shape[1] and allpop.shape[1] > 5:
eigen,lambdaa = compute_eigen(allpop)
seuil = 1.2
if np.sum(lambdaa > seuil):
pre_score = compute_score(prepop, eigen, lambdaa, seuil)
pos_score = compute_score(pospop, eigen, lambdaa, seuil)
prerip_score = compute_rip_score(rip_tsd.restrict(pre_sws_ep), pre_score, bins1)
posrip_score = compute_rip_score(rip_tsd.restrict(pos_sws_ep), pos_score, bins1)
# pre_score_25ms = compute_score(prepop25ms, eigen)
# pos_score_25ms = compute_score(pospop25ms, eigen)
# prerip25ms_score = compute_rip_score(rip_tsd.restrict(pre_ep), pre_score_25ms, bins2)
# posrip25ms_score = compute_rip_score(rip_tsd.restrict(post_ep), pos_score_25ms, bins2)
# prerip25ms_score = prerip25ms_score - prerip25ms_score.mean(0)
# posrip25ms_score = posrip25ms_score - posrip25ms_score.mean(0)
# prerip25ms_score = prerip25ms_score / prerip25ms_score.std(0)
# posrip25ms_score = posrip25ms_score / posrip25ms_score.std(0)
# prerip25ms_score = prerip25ms_score.loc[-500:500]
# posrip25ms_score = posrip25ms_score.loc[-500:500]
# sys.exit()
# tmp = pd.concat([pd.DataFrame(prerip25ms_score.idxmax().values, columns = ['pre']),pd.DataFrame(posrip25ms_score.idxmax().values, columns = ['pos'])],axis = 1)
# tmp = pd.DataFrame(data = [[prerip25ms_score.mean(1).idxmax(), posrip25ms_score.mean(1).idxmax()]], columns = ['pre', 'pos'])
# tsmax[hd] = tsmax[hd].append(tmp, ignore_index = True)
premeanscore[hd]['rip'][session] = prerip_score.mean(1)
posmeanscore[hd]['rip'][session] = posrip_score.mean(1)
# if len(rem_ep.intersect(pre_ep)) and len(rem_ep.intersect(post_ep)):
# premeanscore[hd]['rem'].loc[session,'mean'] = pre_score.restrict(rem_ep.intersect(pre_ep)).mean()
# posmeanscore[hd]['rem'].loc[session,'mean'] = pos_score.restrict(rem_ep.intersect(post_ep)).mean()
# premeanscore[hd]['rem'].loc[session,'std'] = pre_score.restrict(rem_ep.intersect(pre_ep)).std()
# posmeanscore[hd]['rem'].loc[session,'std'] = pos_score.restrict(rem_ep.intersect(post_ep)).std()
return [premeanscore, posmeanscore, tsmax]
# sys.exit()
a = dview.map_sync(compute_pop_pca, datasets)
prescore = {i:pd.DataFrame(index = times) for i in range(3)}
posscore = {i:pd.DataFrame(index = times) for i in range(3)}
for i in range(len(a)):
for j in range(3):
if len(a[i][0][j]['rip'].columns):
s = a[i][0][j]['rip'].columns[0]
prescore[j][s] = a[i][0][j]['rip']
posscore[j][s] = a[i][1][j]['rip']
# prescore = premeanscore
# posscore = posmeanscore
from pylab import *
titles = ['non hd mod', 'hd', 'non hd non mod']
figure()
for i in range(3):
subplot(1,3,i+1)
times = prescore[i].index.values
# for s in premeanscore[i]['rip'].index.values:
# plot(times, premeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'blue')
# plot(times, posmeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'red')
plot(times, gaussFilt(prescore[i].mean(1).values, (1,)), label = 'pre', color = 'blue', linewidth = 2)
plot(times, gaussFilt(posscore[i].mean(1).values, (1,)), label = 'post', color = 'red', linewidth = 2)
legend()
title(titles[i])
show()
sys.exit()
#########################################
# search for peak in 25 ms array
########################################
tsmax = {i:pd.DataFrame(columns = ['pre', 'pos']) for i in range(2)}
for i in range(len(a)):
for hd in range(2):
tsmax[hd] = tsmax[hd].append(a[i][2][hd], ignore_index = True)
from pylab import *
plot(tsmax[0]['pos'], np.ones(len(tsmax[0]['pos'])), 'o')
plot(tsmax[0]['pos'].mean(), [1], '|', markersize = 10)
plot(tsmax[1]['pos'], np.zeros(len(tsmax[1]['pos'])), 'o')
plot(tsmax[1]['pos'].mean(), [0], '|', markersize = 10)
sys.exit()
#########################################
# SAVING
########################################
store = pd.HDFStore("../figures/figures_articles/figure3/pca_analysis_3.h5")
for i,j in zip(range(3),('nohd_mod', 'hd', 'nohd_nomod')):
store.put(j+'pre_rip', prescore[i])
store.put(j+'pos_rip', posscore[i])
store.close()
# a = dview.map_sync(compute_population_correlation, datasets[0:15])
# for i in range(len(a)):
# if type(a[i]) is dict:
# s = list(a[i].keys())[0]
# premeanscore.loc[s] = a[i][s]['pre']
# posmeanscore.loc[s] = a[i][s]['pos']
from pylab import *
titles = ['non hd', 'hd']
figure()
for i in range(2):
subplot(1,3,i+1)
times = premeanscore[i]['rip'].columns.values
# for s in premeanscore[i]['rip'].index.values:
# plot(times, premeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'blue')
# plot(times, posmeanscore[i]['rip'].loc[s].values, linewidth = 0.3, color = 'red')
plot(times, gaussFilt(premeanscore[i]['rip'].mean(0).values, (1,)), label = 'pre', color = 'blue', linewidth = 2)
plot(times, gaussFilt(posmeanscore[i]['rip'].mean(0).values, (1,)),label = 'post', color = 'red', linewidth = 2)
legend()
title(titles[i])
subplot(1,3,3)
bar([1,2], [premeanscore[0]['rem'].mean(0)['mean'], premeanscore[1]['rem'].mean(0)['mean']])
bar([3,4], [posmeanscore[0]['rem'].mean(0)['mean'], posmeanscore[1]['rem'].mean(0)['mean']])
xticks([1,2,3,4], ['non hd', 'hd', 'non hd', 'hd'])
show()
figure()
subplot(121)
times = premeanscore[0]['rip'].columns.values
for s in premeanscore[0]['rip'].index.values:
print(s)
plot(times, premeanscore[0]['rip'].loc[s].values, linewidth = 1, color = 'blue')
plot(premeanscore[0]['rip'].mean(0))
subplot(122)
for s in posmeanscore[0]['rip'].index.values:
plot(times, posmeanscore[0]['rip'].loc[s].values, linewidth = 1, color = 'red')
plot(posmeanscore[0]['rip'].mean(0))
show()
| gpl-3.0 |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/utils/__init__.py | 2 | 12026 | """
The :mod:`sklearn.utils` module includes various utilities.
"""
from collections import Sequence
import numpy as np
from scipy.sparse import issparse
import warnings
from .murmurhash import murmurhash3_32
from .validation import (as_float_array,
assert_all_finite, warn_if_not_float,
check_random_state, column_or_1d, check_array,
check_consistent_length, check_X_y, indexable)
from .class_weight import compute_class_weight
from sklearn.utils.sparsetools import minimum_spanning_tree
__all__ = ["murmurhash3_32", "as_float_array",
"assert_all_finite", "check_array",
"warn_if_not_float",
"check_random_state",
"compute_class_weight",
"minimum_spanning_tree",
"column_or_1d", "safe_indexing",
"check_consistent_length", "check_X_y", 'indexable']
class deprecated(object):
"""Decorator to mark a function or class as deprecated.
Issue a warning when the function is called/the class is instantiated and
adds a warning to the docstring.
The optional extra argument will be appended to the deprecation message
and the docstring. Note: to use this with the default value for extra, put
    in an empty set of parentheses:
>>> from sklearn.utils import deprecated
>>> deprecated() # doctest: +ELLIPSIS
<sklearn.utils.deprecated object at ...>
>>> @deprecated()
... def some_function(): pass
"""
# Adapted from http://wiki.python.org/moin/PythonDecoratorLibrary,
# but with many changes.
def __init__(self, extra=''):
"""
Parameters
----------
extra: string
to be added to the deprecation messages
"""
self.extra = extra
def __call__(self, obj):
if isinstance(obj, type):
return self._decorate_class(obj)
else:
return self._decorate_fun(obj)
def _decorate_class(self, cls):
msg = "Class %s is deprecated" % cls.__name__
if self.extra:
msg += "; %s" % self.extra
# FIXME: we should probably reset __new__ for full generality
init = cls.__init__
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return init(*args, **kwargs)
cls.__init__ = wrapped
wrapped.__name__ = '__init__'
wrapped.__doc__ = self._update_doc(init.__doc__)
wrapped.deprecated_original = init
return cls
def _decorate_fun(self, fun):
"""Decorate function fun"""
msg = "Function %s is deprecated" % fun.__name__
if self.extra:
msg += "; %s" % self.extra
def wrapped(*args, **kwargs):
warnings.warn(msg, category=DeprecationWarning)
return fun(*args, **kwargs)
wrapped.__name__ = fun.__name__
wrapped.__dict__ = fun.__dict__
wrapped.__doc__ = self._update_doc(fun.__doc__)
return wrapped
def _update_doc(self, olddoc):
newdoc = "DEPRECATED"
if self.extra:
newdoc = "%s: %s" % (newdoc, self.extra)
if olddoc:
newdoc = "%s\n\n%s" % (newdoc, olddoc)
return newdoc
def safe_mask(X, mask):
"""Return a mask which is safe to use on X.
Parameters
----------
X : {array-like, sparse matrix}
Data on which to apply mask.
mask: array
Mask to be used on X.
Returns
-------
mask
"""
mask = np.asarray(mask)
if np.issubdtype(mask.dtype, np.int):
return mask
if hasattr(X, "toarray"):
ind = np.arange(mask.shape[0])
mask = ind[mask]
return mask
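# Illustrative usage (not part of the original module): a boolean mask is
# passed through unchanged for a dense array, but converted to integer row
# indices for a sparse matrix so that X[mask] keeps working, e.g.
#   safe_mask(np.arange(6).reshape(3, 2), np.array([True, False, True]))
#     -> array([ True, False,  True])
#   safe_mask(scipy.sparse.csr_matrix(np.arange(6).reshape(3, 2)),
#             np.array([True, False, True]))
#     -> array([0, 2])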
def safe_indexing(X, indices):
"""Return items or rows from X using indices.
Allows simple indexing of lists or arrays.
Parameters
----------
X : array-like, sparse-matrix, list.
Data from which to sample rows or items.
indices : array-like, list
Indices according to which X will be subsampled.
"""
if hasattr(X, "iloc"):
# Pandas Dataframes and Series
return X.iloc[indices]
elif hasattr(X, "shape"):
if hasattr(X, 'take') and (hasattr(indices, 'dtype') and
indices.dtype.kind == 'i'):
# This is often substantially faster than X[indices]
return X.take(indices, axis=0)
else:
return X[indices]
else:
return [X[idx] for idx in indices]
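# Illustrative usage (not part of the original module): the same row selection
# works on plain lists, NumPy arrays and pandas objects, e.g.
#   safe_indexing([10, 20, 30], [0, 2]) returns [10, 30], while
#   safe_indexing(np.arange(6).reshape(3, 2), np.array([0, 2])) returns
#   array([[0, 1], [4, 5]]) via the faster X.take(..., axis=0) path.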
def resample(*arrays, **options):
"""Resample arrays or sparse matrices in a consistent way
The default strategy implements one step of the bootstrapping
procedure.
Parameters
----------
`*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
replace : boolean, True by default
Implements resampling with replacement. If False, this will implement
(sliced) random permutations.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
Returns
-------
Sequence of resampled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import resample
>>> X, X_sparse, y = resample(X, X_sparse, y, random_state=0)
>>> X
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 4 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 1., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([0, 1, 0])
>>> resample(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:class:`sklearn.cross_validation.Bootstrap`
:func:`sklearn.utils.shuffle`
"""
random_state = check_random_state(options.pop('random_state', None))
replace = options.pop('replace', True)
max_n_samples = options.pop('n_samples', None)
if options:
raise ValueError("Unexpected kw arguments: %r" % options.keys())
if len(arrays) == 0:
return None
first = arrays[0]
n_samples = first.shape[0] if hasattr(first, 'shape') else len(first)
if max_n_samples is None:
max_n_samples = n_samples
if max_n_samples > n_samples:
raise ValueError("Cannot sample %d out of arrays with dim %d" % (
max_n_samples, n_samples))
check_consistent_length(*arrays)
arrays = [check_array(x, accept_sparse='csr', ensure_2d=False)
for x in arrays]
if replace:
indices = random_state.randint(0, n_samples, size=(max_n_samples,))
else:
indices = np.arange(n_samples)
random_state.shuffle(indices)
indices = indices[:max_n_samples]
resampled_arrays = []
for array in arrays:
array = array[indices]
resampled_arrays.append(array)
if len(resampled_arrays) == 1:
# syntactic sugar for the unit argument case
return resampled_arrays[0]
else:
return resampled_arrays
def shuffle(*arrays, **options):
"""Shuffle arrays or sparse matrices in a consistent way
This is a convenience alias to ``resample(*arrays, replace=False)`` to do
random permutations of the collections.
Parameters
----------
`*arrays` : sequence of arrays or scipy.sparse matrices with same shape[0]
random_state : int or RandomState instance
Control the shuffling for reproducible behavior.
n_samples : int, None by default
Number of samples to generate. If left to None this is
automatically set to the first dimension of the arrays.
Returns
-------
Sequence of shuffled views of the collections. The original arrays are
not impacted.
Examples
--------
It is possible to mix sparse and dense arrays in the same run::
>>> X = [[1., 0.], [2., 1.], [0., 0.]]
>>> y = np.array([0, 1, 2])
>>> from scipy.sparse import coo_matrix
>>> X_sparse = coo_matrix(X)
>>> from sklearn.utils import shuffle
>>> X, X_sparse, y = shuffle(X, X_sparse, y, random_state=0)
>>> X
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> X_sparse # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
<3x2 sparse matrix of type '<... 'numpy.float64'>'
with 3 stored elements in Compressed Sparse Row format>
>>> X_sparse.toarray()
array([[ 0., 0.],
[ 2., 1.],
[ 1., 0.]])
>>> y
array([2, 1, 0])
>>> shuffle(y, n_samples=2, random_state=0)
array([0, 1])
See also
--------
:func:`sklearn.utils.resample`
"""
options['replace'] = False
return resample(*arrays, **options)
def safe_sqr(X, copy=True):
"""Element wise squaring of array-likes and sparse matrices.
Parameters
----------
X : array like, matrix, sparse matrix
Returns
-------
X ** 2 : element wise square
"""
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'])
if issparse(X):
if copy:
X = X.copy()
X.data **= 2
else:
if copy:
X = X ** 2
else:
X **= 2
return X
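# Illustrative usage (not part of the original module):
#   safe_sqr(np.array([[1, -2, 3]])) returns array([[1, 4, 9]]), and for a
#   scipy.sparse matrix only the stored .data values are squared, in place
#   unless copy=True.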
def gen_batches(n, batch_size):
"""Generator to create slices containing batch_size elements, from 0 to n.
The last slice may contain less than batch_size elements, when batch_size
does not divide n.
Examples
--------
>>> from sklearn.utils import gen_batches
>>> list(gen_batches(7, 3))
[slice(0, 3, None), slice(3, 6, None), slice(6, 7, None)]
>>> list(gen_batches(6, 3))
[slice(0, 3, None), slice(3, 6, None)]
>>> list(gen_batches(2, 3))
[slice(0, 2, None)]
"""
start = 0
for _ in range(int(n // batch_size)):
end = start + batch_size
yield slice(start, end)
start = end
if start < n:
yield slice(start, n)
def gen_even_slices(n, n_packs, n_samples=None):
"""Generator to create n_packs slices going up to n.
Pass n_samples when the slices are to be used for sparse matrix indexing;
slicing off-the-end raises an exception, while it works for NumPy arrays.
Examples
--------
>>> from sklearn.utils import gen_even_slices
>>> list(gen_even_slices(10, 1))
[slice(0, 10, None)]
>>> list(gen_even_slices(10, 10)) #doctest: +ELLIPSIS
[slice(0, 1, None), slice(1, 2, None), ..., slice(9, 10, None)]
>>> list(gen_even_slices(10, 5)) #doctest: +ELLIPSIS
[slice(0, 2, None), slice(2, 4, None), ..., slice(8, 10, None)]
>>> list(gen_even_slices(10, 3))
[slice(0, 4, None), slice(4, 7, None), slice(7, 10, None)]
"""
start = 0
for pack_num in range(n_packs):
this_n = n // n_packs
if pack_num < n % n_packs:
this_n += 1
if this_n > 0:
end = start + this_n
if n_samples is not None:
end = min(n_samples, end)
yield slice(start, end, None)
start = end
def tosequence(x):
"""Cast iterable x to a Sequence, avoiding a copy if possible."""
if isinstance(x, np.ndarray):
return np.asarray(x)
elif isinstance(x, Sequence):
return x
else:
return list(x)
class ConvergenceWarning(Warning):
"Custom warning to capture convergence problems"
| bsd-3-clause |
AstroFloyd/LearningPython | Fitting/scipy.optimize.least_squares.py | 1 | 2724 | #!/bin/env python3
# https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.least_squares.html
"""Solve a curve fitting problem using robust loss function to take care of outliers in the data. Define the
model function as y = a + b * exp(c * t), where t is a predictor variable, y is an observation and a, b, c are
parameters to estimate.
"""
import numpy as np
from scipy.optimize import least_squares
# Function which generates the data with noise and outliers:
def gen_data(x, a, b, c, noise=0, n_outliers=0, random_state=0):
y = a + b*x + c*x**2
rnd = np.random.RandomState(random_state)
error = noise * rnd.randn(x.size)
outliers = rnd.randint(0, x.size, n_outliers)
error[outliers] *= 10
return y + error
# Function for computing residuals:
def resFun(c, x, y):
return c[0] + c[1] * x + c[2] * x**2 - y
trueCoefs = [-5, 1, 3]
sigma = 1.5
print("True coefficients: ", trueCoefs)
print("Sigma: ", sigma)
f = np.poly1d(trueCoefs)
xDat = np.linspace(0, 2, 20)
errors = sigma*np.random.normal(size=len(xDat))
yDat = f(xDat) + errors
# Initial estimate of parameters:
# x0 = np.array([1.0, 1.0, 0.0])
x0 = np.array([-4.0, 2.0, 5.0])
# Compute a standard least-squares solution:
res = least_squares(resFun, x0, args=(xDat, yDat))
#print('res: ', res)
print('Success: ', res.success)
print('Cost: ', res.cost)
print('Optimality: ', res.optimality)
print('Coefficients: ', res.x)
print('Grad: ', res.grad)
print('Residuals: ', res.fun)
Chi2 = sum(res.fun**2)
redChi2 = Chi2/(len(xDat)-len(res.x)) # Reduced Chi^2 = Chi^2 / (n-m)
print("Chi2: ", Chi2, res.cost*2)
print("Red. Chi2: ", redChi2)
# Plot the curves. By selecting an appropriate loss we can get estimates close to optimal even in the
# presence of strong outliers. Keep in mind that it is generally recommended to try the 'soft_l1' or
# 'huber' losses first (if robustness is needed at all), as the more aggressive 'cauchy' and 'arctan'
# losses may cause difficulties in the optimization process.
y_true = gen_data(xDat, trueCoefs[2], trueCoefs[1], trueCoefs[0])
y_lsq = gen_data(xDat, *res.x)
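# Optional robust fit (sketch only, not part of the original script): as noted
# above, a 'soft_l1' or 'huber' loss can down-weight outliers. Uncomment to try:
# res_robust = least_squares(resFun, x0, loss='soft_l1', f_scale=1.0,
#                            args=(xDat, yDat))
# y_robust = gen_data(xDat, *res_robust.x)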
print()
#exit()
import matplotlib.pyplot as plt
#plt.style.use('dark_background') # Invert colours
#plt.plot(xDat, yDat, 'o')
plt.errorbar(xDat, yDat, yerr=errors, fmt='ro') # Plot red circles with actual error bars
plt.plot(xDat, y_true, 'k', linewidth=2, label='true')
plt.plot(xDat, y_lsq, label='linear loss')
plt.xlabel("t")
plt.ylabel("y")
plt.legend()
plt.tight_layout()
# plt.show()
plt.savefig('scipy.optimize.least_squares.png') # Save the plot as png
plt.close() # Close the plot in order to start a new one later
| gpl-3.0 |
sugartom/tensorflow-alien | tensorflow/contrib/learn/python/learn/tests/dataframe/in_memory_source_test.py | 62 | 3960 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests NumpySource and PandasSource."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.contrib.learn.python.learn.dataframe.transforms import in_memory_source
from tensorflow.python.client import session
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
from tensorflow.python.training import coordinator
from tensorflow.python.training import queue_runner_impl
# pylint: disable=g-import-not-at-top
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
HAS_PANDAS = False
def get_rows(array, row_indices):
rows = [array[i] for i in row_indices]
return np.vstack(rows)
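# For example (illustrative only): with array = np.arange(6).reshape(3, 2),
# get_rows(array, [0, 2]) returns np.array([[0, 1], [4, 5]]).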
class NumpySourceTestCase(test.TestCase):
def testNumpySource(self):
batch_size = 3
iterations = 1000
array = np.arange(32).reshape([16, 2])
numpy_source = in_memory_source.NumpySource(array, batch_size=batch_size)
index_column = numpy_source().index
value_column = numpy_source().value
cache = {}
with ops.Graph().as_default():
value_tensor = value_column.build(cache)
index_tensor = index_column.build(cache)
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
expected_index = [
j % array.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_value = get_rows(array, expected_index)
actual_index, actual_value = sess.run([index_tensor, value_tensor])
np.testing.assert_array_equal(expected_index, actual_index)
np.testing.assert_array_equal(expected_value, actual_value)
coord.request_stop()
coord.join(threads)
class PandasSourceTestCase(test.TestCase):
def testPandasFeeding(self):
if not HAS_PANDAS:
return
batch_size = 3
iterations = 1000
index = np.arange(100, 132)
a = np.arange(32)
b = np.arange(32, 64)
dataframe = pd.DataFrame({"a": a, "b": b}, index=index)
pandas_source = in_memory_source.PandasSource(
dataframe, batch_size=batch_size)
pandas_columns = pandas_source()
cache = {}
with ops.Graph().as_default():
pandas_tensors = [col.build(cache) for col in pandas_columns]
with session.Session() as sess:
coord = coordinator.Coordinator()
threads = queue_runner_impl.start_queue_runners(sess=sess, coord=coord)
for i in range(iterations):
indices = [
j % dataframe.shape[0]
for j in range(batch_size * i, batch_size * (i + 1))
]
expected_df_indices = dataframe.index[indices]
expected_rows = dataframe.iloc[indices]
actual_value = sess.run(pandas_tensors)
np.testing.assert_array_equal(expected_df_indices, actual_value[0])
for col_num, col in enumerate(dataframe.columns):
np.testing.assert_array_equal(expected_rows[col].values,
actual_value[col_num + 1])
coord.request_stop()
coord.join(threads)
if __name__ == "__main__":
test.main()
| apache-2.0 |
scienceopen/spectral_analysis | scripts/FilterDesign.py | 1 | 3846 | #!/usr/bin/env python
"""
Design FIR filter coefficients using Parks-McClellan or windowing algorithm
and plot filter transfer function.
Michael Hirsch, Ph.D.
example for PiRadar CW prototype,
writing filter coefficients for use by filters.f90:
./FilterDesign.py 9950 10050 100e3 -L 4096 -m firwin -o cwfir.asc
Refs:
http://www.iowahills.com/5FIRFiltersPage.html
"""
import numpy as np
from pathlib import Path
import scipy.signal as signal
from matplotlib.pyplot import show, figure
from argparse import ArgumentParser
from signal_subspace.plots import plotfilt
try:
import seaborn as sns
sns.set_context("talk")
except ImportError:
pass
def computefir(fc, L: int, ofn, fs: int, method: str):
"""
bandpass FIR design
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.firwin.html
http://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.remez.html
L: number of taps
output:
b: FIR filter coefficients
"""
assert len(fc) == 2, "specify lower and upper bandpass filter corner frequencies in Hz."
if method == "remez":
b = signal.remez(numtaps=L, bands=[0, 0.9 * fc[0], fc[0], fc[1], 1.1 * fc[1], 0.5 * fs], desired=[0, 1, 0], Hz=fs)
elif method == "firwin":
b = signal.firwin(L, [fc[0], fc[1]], window="blackman", pass_zero=False, nyq=fs // 2)
elif method == "firwin2":
b = signal.firwin2(
L,
[0, fc[0], fc[1], fs // 2],
[0, 1, 1, 0],
window="blackman",
nyq=fs // 2,
# antisymmetric=True,
)
else:
raise ValueError(f"unknown filter design method {method}")
if ofn:
ofn = Path(ofn).expanduser()
print(f"writing {ofn}")
# FIXME make binary
with ofn.open("w") as h:
h.write(f"{b.size}\n") # first line is number of coefficients
            b.tofile(h, sep=" ")  # second line is space-delimited coefficients
return b
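# Example call (sketch only, mirroring the CW prototype line in the module
# docstring): a 4096-tap band-pass filter for 9950-10050 Hz at fs = 100 kHz,
# without writing the coefficients to disk:
#   b = computefir([9950, 10050], L=4096, ofn=None, fs=100e3, method="firwin")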
def butterplot(fs, fc):
"""
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.butter.html
"""
b, a = signal.butter(4, 100, "low", analog=True)
w, h = signal.freqs(b, a)
ax = figure().gca()
ax.semilogx(fs * 0.5 / np.pi * w, 20 * np.log10(abs(h)))
ax.set_title("Butterworth filter frequency response")
ax.set_xlabel("Frequency [Hz]")
ax.set_ylabel("Amplitude [dB]")
ax.grid(which="both", axis="both")
ax.axvline(fc, color="green") # cutoff frequency
ax.set_ylim(-50, 0)
def chebyshevplot(fs):
"""
https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.cheby1.html#scipy.signal.cheby1
"""
b, a = signal.cheby1(4, 5, 100, "high", analog=True)
w, h = signal.freqs(b, a)
ax = figure().gca()
ax.semilogx(w, 20 * np.log10(abs(h)))
ax.set_title("Chebyshev Type I frequency response (rp=5)")
ax.set_xlabel("Frequency [radians / second]")
ax.set_ylabel("Amplitude [dB]")
ax.grid(which="both", axis="both")
ax.axvline(100, color="green") # cutoff frequency
ax.axhline(-5, color="green") # rp
def main():
p = ArgumentParser()
p.add_argument("fc", help="lower,upper bandpass filter corner frequences [Hz]", nargs=2, type=float)
p.add_argument("fs", help="optional sampling frequency [Hz]", type=float)
p.add_argument("-o", "--ofn", help="output coefficient file to write")
p.add_argument("-L", help="number of coefficients for FIR filter", type=int, default=63)
p.add_argument("-m", "--method", help="filter design method [remez,firwin,firwin2]", default="firwin")
p.add_argument("-k", "--filttype", help="filter type: low, high, bandpass", default="low")
p = p.parse_args()
b = computefir(p.fc, p.L, p.ofn, p.fs, p.method)
plotfilt(b, p.fs, p.ofn)
show()
if __name__ == "__main__":
main()
| mit |
biocore/qiita | qiita_db/meta_util.py | 2 | 20723 | r"""
Util functions (:mod:`qiita_db.meta_util`)
===========================================
.. currentmodule:: qiita_db.meta_util
This module provides utility functions that use the ORM objects. ORM objects
CANNOT import from this file.
Methods
-------
.. autosummary::
:toctree: generated/
get_lat_longs
"""
# -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from os import stat, rename
from os.path import join, relpath, basename
from time import strftime, localtime
import matplotlib.pyplot as plt
import matplotlib as mpl
from base64 import b64encode
from urllib.parse import quote
from io import BytesIO
from datetime import datetime
from collections import defaultdict, Counter
from tarfile import open as topen, TarInfo
from hashlib import md5
from re import sub
from json import loads, dump, dumps
from qiita_db.util import create_nested_path
from qiita_core.qiita_settings import qiita_config, r_client
from qiita_core.configuration_manager import ConfigurationManager
import qiita_db as qdb
def _get_data_fpids(constructor, object_id):
"""Small function for getting filepath IDS associated with data object
Parameters
----------
constructor : a subclass of BaseData
E.g., RawData, PreprocessedData, or ProcessedData
object_id : int
The ID of the data object
Returns
-------
set of int
"""
with qdb.sql_connection.TRN:
obj = constructor(object_id)
return {fpid for fpid, _, _ in obj.get_filepaths()}
def validate_filepath_access_by_user(user, filepath_id):
"""Validates if the user has access to the filepath_id
Parameters
----------
user : User object
The user we are interested in
filepath_id : int
The filepath id
Returns
-------
bool
If the user has access or not to the filepath_id
Notes
-----
Admins have access to all files so True is always returned
"""
TRN = qdb.sql_connection.TRN
with TRN:
if user.level == "admin":
# admins have access all files
return True
sql = """SELECT
(SELECT array_agg(artifact_id)
FROM qiita.artifact_filepath
WHERE filepath_id = {0}) AS artifact,
(SELECT array_agg(study_id)
FROM qiita.sample_template_filepath
WHERE filepath_id = {0}) AS sample_info,
(SELECT array_agg(prep_template_id)
FROM qiita.prep_template_filepath
WHERE filepath_id = {0}) AS prep_info,
(SELECT array_agg(analysis_id)
FROM qiita.analysis_filepath
WHERE filepath_id = {0}) AS analysis""".format(filepath_id)
TRN.add(sql)
arid, sid, pid, anid = TRN.execute_fetchflatten()
# artifacts
if arid:
# [0] cause we should only have 1
artifact = qdb.artifact.Artifact(arid[0])
if artifact.visibility == 'public':
# TODO: https://github.com/biocore/qiita/issues/1724
if artifact.artifact_type in ['SFF', 'FASTQ', 'FASTA',
'FASTA_Sanger',
'per_sample_FASTQ']:
study = artifact.study
has_access = study.has_access(user, no_public=True)
if (not study.public_raw_download and not has_access):
return False
return True
else:
study = artifact.study
if study:
# let's take the visibility via the Study
return artifact.study.has_access(user)
else:
analysis = artifact.analysis
return analysis in (
user.private_analyses | user.shared_analyses)
# sample info files
elif sid:
# the visibility of the sample info file is given by the
# study visibility
# [0] cause we should only have 1
return qdb.study.Study(sid[0]).has_access(user)
# prep info files
elif pid:
# the prep access is given by it's artifacts, if the user has
# access to any artifact, it should have access to the prep
# [0] cause we should only have 1
pt = qdb.metadata_template.prep_template.PrepTemplate(
pid[0])
a = pt.artifact
            # however, the prep info file might not have any artifacts
            # attached; in that case we will use the study access level
if a is None:
return qdb.study.Study(pt.study_id).has_access(user)
else:
if (a.visibility == 'public' or a.study.has_access(user)):
return True
else:
for c in a.descendants.nodes():
if ((c.visibility == 'public' or
c.study.has_access(user))):
return True
return False
# analyses
elif anid:
# [0] cause we should only have 1
aid = anid[0]
analysis = qdb.analysis.Analysis(aid)
return analysis in (
user.private_analyses | user.shared_analyses)
return False
def update_redis_stats():
"""Generate the system stats and save them in redis
Returns
-------
list of str
artifact filepaths that are not present in the file system
"""
STUDY = qdb.study.Study
number_studies = {'public': 0, 'private': 0, 'sandbox': 0}
number_of_samples = {'public': 0, 'private': 0, 'sandbox': 0}
num_studies_ebi = 0
num_samples_ebi = 0
number_samples_ebi_prep = 0
stats = []
missing_files = []
per_data_type_stats = Counter()
for study in STUDY.iter():
st = study.sample_template
if st is None:
continue
# counting samples submitted to EBI-ENA
len_samples_ebi = sum([esa is not None
for esa in st.ebi_sample_accessions.values()])
if len_samples_ebi != 0:
num_studies_ebi += 1
num_samples_ebi += len_samples_ebi
samples_status = defaultdict(set)
for pt in study.prep_templates():
pt_samples = list(pt.keys())
pt_status = pt.status
if pt_status == 'public':
per_data_type_stats[pt.data_type()] += len(pt_samples)
samples_status[pt_status].update(pt_samples)
# counting experiments (samples in preps) submitted to EBI-ENA
number_samples_ebi_prep += sum([
esa is not None
for esa in pt.ebi_experiment_accessions.values()])
# counting studies
if 'public' in samples_status:
number_studies['public'] += 1
elif 'private' in samples_status:
number_studies['private'] += 1
else:
# note that this is a catch all for other status; at time of
# writing there is status: awaiting_approval
number_studies['sandbox'] += 1
# counting samples; note that some of these lines could be merged with
# the block above but I decided to split it in 2 for clarity
if 'public' in samples_status:
number_of_samples['public'] += len(samples_status['public'])
if 'private' in samples_status:
number_of_samples['private'] += len(samples_status['private'])
if 'sandbox' in samples_status:
number_of_samples['sandbox'] += len(samples_status['sandbox'])
# processing filepaths
for artifact in study.artifacts():
for adata in artifact.filepaths:
try:
s = stat(adata['fp'])
except OSError:
missing_files.append(adata['fp'])
else:
stats.append(
(adata['fp_type'], s.st_size, strftime('%Y-%m',
localtime(s.st_mtime))))
num_users = qdb.util.get_count('qiita.qiita_user')
num_processing_jobs = qdb.util.get_count('qiita.processing_job')
lat_longs = dumps(get_lat_longs())
summary = {}
all_dates = []
# these are some filetypes that are too small to plot alone so we'll merge
# in other
group_other = {'html_summary', 'tgz', 'directory', 'raw_fasta', 'log',
'biom', 'raw_sff', 'raw_qual', 'qza', 'html_summary_dir',
'qza', 'plain_text', 'raw_barcodes'}
for ft, size, ym in stats:
if ft in group_other:
ft = 'other'
if ft not in summary:
summary[ft] = {}
if ym not in summary[ft]:
summary[ft][ym] = 0
all_dates.append(ym)
summary[ft][ym] += size
all_dates = sorted(set(all_dates))
# sorting summaries
ordered_summary = {}
for dt in summary:
new_list = []
current_value = 0
for ad in all_dates:
if ad in summary[dt]:
current_value += summary[dt][ad]
new_list.append(current_value)
ordered_summary[dt] = new_list
plot_order = sorted([(k, ordered_summary[k][-1]) for k in ordered_summary],
key=lambda x: x[1])
# helper function to generate y axis, modified from:
# http://stackoverflow.com/a/1094933
def sizeof_fmt(value, position):
number = None
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(value) < 1024.0:
number = "%3.1f%s" % (value, unit)
break
value /= 1024.0
if number is None:
number = "%.1f%s" % (value, 'Yi')
return number
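    # For instance (illustrative only): sizeof_fmt(1536, None) returns '1.5K'
    # and sizeof_fmt(3 * 1024 ** 3, None) returns '3.0G'.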
all_dates_axis = range(len(all_dates))
plt.locator_params(axis='y', nbins=10)
plt.figure(figsize=(20, 10))
for k, v in plot_order:
plt.plot(all_dates_axis, ordered_summary[k], linewidth=2, label=k)
plt.xticks(all_dates_axis, all_dates)
plt.legend()
plt.grid()
ax = plt.gca()
ax.yaxis.set_major_formatter(mpl.ticker.FuncFormatter(sizeof_fmt))
plt.xticks(rotation=90)
plt.xlabel('Date')
plt.ylabel('Storage space per data type')
plot = BytesIO()
plt.savefig(plot, format='png')
plot.seek(0)
img = 'data:image/png;base64,' + quote(b64encode(plot.getbuffer()))
time = datetime.now().strftime('%m-%d-%y %H:%M:%S')
portal = qiita_config.portal
# making sure per_data_type_stats has some data so hmset doesn't fail
if per_data_type_stats == {}:
per_data_type_stats['No data'] = 0
vals = [
('number_studies', number_studies, r_client.hmset),
('number_of_samples', number_of_samples, r_client.hmset),
('per_data_type_stats', dict(per_data_type_stats), r_client.hmset),
('num_users', num_users, r_client.set),
('lat_longs', (lat_longs), r_client.set),
('num_studies_ebi', num_studies_ebi, r_client.set),
('num_samples_ebi', num_samples_ebi, r_client.set),
('number_samples_ebi_prep', number_samples_ebi_prep, r_client.set),
('img', img, r_client.set),
('time', time, r_client.set),
('num_processing_jobs', num_processing_jobs, r_client.set)]
for k, v, f in vals:
redis_key = '%s:stats:%s' % (portal, k)
# important to "flush" variables to avoid errors
r_client.delete(redis_key)
f(redis_key, v)
# preparing vals to insert into DB
vals = dumps(dict([x[:-1] for x in vals]))
sql = """INSERT INTO qiita.stats_daily (stats, stats_timestamp)
VALUES (%s, NOW())"""
qdb.sql_connection.perform_as_transaction(sql, [vals])
return missing_files
def get_lat_longs():
"""Retrieve the latitude and longitude of all the public samples in the DB
Returns
-------
list of [float, float]
The latitude and longitude for each sample in the database
"""
with qdb.sql_connection.TRN:
# getting all the public studies
studies = qdb.study.Study.get_by_status('public')
results = []
if studies:
# we are going to create multiple union selects to retrieve the
            # latitude and longitude of all available studies. Note that
# UNION in PostgreSQL automatically removes duplicates
sql_query = """
SELECT {0}, CAST(sample_values->>'latitude' AS FLOAT),
CAST(sample_values->>'longitude' AS FLOAT)
FROM qiita.sample_{0}
WHERE sample_values->>'latitude' != 'NaN' AND
sample_values->>'longitude' != 'NaN' AND
isnumeric(sample_values->>'latitude') AND
isnumeric(sample_values->>'longitude')"""
sql = [sql_query.format(s.id) for s in studies]
sql = ' UNION '.join(sql)
qdb.sql_connection.TRN.add(sql)
# note that we are returning set to remove duplicates
results = qdb.sql_connection.TRN.execute_fetchindex()
return results
def generate_biom_and_metadata_release(study_status='public'):
"""Generate a list of biom/meatadata filepaths and a tgz of those files
Parameters
----------
study_status : str, optional
The study status to search for. Note that this should always be set
to 'public' but having this exposed helps with testing. The other
options are 'private' and 'sandbox'
"""
studies = qdb.study.Study.get_by_status(study_status)
qiita_config = ConfigurationManager()
working_dir = qiita_config.working_dir
portal = qiita_config.portal
bdir = qdb.util.get_db_files_base_dir()
time = datetime.now().strftime('%m-%d-%y %H:%M:%S')
data = []
for s in studies:
# [0] latest is first, [1] only getting the filepath
sample_fp = relpath(s.sample_template.get_filepaths()[0][1], bdir)
for a in s.artifacts(artifact_type='BIOM'):
if a.processing_parameters is None or a.visibility != study_status:
continue
merging_schemes, parent_softwares = a.merging_scheme
software = a.processing_parameters.command.software
software = '%s v%s' % (software.name, software.version)
for x in a.filepaths:
if x['fp_type'] != 'biom' or 'only-16s' in x['fp']:
continue
fp = relpath(x['fp'], bdir)
for pt in a.prep_templates:
categories = pt.categories()
platform = ''
target_gene = ''
if 'platform' in categories:
platform = ', '.join(
set(pt.get_category('platform').values()))
if 'target_gene' in categories:
target_gene = ', '.join(
set(pt.get_category('target_gene').values()))
for _, prep_fp in pt.get_filepaths():
if 'qiime' not in prep_fp:
break
prep_fp = relpath(prep_fp, bdir)
# format: (biom_fp, sample_fp, prep_fp, qiita_artifact_id,
# platform, target gene, merging schemes,
# artifact software/version,
                    #          parent software/version)
data.append((fp, sample_fp, prep_fp, a.id, platform,
target_gene, merging_schemes, software,
parent_softwares))
# writing text and tgz file
ts = datetime.now().strftime('%m%d%y-%H%M%S')
tgz_dir = join(working_dir, 'releases')
create_nested_path(tgz_dir)
tgz_name = join(tgz_dir, '%s-%s-building.tgz' % (portal, study_status))
tgz_name_final = join(tgz_dir, '%s-%s.tgz' % (portal, study_status))
txt_lines = [
"biom fp\tsample fp\tprep fp\tqiita artifact id\tplatform\t"
"target gene\tmerging scheme\tartifact software\tparent software"]
with topen(tgz_name, "w|gz") as tgz:
for biom_fp, sample_fp, prep_fp, aid, pform, tg, ms, asv, psv in data:
txt_lines.append("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s" % (
biom_fp, sample_fp, prep_fp, aid, pform, tg, ms, asv, psv))
tgz.add(join(bdir, biom_fp), arcname=biom_fp, recursive=False)
tgz.add(join(bdir, sample_fp), arcname=sample_fp, recursive=False)
tgz.add(join(bdir, prep_fp), arcname=prep_fp, recursive=False)
info = TarInfo(name='%s-%s-%s.txt' % (portal, study_status, ts))
txt_hd = BytesIO()
txt_hd.write(bytes('\n'.join(txt_lines), 'ascii'))
txt_hd.seek(0)
info.size = len(txt_hd.read())
txt_hd.seek(0)
tgz.addfile(tarinfo=info, fileobj=txt_hd)
with open(tgz_name, "rb") as f:
md5sum = md5()
for c in iter(lambda: f.read(4096), b""):
md5sum.update(c)
rename(tgz_name, tgz_name_final)
vals = [
('filepath', tgz_name_final[len(working_dir):], r_client.set),
('md5sum', md5sum.hexdigest(), r_client.set),
('time', time, r_client.set)]
for k, v, f in vals:
redis_key = '%s:release:%s:%s' % (portal, study_status, k)
# important to "flush" variables to avoid errors
r_client.delete(redis_key)
f(redis_key, v)
def generate_plugin_releases():
"""Generate releases for plugins
"""
ARCHIVE = qdb.archive.Archive
qiita_config = ConfigurationManager()
working_dir = qiita_config.working_dir
commands = [c for s in qdb.software.Software.iter(active=True)
for c in s.commands if c.post_processing_cmd is not None]
tnow = datetime.now()
ts = tnow.strftime('%m%d%y-%H%M%S')
tgz_dir = join(working_dir, 'releases', 'archive')
create_nested_path(tgz_dir)
tgz_dir_release = join(tgz_dir, ts)
create_nested_path(tgz_dir_release)
for cmd in commands:
cmd_name = cmd.name
mschemes = [v for _, v in ARCHIVE.merging_schemes().items()
if cmd_name in v]
for ms in mschemes:
ms_name = sub('[^0-9a-zA-Z]+', '', ms)
ms_fp = join(tgz_dir_release, ms_name)
create_nested_path(ms_fp)
pfp = join(ms_fp, 'archive.json')
archives = {k: loads(v)
for k, v in ARCHIVE.retrieve_feature_values(
archive_merging_scheme=ms).items()
if v != ''}
with open(pfp, 'w') as f:
dump(archives, f)
# now let's run the post_processing_cmd
ppc = cmd.post_processing_cmd
# concatenate any other parameters into a string
params = ' '.join(["%s=%s" % (k, v) for k, v in
ppc['script_params'].items()])
# append archives file and output dir parameters
params = ("%s --fp_archive=%s --output_dir=%s" % (
params, pfp, ms_fp))
ppc_cmd = "%s %s %s" % (
ppc['script_env'], ppc['script_path'], params)
p_out, p_err, rv = qdb.processing_job._system_call(ppc_cmd)
p_out = p_out.rstrip()
if rv != 0:
raise ValueError('Error %d: %s' % (rv, p_out))
p_out = loads(p_out)
# tgz-ing all files
tgz_name = join(tgz_dir, 'archive-%s-building.tgz' % ts)
tgz_name_final = join(tgz_dir, 'archive.tgz')
with topen(tgz_name, "w|gz") as tgz:
tgz.add(tgz_dir_release, arcname=basename(tgz_dir_release))
# getting the release md5
with open(tgz_name, "rb") as f:
md5sum = md5()
for c in iter(lambda: f.read(4096), b""):
md5sum.update(c)
rename(tgz_name, tgz_name_final)
vals = [
('filepath', tgz_name_final[len(working_dir):], r_client.set),
('md5sum', md5sum.hexdigest(), r_client.set),
('time', tnow.strftime('%m-%d-%y %H:%M:%S'), r_client.set)]
for k, v, f in vals:
redis_key = 'release-archive:%s' % k
# important to "flush" variables to avoid errors
r_client.delete(redis_key)
f(redis_key, v)
| bsd-3-clause |
pratapvardhan/scikit-learn | examples/decomposition/plot_faces_decomposition.py | 103 | 4394 | """
============================
Faces dataset decompositions
============================
This example applies different unsupervised matrix decomposition (dimension
reduction) methods from the module :py:mod:`sklearn.decomposition` (see the
documentation chapter :ref:`decompositions`) to the :ref:`olivetti_faces`
dataset.
"""
print(__doc__)
# Authors: Vlad Niculae, Alexandre Gramfort
# License: BSD 3 clause
import logging
from time import time
from numpy.random import RandomState
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.cluster import MiniBatchKMeans
from sklearn import decomposition
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
n_row, n_col = 2, 3
n_components = n_row * n_col
image_shape = (64, 64)
rng = RandomState(0)
###############################################################################
# Load faces data
dataset = fetch_olivetti_faces(shuffle=True, random_state=rng)
faces = dataset.data
n_samples, n_features = faces.shape
# global centering
faces_centered = faces - faces.mean(axis=0)
# local centering
faces_centered -= faces_centered.mean(axis=1).reshape(n_samples, -1)
print("Dataset consists of %d faces" % n_samples)
###############################################################################
def plot_gallery(title, images, n_col=n_col, n_row=n_row):
plt.figure(figsize=(2. * n_col, 2.26 * n_row))
plt.suptitle(title, size=16)
for i, comp in enumerate(images):
plt.subplot(n_row, n_col, i + 1)
vmax = max(comp.max(), -comp.min())
plt.imshow(comp.reshape(image_shape), cmap=plt.cm.gray,
interpolation='nearest',
vmin=-vmax, vmax=vmax)
plt.xticks(())
plt.yticks(())
plt.subplots_adjust(0.01, 0.05, 0.99, 0.93, 0.04, 0.)
###############################################################################
# List of the different estimators, whether to center and transpose the
# problem, and whether the transformer uses the clustering API.
estimators = [
('Eigenfaces - RandomizedPCA',
decomposition.RandomizedPCA(n_components=n_components, whiten=True),
True),
('Non-negative components - NMF',
decomposition.NMF(n_components=n_components, init='nndsvda', tol=5e-3),
False),
('Independent components - FastICA',
decomposition.FastICA(n_components=n_components, whiten=True),
True),
('Sparse comp. - MiniBatchSparsePCA',
decomposition.MiniBatchSparsePCA(n_components=n_components, alpha=0.8,
n_iter=100, batch_size=3,
random_state=rng),
True),
('MiniBatchDictionaryLearning',
decomposition.MiniBatchDictionaryLearning(n_components=15, alpha=0.1,
n_iter=50, batch_size=3,
random_state=rng),
True),
('Cluster centers - MiniBatchKMeans',
MiniBatchKMeans(n_clusters=n_components, tol=1e-3, batch_size=20,
max_iter=50, random_state=rng),
True),
('Factor Analysis components - FA',
decomposition.FactorAnalysis(n_components=n_components, max_iter=2),
True),
]
###############################################################################
# Plot a sample of the input data
plot_gallery("First centered Olivetti faces", faces_centered[:n_components])
###############################################################################
# Do the estimation and plot it
for name, estimator, center in estimators:
print("Extracting the top %d %s..." % (n_components, name))
t0 = time()
data = faces
if center:
data = faces_centered
estimator.fit(data)
train_time = (time() - t0)
print("done in %0.3fs" % train_time)
if hasattr(estimator, 'cluster_centers_'):
components_ = estimator.cluster_centers_
else:
components_ = estimator.components_
if hasattr(estimator, 'noise_variance_'):
plot_gallery("Pixelwise variance",
estimator.noise_variance_.reshape(1, -1), n_col=1,
n_row=1)
plot_gallery('%s - Train time %.1fs' % (name, train_time),
components_[:n_components])
plt.show()
| bsd-3-clause |
gunan/tensorflow | tensorflow/python/keras/engine/data_adapter_test.py | 1 | 43158 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""DataAdapter tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl.testing import parameterized
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.experimental.ops import cardinality
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import test_util
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.engine import data_adapter
from tensorflow.python.keras.utils import data_utils
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
from tensorflow.python.util import nest
class DummyArrayLike(object):
"""Dummy array-like object."""
def __init__(self, data):
self.data = data
def __len__(self):
return len(self.data)
def __getitem__(self, key):
return self.data[key]
@property
def shape(self):
return self.data.shape
@property
def dtype(self):
return self.data.dtype
def fail_on_convert(x, **kwargs):
_ = x
_ = kwargs
raise TypeError('Cannot convert DummyArrayLike to a tensor')
ops.register_tensor_conversion_function(DummyArrayLike, fail_on_convert)
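# For example (illustrative only): DummyArrayLike(np.arange(4))[1:3] returns
# array([1, 2]) through __getitem__, while ops.convert_to_tensor_v2 on the
# wrapper raises TypeError because of the registration above.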
class DataAdapterTestBase(keras_parameterized.TestCase):
def setUp(self):
super(DataAdapterTestBase, self).setUp()
self.batch_size = 5
self.numpy_input = np.zeros((50, 10))
self.numpy_target = np.ones(50)
self.tensor_input = constant_op.constant(2.0, shape=(50, 10))
self.tensor_target = array_ops.ones((50,))
self.arraylike_input = DummyArrayLike(self.numpy_input)
self.arraylike_target = DummyArrayLike(self.numpy_target)
self.dataset_input = dataset_ops.DatasetV2.from_tensor_slices(
(self.numpy_input, self.numpy_target)).shuffle(50).batch(
self.batch_size)
def generator():
while True:
yield (np.zeros((self.batch_size, 10)), np.ones(self.batch_size))
self.generator_input = generator()
self.iterator_input = data_utils.threadsafe_generator(generator)()
self.sequence_input = TestSequence(batch_size=self.batch_size,
feature_shape=10)
self.model = keras.models.Sequential(
[keras.layers.Dense(8, input_shape=(10,), activation='softmax')])
class TestSequence(data_utils.Sequence):
def __init__(self, batch_size, feature_shape):
self.batch_size = batch_size
self.feature_shape = feature_shape
def __getitem__(self, item):
return (np.zeros((self.batch_size, self.feature_shape)),
np.ones((self.batch_size,)))
def __len__(self):
return 10
class TensorLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(TensorLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.TensorLikeDataAdapter
def test_can_handle_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(self.numpy_input))
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input, self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
def test_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_batch_size_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5)
self.assertEqual(adapter.batch_size(), 5)
def test_partial_batch_numpy(self):
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=4)
self.assertEqual(adapter.get_size(), 13) # 50/4
self.assertTrue(adapter.has_partial_batch())
self.assertEqual(adapter.partial_batch_size(), 2)
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.numpy_input, self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.numpy_input, self.numpy_target, batch_size=5)
def test_can_handle_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
self.assertTrue(self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)))
self.assertTrue(
self.adapter_cls.can_handle(pd.DataFrame(self.numpy_input)[0]))
self.assertTrue(
self.adapter_cls.can_handle(
pd.DataFrame(self.numpy_input),
pd.DataFrame(self.numpy_input)[0]))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_pandas(self):
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
self.skipTest('Skipping test because pandas is not installed.')
input_a = keras.Input(shape=(3,), name='input_a')
input_b = keras.Input(shape=(3,), name='input_b')
    input_c = keras.Input(shape=(1,), name='input_c')
x = keras.layers.Dense(4, name='dense_1')(input_a)
y = keras.layers.Dense(3, name='dense_2')(input_b)
z = keras.layers.Dense(1, name='dense_3')(input_c)
model_1 = keras.Model(inputs=input_a, outputs=x)
model_2 = keras.Model(inputs=[input_a, input_b], outputs=[x, y])
model_3 = keras.Model(inputs=input_c, outputs=z)
model_1.compile(optimizer='rmsprop', loss='mse')
model_2.compile(optimizer='rmsprop', loss='mse')
input_a_np = np.random.random((10, 3))
input_b_np = np.random.random((10, 3))
input_a_df = pd.DataFrame(input_a_np)
input_b_df = pd.DataFrame(input_b_np)
output_a_df = pd.DataFrame(np.random.random((10, 4)))
output_b_df = pd.DataFrame(np.random.random((10, 3)))
model_1.fit(input_a_df,
output_a_df)
model_2.fit([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.fit([input_a_df],
[output_a_df])
model_1.fit({'input_a': input_a_df},
output_a_df)
model_2.fit({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
model_1.evaluate(input_a_df,
output_a_df)
model_2.evaluate([input_a_df, input_b_df],
[output_a_df, output_b_df])
model_1.evaluate([input_a_df],
[output_a_df])
model_1.evaluate({'input_a': input_a_df},
output_a_df)
model_2.evaluate({'input_a': input_a_df, 'input_b': input_b_df},
[output_a_df, output_b_df])
# Verify predicting on pandas vs numpy returns the same result
predict_1_pandas = model_1.predict(input_a_df)
predict_2_pandas = model_2.predict([input_a_df, input_b_df])
predict_3_pandas = model_3.predict(input_a_df[0])
predict_1_numpy = model_1.predict(input_a_np)
predict_2_numpy = model_2.predict([input_a_np, input_b_np])
predict_3_numpy = model_3.predict(np.asarray(input_a_df[0]))
self.assertAllClose(predict_1_numpy, predict_1_pandas)
self.assertAllClose(predict_2_numpy, predict_2_pandas)
self.assertAllClose(predict_3_numpy, predict_3_pandas)
# Extra ways to pass in dataframes
model_1.predict([input_a_df])
model_1.predict({'input_a': input_a_df})
model_2.predict({'input_a': input_a_df, 'input_b': input_b_df})
def test_can_handle(self):
self.assertTrue(self.adapter_cls.can_handle(self.tensor_input))
self.assertTrue(
self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))
self.assertFalse(self.adapter_cls.can_handle(self.arraylike_input))
self.assertFalse(
self.adapter_cls.can_handle(self.arraylike_input,
self.arraylike_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.tensor_input, self.tensor_target, batch_size=5)
def test_size(self):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 32
x = np.arange(num_samples)
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle=True, epochs=2)
def _get_epoch(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return np.concatenate(ds_data)
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
def test_batch_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 6
x = np.arange(num_samples)
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle='batch', epochs=2)
def _get_epoch_batches(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return ds_data
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_batch_data = _get_epoch_batches(ds_iter)
epoch_data = np.concatenate(epoch_batch_data)
def _verify_batch(batch):
# Verify that a batch contains only contiguous data, and that it has
# been shuffled.
shuffled_batch = np.sort(batch)
self.assertNotAllClose(batch, shuffled_batch)
for i in range(1, len(batch)):
self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i])
# Assert that the data within each batch remains contiguous
for batch in epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_batch_data = _get_epoch_batches(ds_iter)
second_epoch_data = np.concatenate(second_epoch_batch_data)
# Assert that the data within each batch remains contiguous
for batch in second_epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
@parameterized.named_parameters(
('batch_size_5', 5, None, 5),
('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence
('steps_1', None, 1, 50),
('steps_4', None, 4, 13),
)
def test_batch_size(self, batch_size_in, steps, batch_size_out):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.batch_size(), batch_size_out)
@parameterized.named_parameters(
('batch_size_5', 5, None, 10, 0),
('batch_size_4', 4, None, 13, 2),
('steps_1', None, 1, 1, 0),
('steps_5', None, 5, 5, 0),
('steps_4', None, 4, 4, 11),
)
def test_partial_batch(
self, batch_size_in, steps, size, partial_batch_size):
adapter = self.adapter_cls(
self.tensor_input, self.tensor_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.get_size(), size) # 50/steps
self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)
class GenericArrayLikeDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(GenericArrayLikeDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.GenericArrayLikeDataAdapter
def test_can_handle_some_numpy(self):
self.assertTrue(self.adapter_cls.can_handle(
self.arraylike_input))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.arraylike_target))
# Because adapters are mutually exclusive, don't handle cases
# where all the data is numpy or an eagertensor
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(
self.adapter_cls.can_handle(self.numpy_input,
self.numpy_target))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(
self.adapter_cls.can_handle(self.tensor_input, self.tensor_target))
# But do handle mixes that include generic arraylike data
self.assertTrue(
self.adapter_cls.can_handle(self.numpy_input,
self.arraylike_target))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.numpy_target))
self.assertTrue(
self.adapter_cls.can_handle(self.arraylike_input,
self.tensor_target))
self.assertTrue(
self.adapter_cls.can_handle(self.tensor_input,
self.arraylike_target))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
def test_size(self):
adapter = self.adapter_cls(
self.arraylike_input,
self.arraylike_target, batch_size=5)
self.assertEqual(adapter.get_size(), 10)
self.assertFalse(adapter.has_partial_batch())
def test_epochs(self):
num_epochs = 3
adapter = self.adapter_cls(
self.arraylike_input,
self.numpy_target, batch_size=5, epochs=num_epochs)
ds_iter = iter(adapter.get_dataset())
num_batches_per_epoch = self.numpy_input.shape[0] // 5
for _ in range(num_batches_per_epoch * num_epochs):
next(ds_iter)
with self.assertRaises(StopIteration):
next(ds_iter)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
# First verify that DummyArrayLike can't be converted to a Tensor
with self.assertRaises(TypeError):
ops.convert_to_tensor_v2(self.arraylike_input)
# Then train on the array like.
# It should not be converted to a tensor directly (which would force it into
# memory), only the sliced data should be converted.
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.arraylike_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.arraylike_target,
shuffle=True, batch_size=5)
self.model.fit(self.arraylike_input,
self.arraylike_target,
shuffle='batch', batch_size=5)
self.model.evaluate(self.arraylike_input,
self.arraylike_target, batch_size=5)
self.model.predict(self.arraylike_input, batch_size=5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_numpy_target(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.numpy_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.numpy_target, shuffle=True,
batch_size=5)
self.model.fit(self.arraylike_input,
self.numpy_target, shuffle='batch',
batch_size=5)
self.model.evaluate(self.arraylike_input,
self.numpy_target, batch_size=5)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training_tensor_target(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.arraylike_input,
self.tensor_target, batch_size=5)
self.model.fit(self.arraylike_input,
self.tensor_target, shuffle=True,
batch_size=5)
self.model.fit(self.arraylike_input,
self.tensor_target, shuffle='batch',
batch_size=5)
self.model.evaluate(self.arraylike_input,
self.tensor_target, batch_size=5)
def test_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 32
x = DummyArrayLike(np.arange(num_samples))
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle=True, epochs=2)
def _get_epoch(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return np.concatenate(ds_data)
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_data = _get_epoch(ds_iter)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
def test_batch_shuffle_correctness(self):
with context.eager_mode():
num_samples = 100
batch_size = 6
x = DummyArrayLike(np.arange(num_samples))
np.random.seed(99)
adapter = self.adapter_cls(
x, y=None, batch_size=batch_size, shuffle='batch', epochs=2)
def _get_epoch_batches(ds_iter):
ds_data = []
for _ in range(int(math.ceil(num_samples / batch_size))):
ds_data.append(next(ds_iter)[0].numpy())
return ds_data
ds_iter = iter(adapter.get_dataset())
# First epoch.
epoch_batch_data = _get_epoch_batches(ds_iter)
epoch_data = np.concatenate(epoch_batch_data)
def _verify_batch(batch):
# Verify that a batch contains only contiguous data, but that it has
# been shuffled.
shuffled_batch = np.sort(batch)
self.assertNotAllClose(batch, shuffled_batch)
for i in range(1, len(batch)):
self.assertEqual(shuffled_batch[i-1] + 1, shuffled_batch[i])
# Assert that the data within each batch is shuffled contiguous data
for batch in epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(epoch_data))
# Second epoch.
second_epoch_batch_data = _get_epoch_batches(ds_iter)
second_epoch_data = np.concatenate(second_epoch_batch_data)
# Assert that the data within each batch remains contiguous
for batch in second_epoch_batch_data:
_verify_batch(batch)
# Check that shuffling occurred.
self.assertNotAllClose(x, second_epoch_data)
# Check that shuffling is different across epochs.
self.assertNotAllClose(epoch_data, second_epoch_data)
# Check that each elements appears, and only once.
self.assertAllClose(x, np.sort(second_epoch_data))
@parameterized.named_parameters(
('batch_size_5', 5, None, 5),
('batch_size_50', 50, 4, 50), # Sanity check: batch_size takes precedence
('steps_1', None, 1, 50),
('steps_4', None, 4, 13),
)
def test_batch_size(self, batch_size_in, steps, batch_size_out):
adapter = self.adapter_cls(
self.arraylike_input,
self.arraylike_target, batch_size=batch_size_in,
steps=steps)
self.assertEqual(adapter.batch_size(), batch_size_out)
@parameterized.named_parameters(
('batch_size_5', 5, None, 10, 0),
('batch_size_4', 4, None, 13, 2),
('steps_1', None, 1, 1, 0),
('steps_5', None, 5, 5, 0),
('steps_4', None, 4, 4, 11),
)
def test_partial_batch(
self, batch_size_in, steps, size, partial_batch_size):
adapter = self.adapter_cls(
self.arraylike_input, self.arraylike_target,
batch_size=batch_size_in,
steps=steps)
    self.assertEqual(adapter.get_size(), size)  # number of batches
self.assertEqual(adapter.has_partial_batch(), bool(partial_batch_size))
self.assertEqual(adapter.partial_batch_size(), partial_batch_size or None)
class DatasetAdapterTest(DataAdapterTestBase):
def setUp(self):
super(DatasetAdapterTest, self).setUp()
self.adapter_cls = data_adapter.DatasetAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertTrue(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
dataset = self.adapter_cls(self.dataset_input).get_dataset()
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(dataset)
def test_size(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertIsNone(adapter.get_size())
def test_batch_size(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertIsNone(adapter.batch_size())
def test_partial_batch(self):
adapter = self.adapter_cls(self.dataset_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.dataset_input, y=self.dataset_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegexp(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(self.dataset_input, sample_weights=self.dataset_input)
class GeneratorDataAdapterTest(DataAdapterTestBase):
def setUp(self):
super(GeneratorDataAdapterTest, self).setUp()
self.adapter_cls = data_adapter.GeneratorDataAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertTrue(self.adapter_cls.can_handle(self.generator_input))
self.assertFalse(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.generator_input, steps_per_epoch=10)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@test_util.run_v2_only
@data_utils.dont_use_multiprocessing_pool
def test_with_multiprocessing_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
    # Fit twice to ensure there isn't any duplication that prevents the worker
    # from starting.
self.model.fit(self.iterator_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
def test_size(self):
adapter = self.adapter_cls(self.generator_input)
self.assertIsNone(adapter.get_size())
def test_batch_size(self):
adapter = self.adapter_cls(self.generator_input)
self.assertEqual(adapter.batch_size(), None)
self.assertEqual(adapter.representative_batch_size(), 5)
def test_partial_batch(self):
adapter = self.adapter_cls(self.generator_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.generator_input, y=self.generator_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegexp(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(
self.generator_input, sample_weights=self.generator_input)
def test_not_shuffled(self):
def generator():
for i in range(10):
yield np.ones((1, 1)) * i
adapter = self.adapter_cls(generator(), shuffle=True)
with context.eager_mode():
for i, data in enumerate(adapter.get_dataset()):
self.assertEqual(i, data[0].numpy().flatten())
class KerasSequenceAdapterTest(DataAdapterTestBase):
def setUp(self):
super(KerasSequenceAdapterTest, self).setUp()
self.adapter_cls = data_adapter.KerasSequenceAdapter
def test_can_handle(self):
self.assertFalse(self.adapter_cls.can_handle(self.numpy_input))
self.assertFalse(self.adapter_cls.can_handle(self.tensor_input))
self.assertFalse(self.adapter_cls.can_handle(self.dataset_input))
self.assertFalse(self.adapter_cls.can_handle(self.generator_input))
self.assertTrue(self.adapter_cls.can_handle(self.sequence_input))
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
def test_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.sequence_input)
@keras_parameterized.run_all_keras_modes(always_skip_v1=True)
@test_util.run_v2_only
@data_utils.dont_use_multiprocessing_pool
def test_with_multiprocessing_training(self):
self.model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd',
run_eagerly=testing_utils.should_run_eagerly())
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
    # Fit twice to ensure there isn't any duplication that prevents the worker
    # from starting.
self.model.fit(self.sequence_input, workers=1, use_multiprocessing=True,
max_queue_size=10, steps_per_epoch=10)
def test_size(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertEqual(adapter.get_size(), 10)
def test_batch_size(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertEqual(adapter.batch_size(), None)
self.assertEqual(adapter.representative_batch_size(), 5)
def test_partial_batch(self):
adapter = self.adapter_cls(self.sequence_input)
self.assertFalse(adapter.has_partial_batch())
self.assertIsNone(adapter.partial_batch_size())
def test_invalid_targets_argument(self):
with self.assertRaisesRegexp(ValueError, r'`y` argument is not supported'):
self.adapter_cls(self.sequence_input, y=self.sequence_input)
def test_invalid_sample_weights_argument(self):
with self.assertRaisesRegexp(ValueError,
r'`sample_weight` argument is not supported'):
self.adapter_cls(self.sequence_input, sample_weights=self.sequence_input)
class DataHandlerTest(keras_parameterized.TestCase):
def test_finite_dataset_with_steps_per_epoch(self):
data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1)
# User can choose to only partially consume `Dataset`.
data_handler = data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=2)
self.assertEqual(data_handler.inferred_steps, 2)
self.assertFalse(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1], [2, 3]])
def test_finite_dataset_without_steps_per_epoch(self):
data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2]).batch(1)
data_handler = data_adapter.DataHandler(data, initial_epoch=0, epochs=2)
self.assertEqual(data_handler.inferred_steps, 3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]])
def test_finite_dataset_with_steps_per_epoch_exact_size(self):
data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2, 3]).batch(1)
# If user specifies exact size of `Dataset` as `steps_per_epoch`,
# create a new iterator each epoch.
data_handler = data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=4)
self.assertTrue(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]])
def test_infinite_dataset_with_steps_per_epoch(self):
data = dataset_ops.Dataset.from_tensor_slices([0, 1, 2]).batch(1).repeat()
data_handler = data_adapter.DataHandler(
data, initial_epoch=0, epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator).numpy())
returned_data.append(epoch_data)
self.assertEqual(returned_data, [[0, 1, 2], [0, 1, 2]])
def test_unknown_cardinality_dataset_with_steps_per_epoch(self):
ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2, 3, 4, 5, 6])
filtered_ds = ds.filter(lambda x: x < 4)
self.assertEqual(
cardinality.cardinality(filtered_ds).numpy(), cardinality.UNKNOWN)
# User can choose to only partially consume `Dataset`.
data_handler = data_adapter.DataHandler(
filtered_ds, initial_epoch=0, epochs=2, steps_per_epoch=2)
self.assertFalse(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[0, 1], [2, 3]])
self.assertEqual(data_handler.inferred_steps, 2)
def test_unknown_cardinality_dataset_without_steps_per_epoch(self):
ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1, 2, 3, 4, 5, 6])
filtered_ds = ds.filter(lambda x: x < 4)
self.assertEqual(
cardinality.cardinality(filtered_ds).numpy(), cardinality.UNKNOWN)
data_handler = data_adapter.DataHandler(
filtered_ds, initial_epoch=0, epochs=2)
self.assertEqual(data_handler.inferred_steps, None)
self.assertTrue(data_handler._adapter.should_recreate_iterator())
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
with data_handler.catch_stop_iteration():
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[0, 1, 2, 3], [0, 1, 2, 3]])
self.assertEqual(data_handler.inferred_steps, 4)
def test_insufficient_data(self):
ds = dataset_ops.DatasetV2.from_tensor_slices([0, 1])
ds = ds.filter(lambda *args, **kwargs: True)
data_handler = data_adapter.DataHandler(
ds, initial_epoch=0, epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
with data_handler.catch_stop_iteration():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertTrue(data_handler._insufficient_data)
self.assertEqual(returned_data, [[0, 1]])
def test_numpy(self):
x = np.array([0, 1, 2])
y = np.array([0, 2, 4])
sw = np.array([0, 4, 8])
data_handler = data_adapter.DataHandler(
x=x, y=y, sample_weight=sw, batch_size=1, epochs=2)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data,
[[(0, 0, 0), (1, 2, 4),
(2, 4, 8)], [(0, 0, 0), (1, 2, 4), (2, 4, 8)]])
def test_generator(self):
def generator():
for _ in range(2):
for step in range(3):
yield (ops.convert_to_tensor_v2([step]),)
data_handler = data_adapter.DataHandler(
generator(), epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[([0],), ([1],),
([2],)], [([0],), ([1],), ([2],)]])
def test_composite_tensor(self):
st = sparse_tensor.SparseTensor(
indices=[[0, 0], [1, 0], [2, 0]], values=[0, 1, 2], dense_shape=[3, 1])
data_handler = data_adapter.DataHandler(st, epochs=2, steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(
nest.map_structure(sparse_ops.sparse_tensor_to_dense, returned_data))
self.assertEqual(returned_data, [[([0],), ([1],),
([2],)], [([0],), ([1],), ([2],)]])
def test_list_of_scalars(self):
data_handler = data_adapter.DataHandler([[0], [1], [2]],
epochs=2,
steps_per_epoch=3)
returned_data = []
for _, iterator in data_handler.enumerate_epochs():
epoch_data = []
for _ in data_handler.steps():
epoch_data.append(next(iterator))
returned_data.append(epoch_data)
returned_data = self.evaluate(returned_data)
self.assertEqual(returned_data, [[([0],), ([1],),
([2],)], [([0],), ([1],), ([2],)]])
def test_class_weight_user_errors(self):
with self.assertRaisesRegexp(ValueError, 'to be a dict with keys'):
data_adapter.DataHandler(
x=[[0], [1], [2]],
y=[[2], [1], [0]],
batch_size=1,
sample_weight=[[1.], [2.], [4.]],
class_weight={
0: 0.5,
1: 1.,
3: 1.5 # Skips class `2`.
})
with self.assertRaisesRegexp(ValueError, 'with a single output'):
data_adapter.DataHandler(
x=np.ones((10, 1)),
y=[np.ones((10, 1)), np.zeros((10, 1))],
batch_size=2,
class_weight={
0: 0.5,
1: 1.,
2: 1.5
})
class TestValidationSplit(keras_parameterized.TestCase):
@parameterized.named_parameters(('numpy_arrays', True), ('tensors', False))
def test_validation_split_shuffled(self, use_numpy):
if use_numpy:
x = np.array([0, 1, 2, 3, 4])
y = np.array([0, 2, 4, 6, 8])
sw = np.array([0, 4, 8, 12, 16])
else:
x = ops.convert_to_tensor_v2([0, 1, 2, 3, 4])
y = ops.convert_to_tensor_v2([0, 2, 4, 6, 8])
sw = ops.convert_to_tensor_v2([0, 4, 8, 12, 16])
(train_x, train_y, train_sw), (val_x, val_y, val_sw) = (
data_adapter.train_validation_split((x, y, sw), validation_split=0.2))
self.assertEqual(int(train_x.shape[0]), 4)
self.assertEqual(int(train_y.shape[0]), 4)
self.assertEqual(int(train_sw.shape[0]), 4)
for i in range(4):
# Check that all arrays were shuffled in identical order.
self.assertEqual(2 * train_x[i].numpy(), train_y[i].numpy())
self.assertEqual(2 * train_y[i].numpy(), train_sw[i].numpy())
self.assertEqual(int(val_x.shape[0]), 1)
self.assertEqual(int(val_y.shape[0]), 1)
self.assertEqual(int(val_sw.shape[0]), 1)
for i in range(1):
# Check that all arrays were shuffled in identical order.
self.assertEqual(2 * train_x[i].numpy(), train_y[i].numpy())
self.assertEqual(2 * train_y[i].numpy(), train_sw[i].numpy())
# Check that arrays contain expected values.
self.assertEqual(
sorted(array_ops.concat([train_x, val_x], axis=0).numpy().tolist()),
sorted(ops.convert_to_tensor_v2(x).numpy().tolist()))
self.assertEqual(
sorted(array_ops.concat([train_y, val_y], axis=0).numpy().tolist()),
sorted(ops.convert_to_tensor_v2(y).numpy().tolist()))
self.assertEqual(
sorted(array_ops.concat([train_sw, val_sw], axis=0).numpy().tolist()),
sorted(ops.convert_to_tensor_v2(sw).numpy().tolist()))
@parameterized.named_parameters(('numpy_arrays', True), ('tensors', False))
def test_validation_split_unshuffled(self, use_numpy):
if use_numpy:
x = np.array([0, 1, 2, 3, 4])
y = np.array([0, 2, 4, 6, 8])
sw = np.array([0, 4, 8, 12, 16])
else:
x = ops.convert_to_tensor_v2([0, 1, 2, 3, 4])
y = ops.convert_to_tensor_v2([0, 2, 4, 6, 8])
sw = ops.convert_to_tensor_v2([0, 4, 8, 12, 16])
(train_x, train_y, train_sw), (val_x, val_y, val_sw) = (
data_adapter.train_validation_split((x, y, sw),
validation_split=0.2,
shuffle=False))
self.assertEqual(train_x.numpy().tolist(), [0, 1, 2, 3])
self.assertEqual(train_y.numpy().tolist(), [0, 2, 4, 6])
self.assertEqual(train_sw.numpy().tolist(), [0, 4, 8, 12])
self.assertEqual(val_x.numpy().tolist(), [4])
self.assertEqual(val_y.numpy().tolist(), [8])
self.assertEqual(val_sw.numpy().tolist(), [16])
def test_validation_split_user_error(self):
with self.assertRaisesRegexp(ValueError, 'is only supported for Tensors'):
data_adapter.train_validation_split(
lambda: np.ones((10, 1)), validation_split=0.2)
def test_validation_split_examples_too_few(self):
with self.assertRaisesRegexp(
ValueError, 'not sufficient to split it'):
data_adapter.train_validation_split(
np.ones((1, 10)), validation_split=0.2)
def test_validation_split_none(self):
train_sw, val_sw = data_adapter.train_validation_split(
None, validation_split=0.2)
self.assertIsNone(train_sw)
self.assertIsNone(val_sw)
(_, train_sw), (_, val_sw) = data_adapter.train_validation_split(
(np.ones((10, 1)), None), validation_split=0.2)
self.assertIsNone(train_sw)
self.assertIsNone(val_sw)
class TestUtils(keras_parameterized.TestCase):
def test_expand_1d_sparse_tensors_untouched(self):
st = sparse_tensor.SparseTensor(
indices=[[0], [10]], values=[1, 2], dense_shape=[10])
st = data_adapter.expand_1d(st)
self.assertEqual(st.shape.rank, 1)
if __name__ == '__main__':
ops.enable_eager_execution()
test.main()
| apache-2.0 |
sandeepgupta2k4/tensorflow | tensorflow/examples/learn/iris_val_based_early_stopping.py | 62 | 2827 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with early stopping."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import shutil
from sklearn import datasets
from sklearn import metrics
from sklearn.cross_validation import train_test_split
import tensorflow as tf
learn = tf.contrib.learn
def clean_folder(folder):
"""Cleans the given folder if it exists."""
try:
shutil.rmtree(folder)
except OSError:
pass
def main(unused_argv):
iris = datasets.load_iris()
x_train, x_test, y_train, y_test = train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
x_train, x_val, y_train, y_val = train_test_split(
x_train, y_train, test_size=0.2, random_state=42)
val_monitor = learn.monitors.ValidationMonitor(
x_val, y_val, early_stopping_rounds=200)
model_dir = '/tmp/iris_model'
clean_folder(model_dir)
# classifier with early stopping on training data
classifier1 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir)
classifier1.fit(x=x_train, y=y_train, steps=2000)
predictions1 = list(classifier1.predict(x_test, as_iterable=True))
score1 = metrics.accuracy_score(y_test, predictions1)
model_dir = '/tmp/iris_model_val'
clean_folder(model_dir)
  # classifier with early stopping on validation data; save checkpoints
  # frequently so that the monitor can pick up the new ones.
classifier2 = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10],
n_classes=3,
model_dir=model_dir,
config=tf.contrib.learn.RunConfig(save_checkpoints_secs=1))
classifier2.fit(x=x_train, y=y_train, steps=2000, monitors=[val_monitor])
predictions2 = list(classifier2.predict(x_test, as_iterable=True))
score2 = metrics.accuracy_score(y_test, predictions2)
# In many applications, the score is improved by using early stopping
print('score1: ', score1)
print('score2: ', score2)
print('score2 > score1: ', score2 > score1)
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
NunoEdgarGub1/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 142 | 4761 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent variables:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n // 2]
Y_train = Y[:n // 2]
X_test = X[n // 2:]
Y_test = Y[n // 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
plt.figure(figsize=(12, 8))
plt.subplot(221)
plt.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
plt.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
plt.subplot(224)
plt.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
plt.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
plt.xlabel("x scores")
plt.ylabel("y scores")
plt.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
plt.xticks(())
plt.yticks(())
plt.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
plt.subplot(222)
plt.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
plt.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
plt.xlabel("X comp. 1")
plt.ylabel("X comp. 2")
plt.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.subplot(223)
plt.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
plt.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
plt.xlabel("Y comp. 1")
plt.ylabel("Y comp. 2")
plt.title('Y comp. 1 vs Y comp. 2 (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
plt.legend(loc="best")
plt.xticks(())
plt.yticks(())
plt.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
| bsd-3-clause |
duthchao/kaggle-galaxies | predict_augmented_npy_maxout2048_pysex.py | 7 | 9584 | """
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
BATCH_SIZE = 32 # 16
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size
# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048_pysex.pkl"
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_TEST = True # disable this to not generate predictions on the testset
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)
print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
num_input_representations = len(ds_transforms)
# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
y_valid = np.load("data/solutions_train.npy")[num_train:]
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
# l4 = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5)
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)
print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])
print "Create generators"
# set here which transforms to use to make predictions
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
for angle in np.linspace(0, 360, 10, endpoint=False):
augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print " %d augmentation transforms." % len(augmentation_transforms)
augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms, processor_class=ra.LoadAndProcessFixedPysexCenteringRescaling)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)
approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
print
print "VALIDATION SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(valid_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, all_predictions)
print "Evaluate"
rmse_valid = analysis['losses_valid'][-1]
rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
print " MSE (last iteration):\t%.6f" % rmse_valid
print " MSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
print
print "TEST SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(test_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_test
load_data.save_gz(target_path_test, all_predictions)
print "Done!"
| bsd-3-clause |
anhaidgroup/py_stringsimjoin | py_stringsimjoin/join/overlap_coefficient_join_py.py | 1 | 17453 | # overlap coefficient join
from joblib import delayed, Parallel
from six import iteritems
import pandas as pd
import pyprind
from py_stringsimjoin.filter.overlap_filter import OverlapFilter
from py_stringsimjoin.index.inverted_index import InvertedIndex
from py_stringsimjoin.utils.generic_helper import convert_dataframe_to_array, \
find_output_attribute_indices, get_attrs_to_project, \
get_num_processes_to_launch, get_output_header_from_tables, \
get_output_row_from_tables, remove_redundant_attrs, split_table, COMP_OP_MAP
from py_stringsimjoin.utils.missing_value_handler import \
get_pairs_with_missing_value
from py_stringsimjoin.utils.validation import validate_attr, \
validate_attr_type, validate_comp_op_for_sim_measure, validate_key_attr, \
validate_input_table, validate_threshold, validate_tokenizer, \
validate_output_attrs
def overlap_coefficient_join_py(ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op='>=',
allow_empty=True, allow_missing=False,
l_out_attrs=None, r_out_attrs=None,
l_out_prefix='l_', r_out_prefix='r_',
out_sim_score=True, n_jobs=1, show_progress=True):
"""Join two tables using overlap coefficient.
For two sets X and Y, the overlap coefficient between them is given by:
:math:`overlap\\_coefficient(X, Y) = \\frac{|X \\cap Y|}{\\min(|X|, |Y|)}`
In the case where one of X and Y is an empty set and the other is a
non-empty set, we define their overlap coefficient to be 0. In the case
where both X and Y are empty sets, we define their overlap coefficient to
be 1.
Finds tuple pairs from left table and right table such that the overlap
coefficient between the join attributes satisfies the condition on input
threshold. For example, if the comparison operator is '>=', finds tuple
pairs whose overlap coefficient between the strings that are the values of
the join attributes is greater than or equal to the input threshold, as
specified in "threshold".
Args:
ltable (DataFrame): left input table.
rtable (DataFrame): right input table.
l_key_attr (string): key attribute in left table.
r_key_attr (string): key attribute in right table.
l_join_attr (string): join attribute in left table.
r_join_attr (string): join attribute in right table.
tokenizer (Tokenizer): tokenizer to be used to tokenize join
attributes.
threshold (float): overlap coefficient threshold to be satisfied.
comp_op (string): comparison operator. Supported values are '>=', '>'
and '=' (defaults to '>=').
allow_empty (boolean): flag to indicate whether tuple pairs with empty
set of tokens in both the join attributes should be included in the
output (defaults to True).
allow_missing (boolean): flag to indicate whether tuple pairs with
missing value in at least one of the join attributes should be
included in the output (defaults to False). If this flag is set to
True, a tuple in ltable with missing value in the join attribute
will be matched with every tuple in rtable and vice versa.
l_out_attrs (list): list of attribute names from the left table to be
included in the output table (defaults to None).
r_out_attrs (list): list of attribute names from the right table to be
included in the output table (defaults to None).
l_out_prefix (string): prefix to be used for the attribute names coming
from the left table, in the output table (defaults to 'l\_').
r_out_prefix (string): prefix to be used for the attribute names coming
from the right table, in the output table (defaults to 'r\_').
out_sim_score (boolean): flag to indicate whether similarity score
should be included in the output table (defaults to True). Setting
this flag to True will add a column named '_sim_score' in the
output table. This column will contain the similarity scores for the
tuple pairs in the output.
n_jobs (int): number of parallel jobs to use for the computation
(defaults to 1). If -1 is given, all CPUs are used. If 1 is given,
no parallel computing code is used at all, which is useful for
debugging. For n_jobs below -1, (n_cpus + 1 + n_jobs) are used
(where n_cpus is the total number of CPUs in the machine). Thus for
n_jobs = -2, all CPUs but one are used. If (n_cpus + 1 + n_jobs)
becomes less than 1, then no parallel computing code will be used
(i.e., equivalent to the default).
show_progress (boolean): flag to indicate whether task progress should
be displayed to the user (defaults to True).
Returns:
An output table containing tuple pairs that satisfy the join
condition (DataFrame).
"""
# check if the input tables are dataframes
validate_input_table(ltable, 'left table')
validate_input_table(rtable, 'right table')
# check if the key attributes and join attributes exist
validate_attr(l_key_attr, ltable.columns,
'key attribute', 'left table')
validate_attr(r_key_attr, rtable.columns,
'key attribute', 'right table')
validate_attr(l_join_attr, ltable.columns,
'join attribute', 'left table')
validate_attr(r_join_attr, rtable.columns,
'join attribute', 'right table')
# check if the join attributes are not of numeric type
validate_attr_type(l_join_attr, ltable[l_join_attr].dtype,
'join attribute', 'left table')
validate_attr_type(r_join_attr, rtable[r_join_attr].dtype,
'join attribute', 'right table')
# check if the input tokenizer is valid
validate_tokenizer(tokenizer)
# check if the input threshold is valid
validate_threshold(threshold, 'OVERLAP_COEFFICIENT')
# check if the comparison operator is valid
validate_comp_op_for_sim_measure(comp_op, 'OVERLAP_COEFFICIENT')
# check if the output attributes exist
validate_output_attrs(l_out_attrs, ltable.columns,
r_out_attrs, rtable.columns)
# check if the key attributes are unique and do not contain missing values
validate_key_attr(l_key_attr, ltable, 'left table')
validate_key_attr(r_key_attr, rtable, 'right table')
# set return_set flag of tokenizer to be True, in case it is set to False
revert_tokenizer_return_set_flag = False
if not tokenizer.get_return_set():
tokenizer.set_return_set(True)
revert_tokenizer_return_set_flag = True
# remove redundant attrs from output attrs.
l_out_attrs = remove_redundant_attrs(l_out_attrs, l_key_attr)
r_out_attrs = remove_redundant_attrs(r_out_attrs, r_key_attr)
# get attributes to project.
l_proj_attrs = get_attrs_to_project(l_out_attrs, l_key_attr, l_join_attr)
r_proj_attrs = get_attrs_to_project(r_out_attrs, r_key_attr, r_join_attr)
# Do a projection on the input dataframes to keep only the required
# attributes. Then, remove rows with missing value in join attribute from
# the input dataframes. Then, convert the resulting dataframes into ndarray.
ltable_array = convert_dataframe_to_array(ltable, l_proj_attrs, l_join_attr)
rtable_array = convert_dataframe_to_array(rtable, r_proj_attrs, r_join_attr)
# computes the actual number of jobs to launch.
n_jobs = min(get_num_processes_to_launch(n_jobs), len(rtable_array))
if n_jobs <= 1:
# if n_jobs is 1, do not use any parallel code.
output_table = _overlap_coefficient_join_split(
ltable_array, rtable_array,
l_proj_attrs, r_proj_attrs,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op,
allow_empty,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, show_progress)
else:
# if n_jobs is above 1, split the right table into n_jobs splits and
# join each right table split with the whole of left table in a separate
# process.
r_splits = split_table(rtable_array, n_jobs)
results = Parallel(n_jobs=n_jobs)(
delayed(_overlap_coefficient_join_split)(
ltable_array, r_splits[job_index],
l_proj_attrs, r_proj_attrs,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op,
allow_empty,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score,
(show_progress and (job_index==n_jobs-1)))
for job_index in range(n_jobs))
output_table = pd.concat(results)
# If allow_missing flag is set, then compute all pairs with missing value in
# at least one of the join attributes and then add it to the output
# obtained from the join.
if allow_missing:
missing_pairs = get_pairs_with_missing_value(
ltable, rtable,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, show_progress)
output_table = pd.concat([output_table, missing_pairs])
# add an id column named '_id' to the output table.
output_table.insert(0, '_id', range(0, len(output_table)))
# revert the return_set flag of tokenizer, in case it was modified.
if revert_tokenizer_return_set_flag:
tokenizer.set_return_set(False)
return output_table
def _overlap_coefficient_join_split(ltable_list, rtable_list,
l_columns, r_columns,
l_key_attr, r_key_attr,
l_join_attr, r_join_attr,
tokenizer, threshold, comp_op,
allow_empty,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix,
out_sim_score, show_progress):
"""Perform overlap coefficient join for a split of ltable and rtable"""
# find column indices of key attr, join attr and output attrs in ltable
l_key_attr_index = l_columns.index(l_key_attr)
l_join_attr_index = l_columns.index(l_join_attr)
l_out_attrs_indices = find_output_attribute_indices(l_columns, l_out_attrs)
# find column indices of key attr, join attr and output attrs in rtable
r_key_attr_index = r_columns.index(r_key_attr)
r_join_attr_index = r_columns.index(r_join_attr)
r_out_attrs_indices = find_output_attribute_indices(r_columns, r_out_attrs)
# Build inverted index over ltable
inverted_index = InvertedIndex(ltable_list, l_join_attr_index,
tokenizer, cache_size_flag=True)
# While building the index, we cache the record ids with empty set of
# tokens. This is needed to handle the allow_empty flag.
cached_data = inverted_index.build(allow_empty)
l_empty_records = cached_data['empty_records']
overlap_filter = OverlapFilter(tokenizer, 1)
comp_fn = COMP_OP_MAP[comp_op]
output_rows = []
has_output_attributes = (l_out_attrs is not None or
r_out_attrs is not None)
if show_progress:
prog_bar = pyprind.ProgBar(len(rtable_list))
for r_row in rtable_list:
r_string = r_row[r_join_attr_index]
r_join_attr_tokens = tokenizer.tokenize(r_string)
r_num_tokens = len(r_join_attr_tokens)
# If allow_empty flag is set and the current rtable record has empty set
# of tokens in the join attribute, then generate output pairs joining
# the current rtable record with those records in ltable with empty set
# of tokens in the join attribute. These ltable record ids are cached in
# l_empty_records list which was constructed when building the inverted
# index.
if allow_empty and r_num_tokens == 0:
for l_id in l_empty_records:
if has_output_attributes:
output_row = get_output_row_from_tables(
ltable_list[l_id], r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices,
r_out_attrs_indices)
else:
output_row = [ltable_list[l_id][l_key_attr_index],
r_row[r_key_attr_index]]
if out_sim_score:
output_row.append(1.0)
output_rows.append(output_row)
continue
# probe inverted index and find overlap of candidates
candidate_overlap = overlap_filter.find_candidates(
r_join_attr_tokens, inverted_index)
for cand, overlap in iteritems(candidate_overlap):
# compute the actual similarity score
sim_score = (float(overlap) /
float(min(r_num_tokens,
inverted_index.size_cache[cand])))
if comp_fn(sim_score, threshold):
if has_output_attributes:
output_row = get_output_row_from_tables(
ltable_list[cand], r_row,
l_key_attr_index, r_key_attr_index,
l_out_attrs_indices, r_out_attrs_indices)
else:
output_row = [ltable_list[cand][l_key_attr_index],
r_row[r_key_attr_index]]
# if out_sim_score flag is set, append the overlap coefficient
# score to the output record.
if out_sim_score:
output_row.append(sim_score)
output_rows.append(output_row)
if show_progress:
prog_bar.update()
output_header = get_output_header_from_tables(l_key_attr, r_key_attr,
l_out_attrs, r_out_attrs,
l_out_prefix, r_out_prefix)
if out_sim_score:
output_header.append("_sim_score")
output_table = pd.DataFrame(output_rows, columns=output_header)
return output_table
| bsd-3-clause |
numairmansur/RoBO | examples/example_bagged_nets.py | 1 | 1284 | import sys
import logging
import numpy as np
import matplotlib.pyplot as plt
import robo.models.neural_network as robo_net
import robo.models.bagged_networks as bn
from robo.initial_design.init_random_uniform import init_random_uniform
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def f(x):
return np.sinc(x * 10 - 5).sum(axis=1)[:, None]
rng = np.random.RandomState(42)
X = init_random_uniform(np.zeros(1), np.ones(1), 20, rng).astype(np.float32)
Y = f(X)
x = np.linspace(0, 1, 512, dtype=np.float32)[:, None]
vals = f(x).astype(np.float32)
plt.grid()
plt.plot(x[:, 0], f(x)[:, 0], label="true", color="green")
plt.plot(X[:, 0], Y[:, 0], "ro")
model = bn.BaggedNets(robo_net.SGDNet, num_models=16, bootstrap_with_replacement=True,
n_epochs=16384, error_threshold=1e-3,
n_units=[32, 32, 32], dropout=0,
batch_size=10, learning_rate=1e-3,
shuffle_batches=True)
m = model.train(X, Y)
mean_pred, var_pred = model.predict(x)
std_pred = np.sqrt(var_pred)
plt.plot(x[:, 0], mean_pred[:, 0], label="bagged nets", color="blue")
plt.fill_between(x[:, 0], mean_pred[:, 0] + std_pred[:, 0], mean_pred[:, 0] - std_pred[:, 0], alpha=0.2, color="blue")
plt.legend()
plt.show()
| bsd-3-clause |
Adai0808/scikit-learn | sklearn/cluster/tests/test_affinity_propagation.py | 341 | 2620 | """
Testing for Clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.cluster.affinity_propagation_ import AffinityPropagation
from sklearn.cluster.affinity_propagation_ import affinity_propagation
from sklearn.datasets.samples_generator import make_blobs
from sklearn.metrics import euclidean_distances
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=60, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=0)
def test_affinity_propagation():
# Affinity Propagation algorithm
# Compute similarities
S = -euclidean_distances(X, squared=True)
preference = np.median(S) * 10
# Compute Affinity Propagation
cluster_centers_indices, labels = affinity_propagation(
S, preference=preference)
n_clusters_ = len(cluster_centers_indices)
assert_equal(n_clusters, n_clusters_)
af = AffinityPropagation(preference=preference, affinity="precomputed")
labels_precomputed = af.fit(S).labels_
af = AffinityPropagation(preference=preference, verbose=True)
labels = af.fit(X).labels_
assert_array_equal(labels, labels_precomputed)
cluster_centers_indices = af.cluster_centers_indices_
n_clusters_ = len(cluster_centers_indices)
assert_equal(np.unique(labels).size, n_clusters_)
assert_equal(n_clusters, n_clusters_)
# Test also with no copy
_, labels_no_copy = affinity_propagation(S, preference=preference,
copy=False)
assert_array_equal(labels, labels_no_copy)
# Test input validation
assert_raises(ValueError, affinity_propagation, S[:, :-1])
assert_raises(ValueError, affinity_propagation, S, damping=0)
af = AffinityPropagation(affinity="unknown")
assert_raises(ValueError, af.fit, X)
def test_affinity_propagation_predict():
# Test AffinityPropagation.predict
af = AffinityPropagation(affinity="euclidean")
labels = af.fit_predict(X)
labels2 = af.predict(X)
assert_array_equal(labels, labels2)
def test_affinity_propagation_predict_error():
# Test exception in AffinityPropagation.predict
# Not fitted.
af = AffinityPropagation(affinity="euclidean")
assert_raises(ValueError, af.predict, X)
# Predict not supported when affinity="precomputed".
S = np.dot(X, X.T)
af = AffinityPropagation(affinity="precomputed")
af.fit(S)
assert_raises(ValueError, af.predict, X)
| bsd-3-clause |
f3r/scikit-learn | sklearn/tests/test_metaestimators.py | 57 | 4958 | """Common tests for metaestimators"""
import functools
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.externals.six import iterkeys
from sklearn.datasets import make_classification
from sklearn.utils.testing import assert_true, assert_false, assert_raises
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.feature_selection import RFE, RFECV
from sklearn.ensemble import BaggingClassifier
class DelegatorData(object):
def __init__(self, name, construct, skip_methods=(),
fit_args=make_classification()):
self.name = name
self.construct = construct
self.fit_args = fit_args
self.skip_methods = skip_methods
DELEGATING_METAESTIMATORS = [
DelegatorData('Pipeline', lambda est: Pipeline([('est', est)])),
DelegatorData('GridSearchCV',
lambda est: GridSearchCV(
est, param_grid={'param': [5]}, cv=2),
skip_methods=['score']),
DelegatorData('RandomizedSearchCV',
lambda est: RandomizedSearchCV(
est, param_distributions={'param': [5]}, cv=2, n_iter=1),
skip_methods=['score']),
DelegatorData('RFE', RFE,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('RFECV', RFECV,
skip_methods=['transform', 'inverse_transform', 'score']),
DelegatorData('BaggingClassifier', BaggingClassifier,
skip_methods=['transform', 'inverse_transform', 'score',
'predict_proba', 'predict_log_proba', 'predict'])
]
def test_metaestimator_delegation():
# Ensures specified metaestimators have methods iff subestimator does
def hides(method):
@property
def wrapper(obj):
if obj.hidden_method == method.__name__:
raise AttributeError('%r is hidden' % obj.hidden_method)
return functools.partial(method, obj)
return wrapper
class SubEstimator(BaseEstimator):
def __init__(self, param=1, hidden_method=None):
self.param = param
self.hidden_method = hidden_method
def fit(self, X, y=None, *args, **kwargs):
self.coef_ = np.arange(X.shape[1])
return True
def _check_fit(self):
if not hasattr(self, 'coef_'):
raise RuntimeError('Estimator is not fit')
@hides
def inverse_transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def transform(self, X, *args, **kwargs):
self._check_fit()
return X
@hides
def predict(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def predict_log_proba(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def decision_function(self, X, *args, **kwargs):
self._check_fit()
return np.ones(X.shape[0])
@hides
def score(self, X, *args, **kwargs):
self._check_fit()
return 1.0
methods = [k for k in iterkeys(SubEstimator.__dict__)
if not k.startswith('_') and not k.startswith('fit')]
methods.sort()
for delegator_data in DELEGATING_METAESTIMATORS:
delegate = SubEstimator()
delegator = delegator_data.construct(delegate)
for method in methods:
if method in delegator_data.skip_methods:
continue
assert_true(hasattr(delegate, method))
assert_true(hasattr(delegator, method),
msg="%s does not have method %r when its delegate does"
% (delegator_data.name, method))
# delegation before fit raises an exception
assert_raises(Exception, getattr(delegator, method),
delegator_data.fit_args[0])
delegator.fit(*delegator_data.fit_args)
for method in methods:
if method in delegator_data.skip_methods:
continue
# smoke test delegation
getattr(delegator, method)(delegator_data.fit_args[0])
for method in methods:
if method in delegator_data.skip_methods:
continue
delegate = SubEstimator(hidden_method=method)
delegator = delegator_data.construct(delegate)
assert_false(hasattr(delegate, method))
assert_false(hasattr(delegator, method),
msg="%s has method %r when its delegate does not"
% (delegator_data.name, method))
| bsd-3-clause |
apaloczy/ap_tools | utils.py | 1 | 54151 | # Description: General-purpose functions for personal use.
# Author: André Palóczy
# E-mail: paloczy@gmail.com
__all__ = ['seasonal_avg',
'seasonal_std',
'deseason',
'blkavg',
'blkavgdir',
'blkavgt',
'blkapply',
'stripmsk',
'pydatetime2m_arr',
'm2pydatetime_arr',
'npdt2dt',
'dt2sfloat',
'doy2date',
'flowfun',
'cumsimp',
'rot_vec',
'avgdir',
'lon180to360',
'lon360to180',
'bbox2ij',
'xy2dist',
'get_xtrackline',
'get_arrdepth',
'fpointsbox',
'near',
'near2',
'mnear',
'refine',
'denan',
'standardize',
'linear_trend',
'thomas',
'point_in_poly',
'get_mask_from_poly',
'sphericalpolygon_area',
'greatCircleBearing',
'weim',
'smoo2',
'topo_slope',
'curvature_geometric',
'get_isobath',
'angle_isobath',
'isopyc_depth',
'whiten_zero',
'wind2stress',
'gen_dates',
'fmt_isobath',
'float2latex',
'mat2npz',
'bb_map',
'dots_dualcolor']
from os import system
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from matplotlib import path
from mpl_toolkits.basemap import Basemap
from datetime import datetime, timedelta
from dateutil import rrule, parser
from scipy.io import loadmat, savemat
from scipy import signal
from scipy.signal import savgol_filter
from glob import glob
from netCDF4 import Dataset, num2date, date2num
# from pandas import rolling_window # FIXME, new pandas way of doing this is, e.g., arr = Series(...).rolling(...).mean()
from pandas import Timestamp
from gsw import distance
from pygeodesy import Datums, VincentyError
from pygeodesy.ellipsoidalVincenty import LatLon as LatLon
from pygeodesy.sphericalNvector import LatLon as LatLon_sphere
def seasonal_avg(t, F):
"""
USAGE
-----
F_seasonal = seasonal_avg(t, F)
Calculates the seasonal average of variable F(t).
Assumes 't' is a 'datetime.datetime' object.
"""
tmo = np.array([ti.month for ti in t])
ftmo = [tmo==mo for mo in range(1, 13)]
return np.array([F[ft].mean() for ft in ftmo])
def seasonal_std(t, F):
"""
USAGE
-----
F_seasonal = seasonal_std(t, F)
Calculates the seasonal standard deviation of variable F(t).
Assumes 't' is a 'datetime.datetime' object.
"""
tmo = np.array([ti.month for ti in t])
ftmo = [tmo==mo for mo in range(1, 13)]
return np.array([F[ft].std() for ft in ftmo])
def deseason(t, F):
"""
USAGE
-----
F_nonssn = deseason(t, F)
Removes the seasonal signal of variable F(t).
Assumes 't' is a 'datetime.datetime' object.
Also assumes that F is sampled monthly and only for
complete years (i.e., t.size is a multiple of 12).
"""
Fssn = seasonal_avg(t, F)
nyears = int(t.size/12)
aux = np.array([])
for n in range(nyears):
aux = np.concatenate((aux, Fssn))
return F - aux
def blkavg(x, y, every=2):
"""
    Block-averages a variable y(x). Returns its block average,
    standard deviation, and the new x axis.
"""
nx = x.size
xblk, yblk, yblkstd = np.array([]), np.array([]), np.array([])
for i in range(every, nx+every, every):
yi = y[i-every:i]
xblk = np.append(xblk, np.nanmean(x[i-every:i]))
yblk = np.append(yblk, np.nanmean(yi))
yblkstd = np.append(yblkstd, np.nanstd(yi))
return xblk, yblk, yblkstd
def blkavgdir(x, ydir, every=2, degrees=False, axis=None):
"""
Block-averages a PERIODIC variable ydir(x). Returns its
block average and new x axis.
"""
nx = x.size
xblk, yblk, yblkstd = np.array([]), np.array([]), np.array([])
for i in range(every, nx+every, every):
xblk = np.append(xblk, np.nanmean(x[i-every:i]))
yblk = np.append(yblk, avgdir(ydir[i-every:i], degrees=degrees, axis=axis))
return xblk, yblk
def blkavgt(t, x, every=2):
"""
Block-averages a variable x(t). Returns its block average
and the new t axis.
"""
nt = t.size
units = 'days since 01-01-01'
calendar = 'proleptic_gregorian'
t = date2num(t, units=units, calendar=calendar)
tblk, xblk = np.array([]), np.array([])
for i in range(every, nt+every, every):
xi = x[i-every:i]
tblk = np.append(tblk, np.nanmean(t[i-every:i]))
xblk = np.append(xblk, np.nanmean(xi))
tblk = num2date(tblk, units=units, calendar=calendar)
return tblk, xblk
def blkapply(x, f, nblks, overlap=0, demean=False, detrend=False, verbose=True):
"""
Divides array 'x' in 'nblks' blocks and applies function 'f' = f(x) on
each block.
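    Example
    -------
    A minimal sketch with synthetic data (block-averaged raw periodogram):
    >>> import numpy as np
    >>> from ap_tools.utils import blkapply
    >>> x = np.random.randn(1000)
    >>> f = lambda xn: np.abs(np.fft.fft(xn))**2
    >>> Pavg = blkapply(x, f, 10, demean=True)  # Average raw spectrum of 10 blocks.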
"""
x = np.array(x)
assert callable(f), "f must be a function"
nx = x.size
ni = int(nx/nblks) # Number of data points in each chunk.
y = np.zeros(ni) # Array that will receive each block.
dn = int(round(ni - overlap*ni)) # How many indices to move forward with
# each chunk (depends on the % overlap).
# Demean/detrend the full record first (removes the lowest frequencies).
# Then, also demean/detrend each block beffore applying f().
if demean: x = x - x.mean()
if detrend: x = signal.detrend(x, type='linear')
n=0
il, ir = 0, ni
while ir<=nx:
xn = x[il:ir]
if demean: xn = xn - xn.mean()
if detrend: xn = signal.detrend(xn, type='linear')
y = y + f(xn) # Apply function and accumulate the current bock.
il+=dn; ir+=dn
n+=1
y /= n # Divide by number of blocks actually used.
ncap = nx - il # Number of points left out at the end of array.
if verbose:
print("")
print("Left last %d data points out (%.1f %% of all points)."%(ncap,100*ncap/nx))
if overlap>0:
print("")
print("Intended %d blocks, but could fit %d blocks, with"%(nblks,n))
print('overlap of %.1f %%, %d points per block.'%(100*overlap,dn))
print("")
return y
def stripmsk(arr, mask_invalid=False):
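    """
    USAGE
    -----
    arr = stripmsk(arr, mask_invalid=False)
    If 'arr' is a masked array, return a plain ndarray with the masked
    entries replaced by NaN; otherwise return 'arr' unchanged. If
    'mask_invalid' is True, NaNs and Infs are masked before stripping.
    """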
if mask_invalid:
arr = np.ma.masked_invalid(arr)
if np.ma.isMA(arr):
msk = arr.mask
arr = arr.data
arr[msk] = np.nan
return arr
def pydatetime2m_arr(pydt_arr):
pydt_arr = np.array(pydt_arr)
    secperday = 86400.0
    timedt = timedelta(days=366)  # Offset between MATLAB datenums and Python ordinals.
    matdt = []
    for pydt in pydt_arr.tolist():
        m = pydt.toordinal() + timedt.days
        dfrac = (pydt - datetime(pydt.year, pydt.month, pydt.day, 0, 0, 0)).total_seconds()/secperday
        matdt.append(m + dfrac)
return np.array(matdt)
def m2pydatetime_arr(mdatenum_arr):
mdatenum_arr = np.array(mdatenum_arr)
timedt = timedelta(days=366)
pydt = []
for mdt in mdatenum_arr.tolist():
d = datetime.fromordinal(int(mdt))
dfrac = timedelta(days=mdt%1) - timedt
pydt.append(d + dfrac)
return np.array(pydt)
def npdt2dt(tnp):
"""
USAGE
-----
t_datetime = npdt2dt(t_numpydatetime64)
Convert an array of numpy.datetime64 timestamps to datetime.datetime.
"""
return np.array([Timestamp(ti).to_pydatetime() for ti in tnp])
def dt2sfloat(t):
"""
USAGE
-----
t_float = dt2sfloat(t_datetime)
Convert an array of datetime.datetime timestamps to an array of floats
representing elapsed seconds since the first timestamp.
"""
t = np.array(t)
t0 = t[0]
return np.array([(tn - t0).total_seconds() for tn in t])
def doy2date(doy, year=2017):
"""
USAGE
-----
t = doy2date(doy, year=2017)
Convert an array `doy` of decimal yeardays to
an array of datetime.datetime timestamps.
"""
doy = np.array(doy)*86400 # [seconds/day].
tunit = 'seconds since %d-01-01 00:00:00'%year
return np.array([num2date(dn, tunit) for dn in doy])
def flowfun(x, y, u, v, variable='psi', geographic=True):
"""
FLOWFUN Computes the potential PHI and the streamfunction PSI
of a 2-dimensional flow defined by the matrices of velocity
components U and V, so that
d(PHI) d(PSI) d(PHI) d(PSI)
u = ----- - ----- , v = ----- + -----
dx dy dx dy
P = FLOWFUN(x,y,u,v) returns an array P of the same size as u and v,
which can be the velocity potential (PHI) or the streamfunction (PSI)
Because these scalar fields are defined up to the integration constant,
their absolute values are such that PHI[0,0] = PSI[0,0] = 0.
For a potential (irrotational) flow PSI = 0, and the Laplacian
of PSI is equal to the divergence of the velocity field.
A solenoidal (non-divergent) flow can be described by the
streamfunction alone, and the Laplacian of the streamfunction
is equal to the vorticity (curl) of the velocity field.
The units of the grid coordinates are assumed to be consistent
with the units of the velocity components, e.g., [m] and [m/s].
If variable=='psi', the streamfunction (PSI) is returned.
If variable=='phi', the velocity potential (PHI) is returned.
If geographic==True (default), (x,y) are assumed to be
(longitude,latitude) and are converted to meters before
computing (dx,dy).
If geographic==False, (x,y) are assumed to be in meters.
Uses function 'cumsimp()' (Simpson rule summation).
Author: Kirill K. Pankratov, March 7, 1994.
Source: http://www-pord.ucsd.edu/~matlab/stream.htm
Translated to Python by André Palóczy, January 15, 2015.
Modified by André Palóczy on January 15, 2015.
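    Example
    -------
    A minimal sketch with synthetic data (solid-body rotation, a non-divergent flow):
    >>> import numpy as np
    >>> from ap_tools.utils import flowfun
    >>> x, y = np.meshgrid(np.arange(-5., 5., 0.5), np.arange(-5., 5., 0.5))
    >>> u, v = -y, x  # [m/s]
    >>> psi = flowfun(x, y, u, v, variable='psi', geographic=False)  # psi ~ (x**2 + y**2)/2 + constant.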
"""
x,y,u,v = map(np.asanyarray, (x,y,u,v))
if not x.shape==y.shape==u.shape==v.shape:
print("Error: Arrays (x, y, u, v) must be of equal shape.")
return
## Calculating grid spacings.
if geographic:
dlat, _ = np.gradient(y)
_, dlon = np.gradient(x)
deg2m = 111120.0 # [m/deg]
dx = dlon*deg2m*np.cos(y*np.pi/180.) # [m]
dy = dlat*deg2m # [m]
else:
dy, _ = np.gradient(y)
_, dx = np.gradient(x)
ly, lx = x.shape # Shape of the (x,y,u,v) arrays.
## Now the main computations.
## Integrate velocity fields to get potential and streamfunction.
## Use Simpson rule summation (function CUMSIMP).
## Compute velocity potential PHI (non-rotating part).
if variable=='phi':
cx = cumsimp(u[0,:]*dx[0,:]) # Compute x-integration constant
cy = cumsimp(v[:,0]*dy[:,0]) # Compute y-integration constant
cx = np.expand_dims(cx, 0)
cy = np.expand_dims(cy, 1)
phiy = cumsimp(v*dy) + np.tile(cx, (ly,1))
phix = cumsimp(u.T*dx.T).T + np.tile(cy, (1,lx))
phi = (phix + phiy)/2.
return phi
## Compute streamfunction PSI (non-divergent part).
if variable=='psi':
cx = cumsimp(v[0,:]*dx[0,:]) # Compute x-integration constant
cy = cumsimp(u[:,0]*dy[:,0]) # Compute y-integration constant
cx = np.expand_dims(cx, 0)
cy = np.expand_dims(cy, 1)
psix = -cumsimp(u*dy) + np.tile(cx, (ly,1))
psiy = cumsimp(v.T*dx.T).T - np.tile(cy, (1,lx))
psi = (psix + psiy)/2.
return psi
def cumsimp(y):
"""
F = CUMSIMP(Y) Simpson-rule column-wise cumulative summation.
Numerical approximation of a function F(x) such that
Y(X) = dF/dX. Each column of the input matrix Y represents
the value of the integrand Y(X) at equally spaced points
X = 0,1,...size(Y,1).
The output is a matrix F of the same size as Y.
The first row of F is equal to zero and each following row
is the approximation of the integral of each column of matrix
    Y up to the given row.
CUMSIMP assumes continuity of each column of the function Y(X)
and uses Simpson rule summation.
    Similar to the command F = CUMSUM(Y), except for zero first
row and more accurate summation (under the assumption of
continuous integrand Y(X)).
Author: Kirill K. Pankratov, March 7, 1994.
Source: http://www-pord.ucsd.edu/~matlab/stream.htm
Translated to Python by André Palóczy, January 15, 2015.
"""
y = np.asanyarray(y)
## 3-point interpolation coefficients to midpoints.
## Second-order polynomial (parabolic) interpolation coefficients
## from Xbasis = [0 1 2] to Xint = [.5 1.5]
c1 = 3/8.
c2 = 6/8.
c3 = -1/8.
if y.ndim==1:
y = np.expand_dims(y,1)
f = np.zeros((y.size,1)) # Initialize summation array.
squeeze_after = True
elif y.ndim==2:
f = np.zeros(y.shape) # Initialize summation array.
squeeze_after = False
else:
print("Error: Input array has more than 2 dimensions.")
return
if y.size==2: # If only 2 elements in columns - simple average.
f[1,:] = (y[0,:] + y[1,:])/2.
return f
else: # If more than two elements in columns - Simpson summation.
## Interpolate values of y to all midpoints.
f[1:-1,:] = c1*y[:-2,:] + c2*y[1:-1,:] + c3*y[2:,:]
f[2:,:] = f[2:,:] + c3*y[:-2,:] + c2*y[1:-1,:] + c1*y[2:,:]
f[1,:] = f[1,:]*2
f[-1,:] = f[-1,:]*2
## Simpson (1,4,1) rule.
f[1:,:] = 2*f[1:,:] + y[:-1,:] + y[1:,:]
f = np.cumsum(f, axis=0)/6. # Cumulative sum, 6 - denominator from the Simpson rule.
if squeeze_after:
f = f.squeeze()
return f
def rot_vec(u, v, angle=-45, degrees=True):
"""
USAGE
-----
u_rot,v_rot = rot_vec(u,v,angle=-45.,degrees=True)
Returns the rotated vector components (`u_rot`,`v_rot`)
from the zonal-meridional input vector components (`u`,`v`).
The rotation is done using the angle `angle` positive counterclockwise
    (trigonometric convention). If `degrees` is set to `True` (default),
    then `angle` is converted to radians.
Example
-------
>>> from matplotlib.pyplot import quiver
>>> from ap_tools.utils import rot_vec
>>> u = -1.
>>> v = -1.
>>> u2,v2 = rot_vec(u,v, angle=-30.)
"""
u,v = map(np.asanyarray, (u,v))
if degrees:
angle = angle*np.pi/180. # Degrees to radians.
u_rot = +u*np.cos(angle) + v*np.sin(angle) # Usually the across-shore component.
v_rot = -u*np.sin(angle) + v*np.cos(angle) # Usually the along-shore component.
return u_rot,v_rot
def avgdir(dirs, degrees=False, axis=None):
"""
USAGE
-----
dirm = avgdir(dirs, degrees=False, axis=None)
Calculate the mean direction of an array of directions 'dirs'.
If 'degrees' is 'False' (default), the input directions must be
in radians. If 'degrees' is 'True', the input directions must be
in degrees.
The direction angle is measured from the ZONAL axis, i.e.,
(0, 90, -90) deg are (Eastward, Northward, Southward).
180 and -180 deg are both Westward.
If 'axis' is 'None' (default) the mean is calculated on the
flattened array. Otherwise, 'axis' is the index of the axis
to calculate the mean over.
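    Example
    -------
    Illustrative sketch:
    >>> from ap_tools.utils import avgdir
    >>> dirm = avgdir([0., 90.], degrees=True)  # ~45 deg (mean of eastward and northward).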
"""
dirs = np.array(dirs)
if degrees:
dirs = dirs*np.pi/180 # Degrees to radians.
uxs = np.cos(dirs)
vys = np.sin(dirs)
dirm = np.arctan2(vys.sum(axis=axis), uxs.sum(axis=axis))
if degrees:
dirm = dirm*180/np.pi # From radians to degrees.
return dirm
def lon180to360(lon):
"""
Converts longitude values in the range [-180,+180]
to longitude values in the range [0,360].
"""
lon = np.asanyarray(lon)
return (lon + 360.0) % 360.0
def lon360to180(lon):
"""
Converts longitude values in the range [0,360]
to longitude values in the range [-180,+180].
"""
lon = np.asanyarray(lon)
return ((lon + 180.) % 360.) - 180.
def bbox2ij(lon, lat, bbox=[-135., -85., -76., -64.], FIX_IDL=True):
"""
USAGE
-----
ilon_start, ilon_end, jlat_start, jlat_end = bbox2ij(lon, lat, bbox=[-135., -85., -76., -64.], FIX_IDL=True)
OR
(ilon_start_left, ilon_end_left, jlat_start, jlat_end), (ilon_start_right, ilon_end_right, jlat_start, jlat_end) = ...
... bbox2ij(lon, lat, bbox=[-135., -85., -76., -64.], FIX_IDL=True)
Return indices for i,j that will completely cover the specified bounding box. 'lon' and 'lat' are 2D coordinate arrays
(generated by meshgrid), and 'bbox' is a list like [lon_start, lon_end, lat_start, lat_end] describing the desired
longitude-latitude box.
If the specified bbox is such that it crosses the edges of the longitude array, two tuples of indices are returned.
The first (second) tuple traces out the left (right) part of the bbox.
If FIX_IDL is set to 'True' (default), the indices returned correspond to the "short route" around the globe, which
    amounts to assuming that the specified bbox crosses the International Date Line. If FIX_IDL is set to 'False', the
"long route" is used instead.
Example
-------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> lon = np.arange(-180., 180.25, 0.25)
>>> lat = np.arange(-90., 90.25, 0.25)
>>> lon, lat = np.meshgrid(lon, lat)
>>> h = np.sin(lon) + np.cos(lat)
>>> i0, i1, j0, j1 = bbox2ij(lon, lat, bbox=[-71, -63., 39., 46])
>>> h_subset = h[j0:j1,i0:i1]
>>> lon_subset = lon[j0:j1,i0:i1]
>>> lat_subset = lat[j0:j1,i0:i1]
>>> fig, ax = plt.subplots()
>>> ax.pcolor(lon_subset,lat_subset,h_subset)
>>> plt.axis('tight')
Original function downloaded from http://gis.stackexchange.com/questions/71630/subsetting-a-curvilinear-netcdf-file-roms-model-output-using-a-lon-lat-boundin
Modified by André Palóczy on August 20, 2016 to handle bboxes that
cross the International Date Line or the edges of the longitude array.
"""
lon, lat, bbox = map(np.asanyarray, (lon, lat, bbox))
    # Test whether the wanted bbox crosses the International Date Line (branch cut of the longitude array).
dlon = bbox[:2].ptp()
IDL_BBOX=dlon>180.
IDL_BBOX=np.logical_and(IDL_BBOX, FIX_IDL)
mypath = np.array([bbox[[0,1,1,0]], bbox[[2,2,3,3]]]).T
p = path.Path(mypath)
points = np.vstack((lon.flatten(), lat.flatten())).T
n, m = lon.shape
inside = p.contains_points(points).reshape((n, m))
    # Fix mask if bbox goes through the International Date Line.
if IDL_BBOX:
fcol=np.all(~inside, axis=0)
flin=np.any(inside, axis=1)
fcol, flin = map(np.expand_dims, (fcol, flin), (0, 1))
fcol = np.tile(fcol, (n, 1))
flin = np.tile(flin, (1, m))
inside=np.logical_and(flin, fcol)
print("Bbox crosses the International Date Line.")
ii, jj = np.meshgrid(range(m), range(n))
iiin, jjin = ii[inside], jj[inside]
i0, i1, j0, j1 = min(iiin), max(iiin), min(jjin), max(jjin)
SPLIT_BBOX=(i1-i0)==(m-1) # Test whether the wanted bbox crosses edges of the longitude array.
# If wanted bbox crosses edges of the longitude array, return indices for the two boxes separately.
if SPLIT_BBOX:
Iiin = np.unique(iiin)
ib0 = np.diff(Iiin).argmax() # Find edge of the inner side of the left bbox.
ib1 = ib0 + 1 # Find edge of the inner side of the right bbox.
Il, Ir = Iiin[ib0], Iiin[ib1] # Indices of the columns that bound the inner side of the two bboxes.
print("Bbox crosses edges of the longitude array. Returning two sets of indices.")
return (i0, Il, j0, j1), (Ir, i1, j0, j1)
else:
return i0, i1, j0, j1
def xy2dist(x, y, cyclic=False, datum='WGS84'):
"""
USAGE
-----
d = xy2dist(x, y, cyclic=False, datum='WGS84')
Calculates a distance axis from a line defined by longitudes and latitudes
    'x' and 'y', using either the Vincenty formulae on an ellipsoidal earth
(ellipsoid defaults to WGS84) or on a sphere (if datum=='Sphere').
Example
-------
>>> yi, yf = -23.550520, 32.71573800
>>> xi, xf = -46.633309, -117.161084
>>> x, y = np.linspace(xi, xf), np.linspace(yi, yf)
>>> d_ellipse = xy2dist(x, y, datum='WGS84')[-1]*1e-3 # [km].
>>> d_sphere = xy2dist(x, y, datum='Sphere')[-1]*1e-3 # [km].
>>> dd = np.abs(d_ellipse - d_sphere)
>>> dperc = 100*dd/d_ellipse
>>> msg = 'Difference of %.1f km over a %.0f km-long line (%.3f %% difference)'%(dd, d_ellipse, dperc)
>>> print(msg)
"""
if datum!="Sphere":
xy = [LatLon(y0, x0, datum=Datums[datum]) for x0, y0 in zip(x, y)]
else:
xy = [LatLon_sphere(y0, x0) for x0, y0 in zip(x, y)]
d = np.array([xy[n].distanceTo(xy[n+1]) for n in range(len(xy)-1)])
return np.append(0, np.cumsum(d))
def get_xtrackline(lon1, lon2, lat1, lat2, L=200, dL=10):
"""
USAGE
-----
    lonp, latp = get_xtrackline(lon1, lon2, lat1, lat2, L=200, dL=10)
Generates a great-circle line with length 2L (with L in km) that is perpendicular to the great-circle line
defined by the input points (lon1, lat1) and (lon2, lat2). The spacing between the points along the output
line is dL km. Assumes a spherical Earth.
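    Example
    -------
    Illustrative sketch (hypothetical coordinates):
    >>> from ap_tools.utils import get_xtrackline
    >>> lonp, latp = get_xtrackline(-45., -44., -24., -23., L=100, dL=10)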
"""
km2m = 1e3
L, dL = L*km2m, dL*km2m
nh = int(L/dL)
p1, p2 = LatLon_sphere(lat1, lon1), LatLon_sphere(lat2, lon2)
angperp = p1.initialBearingTo(p2) + 90
angperpb = angperp + 180
pm = p1.midpointTo(p2)
# Create perpendicular line starting from the midpoint.
N = range(1, nh + 1)
pperp = []
_ = [pperp.append(pm.destination(dL*n, angperpb)) for n in N]
pperp.reverse()
pperp.append(pm)
_ = [pperp.append(pm.destination(dL*n, angperp)) for n in N]
lonperp = np.array([p.lon for p in pperp])
latperp = np.array([p.lat for p in pperp])
return lonperp, latperp
def get_arrdepth(arr):
"""
USAGE
-----
arr_depths = get_arrdepth(arr)
Determine number of nested levels in each
element of an array of arrays of arrays...
(or other array-like objects).
"""
arr = np.array(arr) # Make sure first level is an array.
all_nlevs = []
for i in range(arr.size):
nlev=0
wrk_arr = arr[i]
while np.size(wrk_arr)>0:
try:
wrk_arr = np.array(wrk_arr[i])
except Exception:
all_nlevs.append(nlev)
nlev=0
break
nlev+=1
return np.array(all_nlevs)
def fpointsbox(x, y, fig, ax, nboxes=1, plot=True, pause_secs=5, return_index=True):
"""
USAGE
-----
fpts = fpointsbox(x, y, fig, ax, nboxes=1, plot=True, pause_secs=5, return_index=True)
Find points in a rectangle made with 2 ginput points.
"""
fpts = np.array([])
for n in range(nboxes):
box = np.array(fig.ginput(n=2, timeout=0))
try:
xb, yb = box[:,0], box[:,1]
except IndexError:
print("No points selected. Skipping box \# %d."%(n+1))
continue
xl, xr, yd, yu = xb.min(), xb.max(), yb.min(), yb.max()
xbox = np.array([xl, xr, xr, xl, xl])
ybox = np.array([yd, yd, yu, yu, yd])
fxbox, fybox = np.logical_and(x>xl, x<xr), np.logical_and(y>yd, y<yu)
fptsi = np.logical_and(fxbox, fybox)
if return_index:
fptsi = np.where(fptsi)[0]
fpts = np.append(fpts, fptsi)
if plot:
ax.plot(xbox, ybox, 'r', linestyle='solid', marker='o', ms=4)
ax.plot(x[fptsi], y[fptsi], 'r', linestyle='none', marker='+', ms=5)
plt.draw()
fig.show()
else:
fig.close()
if plot:
plt.draw()
fig.show()
system("sleep %d"%pause_secs)
return fpts
def near(x, x0, npts=1, return_index=False):
"""
USAGE
-----
xnear = near(x, x0, npts=1, return_index=False)
Finds 'npts' points (defaults to 1) in array 'x'
that are closest to a specified 'x0' point.
    If 'return_index' is True (defaults to False),
then the indices of the closest points are
returned. The indices are ordered in order of
closeness.
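    Example
    -------
    Illustrative sketch:
    >>> import numpy as np
    >>> from ap_tools.utils import near
    >>> x = np.arange(0., 10., 0.5)
    >>> xn = near(x, 3.3)           # Single closest value (3.5).
    >>> xn2 = near(x, 3.3, npts=2)  # Two closest values.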
"""
x = list(x)
xnear = []
xidxs = []
for n in range(npts):
idx = np.nanargmin(np.abs(np.array(x)-x0))
xnear.append(x.pop(idx))
if return_index:
xidxs.append(idx)
if return_index: # Sort indices according to the proximity of wanted points.
xidxs = [xidxs[i] for i in np.argsort(xnear).tolist()]
xnear.sort()
if npts==1:
xnear = xnear[0]
if return_index:
xidxs = xidxs[0]
else:
xnear = np.array(xnear)
if return_index:
return xidxs
else:
return xnear
def near2(x, y, x0, y0, npts=1, return_index=False):
"""
USAGE
-----
xnear, ynear = near2(x, y, x0, y0, npts=1, return_index=False)
Finds 'npts' points (defaults to 1) in arrays 'x' and 'y'
that are closest to a specified '(x0, y0)' point. If
    'return_index' is True (defaults to False), then the
indices of the closest point(s) are returned.
Example
-------
>>> x = np.arange(0., 100., 0.25)
>>> y = np.arange(0., 100., 0.25)
>>> x, y = np.meshgrid(x, y)
>>> x0, y0 = 44.1, 30.9
>>> xn, yn = near2(x, y, x0, y0, npts=1)
>>> print("(x0, y0) = (%f, %f)"%(x0, y0))
>>> print("(xn, yn) = (%f, %f)"%(xn, yn))
"""
x, y = map(np.array, (x, y))
shp = x.shape
xynear = []
xyidxs = []
dx = x - x0
dy = y - y0
dr = dx**2 + dy**2
for n in range(npts):
        xyidx = np.unravel_index(np.nanargmin(dr), shp)
if return_index:
xyidxs.append(xyidx)
xyn = (x[xyidx], y[xyidx])
xynear.append(xyn)
dr[xyidx] = np.nan
if npts==1:
xynear = xynear[0]
if return_index:
xyidxs = xyidxs[0]
if return_index:
return xyidxs
else:
return xynear
def mnear(x, y, x0, y0):
"""
USAGE
-----
xmin,ymin = mnear(x, y, x0, y0)
    Finds the point in a (lons,lats) line
that is closest to a specified (lon0,lat0) point.
"""
x,y,x0,y0 = map(np.asanyarray, (x,y,x0,y0))
point = (x0,y0)
d = np.array([])
for n in range(x.size):
xn,yn = x[n],y[n]
dn = distance((xn,x0),(yn,y0)) # Calculate distance point-wise.
d = np.append(d,dn)
idx = d.argmin()
return x[idx],y[idx]
def refine(line, nref=100, close=True):
"""
USAGE
-----
ref_line = refine(line, nref=100, close=True)
Given a 1-D sequence of points 'line', returns a
new sequence 'ref_line', which is built by linearly
interpolating 'nref' points between each pair of
subsequent points in the original line.
If 'close' is True (default), the first value of
the original line is repeated at the end of the
refined line, as in a closed polygon.
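    Example
    -------
    Illustrative sketch:
    >>> import numpy as np
    >>> from ap_tools.utils import refine
    >>> line = np.array([0., 1., 2.])
    >>> ref_line = refine(line, nref=10, close=False)  # 10 interpolated points per segment.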
"""
line = np.squeeze(np.asanyarray(line))
if close:
line = np.append(line,line[0])
ref_line = np.array([])
for n in range(line.shape[0]-1):
xi, xf = line[n], line[n+1]
xref = np.linspace(xi,xf,nref)
ref_line = np.append(ref_line, xref)
return ref_line
def point_in_poly(x,y,poly):
"""
USAGE
-----
isinside = point_in_poly(x,y,poly)
    Determine if a point is inside a given polygon or not.
    Polygon is a list of (x,y) pairs. This function
returns True or False. The algorithm is called
'Ray Casting Method'.
Source: http://pseentertainmentcorp.com/smf/index.php?topic=545.0
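    Example
    -------
    Illustrative sketch with a unit square:
    >>> from ap_tools.utils import point_in_poly
    >>> poly = [(0., 0.), (1., 0.), (1., 1.), (0., 1.)]
    >>> point_in_poly(0.5, 0.5, poly)
    True
    >>> point_in_poly(1.5, 0.5, poly)
    False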
"""
n = len(poly)
inside = False
p1x,p1y = poly[0]
for i in range(n+1):
p2x,p2y = poly[i % n]
if y > min(p1y,p2y):
if y <= max(p1y,p2y):
if x <= max(p1x,p2x):
if p1y != p2y:
xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
if p1x == p2x or x <= xinters:
inside = not inside
p1x,p1y = p2x,p2y
return inside
def get_mask_from_poly(xp, yp, poly, verbose=False):
"""
USAGE
-----
mask = get_mask_from_poly(xp, yp, poly, verbose=False)
Given two arrays 'xp' and 'yp' of (x,y) coordinates (generated by meshgrid)
and a polygon defined by an array of (x,y) coordinates 'poly', with
shape = (n,2), return a boolean array 'mask', where points that lie inside
'poly' are set to 'True'.
"""
print('Building the polygon mask...')
jmax, imax = xp.shape
mask = np.zeros((jmax,imax))
for j in range(jmax):
if verbose:
print("Row %s of %s"%(j+1,jmax))
for i in range(imax):
px, py = xp[j,i], yp[j,i]
# Test if this point is within the polygon.
mask[j,i] = point_in_poly(px, py, poly)
return mask
def sphericalpolygon_area(lons, lats, R=6371000.):
"""
USAGE
-----
area = sphericalpolygon_area(lons, lats, R=6371000.)
Calculates the area of a polygon on the surface of a sphere of
radius R using Girard's Theorem, which states that the area of
a polygon of great circles is R**2 times the sum of the angles
between the polygons minus (N-2)*pi, where N is number of corners.
R = 6371000 m (6371 km, default) is a typical value for the mean
radius of the Earth.
Source: http://stackoverflow.com/questions/4681737/how-to-calculate-the-area-of-a-polygon-on-the-earths-surface-using-python
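    Example
    -------
    Illustrative sketch (a spherical octant, one eighth of the sphere):
    >>> import numpy as np
    >>> from ap_tools.utils import sphericalpolygon_area
    >>> lons = np.array([0., 90., 0.])
    >>> lats = np.array([0., 0., 90.])
    >>> area = sphericalpolygon_area(lons, lats)  # ~ pi*R**2/2 [m2].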
"""
lons, lats = map(np.asanyarray, (lons, lats))
N = lons.size
angles = np.empty(N)
for i in range(N):
phiB1, phiA, phiB2 = np.roll(lats, i)[:3]
LB1, LA, LB2 = np.roll(lons, i)[:3]
# calculate angle with north (eastward)
beta1 = greatCircleBearing(LA, phiA, LB1, phiB1)
beta2 = greatCircleBearing(LA, phiA, LB2, phiB2)
# calculate angle between the polygons and add to angle array
angles[i] = np.arccos(np.cos(-beta1)*np.cos(-beta2) + np.sin(-beta1)*np.sin(-beta2))
return (np.sum(angles) - (N-2)*np.pi)*R**2
def greatCircleBearing(lon1, lat1, lon2, lat2):
"""
USAGE
-----
angle = greatCircleBearing(lon1, lat1, lon2, lat2)
Calculates the angle (positive eastward) a
great circle passing through points (lon1,lat1)
    and (lon2,lat2) makes with true north.
Source: http://stackoverflow.com/questions/4681737/how-to-calculate-the-area-of-a-polygon-on-the-earths-surface-using-python
"""
lon1, lat1, lon2, lat2 = map(np.asanyarray, (lon1, lat1, lon2, lat2))
dLong = lon1 - lon2
d2r = np.pi/180.
s = np.cos(d2r*lat2)*np.sin(d2r*dLong)
c = np.cos(d2r*lat1)*np.sin(d2r*lat2) - np.sin(lat1*d2r)*np.cos(d2r*lat2)*np.cos(d2r*dLong)
return np.arctan2(s, c)
def weim(x, N, kind='hann', badflag=-9999, beta=14):
"""
Usage
-----
xs = weim(x, N, kind='hann', badflag=-9999, beta=14)
Description
-----------
Calculates the smoothed array 'xs' from the original array 'x' using the specified
window of type 'kind' and size 'N'. 'N' must be an odd number.
Parameters
----------
x : 1D array
Array to be smoothed.
N : integer
Window size. Must be odd.
kind : string, optional
One of the window types available in the numpy module:
hann (default) : Gaussian-like. The weight decreases toward the ends. Its end-points are zeroed.
hamming : Similar to the hann window. Its end-points are not zeroed, therefore it is
discontinuous at the edges, and may produce undesired artifacts.
blackman : Similar to the hann and hamming windows, with sharper ends.
bartlett : Triangular-like. Its end-points are zeroed.
kaiser : Flexible shape. Takes the optional parameter "beta" as a shape parameter.
For beta=0, the window is rectangular. As beta increases, the window gets narrower.
Refer to the numpy functions for details about each window type.
badflag : float, optional
    The bad data flag. Elements of the input array 'x' holding this value are ignored.
beta : float, optional
Shape parameter for the kaiser window. For windows other than the kaiser window,
this parameter does nothing.
Returns
-------
xs : 1D array
The smoothed array.
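    Example
    -------
    A minimal sketch with synthetic data:
    >>> import numpy as np
    >>> from ap_tools.utils import weim
    >>> x = np.sin(np.linspace(0., 10., 200)) + 0.1*np.random.randn(200)
    >>> xs = weim(x, 11, kind='hann')  # 11-point Hann-window smoothing.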
---------------------------------------
André Palóczy Filho (paloczy@gmail.com)
June 2012
==============================================================================================================
"""
###########################################
### Checking window type and dimensions ###
###########################################
kinds = ['hann', 'hamming', 'blackman', 'bartlett', 'kaiser']
if ( kind not in kinds ):
raise ValueError('Invalid window type requested: %s'%kind)
if np.mod(N,2) == 0:
raise ValueError('Window size must be odd')
###########################
### Creating the window ###
###########################
if ( kind == 'kaiser' ): # If the window kind is kaiser (beta is required).
wstr = 'np.kaiser(N, beta)'
else: # If the window kind is hann, hamming, blackman or bartlett (beta is not required).
if kind == 'hann':
kind = 'hanning'
wstr = 'np.' + kind + '(N)'
w = eval(wstr)
x = np.asarray(x).flatten()
Fnan = np.isnan(x).flatten()
    ln = (N-1)//2  # Integer half-width of the window.
lx = x.size
lf = lx - ln
xs = np.nan*np.ones(lx)
# Eliminating bad data from mean computation.
fbad=x==badflag
x[fbad] = np.nan
for i in range(lx):
if i <= ln:
xx = x[:ln+i+1]
ww = w[ln-i:]
elif i >= lf:
xx = x[i-ln:]
ww = w[:lf-i-1]
else:
xx = x[i-ln:i+ln+1]
ww = w.copy()
f = ~np.isnan(xx) # Counting only NON-NaNs, both in the input array and in the window points.
xx = xx[f]
ww = ww[f]
if f.sum() == 0: # Thou shalt not divide by zero.
xs[i] = x[i]
else:
xs[i] = np.sum(xx*ww)/np.sum(ww)
xs[Fnan] = np.nan # Assigning NaN to the positions holding NaNs in the input array.
return xs
def smoo2(A, hei, wid, kind='hann', badflag=-9999, beta=14):
"""
Usage
-----
As = smoo2(A, hei, wid, kind='hann', badflag=-9999, beta=14)
Description
-----------
Calculates the smoothed array 'As' from the original array 'A' using the specified
window of type 'kind' and shape ('hei','wid').
Parameters
----------
A : 2D array
Array to be smoothed.
hei : integer
Window height. Must be odd and greater than or equal to 3.
wid : integer
Window width. Must be odd and greater than or equal to 3.
kind : string, optional
One of the window types available in the numpy module:
hann (default) : Gaussian-like. The weight decreases toward the ends. Its end-points are zeroed.
hamming : Similar to the hann window. Its end-points are not zeroed, therefore it is
discontinuous at the edges, and may produce undesired artifacts.
blackman : Similar to the hann and hamming windows, with sharper ends.
bartlett : Triangular-like. Its end-points are zeroed.
kaiser : Flexible shape. Takes the optional parameter "beta" as a shape parameter.
For beta=0, the window is rectangular. As beta increases, the window gets narrower.
Refer to the numpy functions for details about each window type.
badflag : float, optional
The bad data flag. Elements of the input array 'A' holding this value are ignored.
beta : float, optional
Shape parameter for the kaiser window. For windows other than the kaiser window,
this parameter does nothing.
Returns
-------
As : 2D array
The smoothed array.
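    Example
    -------
    A minimal sketch with synthetic data:
    >>> import numpy as np
    >>> from ap_tools.utils import smoo2
    >>> A = np.random.randn(50, 60)
    >>> As = smoo2(A, 5, 7, kind='hann')  # 5x7 Hann-window smoothing.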
---------------------------------------
André Palóczy Filho (paloczy@gmail.com)
April 2012
==============================================================================================================
"""
###########################################
### Checking window type and dimensions ###
###########################################
kinds = ['hann', 'hamming', 'blackman', 'bartlett', 'kaiser']
if ( kind not in kinds ):
raise ValueError('Invalid window type requested: %s'%kind)
if ( np.mod(hei,2) == 0 ) or ( np.mod(wid,2) == 0 ):
raise ValueError('Window dimensions must be odd')
if (hei <= 1) or (wid <= 1):
raise ValueError('Window shape must be (3,3) or greater')
##############################
### Creating the 2D window ###
##############################
if ( kind == 'kaiser' ): # If the window kind is kaiser (beta is required).
wstr = 'np.outer(np.kaiser(hei, beta), np.kaiser(wid, beta))'
else: # If the window kind is hann, hamming, blackman or bartlett (beta is not required).
if kind == 'hann':
kind = 'hanning'
# computing outer product to make a 2D window out of the original 1d windows.
wstr = 'np.outer(np.' + kind + '(hei), np.' + kind + '(wid))'
wdw = eval(wstr)
A = np.asanyarray(A)
Fnan = np.isnan(A)
imax, jmax = A.shape
As = np.nan*np.ones( (imax, jmax) )
for i in range(imax):
for j in range(jmax):
### Default window parameters.
wupp = 0
wlow = hei
wlef = 0
wrig = wid
            lh = int(np.floor(hei/2))
            lw = int(np.floor(wid/2))
### Default array ranges (functions of the i,j indices).
upp = i-lh
low = i+lh+1
lef = j-lw
rig = j+lw+1
##################################################
### Tiling window and input array at the edges ###
##################################################
# Upper edge.
if upp < 0:
wupp = wupp-upp
upp = 0
# Left edge.
if lef < 0:
wlef = wlef-lef
lef = 0
# Bottom edge.
if low > imax:
ex = low-imax
wlow = wlow-ex
low = imax
# Right edge.
if rig > jmax:
ex = rig-jmax
wrig = wrig-ex
rig = jmax
###############################################
### Computing smoothed value at point (i,j) ###
###############################################
Ac = A[upp:low, lef:rig]
wdwc = wdw[wupp:wlow, wlef:wrig]
fnan = np.isnan(Ac)
Ac[fnan] = 0; wdwc[fnan] = 0 # Eliminating NaNs from mean computation.
fbad = Ac==badflag
wdwc[fbad] = 0 # Eliminating bad data from mean computation.
a = Ac * wdwc
As[i,j] = a.sum() / wdwc.sum()
As[Fnan] = np.nan # Assigning NaN to the positions holding NaNs in the input array.
return As
def denan(arr):
"""
USAGE
-----
denaned_arr = denan(arr)
Remove the NaNs from an array.
"""
f = np.isnan(arr)
return arr[~f]
def standardize(series):
"""
USAGE
-----
series2 = standardize(series)
Standardizes a series by subtracting its mean value
and dividing by its standard deviation. The result is
a dimensionless series. Inputs can be of type
"np.array", or "Pandas.Series"/"Pandas.TimeSeries".
"""
Mean, Std = series.mean(), series.std()
return (series - Mean)/Std
def linear_trend(series, return_line=True):
"""
USAGE
-----
line = linear_trend(series, return_line=True)
OR
b, a, x = linear_trend(series, return_line=False)
Returns the linear fit (line = b*x + a) associated
with the 'series' array.
Adapted from pylab.detrend_linear.
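    Example
    -------
    A minimal sketch with synthetic data:
    >>> import numpy as np
    >>> from ap_tools.utils import linear_trend
    >>> series = 0.5*np.arange(50.) + np.random.randn(50)
    >>> line = linear_trend(series)                         # Fitted straight line.
    >>> b, a, x = linear_trend(series, return_line=False)   # Slope and intercept.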
"""
series = np.asanyarray(series)
    x = np.arange(series.size, dtype=np.float64)
C = np.cov(x, series, bias=1) # Covariance matrix.
b = C[0, 1]/C[0, 0] # Angular coefficient.
a = series.mean() - b*x.mean() # Linear coefficient.
line = b*x + a
if return_line:
return line
else:
return b, a, x
def thomas(A, b):
"""
USAGE
-----
x = thomas(A,b)
Solve Ax = b (where A is a tridiagonal matrix)
using the Thomas Algorithm.
References
----------
For a step-by-step derivation of the algorithm, see
e.g., http://www3.ul.ie/wlee/ms6021_thomas.pdf
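    Example
    -------
    Illustrative sketch with a small tridiagonal system:
    >>> import numpy as np
    >>> from ap_tools.utils import thomas
    >>> A = np.array([[2., 1., 0.], [1., 2., 1.], [0., 1., 2.]])
    >>> b = np.array([4., 8., 8.])   # A.dot([1., 2., 3.])
    >>> x = thomas(A, b)             # Recovers [1., 2., 3.].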
"""
# Step 1: Sweep rows from top to bottom,
# calculating gammas and rhos along the way.
N = b.size
gam = [float(A[0,1]/A[0,0])]
rho = [float(b[0]/A[0,0])]
for i in range(0, N):
rho.append(float((b[i] - A[i,i-1]*rho[-1])/(A[i,i] - A[i,i-1]*gam[-1])))
if i<N-1: # No gamma in the last row.
gam.append(float(A[i,i+1]/(A[i,i] - A[i,i-1]*gam[-1])))
# Step 2: Substitute solutions for unknowns
# starting from the bottom row all the way up.
x = [] # Vector of unknowns.
x.append(rho.pop()) # Last row is already solved.
for i in range(N-2, -1, -1):
x.append(float(rho.pop() - gam.pop()*x[-1]))
x.reverse()
return np.array(x)
def topo_slope(lon, lat, h):
"""
USAGE
-----
lons, lats, slope = topo_slope(lon, lat, h)
    Calculates the bottom slope for a topography field 'h' at
coordinates ('lon', 'lat') using first-order finite differences.
The output arrays have shape (M-1,L-1), where M,L = h.shape().
"""
lon,lat,h = map(np.asanyarray, (lon,lat,h))
deg2m = 1852.*60. # m/deg.
deg2rad = np.pi/180. # rad/deg.
x = lon*deg2m*np.cos(lat*deg2rad)
y = lat*deg2m
# First-order differences, accurate to O(dx) and O(dy),
# respectively.
sx = (h[:,1:] - h[:,:-1]) / (x[:,1:] - x[:,:-1])
sy = (h[1:,:] - h[:-1,:]) / (y[1:,:] - y[:-1,:])
# Finding the values of the derivatives sx and sy
# at the same location in physical space.
sx = 0.5*(sx[1:,:]+sx[:-1,:])
sy = 0.5*(sy[:,1:]+sy[:,:-1])
# Calculating the bottom slope.
slope = np.sqrt(sx**2 + sy**2)
# Finding the lon,lat coordinates of the
# values of the derivatives sx and sy.
lons = 0.5*(lon[1:,:]+lon[:-1,:])
lats = 0.5*(lat[1:,:]+lat[:-1,:])
lons = 0.5*(lons[:,1:]+lons[:,:-1])
lats = 0.5*(lats[:,1:]+lats[:,:-1])
return lons, lats, slope
def curvature_geometric(x, y):
"""
USAGE
-----
k = curvature_geometric(x, y)
Estimates the curvature k of a 2D curve (x,y) using a geometric method.
If your curve is given by two arrays, x and y, you can
approximate its curvature at each point by the reciprocal of the
radius of a circumscribing triangle with that point, the preceding
point, and the succeeding point as vertices. The radius of such a
triangle is one fourth the product of the three sides divided by its
area.
The curvature will be positive for curvature to the left and
negative for curvature to the right as you advance along the curve.
Note that if your data are too closely spaced together or subject
to substantial noise errors, this formula will not be very accurate.
Author: Roger Stafford
Source: http://www.mathworks.com/matlabcentral/newsreader/view_thread/125637
Translated to Python by André Palóczy, January 19, 2015.
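    Example
    -------
    Illustrative sketch (a counterclockwise circle of radius 2 has curvature +0.5):
    >>> import numpy as np
    >>> from ap_tools.utils import curvature_geometric
    >>> t = np.linspace(0., 2*np.pi, 100)
    >>> x, y = 2*np.cos(t), 2*np.sin(t)
    >>> k = curvature_geometric(x, y)  # ~0.5 everywhere.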
"""
x,y = map(np.asanyarray, (x,y))
x1 = x[:-2]; x2 = x[1:-1]; x3 = x[2:]
y1 = y[:-2]; y2 = y[1:-1]; y3 = y[2:]
## a, b, and c are the three sides of the triangle.
a = np.sqrt((x3-x2)**2 + (y3-y2)**2)
b = np.sqrt((x1-x3)**2 + (y1-y3)**2)
c = np.sqrt((x2-x1)**2 + (y2-y1)**2)
## A is the area of the triangle.
A = 0.5*(x1*y2 + x2*y3 + x3*y1 - x1*y3 - x2*y1 - x3*y2)
## The reciprocal of the circumscribed radius, i.e., the curvature.
k = 4.0*A/(a*b*c)
return np.squeeze(k)
def get_isobath(lon, lat, topo, iso, cyclic=False, smooth_isobath=False, window_length=21, win_type='barthann', **kw):
"""
USAGE
-----
lon_isob, lat_isob = get_isobath(lon, lat, topo, iso, cyclic=False, smooth_isobath=False, window_length=21, win_type='barthann', **kw)
Retrieves the 'lon_isob','lat_isob' coordinates of a wanted 'iso'
    isobath from a topography array 'topo', with coordinate arrays
    'lon' and 'lat'.
"""
lon, lat, topo = map(np.array, (lon, lat, topo))
fig, ax = plt.subplots()
cs = ax.contour(lon, lat, topo, [iso])
coll = cs.collections[0]
    ## Test all lines to find the longest one.
## This is assumed to be the wanted isobath.
ncoll = len(coll.get_paths())
siz = np.array([])
for n in range(ncoll):
path = coll.get_paths()[n]
siz = np.append(siz, path.vertices.shape[0])
f = siz.argmax()
xiso = coll.get_paths()[f].vertices[:, 0]
yiso = coll.get_paths()[f].vertices[:, 1]
plt.close()
# Smooth the isobath with a moving window.
# Periodize according to window length to avoid losing edges.
if smooth_isobath:
fleft = window_length//2
fright = -window_length//2 + 1
if cyclic:
xl = xiso[:fleft] + 360
xr = xiso[fright:] - 360
yl = yiso[:fleft]
yr = yiso[fright:]
xiso = np.concatenate((xr, xiso, xl))
yiso = np.concatenate((yr, yiso, yl))
# xiso = rolling_window(xiso, window=window_length, win_type=win_type, center=True, **kw)[fleft:fright] # FIXME
# yiso = rolling_window(yiso, window=window_length, win_type=win_type, center=True, **kw)[fleft:fright] # FIXME
# else:
# xiso = rolling_window(xiso, window=window_length, win_type=win_type, center=True, **kw) # FIXME
# yiso = rolling_window(yiso, window=window_length, win_type=win_type, center=True, **kw) # FIXME
return xiso, yiso
def angle_isobath(lon, lat, h, isobath=100, cyclic=False, smooth_isobath=True, window_length=21, win_type='barthann', plot_map=False, **kw):
"""
USAGE
-----
lon_isob, lat_isob, angle = angle_isobath(lon, lat, h, isobath=100, cyclic=False, smooth_isobath=True, window_length=21, win_type='barthann', plot_map=False, **kw)
Returns the coordinates ('lon_isob', 'lat_isob') and the angle an isobath
makes with the zonal direction for a topography array 'h' at coordinates
('lon', 'lat'). Defaults to the 100 m isobath.
If 'smooth_isobath'==True, smooths the isobath with a rolling window of type
'win_type' and 'window_length' points wide.
    All keyword arguments are passed to 'pandas.Series.rolling()'.
If 'plot_map'==True, plots a map showing
    the isobath (and its smoothed version if smooth_isobath==True).
"""
lon, lat, h = map(np.array, (lon, lat, h))
R = 6371000.0 # Mean radius of the earth in meters (6371 km), from gsw.constants.earth_radius.
deg2rad = np.pi/180. # [rad/deg]
# Extract isobath coordinates
xiso, yiso = get_isobath(lon, lat, h, isobath)
if cyclic: # Add cyclic point.
xiso = np.append(xiso, xiso[0])
yiso = np.append(yiso, yiso[0])
# Smooth the isobath with a moving window.
if smooth_isobath:
        # pandas.rolling_window() was removed; use the Series.rolling() interface instead (cf. the FIXME note at the imports).
        xiso = Series(xiso).rolling(window=window_length, win_type=win_type, **kw).mean().values
        yiso = Series(yiso).rolling(window=window_length, win_type=win_type, **kw).mean().values
# From the coordinates of the isobath, find the angle it forms with the
# zonal axis, using points k+1 and k.
shth = yiso.size-1
theta = np.zeros(shth)
for k in range(shth):
dyk = R*(yiso[k+1]-yiso[k])
dxk = R*(xiso[k+1]-xiso[k])*np.cos(yiso[k]*deg2rad)
theta[k] = np.arctan2(dyk,dxk)
xisom = 0.5*(xiso[1:] + xiso[:-1])
yisom = 0.5*(yiso[1:] + yiso[:-1])
# Plots map showing the extracted isobath.
if plot_map:
fig, ax = plt.subplots()
m = bb_map([lon.min(), lon.max()], [lat.min(), lat.max()], projection='cyl', resolution='h', ax=ax)
m.plot(xisom, yisom, color='b', linestyle='-', zorder=3, latlon=True)
input("Press any key to continue.")
plt.close()
return xisom, yisom, theta
def isopyc_depth(z, dens0, isopyc=1027.75, dzref=1.):
"""
USAGE
-----
hisopyc = isopyc_depth(z, dens0, isopyc=1027.75)
Calculates the spatial distribution of the depth of a specified isopycnal 'isopyc'
(defaults to 1027.75 kg/m3) from a 3D density array rho0 (in kg/m3) with shape
(nz,ny,nx) and a 1D depth array 'z' (in m) with shape (nz).
'dzref' is the desired resolution for the refined depth array (defaults to 1 m) which
is generated for calculating the depth of the isopycnal. The smaller 'dzref', the smoother
the resolution of the returned isopycnal depth array 'hisopyc'.
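    Example
    -------
    Illustrative sketch with a linearly stratified synthetic density field:
    >>> import numpy as np
    >>> from ap_tools.utils import isopyc_depth
    >>> z = np.arange(0., 200., 1.)
    >>> dens0 = 1026. + 0.01*z[:, np.newaxis, np.newaxis]*np.ones((1, 2, 2))
    >>> hisopyc = isopyc_depth(z, dens0, isopyc=1027.)  # ~100 m everywhere.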
"""
z, dens0 = map(np.asanyarray, (z, dens0))
ny, nx = dens0.shape[1:]
zref = np.arange(z.min(), z.max(), dzref)
if np.ma.isMaskedArray(dens0):
dens0 = np.ma.filled(dens0, np.nan)
hisopyc = np.nan*np.ones((ny,nx))
for j in range(ny):
for i in range(nx):
dens0ij = dens0[:,j,i]
if np.logical_or(np.logical_or(isopyc<np.nanmin(dens0ij), np.nanmax(dens0ij)<isopyc), np.isnan(dens0ij).all()):
continue
else:
dens0ref = np.interp(zref, z, dens0ij) # Refined density profile.
dens0refn = near(dens0ref, isopyc)
fz=dens0ref==dens0refn
try:
hisopyc[j,i] = zref[fz]
except ValueError:
print("Warning: More than 1 (%d) nearest depths found. Using the median of the depths for point (j=%d,i=%d)."%(fz.sum(), j, i))
hisopyc[j,i] = np.nanmedian(zref[fz])
return hisopyc
def whiten_zero(x, y, z, ax, cs, n=1, cmap=plt.cm.RdBu_r, zorder=9):
"""
USAGE
-----
whiten_zero(x, y, z, ax, cs, n=1, cmap=plt.cm.RdBu_r, zorder=9)
Changes to white the color of the 'n' (defaults to 1)
neighboring patches about the zero contour created
by a command like 'cs = ax.contourf(x, y, z)'.
"""
x, y, z = map(np.asanyarray, (x,y,z))
white = (1.,1.,1.)
cslevs = cs.levels
assert 0. in cslevs
f0=np.where(cslevs==0.)[0][0]
f0m, f0p = f0-n, f0+n
c0m, c0p = cslevs[f0m], cslevs[f0p]
ax.contourf(x, y, z, levels=[c0m, c0p], linestyles='none', colors=[white, white], cmap=None, zorder=zorder)
def wind2stress(u, v, formula='large_pond1981-modified'):
"""
USAGE
-----
    taux,tauy = wind2stress(u, v, formula='large_pond1981-modified')
Converts u,v wind vector components to taux,tauy
wind stress vector components.
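    Example
    -------
    Illustrative sketch with synthetic winds:
    >>> import numpy as np
    >>> from ap_tools.utils import wind2stress
    >>> u = np.array([2., 7., 12.])     # [m/s]
    >>> v = np.zeros(3)
    >>> taux, tauy = wind2stress(u, v)  # [N/m2], Large & Pond (1981) drag law.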
"""
rho_air = 1.226 # kg/m3
mag = np.sqrt(u**2+v**2) # m/s
Cd = np.zeros( mag.shape ) # Drag coefficient.
if formula=='large_pond1981-modified':
# Large and Pond (1981) formula
# modified for light winds, as
# in Trenberth et al. (1990).
f=mag<=1.
Cd[f] = 2.18e-3
f=np.logical_and(mag>1.,mag<3.)
Cd[f] = (0.62+1.56/mag[f])*1e-3
f=np.logical_and(mag>=3.,mag<10.)
Cd[f] = 1.14e-3
f=mag>=10.
Cd[f] = (0.49 + 0.065*mag[f])*1e-3
elif formula=='mellor2004':
Cd = 7.5e-4 + 6.7e-5*mag
else:
        print('Unknown formula for Cd: %s'%formula)
# Computing wind stress [N/m2]
taux = rho_air*Cd*mag*u
tauy = rho_air*Cd*mag*v
return taux,tauy
def gen_dates(start, end, dt='day', input_datetime=False):
"""
Returns a list of datetimes within the date range
from `start` to `end`, at a `dt` time interval.
`dt` can be 'second', 'minute', 'hour', 'day', 'week',
'month' or 'year'.
If `input_datetime` is False (default), `start` and `end`
must be a date in string form. If `input_datetime` is True,
`start` and `end` must be datetime objects.
Note
----
Modified from original function
by Filipe Fernandes (ocefpaf@gmail.com).
Example
-------
>>> from ap_tools.utils import gen_dates
>>> from datetime import datetime
>>> start = '1989-08-19'
>>> end = datetime.utcnow().strftime("%Y-%m-%d")
>>> gen_dates(start, end, dt='day')
"""
DT = dict(second=rrule.SECONDLY,
minute=rrule.MINUTELY,
hour=rrule.HOURLY,
day=rrule.DAILY,
week=rrule.WEEKLY,
month=rrule.MONTHLY,
year=rrule.YEARLY)
dt = DT[dt]
if input_datetime: # Input are datetime objects. No parsing needed.
dates = rrule.rrule(dt, dtstart=start, until=end)
else: # Input in string form, parse into datetime objects.
dates = rrule.rrule(dt, dtstart=parser.parse(start), until=parser.parse(end))
return list(dates)
def fmt_isobath(cs, fontsize=8, fmt='%g', inline=True, inline_spacing=7, manual=True, **kw):
"""
Formats the labels of isobath contours. `manual` is set to `True` by default,
but can be `False`, or a tuple/list of tuples with the coordinates of the labels.
All options are passed to plt.clabel().
"""
isobstrH = plt.clabel(cs, fontsize=fontsize, fmt=fmt, inline=inline, \
inline_spacing=inline_spacing, manual=manual, **kw)
for ih in range(0, len(isobstrH)): # Appends 'm' for meters at the end of the label.
isobstrh = isobstrH[ih]
isobstr = isobstrh.get_text()
isobstr = isobstr.replace('-','') + ' m'
isobstrh.set_text(isobstr)
def float2latex(f, ndigits=1):
"""
USAGE
-----
texstr = float2latex(f, ndigits=1)
Converts a float input into a latex-formatted
string with 'ndigits' (defaults to 1).
Adapted from:
http://stackoverflow.com/questions/13490292/format-number-using-latex-notation-in-python
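    Example
    -------
    Illustrative sketch:
    >>> from ap_tools.utils import float2latex
    >>> texstr = float2latex(3.45e-4, ndigits=2)  # Latex-formatted scientific notation.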
"""
float_str = "{0:.%se}"%ndigits
float_str = float_str.format(f)
base, exponent = float_str.split("e")
return "${0} \times 10^{{{1}}}$".format(base, int(exponent))
def mat2npz(matname):
"""
USAGE
-----
mat2npz(matname)
Extract variables stored in a .mat file,
and saves them in a .npz file.
"""
d = loadmat(matname)
_ = d.pop('__header__')
_ = d.pop('__globals__')
_ = d.pop('__version__')
npzname = matname[:-4] + '.npz'
np.savez(npzname,**d)
return None
def bb_map(lons, lats, ax, projection='merc', resolution='i', drawparallels=True, drawmeridians=True):
"""
USAGE
-----
m = bb_map(lons, lats, **kwargs)
Returns a Basemap instance with lon,lat bounding limits
inferred from the input arrays `lons`,`lats`.
Coastlines, countries, states, parallels and meridians
are drawn, and continents are filled.
"""
lons,lats = map(np.asanyarray, (lons,lats))
lonmin,lonmax = lons.min(),lons.max()
latmin,latmax = lats.min(),lats.max()
m = Basemap(llcrnrlon=lonmin,
urcrnrlon=lonmax,
llcrnrlat=latmin,
urcrnrlat=latmax,
projection=projection,
resolution=resolution,
ax=ax)
plt.ioff() # Avoid showing the figure.
m.fillcontinents(color='0.9', zorder=9)
m.drawcoastlines(zorder=10)
m.drawstates(zorder=10)
m.drawcountries(linewidth=2.0, zorder=10)
m.drawmapboundary(zorder=9999)
if drawmeridians:
m.drawmeridians(np.arange(np.floor(lonmin), np.ceil(lonmax), 1), linewidth=0.15, labels=[1, 0, 1, 0], zorder=12)
if drawparallels:
m.drawparallels(np.arange(np.floor(latmin), np.ceil(latmax), 1), linewidth=0.15, labels=[1, 0, 0, 0], zorder=12)
plt.ion()
return m
def dots_dualcolor(x, y, z, thresh=20., color_low='b', color_high='r', marker='o', markersize=5):
"""
USAGE
-----
dots_dualcolor(x, y, z, thresh=20., color_low='b', color_high='r')
Plots dots colored with a dual-color criterion,
separated by a threshold value.
"""
ax = plt.gca()
# Below-threshold dots.
f=z<=thresh
ax.plot(x[f], y[f], lw=0, marker=marker, ms=markersize, mfc=color_low, mec=color_low)
# Above-threshold dots.
f=z>thresh
ax.plot(x[f], y[f], lw=0, marker=marker, ms=markersize, mfc=color_high, mec=color_high)
if __name__=='__main__':
import doctest
doctest.testmod()
| mit |
miaecle/deepchem | examples/tox21/tox21_KernelSVM.py | 6 | 1174 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 23 16:02:07 2017
@author: zqwu
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import numpy as np
import deepchem as dc
import tempfile
from sklearn.svm import SVC
# Only for debug!
np.random.seed(123)
# Load Tox21 dataset
n_features = 1024
tox21_tasks, tox21_datasets, transformers = dc.molnet.load_tox21()
train_dataset, valid_dataset, test_dataset = tox21_datasets
# Fit models
metric = dc.metrics.Metric(dc.metrics.roc_auc_score, np.mean)
def model_builder(model_dir):
sklearn_model = SVC(C=1.0, class_weight="balanced", probability=True)
return dc.models.SklearnModel(sklearn_model, model_dir)
model_dir = tempfile.mkdtemp()
model = dc.models.SingletaskToMultitask(tox21_tasks, model_builder, model_dir)
# Fit trained model
model.fit(train_dataset)
model.save()
print("Evaluating model")
train_scores = model.evaluate(train_dataset, [metric], transformers)
valid_scores = model.evaluate(valid_dataset, [metric], transformers)
print("Train scores")
print(train_scores)
print("Validation scores")
print(valid_scores)
| mit |
idlead/scikit-learn | sklearn/naive_bayes.py | 29 | 28917 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Gaussian Naive Bayes supports fitting with *sample_weight*.
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
Variances for Gaussians in original set.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
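        Examples
        --------
        Illustrative sketch: combining a new batch with previously seen
        counts reproduces the pooled mean and variance.
        >>> import numpy as np
        >>> X1 = np.array([[1.], [2.], [3.]])
        >>> X2 = np.array([[4.], [5.]])
        >>> mu, var = X1.mean(axis=0), X1.var(axis=0)
        >>> tot_mu, tot_var = GaussianNB._update_mean_variance(3, mu, var, X2)
        >>> np.allclose(tot_mu, 3.) and np.allclose(tot_var, 2.)
        True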
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
as large as possible (as long as fitting in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
.. versionadded:: 0.17
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
# If the ratio of data variance between dimensions is too small, it
# will cause numerical errors. To address this, we artificially
# boost the variance by epsilon, a small fraction of the standard
# deviation of the largest dimension.
epsilon = 1e-9 * np.var(X, axis=0).max()
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
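# Illustrative sketch (editor's addition, not part of scikit-learn): a minimal
# out-of-core use of ``GaussianNB.partial_fit`` as described in its docstring
# above.  The helper name ``_demo_gaussian_nb_partial_fit`` is hypothetical;
# the function is defined for reference only and never called at import time.
def _demo_gaussian_nb_partial_fit():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randn(300, 4)
    y = (X[:, 0] + 0.5 * X[:, 1] > 0).astype(int)
    clf = GaussianNB()
    # ``classes`` must be supplied on the first call so the per-class
    # counters can be allocated before every label has been seen.
    clf.partial_fit(X[:100], y[:100], classes=np.array([0, 1]))
    for start in (100, 200):
        clf.partial_fit(X[start:start + 100], y[start:start + 100])
    # The incremental mean/variance merge is exact, so chunked fitting agrees
    # with a single fit on the full data up to floating point round-off.
    ref = GaussianNB().fit(X, y)
    assert np.allclose(clf.theta_, ref.theta_)
    assert np.allclose(clf.sigma_, ref.sigma_)
    return clf.predict(X[:5])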
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
        (as long as they fit in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
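# Illustrative sketch (editor's addition, not part of scikit-learn): the three
# branches of ``_update_class_log_prior`` above, written out in plain NumPy --
# an explicit prior, the empirical prior, and the uniform prior used when
# ``fit_prior=False``.  ``_demo_class_log_prior`` is a hypothetical name; the
# function is never called at import time.
def _demo_class_log_prior():
    import numpy as np
    class_count = np.array([30., 10.])
    explicit = np.log([0.75, 0.25])                     # class_prior supplied
    empirical = np.log(class_count) - np.log(class_count.sum())
    uniform = np.zeros(2) - np.log(2)                   # fit_prior=False
    assert np.allclose(empirical, np.log([0.75, 0.25]))
    assert np.allclose(np.exp(uniform), [0.5, 0.5])
    return explicit, empirical, uniform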
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
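# Illustrative sketch (editor's addition, not part of scikit-learn): checks
# that the multinomial joint log likelihood above really is the linear form
# ``X @ feature_log_prob_.T + class_log_prior_`` and that chunked
# ``partial_fit`` reproduces a single ``fit``.  ``_demo_multinomial_linear``
# is a hypothetical name; the function is never called at import time.
def _demo_multinomial_linear():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randint(5, size=(60, 20)).astype(np.float64)
    y = rng.randint(3, size=60)
    clf = MultinomialNB().fit(X, y)
    # manual joint log likelihood, then pick the best class per sample
    jll = X.dot(clf.feature_log_prob_.T) + clf.class_log_prior_
    assert (clf.classes_[jll.argmax(axis=1)] == clf.predict(X)).all()
    # counting is exact, so two partial_fit calls give the same model as fit
    inc = MultinomialNB()
    inc.partial_fit(X[:30], y[:30], classes=np.array([0, 1, 2]))
    inc.partial_fit(X[30:], y[30:])
    assert np.allclose(inc.feature_log_prob_, clf.feature_log_prob_)
    return jll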
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
Threshold for binarizing (mapping to booleans) of sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
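# Illustrative sketch (editor's addition, not part of scikit-learn): the
# Bernoulli joint log likelihood above without the sparse-friendly
# rearrangement, i.e. sum_i [x_i*log(p_i) + (1 - x_i)*log(1 - p_i)] plus the
# class log prior.  ``_demo_bernoulli_jll`` is a hypothetical name; the
# function is never called at import time.
def _demo_bernoulli_jll():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.randint(2, size=(50, 8)).astype(np.float64)
    y = rng.randint(3, size=50)
    clf = BernoulliNB().fit(X, y)
    log_p = clf.feature_log_prob_               # log P(x_i = 1 | class)
    log_q = np.log(1 - np.exp(log_p))           # log P(x_i = 0 | class)
    jll = X.dot(log_p.T) + (1 - X).dot(log_q.T) + clf.class_log_prior_
    # identical, up to round-off, to the rearranged form used in the method
    assert np.allclose(jll, clf._joint_log_likelihood(X))
    assert (clf.classes_[jll.argmax(axis=1)] == clf.predict(X)).all()
    return jll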
| bsd-3-clause |
MichaelAquilina/numpy | numpy/lib/npyio.py | 42 | 71218 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import itertools
import warnings
import weakref
from operator import itemgetter
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core.multiarray import packbits, unpackbits
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like, has_nested_fields,
flatten_dtype, easy_dtype, _bytes_to_name
)
from numpy.compat import (
asbytes, asstr, asbytes_nested, bytes, basestring, unicode
)
if sys.version_info[0] >= 3:
import pickle
else:
import cPickle as pickle
from future_builtins import map
loads = pickle.loads
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return object.__getattribute__(self, '_obj').keys()
def zipfile_factory(*args, **kwargs):
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(*args, **kwargs)
class NpzFile(object):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
        An object on which attribute look-up can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: True
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
True
>>> npz.files
['y', 'x']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=True,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = 0
if key in self._files:
member = 1
elif key in self.files:
member = 1
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
def __iter__(self):
return iter(self.files)
def items(self):
"""
Return a list of tuples, with each tuple (filename, array in file).
"""
return [(f, self[f]) for f in self.files]
def iteritems(self):
"""Generator that returns tuples (filename, array in file)."""
for f in self.files:
yield (f, self[f])
def keys(self):
"""Return files in the archive with a ``.npy`` extension."""
return self.files
def iterkeys(self):
"""Return an iterator over the files in the archive."""
return self.__iter__()
def __contains__(self, key):
return self.files.__contains__(key)
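# Illustrative sketch (editor's addition, not part of NumPy): how the lazy
# ``NpzFile`` mapping defined above is usually reached through ``np.load``,
# exercising ``files``, getitem access and attribute access via ``f``.
# ``_demo_npzfile_access`` is a hypothetical name; the function is never
# called at import time.
def _demo_npzfile_access():
    import numpy as np
    from tempfile import TemporaryFile
    outfile = TemporaryFile()
    np.savez(outfile, x=np.arange(5), y=np.linspace(0., 1., 5))
    outfile.seek(0)
    with np.load(outfile) as npz:       # NpzFile supports the with statement
        assert sorted(npz.files) == ['x', 'y']
        x = npz['x']                    # getitem access
        y = npz.f.y                     # attribute access through BagObj
    return x, y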
def load(file, mmap_mode=None, allow_pickle=True, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
Parameters
----------
file : file-like object or string
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail.
Default: True
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files on Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
of NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
import gzip
own_fid = False
if isinstance(file, basestring):
fid = open(file, "rb")
own_fid = True
else:
fid = file
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of Numpy arrays is loaded
# in. Pickle does not pass on the encoding information to
# Numpy. The unpickling code in numpy.core.multiarray is
        # written to assume that unicode data appearing where binary data
        # is expected is encoded in 'latin1'. 'bytes' is also safe, as is
        # 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
try:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = asbytes('PK\x03\x04')
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
fid.seek(-N, 1) # back-up
if magic.startswith(_ZIP_PREFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
tmp = own_fid
own_fid = False
return NpzFile(fid, own_fid=tmp, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("allow_pickle=False, but file does not contain "
"non-pickled data")
try:
return pickle.load(fid, **pickle_kwargs)
except:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
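# Illustrative sketch (editor's addition, not part of NumPy): the
# ``allow_pickle`` behaviour documented above -- object arrays round-trip with
# the default ``allow_pickle=True`` but are refused when it is False.
# ``_demo_load_allow_pickle`` is a hypothetical name; the function is never
# called at import time.
def _demo_load_allow_pickle():
    import os
    import tempfile
    import numpy as np
    obj_arr = np.array([{'a': 1}, None], dtype=object)
    fd, path = tempfile.mkstemp(suffix='.npy')
    os.close(fd)
    try:
        np.save(path, obj_arr)
        roundtrip = np.load(path)               # allow_pickle defaults to True
        assert roundtrip[0] == {'a': 1}
        try:
            np.load(path, allow_pickle=False)   # object arrays need pickle
        except ValueError:
            pass
    finally:
        os.remove(path)
    return roundtrip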
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file or str
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string, a ``.npy``
extension will be appended to the file name if it does not already
have one.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
arr : array_like
Array data to be saved.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see the module docstring
of `numpy.lib.format` or the Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
own_fid = False
if isinstance(file, basestring):
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
else:
fid = file
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
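# Illustrative sketch (editor's addition, not part of NumPy): the ``.npy``
# extension handling documented above -- ``save`` appends the extension to a
# bare file name and ``load`` reads it back.  ``_demo_save_extension`` is a
# hypothetical name; the function is never called at import time.
def _demo_save_extension():
    import os
    import tempfile
    import numpy as np
    tmpdir = tempfile.mkdtemp()
    stem = os.path.join(tmpdir, 'weights')
    try:
        np.save(stem, np.arange(3))             # '.npy' is appended here
        loaded = np.load(stem + '.npy')
        assert (loaded == np.arange(3)).all()
    finally:
        os.remove(stem + '.npy')
        os.rmdir(tmpdir)
    return loaded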
def savez(file, *args, **kwds):
"""
Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the file name (string) or an open file (file-like object)
where the data will be saved. If file is a string, the ``.npz``
extension will be appended to the file name if it is not already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see `numpy.lib.format` or the
Numpy Enhancement Proposal
http://docs.scipy.org/doc/numpy/neps/npy-format.html
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_1', 'arr_0']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> npzfile.files
['y', 'x']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored file names are
arr_0, arr_1, etc.
Parameters
----------
file : str
File name of ``.npz`` file.
args : Arguments
Function arguments.
kwds : Keyword arguments
Keywords.
See Also
--------
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
# Import deferred for startup time improvement
import tempfile
if isinstance(file, basestring):
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
# Stage arrays in a temporary file on disk, before writing to zip.
fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
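# Illustrative sketch (editor's addition, not part of NumPy): the only
# difference between ``savez`` and ``savez_compressed`` handled by ``_savez``
# above is ZIP_STORED versus ZIP_DEFLATED, which shows up directly in the
# archive size for compressible data.  ``_demo_savez_compression`` is a
# hypothetical name; the function is never called at import time.
def _demo_savez_compression():
    import os
    import tempfile
    import numpy as np
    data = np.zeros((1000, 100))                # highly compressible payload
    tmpdir = tempfile.mkdtemp()
    plain = os.path.join(tmpdir, 'plain.npz')
    packed = os.path.join(tmpdir, 'packed.npz')
    try:
        np.savez(plain, data=data)
        np.savez_compressed(packed, data=data)
        sizes = (os.path.getsize(plain), os.path.getsize(packed))
        assert sizes[1] < sizes[0]
    finally:
        os.remove(plain)
        os.remove(packed)
        os.rmdir(tmpdir)
    return sizes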
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
def floatconv(x):
        x = x.lower()  # normalize case so '0X...' hex literals hit the check below
if b'0x' in x:
return float.fromhex(asstr(x))
return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, np.complex):
return lambda x: complex(asstr(x))
elif issubclass(typ, np.bytes_):
return bytes
else:
return str
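# Illustrative sketch (editor's addition, not part of NumPy): what the
# converters returned by ``_getconv`` above do with the byte strings that
# ``loadtxt`` feeds them.  ``_demo_getconv`` is a hypothetical name; the
# function is never called at import time.
def _demo_getconv():
    import numpy as np
    to_int = _getconv(np.dtype('i4'))
    to_float = _getconv(np.dtype(float))
    # integer columns go through float() first, so b'3.0' parses cleanly
    assert to_int(b'3.0') == 3
    # float columns accept C99 hex literals via float.fromhex
    assert to_float(b'0x1.8p1') == 3.0
    assert to_float(b'2.5') == 2.5
    return to_int, to_float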
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence, optional
The characters or list of characters used to indicate the start of a
comment;
default: '#'.
delimiter : str, optional
The string used to separate values. By default, this is any
whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will convert
that column to a float. E.g., if column 0 is a date string:
``converters = {0: datestr2num}``. Converters can also be used to
provide a default value for missing data (but see also `genfromtxt`):
``converters = {3: lambda s: float(s.strip() or 0)}``. Default: None.
skiprows : int, optional
Skip the first `skiprows` lines; default: 0.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO("0 1\\n2 3")
>>> np.loadtxt(c)
array([[ 0., 1.],
[ 2., 3.]])
>>> d = StringIO("M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([('M', 21, 72.0), ('F', 35, 58.0)],
dtype=[('gender', '|S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO("1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([ 1., 3.])
>>> y
array([ 2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [asbytes(comments)]
else:
comments = [asbytes(comment) for comment in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile(asbytes('|').join(comments))
user_converters = converters
if delimiter is not None:
delimiter = asbytes(delimiter)
if usecols is not None:
usecols = list(usecols)
fown = False
try:
if _is_string_like(fname):
fown = True
if fname.endswith('.gz'):
import gzip
fh = iter(gzip.GzipFile(fname))
elif fname.endswith('.bz2'):
import bz2
fh = iter(bz2.BZ2File(fname))
elif sys.version_info[0] == 2:
fh = iter(open(fname, 'U'))
else:
fh = iter(open(fname))
else:
fh = iter(fname)
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
X = []
def flatten_dtype(dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = flatten_dtype(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if len(tp.shape) > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
def pack_items(items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(pack_items(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter.
Note that although the file is opened as text, this function
returns bytes.
"""
line = asbytes(line)
if comments is not None:
line = regex_comments.split(asbytes(line), maxsplit=1)[0]
line = line.strip(asbytes('\r\n'))
if line:
return line.split(delimiter)
else:
return []
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
converters[i] = conv
# Parse each line, including the first
for i, line in enumerate(itertools.chain([first_line], fh)):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[i] for i in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
finally:
if fown:
fh.close()
X = np.array(X, dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
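# Illustrative sketch (editor's addition, not part of NumPy): the ``ndmin``
# and ``unpack`` behaviour of ``loadtxt`` documented above, using in-memory
# byte streams.  ``_demo_loadtxt_shapes`` is a hypothetical name; the function
# is never called at import time.
def _demo_loadtxt_shapes():
    from io import BytesIO
    import numpy as np
    # a single data row: ndmin=2 keeps the 2-D shape instead of squeezing it
    row = np.loadtxt(BytesIO(b"1 2 3"), ndmin=2)
    assert row.shape == (1, 3)
    # unpack=True transposes the result so columns can be tuple-unpacked
    x, y = np.loadtxt(BytesIO(b"1,10\n2,20\n3,30"), delimiter=',', unpack=True)
    assert (y == np.array([10., 20., 30.])).all()
    return row, x, y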
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# '):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
a) a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
b) a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4j %.4e %+.4j %.4e %+.4j'` for 3 columns
c) a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<http://docs.python.org/library/string.html#
format-specification-mini-language>`_, Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
own_fh = False
if _is_string_like(fname):
own_fh = True
if fname.endswith('.gz'):
import gzip
fh = gzip.open(fname, 'wb')
else:
if sys.version_info[0] >= 3:
fh = open(fname, 'wb')
else:
fh = open(fname, 'w')
elif hasattr(fname, 'write'):
fh = fname
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.descr)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, str):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(asbytes(comments + header + newline))
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
fh.write(asbytes(format % tuple(row2) + newline))
else:
for row in X:
try:
fh.write(asbytes(format % tuple(row) + newline))
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(asbytes(comments + footer + newline))
finally:
if own_fh:
fh.close()
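# Illustrative sketch (editor's addition, not part of NumPy): per-column
# formats and a commented header line with ``savetxt``, written to an
# in-memory buffer.  ``_demo_savetxt_fmt`` is a hypothetical name; the
# function is never called at import time.
def _demo_savetxt_fmt():
    from io import BytesIO
    import numpy as np
    buf = BytesIO()
    data = np.column_stack([np.arange(3), np.linspace(0., 1., 3)])
    # one format specifier per column plus a header prefixed with '# '
    np.savetxt(buf, data, fmt=['%d', '%.2f'], delimiter='\t',
               header='idx\tvalue')
    return buf.getvalue().decode('latin1')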
def fromregex(file, regexp, dtype):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
File name or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312L, 'foo'), (1534L, 'bar'), (444L, 'qux')],
dtype=[('num', '<i8'), ('key', '|S3')])
>>> output['num']
array([1312, 1534, 444], dtype=int64)
"""
own_fh = False
if not hasattr(file, "read"):
file = open(file, 'rb')
own_fh = True
try:
if not hasattr(regexp, 'match'):
regexp = re.compile(asbytes(regexp))
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
seq = regexp.findall(file.read())
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
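# Illustrative sketch (editor's addition, not part of NumPy): the single-group
# special case handled at the end of ``fromregex`` above, where the matches
# are read into a one-field structured array.  ``_demo_fromregex_single_group``
# is a hypothetical name; the function is never called at import time.
def _demo_fromregex_single_group():
    import os
    import tempfile
    import numpy as np
    fd, path = tempfile.mkstemp(suffix='.dat')
    os.close(fd)
    try:
        with open(path, 'w') as f:
            f.write("cost 13\ncost 42\n")
        # one group in the pattern -> a single-field structured array
        out = np.fromregex(path, r"cost (\d+)", [('cost', np.int64)])
        assert out['cost'].tolist() == [13, 42]
    finally:
        os.remove(path)
    return out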
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None, deletechars=None,
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file or str
File, filename, or generator to read. If the filename extension is
`.gz` or `.bz2`, the file is first decompressed. Note that
generators must return byte strings in Python 3k.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first valid line
after the first `skip_header` lines.
        If `names` is a sequence or a single string of comma-separated names,
the names will be used to define the field names in a structured dtype.
If `names` is None, the names of the dtype fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] Numpy User Guide, section `I/O with Numpy
<http://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO("1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Using dtype = None
>>> s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
Specifying dtype and names
>>> s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, 'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', '|S5')])
An example with fixed-width columns
>>> s = StringIO("11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, 'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', '|S5')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
# Py3 data conversions to bytes, for convenience
if comments is not None:
comments = asbytes(comments)
if isinstance(delimiter, unicode):
delimiter = asbytes(delimiter)
if isinstance(missing_values, (unicode, list, tuple)):
missing_values = asbytes_nested(missing_values)
#
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
raise TypeError(
"The input argument 'converter' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
# Initialize the filehandle, the LineSplitter and the NameValidator
own_fhd = False
try:
if isinstance(fname, basestring):
if sys.version_info[0] == 2:
fhd = iter(np.lib._datasource.open(fname, 'rbU'))
else:
fhd = iter(np.lib._datasource.open(fname, 'rb'))
own_fhd = True
else:
fhd = iter(fname)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, or generator. "
"(got %s instead)" % type(fname))
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip)._handyman
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
try:
while not first_values:
first_line = next(fhd)
if names is True:
if comments in first_line:
first_line = (
asbytes('').join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = asbytes('')
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([_bytes_to_name(_.strip())
for _ in first_values])
first_line = asbytes('')
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
# Define the list of missing_values (one column: one list)
missing_values = [list([asbytes('')]) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, bytes):
user_value = user_missing_values.split(asbytes(","))
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
converters[i].update(conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
#miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
if own_fhd:
fhd.close()
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v in (type('S'), np.string_)]
# ... and take the largest number of chars.
for i in strcolidx:
column_types[i] = "|S%i" % max(len(row[i]) for row in data)
#
if names is None:
# If the dtype is uniform, don't define names, else use ''
base = set([c.type for c in converters if c._checked])
if len(base) == 1:
(ddtype, mdtype) = (list(base)[0], np.bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(column_types)]
if usemask:
mdtype = [(defaultfmt % i, np.bool)
for (i, dt) in enumerate(column_types)]
else:
ddtype = list(zip(names, column_types))
mdtype = list(zip(names, [np.bool] * len(column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', np.bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if ttype == np.string_:
ttype = "|S%i" % max(len(row[i]) for row in data)
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names:
mdtype = [(_, np.bool) for _ in dtype.names]
else:
mdtype = np.bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names or (), converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != asbytes('')]
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
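# Illustrative usage sketch (not part of NumPy itself): how the missing_values /
# filling_values machinery handled above behaves from the caller's side. The CSV
# content below is hypothetical.
def _example_genfromtxt_missing_values():
    from io import BytesIO
    buf = BytesIO(b"1,2,???\n4,,6\n")
    # column 2 uses '???' as its missing marker; empty fields are missing by default
    return genfromtxt(buf, delimiter=",",
                      missing_values={2: "???"},
                      filling_values={1: 0.0, 2: -1.0})
    # expected result: array([[ 1.,  2., -1.],
    #                         [ 4.,  0.,  6.]])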
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
return genfromtxt(fname, **kwargs)
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
If ``usemask=False`` a standard `recarray` is returned,
if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
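# Illustrative usage sketch (not part of NumPy itself): recfromcsv applies csv-friendly
# defaults (comma delimiter, names taken from the header and lower-cased, dtype inferred)
# and returns a record array that supports attribute access. The CSV content is hypothetical.
def _example_recfromcsv():
    from io import BytesIO
    buf = BytesIO(b"Name,Value\nfoo,1\nbar,2\n")
    rec = recfromcsv(buf)
    return rec.name, rec.value   # header fields 'Name'/'Value' become 'name'/'value'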
| bsd-3-clause |
0x0all/kaggle-galaxies | predict_augmented_npy_maxout2048.py | 8 | 9452 | """
Load an analysis file and redo the predictions on the validation set / test set,
this time with augmented data and averaging. Store them as numpy files.
"""
import numpy as np
# import pandas as pd
import theano
import theano.tensor as T
import layers
import cc_layers
import custom
import load_data
import realtime_augmentation as ra
import time
import csv
import os
import cPickle as pickle
BATCH_SIZE = 32 # 16
NUM_INPUT_FEATURES = 3
CHUNK_SIZE = 8000 # 10000 # this should be a multiple of the batch size
# ANALYSIS_PATH = "analysis/try_convnet_cc_multirot_3x69r45_untied_bias.pkl"
ANALYSIS_PATH = "analysis/final/try_convnet_cc_multirotflip_3x69r45_maxout2048.pkl"
DO_VALID = True # disable this to not bother with the validation set evaluation
DO_TEST = True # disable this to not generate predictions on the testset
target_filename = os.path.basename(ANALYSIS_PATH).replace(".pkl", ".npy.gz")
target_path_valid = os.path.join("predictions/final/augmented/valid", target_filename)
target_path_test = os.path.join("predictions/final/augmented/test", target_filename)
print "Loading model data etc."
analysis = np.load(ANALYSIS_PATH)
input_sizes = [(69, 69), (69, 69)]
ds_transforms = [
ra.build_ds_transform(3.0, target_size=input_sizes[0]),
ra.build_ds_transform(3.0, target_size=input_sizes[1]) + ra.build_augmentation_transform(rotation=45)]
num_input_representations = len(ds_transforms)
# split training data into training + a small validation set
num_train = load_data.num_train
num_valid = num_train // 10 # integer division
num_train -= num_valid
num_test = load_data.num_test
valid_ids = load_data.train_ids[num_train:]
train_ids = load_data.train_ids[:num_train]
test_ids = load_data.test_ids
train_indices = np.arange(num_train)
valid_indices = np.arange(num_train, num_train+num_valid)
test_indices = np.arange(num_test)
y_valid = np.load("data/solutions_train.npy")[num_train:]
print "Build model"
l0 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[0][0], input_sizes[0][1])
l0_45 = layers.Input2DLayer(BATCH_SIZE, NUM_INPUT_FEATURES, input_sizes[1][0], input_sizes[1][1])
l0r = layers.MultiRotSliceLayer([l0, l0_45], part_size=45, include_flip=True)
l0s = cc_layers.ShuffleBC01ToC01BLayer(l0r)
l1a = cc_layers.CudaConvnetConv2DLayer(l0s, n_filters=32, filter_size=6, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l1 = cc_layers.CudaConvnetPooling2DLayer(l1a, pool_size=2)
l2a = cc_layers.CudaConvnetConv2DLayer(l1, n_filters=64, filter_size=5, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l2 = cc_layers.CudaConvnetPooling2DLayer(l2a, pool_size=2)
l3a = cc_layers.CudaConvnetConv2DLayer(l2, n_filters=128, filter_size=3, weights_std=0.01, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3b = cc_layers.CudaConvnetConv2DLayer(l3a, n_filters=128, filter_size=3, pad=0, weights_std=0.1, init_bias_value=0.1, dropout=0.0, partial_sum=1, untie_biases=True)
l3 = cc_layers.CudaConvnetPooling2DLayer(l3b, pool_size=2)
l3s = cc_layers.ShuffleC01BToBC01Layer(l3)
j3 = layers.MultiRotMergeLayer(l3s, num_views=4) # 2) # merge convolutional parts
# l4 = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5)
l4a = layers.DenseLayer(j3, n_outputs=4096, weights_std=0.001, init_bias_value=0.01, dropout=0.5, nonlinearity=layers.identity)
l4 = layers.FeatureMaxPoolingLayer(l4a, pool_size=2, feature_dim=1, implementation='reshape')
# l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.0, dropout=0.5, nonlinearity=custom.clip_01) # nonlinearity=layers.identity)
l5 = layers.DenseLayer(l4, n_outputs=37, weights_std=0.01, init_bias_value=0.1, dropout=0.5, nonlinearity=layers.identity)
# l6 = layers.OutputLayer(l5, error_measure='mse')
l6 = custom.OptimisedDivGalaxyOutputLayer(l5) # this incorporates the constraints on the output (probabilities sum to one, weighting, etc.)
xs_shared = [theano.shared(np.zeros((1,1,1,1), dtype=theano.config.floatX)) for _ in xrange(num_input_representations)]
idx = T.lscalar('idx')
givens = {
l0.input_var: xs_shared[0][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
l0_45.input_var: xs_shared[1][idx*BATCH_SIZE:(idx+1)*BATCH_SIZE],
}
compute_output = theano.function([idx], l6.predictions(dropout_active=False), givens=givens)
print "Load model parameters"
layers.set_param_values(l6, analysis['param_values'])
print "Create generators"
# set here which transforms to use to make predictions
augmentation_transforms = []
for zoom in [1 / 1.2, 1.0, 1.2]:
for angle in np.linspace(0, 360, 10, endpoint=False):
augmentation_transforms.append(ra.build_augmentation_transform(rotation=angle, zoom=zoom))
augmentation_transforms.append(ra.build_augmentation_transform(rotation=(angle + 180), zoom=zoom, shear=180)) # flipped
print " %d augmentation transforms." % len(augmentation_transforms)
augmented_data_gen_valid = ra.realtime_fixed_augmented_data_gen(valid_indices, 'train', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
valid_gen = load_data.buffered_gen_mp(augmented_data_gen_valid, buffer_size=1)
augmented_data_gen_test = ra.realtime_fixed_augmented_data_gen(test_indices, 'test', augmentation_transforms=augmentation_transforms, chunk_size=CHUNK_SIZE, target_sizes=input_sizes, ds_transforms=ds_transforms)
test_gen = load_data.buffered_gen_mp(augmented_data_gen_test, buffer_size=1)
approx_num_chunks_valid = int(np.ceil(num_valid * len(augmentation_transforms) / float(CHUNK_SIZE)))
approx_num_chunks_test = int(np.ceil(num_test * len(augmentation_transforms) / float(CHUNK_SIZE)))
print "Approximately %d chunks for the validation set" % approx_num_chunks_valid
print "Approximately %d chunks for the test set" % approx_num_chunks_test
if DO_VALID:
print
print "VALIDATION SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(valid_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_valid
load_data.save_gz(target_path_valid, all_predictions)
print "Evaluate"
rmse_valid = analysis['losses_valid'][-1]
rmse_augmented = np.sqrt(np.mean((y_valid - all_predictions)**2))
print " MSE (last iteration):\t%.6f" % rmse_valid
print " MSE (augmented):\t%.6f" % rmse_augmented
if DO_TEST:
print
print "TEST SET"
print "Compute predictions"
predictions_list = []
start_time = time.time()
for e, (chunk_data, chunk_length) in enumerate(test_gen):
print "Chunk %d" % (e + 1)
xs_chunk = chunk_data
# need to transpose the chunks to move the 'channels' dimension up
xs_chunk = [x_chunk.transpose(0, 3, 1, 2) for x_chunk in xs_chunk]
print " load data onto GPU"
for x_shared, x_chunk in zip(xs_shared, xs_chunk):
x_shared.set_value(x_chunk)
num_batches_chunk = int(np.ceil(chunk_length / float(BATCH_SIZE)))
        # make predictions, don't forget to cut off the zeros at the end
predictions_chunk_list = []
for b in xrange(num_batches_chunk):
if b % 1000 == 0:
print " batch %d/%d" % (b + 1, num_batches_chunk)
predictions = compute_output(b)
predictions_chunk_list.append(predictions)
predictions_chunk = np.vstack(predictions_chunk_list)
predictions_chunk = predictions_chunk[:chunk_length] # cut off zeros / padding
print " compute average over transforms"
predictions_chunk_avg = predictions_chunk.reshape(-1, len(augmentation_transforms), 37).mean(1)
predictions_list.append(predictions_chunk_avg)
time_since_start = time.time() - start_time
print " %s since start" % load_data.hms(time_since_start)
all_predictions = np.vstack(predictions_list)
print "Write predictions to %s" % target_path_test
load_data.save_gz(target_path_test, all_predictions)
print "Done!"
| bsd-3-clause |
fivejjs/AD3 | python/example.py | 3 | 2817 | import matplotlib.pyplot as plt
import numpy as np
from ad3 import simple_grid, general_graph
def example_binary():
# generate trivial data
x = np.ones((10, 10))
x[:, 5:] = -1
x_noisy = x + np.random.normal(0, 0.8, size=x.shape)
x_thresh = x_noisy > .0
# create unaries
unaries = x_noisy
    # as we convert to int, we need to multiply to get sensible values
unaries = np.dstack([-unaries, unaries])
# create potts pairwise
pairwise = np.eye(2)
# do simple cut
result = np.argmax(simple_grid(unaries, pairwise)[0], axis=-1)
    # use the general graph algorithm
# first, we construct the grid graph
inds = np.arange(x.size).reshape(x.shape)
horz = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]
vert = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]
edges = np.vstack([horz, vert])
# we flatten the unaries
pairwise_per_edge = np.repeat(pairwise[np.newaxis, :, :], edges.shape[0],
axis=0)
result_graph = np.argmax(general_graph(unaries.reshape(-1, 2), edges,
pairwise_per_edge)[0], axis=-1)
# plot results
plt.subplot(231, title="original")
plt.imshow(x, interpolation='nearest')
plt.subplot(232, title="noisy version")
plt.imshow(x_noisy, interpolation='nearest')
plt.subplot(234, title="thresholding result")
plt.imshow(x_thresh, interpolation='nearest')
plt.subplot(235, title="cut_simple")
plt.imshow(result, interpolation='nearest')
plt.subplot(236, title="cut_from_graph")
plt.imshow(result_graph.reshape(x.shape), interpolation='nearest')
plt.show()
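# Illustrative sketch (not part of the original example): the grid-graph construction
# above pairs each pixel index with its right and bottom neighbour. On a tiny 2x3 grid
# it yields the 7 undirected edges checked below.
def _example_grid_edges():
    inds = np.arange(6).reshape(2, 3)
    horz = np.c_[inds[:, :-1].ravel(), inds[:, 1:].ravel()]   # (0,1) (1,2) (3,4) (4,5)
    vert = np.c_[inds[:-1, :].ravel(), inds[1:, :].ravel()]   # (0,3) (1,4) (2,5)
    edges = np.vstack([horz, vert])
    assert edges.shape == (7, 2)
    return edges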
def example_multinomial():
# generate dataset with three stripes
np.random.seed(4)
x = np.zeros((10, 12, 3))
x[:, :4, 0] = 1
x[:, 4:8, 1] = 1
x[:, 8:, 2] = 1
unaries = x + 1.5 * np.random.normal(size=x.shape)
x = np.argmax(x, axis=2)
unaries = unaries
x_thresh = np.argmax(unaries, axis=2)
# potts potential
pairwise_potts = 2 * np.eye(3)
result = np.argmax(simple_grid(unaries, pairwise_potts)[0], axis=-1)
# potential that penalizes 0-1 and 1-2 less than 0-2
pairwise_1d = 2 * np.eye(3) + 2
pairwise_1d[-1, 0] = 0
pairwise_1d[0, -1] = 0
print(pairwise_1d)
result_1d = np.argmax(simple_grid(unaries, pairwise_1d)[0], axis=-1)
plt.subplot(141, title="original")
plt.imshow(x, interpolation="nearest")
plt.subplot(142, title="thresholded unaries")
plt.imshow(x_thresh, interpolation="nearest")
plt.subplot(143, title="potts potentials")
plt.imshow(result, interpolation="nearest")
plt.subplot(144, title="1d topology potentials")
plt.imshow(result_1d, interpolation="nearest")
plt.show()
#example_binary()
example_multinomial()
| lgpl-3.0 |
zseder/hunmisc | hunmisc/utils/plotting/matplotlib_simple_xy.py | 1 | 1535 | """
Copyright 2011-13 Attila Zseder
Email: zseder@gmail.com
This file is part of hunmisc project
url: https://github.com/zseder/hunmisc
hunmisc is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 2.1 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public
License along with this library; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import sys
import matplotlib.pyplot as plt
from matplotlib import rc
def read_data(istream):
r = [[],[],[],[],[]]
for l in istream:
le = l.strip().split()
[r[i].append(le[i]) for i in xrange(len(le))]
return r
def main():
d = read_data(open(sys.argv[1]))
rc('font', size=14)
ax = plt.subplot(111)
ax.plot(d[0], d[1], label="$M$", linewidth=2)
ax.plot(d[0], d[2], label="$l KL$", linewidth=2)
ax.plot(d[0], d[3], label="$l (H_q+KL)$", linewidth=2)
ax.plot(d[0], d[4], label="$M + l (H_q+KL)$", linewidth=2)
plt.xlabel("Bits")
ax.legend(loc=7)
plt.show()
#plt.savefig("fig.png")
if __name__ == "__main__":
main()
| gpl-3.0 |
cs591B1-Project/Social-Media-Impact-on-Stock-Market-and-Price | data/07 exxon/dataAnalysis.py | 26 | 6163 | import numpy
from numpy import *
from operator import truediv
from ast import literal_eval
import matplotlib.pyplot as plt
import statsmodels.tsa.stattools as st
import scipy.stats as scit
def calCorrelation(s,v):
# STEP 1: Read all data files
p = [line.rstrip('\n') for line in open("positive.txt")]
n = [line.rstrip('\n') for line in open("negative.txt")]
a = [line.rstrip('\n') for line in open("all.txt")]
p_social = [line.rstrip('\n') for line in open("positive_social.txt")]
n_social = [line.rstrip('\n') for line in open("negative_social.txt")]
a_social = [line.rstrip('\n') for line in open("all_social.txt")]
p_election = [line.rstrip('\n') for line in open("positive_election.txt")]
n_election = [line.rstrip('\n') for line in open("negative_election.txt")]
a_election = [line.rstrip('\n') for line in open("all_election.txt")]
p_trump = [line.rstrip('\n') for line in open("positive_trump.txt")]
n_trump = [line.rstrip('\n') for line in open("negative_trump.txt")]
a_trump = [line.rstrip('\n') for line in open("all_trump.txt")]
p_clinton = [line.rstrip('\n') for line in open("positive_clinton.txt")]
n_clinton = [line.rstrip('\n') for line in open("negative_clinton.txt")]
a_clinton = [line.rstrip('\n') for line in open("all_clinton.txt")]
	# STEP 2: Convert into numpy arrays of floats, adding a bias of 1 so zero-valued data does not cause divide-by-zero errors
pInt = numpy.array(map(float, p))+1
nInt = numpy.array(map(float, n))+1
aInt = numpy.array(map(float, a))+1
p_s_Int = numpy.array(map(float, p_social))+1
n_s_Int = numpy.array(map(float, n_social))+1
a_s_Int = numpy.array(map(float, a_social))+1
p_elec = numpy.array(map(float, p_election))+1
n_elec = numpy.array(map(float, n_election))+1
a_elec = numpy.array(map(float, a_election))+1
p_tr = numpy.array(map(float, p_trump))+1
n_tr = numpy.array(map(float, n_trump))+1
a_tr = numpy.array(map(float, a_trump))+1
p_hc = numpy.array(map(float, p_clinton))+1
n_hc = numpy.array(map(float, n_clinton))+1
a_hc = numpy.array(map(float, a_clinton))+1
	# STEP 3: Keep only the first 30 data points, since only those are needed as samples to calculate correlation
pInt = pInt[0:30]
nInt = nInt[0:30]
aInt = aInt[0:30]
p_s_Int = p_s_Int[0:30]
n_s_Int = n_s_Int[0:30]
a_s_Int = a_s_Int[0:30]
p_elec = p_elec[0:30]
n_elec = n_elec[0:30]
a_elec = a_elec[0:30]
p_tr = p_tr[0:30]
n_tr = n_tr[0:30]
a_tr = a_tr[0:30]
p_hc = p_hc[0:30]
n_hc = n_hc[0:30]
a_hc = a_hc[0:30]
print n_tr
print p_tr
print a_tr
print n_elec
print p_elec
print a_elec
# STEP 4: Simple Correlation of Data against Stock Market Prices
p_corr = numpy.corrcoef([pInt, s])
n_corr = numpy.corrcoef([nInt, s])
a_corr = numpy.corrcoef([aInt, s])
print "Positive Sentiment Corr: " + str(p_corr)
print "Negative Sentiment Corr: " + str(n_corr)
print "Neutral Sentiment Corr: " + str(a_corr)
cross_corr = numpy.correlate(pInt, s)
print cross_corr
	# STEP 5: Simple Correlation of Social Media Data against Stock Market Prices
p_s_corr = numpy.corrcoef([p_s_Int, s])
n_s_corr = numpy.corrcoef([n_s_Int, s])
a_s_corr = numpy.corrcoef([a_s_Int, s])
print "Positive Social Sentiment Corr: " + str(p_s_corr)
print "Negative Social Sentiment Corr: " + str(n_s_corr)
print "Neutral Social Sentiment Corr: " + str(a_s_corr)
	# the above calculation does not tell us much
	# STEP 6: Sentiment with Social Media Factor Corr
p_allcorr = numpy.corrcoef([numpy.add(pInt, p_s_Int), s])
n_allcorr = numpy.corrcoef([numpy.add(nInt, n_s_Int), s])
a_allcorr = numpy.corrcoef([numpy.add(aInt, a_s_Int), s])
print "Positive Articles + Social Sentiment Corr: " + str(p_allcorr)
print "Negative Articles + Sentiment Corr: " + str(n_allcorr)
print "Neutral Articles + Sentiment Corr: " + str(a_allcorr)
# STEP 7: Volumn & News Article Correlation
p_v_corr = numpy.corrcoef([pInt, v])
n_v_corr = numpy.corrcoef([nInt, v])
a_v_corr = numpy.corrcoef([aInt, v])
print "Positive Articles Sentiment - Volume Corr: " + str(p_v_corr)
print "Negative Sentiment - Volume Corr: " + str(n_v_corr)
print "Neutral Sentiment - Volume Corr: " + str(a_v_corr)
# with social media
p_all_v_corr = numpy.corrcoef([numpy.add(pInt, p_s_Int), v])
n_all_v_corr = numpy.corrcoef([numpy.add(nInt, n_s_Int), v])
a_all_v_corr = numpy.corrcoef([numpy.add(aInt, a_s_Int), v])
print "Positive Articles + Social Sentiment Corr: " + str(p_all_v_corr)
print "Negative Articles + Sentiment Corr: " + str(n_all_v_corr)
print "Neutral Articles + Sentiment Corr: " + str(a_all_v_corr)
pn_ratio = pInt/nInt
print pn_ratio
pnr_corr = numpy.corrcoef([pn_ratio, s])
print "PNR Corr: " + str(pnr_corr)
tr_corr = numpy.corrcoef([n_tr+p_tr+a_tr, s])
print tr_corr
elec_corr = numpy.corrcoef([n_elec + p_elec + a_elec, s])
print elec_corr
n_elec_corr = numpy.corrcoef([n_elec, s])
print n_elec_corr
print "PNR Corr: " + str(pnr_corr)
p_sum = numpy.add(pInt, p_s_Int)
n_sum = numpy.add(nInt, n_s_Int)
print "P_SUM:" + str(p_sum)
print "N_SUM:" + str(n_sum)
beta = 1
alpha = 1
p_sum = pInt*numpy.array(p_s_Int)*beta
n_sum = nInt*numpy.array(n_s_Int)*alpha
pn_sum_ratio = p_sum/n_sum
pnr_sum_corr = numpy.corrcoef([pn_sum_ratio, s])
print "PNR Sum Corr: " + str(pnr_sum_corr)
#spearman_r = scit.spearmanr(pn_sum_ratio, s)
#print "Spearman's Correlation: " + str(spearman_r)
#cross_corr = numpy.correlate(pn_sum_ratio, s)
#print cross_corr
print "Null Hypothesis - Postive Sentiment does not cause stock market price"
testVector = numpy.column_stack((s, pInt))
st.grangercausalitytests(testVector, 8, verbose = True)
print "Null Hypothesis - Negative Sentiment does not cause stock market price"
testVector = numpy.column_stack((s, nInt))
st.grangercausalitytests(testVector, 8, verbose = True)
print "Null Hypothesis - Neutral Sentiment does not cause stock market price"
testVector = numpy.column_stack((s, aInt))
st.grangercausalitytests(testVector, 8, verbose = True)
testVector = numpy.column_stack((s, pn_ratio))
st.grangercausalitytests(testVector, 8, verbose = True)
testVector = numpy.column_stack((pn_ratio, s))
st.grangercausalitytests(testVector, 8, verbose = True)
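# Illustrative sketch (not part of the original analysis): grangercausalitytests expects
# a two-column array whose first column is the series being predicted and whose second
# column is the candidate cause, which is why the stock price series s is stacked first
# when testing whether sentiment causes price. The synthetic data below is hypothetical;
# maxlag=8 mirrors the calls above.
def _example_granger_layout():
    rng = numpy.random.RandomState(0)
    cause = rng.randn(60)
    effect = numpy.roll(cause, 1) + 0.1 * rng.randn(60)   # effect lags the cause by one step
    testVector = numpy.column_stack((effect, cause))      # (predicted series, candidate cause)
    return st.grangercausalitytests(testVector, 8, verbose=False)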
| mit |
ZENGXH/scikit-learn | sklearn/neighbors/tests/test_kde.py | 208 | 5556 | import numpy as np
from sklearn.utils.testing import (assert_allclose, assert_raises,
assert_equal)
from sklearn.neighbors import KernelDensity, KDTree, NearestNeighbors
from sklearn.neighbors.ball_tree import kernel_norm
from sklearn.pipeline import make_pipeline
from sklearn.datasets import make_blobs
from sklearn.grid_search import GridSearchCV
from sklearn.preprocessing import StandardScaler
def compute_kernel_slow(Y, X, kernel, h):
d = np.sqrt(((Y[:, None, :] - X) ** 2).sum(-1))
norm = kernel_norm(h, X.shape[1], kernel) / X.shape[0]
if kernel == 'gaussian':
return norm * np.exp(-0.5 * (d * d) / (h * h)).sum(-1)
elif kernel == 'tophat':
return norm * (d < h).sum(-1)
elif kernel == 'epanechnikov':
return norm * ((1.0 - (d * d) / (h * h)) * (d < h)).sum(-1)
elif kernel == 'exponential':
return norm * (np.exp(-d / h)).sum(-1)
elif kernel == 'linear':
return norm * ((1 - d / h) * (d < h)).sum(-1)
elif kernel == 'cosine':
return norm * (np.cos(0.5 * np.pi * d / h) * (d < h)).sum(-1)
else:
raise ValueError('kernel not recognized')
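# Illustrative sketch (not one of the test cases): the reference above already includes
# the 1/n normalisation through kernel_norm, so it can be compared directly against
# exp(score_samples) from KernelDensity.
def _example_reference_check():
    rng = np.random.RandomState(42)
    X = rng.randn(20, 2)
    Y = rng.randn(5, 2)
    dens_slow = compute_kernel_slow(Y, X, 'gaussian', 0.5)
    kde = KernelDensity(kernel='gaussian', bandwidth=0.5).fit(X)
    assert_allclose(dens_slow, np.exp(kde.score_samples(Y)), rtol=1e-7)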
def test_kernel_density(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
Y = rng.randn(n_samples, n_features)
for kernel in ['gaussian', 'tophat', 'epanechnikov',
'exponential', 'linear', 'cosine']:
for bandwidth in [0.01, 0.1, 1]:
dens_true = compute_kernel_slow(Y, X, kernel, bandwidth)
def check_results(kernel, bandwidth, atol, rtol):
kde = KernelDensity(kernel=kernel, bandwidth=bandwidth,
atol=atol, rtol=rtol)
log_dens = kde.fit(X).score_samples(Y)
assert_allclose(np.exp(log_dens), dens_true,
atol=atol, rtol=max(1E-7, rtol))
assert_allclose(np.exp(kde.score(Y)),
np.prod(dens_true),
atol=atol, rtol=max(1E-7, rtol))
for rtol in [0, 1E-5]:
for atol in [1E-6, 1E-2]:
for breadth_first in (True, False):
yield (check_results, kernel, bandwidth, atol, rtol)
def test_kernel_density_sampling(n_samples=100, n_features=3):
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
bandwidth = 0.2
for kernel in ['gaussian', 'tophat']:
# draw a tophat sample
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
samp = kde.sample(100)
assert_equal(X.shape, samp.shape)
# check that samples are in the right range
nbrs = NearestNeighbors(n_neighbors=1).fit(X)
dist, ind = nbrs.kneighbors(X, return_distance=True)
if kernel == 'tophat':
assert np.all(dist < bandwidth)
elif kernel == 'gaussian':
# 5 standard deviations is safe for 100 samples, but there's a
# very small chance this test could fail.
assert np.all(dist < 5 * bandwidth)
# check unsupported kernels
for kernel in ['epanechnikov', 'exponential', 'linear', 'cosine']:
kde = KernelDensity(bandwidth, kernel=kernel).fit(X)
assert_raises(NotImplementedError, kde.sample, 100)
# non-regression test: used to return a scalar
X = rng.randn(4, 1)
kde = KernelDensity(kernel="gaussian").fit(X)
assert_equal(kde.sample().shape, (1, 1))
def test_kde_algorithm_metric_choice():
# Smoke test for various metrics and algorithms
rng = np.random.RandomState(0)
X = rng.randn(10, 2) # 2 features required for haversine dist.
Y = rng.randn(10, 2)
for algorithm in ['auto', 'ball_tree', 'kd_tree']:
for metric in ['euclidean', 'minkowski', 'manhattan',
'chebyshev', 'haversine']:
if algorithm == 'kd_tree' and metric not in KDTree.valid_metrics:
assert_raises(ValueError, KernelDensity,
algorithm=algorithm, metric=metric)
else:
kde = KernelDensity(algorithm=algorithm, metric=metric)
kde.fit(X)
y_dens = kde.score_samples(Y)
assert_equal(y_dens.shape, Y.shape[:1])
def test_kde_score(n_samples=100, n_features=3):
pass
#FIXME
#np.random.seed(0)
#X = np.random.random((n_samples, n_features))
#Y = np.random.random((n_samples, n_features))
def test_kde_badargs():
assert_raises(ValueError, KernelDensity,
algorithm='blah')
assert_raises(ValueError, KernelDensity,
bandwidth=0)
assert_raises(ValueError, KernelDensity,
kernel='blah')
assert_raises(ValueError, KernelDensity,
metric='blah')
assert_raises(ValueError, KernelDensity,
algorithm='kd_tree', metric='blah')
def test_kde_pipeline_gridsearch():
# test that kde plays nice in pipelines and grid-searches
X, _ = make_blobs(cluster_std=.1, random_state=1,
centers=[[0, 1], [1, 0], [0, 0]])
pipe1 = make_pipeline(StandardScaler(with_mean=False, with_std=False),
KernelDensity(kernel="gaussian"))
params = dict(kerneldensity__bandwidth=[0.001, 0.01, 0.1, 1, 10])
search = GridSearchCV(pipe1, param_grid=params, cv=5)
search.fit(X)
assert_equal(search.best_params_['kerneldensity__bandwidth'], .1)
| bsd-3-clause |
nolanliou/tensorflow | tensorflow/examples/get_started/regression/imports85.py | 24 | 6638 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A dataset loader for imports85.data."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import numpy as np
import tensorflow as tf
try:
import pandas as pd # pylint: disable=g-import-not-at-top
except ImportError:
pass
URL = "https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data"
# Order is important for the csv-readers, so we use an OrderedDict here.
defaults = collections.OrderedDict([
("symboling", [0]),
("normalized-losses", [0.0]),
("make", [""]),
("fuel-type", [""]),
("aspiration", [""]),
("num-of-doors", [""]),
("body-style", [""]),
("drive-wheels", [""]),
("engine-location", [""]),
("wheel-base", [0.0]),
("length", [0.0]),
("width", [0.0]),
("height", [0.0]),
("curb-weight", [0.0]),
("engine-type", [""]),
("num-of-cylinders", [""]),
("engine-size", [0.0]),
("fuel-system", [""]),
("bore", [0.0]),
("stroke", [0.0]),
("compression-ratio", [0.0]),
("horsepower", [0.0]),
("peak-rpm", [0.0]),
("city-mpg", [0.0]),
("highway-mpg", [0.0]),
("price", [0.0])
]) # pyformat: disable
types = collections.OrderedDict((key, type(value[0]))
for key, value in defaults.items())
def _get_imports85():
path = tf.contrib.keras.utils.get_file(URL.split("/")[-1], URL)
return path
def dataset(y_name="price", train_fraction=0.7):
"""Load the imports85 data as a (train,test) pair of `Dataset`.
Each dataset generates (features_dict, label) pairs.
Args:
y_name: The name of the column to use as the label.
train_fraction: A float, the fraction of data to use for training. The
remainder will be used for evaluation.
Returns:
A (train,test) pair of `Datasets`
"""
# Download and cache the data
path = _get_imports85()
# Define how the lines of the file should be parsed
def decode_line(line):
"""Convert a csv line into a (features_dict,label) pair."""
# Decode the line to a tuple of items based on the types of
# csv_header.values().
items = tf.decode_csv(line, list(defaults.values()))
# Convert the keys and items to a dict.
pairs = zip(defaults.keys(), items)
features_dict = dict(pairs)
# Remove the label from the features_dict
label = features_dict.pop(y_name)
return features_dict, label
def has_no_question_marks(line):
"""Returns True if the line of text has no question marks."""
# split the line into an array of characters
chars = tf.string_split(line[tf.newaxis], "").values
# for each character check if it is a question mark
is_question = tf.equal(chars, "?")
any_question = tf.reduce_any(is_question)
no_question = ~any_question
return no_question
def in_training_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# If you randomly split the dataset you won't get the same split in both
# sessions if you stop and restart training later. Also a simple
# random split won't work with a dataset that's too big to `.cache()` as
# we are doing here.
num_buckets = 1000000
bucket_id = tf.string_to_hash_bucket_fast(line, num_buckets)
# Use the hash bucket id as a random number that's deterministic per example
return bucket_id < int(train_fraction * num_buckets)
def in_test_set(line):
"""Returns a boolean tensor, true if the line is in the training set."""
# Items not in the training set are in the test set.
# This line must use `~` instead of `not` because `not` only works on python
# booleans but we are dealing with symbolic tensors.
return ~in_training_set(line)
base_dataset = (tf.contrib.data
# Get the lines from the file.
.TextLineDataset(path)
# drop lines with question marks.
.filter(has_no_question_marks))
train = (base_dataset
# Take only the training-set lines.
.filter(in_training_set)
# Decode each line into a (features_dict, label) pair.
.map(decode_line)
# Cache data so you only decode the file once.
.cache())
# Do the same for the test-set.
test = (base_dataset.filter(in_test_set).cache().map(decode_line))
return train, test
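# Illustrative sketch (not part of the loader above): the hash-bucket split used in
# `in_training_set` maps every line to a bucket deterministically, so each example lands
# on the same side of the train/test split on every run and restart. The constant lines
# and Session usage below are a hypothetical, graph-mode illustration.
def _example_hash_split(train_fraction=0.7):
  num_buckets = 1000000
  lines = tf.constant(["line one", "line two", "line three"])
  bucket_ids = tf.string_to_hash_bucket_fast(lines, num_buckets)
  in_train = bucket_ids < int(train_fraction * num_buckets)
  with tf.Session() as sess:
    return sess.run(in_train)  # identical boolean mask on every run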
def raw_dataframe():
"""Load the imports85 data as a pd.DataFrame."""
# Download and cache the data
path = _get_imports85()
# Load it into a pandas dataframe
df = pd.read_csv(path, names=types.keys(), dtype=types, na_values="?")
return df
def load_data(y_name="price", train_fraction=0.7, seed=None):
"""Get the imports85 data set.
A description of the data is available at:
https://archive.ics.uci.edu/ml/datasets/automobile
The data itself can be found at:
https://archive.ics.uci.edu/ml/machine-learning-databases/autos/imports-85.data
Args:
y_name: the column to return as the label.
train_fraction: the fraction of the dataset to use for training.
seed: The random seed to use when shuffling the data. `None` generates a
unique shuffle every run.
Returns:
a pair of pairs where the first pair is the training data, and the second
is the test data:
`(x_train, y_train), (x_test, y_test) = get_imports85_dataset(...)`
`x` contains a pandas DataFrame of features, while `y` contains the label
array.
"""
# Load the raw data columns.
data = raw_dataframe()
# Delete rows with unknowns
data = data.dropna()
# Shuffle the data
np.random.seed(seed)
# Split the data into train/test subsets.
x_train = data.sample(frac=train_fraction, random_state=seed)
x_test = data.drop(x_train.index)
# Extract the label from the features dataframe.
y_train = x_train.pop(y_name)
y_test = x_test.pop(y_name)
return (x_train, y_train), (x_test, y_test)
| apache-2.0 |
pratapvardhan/pandas | pandas/tests/indexes/multi/test_integrity.py | 3 | 9142 | # -*- coding: utf-8 -*-
import re
import numpy as np
import pandas as pd
import pandas.util.testing as tm
import pytest
from pandas import IntervalIndex, MultiIndex, RangeIndex
from pandas.compat import lrange, range
from pandas.core.dtypes.cast import construct_1d_object_array_from_listlike
def test_labels_dtypes():
# GH 8456
i = MultiIndex.from_tuples([('A', 1), ('A', 2)])
assert i.labels[0].dtype == 'int8'
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(40)])
assert i.labels[1].dtype == 'int8'
i = MultiIndex.from_product([['a'], range(400)])
assert i.labels[1].dtype == 'int16'
i = MultiIndex.from_product([['a'], range(40000)])
assert i.labels[1].dtype == 'int32'
i = pd.MultiIndex.from_product([['a'], range(1000)])
assert (i.labels[0] >= 0).all()
assert (i.labels[1] >= 0).all()
def test_values_boxed():
tuples = [(1, pd.Timestamp('2000-01-01')), (2, pd.NaT),
(3, pd.Timestamp('2000-01-03')),
(1, pd.Timestamp('2000-01-04')),
(2, pd.Timestamp('2000-01-02')),
(3, pd.Timestamp('2000-01-03'))]
result = pd.MultiIndex.from_tuples(tuples)
expected = construct_1d_object_array_from_listlike(tuples)
tm.assert_numpy_array_equal(result.values, expected)
# Check that code branches for boxed values produce identical results
tm.assert_numpy_array_equal(result.values[:4], result[:4].values)
def test_values_multiindex_datetimeindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(10 ** 18, 10 ** 18 + 5)
naive = pd.DatetimeIndex(ints)
aware = pd.DatetimeIndex(ints, tz='US/Central')
idx = pd.MultiIndex.from_arrays([naive, aware])
result = idx.values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive)
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware)
# n_lev > n_lab
result = idx[:2].values
outer = pd.DatetimeIndex([x[0] for x in result])
tm.assert_index_equal(outer, naive[:2])
inner = pd.DatetimeIndex([x[1] for x in result])
tm.assert_index_equal(inner, aware[:2])
def test_values_multiindex_periodindex():
# Test to ensure we hit the boxing / nobox part of MI.values
ints = np.arange(2007, 2012)
pidx = pd.PeriodIndex(ints, freq='D')
idx = pd.MultiIndex.from_arrays([ints, pidx])
result = idx.values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx)
# n_lev > n_lab
result = idx[:2].values
outer = pd.Int64Index([x[0] for x in result])
tm.assert_index_equal(outer, pd.Int64Index(ints[:2]))
inner = pd.PeriodIndex([x[1] for x in result])
tm.assert_index_equal(inner, pidx[:2])
def test_consistency():
# need to construct an overflow
major_axis = lrange(70000)
minor_axis = lrange(10)
major_labels = np.arange(70000)
minor_labels = np.repeat(lrange(10), 7000)
# the fact that is works means it's consistent
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
# inconsistent
major_labels = np.array([0, 0, 1, 1, 1, 2, 2, 3, 3])
minor_labels = np.array([0, 1, 0, 1, 1, 0, 1, 0, 1])
index = MultiIndex(levels=[major_axis, minor_axis],
labels=[major_labels, minor_labels])
assert not index.is_unique
def test_hash_collisions():
# non-smoke test that we don't get hash collisions
index = MultiIndex.from_product([np.arange(1000), np.arange(1000)],
names=['one', 'two'])
result = index.get_indexer(index.values)
tm.assert_numpy_array_equal(result, np.arange(
len(index), dtype='intp'))
for i in [0, 1, len(index) - 2, len(index) - 1]:
result = index.get_loc(index[i])
assert result == i
def test_dims():
pass
def take_invalid_kwargs():
vals = [['A', 'B'],
[pd.Timestamp('2011-01-01'), pd.Timestamp('2011-01-02')]]
idx = pd.MultiIndex.from_product(vals, names=['str', 'dt'])
indices = [1, 2]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
tm.assert_raises_regex(TypeError, msg, idx.take,
indices, foo=2)
msg = "the 'out' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, out=indices)
msg = "the 'mode' parameter is not supported"
tm.assert_raises_regex(ValueError, msg, idx.take,
indices, mode='clip')
def test_isna_behavior(idx):
# should not segfault GH5123
# NOTE: if MI representation changes, may make sense to allow
# isna(MI)
with pytest.raises(NotImplementedError):
pd.isna(idx)
def test_large_multiindex_error():
# GH12527
df_below_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(499999)]),
columns=['dest'])
with pytest.raises(KeyError):
df_below_1000000.loc[(-1, 0), 'dest']
with pytest.raises(KeyError):
df_below_1000000.loc[(3, 0), 'dest']
df_above_1000000 = pd.DataFrame(
1, index=pd.MultiIndex.from_product([[1, 2], range(500001)]),
columns=['dest'])
with pytest.raises(KeyError):
df_above_1000000.loc[(-1, 0), 'dest']
with pytest.raises(KeyError):
df_above_1000000.loc[(3, 0), 'dest']
def test_million_record_attribute_error():
# GH 18165
r = list(range(1000000))
df = pd.DataFrame({'a': r, 'b': r},
index=pd.MultiIndex.from_tuples([(x, x) for x in r]))
with tm.assert_raises_regex(AttributeError,
"'Series' object has no attribute 'foo'"):
df['a'].foo()
def test_can_hold_identifiers(idx):
key = idx[0]
assert idx._can_hold_identifiers_and_holds_name(key) is True
def test_metadata_immutable(idx):
levels, labels = idx.levels, idx.labels
# shouldn't be able to set at either the top level or base level
mutable_regex = re.compile('does not support mutable operations')
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0] = levels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
levels[0][0] = levels[0][0]
# ditto for labels
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0] = labels[0]
with tm.assert_raises_regex(TypeError, mutable_regex):
labels[0][0] = labels[0][0]
# and for names
names = idx.names
with tm.assert_raises_regex(TypeError, mutable_regex):
names[0] = names[0]
def test_level_setting_resets_attributes():
ind = pd.MultiIndex.from_arrays([
['A', 'A', 'B', 'B', 'B'], [1, 2, 1, 2, 3]
])
assert ind.is_monotonic
ind.set_levels([['A', 'B'], [1, 3, 2]], inplace=True)
# if this fails, probably didn't reset the cache correctly.
assert not ind.is_monotonic
def test_rangeindex_fallback_coercion_bug():
# GH 12893
foo = pd.DataFrame(np.arange(100).reshape((10, 10)))
bar = pd.DataFrame(np.arange(100).reshape((10, 10)))
df = pd.concat({'foo': foo.stack(), 'bar': bar.stack()}, axis=1)
df.index.names = ['fizz', 'buzz']
str(df)
expected = pd.DataFrame({'bar': np.arange(100),
'foo': np.arange(100)},
index=pd.MultiIndex.from_product(
[range(10), range(10)],
names=['fizz', 'buzz']))
tm.assert_frame_equal(df, expected, check_like=True)
result = df.index.get_level_values('fizz')
expected = pd.Int64Index(np.arange(10), name='fizz').repeat(10)
tm.assert_index_equal(result, expected)
result = df.index.get_level_values('buzz')
expected = pd.Int64Index(np.tile(np.arange(10), 10), name='buzz')
tm.assert_index_equal(result, expected)
def test_hash_error(indices):
index = indices
tm.assert_raises_regex(TypeError, "unhashable type: %r" %
type(index).__name__, hash, indices)
def test_mutability(indices):
if not len(indices):
return
pytest.raises(TypeError, indices.__setitem__, 0, indices[0])
def test_wrong_number_names(indices):
def testit(ind):
ind.names = ["apple", "banana", "carrot"]
tm.assert_raises_regex(ValueError, "^Length", testit, indices)
def test_memory_usage(idx):
result = idx.memory_usage()
if len(idx):
idx.get_loc(idx[0])
result2 = idx.memory_usage()
result3 = idx.memory_usage(deep=True)
# RangeIndex, IntervalIndex
# don't have engines
if not isinstance(idx, (RangeIndex, IntervalIndex)):
assert result2 > result
if idx.inferred_type == 'object':
assert result3 > result2
else:
# we report 0 for no-length
assert result == 0
def test_nlevels(idx):
assert idx.nlevels == 2
| bsd-3-clause |
rohit21122012/DCASE2013 | runs/2016/dnn2016med_gd_50/task3_sound_event_detection_in_real_life_audio.py | 15 | 46699 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# DCASE 2016::Sound Event Detection in Real-life Audio / Baseline System
import argparse
import csv
import math
import numpy
import textwrap
import warnings
from sklearn import mixture
from src.dataset import *
from src.evaluation import *
from src.features import *
from src.sound_event_detection import *
__version_info__ = ('1', '0', '1')
__version__ = '.'.join(__version_info__)
def main(argv):
numpy.random.seed(123456) # let's make randomization predictable
parser = argparse.ArgumentParser(
prefix_chars='-+',
formatter_class=argparse.RawDescriptionHelpFormatter,
description=textwrap.dedent('''\
DCASE 2016
Task 3: Sound Event Detection in Real-life Audio
Baseline System
---------------------------------------------
Tampere University of Technology / Audio Research Group
Author: Toni Heittola ( toni.heittola@tut.fi )
System description
            This is a baseline implementation for the D-CASE 2016, task 3 - Sound event detection in real-life audio.
            The system has a binary classifier for each included sound event class. The GMM classifier is trained with
the positive and negative examples from the mixture signals, and classification is done between these
two models as likelihood ratio. Acoustic features are MFCC+Delta+Acceleration (MFCC0 omitted).
'''))
parser.add_argument("-development", help="Use the system in the development mode", action='store_true',
default=False, dest='development')
parser.add_argument("-challenge", help="Use the system in the challenge mode", action='store_true',
default=False, dest='challenge')
parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)
args = parser.parse_args()
# Load parameters from config file
parameter_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
os.path.splitext(os.path.basename(__file__))[0] + '.yaml')
params = load_parameters(parameter_file)
params = process_parameters(params)
make_folders(params)
title("DCASE 2016::Sound Event Detection in Real-life Audio / Baseline System")
# Check if mode is defined
if not (args.development or args.challenge):
args.development = True
args.challenge = False
dataset_evaluation_mode = 'folds'
if args.development and not args.challenge:
print "Running system in development mode"
dataset_evaluation_mode = 'folds'
elif not args.development and args.challenge:
print "Running system in challenge mode"
dataset_evaluation_mode = 'full'
# Get dataset container class
dataset = eval(params['general']['development_dataset'])(data_path=params['path']['data'])
# Fetch data over internet and setup the data
# ==================================================
if params['flow']['initialize']:
dataset.fetch()
# Extract features for all audio files in the dataset
# ==================================================
if params['flow']['extract_features']:
section_header('Feature extraction [Development data]')
# Collect files from evaluation sets
files = []
for fold in dataset.folds(mode=dataset_evaluation_mode):
for item_id, item in enumerate(dataset.train(fold)):
if item['file'] not in files:
files.append(item['file'])
for item_id, item in enumerate(dataset.test(fold)):
if item['file'] not in files:
files.append(item['file'])
# Go through files and make sure all features are extracted
do_feature_extraction(files=files,
dataset=dataset,
feature_path=params['path']['features'],
params=params['features'],
overwrite=params['general']['overwrite'])
foot()
# Prepare feature normalizers
# ==================================================
if params['flow']['feature_normalizer']:
section_header('Feature normalizer [Development data]')
do_feature_normalization(dataset=dataset,
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
dataset_evaluation_mode=dataset_evaluation_mode,
overwrite=params['general']['overwrite'])
foot()
# System training
# ==================================================
if params['flow']['train_system']:
section_header('System training [Development data]')
do_system_training(dataset=dataset,
model_path=params['path']['models'],
feature_normalizer_path=params['path']['feature_normalizers'],
feature_path=params['path']['features'],
hop_length_seconds=params['features']['hop_length_seconds'],
classifier_params=params['classifier']['parameters'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=params['general']['overwrite']
)
foot()
# System evaluation in development mode
if args.development and not args.challenge:
# System testing
# ==================================================
if params['flow']['test_system']:
section_header('System testing [Development data]')
do_system_testing(dataset=dataset,
result_path=params['path']['results'],
feature_path=params['path']['features'],
model_path=params['path']['models'],
feature_params=params['features'],
detector_params=params['detector'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=params['general']['overwrite']
)
foot()
# System evaluation
# ==================================================
if params['flow']['evaluate_system']:
section_header('System evaluation [Development data]')
do_system_evaluation(dataset=dataset,
dataset_evaluation_mode=dataset_evaluation_mode,
result_path=params['path']['results'])
foot()
# System evaluation with challenge data
elif not args.development and args.challenge:
# Fetch data over internet and setup the data
challenge_dataset = eval(params['general']['challenge_dataset'])()
if params['flow']['initialize']:
challenge_dataset.fetch()
# System testing
if params['flow']['test_system']:
section_header('System testing [Challenge data]')
do_system_testing(dataset=challenge_dataset,
result_path=params['path']['challenge_results'],
feature_path=params['path']['features'],
model_path=params['path']['models'],
feature_params=params['features'],
detector_params=params['detector'],
dataset_evaluation_mode=dataset_evaluation_mode,
classifier_method=params['classifier']['method'],
overwrite=True
)
foot()
print " "
print "Your results for the challenge data are stored at [" + params['path']['challenge_results'] + "]"
print " "
def process_parameters(params):
"""Parameter post-processing.
Parameters
----------
params : dict
parameters in dict
Returns
-------
params : dict
processed parameters
"""
params['features']['mfcc']['win_length'] = int(params['features']['win_length_seconds'] * params['features']['fs'])
params['features']['mfcc']['hop_length'] = int(params['features']['hop_length_seconds'] * params['features']['fs'])
# Copy parameters for current classifier method
params['classifier']['parameters'] = params['classifier_parameters'][params['classifier']['method']]
# Hash
params['features']['hash'] = get_parameter_hash(params['features'])
params['classifier']['hash'] = get_parameter_hash(params['classifier'])
params['detector']['hash'] = get_parameter_hash(params['detector'])
# Paths
params['path']['data'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['data'])
params['path']['base'] = os.path.join(os.path.dirname(os.path.realpath(__file__)), params['path']['base'])
# Features
params['path']['features_'] = params['path']['features']
params['path']['features'] = os.path.join(params['path']['base'],
params['path']['features'],
params['features']['hash'])
# Feature normalizers
params['path']['feature_normalizers_'] = params['path']['feature_normalizers']
params['path']['feature_normalizers'] = os.path.join(params['path']['base'],
params['path']['feature_normalizers'],
params['features']['hash'])
# Models
# Save parameters into folders to help manual browsing of files.
params['path']['models_'] = params['path']['models']
params['path']['models'] = os.path.join(params['path']['base'],
params['path']['models'],
params['features']['hash'],
params['classifier']['hash'])
# Results
params['path']['results_'] = params['path']['results']
params['path']['results'] = os.path.join(params['path']['base'],
params['path']['results'],
params['features']['hash'],
params['classifier']['hash'],
params['detector']['hash'])
return params
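# Illustrative layout produced by the path handling above (hash values are
# hypothetical placeholders):
#   <base>/<features dir>/<features hash>/
#   <base>/<feature_normalizers dir>/<features hash>/
#   <base>/<models dir>/<features hash>/<classifier hash>/
#   <base>/<results dir>/<features hash>/<classifier hash>/<detector hash>/
# The trailing-underscore copies ('features_', 'models_', 'results_', ...) keep
# the plain directory names so that make_folders() below can recompose the
# intermediate parameter-file paths.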
def make_folders(params, parameter_filename='parameters.yaml'):
"""Create all needed folders, and saves parameters in yaml-file for easier manual browsing of data.
Parameters
----------
params : dict
parameters in dict
parameter_filename : str
filename to save parameters used to generate the folder name
Returns
-------
nothing
"""
# Check that target path exists, create if not
check_path(params['path']['features'])
check_path(params['path']['feature_normalizers'])
check_path(params['path']['models'])
check_path(params['path']['results'])
# Save parameters into folders to help manual browsing of files.
# Features
feature_parameter_filename = os.path.join(params['path']['features'], parameter_filename)
if not os.path.isfile(feature_parameter_filename):
save_parameters(feature_parameter_filename, params['features'])
# Feature normalizers
feature_normalizer_parameter_filename = os.path.join(params['path']['feature_normalizers'], parameter_filename)
if not os.path.isfile(feature_normalizer_parameter_filename):
save_parameters(feature_normalizer_parameter_filename, params['features'])
# Models
model_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(model_features_parameter_filename):
save_parameters(model_features_parameter_filename, params['features'])
model_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['models_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(model_models_parameter_filename):
save_parameters(model_models_parameter_filename, params['classifier'])
# Results
# Save parameters into folders to help manual browsing of files.
result_features_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
parameter_filename)
if not os.path.isfile(result_features_parameter_filename):
save_parameters(result_features_parameter_filename, params['features'])
result_models_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
params['classifier']['hash'],
parameter_filename)
if not os.path.isfile(result_models_parameter_filename):
save_parameters(result_models_parameter_filename, params['classifier'])
result_detector_parameter_filename = os.path.join(params['path']['base'],
params['path']['results_'],
params['features']['hash'],
params['classifier']['hash'],
params['detector']['hash'],
parameter_filename)
if not os.path.isfile(result_detector_parameter_filename):
save_parameters(result_detector_parameter_filename, params['detector'])
def get_feature_filename(audio_file, path, extension='cpickle'):
"""Get feature filename
Parameters
----------
audio_file : str
audio file name from which the features are extracted
path : str
feature path
extension : str
file extension
(Default value='cpickle')
Returns
-------
feature_filename : str
full feature filename
"""
return os.path.join(path, 'sequence_' + os.path.splitext(audio_file)[0] + '.' + extension)
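# Illustrative call (hypothetical names, not from the original file):
#   get_feature_filename('a001.wav', '/data/features')
#   -> '/data/features/sequence_a001.cpickle'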
def get_feature_normalizer_filename(fold, scene_label, path, extension='cpickle'):
"""Get normalizer filename
Parameters
----------
fold : int >= 0
evaluation fold number
scene_label : str
scene label
path : str
normalizer path
extension : str
file extension
(Default value='cpickle')
Returns
-------
normalizer_filename : str
full normalizer filename
"""
return os.path.join(path, 'scale_fold' + str(fold) + '_' + str(scene_label) + '.' + extension)
def get_model_filename(fold, scene_label, path, extension='cpickle'):
"""Get model filename
Parameters
----------
fold : int >= 0
evaluation fold number
scene_label : str
scene label
path : str
model path
extension : str
file extension
(Default value='cpickle')
Returns
-------
model_filename : str
full model filename
"""
return os.path.join(path, 'model_fold' + str(fold) + '_' + str(scene_label) + '.' + extension)
def get_result_filename(fold, scene_label, path, extension='txt'):
"""Get result filename
Parameters
----------
fold : int >= 0
evaluation fold number
scene_label : str
scene label
path : str
result path
extension : str
file extension
        (Default value='txt')
Returns
-------
result_filename : str
full result filename
"""
if fold == 0:
return os.path.join(path, 'results_' + str(scene_label) + '.' + extension)
else:
return os.path.join(path, 'results_fold' + str(fold) + '_' + str(scene_label) + '.' + extension)
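# Illustrative calls (hypothetical scene label and path):
#   get_result_filename(fold=0, scene_label='home', path='/data/results')
#   -> '/data/results/results_home.txt'
#   get_result_filename(fold=1, scene_label='home', path='/data/results')
#   -> '/data/results/results_fold1_home.txt'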
def do_feature_extraction(files, dataset, feature_path, params, overwrite=False):
"""Feature extraction
Parameters
----------
files : list
file list
dataset : class
dataset class
feature_path : str
path where the features are saved
params : dict
parameter dict
overwrite : bool
overwrite existing feature files
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Audio file not found.
"""
for file_id, audio_filename in enumerate(files):
# Get feature filename
current_feature_file = get_feature_filename(audio_file=os.path.split(audio_filename)[1], path=feature_path)
progress(title_text='Extracting [sequences]',
percentage=(float(file_id) / len(files)),
note=os.path.split(audio_filename)[1])
if not os.path.isfile(current_feature_file) or overwrite:
# Load audio
if os.path.isfile(dataset.relative_to_absolute_path(audio_filename)):
y, fs = load_audio(filename=dataset.relative_to_absolute_path(audio_filename), mono=True,
fs=params['fs'])
else:
raise IOError("Audio file not found [%s]" % audio_filename)
# Extract features
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=params['include_mfcc0'],
include_delta=params['include_delta'],
include_acceleration=params['include_acceleration'],
mfcc_params=params['mfcc'],
delta_params=params['mfcc_delta'],
acceleration_params=params['mfcc_acceleration'])
# Save
save_data(current_feature_file, feature_data)
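# Note (inferred from usage elsewhere in this file, not a formal spec): the
# container written above by save_data() is expected to expose at least 'feat'
# (frame-level features, read during training/testing) and 'stat' (accumulated
# statistics, read by the feature normalization stage below).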
def do_feature_normalization(dataset, feature_normalizer_path, feature_path, dataset_evaluation_mode='folds',
overwrite=False):
"""Feature normalization
    Calculate normalization factors for each evaluation fold based on the available training material.
Parameters
----------
dataset : class
dataset class
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is considered to belong to a single fold.
(Default value='folds')
overwrite : bool
overwrite existing normalizers
(Default value=False)
Returns
-------
nothing
Raises
-------
IOError
Feature file not found.
"""
for fold in dataset.folds(mode=dataset_evaluation_mode):
for scene_id, scene_label in enumerate(dataset.scene_labels):
current_normalizer_file = get_feature_normalizer_filename(fold=fold, scene_label=scene_label,
path=feature_normalizer_path)
if not os.path.isfile(current_normalizer_file) or overwrite:
# Collect sequence files from scene class
files = []
for item_id, item in enumerate(dataset.train(fold, scene_label=scene_label)):
if item['file'] not in files:
files.append(item['file'])
file_count = len(files)
# Initialize statistics
normalizer = FeatureNormalizer()
for file_id, audio_filename in enumerate(files):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(file_id) / file_count),
note=os.path.split(audio_filename)[1])
# Load features
feature_filename = get_feature_filename(audio_file=os.path.split(audio_filename)[1],
path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['stat']
else:
raise IOError("Feature file not found [%s]" % audio_filename)
# Accumulate statistics
normalizer.accumulate(feature_data)
# Calculate normalization factors
normalizer.finalize()
# Save
save_data(current_normalizer_file, normalizer)
def do_system_training(dataset, model_path, feature_normalizer_path, feature_path, hop_length_seconds,
classifier_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System training
Train a model pair for each sound event class, one for activity and one for inactivity.
model container format:
{
'normalizer': normalizer class
'models' :
{
'mouse click' :
{
'positive': mixture.GMM class,
'negative': mixture.GMM class
}
'keyboard typing' :
{
'positive': mixture.GMM class,
'negative': mixture.GMM class
}
...
}
}
Parameters
----------
dataset : class
dataset class
model_path : str
path where the models are saved.
feature_normalizer_path : str
path where the feature normalizers are saved.
feature_path : str
path where the features are saved.
hop_length_seconds : float > 0
feature frame hop length in seconds
classifier_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is considered to belong to a single fold.
(Default value='folds')
classifier_method : str ['gmm']
classifier method, currently only GMM supported
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Feature normalizer not found.
Feature file not found.
"""
if classifier_method != 'gmm':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
for fold in dataset.folds(mode=dataset_evaluation_mode):
for scene_id, scene_label in enumerate(dataset.scene_labels):
current_model_file = get_model_filename(fold=fold, scene_label=scene_label, path=model_path)
if not os.path.isfile(current_model_file) or overwrite:
# Load normalizer
feature_normalizer_filename = get_feature_normalizer_filename(fold=fold, scene_label=scene_label,
path=feature_normalizer_path)
if os.path.isfile(feature_normalizer_filename):
normalizer = load_data(feature_normalizer_filename)
else:
raise IOError("Feature normalizer not found [%s]" % feature_normalizer_filename)
# Initialize model container
model_container = {'normalizer': normalizer, 'models': {}}
                # Restructure training data into structure[files][events]
ann = {}
for item_id, item in enumerate(dataset.train(fold=fold, scene_label=scene_label)):
filename = os.path.split(item['file'])[1]
if filename not in ann:
ann[filename] = {}
if item['event_label'] not in ann[filename]:
ann[filename][item['event_label']] = []
ann[filename][item['event_label']].append((item['event_onset'], item['event_offset']))
# Collect training examples
data_positive = {}
data_negative = {}
file_count = len(ann)
for item_id, audio_filename in enumerate(ann):
progress(title_text='Collecting data',
fold=fold,
percentage=(float(item_id) / file_count),
note=scene_label + " / " + os.path.split(audio_filename)[1])
# Load features
feature_filename = get_feature_filename(audio_file=audio_filename, path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
raise IOError("Feature file not found [%s]" % feature_filename)
# Normalize features
feature_data = model_container['normalizer'].normalize(feature_data)
for event_label in ann[audio_filename]:
positive_mask = numpy.zeros((feature_data.shape[0]), dtype=bool)
for event in ann[audio_filename][event_label]:
start_frame = int(math.floor(event[0] / hop_length_seconds))
stop_frame = int(math.ceil(event[1] / hop_length_seconds))
if stop_frame > feature_data.shape[0]:
stop_frame = feature_data.shape[0]
positive_mask[start_frame:stop_frame] = True
# Store positive examples
if event_label not in data_positive:
data_positive[event_label] = feature_data[positive_mask, :]
else:
data_positive[event_label] = numpy.vstack(
(data_positive[event_label], feature_data[positive_mask, :]))
# Store negative examples
if event_label not in data_negative:
data_negative[event_label] = feature_data[~positive_mask, :]
else:
data_negative[event_label] = numpy.vstack(
(data_negative[event_label], feature_data[~positive_mask, :]))
# Train models for each class
for event_label in data_positive:
progress(title_text='Train models',
fold=fold,
note=scene_label + " / " + event_label)
if classifier_method == 'gmm':
model_container['models'][event_label] = {}
model_container['models'][event_label]['positive'] = mixture.GMM(**classifier_params).fit(
data_positive[event_label])
model_container['models'][event_label]['negative'] = mixture.GMM(**classifier_params).fit(
data_negative[event_label])
else:
raise ValueError("Unknown classifier method [" + classifier_method + "]")
# Save models
save_data(current_model_file, model_container)
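# Hedged sketch of how a saved model container can be consumed later (the event
# label 'mouse click' is only an example taken from the docstring above; the
# actual frame-wise scoring is done by event_detection(), defined elsewhere in
# this file):
#   container = load_data(current_model_file)
#   feats = container['normalizer'].normalize(feature_data)
#   positive_gmm = container['models']['mouse click']['positive']  # mixture.GMM
#   negative_gmm = container['models']['mouse click']['negative']  # mixture.GMM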
def do_system_testing(dataset, result_path, feature_path, model_path, feature_params, detector_params,
dataset_evaluation_mode='folds', classifier_method='gmm', overwrite=False):
"""System testing.
    If extracted features are not found on disk, they are extracted but not saved.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
feature_path : str
path where the features are saved.
model_path : str
path where the models are saved.
feature_params : dict
parameter dict
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is considered to belong to a single fold.
(Default value='folds')
classifier_method : str ['gmm']
classifier method, currently only GMM supported
(Default value='gmm')
overwrite : bool
overwrite existing models
(Default value=False)
Returns
-------
nothing
Raises
-------
ValueError
classifier_method is unknown.
IOError
Model file not found.
Audio file not found.
"""
if classifier_method != 'gmm':
raise ValueError("Unknown classifier method [" + classifier_method + "]")
for fold in dataset.folds(mode=dataset_evaluation_mode):
for scene_id, scene_label in enumerate(dataset.scene_labels):
current_result_file = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
if not os.path.isfile(current_result_file) or overwrite:
results = []
# Load class model container
model_filename = get_model_filename(fold=fold, scene_label=scene_label, path=model_path)
if os.path.isfile(model_filename):
model_container = load_data(model_filename)
else:
raise IOError("Model file not found [%s]" % model_filename)
file_count = len(dataset.test(fold, scene_label=scene_label))
for file_id, item in enumerate(dataset.test(fold=fold, scene_label=scene_label)):
progress(title_text='Testing',
fold=fold,
percentage=(float(file_id) / file_count),
note=scene_label + " / " + os.path.split(item['file'])[1])
# Load features
feature_filename = get_feature_filename(audio_file=item['file'], path=feature_path)
if os.path.isfile(feature_filename):
feature_data = load_data(feature_filename)['feat']
else:
# Load audio
if os.path.isfile(dataset.relative_to_absolute_path(item['file'])):
                            y, fs = load_audio(filename=dataset.relative_to_absolute_path(item['file']), mono=True, fs=feature_params['fs'])
else:
raise IOError("Audio file not found [%s]" % item['file'])
# Extract features
feature_data = feature_extraction(y=y,
fs=fs,
include_mfcc0=feature_params['include_mfcc0'],
include_delta=feature_params['include_delta'],
include_acceleration=feature_params['include_acceleration'],
mfcc_params=feature_params['mfcc'],
delta_params=feature_params['mfcc_delta'],
acceleration_params=feature_params['mfcc_acceleration'],
statistics=False)['feat']
# Normalize features
feature_data = model_container['normalizer'].normalize(feature_data)
current_results = event_detection(feature_data=feature_data,
model_container=model_container,
hop_length_seconds=feature_params['hop_length_seconds'],
smoothing_window_length_seconds=detector_params[
'smoothing_window_length'],
decision_threshold=detector_params['decision_threshold'],
minimum_event_length=detector_params['minimum_event_length'],
minimum_event_gap=detector_params['minimum_event_gap'])
# Store the result
for event in current_results:
results.append((dataset.absolute_to_relative(item['file']), event[0], event[1], event[2]))
# Save testing results
with open(current_result_file, 'wt') as f:
writer = csv.writer(f, delimiter='\t')
for result_item in results:
writer.writerow(result_item)
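# Each row written above is tab-separated:
#   <relative audio file>\t<event onset>\t<event offset>\t<event label>
# e.g. (hypothetical values): audio/mixture_001.wav  1.27  2.43  mouse click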
def do_system_evaluation(dataset, result_path, dataset_evaluation_mode='folds'):
"""System evaluation. Testing outputs are collected and evaluated. Evaluation results are printed.
Parameters
----------
dataset : class
dataset class
result_path : str
path where the results are saved.
dataset_evaluation_mode : str ['folds', 'full']
        evaluation mode; with 'full', all available material is considered to belong to a single fold.
(Default value='folds')
Returns
-------
nothing
Raises
-------
IOError
Result file not found
"""
    # Silence warnings: sklearn metrics warn about classes with no predicted
    # samples during F1 scoring; this just keeps the printed report clean.
warnings.simplefilter("ignore")
overall_metrics_per_scene = {}
for scene_id, scene_label in enumerate(dataset.scene_labels):
if scene_label not in overall_metrics_per_scene:
overall_metrics_per_scene[scene_label] = {}
dcase2016_segment_based_metric = DCASE2016_EventDetection_SegmentBasedMetrics(
class_list=dataset.event_labels(scene_label=scene_label))
dcase2016_event_based_metric = DCASE2016_EventDetection_EventBasedMetrics(
class_list=dataset.event_labels(scene_label=scene_label))
for fold in dataset.folds(mode=dataset_evaluation_mode):
results = []
result_filename = get_result_filename(fold=fold, scene_label=scene_label, path=result_path)
if os.path.isfile(result_filename):
with open(result_filename, 'rt') as f:
for row in csv.reader(f, delimiter='\t'):
results.append(row)
else:
raise IOError("Result file not found [%s]" % result_filename)
for file_id, item in enumerate(dataset.test(fold, scene_label=scene_label)):
current_file_results = []
for result_line in results:
if len(result_line) != 0 and result_line[0] == dataset.absolute_to_relative(item['file']):
current_file_results.append(
{'file': result_line[0],
'event_onset': float(result_line[1]),
'event_offset': float(result_line[2]),
'event_label': result_line[3].rstrip()
}
)
meta = dataset.file_meta(dataset.absolute_to_relative(item['file']))
dcase2016_segment_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
dcase2016_event_based_metric.evaluate(system_output=current_file_results, annotated_ground_truth=meta)
overall_metrics_per_scene[scene_label]['segment_based_metrics'] = dcase2016_segment_based_metric.results()
overall_metrics_per_scene[scene_label]['event_based_metrics'] = dcase2016_event_based_metric.results()
print " Evaluation over %d folds" % dataset.fold_count
print " "
print " Results per scene "
print " {:18s} | {:5s} | | {:39s} ".format('', 'Main', 'Secondary metrics')
print " {:18s} | {:5s} | | {:38s} | {:14s} | {:14s} | {:14s} ".format('', '', 'Seg/Overall', 'Seg/Class',
'Event/Overall', 'Event/Class')
print " {:18s} | {:5s} | | {:6s} : {:5s} : {:5s} : {:5s} : {:5s} | {:6s} : {:5s} | {:6s} : {:5s} | {:6s} : {:5s} |".format(
'Scene', 'ER', 'F1', 'ER', 'ER/S', 'ER/D', 'ER/I', 'F1', 'ER', 'F1', 'ER', 'F1', 'ER')
print " -------------------+-------+ +--------+-------+-------+-------+-------+--------+-------+--------+-------+--------+-------+"
averages = {
'segment_based_metrics': {
'overall': {
'ER': [],
'F': [],
},
'class_wise_average': {
'ER': [],
'F': [],
}
},
'event_based_metrics': {
'overall': {
'ER': [],
'F': [],
},
'class_wise_average': {
'ER': [],
'F': [],
}
},
}
for scene_id, scene_label in enumerate(dataset.scene_labels):
print " {:18s} | {:5.2f} | | {:4.1f} % : {:5.2f} : {:5.2f} : {:5.2f} : {:5.2f} | {:4.1f} % : {:5.2f} | {:4.1f} % : {:5.2f} | {:4.1f} % : {:5.2f} |".format(
scene_label,
overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['ER'],
overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['F'] * 100,
overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['ER'],
overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['S'],
overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['D'],
overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['I'],
overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['F'] * 100,
overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['ER'],
overall_metrics_per_scene[scene_label]['event_based_metrics']['overall']['F'] * 100,
overall_metrics_per_scene[scene_label]['event_based_metrics']['overall']['ER'],
overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['F'] * 100,
overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['ER'],
)
averages['segment_based_metrics']['overall']['ER'].append(
overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['ER'])
averages['segment_based_metrics']['overall']['F'].append(
overall_metrics_per_scene[scene_label]['segment_based_metrics']['overall']['F'])
averages['segment_based_metrics']['class_wise_average']['ER'].append(
overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['ER'])
averages['segment_based_metrics']['class_wise_average']['F'].append(
overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['F'])
averages['event_based_metrics']['overall']['ER'].append(
overall_metrics_per_scene[scene_label]['event_based_metrics']['overall']['ER'])
averages['event_based_metrics']['overall']['F'].append(
overall_metrics_per_scene[scene_label]['event_based_metrics']['overall']['F'])
averages['event_based_metrics']['class_wise_average']['ER'].append(
overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['ER'])
averages['event_based_metrics']['class_wise_average']['F'].append(
overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['F'])
print " -------------------+-------+ +--------+-------+-------+-------+-------+--------+-------+--------+-------+--------+-------+"
print " {:18s} | {:5.2f} | | {:4.1f} % : {:5.2f} : {:21s} | {:4.1f} % : {:5.2f} | {:4.1f} % : {:5.2f} | {:4.1f} % : {:5.2f} |".format(
'Average',
numpy.mean(averages['segment_based_metrics']['overall']['ER']),
numpy.mean(averages['segment_based_metrics']['overall']['F']) * 100,
numpy.mean(averages['segment_based_metrics']['overall']['ER']),
' ',
numpy.mean(averages['segment_based_metrics']['class_wise_average']['F']) * 100,
numpy.mean(averages['segment_based_metrics']['class_wise_average']['ER']),
numpy.mean(averages['event_based_metrics']['overall']['F']) * 100,
numpy.mean(averages['event_based_metrics']['overall']['ER']),
numpy.mean(averages['event_based_metrics']['class_wise_average']['F']) * 100,
numpy.mean(averages['event_based_metrics']['class_wise_average']['ER']),
)
print " "
# Restore warnings to default settings
warnings.simplefilter("default")
print " Results per events "
for scene_id, scene_label in enumerate(dataset.scene_labels):
print " "
print " " + scene_label.upper()
print " {:20s} | {:30s} | | {:15s} ".format('', 'Segment-based', 'Event-based')
print " {:20s} | {:5s} : {:5s} : {:6s} : {:5s} | | {:5s} : {:5s} : {:6s} : {:5s} |".format('Event', 'Nref',
'Nsys', 'F1', 'ER',
'Nref', 'Nsys',
'F1', 'ER')
print " ---------------------+-------+-------+--------+-------+ +-------+-------+--------+-------+"
seg_Nref = 0
seg_Nsys = 0
event_Nref = 0
event_Nsys = 0
for event_label in sorted(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise']):
print " {:20s} | {:5d} : {:5d} : {:4.1f} % : {:5.2f} | | {:5d} : {:5d} : {:4.1f} % : {:5.2f} |".format(
event_label,
int(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['Nref']),
int(overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['Nsys']),
overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['F'] * 100,
overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['ER'],
int(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['Nref']),
int(overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['Nsys']),
overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['F'] * 100,
overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['ER'])
seg_Nref += int(
overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['Nref'])
seg_Nsys += int(
overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise'][event_label]['Nsys'])
event_Nref += int(
overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['Nref'])
event_Nsys += int(
overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise'][event_label]['Nsys'])
print " ---------------------+-------+-------+--------+-------+ +-------+-------+--------+-------+"
print " {:20s} | {:5d} : {:5d} : {:14s} | | {:5d} : {:5d} : {:14s} |".format('Sum',
seg_Nref,
seg_Nsys,
'',
event_Nref,
event_Nsys,
'')
print " {:20s} | {:5s} {:5s} : {:4.1f} % : {:5.2f} | | {:5s} {:5s} : {:4.1f} % : {:5.2f} |".format(
'Average',
'', '',
overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['F'] * 100,
overall_metrics_per_scene[scene_label]['segment_based_metrics']['class_wise_average']['ER'],
'', '',
overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['F'] * 100,
overall_metrics_per_scene[scene_label]['event_based_metrics']['class_wise_average']['ER'])
print " "
if __name__ == "__main__":
try:
sys.exit(main(sys.argv))
except (ValueError, IOError) as e:
sys.exit(e)
| mit |
victor-prado/broker-manager | environment/lib/python3.5/site-packages/pandas/sparse/tests/test_indexing.py | 7 | 38977 | # pylint: disable-msg=E1101,W0612
import nose # noqa
import numpy as np
import pandas as pd
import pandas.util.testing as tm
class TestSparseSeriesIndexing(tm.TestCase):
_multiprocess_can_split_ = True
def setUp(self):
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
self.sparse = self.orig.to_sparse()
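    # Fixture note (illustrative): self.orig holds NaN at positions 1, 2 and 4,
    # so to_sparse() keeps only the two non-NaN values; the tests below compare
    # sparse indexing results against the equivalent dense operation followed
    # by .to_sparse().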
def test_getitem(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse[0], 1)
self.assertTrue(np.isnan(sparse[1]))
self.assertEqual(sparse[3], 3)
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[:2], orig[:2].to_sparse())
tm.assert_sp_series_equal(sparse[4:2], orig[4:2].to_sparse())
tm.assert_sp_series_equal(sparse[::2], orig[::2].to_sparse())
tm.assert_sp_series_equal(sparse[-5:], orig[-5:].to_sparse())
def test_getitem_int_dtype(self):
# GH 8292
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6], name='xxx')
tm.assert_sp_series_equal(res, exp)
self.assertEqual(res.dtype, np.int64)
s = pd.SparseSeries([0, 1, 2, 3, 4, 5, 6], fill_value=0, name='xxx')
res = s[::2]
exp = pd.SparseSeries([0, 2, 4, 6], index=[0, 2, 4, 6],
fill_value=0, name='xxx')
tm.assert_sp_series_equal(res, exp)
self.assertEqual(res.dtype, np.int64)
def test_getitem_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse[0], 1)
self.assertTrue(np.isnan(sparse[1]))
self.assertEqual(sparse[2], 0)
self.assertEqual(sparse[3], 3)
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_ellipsis(self):
# GH 9467
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan])
tm.assert_sp_series_equal(s[...], s)
s = pd.SparseSeries([1, np.nan, 2, 0, np.nan], fill_value=0)
tm.assert_sp_series_equal(s[...], s)
def test_getitem_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse[:2],
orig[:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[4:2],
orig[4:2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[::2],
orig[::2].to_sparse(fill_value=0))
tm.assert_sp_series_equal(sparse[-5:],
orig[-5:].to_sparse(fill_value=0))
def test_loc(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.loc[0], 1)
self.assertTrue(np.isnan(sparse.loc[1]))
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# padded with NaN
self.assertTrue(np.isnan(result[-1]))
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=list('ABCDE'))
sparse = orig.to_sparse()
self.assertEqual(sparse.loc['A'], 1)
self.assertTrue(np.isnan(sparse.loc['B']))
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.loc['A'], 1)
self.assertTrue(np.isnan(sparse.loc['B']))
result = sparse.loc[['A', 'C', 'D']]
exp = orig.loc[['A', 'C', 'D']].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_loc_slice_index_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc['C':],
orig.loc['C':].to_sparse(fill_value=0))
def test_loc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.loc[2:],
orig.loc[2:].to_sparse(fill_value=0))
def test_iloc(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.iloc[3], 3)
self.assertTrue(np.isnan(sparse.iloc[2]))
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
result = sparse.iloc[[1, -2, -4]]
exp = orig.iloc[[1, -2, -4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
with tm.assertRaises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.iloc[3], 3)
self.assertTrue(np.isnan(sparse.iloc[1]))
self.assertEqual(sparse.iloc[4], 0)
result = sparse.iloc[[1, 3, 4]]
exp = orig.iloc[[1, 3, 4]].to_sparse(fill_value=0)
tm.assert_sp_series_equal(result, exp)
def test_iloc_slice(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_iloc_slice_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.iloc[2:],
orig.iloc[2:].to_sparse(fill_value=0))
def test_at(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan])
sparse = orig.to_sparse()
self.assertEqual(sparse.at[0], orig.at[0])
self.assertTrue(np.isnan(sparse.at[1]))
self.assertTrue(np.isnan(sparse.at[2]))
self.assertEqual(sparse.at[3], orig.at[3])
self.assertTrue(np.isnan(sparse.at[4]))
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('abcde'))
sparse = orig.to_sparse()
self.assertEqual(sparse.at['a'], orig.at['a'])
self.assertTrue(np.isnan(sparse.at['b']))
self.assertTrue(np.isnan(sparse.at['c']))
self.assertEqual(sparse.at['d'], orig.at['d'])
self.assertTrue(np.isnan(sparse.at['e']))
def test_at_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('abcde'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.at['a'], orig.at['a'])
self.assertTrue(np.isnan(sparse.at['b']))
self.assertEqual(sparse.at['c'], orig.at['c'])
self.assertEqual(sparse.at['d'], orig.at['d'])
self.assertEqual(sparse.at['e'], orig.at['e'])
def test_iat(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.iat[0], orig.iat[0])
self.assertTrue(np.isnan(sparse.iat[1]))
self.assertTrue(np.isnan(sparse.iat[2]))
self.assertEqual(sparse.iat[3], orig.iat[3])
self.assertTrue(np.isnan(sparse.iat[4]))
self.assertTrue(np.isnan(sparse.iat[-1]))
self.assertEqual(sparse.iat[-5], orig.iat[-5])
def test_iat_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0])
sparse = orig.to_sparse()
self.assertEqual(sparse.iat[0], orig.iat[0])
self.assertTrue(np.isnan(sparse.iat[1]))
self.assertEqual(sparse.iat[2], orig.iat[2])
self.assertEqual(sparse.iat[3], orig.iat[3])
self.assertEqual(sparse.iat[4], orig.iat[4])
self.assertEqual(sparse.iat[-1], orig.iat[-1])
self.assertEqual(sparse.iat[-5], orig.iat[-5])
def test_get(self):
s = pd.SparseSeries([1, np.nan, np.nan, 3, np.nan])
self.assertEqual(s.get(0), 1)
self.assertTrue(np.isnan(s.get(1)))
self.assertIsNone(s.get(5))
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'))
self.assertEqual(s.get('A'), 1)
self.assertTrue(np.isnan(s.get('B')))
self.assertEqual(s.get('C'), 0)
self.assertIsNone(s.get('XX'))
s = pd.SparseSeries([1, np.nan, 0, 3, 0], index=list('ABCDE'),
fill_value=0)
self.assertEqual(s.get('A'), 1)
self.assertTrue(np.isnan(s.get('B')))
self.assertEqual(s.get('C'), 0)
self.assertIsNone(s.get('XX'))
def test_take(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_series_equal(sparse.take([0, 1, 3]),
orig.take([0, 1, 3]).to_sparse())
tm.assert_sp_series_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0]),
orig.take([0]).to_sparse(fill_value=0))
exp = orig.take([0, 1, 3]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([0, 1, 3]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.Series([1, np.nan, np.nan, 3, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
# all missing & fill_value
res = sparse.reindex(['B', 'E', 'C'])
exp = orig.reindex(['B', 'E', 'C']).to_sparse()
tm.assert_sp_series_equal(res, exp)
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse()
tm.assert_sp_series_equal(res, exp)
def test_reindex_fill_value(self):
orig = pd.Series([1, np.nan, 0, 3, 0], index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# includes missing and fill_value
res = sparse.reindex(['A', 'B', 'C'])
exp = orig.reindex(['A', 'B', 'C']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all missing
orig = pd.Series([np.nan, np.nan, np.nan, np.nan, np.nan],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
# all fill_value
orig = pd.Series([0., 0., 0., 0., 0.],
index=list('ABCDE'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'E', 'C', 'D'])
exp = orig.reindex(['A', 'E', 'C', 'D']).to_sparse(fill_value=0)
tm.assert_sp_series_equal(res, exp)
def tests_indexing_with_sparse(self):
# GH 13985
for kind in ['integer', 'block']:
for fill in [True, False, np.nan]:
arr = pd.SparseArray([1, 2, 3], kind=kind)
indexer = pd.SparseArray([True, False, True], fill_value=fill,
dtype=bool)
tm.assert_sp_array_equal(pd.SparseArray([1, 3], kind=kind),
arr[indexer])
s = pd.SparseSeries(arr, index=['a', 'b', 'c'],
dtype=np.float64)
exp = pd.SparseSeries([1, 3], index=['a', 'c'],
dtype=np.float64, kind=kind)
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
tm.assert_sp_series_equal(s.iloc[indexer], exp)
indexer = pd.SparseSeries(indexer, index=['a', 'b', 'c'])
tm.assert_sp_series_equal(s[indexer], exp)
tm.assert_sp_series_equal(s.loc[indexer], exp)
msg = ("iLocation based boolean indexing cannot use an "
"indexable as a mask")
with tm.assertRaisesRegexp(ValueError, msg):
s.iloc[indexer]
class TestSparseSeriesMultiIndexing(TestSparseSeriesIndexing):
_multiprocess_can_split_ = True
def setUp(self):
        # MultiIndex with duplicated values
idx = pd.MultiIndex.from_tuples([('A', 0), ('A', 1), ('B', 0),
('C', 0), ('C', 1)])
self.orig = pd.Series([1, np.nan, np.nan, 3, np.nan], index=idx)
self.sparse = self.orig.to_sparse()
def test_getitem_multi(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse[0], orig[0])
self.assertTrue(np.isnan(sparse[1]))
self.assertEqual(sparse[3], orig[3])
tm.assert_sp_series_equal(sparse['A'], orig['A'].to_sparse())
tm.assert_sp_series_equal(sparse['B'], orig['B'].to_sparse())
result = sparse[[1, 3, 4]]
exp = orig[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse[orig % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse[sparse % 2 == 1]
exp = orig[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_getitem_multi_tuple(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse['C', 0], orig['C', 0])
self.assertTrue(np.isnan(sparse['A', 1]))
self.assertTrue(np.isnan(sparse['B', 0]))
def test_getitems_slice_multi(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse[2:], orig[2:].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['A':'B'],
orig.loc['A':'B'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())
def test_loc(self):
# need to be override to use different label
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc['A'],
orig.loc['A'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B'],
orig.loc['B'].to_sparse())
result = sparse.loc[[1, 3, 4]]
exp = orig.loc[[1, 3, 4]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_series_equal(result, exp)
# dense array
result = sparse.loc[orig % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse % 2 == 1]
exp = orig.loc[orig % 2 == 1].to_sparse()
tm.assert_sp_series_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse % 2 == 1, dtype=bool)]
tm.assert_sp_series_equal(result, exp)
def test_loc_multi_tuple(self):
orig = self.orig
sparse = self.sparse
self.assertEqual(sparse.loc['C', 0], orig.loc['C', 0])
self.assertTrue(np.isnan(sparse.loc['A', 1]))
self.assertTrue(np.isnan(sparse.loc['B', 0]))
def test_loc_slice(self):
orig = self.orig
sparse = self.sparse
tm.assert_sp_series_equal(sparse.loc['A':], orig.loc['A':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['B':], orig.loc['B':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['C':], orig.loc['C':].to_sparse())
tm.assert_sp_series_equal(sparse.loc['A':'B'],
orig.loc['A':'B'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:'B'], orig.loc[:'B'].to_sparse())
class TestSparseDataFrameIndexing(tm.TestCase):
_multiprocess_can_split_ = True
def test_getitem(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_series_equal(sparse['x'], orig['x'].to_sparse())
tm.assert_sp_frame_equal(sparse[['x']], orig[['x']].to_sparse())
tm.assert_sp_frame_equal(sparse[['z', 'x']],
orig[['z', 'x']].to_sparse())
tm.assert_sp_frame_equal(sparse[[True, False, True, True]],
orig[[True, False, True, True]].to_sparse())
tm.assert_sp_frame_equal(sparse[[1, 2]],
orig[[1, 2]].to_sparse())
def test_getitem_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
tm.assert_sp_series_equal(sparse['y'],
orig['y'].to_sparse(fill_value=0))
exp = orig[['x']].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[['x']], exp)
exp = orig[['z', 'x']].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[['z', 'x']], exp)
indexer = [True, False, True, True]
exp = orig[indexer].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[indexer], exp)
exp = orig[[1, 2]].to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse[[1, 2]], exp)
def test_loc(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.loc[0, 'x'], 1)
self.assertTrue(np.isnan(sparse.loc[1, 'z']))
self.assertEqual(sparse.loc[2, 'z'], 4)
tm.assert_sp_series_equal(sparse.loc[0], orig.loc[0].to_sparse())
tm.assert_sp_series_equal(sparse.loc[1], orig.loc[1].to_sparse())
tm.assert_sp_series_equal(sparse.loc[2, :],
orig.loc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.loc[2, :],
orig.loc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'y'],
orig.loc[:, 'y'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'y'],
orig.loc[:, 'y'].to_sparse())
result = sparse.loc[[1, 2]]
exp = orig.loc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[1, 2], :]
exp = orig.loc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ['x', 'z']]
exp = orig.loc[:, ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[[0, 2], ['x', 'z']]
exp = orig.loc[[0, 2], ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# exceeds the bounds
result = sparse.loc[[1, 3, 4, 5]]
exp = orig.loc[[1, 3, 4, 5]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_index(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
index=list('abc'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.loc['a', 'x'], 1)
self.assertTrue(np.isnan(sparse.loc['b', 'z']))
self.assertEqual(sparse.loc['c', 'z'], 4)
tm.assert_sp_series_equal(sparse.loc['a'], orig.loc['a'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['b'], orig.loc['b'].to_sparse())
tm.assert_sp_series_equal(sparse.loc['b', :],
orig.loc['b', :].to_sparse())
tm.assert_sp_series_equal(sparse.loc['b', :],
orig.loc['b', :].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'z'],
orig.loc[:, 'z'].to_sparse())
tm.assert_sp_series_equal(sparse.loc[:, 'z'],
orig.loc[:, 'z'].to_sparse())
result = sparse.loc[['a', 'b']]
exp = orig.loc[['a', 'b']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[['a', 'b'], :]
exp = orig.loc[['a', 'b'], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[:, ['x', 'z']]
exp = orig.loc[:, ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.loc[['c', 'a'], ['x', 'z']]
exp = orig.loc[['c', 'a'], ['x', 'z']].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# dense array
result = sparse.loc[orig.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
        # sparse array (actually it coerces to a normal Series)
result = sparse.loc[sparse.x % 2 == 1]
exp = orig.loc[orig.x % 2 == 1].to_sparse()
tm.assert_sp_frame_equal(result, exp)
# sparse array
result = sparse.loc[pd.SparseArray(sparse.x % 2 == 1, dtype=bool)]
tm.assert_sp_frame_equal(result, exp)
def test_loc_slice(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.loc[2:], orig.loc[2:].to_sparse())
def test_iloc(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]])
sparse = orig.to_sparse()
self.assertEqual(sparse.iloc[1, 1], 3)
self.assertTrue(np.isnan(sparse.iloc[2, 0]))
tm.assert_sp_series_equal(sparse.iloc[0], orig.loc[0].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[1], orig.loc[1].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[2, :],
orig.iloc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[2, :],
orig.iloc[2, :].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[:, 1],
orig.iloc[:, 1].to_sparse())
tm.assert_sp_series_equal(sparse.iloc[:, 1],
orig.iloc[:, 1].to_sparse())
result = sparse.iloc[[1, 2]]
exp = orig.iloc[[1, 2]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[1, 2], :]
exp = orig.iloc[[1, 2], :].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[:, [1, 0]]
exp = orig.iloc[:, [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
result = sparse.iloc[[2], [1, 0]]
exp = orig.iloc[[2], [1, 0]].to_sparse()
tm.assert_sp_frame_equal(result, exp)
with tm.assertRaises(IndexError):
sparse.iloc[[1, 3, 5]]
def test_iloc_slice(self):
orig = pd.DataFrame([[1, np.nan, np.nan],
[2, 3, np.nan],
[np.nan, np.nan, 4]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.iloc[2:], orig.iloc[2:].to_sparse())
def test_at(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.at['A', 'x'], orig.at['A', 'x'])
self.assertTrue(np.isnan(sparse.at['B', 'z']))
self.assertTrue(np.isnan(sparse.at['C', 'y']))
self.assertEqual(sparse.at['D', 'x'], orig.at['D', 'x'])
def test_at_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.at['A', 'x'], orig.at['A', 'x'])
self.assertTrue(np.isnan(sparse.at['B', 'z']))
self.assertTrue(np.isnan(sparse.at['C', 'y']))
self.assertEqual(sparse.at['D', 'x'], orig.at['D', 'x'])
def test_iat(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
self.assertEqual(sparse.iat[0, 0], orig.iat[0, 0])
self.assertTrue(np.isnan(sparse.iat[1, 2]))
self.assertTrue(np.isnan(sparse.iat[2, 1]))
self.assertEqual(sparse.iat[2, 0], orig.iat[2, 0])
self.assertTrue(np.isnan(sparse.iat[-1, -2]))
self.assertEqual(sparse.iat[-1, -1], orig.iat[-1, -1])
def test_iat_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
self.assertEqual(sparse.iat[0, 0], orig.iat[0, 0])
self.assertTrue(np.isnan(sparse.iat[1, 2]))
self.assertTrue(np.isnan(sparse.iat[2, 1]))
self.assertEqual(sparse.iat[2, 0], orig.iat[2, 0])
self.assertTrue(np.isnan(sparse.iat[-1, -2]))
self.assertEqual(sparse.iat[-1, -1], orig.iat[-1, -1])
def test_take(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse()
tm.assert_sp_frame_equal(sparse.take([0]),
orig.take([0]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([0, 1]),
orig.take([0, 1]).to_sparse())
tm.assert_sp_frame_equal(sparse.take([-1, -2]),
orig.take([-1, -2]).to_sparse())
def test_take_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
exp = orig.take([0]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0]), exp)
exp = orig.take([0, 1]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([0, 1]), exp)
exp = orig.take([-1, -2]).to_sparse(fill_value=0)
exp._default_fill_value = np.nan
tm.assert_sp_frame_equal(sparse.take([-1, -2]), exp)
def test_reindex(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse()
tm.assert_sp_frame_equal(res, exp)
orig = pd.DataFrame([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse()
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse()
tm.assert_sp_frame_equal(res, exp)
def test_reindex_fill_value(self):
orig = pd.DataFrame([[1, np.nan, 0],
[2, 3, np.nan],
[0, np.nan, 4],
[0, np.nan, 5]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all missing
orig = pd.DataFrame([[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan],
[np.nan, np.nan, np.nan]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
# all fill_value
orig = pd.DataFrame([[0, 0, 0],
[0, 0, 0],
[0, 0, 0],
[0, 0, 0]],
index=list('ABCD'), columns=list('xyz'))
sparse = orig.to_sparse(fill_value=0)
res = sparse.reindex(['A', 'C', 'B'])
exp = orig.reindex(['A', 'C', 'B']).to_sparse(fill_value=0)
tm.assert_sp_frame_equal(res, exp)
class TestMultitype(tm.TestCase):
def setUp(self):
self.cols = ['string', 'int', 'float', 'object']
self.string_series = pd.SparseSeries(['a', 'b', 'c'])
self.int_series = pd.SparseSeries([1, 2, 3])
self.float_series = pd.SparseSeries([1.1, 1.2, 1.3])
self.object_series = pd.SparseSeries([[], {}, set()])
self.sdf = pd.SparseDataFrame({
'string': self.string_series,
'int': self.int_series,
'float': self.float_series,
'object': self.object_series,
})
self.sdf = self.sdf[self.cols]
self.ss = pd.SparseSeries(['a', 1, 1.1, []], index=self.cols)
def test_frame_basic_dtypes(self):
for _, row in self.sdf.iterrows():
self.assertEqual(row.dtype, object)
tm.assert_sp_series_equal(self.sdf['string'], self.string_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['int'], self.int_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['float'], self.float_series,
check_names=False)
tm.assert_sp_series_equal(self.sdf['object'], self.object_series,
check_names=False)
def test_frame_indexing_single(self):
tm.assert_sp_series_equal(self.sdf.iloc[0],
pd.SparseSeries(['a', 1, 1.1, []],
index=self.cols),
check_names=False)
tm.assert_sp_series_equal(self.sdf.iloc[1],
pd.SparseSeries(['b', 2, 1.2, {}],
index=self.cols),
check_names=False)
tm.assert_sp_series_equal(self.sdf.iloc[2],
pd.SparseSeries(['c', 3, 1.3, set()],
index=self.cols),
check_names=False)
def test_frame_indexing_multiple(self):
tm.assert_sp_frame_equal(self.sdf, self.sdf[:])
tm.assert_sp_frame_equal(self.sdf, self.sdf.loc[:])
tm.assert_sp_frame_equal(self.sdf.iloc[[1, 2]],
pd.SparseDataFrame({
'string': self.string_series.iloc[[1, 2]],
'int': self.int_series.iloc[[1, 2]],
'float': self.float_series.iloc[[1, 2]],
'object': self.object_series.iloc[[1, 2]]
}, index=[1, 2])[self.cols])
tm.assert_sp_frame_equal(self.sdf[['int', 'string']],
pd.SparseDataFrame({
'int': self.int_series,
'string': self.string_series,
}))
def test_series_indexing_single(self):
for i, idx in enumerate(self.cols):
self.assertEqual(self.ss.iloc[i], self.ss[idx])
self.assertEqual(type(self.ss.iloc[i]),
type(self.ss[idx]))
self.assertEqual(self.ss['string'], 'a')
self.assertEqual(self.ss['int'], 1)
self.assertEqual(self.ss['float'], 1.1)
self.assertEqual(self.ss['object'], [])
def test_series_indexing_multiple(self):
tm.assert_sp_series_equal(self.ss.loc[['string', 'int']],
pd.SparseSeries(['a', 1],
index=['string', 'int']))
tm.assert_sp_series_equal(self.ss.loc[['string', 'object']],
pd.SparseSeries(['a', []],
index=['string', 'object']))
| mit |
yarikoptic/NiPy-OLD | nipy/neurospin/viz/activation_maps.py | 1 | 25526 | #!/usr/bin/env python
"""
Functions to do automatic visualization of activation-like maps.
For 2D-only visualization, only matplotlib is required.
For 3D visualization, Mayavi, version 3.0 or greater, is required.
"""
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# License: BSD
# Standard library imports
import os
import sys
# Standard scientific libraries imports (more specific imports are
# delayed, so that the part module can be used without them).
import numpy as np
import matplotlib as mp
import pylab as pl
# Local imports
from nipy.neurospin.utils.mask import compute_mask
from nipy.io.imageformats import load
from anat_cache import mni_sform, mni_sform_inv, _AnatCache
from coord_tools import coord_transform, find_activation, \
find_cut_coords
class SformError(Exception):
pass
class NiftiIndexError(IndexError):
pass
################################################################################
# Colormaps
def _rotate_cmap(cmap, name=None, swap_order=('green', 'red', 'blue')):
""" Utility function to swap the colors of a colormap.
"""
orig_cdict = cmap._segmentdata.copy()
cdict = dict()
cdict['green'] = [(p, c1, c2)
for (p, c1, c2) in orig_cdict[swap_order[0]]]
cdict['blue'] = [(p, c1, c2)
for (p, c1, c2) in orig_cdict[swap_order[1]]]
cdict['red'] = [(p, c1, c2)
for (p, c1, c2) in orig_cdict[swap_order[2]]]
if name is None:
name = '%s_rotated' % cmap.name
return mp.colors.LinearSegmentedColormap(name, cdict, 512)
def _pigtailed_cmap(cmap, name=None,
swap_order=('green', 'red', 'blue')):
""" Utility function to make a new colormap by concatenating a
colormap with its reverse.
"""
orig_cdict = cmap._segmentdata.copy()
cdict = dict()
cdict['green'] = [(0.5*(1-p), c1, c2)
for (p, c1, c2) in reversed(orig_cdict[swap_order[0]])]
cdict['blue'] = [(0.5*(1-p), c1, c2)
for (p, c1, c2) in reversed(orig_cdict[swap_order[1]])]
cdict['red'] = [(0.5*(1-p), c1, c2)
for (p, c1, c2) in reversed(orig_cdict[swap_order[2]])]
for color in ('red', 'green', 'blue'):
cdict[color].extend([(0.5*(1+p), c1, c2)
for (p, c1, c2) in orig_cdict[color]])
if name is None:
name = '%s_reversed' % cmap.name
return mp.colors.LinearSegmentedColormap(name, cdict, 512)
# Using a dict as a namespace, to mimic matplotlib's cm
_cm = dict(
cold_hot = _pigtailed_cmap(pl.cm.hot, name='cold_hot'),
brown_blue = _pigtailed_cmap(pl.cm.bone, name='brown_blue'),
cyan_copper = _pigtailed_cmap(pl.cm.copper, name='cyan_copper'),
cyan_orange = _pigtailed_cmap(pl.cm.YlOrBr_r, name='cyan_orange'),
blue_red = _pigtailed_cmap(pl.cm.Reds_r, name='blue_red'),
brown_cyan = _pigtailed_cmap(pl.cm.Blues_r, name='brown_cyan'),
purple_green = _pigtailed_cmap(pl.cm.Greens_r, name='purple_green',
swap_order=('red', 'blue', 'green')),
purple_blue = _pigtailed_cmap(pl.cm.Blues_r, name='purple_blue',
swap_order=('red', 'blue', 'green')),
blue_orange = _pigtailed_cmap(pl.cm.Oranges_r, name='blue_orange',
swap_order=('green', 'red', 'blue')),
black_blue = _rotate_cmap(pl.cm.hot, name='black_blue'),
black_purple = _rotate_cmap(pl.cm.hot, name='black_purple',
swap_order=('blue', 'red', 'green')),
black_pink = _rotate_cmap(pl.cm.hot, name='black_pink',
swap_order=('blue', 'green', 'red')),
black_green = _rotate_cmap(pl.cm.hot, name='black_green',
swap_order=('red', 'blue', 'green')),
black_red = pl.cm.hot,
)
_cm.update(pl.cm.datad)
class _CM(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
self.__dict__.update(self)
cm = _CM(**_cm)
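# Usage sketch (not part of the original module): `cm` behaves both as a dict
# and as a namespace, mimicking pylab.cm, so the two-sided colormaps defined
# above can be passed straight to imshow.
def demo_cold_hot_cmap():
    data = np.random.randn(20, 20)
    pl.imshow(data, cmap=cm.cold_hot)  # cm['cold_hot'] works as well
    pl.colorbar()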
################################################################################
# 2D plotting of activation maps
################################################################################
def plot_map_2d(map, sform, cut_coords, anat=None, anat_sform=None,
vmin=None, figure_num=None, axes=None, title='',
mask=None, **kwargs):
""" Plot three cuts of a given activation map (Frontal, Axial, and Lateral)
Parameters
----------
map : 3D ndarray
The activation map, as a 3D image.
sform : 4x4 ndarray
The affine matrix going from image voxel space to MNI space.
cut_coords: 3-tuple of floats
The MNI coordinates of the point where the cut is performed, in
MNI coordinates and order.
anat : 3D ndarray, optional or False
The anatomical image to be used as a background. If None, the
MNI152 T1 1mm template is used. If False, no anat is displayed.
anat_sform : 4x4 ndarray, optional
The affine matrix going from the anatomical image voxel space to
MNI space. This parameter is not used when the default
anatomical is used, but it is compulsory when using an
        explicit anatomical image.
vmin : float, optional
The lower threshold of the positive activation. This
parameter is used to threshold the activation map.
figure_num : integer, optional
The number of the matplotlib figure used. If None is given, a
new figure is created.
    axes : 4 tuple of float: (xmin, xmax, ymin, ymax), optional
The coordinates, in matplotlib figure space, of the axes
used to display the plot. If None, the complete figure is
used.
title : string, optional
        The title displayed on the figure.
mask : 3D ndarray, boolean, optional
        The brain mask. If None, the mask is computed from the map.
kwargs: extra keyword arguments, optional
Extra keyword arguments passed to pylab.imshow
Notes
-----
All the 3D arrays are in numpy convention: (x, y, z)
Cut coordinates are in Talairach coordinates. Warning: Talairach
coordinates are (y, x, z), if (x, y, z) are in voxel-ordering
convention.
"""
if anat is None:
anat, anat_sform, vmax_anat = _AnatCache.get_anat()
elif anat is not False:
vmax_anat = anat.max()
if mask is not None and (
np.all(mask) or np.all(np.logical_not(mask))):
mask = None
vmin_map = map.min()
vmax_map = map.max()
if vmin is not None and np.isfinite(vmin):
map = np.ma.masked_less(map, vmin)
elif mask is not None and not isinstance(map, np.ma.masked_array):
map = np.ma.masked_array(map, np.logical_not(mask))
vmin_map = map.min()
vmax_map = map.max()
    # Ensure use_mask is always bound, even when map is not a masked array.
    use_mask = False
    if isinstance(map, np.ma.core.MaskedArray):
if map._mask is False or np.all(np.logical_not(map._mask)):
map = np.asarray(map)
elif map._mask is True or np.all(map._mask):
map = np.asarray(map)
if use_mask and mask is not None:
map = np.ma.masked_array(map, np.logical_not(mask))
# Calculate the bounds
if anat is not False:
anat_bounds = np.zeros((4, 6))
anat_bounds[:3, -3:] = np.identity(3)*anat.shape
anat_bounds[-1, :] = 1
anat_bounds = np.dot(anat_sform, anat_bounds)
map_bounds = np.zeros((4, 6))
map_bounds[:3, -3:] = np.identity(3)*map.shape
map_bounds[-1, :] = 1
map_bounds = np.dot(sform, map_bounds)
# The coordinates of the center of the cut in different spaces.
y, x, z = cut_coords
x_map, y_map, z_map = [int(round(c)) for c in
coord_transform(x, y, z,
np.linalg.inv(sform))]
if anat is not False:
x_anat, y_anat, z_anat = [int(round(c)) for c in
coord_transform(x, y, z,
np.linalg.inv(anat_sform))]
fig = pl.figure(figure_num, figsize=(6.6, 2.6))
if axes is None:
axes = (0., 1., 0., 1.)
pl.clf()
ax_xmin, ax_xmax, ax_ymin, ax_ymax = axes
ax_width = ax_xmax - ax_xmin
ax_height = ax_ymax - ax_ymin
# Calculate the axes ratio size in a 'clever' way
if anat is not False:
shapes = np.array(anat.shape, 'f')
else:
shapes = np.array(map.shape, 'f')
shapes *= ax_width/shapes.sum()
###########################################################################
# Frontal
pl.axes([ax_xmin, ax_ymin, shapes[0], ax_height])
if anat is not False:
if y_anat < anat.shape[1]:
pl.imshow(np.rot90(anat[:, y_anat, :]),
cmap=pl.cm.gray,
vmin=-.5*vmax_anat,
vmax=vmax_anat,
extent=(anat_bounds[0, 3],
anat_bounds[0, 0],
anat_bounds[2, 0],
anat_bounds[2, 5]))
if y_map < map.shape[1]:
pl.imshow(np.rot90(map[:, y_map, :]),
vmin=vmin_map,
vmax=vmax_map,
extent=(map_bounds[0, 3],
map_bounds[0, 0],
map_bounds[2, 0],
map_bounds[2, 5]),
**kwargs)
    pl.text(ax_xmin + shapes[0] + shapes[1] - 0.01, ax_ymin + 0.07, '%i' % x,
horizontalalignment='right',
verticalalignment='bottom',
transform=fig.transFigure)
xmin, xmax = pl.xlim()
ymin, ymax = pl.ylim()
pl.hlines(z, xmin, xmax, color=(.5, .5, .5))
pl.vlines(-x, ymin, ymax, color=(.5, .5, .5))
pl.axis('off')
###########################################################################
# Lateral
pl.axes([ax_xmin + shapes[0], ax_ymin, shapes[1], ax_height])
if anat is not False:
if x_anat < anat.shape[0]:
pl.imshow(np.rot90(anat[x_anat, ...]), cmap=pl.cm.gray,
vmin=-.5*vmax_anat,
vmax=vmax_anat,
extent=(anat_bounds[1, 0],
anat_bounds[1, 4],
anat_bounds[2, 0],
anat_bounds[2, 5]))
if x_map < map.shape[0]:
pl.imshow(np.rot90(map[x_map, ...]),
vmin=vmin_map,
vmax=vmax_map,
extent=(map_bounds[1, 0],
map_bounds[1, 4],
map_bounds[2, 0],
map_bounds[2, 5]),
**kwargs)
pl.text(ax_xmin + shapes[-1] - 0.01, ax_ymin + 0.07, '%i' % y,
horizontalalignment='right',
verticalalignment='bottom',
transform=fig.transFigure)
xmin, xmax = pl.xlim()
ymin, ymax = pl.ylim()
pl.hlines(z, xmin, xmax, color=(.5, .5, .5))
pl.vlines(y, ymin, ymax, color=(.5, .5, .5))
pl.axis('off')
###########################################################################
# Axial
pl.axes([ax_xmin + shapes[0] + shapes[1], ax_ymin, shapes[-1],
ax_height])
if anat is not False:
if z_anat < anat.shape[2]:
pl.imshow(np.rot90(anat[..., z_anat]),
cmap=pl.cm.gray,
vmin=-.5*vmax_anat,
vmax=vmax_anat,
extent=(anat_bounds[0, 0],
anat_bounds[0, 3],
anat_bounds[1, 0],
anat_bounds[1, 4]))
if z_map < map.shape[2]:
pl.imshow(np.rot90(map[..., z_map]),
vmin=vmin_map,
vmax=vmax_map,
extent=(map_bounds[0, 0],
map_bounds[0, 3],
map_bounds[1, 0],
map_bounds[1, 4]),
**kwargs)
pl.text(ax_xmax - 0.01, ax_ymin + 0.07, '%i' % z,
horizontalalignment='right',
verticalalignment='bottom',
transform=fig.transFigure)
xmin, xmax = pl.xlim()
ymin, ymax = pl.ylim()
pl.hlines(y, xmin, xmax, color=(.5, .5, .5))
pl.vlines(x, ymin, ymax, color=(.5, .5, .5))
pl.axis('off')
pl.text(ax_xmin + 0.01, ax_ymax - 0.01, title,
horizontalalignment='left',
verticalalignment='top',
transform=fig.transFigure)
pl.axis('off')
def demo_plot_map_2d():
map = np.zeros((182, 218, 182))
    # Color an asymmetric rectangle around Brodmann area 26:
x, y, z = -6, -53, 9
x_map, y_map, z_map = coord_transform(x, y, z, mni_sform_inv)
map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1
map = np.ma.masked_less(map, 0.5)
plot_map_2d(map, mni_sform, cut_coords=(x, y, z),
figure_num=512)
def plot_map(map, sform, cut_coords, anat=None, anat_sform=None,
vmin=None, figure_num=None, title='', mask=None):
""" Plot a together a 3D volume rendering view of the activation, with an
outline of the brain, and 2D cuts. If Mayavi is not installed,
falls back to 2D views only.
Parameters
----------
map : 3D ndarray
The activation map, as a 3D image.
sform : 4x4 ndarray
The affine matrix going from image voxel space to MNI space.
cut_coords: 3-tuple of floats, optional
The MNI coordinates of the cut to perform, in MNI coordinates
        and order. If None is given, the cut_coords are automatically
estimated.
anat : 3D ndarray, optional
The anatomical image to be used as a background. If None, the
MNI152 T1 1mm template is used.
anat_sform : 4x4 ndarray, optional
The affine matrix going from the anatomical image voxel space to
MNI space. This parameter is not used when the default
anatomical is used, but it is compulsory when using an
        explicit anatomical image.
vmin : float, optional
The lower threshold of the positive activation. This
parameter is used to threshold the activation map.
figure_num : integer, optional
The number of the matplotlib and Mayavi figures used. If None is
given, a new figure is created.
title : string, optional
        The title displayed on the figure.
mask : 3D ndarray, boolean, optional
The brain mask. If None, the mask is computed from the map.
Notes
-----
All the 3D arrays are in numpy convention: (x, y, z)
Cut coordinates are in Talairach coordinates. Warning: Talairach
coordinates are (y, x, z), if (x, y, z) are in voxel-ordering
convention.
"""
try:
from enthought.mayavi import version
if not int(version.version[0]) > 2:
raise ImportError
except ImportError:
        print >> sys.stderr, 'Mayavi 3.0 or greater not installed, plotting only 2D'
return plot_map_2d(map, sform, cut_coords=cut_coords, anat=anat,
anat_sform=anat_sform, vmin=vmin,
title=title,
figure_num=figure_num, mask=mask)
from .maps_3d import plot_map_3d, m2screenshot
plot_map_3d(map, sform, cut_coords=cut_coords, anat=anat,
anat_sform=anat_sform, vmin=vmin,
figure_num=figure_num, mask=mask)
fig = pl.figure(figure_num, figsize=(10.6, 2.6))
ax = pl.axes((-0.01, 0, 0.3, 1))
m2screenshot(mpl_axes=ax)
plot_map_2d(map, sform, cut_coords=cut_coords, anat=anat,
anat_sform=anat_sform, vmin=vmin, mask=mask,
figure_num=fig.number, axes=(0.28, 1, 0, 1.), title=title)
def demo_plot_map():
map = np.zeros((182, 218, 182))
    # Color an asymmetric rectangle around Brodmann area 26:
x, y, z = -6, -53, 9
x_map, y_map, z_map = coord_transform(x, y, z, mni_sform_inv)
map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1
plot_map(map, mni_sform, cut_coords=(x, y, z), vmin=0.5,
figure_num=512)
def auto_plot_map(map, sform, vmin=None, cut_coords=None, do3d=False,
anat=None, anat_sform=None, title='',
figure_num=None, mask=None, auto_sign=True):
""" Automatic plotting of an activation map.
    Plot together a 3D volume rendering view of the activation, with an
outline of the brain, and 2D cuts. If Mayavi is not installed,
falls back to 2D views only.
Parameters
----------
map : 3D ndarray
The activation map, as a 3D image.
sform : 4x4 ndarray
The affine matrix going from image voxel space to MNI space.
vmin : float, optional
The lower threshold of the positive activation. This
parameter is used to threshold the activation map.
cut_coords: 3-tuple of floats, optional
The MNI coordinates of the point where the cut is performed, in
MNI coordinates and order. If None is given, the cut_coords are
        automatically estimated.
do3d : boolean, optional
If do3d is True, a 3D plot is created if Mayavi is installed.
anat : 3D ndarray, optional
The anatomical image to be used as a background. If None, the
MNI152 T1 1mm template is used.
anat_sform : 4x4 ndarray, optional
The affine matrix going from the anatomical image voxel space to
MNI space. This parameter is not used when the default
anatomical is used, but it is compulsory when using an
        explicit anatomical image.
title : string, optional
        The title displayed on the figure.
figure_num : integer, optional
The number of the matplotlib and Mayavi figures used. If None is
given, a new figure is created.
mask : 3D ndarray, boolean, optional
The brain mask. If None, the mask is computed from the map.
auto_sign : boolean, optional
If auto_sign is True, the sign of the activation is
        automatically computed: negative activation can thus be
plotted.
Returns
-------
vmin : float
The lower threshold of the activation used.
cut_coords : 3-tuple of floats
The Talairach coordinates of the cut performed for the 2D
view.
Notes
-----
All the 3D arrays are in numpy convention: (x, y, z)
Cut coordinates are in Talairach coordinates. Warning: Talairach
coordinates are (y, x, z), if (x, y, z) are in voxel-ordering
convention.
"""
if do3d:
if do3d == 'offscreen':
try:
from enthought.mayavi import mlab
mlab.options.offscreen = True
except:
pass
plotter = plot_map
else:
plotter = plot_map_2d
if mask is None:
mask = compute_mask(map)
if vmin is None:
vmin = np.inf
pvalue = 0.04
while not np.isfinite(vmin):
pvalue *= 1.25
vmax, vmin = find_activation(map, mask=mask, pvalue=pvalue)
if not np.isfinite(vmin) and auto_sign:
if np.isfinite(vmax):
vmin = -vmax
if mask is not None:
map[mask] *= -1
else:
map *= -1
if cut_coords is None:
x, y, z = find_cut_coords(map, activation_threshold=vmin)
# XXX: Careful with Voxel/MNI ordering
y, x, z = coord_transform(x, y, z, sform)
cut_coords = (x, y, z)
plotter(map, sform, vmin=vmin, cut_coords=cut_coords,
anat=anat, anat_sform=anat_sform, title=title,
figure_num=figure_num, mask=mask)
return vmin, cut_coords
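# Hedged usage sketch (not part of the original module): same synthetic
# activation as demo_plot_map above, but the cut coordinates are chosen by
# auto_plot_map, which returns the threshold and cuts it used so they can be
# reused on related maps.
def demo_auto_plot_map():
    map = np.zeros((182, 218, 182))
    x, y, z = -6, -53, 9
    x_map, y_map, z_map = coord_transform(x, y, z, mni_sform_inv)
    map[x_map-30:x_map+30, y_map-3:y_map+3, z_map-10:z_map+10] = 1
    vmin, cut_coords = auto_plot_map(map, mni_sform, vmin=0.5,
                                     figure_num=512)
    return vmin, cut_coords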
def plot_niftifile(filename, outputname=None, do3d=False, vmin=None,
cut_coords=None, anat_filename=None, figure_num=None,
mask_filename=None, auto_sign=True):
""" Given a nifti filename, plot a view of it to a file (png by
default).
Parameters
----------
filename : string
The name of the Nifti file of the map to be plotted
outputname : string, optional
The file name of the output file created. By default
the name of the input file with a png extension is used.
do3d : boolean, optional
If do3d is True, a 3D plot is created if Mayavi is installed.
vmin : float, optional
The lower threshold of the positive activation. This
parameter is used to threshold the activation map.
cut_coords: 3-tuple of floats, optional
The MNI coordinates of the point where the cut is performed, in
MNI coordinates and order. If None is given, the cut_coords are
        automatically estimated.
    anat_filename : string, optional
        Name of the Nifti image file to be used as a background. If None,
        the MNI152 T1 1mm template is used.
figure_num : integer, optional
The number of the matplotlib and Mayavi figures used. If None is
given, a new figure is created.
mask_filename : string, optional
Name of the Nifti file to be used as brain mask. If None, the
mask is computed from the map.
auto_sign : boolean, optional
If auto_sign is True, the sign of the activation is
        automatically computed: negative activation can thus be
plotted.
Notes
-----
Cut coordinates are in Talairach coordinates. Warning: Talairach
coordinates are (y, x, z), if (x, y, z) are in voxel-ordering
convention.
"""
if outputname is None:
outputname = os.path.splitext(filename)[0] + '.png'
if not os.path.exists(filename):
raise OSError, 'File %s does not exist' % filename
nim = load(filename)
sform = nim.get_affine()
    if any(np.linalg.eigvals(sform) == 0):
        raise SformError, "sform affine is not invertible"
if anat_filename is not None:
anat_im = load(anat_filename)
anat = anat_im.data
anat_sform = anat_im.get_affine()
else:
anat = None
anat_sform = None
if mask_filename is not None:
mask_im = load(mask_filename)
mask = mask_im.data.astype(np.bool)
if not np.allclose(mask_im.get_affine(), sform):
raise SformError, 'Mask does not have same sform as image'
if not np.allclose(mask.shape, nim.data.shape[:3]):
raise NiftiIndexError, 'Mask does not have same shape as image'
else:
mask = None
output_files = list()
if nim.data.ndim == 3:
map = nim.data.T
auto_plot_map(map, sform, vmin=vmin, cut_coords=cut_coords,
do3d=do3d, anat=anat, anat_sform=anat_sform, mask=mask,
title=os.path.basename(filename), figure_num=figure_num,
auto_sign=auto_sign)
pl.savefig(outputname)
output_files.append(outputname)
elif nim.data.ndim == 4:
outputname, outputext = os.path.splitext(outputname)
if len(nim.data) < 10:
fmt = '%s_%i%s'
elif len(nim.data) < 100:
fmt = '%s_%02i%s'
elif len(nim.data) < 1000:
fmt = '%s_%03i%s'
else:
fmt = '%s_%04i%s'
if mask is None:
mask = compute_mask(nim.data.mean(axis=0)).T
for index, data in enumerate(nim.data):
map = data.T
auto_plot_map(map, sform, vmin=vmin, cut_coords=cut_coords,
do3d=do3d, anat=anat, anat_sform=anat_sform,
title='%s, %i' % (os.path.basename(filename), index),
figure_num=figure_num, mask=mask, auto_sign=auto_sign)
this_outputname = fmt % (outputname, index, outputext)
pl.savefig(this_outputname)
pl.clf()
output_files.append(this_outputname)
else:
        raise NiftiIndexError, 'File %s: incorrect number of dimensions' % filename
return output_files
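# Usage sketch (not part of the original module; the filenames below are
# placeholders): plot_niftifile is the file-to-file entry point and returns
# the list of images it wrote.
def demo_plot_niftifile():
    return plot_niftifile('zmap.nii', outputname='zmap.png', do3d=False)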
| bsd-3-clause |
ElDeveloper/qiita | qiita_pet/handlers/rest/study_samples.py | 3 | 4239 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from tornado.escape import json_encode, json_decode
import pandas as pd
from qiita_db.handlers.oauth2 import authenticate_oauth
from .rest_handler import RESTHandler
class StudySamplesHandler(RESTHandler):
@authenticate_oauth
def get(self, study_id):
study = self.safe_get_study(study_id)
if study is None:
return
if study.sample_template is None:
samples = []
else:
samples = list(study.sample_template.keys())
self.write(json_encode(samples))
self.finish()
@authenticate_oauth
def patch(self, study_id):
study = self.safe_get_study(study_id)
if study is None:
return
if study.sample_template is None:
self.fail('No sample information found', 404)
return
else:
sample_info = study.sample_template.to_dataframe()
data = pd.DataFrame.from_dict(json_decode(self.request.body),
orient='index')
if len(data.index) == 0:
self.fail('No samples provided', 400)
return
categories = set(study.sample_template.categories())
if set(data.columns) != categories:
if set(data.columns).issubset(categories):
self.fail('Not all sample information categories provided',
400)
else:
unknown = set(data.columns) - categories
self.fail("Some categories do not exist in the sample "
"information", 400,
categories_not_found=sorted(unknown))
return
existing_samples = set(sample_info.index)
overlapping_ids = set(data.index).intersection(existing_samples)
new_ids = set(data.index) - existing_samples
status = 500
# warnings generated are not currently caught
# see https://github.com/biocore/qiita/issues/2096
if overlapping_ids:
to_update = data.loc[overlapping_ids]
study.sample_template.update(to_update)
status = 200
if new_ids:
to_extend = data.loc[new_ids]
study.sample_template.extend(to_extend)
status = 201
self.set_status(status)
self.finish()
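# Hedged request sketch (not part of the handler code): the PATCH body is a
# JSON object keyed by sample id, and every existing category must be present
# for every sample, e.g.
#     {"sample-1": {"ph": "6.8", "env": "soil"},
#      "sample-2": {"ph": "7.1", "env": "water"}}
# (sample ids and category names above are illustrative only). Ids already in
# the sample information are updated (HTTP 200); ids not yet present are
# appended (HTTP 201); a mixed payload does both and returns 201.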
class StudySamplesCategoriesHandler(RESTHandler):
@authenticate_oauth
def get(self, study_id, categories):
if not categories:
self.fail('No categories specified', 405)
return
study = self.safe_get_study(study_id)
if study is None:
return
categories = categories.split(',')
if study.sample_template is None:
self.fail('Study does not have sample information', 404)
return
available_categories = set(study.sample_template.categories())
not_found = set(categories) - available_categories
if not_found:
self.fail('Category not found', 404,
categories_not_found=sorted(not_found))
return
blob = {'header': categories,
'samples': {}}
df = study.sample_template.to_dataframe()
for idx, row in df[categories].iterrows():
blob['samples'][idx] = list(row)
self.write(json_encode(blob))
self.finish()
class StudySamplesInfoHandler(RESTHandler):
@authenticate_oauth
def get(self, study_id):
study = self.safe_get_study(study_id)
if study is None:
return
st = study.sample_template
if st is None:
info = {'number-of-samples': 0,
'categories': []}
else:
info = {'number-of-samples': len(st),
'categories': st.categories()}
self.write(json_encode(info))
self.finish()
| bsd-3-clause |
chongyangtao/gmmreg | Python/_plotting.py | 14 | 2435 | #!/usr/bin/env python
#coding=utf-8
##====================================================
## $Author$
## $Date$
## $Revision$
##====================================================
from pylab import *
from configobj import ConfigObj
import matplotlib.pyplot as plt
def display2Dpointset(A):
fig = plt.figure()
ax = fig.add_subplot(111)
#ax.grid(True)
ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1)
labels = plt.getp(plt.gca(), 'xticklabels')
plt.setp(labels, color='k', fontweight='bold')
labels = plt.getp(plt.gca(), 'yticklabels')
plt.setp(labels, color='k', fontweight='bold')
for i,x in enumerate(A):
ax.annotate('%d'%(i+1), xy = x, xytext = x + 0)
ax.set_axis_off()
#fig.show()
def display2Dpointsets(A, B, ax = None):
""" display a pair of 2D point sets """
if not ax:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(A[:,0],A[:,1],'yo',markersize=8,mew=1)
ax.plot(B[:,0],B[:,1],'b+',markersize=8,mew=1)
#pylab.setp(pylab.gca(), 'xlim', [-0.15,0.6])
labels = plt.getp(plt.gca(), 'xticklabels')
plt.setp(labels, color='k', fontweight='bold')
labels = plt.getp(plt.gca(), 'yticklabels')
plt.setp(labels, color='k', fontweight='bold')
def display3Dpointsets(A,B,ax):
#ax.plot3d(A[:,0],A[:,1],A[:,2],'yo',markersize=10,mew=1)
#ax.plot3d(B[:,0],B[:,1],B[:,2],'b+',markersize=10,mew=1)
ax.scatter(A[:,0],A[:,1],A[:,2], c = 'y', marker = 'o')
ax.scatter(B[:,0],B[:,1],B[:,2], c = 'b', marker = '+')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
from mpl_toolkits.mplot3d import Axes3D
def displayABC(A,B,C):
fig = plt.figure()
dim = A.shape[1]
if dim==2:
ax = plt.subplot(121)
display2Dpointsets(A, B, ax)
ax = plt.subplot(122)
display2Dpointsets(C, B, ax)
if dim==3:
plot1 = plt.subplot(1,2,1)
ax = Axes3D(fig, rect = plot1.get_position())
display3Dpointsets(A,B,ax)
plot2 = plt.subplot(1,2,2)
ax = Axes3D(fig, rect = plot2.get_position())
display3Dpointsets(C,B,ax)
plt.show()
def display_pts(f_config):
config = ConfigObj(f_config)
file_section = config['FILES']
mf = file_section['model']
sf = file_section['scene']
tf = file_section['transformed_model']
m = np.loadtxt(mf)
s = np.loadtxt(sf)
t = np.loadtxt(tf)
displayABC(m,s,t)
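# Usage sketch (not part of the original module): display_pts expects an
# INI-style config readable by ConfigObj with a [FILES] section, e.g.
#     [FILES]
#     model = ./data/model.txt
#     scene = ./data/scene.txt
#     transformed_model = ./data/transformed_model.txt
# where each file holds one point per row (2 or 3 columns) for np.loadtxt;
# the paths above are placeholders. Call as display_pts('my_config.ini').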
| gpl-3.0 |
liang42hao/bokeh | bokeh/compat/mplexporter/renderers/base.py | 44 | 14355 | import warnings
import itertools
from contextlib import contextmanager
import numpy as np
from matplotlib import transforms
from .. import utils
from .. import _py3k_compat as py3k
class Renderer(object):
@staticmethod
def ax_zoomable(ax):
return bool(ax and ax.get_navigate())
@staticmethod
def ax_has_xgrid(ax):
        return bool(ax and ax.xaxis._gridOnMajor and ax.xaxis.get_gridlines())
@staticmethod
def ax_has_ygrid(ax):
return bool(ax and ax.yaxis._gridOnMajor and ax.yaxis.get_gridlines())
@property
def current_ax_zoomable(self):
return self.ax_zoomable(self._current_ax)
@property
def current_ax_has_xgrid(self):
return self.ax_has_xgrid(self._current_ax)
@property
def current_ax_has_ygrid(self):
return self.ax_has_ygrid(self._current_ax)
@contextmanager
def draw_figure(self, fig, props):
if hasattr(self, "_current_fig") and self._current_fig is not None:
warnings.warn("figure embedded in figure: something is wrong")
self._current_fig = fig
self._fig_props = props
self.open_figure(fig=fig, props=props)
yield
self.close_figure(fig=fig)
self._current_fig = None
self._fig_props = {}
@contextmanager
def draw_axes(self, ax, props):
if hasattr(self, "_current_ax") and self._current_ax is not None:
warnings.warn("axes embedded in axes: something is wrong")
self._current_ax = ax
self._ax_props = props
self.open_axes(ax=ax, props=props)
yield
self.close_axes(ax=ax)
self._current_ax = None
self._ax_props = {}
@contextmanager
def draw_legend(self, legend, props):
self._current_legend = legend
self._legend_props = props
self.open_legend(legend=legend, props=props)
yield
self.close_legend(legend=legend)
self._current_legend = None
self._legend_props = {}
# Following are the functions which should be overloaded in subclasses
def open_figure(self, fig, props):
"""
Begin commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The Figure which will contain the ensuing axes and elements
props : dictionary
The dictionary of figure properties
"""
pass
def close_figure(self, fig):
"""
Finish commands for a particular figure.
Parameters
----------
fig : matplotlib.Figure
The figure which is finished being drawn.
"""
pass
def open_axes(self, ax, props):
"""
Begin commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which will contain the ensuing axes and elements
props : dictionary
The dictionary of axes properties
"""
pass
def close_axes(self, ax):
"""
Finish commands for a particular axes.
Parameters
----------
ax : matplotlib.Axes
The Axes which is finished being drawn.
"""
pass
def open_legend(self, legend, props):
"""
        Begin commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend that will contain the ensuing elements
props : dictionary
The dictionary of legend properties
"""
pass
def close_legend(self, legend):
"""
Finish commands for a particular legend.
Parameters
----------
legend : matplotlib.legend.Legend
The Legend which is finished being drawn
"""
pass
def draw_marked_line(self, data, coordinates, linestyle, markerstyle,
label, mplobj=None):
"""Draw a line that also has markers.
If this isn't reimplemented by a renderer object, by default, it will
make a call to BOTH draw_line and draw_markers when both markerstyle
and linestyle are not None in the same Line2D object.
"""
if linestyle is not None:
self.draw_line(data, coordinates, linestyle, label, mplobj)
if markerstyle is not None:
self.draw_markers(data, coordinates, markerstyle, label, mplobj)
def draw_line(self, data, coordinates, style, label, mplobj=None):
"""
Draw a line. By default, draw the line via the draw_path() command.
Some renderers might wish to override this and provide more
fine-grained behavior.
In matplotlib, lines are generally created via the plt.plot() command,
though this command also can create marker collections.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the line.
mplobj : matplotlib object
the matplotlib plot element which generated this line
"""
pathcodes = ['M'] + (data.shape[0] - 1) * ['L']
pathstyle = dict(facecolor='none', **style)
pathstyle['edgecolor'] = pathstyle.pop('color')
pathstyle['edgewidth'] = pathstyle.pop('linewidth')
self.draw_path(data=data, coordinates=coordinates,
pathcodes=pathcodes, style=pathstyle, mplobj=mplobj)
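    # Illustration (not part of the original class): for a three-point
    # polyline, data = np.array([[0., 0.], [1., 1.], [2., 0.]]), draw_line
    # forwards to draw_path with pathcodes ['M', 'L', 'L'] (move to the first
    # vertex, then two straight segments), relabelling the line's 'color' and
    # 'linewidth' style keys as 'edgecolor' and 'edgewidth'.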
@staticmethod
def _iter_path_collection(paths, path_transforms, offsets, styles):
"""Build an iterator over the elements of the path collection"""
N = max(len(paths), len(offsets))
if not path_transforms:
path_transforms = [np.eye(3)]
edgecolor = styles['edgecolor']
if np.size(edgecolor) == 0:
edgecolor = ['none']
facecolor = styles['facecolor']
if np.size(facecolor) == 0:
facecolor = ['none']
elements = [paths, path_transforms, offsets,
edgecolor, styles['linewidth'], facecolor]
it = itertools
return it.islice(py3k.zip(*py3k.map(it.cycle, elements)), N)
def draw_path_collection(self, paths, path_coordinates, path_transforms,
offsets, offset_coordinates, offset_order,
styles, mplobj=None):
"""
Draw a collection of paths. The paths, offsets, and styles are all
iterables, and the number of paths is max(len(paths), len(offsets)).
By default, this is implemented via multiple calls to the draw_path()
function. For efficiency, Renderers may choose to customize this
implementation.
Examples of path collections created by matplotlib are scatter plots,
histograms, contour plots, and many others.
Parameters
----------
paths : list
list of tuples, where each tuple has two elements:
(data, pathcodes). See draw_path() for a description of these.
path_coordinates: string
the coordinates code for the paths, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
path_transforms: array_like
an array of shape (*, 3, 3), giving a series of 2D Affine
transforms for the paths. These encode translations, rotations,
and scalings in the standard way.
offsets: array_like
An array of offsets of shape (N, 2)
offset_coordinates : string
the coordinates code for the offsets, which should be either
'data' for data coordinates, or 'figure' for figure (pixel)
coordinates.
offset_order : string
either "before" or "after". This specifies whether the offset
is applied before the path transform, or after. The matplotlib
backend equivalent is "before"->"data", "after"->"screen".
styles: dictionary
A dictionary in which each value is a list of length N, containing
the style(s) for the paths.
mplobj : matplotlib object
the matplotlib plot element which generated this collection
"""
if offset_order == "before":
raise NotImplementedError("offset before transform")
for tup in self._iter_path_collection(paths, path_transforms,
offsets, styles):
(path, path_transform, offset, ec, lw, fc) = tup
vertices, pathcodes = path
path_transform = transforms.Affine2D(path_transform)
vertices = path_transform.transform(vertices)
# This is a hack:
if path_coordinates == "figure":
path_coordinates = "points"
style = {"edgecolor": utils.color_to_hex(ec),
"facecolor": utils.color_to_hex(fc),
"edgewidth": lw,
"dasharray": "10,0",
"alpha": styles['alpha'],
"zorder": styles['zorder']}
self.draw_path(data=vertices, coordinates=path_coordinates,
pathcodes=pathcodes, style=style, offset=offset,
offset_coordinates=offset_coordinates,
mplobj=mplobj)
def draw_markers(self, data, coordinates, style, label, mplobj=None):
"""
Draw a set of markers. By default, this is done by repeatedly
calling draw_path(), but renderers should generally overload
this method to provide a more efficient implementation.
In matplotlib, markers are created using the plt.plot() command.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the markers.
mplobj : matplotlib object
the matplotlib plot element which generated this marker collection
"""
vertices, pathcodes = style['markerpath']
pathstyle = dict((key, style[key]) for key in ['alpha', 'edgecolor',
'facecolor', 'zorder',
'edgewidth'])
pathstyle['dasharray'] = "10,0"
for vertex in data:
self.draw_path(data=vertices, coordinates="points",
pathcodes=pathcodes, style=pathstyle,
offset=vertex, offset_coordinates=coordinates,
mplobj=mplobj)
def draw_text(self, text, position, coordinates, style,
text_type=None, mplobj=None):
"""
Draw text on the image.
Parameters
----------
text : string
The text to draw
position : tuple
The (x, y) position of the text
coordinates : string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the text.
text_type : string or None
if specified, a type of text such as "xlabel", "ylabel", "title"
mplobj : matplotlib object
the matplotlib plot element which generated this text
"""
raise NotImplementedError()
def draw_path(self, data, coordinates, pathcodes, style,
offset=None, offset_coordinates="data", mplobj=None):
"""
Draw a path.
In matplotlib, paths are created by filled regions, histograms,
contour plots, patches, etc.
Parameters
----------
data : array_like
A shape (N, 2) array of datapoints.
coordinates : string
A string code, which should be either 'data' for data coordinates,
'figure' for figure (pixel) coordinates, or "points" for raw
point coordinates (useful in conjunction with offsets, below).
pathcodes : list
A list of single-character SVG pathcodes associated with the data.
Path codes are one of ['M', 'm', 'L', 'l', 'Q', 'q', 'T', 't',
'S', 's', 'C', 'c', 'Z', 'z']
See the SVG specification for details. Note that some path codes
consume more than one datapoint (while 'Z' consumes none), so
in general, the length of the pathcodes list will not be the same
as that of the data array.
style : dictionary
a dictionary specifying the appearance of the line.
offset : list (optional)
the (x, y) offset of the path. If not given, no offset will
be used.
offset_coordinates : string (optional)
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
mplobj : matplotlib object
the matplotlib plot element which generated this path
"""
raise NotImplementedError()
def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
"""
Draw an image.
Parameters
----------
imdata : string
base64 encoded png representation of the image
extent : list
the axes extent of the image: [xmin, xmax, ymin, ymax]
coordinates: string
A string code, which should be either 'data' for data coordinates,
or 'figure' for figure (pixel) coordinates.
style : dictionary
a dictionary specifying the appearance of the image
mplobj : matplotlib object
the matplotlib plot object which generated this image
"""
raise NotImplementedError()
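# Hedged example (not part of mplexporter): a Renderer subclass only needs to
# override the primitives it cares about; the base class funnels figure
# elements into draw_path/draw_text/draw_image. The driver mentioned in the
# trailing comment is an assumption about typical usage.
class _PrintingRenderer(Renderer):
    """Toy renderer that just reports what it is asked to draw."""
    def draw_path(self, data, coordinates, pathcodes, style,
                  offset=None, offset_coordinates="data", mplobj=None):
        # data is an (N, 2) vertex array; pathcodes are SVG-style codes.
        print("path: %d vertices in %s coordinates" % (len(data), coordinates))
    def draw_text(self, text, position, coordinates, style,
                  text_type=None, mplobj=None):
        print("text %r at %s" % (text, position))
    def draw_image(self, imdata, extent, coordinates, style, mplobj=None):
        print("image with extent %s" % (extent,))
# Typical driver (assumed): Exporter(_PrintingRenderer()).run(fig)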
| bsd-3-clause |
dongjoon-hyun/tensorflow | tensorflow/contrib/learn/python/learn/estimators/linear_test.py | 23 | 77821 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for estimators.linear."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import json
import tempfile
import numpy as np
from tensorflow.contrib.layers.python.layers import feature_column as feature_column_lib
from tensorflow.contrib.learn.python.learn import experiment
from tensorflow.contrib.learn.python.learn.datasets import base
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import run_config
from tensorflow.contrib.learn.python.learn.estimators import test_data
from tensorflow.contrib.learn.python.learn.metric_spec import MetricSpec
from tensorflow.contrib.linear_optimizer.python import sdca_optimizer as sdca_optimizer_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.feature_column import feature_column as fc_core
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import partitioned_variables
from tensorflow.python.platform import test
from tensorflow.python.training import ftrl
from tensorflow.python.training import input as input_lib
from tensorflow.python.training import server_lib
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = base.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = base.Dataset(data=iris.data[ids], target=iris.target[ids])
return iris
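# Hedged sketch (not part of the test suite): the core pattern exercised by
# the tests below -- build feature columns, construct the estimator, fit on an
# input_fn, then evaluate.
def _linear_classifier_usage_sketch():
  feature_columns = [
      feature_column_lib.real_valued_column('feature', dimension=4)
  ]
  classifier = linear.LinearClassifier(
      n_classes=3, feature_columns=feature_columns)
  classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
  return classifier.evaluate(
      input_fn=test_data.iris_input_multiclass_fn, steps=1)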
class LinearClassifierTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearClassifier(
n_classes=3, feature_columns=cont_features),
train_input_fn=test_data.iris_input_multiclass_fn,
eval_input_fn=test_data.iris_input_multiclass_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearClassifier)
def testTrain(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testJointTrain(self):
"""Tests that loss goes down with training with joint weights."""
def input_fn():
return {
'age':
sparse_tensor.SparseTensor(
values=['1'], indices=[[0, 0]], dense_shape=[1, 1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.sparse_column_with_hash_bucket('age', 2)
classifier = linear.LinearClassifier(
_joint_weight=True, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
def testMultiClass_MatrixData(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [150] instead of [150, 1]."""
def _input_fn():
iris = base.load_iris()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[150], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass_NpMatrixData(self):
"""Tests multi-class classification using numpy matrix data as input."""
iris = base.load_iris()
train_x = iris.data
train_y = iris.target
feature_column = feature_column_lib.real_valued_column('', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClassLabelKeys(self):
"""Tests n_classes > 2 with label_keys vocabulary for labels."""
# Byte literals needed for python3 test to pass.
label_keys = [b'label0', b'label1', b'label2']
def _input_fn(num_epochs=None):
features = {
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant(
[[label_keys[1]], [label_keys[0]], [label_keys[0]]],
dtype=dtypes.string)
return features, labels
language_column = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
classifier = linear.LinearClassifier(
n_classes=3,
feature_columns=[language_column],
label_keys=label_keys)
classifier.fit(input_fn=_input_fn, steps=50)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
self.assertIn('loss', scores)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_classes = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
self.assertEqual(3, len(predicted_classes))
for pred in predicted_classes:
self.assertIn(pred, label_keys)
predictions = list(
classifier.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllEqual(predicted_classes, predictions)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100, 1], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testEstimatorWithCoreFeatureColumns(self):
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[.8], [0.2], [.1]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=input_lib.limit_epochs(
['en', 'fr', 'zh'], num_epochs=num_epochs),
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant([[1], [0], [0]], dtype=dtypes.int32)
language_column = fc_core.categorical_column_with_hash_bucket(
'language', hash_bucket_size=20)
feature_columns = [language_column, fc_core.numeric_column('age')]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_MatrixData_Labels1D(self):
"""Same as the last test, but labels shape is [100] instead of [100, 1]."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': constant_op.constant(
iris.data, dtype=dtypes.float32)
}, constant_op.constant(
iris.target, shape=[100], dtype=dtypes.int32)
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(feature_columns=[feature_column])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_NpMatrixData(self):
"""Tests binary classification using numpy matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
train_x = iris.data
train_y = iris.target
feature_columns = [feature_column_lib.real_valued_column('', dimension=4)]
classifier = linear.LinearClassifier(feature_columns=feature_columns)
classifier.fit(x=train_x, y=train_y, steps=100)
scores = classifier.evaluate(x=train_x, y=train_y, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
variable_names = classifier.get_variable_names()
self.assertIn('linear/feature/weight', variable_names)
self.assertIn('linear/bias_weight', variable_names)
self.assertEqual(
4, len(classifier.get_variable_value('linear/feature/weight')))
self.assertEqual(
3, len(classifier.get_variable_value('linear/bias_weight')))
def testCustomOptimizerByObject(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.1),
feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
def _optimizer():
return ftrl.FtrlOptimizer(learning_rate=0.1)
classifier = linear.LinearClassifier(
n_classes=3, optimizer=_optimizer, feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests multi-class classification using matrix data as input."""
feature_column = feature_column_lib.real_valued_column(
'feature', dimension=4)
classifier = linear.LinearClassifier(
n_classes=3, optimizer='Ftrl', feature_columns=[feature_column])
classifier.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1], [0], [0], [0]], dtype=dtypes.float32)
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
predictions = array_ops.strided_slice(
predictions, [0, 1], [-1, 2], end_mask=1)
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
classifier = linear.LinearClassifier(
feature_columns=[feature_column_lib.real_valued_column('x')])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy':
MetricSpec(
metric_fn=metric_ops.streaming_accuracy,
prediction_key='classes'),
'my_precision':
MetricSpec(
metric_fn=metric_ops.streaming_precision,
prediction_key='classes'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='probabilities')
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric']).issubset(
set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(classifier.predict_classes(
input_fn=predict_input_fn)))
self.assertEqual(
_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Tests the case where the prediction_key is neither "classes" nor
# "probabilities".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): metric_ops.streaming_auc})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
metric_ops.streaming_accuracy
})
def testLogisticFractionalLabels(self):
"""Tests logistic training with fractional labels."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([[1], [2]]), num_epochs=num_epochs),
}, constant_op.constant(
[[.7], [0]], dtype=dtypes.float32)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age], config=run_config.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=input_fn, steps=500)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
predictions_proba = list(
classifier.predict_proba(input_fn=predict_input_fn))
# Prediction probabilities mirror the labels column, which proves that the
# classifier learns from float input.
self.assertAllClose([[.3, .7], [1., 0.]], predictions_proba, atol=.1)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=sparse_features, config=config)
classifier.fit(input_fn=_input_fn, steps=200)
loss = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def input_fn(num_epochs=None):
return {
'age':
input_lib.limit_epochs(
constant_op.constant([1]), num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1]),
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
model_dir = tempfile.mkdtemp()
classifier = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=30)
predict_input_fn = functools.partial(input_fn, num_epochs=1)
out1_class = list(
classifier.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out1_proba = list(
classifier.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
del classifier
classifier2 = linear.LinearClassifier(
model_dir=model_dir, feature_columns=[age, language])
out2_class = list(
classifier2.predict_classes(
input_fn=predict_input_fn, as_iterable=True))
out2_proba = list(
classifier2.predict_proba(
input_fn=predict_input_fn, as_iterable=True))
self.assertTrue(np.array_equal(out1_class, out2_class))
self.assertTrue(np.array_equal(out1_proba, out2_proba))
def testWeightColumn(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
labels = constant_op.constant([[1], [0], [0], [0]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1], [1], [1], [1]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
classifier = linear.LinearClassifier(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=3))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# All examples in eval data set are y=x.
self.assertGreater(scores['labels/actual_label_mean'], 0.9)
# If there were no weight column, model would learn y=Not(x). Because of
# weights, it learns y=x.
self.assertGreater(scores['labels/prediction_mean'], 0.9)
# All examples in eval data set are y=x. So if weight column were ignored,
# then accuracy would be zero. Because of weights, accuracy should be close
# to 1.0.
self.assertGreater(scores['accuracy'], 0.9)
scores_train_set = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Considering weights, the mean label should be close to 1.0.
# If weights were ignored, it would be 0.25.
self.assertGreater(scores_train_set['labels/actual_label_mean'], 0.9)
# The classifier has learned y=x. If weight column were ignored in
# evaluation, then accuracy for the train set would be 0.25.
# Because weight is not ignored, accuracy is greater than 0.6.
self.assertGreater(scores_train_set['accuracy'], 0.6)
def testWeightColumnLoss(self):
"""Test ensures that you can specify per-example weights for loss."""
def _input_fn():
features = {
'age': constant_op.constant([[20], [20], [20]]),
'weights': constant_op.constant([[100], [1], [1]]),
}
labels = constant_op.constant([[1], [0], [0]])
return features, labels
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age])
classifier.fit(input_fn=_input_fn, steps=100)
loss_unweighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
classifier = linear.LinearClassifier(
feature_columns=[age], weight_column_name='weights')
classifier.fit(input_fn=_input_fn, steps=100)
loss_weighted = classifier.evaluate(input_fn=_input_fn, steps=1)['loss']
self.assertLess(loss_weighted, loss_unweighted)
def testExport(self):
"""Tests that export model for servo works."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
export_dir = tempfile.mkdtemp()
classifier.export(export_dir)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=False)
classifier.fit(input_fn=input_fn, steps=100)
self.assertNotIn('centered_bias_weight', classifier.get_variable_names())
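# (Roughly, centered bias is an extra bias variable trained to track the mean
# label on top of the linear model; disabling it should remove the variable
# checked here, and testEnableCenteredBias below checks the name it gets when
# it is enabled.)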
def testEnableCenteredBias(self):
"""Tests that we can enable centered bias."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(
feature_columns=[age, language], enable_centered_bias=True)
classifier.fit(input_fn=input_fn, steps=100)
self.assertIn('linear/binary_logistic_head/centered_bias_weight',
classifier.get_variable_names())
def testTrainOptimizerWithL1Reg(self):
"""Tests l1 regularized model has higher loss."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['hindi'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier_no_reg = linear.LinearClassifier(feature_columns=[language])
classifier_with_reg = linear.LinearClassifier(
feature_columns=[language],
optimizer=ftrl.FtrlOptimizer(
learning_rate=1.0, l1_regularization_strength=100.))
loss_no_reg = classifier_no_reg.fit(input_fn=input_fn, steps=100).evaluate(
input_fn=input_fn, steps=1)['loss']
loss_with_reg = classifier_with_reg.fit(input_fn=input_fn,
steps=100).evaluate(
input_fn=input_fn,
steps=1)['loss']
self.assertLess(loss_no_reg, loss_with_reg)
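# With l1_regularization_strength=100 the FTRL update shrinks the weights
# toward zero, so the regularized model fits the single training example worse
# and its evaluated loss ends up higher than the unregularized one.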
def testTrainWithMissingFeature(self):
"""Tests that training works with missing features."""
def input_fn():
return {
'language':
sparse_tensor.SparseTensor(
values=['Swahili', 'turkish'],
indices=[[0, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[1], [1], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
classifier = linear.LinearClassifier(feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.07)
def testSdcaOptimizerRealValuedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and real valued features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2']),
'maintenance_cost': constant_op.constant([[500.0], [200.0]]),
'sq_footage': constant_op.constant([[800.0], [600.0]]),
'weights': constant_op.constant([[1.0], [1.0]])
}, constant_op.constant([[0], [1]])
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
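# SDCAOptimizer implements stochastic dual coordinate ascent; example_id_column
# names a column of unique per-example ids that SDCA uses to keep track of the
# dual variable associated with each training example.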
classifier = linear.LinearClassifier(
feature_columns=[maintenance_cost, sq_footage],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerRealValuedFeatureWithHigherDimension(self):
"""Tests SDCAOptimizer with real valued features of higher dimension."""
# input_fn is identical to the one in testSdcaOptimizerRealValuedFeatures
# where 2 1-dimensional dense features have been replaced by 1 2-dimensional
# feature.
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2']),
'dense_feature':
constant_op.constant([[500.0, 800.0], [200.0, 600.0]])
}, constant_op.constant([[0], [1]])
dense_feature = feature_column_lib.real_valued_column(
'dense_feature', dimension=2)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[dense_feature], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=100)
loss = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerBucketizedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and bucketized features."""
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'price': constant_op.constant([[600.0], [1000.0], [400.0]]),
'sq_footage': constant_op.constant([[1000.0], [600.0], [700.0]]),
'weights': constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('price'),
boundaries=[500.0, 700.0])
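# bucketized_column one-hot encodes the real value by the boundaries; with
# boundaries [500, 700] the prices 600, 1000 and 400 above fall into buckets
# 1, 2 and 0 respectively.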
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'), boundaries=[650.0])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
classifier = linear.LinearClassifier(
feature_columns=[price_bucket, sq_footage_bucket],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerSparseFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.4, 0.6, 0.3]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[1.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeatures(self):
"""LinearClassifier with SDCAOptimizer and weighted sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
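# weighted_sparse_column pairs each 'country' id with the matching value from
# the 'price' SparseTensor, so that id's contribution to the prediction is
# weight * coefficient instead of just the coefficient.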
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerWeightedSparseFeaturesOOVWithNoOOVBuckets(self):
"""LinearClassifier with SDCAOptimizer with OOV features (-1 IDs)."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
sparse_tensor.SparseTensor(
values=[2., 3., 1.],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5]),
'country':
sparse_tensor.SparseTensor(
# 'GB' is out of the vocabulary.
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 5])
}, constant_op.constant([[1], [0], [1]])
country = feature_column_lib.sparse_column_with_keys(
'country', keys=['US', 'CA', 'MK', 'IT', 'CN'])
country_weighted_by_price = feature_column_lib.weighted_sparse_column(
country, 'price')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_weighted_by_price], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerCrossedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and crossed features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'language':
sparse_tensor.SparseTensor(
values=['english', 'italian', 'spanish'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1]),
'country':
sparse_tensor.SparseTensor(
values=['US', 'IT', 'MX'],
indices=[[0, 0], [1, 0], [2, 0]],
dense_shape=[3, 1])
}, constant_op.constant([[0], [0], [1]])
language = feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=5)
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
country_language = feature_column_lib.crossed_column(
[language, country], hash_bucket_size=10)
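# crossed_column hashes each (language, country) pair into one of
# hash_bucket_size buckets, letting the linear model learn a separate weight
# per co-occurring pair.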
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[country_language], optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=10)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerMixedFeatures(self):
"""Tests LinearClassifier with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testSdcaOptimizerPartitionedVariables(self):
"""Tests LinearClassifier with SDCAOptimizer with partitioned variables."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.6], [0.8], [0.3]]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [1.0], [1.0]])
}, constant_op.constant([[1], [0], [1]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id',
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0))
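# fixed_size_partitioner splits each weight variable into num_shards pieces
# along axis 0, which is what spreads the variables across the (fake)
# parameter servers configured below.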
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
classifier = linear.LinearClassifier(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer,
config=config)
classifier.fit(input_fn=input_fn, steps=50)
scores = classifier.evaluate(input_fn=input_fn, steps=1)
print('all scores = {}'.format(scores))
self.assertGreater(scores['accuracy'], 0.9)
def testEval(self):
"""Tests that eval produces correct metrics.
"""
def input_fn():
return {
'age':
constant_op.constant([[1], [2]]),
'language':
sparse_tensor.SparseTensor(
values=['greek', 'chinese'],
indices=[[0, 0], [1, 0]],
dense_shape=[2, 1]),
}, constant_op.constant([[1], [0]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearClassifier(feature_columns=[age, language])
# Evaluate on trained model
classifier.fit(input_fn=input_fn, steps=100)
classifier.evaluate(input_fn=input_fn, steps=1)
# TODO(ispir): Enable accuracy check after resolving the randomness issue.
# self.assertLess(evaluated_values['loss/mean'], 0.3)
# self.assertGreater(evaluated_values['accuracy/mean'], .95)
class LinearRegressorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearRegressor(feature_columns=cont_features),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self, linear.LinearRegressor)
def testRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
classifier = linear.LinearRegressor(feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
regressor = linear.LinearRegressor(
feature_columns=cont_features,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=test_data.iris_input_multiclass_fn, steps=100)
scores = regressor.evaluate(
input_fn=test_data.iris_input_multiclass_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {'x': array_ops.ones(shape=[4, 1], dtype=dtypes.float32),}
return features, labels
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(0.1875, scores['loss'], delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
def _input_fn_eval():
# 4 rows, with different weights.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[7.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(0.4125, scores['loss'], delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relatively higher weight of the first row.
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[100.], [3.], [2.], [2.]])
}
return features, labels
def _input_fn_eval():
# Create 4 rows (y = x)
labels = constant_op.constant([[1.], [1.], [1.], [1.]])
features = {
'x': array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
'w': constant_op.constant([[1.], [1.], [1.], [1.]])
}
return features, labels
regressor = linear.LinearRegressor(
weight_column_name='w',
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.1)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predicted_scores = regressor.predict_scores(
input_fn=_input_fn, as_iterable=False)
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predicted_scores, predictions)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
labels = [1.0, 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(labels, dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predicted_scores = list(
regressor.predict_scores(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(labels, predicted_scores, atol=0.1)
predictions = list(
regressor.predict(
input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predicted_scores, predictions)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
labels = constant_op.constant([[1.], [0.], [0.], [0.]])
features = {
'x':
input_lib.limit_epochs(
array_ops.ones(
shape=[4, 1], dtype=dtypes.float32),
num_epochs=num_epochs)
}
return features, labels
def _my_metric_op(predictions, labels):
return math_ops.reduce_sum(math_ops.multiply(predictions, labels))
regressor = linear.LinearRegressor(
feature_columns=[feature_column_lib.real_valued_column('x')],
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error':
MetricSpec(
metric_fn=metric_ops.streaming_mean_squared_error,
prediction_key='scores'),
'my_metric':
MetricSpec(
metric_fn=_my_metric_op, prediction_key='scores')
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(
regressor.predict_scores(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests the case where the prediction_key is not "scores".
with self.assertRaisesRegexp(KeyError, 'bad_type'):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'bad_name':
MetricSpec(
metric_fn=metric_ops.streaming_auc,
prediction_key='bad_type')
})
# Tests the case where the 2nd element of the key is not "scores".
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('my_error', 'predictions'):
metric_ops.streaming_mean_squared_error
})
# Tests the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
('bad_length_name', 'scores', 'bad_length'):
metric_ops.streaming_mean_squared_error
})
def testTrainSaveLoad(self):
"""Tests that insures you can save and reload a trained model."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
model_dir = tempfile.mkdtemp()
regressor = linear.LinearRegressor(
model_dir=model_dir,
feature_columns=feature_columns,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(regressor.predict_scores(input_fn=predict_input_fn))
del regressor
regressor2 = linear.LinearRegressor(
model_dir=model_dir, feature_columns=feature_columns)
predictions2 = list(regressor2.predict_scores(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7),
feature_column_lib.real_valued_column('age')
]
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig(tf_random_seed=1)
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=feature_columns, config=config)
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age':
input_lib.limit_epochs(
constant_op.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language':
sparse_tensor.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
dense_shape=[3, 2])
}
return features, constant_op.constant(
[1.0, 0., 0.2], dtype=dtypes.float32)
feature_columns = [
feature_column_lib.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20),
feature_column_lib.real_valued_column('age')
]
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
enable_centered_bias=False,
config=run_config.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.1)
def testRecoverWeights(self):
rng = np.random.RandomState(67)
n = 1000
n_weights = 10
bias = 2
x = rng.uniform(-1, 1, (n, n_weights))
weights = 10 * rng.randn(n_weights)
y = np.dot(x, weights)
y += rng.randn(len(x)) * 0.05 + rng.normal(bias, 0.01)
feature_columns = estimator.infer_real_valued_columns_from_input(x)
regressor = linear.LinearRegressor(
feature_columns=feature_columns,
optimizer=ftrl.FtrlOptimizer(learning_rate=0.8))
regressor.fit(x, y, batch_size=64, steps=2000)
self.assertIn('linear//weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear//weight')
# Have to flatten weights since they come in (x, 1) shape.
self.assertAllClose(weights, regressor_weights.flatten(), rtol=1)
# TODO(ispir): Disable centered_bias.
# assert abs(bias - regressor.bias_) < 0.1
def testSdcaOptimizerRealValuedLinearFeatures(self):
"""Tests LinearRegressor with SDCAOptimizer and real valued features."""
x = [[1.2, 2.0, -1.5], [-2.0, 3.0, -0.5], [1.0, -0.5, 4.0]]
weights = [[3.0], [-1.2], [0.5]]
y = np.dot(x, weights)
def input_fn():
return {
'example_id': constant_op.constant(['1', '2', '3']),
'x': constant_op.constant(x),
'weights': constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant(y)
x_column = feature_column_lib.real_valued_column('x', dimension=3)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[x_column],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.01)
self.assertIn('linear/x/weight', regressor.get_variable_names())
regressor_weights = regressor.get_variable_value('linear/x/weight')
self.assertAllClose(
[w[0] for w in weights], regressor_weights.flatten(), rtol=0.1)
def testSdcaOptimizerMixedFeaturesArbitraryWeights(self):
"""Tests LinearRegressor with SDCAOptimizer and a mix of features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerPartitionedVariables(self):
"""Tests LinearRegressor with SDCAOptimizer with partitioned variables."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([0.6, 0.8, 0.3]),
'sq_footage':
constant_op.constant([[900.0], [700.0], [600.0]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[3.0], [5.0], [7.0]])
}, constant_op.constant([[1.55], [-1.25], [-3.0]])
price = feature_column_lib.real_valued_column('price')
sq_footage_bucket = feature_column_lib.bucketized_column(
feature_column_lib.real_valued_column('sq_footage'),
boundaries=[650.0, 800.0])
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
sq_footage_country = feature_column_lib.crossed_column(
[sq_footage_bucket, country], hash_bucket_size=10)
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l2_regularization=1.0,
partitioner=partitioned_variables.fixed_size_partitioner(
num_shards=2, axis=0))
tf_config = {
'cluster': {
run_config.TaskType.PS: ['fake_ps_0', 'fake_ps_1']
}
}
with test.mock.patch.dict('os.environ',
{'TF_CONFIG': json.dumps(tf_config)}):
config = run_config.RunConfig()
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config._cluster_spec = server_lib.ClusterSpec({})
regressor = linear.LinearRegressor(
feature_columns=[price, sq_footage_bucket, country, sq_footage_country],
weight_column_name='weights',
optimizer=sdca_optimizer,
config=config)
regressor.fit(input_fn=input_fn, steps=20)
loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss, 0.05)
def testSdcaOptimizerSparseFeaturesWithL1Reg(self):
"""Tests LinearClassifier with SDCAOptimizer and sparse features."""
def input_fn():
return {
'example_id':
constant_op.constant(['1', '2', '3']),
'price':
constant_op.constant([[0.4], [0.6], [0.3]]),
'country':
sparse_tensor.SparseTensor(
values=['IT', 'US', 'GB'],
indices=[[0, 0], [1, 3], [2, 1]],
dense_shape=[3, 5]),
'weights':
constant_op.constant([[10.0], [10.0], [10.0]])
}, constant_op.constant([[1.4], [-0.8], [2.6]])
price = feature_column_lib.real_valued_column('price')
country = feature_column_lib.sparse_column_with_hash_bucket(
'country', hash_bucket_size=5)
# Regressor with no L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
no_l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
variable_names = regressor.get_variable_names()
self.assertIn('linear/price/weight', variable_names)
self.assertIn('linear/country/weights', variable_names)
no_l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Regressor with L1 regularization.
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id', symmetric_l1_regularization=1.0)
regressor = linear.LinearRegressor(
feature_columns=[price, country],
weight_column_name='weights',
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=20)
l1_reg_loss = regressor.evaluate(input_fn=input_fn, steps=1)['loss']
l1_reg_weights = {
'linear/price/weight': regressor.get_variable_value(
'linear/price/weight'),
'linear/country/weights': regressor.get_variable_value(
'linear/country/weights'),
}
# Unregularized loss is lower when there is no L1 regularization.
self.assertLess(no_l1_reg_loss, l1_reg_loss)
self.assertLess(no_l1_reg_loss, 0.05)
# But weights returned by the regressor with L1 regularization have smaller
# L1 norm.
l1_reg_weights_norm, no_l1_reg_weights_norm = 0.0, 0.0
for var_name in sorted(l1_reg_weights):
l1_reg_weights_norm += sum(
np.absolute(l1_reg_weights[var_name].flatten()))
no_l1_reg_weights_norm += sum(
np.absolute(no_l1_reg_weights[var_name].flatten()))
print('Var name: %s, value: %s' %
(var_name, no_l1_reg_weights[var_name].flatten()))
self.assertLess(l1_reg_weights_norm, no_l1_reg_weights_norm)
def testSdcaOptimizerBiasOnly(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when it's the only feature present.
All of the instances in this input only have the bias feature, and
1/4 of the labels are positive. This means that the expected weight for
the bias should be close to the average prediction, i.e. 0.25.
Returns:
Training data for the test.
"""
num_examples = 40
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
# place_holder is an empty column which is always 0 (absent), because
# LinearClassifier requires at least one column.
'place_holder':
constant_op.constant([[0.0]] * num_examples),
}, constant_op.constant(
[[1 if i % 4 == 0 else 0] for i in range(num_examples)])
place_holder = feature_column_lib.real_valued_column('place_holder')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[place_holder], optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.25, err=0.1)
def testSdcaOptimizerBiasAndOtherColumns(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.4 of all instances that have feature 'a' are positive, and 0.2 of all
instances that have feature 'b' are positive. The labels in the dataset
are ordered to appear shuffled since SDCA expects shuffled data, and
converges faster with this pseudo-random ordering.
If the bias was centered we would expect the weights to be:
bias: 0.3
a: 0.1
b: -0.1
Until b/29339026 is resolved, the bias gets regularized with the same
global value as the other columns, and so the expected weights get
shifted and are:
bias: 0.2
a: 0.2
b: 0.0
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant(
[[x]
for x in [1, 0, 0, 1, 1, 0, 0, 0, 1, 0] * int(half / 10) +
[0, 1, 0, 0, 0, 0, 0, 0, 1, 0] * int(half / 10)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=200)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
# TODO(b/29339026): Change the expected results to expect a centered bias.
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.2, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], 0.0, err=0.05)
def testSdcaOptimizerBiasAndOtherColumnsFabricatedCentered(self):
"""Tests LinearClassifier with SDCAOptimizer and validates bias weight."""
def input_fn():
"""Testing the bias weight when there are other features present.
1/2 of the instances in this input have feature 'a', the rest have
feature 'b', and we expect the bias to be added to each instance as well.
0.1 of all instances that have feature 'a' have a label of 1, and 0.1 of
all instances that have feature 'b' have a label of -1.
We can expect the weights to be:
bias: 0.0
a: 0.1
b: -0.1
Returns:
The test dataset.
"""
num_examples = 200
half = int(num_examples / 2)
return {
'example_id':
constant_op.constant([str(x + 1) for x in range(num_examples)]),
'a':
constant_op.constant([[1]] * int(half) + [[0]] * int(half)),
'b':
constant_op.constant([[0]] * int(half) + [[1]] * int(half)),
}, constant_op.constant([[1 if x % 10 == 0 else 0] for x in range(half)] +
[[-1 if x % 10 == 0 else 0] for x in range(half)])
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
regressor = linear.LinearRegressor(
feature_columns=[
feature_column_lib.real_valued_column('a'),
feature_column_lib.real_valued_column('b')
],
optimizer=sdca_optimizer)
regressor.fit(input_fn=input_fn, steps=100)
variable_names = regressor.get_variable_names()
self.assertIn('linear/bias_weight', variable_names)
self.assertIn('linear/a/weight', variable_names)
self.assertIn('linear/b/weight', variable_names)
self.assertNear(
regressor.get_variable_value('linear/bias_weight')[0], 0.0, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/a/weight')[0], 0.1, err=0.05)
self.assertNear(
regressor.get_variable_value('linear/b/weight')[0], -0.1, err=0.05)
class LinearEstimatorTest(test.TestCase):
def testExperimentIntegration(self):
cont_features = [
feature_column_lib.real_valued_column(
'feature', dimension=4)
]
exp = experiment.Experiment(
estimator=linear.LinearEstimator(feature_columns=cont_features,
head=head_lib.regression_head()),
train_input_fn=test_data.iris_input_logistic_fn,
eval_input_fn=test_data.iris_input_logistic_fn)
exp.test()
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(self,
linear.LinearEstimator)
def testLinearRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(feature_columns=[age, language],
head=head_lib.regression_head())
linear_estimator.fit(input_fn=input_fn, steps=100)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=400)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.5)
def testPoissonRegression(self):
"""Tests that loss goes down with training."""
def input_fn():
return {
'age':
constant_op.constant([1]),
'language':
sparse_tensor.SparseTensor(
values=['english'], indices=[[0, 0]], dense_shape=[1, 1])
}, constant_op.constant([[10.]])
language = feature_column_lib.sparse_column_with_hash_bucket('language',
100)
age = feature_column_lib.real_valued_column('age')
linear_estimator = linear.LinearEstimator(
feature_columns=[age, language],
head=head_lib.poisson_regression_head())
linear_estimator.fit(input_fn=input_fn, steps=10)
loss1 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
linear_estimator.fit(input_fn=input_fn, steps=100)
loss2 = linear_estimator.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
# Here loss of 2.1 implies a prediction of ~9.9998
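# (Sketch of that arithmetic, assuming the head uses the full Poisson log-loss
# p - y*log(p) + y*log(y) - y + 0.5*log(2*pi*y): with y=10 and p ~= 9.9998 this
# is ~= 9.9998 - 23.026 + 15.096 ~= 2.07.)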
self.assertLess(loss2, 2.1)
def testSDCANotSupported(self):
"""Tests that we detect error for SDCA."""
maintenance_cost = feature_column_lib.real_valued_column('maintenance_cost')
sq_footage = feature_column_lib.real_valued_column('sq_footage')
sdca_optimizer = sdca_optimizer_lib.SDCAOptimizer(
example_id_column='example_id')
with self.assertRaises(ValueError):
linear.LinearEstimator(
head=head_lib.regression_head(label_dimension=1),
feature_columns=[maintenance_cost, sq_footage],
optimizer=sdca_optimizer,
_joint_weights=True)
def boston_input_fn():
boston = base.load_boston()
features = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.data), [-1, 13]),
dtypes.float32)
labels = math_ops.cast(
array_ops.reshape(constant_op.constant(boston.target), [-1, 1]),
dtypes.float32)
return features, labels
class FeatureColumnTest(test.TestCase):
def testTrain(self):
feature_columns = estimator.infer_real_valued_columns_from_input_fn(
boston_input_fn)
est = linear.LinearRegressor(feature_columns=feature_columns)
est.fit(input_fn=boston_input_fn, steps=1)
_ = est.evaluate(input_fn=boston_input_fn, steps=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
bovulpes/AliceO2 | Detectors/FIT/benchmark/process.py | 6 | 12238 | # load modules
import re
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator
# use classic plot style
plt.style.use('classic')
# read and save user input filenames
mem_filename = sys.argv[1]
cpu_filename = sys.argv[2]
# save the process id names
process_id_mem = re.findall('mem_evolution_(\\d+)', mem_filename)[0]
process_id_cpu = re.findall('cpu_evolution_(\\d+)', cpu_filename)[0]
# check that the process id names are the same
if process_id_mem != process_id_cpu:
# throw error if true and exit program
sys.stderr.write("The memory and cpu process filenames do not match...\n")
print("input memory filename: ",mem_filename)
print("inpu cpu filename: ",cpu_filename)
exit(1)
# save the main process id (driver application)
process_id = process_id_mem + '.txt' # as string '<PID>.txt'
# save the same process id (driver application), but as a float
driver = float(process_id_mem)
# load the o2 command given
with open(mem_filename) as f:
title = f.readline()
# extract the command given
title = re.findall('#command line: (\\w.+)', title)[0]
# declare string variables for different runs
simulation = 'o2-sim '
serial = 'o2-sim-serial'
digitization = 'o2-sim-digitizer-workflow'
# print the command for the user
print("\nYour command was: ", title)
# check what type of command and parse it to a logfile variable
if title.find(simulation) == 0:
print("You have monitored o2 simulation in parallel.\n")
command=simulation
logfilename = 'o2sim.log'
elif title.find(serial) == 0:
print("You have monitored o2 simulation in serial.\n")
command=serial
logfilename = 'o2sim.log'
elif title.find(digitization) == 0:
command=digitization
print("You have monitored o2 digitization.\n")
logfilename = 'o2digi.log'
else :
print("I do not know this type of simulation.\n")
exit(1)
#################################################
# #
# Extract the PIDs from logfile #
# #
#################################################
if command==simulation: # True if you typed o2-sim
try:
# open o2sim.log file name
with open(logfilename) as logfile:
# read and save the first 6 lines in o2sim.log
loglines = [next(logfile) for line in range(6)]
# print("*******************************\n")
# print("Driver application PID is: ", driver)
# find the PID for the event generator (o2-sim-primary-..)
eventgenerator_line = re.search('Spawning particle server on PID (.*); Redirect output to serverlog\n',loglines[3])
event_gen = float(eventgenerator_line.group(1))
# print("Eventgenerator PID is: ", event_gen)
# find the PID for sim worker 0 (o2-sim-device-runner)
sim_worker_line = re.search('Spawning sim worker 0 on PID (.*); Redirect output to workerlog0\n',loglines[4])
sim_worker = float(sim_worker_line.group(1))
# print("SimWorker 0 PID is: ", sim_worker)
# find the PID for the hitmerger (o2-sim-hitmerger)
hitmerger_line = re.search('Spawning hit merger on PID (.*); Redirect output to mergerlog\n',loglines[5])
hit_merger = float(hitmerger_line.group(1))
# print("Hitmerger PID is: ", hit_merger, "\n")
# print("*******************************\n")
# find the number of simulation workers
n_workers = int(re.findall('Running with (\\d+)', loglines[1])[0])
# save into a list
pid_names = ['driver','event gen','sim worker 0','hit merger']
pid_vals = [driver,event_gen,sim_worker,hit_merger]
# append pid names for remaining workers
for i in range(n_workers-1):
pid_names.append(f"sim worker {i+1}")
no_log = False
except IOError:
print("There exists no o2sim.log..")
print("No details of devices will be provided.")
no_log = True
elif command==digitization: # True if you typed o2-sim-digitizer-workflow
try:
# open o2digi.log file name
with open(logfilename) as logfile:
# save the first 100 lines in o2digi.log
loglines = [next(logfile) for line in range(100)]
# declare list for PID numbers and names
pid_vals = []
pid_names = []
# loop through lines to find PIDs
for line_num,line in enumerate(loglines):
pid_line = re.findall('Starting (\\w.+) on pid (\\d+)',line)
if pid_line: # True if the line contains 'Start <PID name> on pid <PID number>'
# assign the name and value to variables
pid_name = pid_line[0][0]
pid_val = float(pid_line[0][1])
# save to list
pid_names.append(pid_name)
pid_vals.append(pid_val)
# insert driver application name and value
pid_names.insert(0,'driver')
pid_vals.insert(0,driver)
# for id in range(len(pid_names)):
# print(pid_names[id],"PID is: ",pid_vals[id])
# print(pid_vals[pid])
# print("*******************************\n")
no_log = False
except IOError:
print("There exists no o2digi.log..")
print("No details of devices will be provided.")
no_log = True
elif command==serial:
print("*******************************\n")
print("Driver application PID is: ", driver)
print("There are no other PIDs")
no_log = False
else :
print("Something went wrong.. exiting")
exit(1)
############### End of PID extraction #################
# get time and PID filenames
time_filename = 'time_evolution_' + process_id
pid_filename = 'pid_evolution_' + process_id
# load data as pandas DataFrame (DataFrame due to uneven number of coloumns in file)
mem = pd.read_csv(mem_filename, skiprows=2, sep=" +", engine="python",header=None)
cpu = pd.read_csv(cpu_filename, skiprows=2, sep=" +", engine="python",header=None)
pid = pd.read_csv(pid_filename, skiprows=2, sep=" +", engine="python",header=None)
t = np.loadtxt(time_filename) # time in ms (mili-seconds)
# extract values from the DataFrame
mem = mem[1:].values
cpu = cpu[1:].values
pid = pid[1:].values
# process time series
t = t-t[0] # rescale time such that t_start=0
t = t*10**(-3) # convert mili-seconds to seconds
# replace 'Nones' (empty) elements w/ zeros and convert string values to floats
mem = np.nan_to_num(mem.astype(float))
cpu = np.nan_to_num(cpu.astype(float))
pid = np.nan_to_num(pid.astype(float))
# find all process identification numbers involved (PIDs), the index of their first
# occurrence (index) in the unraveled array and the total number of appearances (counts) in the process
PIDs, index, counts = np.unique(pid,return_index=True,return_counts=True)
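# (e.g. np.unique([[0., 3.], [3., 5.]], return_index=True, return_counts=True)
#  returns (array([0., 3., 5.]), array([0, 1, 3]), array([1, 2, 1])); the
#  indices refer to positions in the flattened array.)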
# NOTE: we don't want to count 'fake' PIDs. These are PIDs that spawn only once, not taking
# any memory or cpu. Because of how they appear they shift the columns in all monitored files.
# This needs to be taken care of, so they are deleted from the data below.
# return the index of the fake pids
fake = np.where(counts==1)
# delete the fake pids from PIDs list
PIDs = np.delete(PIDs,fake)
index = np.delete(index,fake)
counts = np.delete(counts,fake)
# we also delete PID=0, as this is not a real PID
PIDs = np.delete(PIDs,0)
index = np.delete(index,0)
counts = np.delete(counts,0)
# get number of real PIDs
nPIDs = len(PIDs)
# dimension of data
dim = pid.shape # could also use from time series
# NOTE: the dimension is always (n_steps, 40)
# because of '#' characters in ./monitor.sh
# number of steps in simulation for o2-sim
steps = len(pid[:,0]) # could also use from time series
# declare final lists
m = [] # memory
c = [] # cpu
p = [] # process
for i in range(nPIDs): # loop through all valid PIDs
# find the number of zeros to pad with
init_zeros, _ = np.unravel_index(index[i],dim)
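# (np.unravel_index maps the flat index back to (row, column) for shape dim,
#  e.g. np.unravel_index(7, (3, 40)) == (0, 7); the row is the first time step
#  at which this PID shows up, so that many zeros are padded at the start.)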
# pad the 'initial' zeros (beginning)
mem_dummy = np.hstack((np.zeros(init_zeros),mem[pid==PIDs[i]]))
cpu_dummy = np.hstack((np.zeros(init_zeros),cpu[pid==PIDs[i]]))
pid_dummy = np.hstack((np.zeros(init_zeros),pid[pid==PIDs[i]]))
# find the difference in final steps
n_diff = steps - len(mem_dummy)
# pad the ending w/ zeros
mem_dummy = np.hstack((mem_dummy,np.zeros(n_diff)))
cpu_dummy = np.hstack((cpu_dummy,np.zeros(n_diff)))
pid_dummy = np.hstack((pid_dummy,np.zeros(n_diff)))
# save to list
m.append(mem_dummy)
c.append(cpu_dummy)
p.append(pid_dummy)
#print("PID is: ",PIDs[i])
#print("initial number of zeros to pad: ", init_zeros)
#print("final number of zeros to pad: ", n_diff)
#print("**************\n")
# convert to array and assure correct shape of arrays
m = np.asarray(m).T
c = np.asarray(c).T
p = np.asarray(p).T
###################################
# #
# COMPUTATIONS #
# #
###################################
print("********************************")
# compute average memory and maximum memory
M = np.sum(m,axis=1) # sum all processes memory
max_mem = np.max(M) # find maximum
mean_mem = np.mean(M) # find mean
print(f"max mem: {max_mem:.2f} MB")
print(f"mean mem: {mean_mem:.2f} MB")
C = np.sum(c,axis=1) # compute total cpu
max_cpu = np.max(C)
print(f"max cpu: {max_cpu:.2f}s")
# print total wall clock time
wall_clock = t[-1]
print(f"Total wall clock time: {wall_clock:.2f} s")
# print ratio
ratio = np.max(C)/t[-1]
print(f"Ratio (cpu time) / (wall clock time) : {ratio:.2f}")
print("********************************")
###################################
# #
# PLOTTING #
# #
###################################
if no_log: # True if user hasn't provided logfiles
# plot of total, max and mean memory
fig,ax = plt.subplots(dpi=125,facecolor="white")
ax.plot(t,M,'-k',label='total memory');
ax.hlines(np.mean(M),np.min(t),np.max(t),color='blue',linestyles='--',label='mean memory');
ax.hlines(np.max(M),np.min(t),np.max(t),color='red',linestyles='--',label='max memory');
ax.set_title(title)
ax.set_xlabel("Time [s]")
ax.set_ylabel("Memory [MB]")
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.legend(prop={'size': 10},loc='best')
ax.grid();
# plot of total, max and mean CPU
fig1,ax1 = plt.subplots(dpi=125,facecolor="white")
ax1.plot(t,C,'-k',label='total cpu');
ax1.hlines(np.mean(C),np.min(t),np.max(t),color='blue',linestyles='--',label='mean cpu');
ax1.hlines(np.max(C),np.min(t),np.max(t),color='red',linestyles='--',label='max cpu');
ax1.set_title(title)
ax1.set_xlabel("Time [s]")
ax1.set_ylabel("CPU [s]")
ax1.xaxis.set_minor_locator(AutoMinorLocator())
ax1.yaxis.set_minor_locator(AutoMinorLocator())
ax1.legend(prop={'size': 10},loc='best');
ax1.grid()
plt.show();
else : # details about the PID exists (from logfiles)
# # convert to pid info lists to arrays
# pid_vals = np.asarray(pid_vals)
# pid_names = np.asarray(pid_names)
#
# # be sure of the correct ordering of pids
# pid_placement = np.where(pid_vals==PIDs)
# plot memory
fig,ax = plt.subplots(dpi=125,facecolor="white")
ax.plot(t,m);
# some features for the plot
ax.set_title(title)
ax.set_xlabel("Time [s]")
ax.set_ylabel("Memory [MB]")
ax.xaxis.set_minor_locator(AutoMinorLocator())
ax.yaxis.set_minor_locator(AutoMinorLocator())
ax.legend(pid_names,prop={'size': 10},loc='best')
ax.grid();
# plot cpu
fig1,ax1 = plt.subplots(dpi=125,facecolor="white")
ax1.plot(t,c);
# some features for the plot
ax1.set_title(title)
ax1.set_xlabel("Time [s]")
ax1.set_ylabel("CPU [s]")
ax1.xaxis.set_minor_locator(AutoMinorLocator())
ax1.yaxis.set_minor_locator(AutoMinorLocator())
ax1.legend(pid_names,prop={'size': 10},loc='best');
ax1.grid()
plt.show();
| gpl-3.0 |
mjgrav2001/scikit-learn | sklearn/linear_model/tests/test_bayes.py | 299 | 1770 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import SkipTest
from sklearn.linear_model.bayes import BayesianRidge, ARDRegression
from sklearn import datasets
from sklearn.utils.testing import assert_array_almost_equal
def test_bayesian_on_diabetes():
# Test BayesianRidge on diabetes
raise SkipTest("XFailed Test")
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
clf = BayesianRidge(compute_score=True)
# Test with more samples than features
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
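# (With compute_score=True, scores_ stores the value of the log marginal
# likelihood objective after each iteration, which the evidence-maximization
# updates should keep increasing.)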
# Test with more features than samples
X = X[:5, :]
y = y[:5]
clf.fit(X, y)
# Test that scores are increasing at each iteration
assert_array_equal(np.diff(clf.scores_) > 0, True)
def test_toy_bayesian_ridge_object():
# Test BayesianRidge on toy
X = np.array([[1], [2], [6], [8], [10]])
Y = np.array([1, 2, 6, 8, 10])
clf = BayesianRidge(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
def test_toy_ard_object():
# Test BayesianRegression ARD classifier
X = np.array([[1], [2], [3]])
Y = np.array([1, 2, 3])
clf = ARDRegression(compute_score=True)
clf.fit(X, Y)
# Check that the model could approximately learn the identity function
test = [[1], [3], [4]]
assert_array_almost_equal(clf.predict(test), [1, 3, 4], 2)
| bsd-3-clause |
drabastomek/practicalDataAnalysisCookbook | Codes/Chapter07/ts_detrendAndRemoveSeasonality.py | 1 | 2625 | import pandas as pd
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# change the font size
matplotlib.rc('xtick', labelsize=9)
matplotlib.rc('ytick', labelsize=9)
matplotlib.rc('font', size=14)
# time series tools
import statsmodels.api as sm
def period_mean(data, freq):
'''
Method to calculate mean for each frequency
'''
return np.array(
[np.mean(data[i::freq]) for i in range(freq)])
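# e.g. period_mean([1, 2, 3, 4, 5, 6], freq=3) -> array([2.5, 3.5, 4.5]):
# the mean of every freq-th observation, i.e. one value per month when freq=12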
# folder with data
data_folder = '../../Data/Chapter07/'
# colors
colors = ['#FF6600', '#000000', '#29407C', '#660000']
# read the data
riverFlows = pd.read_csv(data_folder + 'combined_flow.csv',
index_col=0, parse_dates=[0])
# detrend the data
detrended = sm.tsa.tsatools.detrend(riverFlows,
order=1, axis=0)
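# note: order=1 removes a linear (first-order polynomial) trend from each
# column; order=0 would only subtract the mean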
# create a data frame with the detrended data
detrended = pd.DataFrame(detrended, index=riverFlows.index,
columns=['american_flow_d', 'columbia_flow_d'])
# join to the main dataset
riverFlows = riverFlows.join(detrended)
# calculate trend
riverFlows['american_flow_t'] = riverFlows['american_flow'] \
- riverFlows['american_flow_d']
riverFlows['columbia_flow_t'] = riverFlows['columbia_flow'] \
- riverFlows['columbia_flow_d']
# number of observations and frequency of seasonal component
nobs = len(riverFlows)
freq = 12 # yearly seasonality
# remove the seasonality
for col in ['american_flow_d', 'columbia_flow_d']:
period_averages = period_mean(riverFlows[col], freq)
riverFlows[col[:-2]+'_s'] = np.tile(period_averages,
nobs // freq + 1)[:nobs]
riverFlows[col[:-2]+'_r'] = np.array(riverFlows[col]) \
- np.array(riverFlows[col[:-2]+'_s'])
# save the decomposed dataset
with open(data_folder + 'combined_flow_d.csv', 'w') as o:
    o.write(riverFlows.to_csv())
# plot the data
fig, ax = plt.subplots(2, 3, sharex=True, sharey=True)
# set the size of the figure explicitly
fig.set_size_inches(12, 7)
# plot the charts for american
ax[0, 0].plot(riverFlows['american_flow_t'], colors[0])
ax[0, 1].plot(riverFlows['american_flow_s'], colors[1])
ax[0, 2].plot(riverFlows['american_flow_r'], colors[2])
# plot the charts for columbia
ax[1, 0].plot(riverFlows['columbia_flow_t'], colors[0])
ax[1, 1].plot(riverFlows['columbia_flow_s'], colors[1])
ax[1, 2].plot(riverFlows['columbia_flow_r'], colors[2])
# set titles for columns
ax[0, 0].set_title('Trend')
ax[0, 1].set_title('Seasonality')
ax[0, 2].set_title('Residuals')
# set titles for rows
ax[0, 0].set_ylabel('American')
ax[1, 0].set_ylabel('Columbia')
# save the chart
plt.savefig(data_folder + 'charts/detrended.png', dpi=300)
| gpl-2.0 |
jlegendary/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/widgets.py | 69 | 40833 | """
GUI Neutral widgets
All of these widgets require you to predefine an Axes instance and
pass that as the first arg. matplotlib doesn't try to be too smart in
layout -- you have to figure out how wide and tall you want your Axes
to be to accommodate your widget.
"""
import numpy as np
from mlab import dist
from patches import Circle, Rectangle
from lines import Line2D
from transforms import blended_transform_factory
class LockDraw:
"""
some widgets, like the cursor, draw onto the canvas, and this is not
    desirable under all circumstances, like when the toolbar is in
    zoom-to-rect mode and drawing a rectangle. The module level "lock"
    allows someone to grab the lock and prevent other widgets from
    drawing. Use matplotlib.widgets.lock(someobj) to prevent other
    widgets from drawing while you interact with someobj.
"""
def __init__(self):
self._owner = None
def __call__(self, o):
'reserve the lock for o'
if not self.available(o):
raise ValueError('already locked')
self._owner = o
def release(self, o):
'release the lock'
if not self.available(o):
raise ValueError('you do not own this lock')
self._owner = None
def available(self, o):
'drawing is available to o'
return not self.locked() or self.isowner(o)
def isowner(self, o):
'o owns the lock'
return self._owner is o
def locked(self):
'the lock is held'
return self._owner is not None
class Widget:
"""
OK, I couldn't resist; abstract base class for mpl GUI neutral
widgets
"""
drawon = True
eventson = True
class Button(Widget):
"""
A GUI neutral button
    The following attributes are accessible
ax - the Axes the button renders into
label - a text.Text instance
color - the color of the button when not hovering
hovercolor - the color of the button when hovering
Call "on_clicked" to connect to the button
"""
def __init__(self, ax, label, image=None,
color='0.85', hovercolor='0.95'):
"""
ax is the Axes instance the button will be placed into
label is a string which is the button text
image if not None, is an image to place in the button -- can
be any legal arg to imshow (numpy array, matplotlib Image
instance, or PIL image)
color is the color of the button when not activated
hovercolor is the color of the button when the mouse is over
it
"""
if image is not None:
ax.imshow(image)
self.label = ax.text(0.5, 0.5, label,
verticalalignment='center',
horizontalalignment='center',
transform=ax.transAxes)
self.cnt = 0
self.observers = {}
self.ax = ax
ax.figure.canvas.mpl_connect('button_press_event', self._click)
ax.figure.canvas.mpl_connect('motion_notify_event', self._motion)
ax.set_navigate(False)
ax.set_axis_bgcolor(color)
ax.set_xticks([])
ax.set_yticks([])
self.color = color
self.hovercolor = hovercolor
self._lastcolor = color
def _click(self, event):
if event.inaxes != self.ax: return
if not self.eventson: return
for cid, func in self.observers.items():
func(event)
def _motion(self, event):
if event.inaxes==self.ax:
c = self.hovercolor
else:
c = self.color
if c != self._lastcolor:
self.ax.set_axis_bgcolor(c)
self._lastcolor = c
if self.drawon: self.ax.figure.canvas.draw()
def on_clicked(self, func):
"""
When the button is clicked, call this func with event
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class Slider(Widget):
"""
A slider representing a floating point range
The following attributes are defined
ax : the slider axes.Axes instance
val : the current slider value
vline : a Line2D instance representing the initial value
poly : A patch.Polygon instance which is the slider
valfmt : the format string for formatting the slider text
label : a text.Text instance, the slider label
closedmin : whether the slider is closed on the minimum
closedmax : whether the slider is closed on the maximum
slidermin : another slider - if not None, this slider must be > slidermin
slidermax : another slider - if not None, this slider must be < slidermax
dragging : allow for mouse dragging on slider
Call on_changed to connect to the slider event
"""
def __init__(self, ax, label, valmin, valmax, valinit=0.5, valfmt='%1.2f',
closedmin=True, closedmax=True, slidermin=None, slidermax=None,
dragging=True, **kwargs):
"""
Create a slider from valmin to valmax in axes ax;
valinit - the slider initial position
label - the slider label
valfmt - used to format the slider value
closedmin and closedmax - indicate whether the slider interval is closed
        slidermin and slidermax - used to constrain the value of
this slider to the values of other sliders.
additional kwargs are passed on to self.poly which is the
matplotlib.patches.Rectangle which draws the slider. See the
matplotlib.patches.Rectangle documentation for legal property
names (eg facecolor, edgecolor, alpha, ...)
"""
self.ax = ax
self.valmin = valmin
self.valmax = valmax
self.val = valinit
self.valinit = valinit
self.poly = ax.axvspan(valmin,valinit,0,1, **kwargs)
self.vline = ax.axvline(valinit,0,1, color='r', lw=1)
self.valfmt=valfmt
ax.set_yticks([])
ax.set_xlim((valmin, valmax))
ax.set_xticks([])
ax.set_navigate(False)
ax.figure.canvas.mpl_connect('button_press_event', self._update)
if dragging:
ax.figure.canvas.mpl_connect('motion_notify_event', self._update)
self.label = ax.text(-0.02, 0.5, label, transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='right')
self.valtext = ax.text(1.02, 0.5, valfmt%valinit,
transform=ax.transAxes,
verticalalignment='center',
horizontalalignment='left')
self.cnt = 0
self.observers = {}
self.closedmin = closedmin
self.closedmax = closedmax
self.slidermin = slidermin
self.slidermax = slidermax
def _update(self, event):
'update the slider position'
if event.button !=1: return
if event.inaxes != self.ax: return
val = event.xdata
if not self.closedmin and val<=self.valmin: return
if not self.closedmax and val>=self.valmax: return
if self.slidermin is not None:
if val<=self.slidermin.val: return
if self.slidermax is not None:
if val>=self.slidermax.val: return
self.set_val(val)
def set_val(self, val):
xy = self.poly.xy
xy[-1] = val, 0
xy[-2] = val, 1
self.poly.xy = xy
self.valtext.set_text(self.valfmt%val)
if self.drawon: self.ax.figure.canvas.draw()
self.val = val
if not self.eventson: return
for cid, func in self.observers.items():
func(val)
def on_changed(self, func):
"""
        When the slider value is changed, call this func with the new
slider position
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
def reset(self):
"reset the slider to the initial value if needed"
if (self.val != self.valinit):
self.set_val(self.valinit)
class CheckButtons(Widget):
"""
    A GUI neutral set of check buttons
The following attributes are exposed
ax - the Axes instance the buttons are in
labels - a list of text.Text instances
lines - a list of (line1, line2) tuples for the x's in the check boxes.
These lines exist for each box, but have set_visible(False) when
box is not checked
rectangles - a list of patch.Rectangle instances
Connect to the CheckButtons with the on_clicked method
"""
def __init__(self, ax, labels, actives):
"""
Add check buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
actives is a len(buttons) list of booleans indicating whether
the button is active
"""
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
if len(labels)>1:
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
else:
dy = 0.25
ys = [0.5]
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.lines = []
self.rectangles = []
lineparams = {'color':'k', 'linewidth':1.25, 'transform':ax.transAxes,
'solid_capstyle':'butt'}
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
w, h = dy/2., dy/2.
x, y = 0.05, y-h/2.
p = Rectangle(xy=(x,y), width=w, height=h,
facecolor=axcolor,
transform=ax.transAxes)
l1 = Line2D([x, x+w], [y+h, y], **lineparams)
l2 = Line2D([x, x+w], [y, y+h], **lineparams)
l1.set_visible(actives[cnt])
l2.set_visible(actives[cnt])
self.labels.append(t)
self.rectangles.append(p)
self.lines.append((l1,l2))
ax.add_patch(p)
ax.add_line(l1)
ax.add_line(l2)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
for p,t,lines in zip(self.rectangles, self.labels, self.lines):
if (t.get_window_extent().contains(event.x, event.y) or
p.get_window_extent().contains(event.x, event.y) ):
l1, l2 = lines
l1.set_visible(not l1.get_visible())
l2.set_visible(not l2.get_visible())
thist = t
break
else:
return
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class RadioButtons(Widget):
"""
A GUI neutral radio button
The following attributes are exposed
ax - the Axes instance the buttons are in
activecolor - the color of the button when clicked
labels - a list of text.Text instances
circles - a list of patch.Circle instances
Connect to the RadioButtons with the on_clicked method
"""
def __init__(self, ax, labels, active=0, activecolor='blue'):
"""
Add radio buttons to axes.Axes instance ax
labels is a len(buttons) list of labels as strings
active is the index into labels for the button that is active
activecolor is the color of the button when clicked
"""
self.activecolor = activecolor
ax.set_xticks([])
ax.set_yticks([])
ax.set_navigate(False)
dy = 1./(len(labels)+1)
ys = np.linspace(1-dy, dy, len(labels))
cnt = 0
axcolor = ax.get_axis_bgcolor()
self.labels = []
self.circles = []
for y, label in zip(ys, labels):
t = ax.text(0.25, y, label, transform=ax.transAxes,
horizontalalignment='left',
verticalalignment='center')
if cnt==active:
facecolor = activecolor
else:
facecolor = axcolor
p = Circle(xy=(0.15, y), radius=0.05, facecolor=facecolor,
transform=ax.transAxes)
self.labels.append(t)
self.circles.append(p)
ax.add_patch(p)
cnt += 1
ax.figure.canvas.mpl_connect('button_press_event', self._clicked)
self.ax = ax
self.cnt = 0
self.observers = {}
def _clicked(self, event):
if event.button !=1 : return
if event.inaxes != self.ax: return
xy = self.ax.transAxes.inverted().transform_point((event.x, event.y))
pclicked = np.array([xy[0], xy[1]])
def inside(p):
pcirc = np.array([p.center[0], p.center[1]])
return dist(pclicked, pcirc) < p.radius
for p,t in zip(self.circles, self.labels):
if t.get_window_extent().contains(event.x, event.y) or inside(p):
inp = p
thist = t
break
else: return
for p in self.circles:
if p==inp: color = self.activecolor
else: color = self.ax.get_axis_bgcolor()
p.set_facecolor(color)
if self.drawon: self.ax.figure.canvas.draw()
if not self.eventson: return
for cid, func in self.observers.items():
func(thist.get_text())
def on_clicked(self, func):
"""
When the button is clicked, call this func with button label
A connection id is returned which can be used to disconnect
"""
cid = self.cnt
self.observers[cid] = func
self.cnt += 1
return cid
def disconnect(self, cid):
'remove the observer with connection id cid'
try: del self.observers[cid]
except KeyError: pass
class SubplotTool(Widget):
"""
    A tool to adjust the subplot params of a figure
"""
def __init__(self, targetfig, toolfig):
"""
targetfig is the figure to adjust
        toolfig is the figure to embed the subplot tool into. If
None, a default pylab figure will be created. If you are
using this from the GUI
"""
self.targetfig = targetfig
toolfig.subplots_adjust(left=0.2, right=0.9)
class toolbarfmt:
def __init__(self, slider):
self.slider = slider
def __call__(self, x, y):
fmt = '%s=%s'%(self.slider.label.get_text(), self.slider.valfmt)
return fmt%x
self.axleft = toolfig.add_subplot(711)
self.axleft.set_title('Click on slider to adjust subplot param')
self.axleft.set_navigate(False)
self.sliderleft = Slider(self.axleft, 'left', 0, 1, targetfig.subplotpars.left, closedmax=False)
self.sliderleft.on_changed(self.funcleft)
self.axbottom = toolfig.add_subplot(712)
self.axbottom.set_navigate(False)
self.sliderbottom = Slider(self.axbottom, 'bottom', 0, 1, targetfig.subplotpars.bottom, closedmax=False)
self.sliderbottom.on_changed(self.funcbottom)
self.axright = toolfig.add_subplot(713)
self.axright.set_navigate(False)
self.sliderright = Slider(self.axright, 'right', 0, 1, targetfig.subplotpars.right, closedmin=False)
self.sliderright.on_changed(self.funcright)
self.axtop = toolfig.add_subplot(714)
self.axtop.set_navigate(False)
self.slidertop = Slider(self.axtop, 'top', 0, 1, targetfig.subplotpars.top, closedmin=False)
self.slidertop.on_changed(self.functop)
self.axwspace = toolfig.add_subplot(715)
self.axwspace.set_navigate(False)
self.sliderwspace = Slider(self.axwspace, 'wspace', 0, 1, targetfig.subplotpars.wspace, closedmax=False)
self.sliderwspace.on_changed(self.funcwspace)
self.axhspace = toolfig.add_subplot(716)
self.axhspace.set_navigate(False)
self.sliderhspace = Slider(self.axhspace, 'hspace', 0, 1, targetfig.subplotpars.hspace, closedmax=False)
self.sliderhspace.on_changed(self.funchspace)
# constraints
self.sliderleft.slidermax = self.sliderright
self.sliderright.slidermin = self.sliderleft
self.sliderbottom.slidermax = self.slidertop
self.slidertop.slidermin = self.sliderbottom
bax = toolfig.add_axes([0.8, 0.05, 0.15, 0.075])
self.buttonreset = Button(bax, 'Reset')
sliders = (self.sliderleft, self.sliderbottom, self.sliderright,
self.slidertop, self.sliderwspace, self.sliderhspace, )
def func(event):
thisdrawon = self.drawon
self.drawon = False
# store the drawon state of each slider
bs = []
for slider in sliders:
bs.append(slider.drawon)
slider.drawon = False
# reset the slider to the initial position
for slider in sliders:
slider.reset()
# reset drawon
for slider, b in zip(sliders, bs):
slider.drawon = b
# draw the canvas
self.drawon = thisdrawon
if self.drawon:
toolfig.canvas.draw()
self.targetfig.canvas.draw()
# during reset there can be a temporary invalid state
# depending on the order of the reset so we turn off
# validation for the resetting
validate = toolfig.subplotpars.validate
toolfig.subplotpars.validate = False
self.buttonreset.on_clicked(func)
toolfig.subplotpars.validate = validate
def funcleft(self, val):
self.targetfig.subplots_adjust(left=val)
if self.drawon: self.targetfig.canvas.draw()
def funcright(self, val):
self.targetfig.subplots_adjust(right=val)
if self.drawon: self.targetfig.canvas.draw()
def funcbottom(self, val):
self.targetfig.subplots_adjust(bottom=val)
if self.drawon: self.targetfig.canvas.draw()
def functop(self, val):
self.targetfig.subplots_adjust(top=val)
if self.drawon: self.targetfig.canvas.draw()
def funcwspace(self, val):
self.targetfig.subplots_adjust(wspace=val)
if self.drawon: self.targetfig.canvas.draw()
def funchspace(self, val):
self.targetfig.subplots_adjust(hspace=val)
if self.drawon: self.targetfig.canvas.draw()
class Cursor:
"""
    A horizontal and vertical line that span the axes and move with
    the pointer. You can turn off the hline or vline respectively with
    the attributes
    horizOn = True|False: controls visibility of the horizontal line
    vertOn = True|False: controls visibility of the vertical line
    and the visibility of the cursor itself with the visible attribute
"""
def __init__(self, ax, useblit=False, **lineprops):
"""
Add a cursor to ax. If useblit=True, use the backend
dependent blitting features for faster updates (GTKAgg only
now). lineprops is a dictionary of line properties. See
examples/widgets/cursor.py.
"""
self.ax = ax
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
self.visible = True
self.horizOn = True
self.vertOn = True
self.useblit = useblit
self.lineh = ax.axhline(ax.get_ybound()[0], visible=False, **lineprops)
self.linev = ax.axvline(ax.get_xbound()[0], visible=False, **lineprops)
self.background = None
self.needclear = False
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
self.linev.set_visible(False)
self.lineh.set_visible(False)
def onmove(self, event):
'on mouse motion draw the cursor if visible'
if event.inaxes != self.ax:
self.linev.set_visible(False)
self.lineh.set_visible(False)
if self.needclear:
self.canvas.draw()
self.needclear = False
return
self.needclear = True
if not self.visible: return
self.linev.set_xdata((event.xdata, event.xdata))
self.lineh.set_ydata((event.ydata, event.ydata))
self.linev.set_visible(self.visible and self.vertOn)
self.lineh.set_visible(self.visible and self.horizOn)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.linev)
self.ax.draw_artist(self.lineh)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
class MultiCursor:
"""
Provide a vertical line cursor shared between multiple axes
from matplotlib.widgets import MultiCursor
from pylab import figure, show, nx
t = nx.arange(0.0, 2.0, 0.01)
s1 = nx.sin(2*nx.pi*t)
s2 = nx.sin(4*nx.pi*t)
fig = figure()
ax1 = fig.add_subplot(211)
ax1.plot(t, s1)
ax2 = fig.add_subplot(212, sharex=ax1)
ax2.plot(t, s2)
multi = MultiCursor(fig.canvas, (ax1, ax2), color='r', lw=1)
show()
"""
def __init__(self, canvas, axes, useblit=True, **lineprops):
self.canvas = canvas
self.axes = axes
xmin, xmax = axes[-1].get_xlim()
xmid = 0.5*(xmin+xmax)
self.lines = [ax.axvline(xmid, visible=False, **lineprops) for ax in axes]
self.visible = True
self.useblit = useblit
self.background = None
self.needclear = False
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('draw_event', self.clear)
def clear(self, event):
'clear the cursor'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.canvas.figure.bbox)
for line in self.lines: line.set_visible(False)
def onmove(self, event):
if event.inaxes is None: return
if not self.canvas.widgetlock.available(self): return
self.needclear = True
if not self.visible: return
for line in self.lines:
line.set_xdata((event.xdata, event.xdata))
line.set_visible(self.visible)
self._update()
def _update(self):
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
for ax, line in zip(self.axes, self.lines):
ax.draw_artist(line)
self.canvas.blit(self.canvas.figure.bbox)
else:
self.canvas.draw_idle()
class SpanSelector:
"""
Select a min/max range of the x or y axes for a matplotlib Axes
Example usage:
ax = subplot(111)
ax.plot(x,y)
def onselect(vmin, vmax):
print vmin, vmax
span = SpanSelector(ax, onselect, 'horizontal')
onmove_callback is an optional callback that will be called on mouse move
with the span range
"""
def __init__(self, ax, onselect, direction, minspan=None, useblit=False, rectprops=None, onmove_callback=None):
"""
        Create a span selector in ax. When a selection is made, call
        onselect with
          onselect(vmin, vmax)
        and clear the span.
direction must be 'horizontal' or 'vertical'
If minspan is not None, ignore events smaller than minspan
The span rect is drawn with rectprops; default
rectprops = dict(facecolor='red', alpha=0.5)
set the visible attribute to False if you want to turn off
the functionality of the span selector
"""
if rectprops is None:
rectprops = dict(facecolor='red', alpha=0.5)
assert direction in ['horizontal', 'vertical'], 'Must choose horizontal or vertical for direction'
self.direction = direction
self.ax = None
self.canvas = None
self.visible = True
self.cids=[]
self.rect = None
self.background = None
self.pressv = None
self.rectprops = rectprops
self.onselect = onselect
self.onmove_callback = onmove_callback
self.useblit = useblit
self.minspan = minspan
# Needed when dragging out of axes
self.buttonDown = False
self.prev = (0, 0)
self.new_axes(ax)
def new_axes(self,ax):
self.ax = ax
if self.canvas is not ax.figure.canvas:
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
self.canvas = ax.figure.canvas
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
self.cids.append(self.canvas.mpl_connect('button_press_event', self.press))
self.cids.append(self.canvas.mpl_connect('button_release_event', self.release))
self.cids.append(self.canvas.mpl_connect('draw_event', self.update_background))
if self.direction == 'horizontal':
trans = blended_transform_factory(self.ax.transData, self.ax.transAxes)
w,h = 0,1
else:
trans = blended_transform_factory(self.ax.transAxes, self.ax.transData)
w,h = 1,0
self.rect = Rectangle( (0,0), w, h,
transform=trans,
visible=False,
**self.rectprops
)
if not self.useblit: self.ax.add_patch(self.rect)
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
return event.inaxes!=self.ax or not self.visible or event.button !=1
def press(self, event):
'on button press event'
if self.ignore(event): return
self.buttonDown = True
self.rect.set_visible(self.visible)
if self.direction == 'horizontal':
self.pressv = event.xdata
else:
self.pressv = event.ydata
return False
def release(self, event):
'on button release event'
if self.pressv is None or (self.ignore(event) and not self.buttonDown): return
self.buttonDown = False
self.rect.set_visible(False)
self.canvas.draw()
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
span = vmax - vmin
if self.minspan is not None and span<self.minspan: return
self.onselect(vmin, vmax)
self.pressv = None
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.rect)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event'
if self.pressv is None or self.ignore(event): return
x, y = event.xdata, event.ydata
self.prev = x, y
if self.direction == 'horizontal':
v = x
else:
v = y
minv, maxv = v, self.pressv
if minv>maxv: minv, maxv = maxv, minv
if self.direction == 'horizontal':
self.rect.set_x(minv)
self.rect.set_width(maxv-minv)
else:
self.rect.set_y(minv)
self.rect.set_height(maxv-minv)
if self.onmove_callback is not None:
vmin = self.pressv
if self.direction == 'horizontal':
vmax = event.xdata or self.prev[0]
else:
vmax = event.ydata or self.prev[1]
if vmin>vmax: vmin, vmax = vmax, vmin
self.onmove_callback(vmin, vmax)
self.update()
return False
# For backwards compatibility only!
class HorizontalSpanSelector(SpanSelector):
def __init__(self, ax, onselect, **kwargs):
import warnings
warnings.warn('Use SpanSelector instead!', DeprecationWarning)
SpanSelector.__init__(self, ax, onselect, 'horizontal', **kwargs)
class RectangleSelector:
"""
    Select a rectangular region (min/max range in x and y) of a matplotlib Axes
Example usage::
from matplotlib.widgets import RectangleSelector
from pylab import *
def onselect(eclick, erelease):
'eclick and erelease are matplotlib events at press and release'
print ' startposition : (%f, %f)' % (eclick.xdata, eclick.ydata)
print ' endposition : (%f, %f)' % (erelease.xdata, erelease.ydata)
print ' used button : ', eclick.button
def toggle_selector(event):
print ' Key pressed.'
if event.key in ['Q', 'q'] and toggle_selector.RS.active:
print ' RectangleSelector deactivated.'
toggle_selector.RS.set_active(False)
if event.key in ['A', 'a'] and not toggle_selector.RS.active:
print ' RectangleSelector activated.'
toggle_selector.RS.set_active(True)
x = arange(100)/(99.0)
y = sin(x)
        fig = figure()
ax = subplot(111)
ax.plot(x,y)
toggle_selector.RS = RectangleSelector(ax, onselect, drawtype='line')
connect('key_press_event', toggle_selector)
show()
"""
def __init__(self, ax, onselect, drawtype='box',
minspanx=None, minspany=None, useblit=False,
lineprops=None, rectprops=None, spancoords='data'):
"""
        Create a selector in ax. When a selection is made, call
        onselect with
          onselect(pos_1, pos_2)
        and clear the drawn box/line, where pos_1 and pos_2 are arrays
        of length 2 containing the x- and y-coordinates of the press
        and release events.
        If minspanx is not None then events smaller than minspanx
        in the x direction are ignored (the same applies to minspany in y).
The rect is drawn with rectprops; default
rectprops = dict(facecolor='red', edgecolor = 'black',
alpha=0.5, fill=False)
The line is drawn with lineprops; default
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
        Use drawtype if you want the mouse to draw a line, a box or nothing
        between the click and the current position, by setting
        drawtype='line', drawtype='box' or drawtype='none'.
        spancoords is one of 'data' or 'pixels'. If 'data', minspanx
        and minspany will be interpreted in the same coordinates as
        the x and y axes; if 'pixels', they are in pixels.
"""
self.ax = ax
self.visible = True
self.canvas = ax.figure.canvas
self.canvas.mpl_connect('motion_notify_event', self.onmove)
self.canvas.mpl_connect('button_press_event', self.press)
self.canvas.mpl_connect('button_release_event', self.release)
self.canvas.mpl_connect('draw_event', self.update_background)
self.active = True # for activation / deactivation
self.to_draw = None
self.background = None
if drawtype == 'none':
drawtype = 'line' # draw a line but make it
self.visible = False # invisible
if drawtype == 'box':
if rectprops is None:
rectprops = dict(facecolor='white', edgecolor = 'black',
alpha=0.5, fill=False)
self.rectprops = rectprops
self.to_draw = Rectangle((0,0), 0, 1,visible=False,**self.rectprops)
self.ax.add_patch(self.to_draw)
if drawtype == 'line':
if lineprops is None:
lineprops = dict(color='black', linestyle='-',
linewidth = 2, alpha=0.5)
self.lineprops = lineprops
self.to_draw = Line2D([0,0],[0,0],visible=False,**self.lineprops)
self.ax.add_line(self.to_draw)
self.onselect = onselect
self.useblit = useblit
self.minspanx = minspanx
self.minspany = minspany
assert(spancoords in ('data', 'pixels'))
self.spancoords = spancoords
self.drawtype = drawtype
# will save the data (position at mouseclick)
self.eventpress = None
# will save the data (pos. at mouserelease)
self.eventrelease = None
def update_background(self, event):
'force an update of the background'
if self.useblit:
self.background = self.canvas.copy_from_bbox(self.ax.bbox)
def ignore(self, event):
'return True if event should be ignored'
# If RectangleSelector is not active :
if not self.active:
return True
# If canvas was locked
if not self.canvas.widgetlock.available(self):
return True
# If no button was pressed yet ignore the event if it was out
# of the axes
if self.eventpress == None:
return event.inaxes!= self.ax
# If a button was pressed, check if the release-button is the
# same.
return (event.inaxes!=self.ax or
event.button != self.eventpress.button)
def press(self, event):
'on button press event'
# Is the correct button pressed within the correct axes?
if self.ignore(event): return
        # make the drawn box/line visible and get the click coordinates,
        # button, ...
self.to_draw.set_visible(self.visible)
self.eventpress = event
return False
def release(self, event):
'on button release event'
if self.eventpress is None or self.ignore(event): return
# make the box/line invisible again
self.to_draw.set_visible(False)
self.canvas.draw()
# release coordinates, button, ...
self.eventrelease = event
if self.spancoords=='data':
xmin, ymin = self.eventpress.xdata, self.eventpress.ydata
xmax, ymax = self.eventrelease.xdata, self.eventrelease.ydata
# calculate dimensions of box or line get values in the right
# order
elif self.spancoords=='pixels':
xmin, ymin = self.eventpress.x, self.eventpress.y
xmax, ymax = self.eventrelease.x, self.eventrelease.y
else:
raise ValueError('spancoords must be "data" or "pixels"')
if xmin>xmax: xmin, xmax = xmax, xmin
if ymin>ymax: ymin, ymax = ymax, ymin
spanx = xmax - xmin
spany = ymax - ymin
xproblems = self.minspanx is not None and spanx<self.minspanx
yproblems = self.minspany is not None and spany<self.minspany
        if (self.drawtype=='box') and (xproblems or yproblems):
            # ignore the selection if the drawn box is too small in
            # either the x or the y direction
            return
        if (self.drawtype=='line') and (xproblems and yproblems):
            # ignore the selection if the drawn line is too small in
            # both the x and the y direction
            return
self.onselect(self.eventpress, self.eventrelease)
# call desired function
self.eventpress = None # reset the variables to their
        self.eventrelease = None  # initial values
return False
def update(self):
'draw using newfangled blit or oldfangled draw depending on useblit'
if self.useblit:
if self.background is not None:
self.canvas.restore_region(self.background)
self.ax.draw_artist(self.to_draw)
self.canvas.blit(self.ax.bbox)
else:
self.canvas.draw_idle()
return False
def onmove(self, event):
'on motion notify event if box/line is wanted'
if self.eventpress is None or self.ignore(event): return
        x, y = event.xdata, event.ydata  # current cursor position (with
                                         # the button still pressed)
if self.drawtype == 'box':
minx, maxx = self.eventpress.xdata, x # click-x and actual mouse-x
miny, maxy = self.eventpress.ydata, y # click-y and actual mouse-y
if minx>maxx: minx, maxx = maxx, minx # get them in the right order
if miny>maxy: miny, maxy = maxy, miny
self.to_draw.set_x(minx) # set lower left of box
self.to_draw.set_y(miny)
self.to_draw.set_width(maxx-minx) # set width and height of box
self.to_draw.set_height(maxy-miny)
self.update()
return False
if self.drawtype == 'line':
self.to_draw.set_data([self.eventpress.xdata, x],
[self.eventpress.ydata, y])
self.update()
return False
def set_active(self, active):
""" Use this to activate / deactivate the RectangleSelector
        from your program with a boolean variable 'active'.
"""
self.active = active
def get_active(self):
""" to get status of active mode (boolean variable)"""
return self.active
class Lasso(Widget):
def __init__(self, ax, xy, callback=None, useblit=True):
self.axes = ax
self.figure = ax.figure
self.canvas = self.figure.canvas
self.useblit = useblit
if useblit:
self.background = self.canvas.copy_from_bbox(self.axes.bbox)
x, y = xy
self.verts = [(x,y)]
self.line = Line2D([x], [y], linestyle='-', color='black', lw=2)
self.axes.add_line(self.line)
self.callback = callback
self.cids = []
self.cids.append(self.canvas.mpl_connect('button_release_event', self.onrelease))
self.cids.append(self.canvas.mpl_connect('motion_notify_event', self.onmove))
def onrelease(self, event):
if self.verts is not None:
self.verts.append((event.xdata, event.ydata))
if len(self.verts)>2:
self.callback(self.verts)
self.axes.lines.remove(self.line)
self.verts = None
for cid in self.cids:
self.canvas.mpl_disconnect(cid)
def onmove(self, event):
if self.verts is None: return
if event.inaxes != self.axes: return
if event.button!=1: return
self.verts.append((event.xdata, event.ydata))
self.line.set_data(zip(*self.verts))
if self.useblit:
self.canvas.restore_region(self.background)
self.axes.draw_artist(self.line)
self.canvas.blit(self.axes.bbox)
else:
self.canvas.draw_idle()
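# The block below is an illustrative, minimal demo of the Slider and Button
# widgets defined above; it is not part of the original module and assumes an
# interactive matplotlib backend is available.
if __name__ == '__main__':
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax_plot = fig.add_axes([0.1, 0.35, 0.8, 0.55])
    ax_freq = fig.add_axes([0.1, 0.2, 0.8, 0.05])
    ax_reset = fig.add_axes([0.4, 0.05, 0.2, 0.08])
    t = np.arange(0.0, 1.0, 0.001)
    line, = ax_plot.plot(t, np.sin(2*np.pi*t))
    sfreq = Slider(ax_freq, 'freq', 0.1, 10.0, valinit=1.0)
    breset = Button(ax_reset, 'Reset')
    def update(val):
        # redraw the sine curve with the current slider value
        line.set_ydata(np.sin(2*np.pi*sfreq.val*t))
        fig.canvas.draw_idle()
    sfreq.on_changed(update)
    breset.on_clicked(lambda event: sfreq.reset())
    plt.show()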
| gpl-3.0 |
murrayrm/python-control | examples/pvtol-nested.py | 2 | 4551 | # pvtol-nested.py - inner/outer design for vectored thrust aircraft
# RMM, 5 Sep 09
#
# This file works through a fairly complicated control design and
# analysis, corresponding to the planar vertical takeoff and landing
# (PVTOL) aircraft in Astrom and Murray, Chapter 11. It is intended
# to demonstrate the basic functionality of the python-control
# package.
#
from __future__ import print_function
import os
import matplotlib.pyplot as plt # MATLAB plotting functions
from control.matlab import * # MATLAB-like functions
import numpy as np
# System parameters
m = 4 # mass of aircraft
J = 0.0475 # inertia around pitch axis
r = 0.25 # distance to center of force
g = 9.8 # gravitational constant
c = 0.05 # damping factor (estimated)
# Transfer functions for dynamics
Pi = tf([r], [J, 0, 0]) # inner loop (roll)
Po = tf([1], [m, c, 0]) # outer loop (position)
#
# Inner loop control design
#
# This is the controller for the pitch dynamics. Goal is to have
# fast response for the pitch dynamics so that we can use this as a
# control for the lateral dynamics
#
# Design a simple lead controller for the system
k, a, b = 200, 2, 50
Ci = k*tf([1, a], [1, b]) # lead compensator
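# i.e. Ci(s) = k*(s + a)/(s + b) = 200*(s + 2)/(s + 50), which contributes
# phase lead between the zero at s = -a and the pole at s = -b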
Li = Pi*Ci
# Bode plot for the open loop process
plt.figure(1)
bode(Pi)
# Bode plot for the loop transfer function, with margins
plt.figure(2)
bode(Li)
# Compute the gain and phase margins
#! Not implemented
# gm, pm, wcg, wcp = margin(Li)
# Compute the sensitivity and complementary sensitivity functions
Si = feedback(1, Li)
Ti = Li*Si
# Check to make sure that the specification is met
plt.figure(3)
gangof4(Pi, Ci)
# Compute the actual transfer function from u1 to v1 (see L8.2 notes)
# Hi = Ci*(1-m*g*Pi)/(1+Ci*Pi)
Hi = parallel(feedback(Ci, Pi), -m*g*feedback(Ci*Pi, 1))
plt.figure(4)
plt.clf()
plt.subplot(221)
bode(Hi)
# Now design the lateral control system
a, b, K = 0.02, 5, 2
Co = -K*tf([1, 0.3], [1, 10]) # another lead compensator
Lo = -m*g*Po*Co
plt.figure(5)
bode(Lo) # margin(Lo)
# Finally compute the real outer-loop loop gain + responses
L = Co*Hi*Po
S = feedback(1, L)
T = feedback(L, 1)
# Compute stability margins
gm, pm, wgc, wpc = margin(L)
print("Gain margin: %g at %g" % (gm, wgc))
print("Phase margin: %g at %g" % (pm, wpc))
plt.figure(6)
plt.clf()
bode(L, np.logspace(-4, 3))
# Add crossover line to the magnitude plot
#
# Note: in matplotlib before v2.1, the following code worked:
#
# plt.subplot(211); hold(True);
# loglog([1e-4, 1e3], [1, 1], 'k-')
#
# In later versions of matplotlib the call to plt.subplot will clear the
# axes and so we have to extract the axes that we want to use by hand.
# In addition, hold() is deprecated so we no longer require it.
#
for ax in plt.gcf().axes:
if ax.get_label() == 'control-bode-magnitude':
break
ax.semilogx([1e-4, 1e3], 20*np.log10([1, 1]), 'k-')
#
# Replot phase starting at -90 degrees
#
# Get the phase plot axes
for ax in plt.gcf().axes:
if ax.get_label() == 'control-bode-phase':
break
# Recreate the frequency response and shift the phase
mag, phase, w = freqresp(L, np.logspace(-4, 3))
phase = phase - 360
# Replot the phase by hand
ax.semilogx([1e-4, 1e3], [-180, -180], 'k-')
ax.semilogx(w, np.squeeze(phase), 'b-')
ax.axis([1e-4, 1e3, -360, 0])
plt.xlabel('Frequency [rad/s]')
plt.ylabel('Phase [deg]')
# plt.set(gca, 'YTick', [-360, -270, -180, -90, 0])
# plt.set(gca, 'XTick', [10^-4, 10^-2, 1, 100])
#
# Nyquist plot for complete design
#
plt.figure(7)
plt.clf()
nyquist(L, (0.0001, 1000))
# Add a box in the region we are going to expand
plt.plot([-2, -2, 1, 1, -2], [-4, 4, 4, -4, -4], 'r-')
# Expanded region
plt.figure(8)
plt.clf()
nyquist(L)
plt.axis([-2, 1, -4, 4])
# set up the color
color = 'b'
# Add arrows to the plot
# H1 = L.evalfr(0.4); H2 = L.evalfr(0.41);
# arrow([real(H1), imag(H1)], [real(H2), imag(H2)], AM_normal_arrowsize, \
# 'EdgeColor', color, 'FaceColor', color);
# H1 = freqresp(L, 0.35); H2 = freqresp(L, 0.36);
# arrow([real(H2), -imag(H2)], [real(H1), -imag(H1)], AM_normal_arrowsize, \
# 'EdgeColor', color, 'FaceColor', color);
plt.figure(9)
Yvec, Tvec = step(T, np.linspace(0, 20))
plt.plot(Tvec.T, Yvec.T)
Yvec, Tvec = step(Co*S, np.linspace(0, 20))
plt.plot(Tvec.T, Yvec.T)
plt.figure(10)
plt.clf()
P, Z = pzmap(T, plot=True, grid=True)
print("Closed loop poles and zeros: ", P, Z)
# Gang of Four
plt.figure(11)
plt.clf()
gangof4(Hi*Po, Co)
if 'PYCONTROL_TEST_EXAMPLES' not in os.environ:
plt.show()
| bsd-3-clause |
rc/sfepy | script/plot_mesh.py | 4 | 4164 | #!/usr/bin/env python
"""
Plot mesh connectivities, facet orientations, global and local DOF ids etc.
To switch off plotting some mesh entities, set the corresponding color to
`None`.
"""
from __future__ import absolute_import
import sys
sys.path.append('.')
from argparse import ArgumentParser
import matplotlib.pyplot as plt
from sfepy.base.base import output
from sfepy.base.conf import dict_from_string
from sfepy.discrete.fem import Mesh, FEDomain
import sfepy.postprocess.plot_cmesh as pc
helps = {
'vertex_opts' : 'plotting options for mesh vertices'
' [default: %(default)s]',
'edge_opts' : 'plotting options for mesh edges'
' [default: %(default)s]',
'face_opts' : 'plotting options for mesh faces'
' [default: %(default)s]',
'cell_opts' : 'plotting options for mesh cells'
' [default: %(default)s]',
'wireframe_opts' : 'plotting options for mesh wireframe'
' [default: %(default)s]',
'no_axes' :
'do not show the figure axes',
'no_show' :
'do not show the mesh plot figure',
}
def main():
default_vertex_opts = """color='k', label_global=12,
label_local=8"""
default_edge_opts = """color='b', label_global=12,
label_local=8"""
default_face_opts = """color='g', label_global=12,
label_local=8"""
default_cell_opts = """color='r', label_global=12"""
default_wireframe_opts = "color='k'"
parser = ArgumentParser(description=__doc__)
parser.add_argument('--version', action='version', version='%(prog)s')
parser.add_argument('--vertex-opts', metavar='dict-like',
action='store', dest='vertex_opts',
default=default_vertex_opts,
help=helps['vertex_opts'])
parser.add_argument('--edge-opts', metavar='dict-like',
action='store', dest='edge_opts',
default=default_edge_opts,
help=helps['edge_opts'])
parser.add_argument('--face-opts', metavar='dict-like',
action='store', dest='face_opts',
default=default_face_opts,
help=helps['face_opts'])
parser.add_argument('--cell-opts', metavar='dict-like',
action='store', dest='cell_opts',
default=default_cell_opts,
help=helps['cell_opts'])
parser.add_argument('--wireframe-opts', metavar='dict-like',
action='store', dest='wireframe_opts',
default=default_wireframe_opts,
help=helps['wireframe_opts'])
parser.add_argument('--no-axes',
action='store_false', dest='axes',
help=helps['no_axes'])
parser.add_argument('-n', '--no-show',
action='store_false', dest='show',
help=helps['no_show'])
parser.add_argument('filename')
parser.add_argument('figname', nargs='?')
options = parser.parse_args()
entities_opts = [
dict_from_string(options.vertex_opts),
dict_from_string(options.edge_opts),
dict_from_string(options.face_opts),
dict_from_string(options.cell_opts),
]
wireframe_opts = dict_from_string(options.wireframe_opts)
filename = options.filename
mesh = Mesh.from_file(filename)
output('Mesh:')
output(' dimension: %d, vertices: %d, elements: %d'
% (mesh.dim, mesh.n_nod, mesh.n_el))
domain = FEDomain('domain', mesh)
output(domain.cmesh)
domain.cmesh.cprint(1)
dim = domain.cmesh.dim
if dim == 2: entities_opts.pop(2)
ax = pc.plot_cmesh(None, domain.cmesh,
wireframe_opts=wireframe_opts,
entities_opts=entities_opts)
ax.axis('image')
if not options.axes:
ax.axis('off')
plt.tight_layout()
if options.figname:
fig = ax.figure
fig.savefig(options.figname, bbox_inches='tight')
if options.show:
plt.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
adamginsburg/APEX_CMZ_H2CO | plot_codes/tmap_figure.py | 2 | 12670 | import pylab as pl
import numpy as np
import aplpy
import os
import copy
from astropy import log
from paths import h2copath, figurepath
import paths
import matplotlib
from scipy import stats as ss
from astropy.io import fits
matplotlib.rc_file(paths.pcpath('pubfiguresrc'))
pl.ioff()
# Close these figures so we can remake them in the appropriate size
for fignum in (4,5,6,7):
pl.close(fignum)
cmap = pl.cm.RdYlBu_r
figsize = (20,10)
small_recen = dict(x=0.3, y=-0.03,width=1.05,height=0.27)
big_recen = dict(x=0.55, y=-0.075,width=2.3,height=0.40)
sgrb2x = [000.6773, 0.6578, 0.6672]
sgrb2y = [-00.0290, -00.0418, -00.0364]
vmin=10
vmax = 200
dustcolumn = '/Users/adam/work/gc/gcmosaic_column_conv36.fits'
# most of these come from make_ratiotem_cubesims
toloop = zip((
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens3e4.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens3e4.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e4.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e4.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e4_abund1e-8.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e4_abund1e-8.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e4_abund1e-10.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e4_abund1e-10.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e5.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e5.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e4_masked.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e4_masked.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens3e4_masked.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens3e4_masked.fits',
'H2CO_321220_to_303202{0}_bl_integ_temperature_dens1e5_masked.fits',
'H2CO_321220_to_303202{0}_bl_integ_weighted_temperature_dens1e5_masked.fits',
'TemperatureCube_DendrogramObjects{0}_leaves_integ.fits',
'TemperatureCube_DendrogramObjects{0}_leaves_integ_weighted.fits',
'TemperatureCube_DendrogramObjects{0}_integ.fits',
'TemperatureCube_DendrogramObjects{0}_integ_weighted.fits'),
('dens3e4', 'dens3e4_weighted',
'dens1e4', 'dens1e4_weighted',
'dens1e4_abund1e-8', 'dens1e4_abund1e-8_weighted',
'dens1e4_abund1e-10', 'dens1e4_abund1e-10_weighted',
'dens1e5', 'dens1e5_weighted',
'dens1e4_masked','dens1e4_weighted_masked',
'dens3e4_masked','dens3e4_weighted_masked',
'dens1e5_masked','dens1e5_weighted_masked',
'dendro_leaf','dendro_leaf_weighted',
'dendro','dendro_weighted'))
#for vmax,vmax_str in zip((100,200),("_vmax100","")):
for vmax,vmax_str in zip((200,),("",)):
for ftemplate,outtype in toloop:
for smooth in ("","_smooth",):#"_vsmooth"):
log.info(ftemplate.format(smooth)+" "+outtype)
fig = pl.figure(4, figsize=figsize)
fig.clf()
F = aplpy.FITSFigure(h2copath+ftemplate.format(smooth),
convention='calabretta',
figure=fig)
cm = copy.copy(cmap)
cm.set_bad((0.5,)*3)
F.show_colorscale(cmap=cm,vmin=vmin,vmax=vmax)
F.set_tick_labels_format('d.dd','d.dd')
F.recenter(**small_recen)
peaksn = os.path.join(h2copath,'APEX_H2CO_303_202{0}_bl_mask_integ.fits'.format(smooth))
#F.show_contour(peaksn, levels=[4,7,11,20,38], colors=[(0.25,0.25,0.25,0.5)]*5, #smooth=3,
# linewidths=[1.0]*5,
# zorder=10, convention='calabretta')
#color = (0.25,)*3
#F.show_contour(peaksn, levels=[4,7,11,20,38], colors=[color + (alpha,) for alpha in (0.9,0.6,0.3,0.1,0.0)], #smooth=3,
# filled=True,
# #linewidths=[1.0]*5,
# zorder=10, convention='calabretta')
color = (0.5,)*3 # should be same as background #888
F.show_contour(peaksn, levels=[-1,0]+np.logspace(0.20,2).tolist(),
colors=[(0.5,0.5,0.5,1)]*2 + [color + (alpha,) for alpha in np.exp(-(np.logspace(0.20,2)-1.7)**2/(2.5**2*2.))], #smooth=3,
filled=True,
#linewidths=[1.0]*5,
layer='mask',
zorder=10, convention='calabretta',
rasterized=True)
F.add_colorbar()
F.colorbar.set_axis_label_text('T (K)')
F.colorbar.set_axis_label_font(size=18)
F.colorbar.set_label_properties(size=16)
F.show_markers(sgrb2x, sgrb2y, color='k', facecolor='k', s=250,
edgecolor='k', alpha=0.9)
F.save(os.path.join(figurepath, "big_maps", 'lores{0}{1}{2}_tmap_withmask.pdf'.format(smooth, outtype, vmax_str)))
F.recenter(**big_recen)
F.save(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_withmask.pdf'.format(smooth, outtype, vmax_str)))
log.info(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_withmask.pdf'.format(smooth, outtype, vmax_str)))
F.show_contour(dustcolumn,
levels=[5], colors=[(0,0,0,0.5)], zorder=15,
alpha=0.5,
linewidths=[0.5],
layer='dustcontour')
F.recenter(**small_recen)
F.save(os.path.join(figurepath, "big_maps", 'lores{0}{1}{2}_tmap_withcontours.pdf'.format(smooth, outtype, vmax_str)))
F.recenter(**big_recen)
F.save(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_withcontours.pdf'.format(smooth, outtype, vmax_str)))
log.info(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_withcontours.pdf'.format(smooth, outtype, vmax_str)))
F.hide_layer('mask')
F.recenter(**small_recen)
F.save(os.path.join(figurepath, "big_maps", 'lores{0}{1}{2}_tmap_nomask_withcontours.pdf'.format(smooth, outtype, vmax_str)))
F.recenter(**big_recen)
F.save(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_tmap_nomask_withcontours.pdf'.format(smooth, outtype, vmax_str)))
fig7 = pl.figure(7, figsize=figsize)
fig7.clf()
Fsn = aplpy.FITSFigure(peaksn, convention='calabretta', figure=fig7)
Fsn.show_grayscale(vmin=0, vmax=10, stretch='linear', invert=True)
Fsn.add_colorbar()
Fsn.colorbar.set_axis_label_text('Peak S/N')
Fsn.colorbar.set_axis_label_font(size=18)
Fsn.colorbar.set_label_properties(size=16)
Fsn.set_tick_labels_format('d.dd','d.dd')
Fsn.recenter(**big_recen)
Fsn.save(os.path.join(figurepath, "big_maps", 'big_lores{0}{1}{2}_peaksn.pdf'.format(smooth, outtype, vmax_str)))
F.hide_layer('dustcontour')
dusttemperature = '/Users/adam/work/gc/gcmosaic_temp_conv36.fits'
F.show_contour(dusttemperature,
levels=[20,25],
colors=[(0,0,x,0.5) for x in [0.9,0.7,0.6,0.2]], zorder=20)
F.recenter(**small_recen)
F.save(os.path.join(figurepath, "big_maps",'lores{0}{1}{2}_tmap_withtdustcontours.pdf'.format(smooth, outtype, vmax_str)))
F.recenter(**big_recen)
F.save(os.path.join(figurepath, "big_maps",'big_lores{0}{1}{2}_tmap_withtdustcontours.pdf'.format(smooth, outtype, vmax_str)))
log.info(os.path.join(figurepath, "big_maps",'big_lores{0}{1}{2}_tmap_withtdustcontours.pdf'.format(smooth, outtype, vmax_str)))
im = fits.getdata(h2copath+ftemplate.format(smooth))
data = im[np.isfinite(im)]
fig9 = pl.figure(9)
fig9.clf()
ax9 = fig9.gca()
h,l,p = ax9.hist(data, bins=np.linspace(0,300), alpha=0.5)
shape, loc, scale = ss.lognorm.fit(data, floc=0)
# from http://nbviewer.ipython.org/url/xweb.geos.ed.ac.uk/~jsteven5/blog/lognormal_distributions.ipynb
mu = np.log(scale) # Mean of log(X) [but I want mean(x)]
sigma = shape # Standard deviation of log(X)
M = np.exp(mu) # Geometric mean == median
s = np.exp(sigma) # Geometric standard deviation
lnf = ss.lognorm(s=shape, loc=loc, scale=scale)
pdf = lnf.pdf(np.arange(300))
label1 = ("$\sigma_{{\mathrm{{ln}} x}} = {0:0.2f}$\n"
"$\mu_x = {1:0.2f}$\n"
"$\sigma_x = {2:0.2f}$".format(sigma, scale,s))
pm = np.abs(ss.lognorm.interval(0.683, s=shape, loc=0, scale=scale) - scale)
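            # the central 68.3% (~1 sigma) interval of the fitted lognormal,
            # expressed as offsets below and above the median (= scale)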
label2 = ("$x = {0:0.1f}^{{+{1:0.1f}}}_{{-{2:0.1f}}}$\n"
"$\sigma_{{\mathrm{{ln}} x}} = {3:0.1f}$\n"
.format(scale,
pm[1],
pm[0],
sigma,
))
ax9.plot(np.arange(300), pdf*h.max()/pdf.max(), linewidth=4, alpha=0.5,
label=label2)
ax9.legend(loc='best')
ax9.set_xlim(0,300)
fig9.savefig(os.path.join(figurepath, "big_maps",
'histogram_{0}{1}{2}_tmap.pdf'.format(smooth,
outtype, vmax_str)),
bbox_inches='tight')
#F.show_contour('h2co218222_all.fits', levels=[1,7,11,20,38], colors=['g']*5, smooth=1, zorder=5)
#F.show_contour(datapath+'APEX_H2CO_merge_high_smooth_noise.fits', levels=[0.05,0.1], colors=['#0000FF']*2, zorder=3, convention='calabretta')
#F.show_contour(datapath+'APEX_H2CO_merge_high_nhits.fits', levels=[9], colors=['#0000FF']*2, zorder=3, convention='calabretta',smooth=3)
#F.show_regions('2014_expansion_targets_simpler.reg')
#F.save('CMZ_H2CO_observed_planned.pdf')
#F.show_rgb(background, wcs=wcs)
#F.save('CMZ_H2CO_observed_planned_colorful.pdf')
fig = pl.figure(5, figsize=figsize)
fig.clf()
F2 = aplpy.FITSFigure(dusttemperature, convention='calabretta', figure=fig)
F2.show_colorscale(cmap=pl.cm.hot, vmin=10, vmax=40)
F2.add_colorbar()
F2.show_contour(h2copath+'H2CO_321220_to_303202_smooth_bl_integ_temperature.fits',
convention='calabretta',
levels=[30,75,100,150],
cmap=pl.cm.BuGn)
F2.recenter(**small_recen)
F2.show_markers(sgrb2x, sgrb2y, color='k', facecolor='k', s=250,
edgecolor='k', alpha=0.9)
F2.save(os.path.join(figurepath, "big_maps",'H2COtemperatureOnDust.pdf'))
F2.recenter(**big_recen)
F2.save(os.path.join(figurepath, "big_maps",'big_H2COtemperatureOnDust.pdf'))
for vmax in (100,200):
fig = pl.figure(6, figsize=figsize)
fig.clf()
F = aplpy.FITSFigure('/Users/adam/work/gc/Tkin-GC.fits.gz',
convention='calabretta',
figure=fig)
cm = copy.copy(cmap)
cm.set_bad((0.5,)*3)
F.show_colorscale(cmap=cm,vmin=vmin,vmax=vmax)
F.set_tick_labels_format('d.dd','d.dd')
F.recenter(**small_recen)
F.add_colorbar()
F.colorbar.set_axis_label_text('T (K)')
F.colorbar.set_axis_label_font(size=18)
F.colorbar.set_label_properties(size=16)
F.show_markers(sgrb2x, sgrb2y, color='k', facecolor='k', s=250,
edgecolor='k', alpha=0.9)
F.save(os.path.join(figurepath, "big_maps", 'ott2014_nh3_tmap_15to{0}.pdf'.format(vmax)))
F.show_colorscale(cmap=cm,vmin=vmin,vmax=80)
F.save(os.path.join(figurepath, "big_maps", 'ott2014_nh3_tmap_15to80.pdf'))
F.show_contour(dustcolumn,
levels=[5], colors=[(0,0,0,0.5)], zorder=15,
alpha=0.5,
linewidths=[0.5],
layer='dustcontour')
F.save(os.path.join(figurepath, "big_maps", 'ott2014_nh3_tmap_15to80_withcontours.pdf'))
F.show_colorscale(cmap=cm,vmin=vmin,vmax=vmax)
F.save(os.path.join(figurepath, "big_maps", 'ott2014_nh3_tmap_15to{0}_withcontours.pdf'.format(vmax)))
| bsd-3-clause |
drammock/mne-python | mne/viz/backends/_abstract.py | 4 | 24939 | """ABCs."""
# Authors: Guillaume Favelier <guillaume.favelier@gmail.com
# Eric Larson <larson.eric.d@gmail.com>
#
# License: Simplified BSD
from abc import ABC, abstractmethod, abstractclassmethod
from contextlib import nullcontext
import warnings
from ..utils import tight_layout
class _AbstractRenderer(ABC):
@abstractclassmethod
def __init__(self, fig=None, size=(600, 600), bgcolor=(0., 0., 0.),
name=None, show=False, shape=(1, 1)):
"""Set up the scene."""
pass
@abstractclassmethod
def subplot(self, x, y):
"""Set the active subplot."""
pass
@abstractclassmethod
def scene(self):
"""Return scene handle."""
pass
@abstractclassmethod
def set_interaction(self, interaction):
"""Set interaction mode."""
pass
@abstractclassmethod
def mesh(self, x, y, z, triangles, color, opacity=1.0, shading=False,
backface_culling=False, scalars=None, colormap=None,
vmin=None, vmax=None, interpolate_before_map=True,
representation='surface', line_width=1., normals=None,
polygon_offset=None, **kwargs):
"""Add a mesh in the scene.
Parameters
----------
x : array, shape (n_vertices,)
The array containing the X component of the vertices.
y : array, shape (n_vertices,)
The array containing the Y component of the vertices.
z : array, shape (n_vertices,)
The array containing the Z component of the vertices.
triangles : array, shape (n_polygons, 3)
The array containing the indices of the polygons.
color : tuple | str
The color of the mesh as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
opacity : float
The opacity of the mesh.
shading : bool
If True, enable the mesh shading.
backface_culling : bool
If True, enable backface culling on the mesh.
scalars : ndarray, shape (n_vertices,)
            The scalar values associated with the vertices.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
interpolate_before_map :
Enabling makes for a smoother scalars display. Default is True.
When False, OpenGL will interpolate the mapped colors which can
            result in showing colors that are not present in the color map.
representation : str
The representation of the mesh: either 'surface' or 'wireframe'.
line_width : int
The width of the lines when representation='wireframe'.
normals : array, shape (n_vertices, 3)
The array containing the normal of each vertex.
polygon_offset : float
If not None, the factor used to resolve coincident topology.
kwargs : args
The arguments to pass to triangular_mesh
Returns
-------
surface :
Handle of the mesh in the scene.
"""
pass
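    # A hypothetical call on a concrete renderer subclass might look like
    # (the variable names are illustrative, not defined in this module):
    #     renderer.mesh(x=verts[:, 0], y=verts[:, 1], z=verts[:, 2],
    #                   triangles=faces, color='white', opacity=0.8,
    #                   scalars=curvature, colormap='coolwarm')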
@abstractclassmethod
def contour(self, surface, scalars, contours, width=1.0, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, kind='line', color=None):
"""Add a contour in the scene.
Parameters
----------
surface : surface object
The mesh to use as support for contour.
scalars : ndarray, shape (n_vertices,)
            The scalar values associated with the vertices.
contours : int | list
Specifying a list of values will only give the requested contours.
width : float
The width of the lines or radius of the tubes.
opacity : float
The opacity of the contour.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
normalized_colormap : bool
Specify if the values of the colormap are between 0 and 1.
kind : 'line' | 'tube'
The type of the primitives to use to display the contours.
color :
The color of the mesh as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def surface(self, surface, color=None, opacity=1.0,
vmin=None, vmax=None, colormap=None,
normalized_colormap=False, scalars=None,
backface_culling=False, polygon_offset=None):
"""Add a surface in the scene.
Parameters
----------
surface : surface object
The information describing the surface.
color : tuple | str
The color of the surface as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
opacity : float
The opacity of the surface.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
scalars : ndarray, shape (n_vertices,)
            The scalar values associated with the vertices.
backface_culling : bool
If True, enable backface culling on the surface.
polygon_offset : float
If not None, the factor used to resolve coincident topology.
"""
pass
@abstractclassmethod
def sphere(self, center, color, scale, opacity=1.0,
resolution=8, backface_culling=False,
radius=None):
"""Add sphere in the scene.
Parameters
----------
center : ndarray, shape(n_center, 3)
The list of centers to use for the sphere(s).
color : tuple | str
The color of the sphere as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scale : float
The scaling applied to the spheres. The given value specifies
the maximum size in drawing units.
opacity : float
The opacity of the sphere(s).
resolution : int
The resolution of the sphere created. This is the number
of divisions along theta and phi.
backface_culling : bool
If True, enable backface culling on the sphere(s).
radius : float | None
Replace the glyph scaling by a fixed radius value for each
sphere (not supported by mayavi).
"""
pass
@abstractclassmethod
def tube(self, origin, destination, radius=0.001, color='white',
scalars=None, vmin=None, vmax=None, colormap='RdBu',
normalized_colormap=False, reverse_lut=False):
"""Add tube in the scene.
Parameters
----------
origin : array, shape(n_lines, 3)
The coordinates of the first end of the tube(s).
destination : array, shape(n_lines, 3)
The coordinates of the other end of the tube(s).
radius : float
The radius of the tube(s).
color : tuple | str
The color of the tube as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scalars : array, shape (n_quivers,) | None
The optional scalar data to use.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
colormap :
The colormap to use.
reverse_lut : bool
If True, reverse the lookup table.
Returns
-------
surface :
Handle of the tube in the scene.
"""
pass
@abstractclassmethod
def quiver3d(self, x, y, z, u, v, w, color, scale, mode, resolution=8,
glyph_height=None, glyph_center=None, glyph_resolution=None,
opacity=1.0, scale_mode='none', scalars=None,
backface_culling=False, colormap=None, vmin=None, vmax=None,
line_width=2., name=None):
"""Add quiver3d in the scene.
Parameters
----------
x : array, shape (n_quivers,)
The X component of the position of the quiver.
y : array, shape (n_quivers,)
The Y component of the position of the quiver.
z : array, shape (n_quivers,)
The Z component of the position of the quiver.
u : array, shape (n_quivers,)
The last X component of the quiver.
v : array, shape (n_quivers,)
The last Y component of the quiver.
w : array, shape (n_quivers,)
The last Z component of the quiver.
color : tuple | str
The color of the quiver as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
scale : float
The scaling applied to the glyphs. The size of the glyph
is by default calculated from the inter-glyph spacing.
The given value specifies the maximum glyph size in drawing units.
mode : 'arrow', 'cone' or 'cylinder'
The type of the quiver.
resolution : int
The resolution of the glyph created. Depending on the type of
glyph, it represents the number of divisions in its geometric
representation.
glyph_height : float
The height of the glyph used with the quiver.
glyph_center : tuple
The center of the glyph used with the quiver: (x, y, z).
glyph_resolution : float
The resolution of the glyph used with the quiver.
opacity : float
The opacity of the quiver.
scale_mode : 'vector', 'scalar' or 'none'
The scaling mode for the glyph.
scalars : array, shape (n_quivers,) | None
The optional scalar data to use.
backface_culling : bool
If True, enable backface culling on the quiver.
colormap :
The colormap to use.
vmin : float | None
vmin is used to scale the colormap.
If None, the min of the data will be used
vmax : float | None
vmax is used to scale the colormap.
If None, the max of the data will be used
line_width : float
The width of the 2d arrows.
"""
pass
@abstractclassmethod
def text2d(self, x_window, y_window, text, size=14, color='white'):
"""Add 2d text in the scene.
Parameters
----------
        x_window : float
            The X component to use as position of the text in the
            window coordinates system (window_width, window_height).
        y_window : float
            The Y component to use as position of the text in the
            window coordinates system (window_width, window_height).
text : str
The content of the text.
size : int
The size of the font.
color : tuple | str
The color of the text as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def text3d(self, x, y, z, text, width, color='white'):
"""Add 2d text in the scene.
Parameters
----------
x : float
The X component to use as position of the text.
y : float
The Y component to use as position of the text.
z : float
The Z component to use as position of the text.
text : str
The content of the text.
width : float
The width of the text.
color : tuple | str
The color of the text as a tuple (red, green, blue) of float
values between 0 and 1 or a valid color name (i.e. 'white'
or 'w').
"""
pass
@abstractclassmethod
def scalarbar(self, source, color="white", title=None, n_labels=4,
bgcolor=None):
"""Add a scalar bar in the scene.
Parameters
----------
source :
The object of the scene used for the colormap.
color :
The color of the label text.
title : str | None
The title of the scalar bar.
n_labels : int | None
The number of labels to display on the scalar bar.
bgcolor :
The color of the background when there is transparency.
"""
pass
@abstractclassmethod
def show(self):
"""Render the scene."""
pass
@abstractclassmethod
def close(self):
"""Close the scene."""
pass
@abstractclassmethod
def set_camera(self, azimuth=None, elevation=None, distance=None,
focalpoint=None, roll=None, reset_camera=True):
"""Configure the camera of the scene.
Parameters
----------
azimuth : float
The azimuthal angle of the camera.
elevation : float
The zenith angle of the camera.
distance : float
The distance to the focal point.
focalpoint : tuple
The focal point of the camera: (x, y, z).
roll : float
The rotation of the camera along its axis.
reset_camera : bool
If True, reset the camera properties beforehand.
"""
pass
@abstractclassmethod
def reset_camera(self):
"""Reset the camera properties."""
pass
@abstractclassmethod
def screenshot(self, mode='rgb', filename=None):
"""Take a screenshot of the scene.
Parameters
----------
mode : str
Either 'rgb' or 'rgba' for values to return.
Default is 'rgb'.
filename : str | None
If not None, save the figure to the disk.
"""
pass
@abstractclassmethod
def project(self, xyz, ch_names):
"""Convert 3d points to a 2d perspective.
Parameters
----------
xyz : array, shape(n_points, 3)
The points to project.
        ch_names : array, shape(n_points,)
Names of the channels.
"""
pass
@abstractclassmethod
def enable_depth_peeling(self):
"""Enable depth peeling."""
pass
@abstractclassmethod
def remove_mesh(self, mesh_data):
"""Remove the given mesh from the scene.
Parameters
----------
mesh_data : tuple | Surface
The mesh to remove.
"""
pass
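# Sketch of how a concrete backend implementing the abstract renderer above
# might be driven. The `renderer` instance and the `points` array are assumed
# to be provided by the caller; nothing here is part of the abstract API.
def _example_scene(renderer, points):
    """Draw a few primitives and return a screenshot (illustration only)."""
    renderer.sphere(center=points, color='red', scale=5.0, opacity=0.8)
    renderer.tube(origin=points[:-1], destination=points[1:], radius=0.5,
                  color='white')
    renderer.text2d(x_window=0.05, y_window=0.95, text='example scene')
    renderer.set_camera(azimuth=40., elevation=60., distance=400.)
    renderer.show()
    return renderer.screenshot(mode='rgb')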
class _AbstractToolBar(ABC):
@abstractmethod
def _tool_bar_load_icons(self):
pass
@abstractmethod
def _tool_bar_initialize(self, name="default", window=None):
pass
@abstractmethod
def _tool_bar_add_button(self, name, desc, func, icon_name=None,
shortcut=None):
pass
@abstractmethod
def _tool_bar_update_button_icon(self, name, icon_name):
pass
@abstractmethod
def _tool_bar_add_text(self, name, value, placeholder):
pass
@abstractmethod
def _tool_bar_add_spacer(self):
pass
@abstractmethod
def _tool_bar_add_file_button(self, name, desc, func, shortcut=None):
pass
@abstractmethod
def _tool_bar_add_play_button(self, name, desc, func, shortcut=None):
pass
@abstractmethod
def _tool_bar_set_theme(self, theme):
pass
class _AbstractDock(ABC):
@abstractmethod
def _dock_initialize(self, window=None):
pass
@abstractmethod
def _dock_finalize(self):
pass
@abstractmethod
def _dock_show(self):
pass
@abstractmethod
def _dock_hide(self):
pass
@abstractmethod
def _dock_add_stretch(self, layout):
pass
@abstractmethod
def _dock_add_layout(self, vertical=True):
pass
@abstractmethod
def _dock_add_label(self, value, align=False, layout=None):
pass
@abstractmethod
def _dock_add_button(self, name, callback, layout=None):
pass
@abstractmethod
def _dock_named_layout(self, name, layout, compact):
pass
@abstractmethod
def _dock_add_slider(self, name, value, rng, callback,
compact=True, double=False, layout=None):
pass
@abstractmethod
def _dock_add_spin_box(self, name, value, rng, callback,
compact=True, double=True, layout=None):
pass
@abstractmethod
def _dock_add_combo_box(self, name, value, rng,
callback, compact=True, layout=None):
pass
@abstractmethod
def _dock_add_group_box(self, name, layout=None):
pass
class _AbstractMenuBar(ABC):
@abstractmethod
def _menu_initialize(self, window=None):
pass
@abstractmethod
def _menu_add_submenu(self, name, desc):
pass
@abstractmethod
def _menu_add_button(self, menu_name, name, desc, func):
pass
class _AbstractStatusBar(ABC):
@abstractmethod
def _status_bar_initialize(self, window=None):
pass
@abstractmethod
def _status_bar_add_label(self, value, stretch=0):
pass
@abstractmethod
def _status_bar_add_progress_bar(self, stretch=0):
pass
@abstractmethod
def _status_bar_update(self):
pass
class _AbstractPlayback(ABC):
@abstractmethod
def _playback_initialize(self, func, timeout, value, rng,
time_widget, play_widget):
pass
class _AbstractLayout(ABC):
@abstractmethod
def _layout_initialize(self, max_width):
pass
@abstractmethod
def _layout_add_widget(self, layout, widget, stretch=0):
pass
class _AbstractWidget(ABC):
def __init__(self, widget):
self._widget = widget
@property
def widget(self):
return self._widget
@abstractmethod
def set_value(self, value):
pass
@abstractmethod
def get_value(self):
pass
@abstractmethod
def set_range(self, rng):
pass
@abstractmethod
def show(self):
pass
@abstractmethod
def hide(self):
pass
@abstractmethod
def update(self, repaint=True):
pass
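# A minimal sketch of a concrete widget satisfying the _AbstractWidget
# interface above. The dict-based "widget" is purely hypothetical and only
# illustrates which methods a backend subclass has to provide.
class _DictWidget(_AbstractWidget):
    def __init__(self):
        super().__init__(widget=dict(value=None, rng=None, visible=True))
    def set_value(self, value):
        self._widget['value'] = value
    def get_value(self):
        return self._widget['value']
    def set_range(self, rng):
        self._widget['rng'] = tuple(rng)
    def show(self):
        self._widget['visible'] = True
    def hide(self):
        self._widget['visible'] = False
    def update(self, repaint=True):
        pass  # nothing to repaint for this toy backend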
class _AbstractMplInterface(ABC):
@abstractmethod
def _mpl_initialize():
pass
class _AbstractMplCanvas(ABC):
def __init__(self, width, height, dpi):
"""Initialize the MplCanvas."""
from matplotlib import rc_context
from matplotlib.figure import Figure
# prefer constrained layout here but live with tight_layout otherwise
        context = nullcontext()
self._extra_events = ('resize',)
try:
context = rc_context({'figure.constrained_layout.use': True})
self._extra_events = ()
except KeyError:
pass
with context:
self.fig = Figure(figsize=(width, height), dpi=dpi)
self.axes = self.fig.add_subplot(111)
self.axes.set(xlabel='Time (sec)', ylabel='Activation (AU)')
self.manager = None
def _connect(self):
for event in ('button_press', 'motion_notify') + self._extra_events:
self.canvas.mpl_connect(
event + '_event', getattr(self, 'on_' + event))
def plot(self, x, y, label, update=True, **kwargs):
"""Plot a curve."""
line, = self.axes.plot(
x, y, label=label, **kwargs)
if update:
self.update_plot()
return line
def plot_time_line(self, x, label, update=True, **kwargs):
"""Plot the vertical line."""
line = self.axes.axvline(x, label=label, **kwargs)
if update:
self.update_plot()
return line
def update_plot(self):
"""Update the plot."""
with warnings.catch_warnings(record=True):
warnings.filterwarnings('ignore', 'constrained_layout')
self.canvas.draw()
def set_color(self, bg_color, fg_color):
"""Set the widget colors."""
self.axes.set_facecolor(bg_color)
self.axes.xaxis.label.set_color(fg_color)
self.axes.yaxis.label.set_color(fg_color)
self.axes.spines['top'].set_color(fg_color)
self.axes.spines['bottom'].set_color(fg_color)
self.axes.spines['left'].set_color(fg_color)
self.axes.spines['right'].set_color(fg_color)
self.axes.tick_params(axis='x', colors=fg_color)
self.axes.tick_params(axis='y', colors=fg_color)
self.fig.patch.set_facecolor(bg_color)
def show(self):
"""Show the canvas."""
if self.manager is None:
self.canvas.show()
else:
self.manager.show()
def close(self):
"""Close the canvas."""
self.canvas.close()
def clear(self):
"""Clear internal variables."""
self.close()
self.axes.clear()
self.fig.clear()
self.canvas = None
self.manager = None
def on_resize(self, event):
"""Handle resize events."""
tight_layout(fig=self.axes.figure)
class _AbstractBrainMplCanvas(_AbstractMplCanvas):
def __init__(self, brain, width, height, dpi):
"""Initialize the MplCanvas."""
super().__init__(width, height, dpi)
self.brain = brain
self.time_func = brain.callbacks["time"]
def update_plot(self):
"""Update the plot."""
leg = self.axes.legend(
prop={'family': 'monospace', 'size': 'small'},
framealpha=0.5, handlelength=1.,
facecolor=self.brain._bg_color)
for text in leg.get_texts():
text.set_color(self.brain._fg_color)
super().update_plot()
def on_button_press(self, event):
"""Handle button presses."""
# left click (and maybe drag) in progress in axes
if (event.inaxes != self.axes or
event.button != 1):
return
self.time_func(
event.xdata, update_widget=True, time_as_index=False)
on_motion_notify = on_button_press # for now they can be the same
def clear(self):
"""Clear internal variables."""
super().clear()
self.brain = None
class _AbstractWindow(ABC):
def _window_initialize(self):
self._window = None
self._interactor = None
self._mplcanvas = None
self._show_traces = None
self._separate_canvas = None
self._interactor_fraction = None
@abstractmethod
def _window_close_connect(self, func):
pass
@abstractmethod
def _window_get_dpi(self):
pass
@abstractmethod
def _window_get_size(self):
pass
def _window_get_mplcanvas_size(self, fraction):
ratio = (1 - fraction) / fraction
dpi = self._window_get_dpi()
w, h = self._window_get_size()
h /= ratio
return (w / dpi, h / dpi)
@abstractmethod
def _window_get_simple_canvas(self, width, height, dpi):
pass
@abstractmethod
def _window_get_mplcanvas(self, brain, interactor_fraction, show_traces,
separate_canvas):
pass
@abstractmethod
def _window_adjust_mplcanvas_layout(self):
pass
@abstractmethod
def _window_get_cursor(self):
pass
@abstractmethod
def _window_set_cursor(self, cursor):
pass
@abstractmethod
def _window_new_cursor(self, name):
pass
@abstractmethod
def _window_ensure_minimum_sizes(self):
pass
@abstractmethod
def _window_set_theme(self, theme):
pass
| bsd-3-clause |
RPGroup-PBoC/gist_pboc_2017 | code/inclass/phase_portrait_in_class.py | 1 | 1286 | # Phase portrait of a two-gene mutual-repression circuit (in-class script).
import numpy as np
import matplotlib.pyplot as plt
import seaborn
plt.close('all')
# Define the parameters
r = 20 # the production rate
gamma = 1 / 30 # the degradation rate
k = 200 # in units of concentration
max_R = 1000 # maximum number of R1 and R2
R1 = np.linspace(0, max_R, 500)
R2 = np.linspace(0, max_R, 500)
# Compute the nullclines.
R1_null = (r / gamma) / (1 + (R2 / k)**2)
R2_null = (r / gamma) / (1 + (R1 / k)**2)
# Plot the nullclines.
plt.figure()
plt.plot(R1_null, R2, label='dR1/dt = 0')
plt.plot(R1, R2_null, label='dR2/dt = 0')
plt.xlabel('R1')
plt.ylabel('R2')
plt.legend()
plt.show()
# Generate the vector fields
R1_m, R2_m = np.meshgrid(R1[1::30], R2[1::30])
# Compute the derivatives
dR1_dt = -gamma * R1_m + r / (1 + (R2_m / k)**2)
dR2_dt = -gamma * R2_m + r / (1 + (R1_m / k)**2)
# Plot the vector fields!!
plt.quiver(R1_m, R2_m, dR1_dt, dR2_dt)
plt.show()
# Plot the orbit.
time = 200
R1 = 800
R2 = 400
# Loop through time and integrate.
for t in range(time):
dR1 = -gamma * R1 + r / (1 + (R2 / k)**2)
dR2 = -gamma * R2 + r / (1 + (R1 / k)**2)
# Add this change to our current position
R1 = R1 + dR1
# This is the same operation as above..
R2 += dR2
plt.plot(R1, R2, 'ro')
plt.show()
plt.pause(0.05)
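# As a cross-check of the hand-rolled Euler steps above, the same orbit can be
# integrated with scipy's odeint (a sketch; assumes scipy is installed and
# reuses r, gamma, k and the initial condition R1=800, R2=400 from above).
from scipy.integrate import odeint
def repression_derivs(R, t, r, gamma, k):
    R1, R2 = R
    dR1 = -gamma * R1 + r / (1 + (R2 / k)**2)
    dR2 = -gamma * R2 + r / (1 + (R1 / k)**2)
    return [dR1, dR2]
orbit = odeint(repression_derivs, [800, 400], np.arange(time), args=(r, gamma, k))
plt.plot(orbit[:, 0], orbit[:, 1], 'b-')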
| mit |
mhue/scikit-learn | sklearn/cross_decomposition/tests/test_pls.py | 215 | 11427 | import numpy as np
from sklearn.utils.testing import (assert_array_almost_equal,
assert_array_equal, assert_true, assert_raise_message)
from sklearn.datasets import load_linnerud
from sklearn.cross_decomposition import pls_
from nose.tools import assert_equal
def test_pls():
d = load_linnerud()
X = d.data
Y = d.target
# 1) Canonical (symmetric) PLS (PLS 2 blocks canonical mode A)
# ===========================================================
# Compare 2 algo.: nipals vs. svd
# ------------------------------
pls_bynipals = pls_.PLSCanonical(n_components=X.shape[1])
pls_bynipals.fit(X, Y)
pls_bysvd = pls_.PLSCanonical(algorithm="svd", n_components=X.shape[1])
pls_bysvd.fit(X, Y)
# check equalities of loading (up to the sign of the second column)
assert_array_almost_equal(
pls_bynipals.x_loadings_,
np.multiply(pls_bysvd.x_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different x loadings")
assert_array_almost_equal(
pls_bynipals.y_loadings_,
np.multiply(pls_bysvd.y_loadings_, np.array([1, -1, 1])), decimal=5,
err_msg="nipals and svd implementation lead to different y loadings")
# Check PLS properties (with n_components=X.shape[1])
# ---------------------------------------------------
plsca = pls_.PLSCanonical(n_components=X.shape[1])
plsca.fit(X, Y)
T = plsca.x_scores_
P = plsca.x_loadings_
Wx = plsca.x_weights_
U = plsca.y_scores_
Q = plsca.y_loadings_
Wy = plsca.y_weights_
def check_ortho(M, err_msg):
K = np.dot(M.T, M)
assert_array_almost_equal(K, np.diag(np.diag(K)), err_msg=err_msg)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(Wx, "x weights are not orthogonal")
check_ortho(Wy, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(T, "x scores are not orthogonal")
check_ortho(U, "y scores are not orthogonal")
# Check X = TP' and Y = UQ' (with (p == q) components)
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# center scale X, Y
Xc, Yc, x_mean, y_mean, x_std, y_std =\
pls_._center_scale_xy(X.copy(), Y.copy(), scale=True)
assert_array_almost_equal(Xc, np.dot(T, P.T), err_msg="X != TP'")
assert_array_almost_equal(Yc, np.dot(U, Q.T), err_msg="Y != UQ'")
# Check that rotations on training data lead to scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Xr = plsca.transform(X)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
Xr, Yr = plsca.transform(X, Y)
assert_array_almost_equal(Xr, plsca.x_scores_,
err_msg="rotation on X failed")
assert_array_almost_equal(Yr, plsca.y_scores_,
err_msg="rotation on Y failed")
# "Non regression test" on canonical PLS
# --------------------------------------
# The results were checked against the R-package plspm
pls_ca = pls_.PLSCanonical(n_components=X.shape[1])
pls_ca.fit(X, Y)
x_weights = np.array(
[[-0.61330704, 0.25616119, -0.74715187],
[-0.74697144, 0.11930791, 0.65406368],
[-0.25668686, -0.95924297, -0.11817271]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_rotations = np.array(
[[-0.61330704, 0.41591889, -0.62297525],
[-0.74697144, 0.31388326, 0.77368233],
[-0.25668686, -0.89237972, -0.24121788]])
assert_array_almost_equal(pls_ca.x_rotations_, x_rotations)
y_weights = np.array(
[[+0.58989127, 0.7890047, 0.1717553],
[+0.77134053, -0.61351791, 0.16920272],
[-0.23887670, -0.03267062, 0.97050016]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_rotations = np.array(
[[+0.58989127, 0.7168115, 0.30665872],
[+0.77134053, -0.70791757, 0.19786539],
[-0.23887670, -0.00343595, 0.94162826]])
assert_array_almost_equal(pls_ca.y_rotations_, y_rotations)
# 2) Regression PLS (PLS2): "Non regression test"
# ===============================================
    # The results were checked against the R-packages plspm, mixOmics and pls
pls_2 = pls_.PLSRegression(n_components=X.shape[1])
pls_2.fit(X, Y)
x_weights = np.array(
[[-0.61330704, -0.00443647, 0.78983213],
[-0.74697144, -0.32172099, -0.58183269],
[-0.25668686, 0.94682413, -0.19399983]])
assert_array_almost_equal(pls_2.x_weights_, x_weights)
x_loadings = np.array(
[[-0.61470416, -0.24574278, 0.78983213],
[-0.65625755, -0.14396183, -0.58183269],
[-0.51733059, 1.00609417, -0.19399983]])
assert_array_almost_equal(pls_2.x_loadings_, x_loadings)
y_weights = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_weights_, y_weights)
y_loadings = np.array(
[[+0.32456184, 0.29892183, 0.20316322],
[+0.42439636, 0.61970543, 0.19320542],
[-0.13143144, -0.26348971, -0.17092916]])
assert_array_almost_equal(pls_2.y_loadings_, y_loadings)
# 3) Another non-regression test of Canonical PLS on random dataset
# =================================================================
# The results were checked against the R-package plspm
n = 500
p_noise = 10
q_noise = 5
# 2 latents vars:
np.random.seed(11)
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X = np.concatenate(
(X, np.random.normal(size=p_noise * n).reshape(n, p_noise)), axis=1)
Y = np.concatenate(
(Y, np.random.normal(size=q_noise * n).reshape(n, q_noise)), axis=1)
np.random.seed(None)
pls_ca = pls_.PLSCanonical(n_components=3)
pls_ca.fit(X, Y)
x_weights = np.array(
[[0.65803719, 0.19197924, 0.21769083],
[0.7009113, 0.13303969, -0.15376699],
[0.13528197, -0.68636408, 0.13856546],
[0.16854574, -0.66788088, -0.12485304],
[-0.03232333, -0.04189855, 0.40690153],
[0.1148816, -0.09643158, 0.1613305],
[0.04792138, -0.02384992, 0.17175319],
[-0.06781, -0.01666137, -0.18556747],
[-0.00266945, -0.00160224, 0.11893098],
[-0.00849528, -0.07706095, 0.1570547],
[-0.00949471, -0.02964127, 0.34657036],
[-0.03572177, 0.0945091, 0.3414855],
[0.05584937, -0.02028961, -0.57682568],
[0.05744254, -0.01482333, -0.17431274]])
assert_array_almost_equal(pls_ca.x_weights_, x_weights)
x_loadings = np.array(
[[0.65649254, 0.1847647, 0.15270699],
[0.67554234, 0.15237508, -0.09182247],
[0.19219925, -0.67750975, 0.08673128],
[0.2133631, -0.67034809, -0.08835483],
[-0.03178912, -0.06668336, 0.43395268],
[0.15684588, -0.13350241, 0.20578984],
[0.03337736, -0.03807306, 0.09871553],
[-0.06199844, 0.01559854, -0.1881785],
[0.00406146, -0.00587025, 0.16413253],
[-0.00374239, -0.05848466, 0.19140336],
[0.00139214, -0.01033161, 0.32239136],
[-0.05292828, 0.0953533, 0.31916881],
[0.04031924, -0.01961045, -0.65174036],
[0.06172484, -0.06597366, -0.1244497]])
assert_array_almost_equal(pls_ca.x_loadings_, x_loadings)
y_weights = np.array(
[[0.66101097, 0.18672553, 0.22826092],
[0.69347861, 0.18463471, -0.23995597],
[0.14462724, -0.66504085, 0.17082434],
[0.22247955, -0.6932605, -0.09832993],
[0.07035859, 0.00714283, 0.67810124],
[0.07765351, -0.0105204, -0.44108074],
[-0.00917056, 0.04322147, 0.10062478],
[-0.01909512, 0.06182718, 0.28830475],
[0.01756709, 0.04797666, 0.32225745]])
assert_array_almost_equal(pls_ca.y_weights_, y_weights)
y_loadings = np.array(
[[0.68568625, 0.1674376, 0.0969508],
[0.68782064, 0.20375837, -0.1164448],
[0.11712173, -0.68046903, 0.12001505],
[0.17860457, -0.6798319, -0.05089681],
[0.06265739, -0.0277703, 0.74729584],
[0.0914178, 0.00403751, -0.5135078],
[-0.02196918, -0.01377169, 0.09564505],
[-0.03288952, 0.09039729, 0.31858973],
[0.04287624, 0.05254676, 0.27836841]])
assert_array_almost_equal(pls_ca.y_loadings_, y_loadings)
# Orthogonality of weights
# ~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_weights_, "x weights are not orthogonal")
check_ortho(pls_ca.y_weights_, "y weights are not orthogonal")
# Orthogonality of latent scores
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
check_ortho(pls_ca.x_scores_, "x scores are not orthogonal")
check_ortho(pls_ca.y_scores_, "y scores are not orthogonal")
def test_PLSSVD():
    # Let's check the PLSSVD doesn't return all possible components but just
    # the specified number
d = load_linnerud()
X = d.data
Y = d.target
n_components = 2
for clf in [pls_.PLSSVD, pls_.PLSRegression, pls_.PLSCanonical]:
pls = clf(n_components=n_components)
pls.fit(X, Y)
assert_equal(n_components, pls.y_scores_.shape[1])
def test_univariate_pls_regression():
# Ensure 1d Y is correctly interpreted
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSRegression()
# Compare 1d to column vector
model1 = clf.fit(X, Y[:, 0]).coef_
model2 = clf.fit(X, Y[:, :1]).coef_
assert_array_almost_equal(model1, model2)
def test_predict_transform_copy():
# check that the "copy" keyword works
d = load_linnerud()
X = d.data
Y = d.target
clf = pls_.PLSCanonical()
X_copy = X.copy()
Y_copy = Y.copy()
clf.fit(X, Y)
# check that results are identical with copy
assert_array_almost_equal(clf.predict(X), clf.predict(X.copy(), copy=False))
assert_array_almost_equal(clf.transform(X), clf.transform(X.copy(), copy=False))
# check also if passing Y
assert_array_almost_equal(clf.transform(X, Y),
clf.transform(X.copy(), Y.copy(), copy=False))
    # check that the copy keyword didn't modify the original data
# we do want to check exact equality here
assert_array_equal(X_copy, X)
assert_array_equal(Y_copy, Y)
# also check that mean wasn't zero before (to make sure we didn't touch it)
assert_true(np.all(X.mean(axis=0) != 0))
def test_scale():
d = load_linnerud()
X = d.data
Y = d.target
# causes X[:, -1].std() to be zero
X[:, -1] = 1.0
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.set_params(scale=True)
clf.fit(X, Y)
def test_pls_errors():
d = load_linnerud()
X = d.data
Y = d.target
for clf in [pls_.PLSCanonical(), pls_.PLSRegression(),
pls_.PLSSVD()]:
clf.n_components = 4
assert_raise_message(ValueError, "Invalid number of components", clf.fit, X, Y)
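# These tests are collected by the project's test runner; a direct invocation
# (a sketch, given the nose-based assertions used above) would be:
#     nosetests sklearn/cross_decomposition/tests/test_pls.py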
| bsd-3-clause |
OshynSong/scikit-learn | examples/model_selection/grid_search_digits.py | 227 | 2665 | """
============================================================
Parameter estimation using grid search with cross-validation
============================================================
This example shows how a classifier is optimized by cross-validation,
which is done using the :class:`sklearn.grid_search.GridSearchCV` object
on a development set that comprises only half of the available labeled data.
The performance of the selected hyper-parameters and trained model is
then measured on a dedicated evaluation set that was not used during
the model selection step.
More details on tools available for model selection can be found in the
sections on :ref:`cross_validation` and :ref:`grid_search`.
"""
from __future__ import print_function
from sklearn import datasets
from sklearn.cross_validation import train_test_split
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import classification_report
from sklearn.svm import SVC
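# Note: on scikit-learn >= 0.18 the deprecated cross_validation and
# grid_search modules imported above are superseded by sklearn.model_selection,
# which provides the same train_test_split and GridSearchCV interfaces.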
print(__doc__)
# Loading the Digits dataset
digits = datasets.load_digits()
# To apply a classifier on this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.images)
X = digits.images.reshape((n_samples, -1))
y = digits.target
# Split the dataset in two equal parts
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.5, random_state=0)
# Set the parameters by cross-validation
tuned_parameters = [{'kernel': ['rbf'], 'gamma': [1e-3, 1e-4],
'C': [1, 10, 100, 1000]},
{'kernel': ['linear'], 'C': [1, 10, 100, 1000]}]
scores = ['precision', 'recall']
for score in scores:
print("# Tuning hyper-parameters for %s" % score)
print()
clf = GridSearchCV(SVC(C=1), tuned_parameters, cv=5,
scoring='%s_weighted' % score)
clf.fit(X_train, y_train)
print("Best parameters set found on development set:")
print()
print(clf.best_params_)
print()
print("Grid scores on development set:")
print()
for params, mean_score, scores in clf.grid_scores_:
print("%0.3f (+/-%0.03f) for %r"
% (mean_score, scores.std() * 2, params))
print()
print("Detailed classification report:")
print()
print("The model is trained on the full development set.")
print("The scores are computed on the full evaluation set.")
print()
y_true, y_pred = y_test, clf.predict(X_test)
print(classification_report(y_true, y_pred))
print()
# Note the problem is too easy: the hyperparameter plateau is too flat and the
# output model is the same for precision and recall with ties in quality.
| bsd-3-clause |
glouppe/scikit-learn | benchmarks/bench_isotonic.py | 268 | 3046 | """
Benchmarks of isotonic regression performance.
We generate a synthetic dataset of size 10^n, for n in [min, max], and
examine the time taken to run isotonic regression over the dataset.
The timings are then output to stdout, or visualized on a log-log scale
with matplotlib.
This allows the scaling of the algorithm with the problem size to be
visualized and understood.
"""
from __future__ import print_function
import numpy as np
import gc
from datetime import datetime
from sklearn.isotonic import isotonic_regression
from sklearn.utils.bench import total_seconds
import matplotlib.pyplot as plt
import argparse
def generate_perturbed_logarithm_dataset(size):
    return np.random.randint(-50, 50, size=size) \
           + 50. * np.log(1 + np.arange(size))
def generate_logistic_dataset(size):
X = np.sort(np.random.normal(size=size))
return np.random.random(size=size) < 1.0 / (1.0 + np.exp(-X))
DATASET_GENERATORS = {
'perturbed_logarithm': generate_perturbed_logarithm_dataset,
'logistic': generate_logistic_dataset
}
def bench_isotonic_regression(Y):
"""
Runs a single iteration of isotonic regression on the input data,
and reports the total time taken (in seconds).
"""
gc.collect()
tstart = datetime.now()
isotonic_regression(Y)
delta = datetime.now() - tstart
return total_seconds(delta)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description="Isotonic Regression benchmark tool")
parser.add_argument('--iterations', type=int, required=True,
help="Number of iterations to average timings over "
"for each problem size")
parser.add_argument('--log_min_problem_size', type=int, required=True,
help="Base 10 logarithm of the minimum problem size")
parser.add_argument('--log_max_problem_size', type=int, required=True,
help="Base 10 logarithm of the maximum problem size")
parser.add_argument('--show_plot', action='store_true',
help="Plot timing output with matplotlib")
parser.add_argument('--dataset', choices=DATASET_GENERATORS.keys(),
required=True)
args = parser.parse_args()
timings = []
for exponent in range(args.log_min_problem_size,
args.log_max_problem_size):
n = 10 ** exponent
Y = DATASET_GENERATORS[args.dataset](n)
time_per_iteration = \
[bench_isotonic_regression(Y) for i in range(args.iterations)]
timing = (n, np.mean(time_per_iteration))
timings.append(timing)
# If we're not plotting, dump the timing to stdout
if not args.show_plot:
print(n, np.mean(time_per_iteration))
if args.show_plot:
plt.plot(*zip(*timings))
plt.title("Average time taken running isotonic regression")
plt.xlabel('Number of observations')
plt.ylabel('Time (s)')
plt.axis('tight')
plt.loglog()
plt.show()
| bsd-3-clause |
nmartensen/pandas | scripts/file_sizes.py | 7 | 4949 | from __future__ import print_function
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
from pandas import DataFrame
from pandas.util.testing import set_trace
from pandas import compat
dirs = []
names = []
lengths = []
if len(sys.argv) > 1:
loc = sys.argv[1]
else:
loc = '.'
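# Usage sketch: run from the repository root, optionally passing the directory
# to scan (defaults to the current directory), e.g.
#     python scripts/file_sizes.py pandas/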
walked = os.walk(loc)
def _should_count_file(path):
return path.endswith('.py') or path.endswith('.pyx')
def _is_def_line(line):
"""def/cdef/cpdef, but not `cdef class`"""
return (line.endswith(':') and not 'class' in line.split() and
(line.startswith('def ') or
line.startswith('cdef ') or
line.startswith('cpdef ') or
' def ' in line or ' cdef ' in line or ' cpdef ' in line))
class LengthCounter(object):
"""
should add option for subtracting nested function lengths??
"""
def __init__(self, lines):
self.lines = lines
self.pos = 0
self.counts = []
self.n = len(lines)
def get_counts(self):
self.pos = 0
self.counts = []
while self.pos < self.n:
line = self.lines[self.pos]
self.pos += 1
if _is_def_line(line):
level = _get_indent_level(line)
self._count_function(indent_level=level)
return self.counts
def _count_function(self, indent_level=1):
indent = ' ' * indent_level
def _end_of_function(line):
return (line != '' and
not line.startswith(indent) and
not line.startswith('#'))
start_pos = self.pos
while self.pos < self.n:
line = self.lines[self.pos]
if _end_of_function(line):
self._push_count(start_pos)
return
self.pos += 1
if _is_def_line(line):
self._count_function(indent_level=indent_level + 1)
# end of file
self._push_count(start_pos)
def _push_count(self, start_pos):
func_lines = self.lines[start_pos:self.pos]
if len(func_lines) > 300:
set_trace()
# remove blank lines at end
while len(func_lines) > 0 and func_lines[-1] == '':
func_lines = func_lines[:-1]
        # remove docstrings and comments
        clean_lines = []
        in_docstring = False
        for line in func_lines:
            line = line.strip()
            if in_docstring and _is_triplequote(line):
                in_docstring = False
                continue
            if in_docstring:
                continue
            if line.startswith('#'):
                continue
            if _is_triplequote(line):
                in_docstring = True
                continue
            clean_lines.append(line)
        self.counts.append(len(clean_lines))
def _get_indent_level(line):
level = 0
while line.startswith(' ' * level):
level += 1
return level
def _is_triplequote(line):
return line.startswith('"""') or line.startswith("'''")
def _get_file_function_lengths(path):
lines = [x.rstrip() for x in open(path).readlines()]
counter = LengthCounter(lines)
return counter.get_counts()
# def test_get_function_lengths():
text = """
class Foo:
def foo():
def bar():
a = 1
b = 2
c = 3
foo = 'bar'
def x():
a = 1
b = 3
c = 7
pass
"""
expected = [5, 8, 7]
lines = [x.rstrip() for x in text.splitlines()]
counter = LengthCounter(lines)
result = counter.get_counts()
assert(result == expected)
def doit():
for directory, _, files in walked:
print(directory)
for path in files:
if not _should_count_file(path):
continue
full_path = os.path.join(directory, path)
print(full_path)
lines = len(open(full_path).readlines())
dirs.append(directory)
names.append(path)
lengths.append(lines)
result = DataFrame({'dirs': dirs, 'names': names,
'lengths': lengths})
def doit2():
counts = {}
for directory, _, files in walked:
print(directory)
for path in files:
if not _should_count_file(path) or path.startswith('test_'):
continue
full_path = os.path.join(directory, path)
counts[full_path] = _get_file_function_lengths(full_path)
return counts
counts = doit2()
# counts = _get_file_function_lengths('pandas/tests/test_series.py')
all_counts = []
for k, v in compat.iteritems(counts):
all_counts.extend(v)
all_counts = np.array(all_counts)
fig = plt.figure(figsize=(10, 5))
ax = fig.add_subplot(111)
ax.hist(all_counts, bins=100)
n = len(all_counts)
nmore = (all_counts > 50).sum()
ax.set_title('%s function lengths, n=%d' % ('pandas', n))
ax.set_ylabel('N functions')
ax.set_xlabel('Function length')
ax.text(100, 300, '%.3f%% with > 50 lines' % ((n - nmore) / float(n)),
fontsize=18)
plt.show()
| bsd-3-clause |
jakobkolb/MayaSim | mayasim/model/ModelCore.py | 1 | 66303 | from __future__ import print_function
import datetime
import operator
import os
import sys
import traceback
import warnings
from itertools import compress
import networkx as nx
import numpy as np
import pandas
import pkg_resources
import scipy.ndimage as ndimage
import scipy.sparse as sparse
try:
import cPickle as pkl
except ImportError:
import pickle as pkl
if __name__ == "__main__":
from ModelParameters import ModelParameters as Parameters
from f90routines import f90routines
else:
from .f90routines import f90routines
from .ModelParameters import ModelParameters as Parameters
class ModelCore(Parameters):
def __init__(self,
n=30,
output_data_location=None,
debug=False,
output_trajectory=True,
**kwargs):
"""
Instance of the MayaSim model.
Parameters
----------
n: int
number of settlements to initialize,
output_data_location: path_like
string stating the folder path to which the output
files will be writen,
debug: bool
switch for debugging output from model,
output_trajectory: bool
switch for output of trajectory data,
output_settlement_data: bool
switch for output of settlement data,
output_geographic_data: bool
switch for output of geographic data.
"""
# Input/Output settings:
# Set path to static input files
input_data_location = pkg_resources. \
resource_filename('mayasim', 'input_data/')
# Debugging settings
self.debug = debug
        # In debug mode, always print stack for warnings and errors.
def warn_with_traceback(message,
category,
filename,
lineno,
file=None,
line=None):
log = file if hasattr(file, 'write') else sys.stderr
traceback.print_stack(file=log)
log.write(
warnings.formatwarning(message, category, filename, lineno,
line))
if self.debug:
warnings.showwarning = warn_with_traceback
# *******************************************************************
# MODEL PARAMETERS (to be varied)
# *******************************************************************
self.output_trajectory = output_trajectory
        # Settlement and geographic data will be written to files in each
        # time step, whereas trajectory data will be kept in one data
        # structure to be read out when the model run has finished.
        if output_data_location:
# remove file ending
self.output_data_location = output_data_location.rsplit('.', 1)[0]
# create callable output paths
self.settlement_output_path = \
lambda i: self.output_data_location + \
f'settlement_data_{i:03d}.pkl'
self.geographic_output_path = \
lambda i: self.output_data_location + \
f'geographic_data_{i:03d}.pkl'
# set switches for output generation
self.output_geographic_data = True
self.output_settlement_data = True
else:
self.output_geographic_data = False
self.output_settlement_data = False
self.trajectory = []
self.traders_trajectory = []
# *******************************************************************
# MODEL DATA SOURCES
# *******************************************************************
# documentation for TEMPERATURE and PRECIPITATION data can be found
# here: http://www.worldclim.org/formats
# apparently temperature data is given in x*10 format to allow for
# smaller file sizes.
# original version of mayasim divides temperature by 12 though
self.temp = np.load(input_data_location +
'0_RES_432x400_temp.npy') / 12.
# precipitation in mm or liters per square meter
# (comparing the numbers to numbers from Wikipedia suggests
# that it is given per year)
self.precip = np.load(input_data_location + '0_RES_432x400_precip.npy')
# in meters above sea level
self.elev = np.load(input_data_location + '0_RES_432x400_elev.npy')
self.slope = np.load(input_data_location + '0_RES_432x400_slope.npy')
# documentation for SOIL PRODUCTIVITY is given at:
# http://www.fao.org/geonetwork/srv/en/
# main.home?uuid=f7a2b3c0-bdbf-11db-a0f6-000d939bc5d8
# The soil production index considers the suitability
        # of the best adapted crop to each soil's
# condition in an area and makes a weighted average for
# all soils present in a pixel based
# on the formula: 0.9 * VS + 0.6 * S + 0.3 * MS + 0 * NS.
# Values range from 0 (bad) to 6 (good)
self.soilprod = np.load(input_data_location + '0_RES_432x400_soil.npy')
# it also sets soil productivity to 1.5 where the elevation is <= 1
# self.soilprod[self.elev <= 1] = 1.5
# complains because there is nans in elev
for ind, x in np.ndenumerate(self.elev):
if not np.isnan(x):
if x <= 1.:
self.soilprod[ind] = 1.5
# smoothen soil productivity dataset
self.soilprod = ndimage.gaussian_filter(self.soilprod,
sigma=(2, 2),
order=0)
# and set to zero for non land cells
self.soilprod[np.isnan(self.elev)] = 0
# *******************************************************************
# MODEL MAP INITIALIZATION
# *******************************************************************
# dimensions of the map
self.rows, self.columns = self.precip.shape
self.height, self.width = 914., 840. # height and width in km
self.pixel_dim = self.width / self.columns
self.cell_width = self.width / self.columns
self.cell_height = self.height / self.rows
self.land_patches = np.asarray(np.where(np.isfinite(self.elev)))
self.number_of_land_patches = self.land_patches.shape[1]
        # length unit - total map is about 500 km wide
self.area = 516484. / len(self.land_patches[0])
self.elev[:, 0] = np.inf
self.elev[:, -1] = np.inf
self.elev[0, :] = np.inf
self.elev[-1, :] = np.inf
# create a list of the index values i = (x, y) of the land
# patches with finite elevation h
self.list_of_land_patches = [
i for i, h in np.ndenumerate(self.elev)
if np.isfinite(self.elev[i])
]
# initialize soil degradation and population
# gradient (influencing the forest)
# *******************************************************************
# INITIALIZE ECOSYSTEM
# *******************************************************************
# Soil (influencing primary production and agricultural productivity)
self.soil_deg = np.zeros((self.rows, self.columns))
# Forest
self.forest_state = np.ones((self.rows, self.columns), dtype=int)
self.forest_state[np.isnan(self.elev)] = 0
self.forest_memory = np.zeros((self.rows, self.columns), dtype=int)
self.cleared_land_neighbours = np.zeros((self.rows, self.columns),
dtype=int)
# The forest has three states: 3=climax forest,
# 2=secondary regrowth, 1=cleared land.
for i in self.list_of_land_patches:
self.forest_state[i] = 3
# Variables describing total amount of water and water flow
self.water = np.zeros((self.rows, self.columns))
self.flow = np.zeros((self.rows, self.columns))
self.spaciotemporal_precipitation = np.zeros((self.rows, self.columns))
# initialize the trajectories of the water drops
self.x = np.zeros((self.rows, self.columns), dtype="int")
self.y = np.zeros((self.rows, self.columns), dtype="int")
# define relative coordinates of the neighbourhood of a cell
self.neighbourhood = [(i, j) for i in [-1, 0, 1] for j in [-1, 0, 1]]
self.f90neighbourhood = np.asarray(self.neighbourhood).T
# *******************************************************************
# INITIALIZE SOCIETY
# *******************************************************************
# Population gradient (influencing the forest)
self.pop_gradient = np.zeros((self.rows, self.columns))
self.number_settlements = n
# distribute specified number of settlements on the map
        self.settlement_positions = self.land_patches[
            :, np.random.choice(len(self.land_patches[1]), n).astype('int')]
self.age = [0] * n
# demographic variables
self.birth_rate = [self.birth_rate_parameter] * n
self.death_rate = [0.1 + 0.05 * r for r in list(np.random.random(n))]
self.population = list(
np.random.randint(self.min_init_inhabitants,
self.max_init_inhabitants, n).astype(float))
self.mig_rate = [0.] * n
self.out_mig = [0] * n
self.migrants = [0] * n
self.pioneer_set = []
self.failed = 0
# index list for populated and abandoned cities
# used until removal of dead cities is implemented.
self.populated_cities = range(n)
self.dead_cities = []
# agricultural influence
self.number_cells_in_influence = [0] * n
self.area_of_influence = [0.] * n
self.coordinates = np.indices((self.rows, self.columns))
self.cells_in_influence = [None] * n # will be a list of arrays
self.cropped_cells = [None] * n
# for now, cropped cells are only the city positions.
# first cropped cells are added at the first call of
# get_cropped_cells()
for city in self.populated_cities:
self.cropped_cells[city] = [[self.settlement_positions[0, city]],
[self.settlement_positions[1, city]]]
# print(self.cropped_cells[1])
self.occupied_cells = np.zeros((self.rows, self.columns))
self.number_cropped_cells = [0] * n
self.crop_yield = [0.] * n
self.eco_benefit = [0.] * n
self.available = 0
# details of income from ecosystems services
self.s_es_ag = [0.] * n
self.s_es_wf = [0.] * n
self.s_es_fs = [0.] * n
self.s_es_sp = [0.] * n
self.s_es_pg = [0.] * n
self.es_ag = np.zeros((self.rows, self.columns), dtype=float)
self.es_wf = np.zeros((self.rows, self.columns), dtype=float)
self.es_fs = np.zeros((self.rows, self.columns), dtype=float)
self.es_sp = np.zeros((self.rows, self.columns), dtype=float)
self.es_pg = np.zeros((self.rows, self.columns), dtype=float)
# Trade Variables
self.adjacency = np.zeros((n, n))
self.rank = [0] * n
self.degree = [0] * n
self.comp_size = [0] * n
self.centrality = [0] * n
self.trade_income = [0] * n
self.max_cluster_size = 0
# total real income per capita
self.real_income_pc = [0] * n
def _get_run_variables(self):
"""
        Collects all non-callable attributes of the class instance 'self'
        and their current values in a dictionary and returns it.
        Returns:
        --------
        dictionary: dict
            dictionary of all attributes and their values
"""
dictionary = {
attr: getattr(self, attr)
for attr in dir(self)
if not attr.startswith('__') and not callable(getattr(self, attr))
}
return dictionary
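    # Usage sketch (not called by the model itself): the returned dictionary
    # can be pickled next to the other output files, e.g. for an instance `m`:
    #     with open(m.output_data_location + 'run_variables.pkl', 'wb') as f:
    #         pkl.dump(m._get_run_variables(), f)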
def update_precipitation(self, t):
"""
Modulates the initial precip dataset with a 24 timestep period.
Returns a field of rainfall values for each cell.
If veg_rainfall > 0, cleared_land_neighbours decreases rain.
TO DO: The original Model increases specialization every time
rainfall decreases, assuming that trade gets more important to
compensate for agriculture decline
"""
if self.precipitation_modulation:
self.spaciotemporal_precipitation = \
self.precip * (
1 + self.precipitation_amplitude *
self.precipitation_variation[
(np.ceil(t / self.climate_var) % 8).astype(int)]) \
- self.veg_rainfall * self.cleared_land_neighbours
else:
self.spaciotemporal_precipitation = \
self.precip * (1 -
self.veg_rainfall * self.cleared_land_neighbours)
# check if system time is in drought period
drought = False
for drought_time in self.drought_times:
if drought_time[0] < t <= drought_time[1]:
drought = True
# if so, decrease precipitation by factor percentage given by
# drought severity
if drought:
self.spaciotemporal_precipitation *= \
(1. - self.drought_severity / 100.)
def get_waterflow(self):
"""
        waterflow: takes rain as an argument, uses elev, and returns the
        water flow distribution.
        The precip percent parameter reduces the amount of raindrops that
        have to be moved, thereby increasing performance.
f90waterflow takes as arguments:
list of coordinates of land cells (2xN_land)
elevation map in (height x width)
rain_volume per cell map in (height x width)
rain_volume and elevation must have same units: height per cell
neighbourhood offsets
height and width of map as integers,
Number of land cells, N_land
"""
# convert precipitation from mm to meters
# NOTE: I think, this should be 1e-3
# to convert from mm to meters though...
# but 1e-5 is what they do in the original version.
rain_volume = np.nan_to_num(self.spaciotemporal_precipitation * 1e-5)
max_x, max_y = self.rows, self.columns
err, self.flow, self.water = \
f90routines.f90waterflow(self.land_patches,
self.elev,
rain_volume,
self.f90neighbourhood,
max_x,
max_y,
self.number_of_land_patches)
return self.water, self.flow
def forest_evolve(self, npp):
npp_mean = np.nanmean(npp)
# Iterate over all cells repeatedly and regenerate or degenerate
for repeat in range(4):
for i in self.list_of_land_patches:
if not np.isnan(self.elev[i]):
# Forest regenerates faster [slower] (linearly),
# if net primary productivity on the patch
# is above [below] average.
threshold = npp_mean / npp[i]
# Degradation:
# Decrement with probability 0.003
# if there is a settlement around,
# degrade with higher probability
probdec = self.natprobdec * (2 * self.pop_gradient[i] + 1)
if np.random.random() <= probdec:
if self.forest_state[i] == 3:
self.forest_state[i] = 2
self.forest_memory[i] = self.state_change_s2
elif self.forest_state[i] == 2:
self.forest_state[i] = 1
self.forest_memory[i] = 0
# Regeneration:"
# recover if tree = 1 and memory > threshold 1
if (self.forest_state[i] == 1 and self.forest_memory[i] >
self.state_change_s2 * threshold):
self.forest_state[i] = 2
self.forest_memory[i] = self.state_change_s2
# recover if tree = 2 and memory > threshold 2
# and certain number of neighbours are
# climax forest as well
if (self.forest_state[i] == 2 and self.forest_memory[i] >
self.state_change_s3 * threshold):
state_3_neighbours = \
np.sum(self.forest_state[i[0] - 1:i[0] + 2,
i[1] - 1:i[1] + 2] == 3)
if state_3_neighbours > \
self.min_number_of_s3_neighbours:
self.forest_state[i] = 3
# finally, increase memory by one
self.forest_memory[i] += 1
# calculate cleared land neighbours for output:
if self.veg_rainfall > 0:
for i in self.list_of_land_patches:
self.cleared_land_neighbours[i] = \
np.sum(self.forest_state[i[0] - 1:i[0] + 2,
i[1] - 1:i[1] + 2] == 1)
assert not np.any(self.forest_state[~np.isnan(self.elev)] < 1), \
'forest state is smaller than 1 somewhere'
return
def net_primary_prod(self):
"""
        net_primary_prod is the minimum of a quantity
        derived from local temperature and rain.
Why is it rain and not 'surface water'
according to the waterflow model?
"""
# EQUATION ############################################################
npp = 3000 \
* np.minimum(1 - np.exp(-6.64e-4
* self.spaciotemporal_precipitation),
1. / (1 + np.exp(1.315 - (0.119 * self.temp))))
# EQUATION ############################################################
return npp
def get_ag(self, npp, wf):
"""
agricultural productivit is calculated via a
linear additive model from
net primary productivity, soil productivity,
slope, waterflow and soil degradation
of each patch.
"""
# EQUATION ############################################################
return self.a_npp * npp + self.a_sp * self.soilprod \
- self.a_s * self.slope - self.a_wf * wf - self.soil_deg
# EQUATION ############################################################
def get_ecoserv(self, ag, wf):
"""
Ecosystem Services are calculated via a linear
additive model from agricultural productivity (ag),
waterflow through the cell (wf) and forest
        state on the cell (forest) \in [1,3].
        The recent version of mayasim limits the value of
        ecosystem services to 1 < ecoserv < 250; it also proposes
        to include population density (pop_gradient) and precipitation (rain).
"""
# EQUATION ###########################################################
if not self.better_ess:
self.es_ag = self.e_ag * ag
self.es_wf = self.e_wf * wf
self.es_fs = self.e_f * (self.forest_state - 1.)
self.es_sp = self.e_r * self.spaciotemporal_precipitation
self.es_pg = self.e_deg * self.pop_gradient
else:
# change to use forest as proxy for income from agricultural
# productivity. Multiply by 2 to get same per cell levels as
# before
self.es_ag = np.zeros(np.shape(ag))
self.es_wf = self.e_wf * wf
self.es_fs = 2. * self.e_ag * (self.forest_state - 1.) * ag
self.es_sp = self.e_r * self.spaciotemporal_precipitation
self.es_pg = self.e_deg * self.pop_gradient
return (self.es_ag + self.es_wf + self.es_fs + self.es_sp - self.es_pg)
# EQUATION ###########################################################
######################################################################
# The Society
######################################################################
def benefit_cost(self, ag_in):
# Benefit cost assessment
return (self.max_yield *
(1 - self.origin_shift * np.exp(-self.slope_yield * ag_in)))
def get_cells_in_influence(self):
"""
creates a list of cells for each city that are under its influence.
        These are the cells that are closer than population^0.8/60 (which is
        not explained any further... consider changing the denominator to 80
        and the max value to 30, based on eyeballing the results).
"""
# EQUATION ####################################################################
self.area_of_influence = [(x**0.8) / 60. for x in self.population]
self.area_of_influence = [
value if value < 40. else 40. for value in self.area_of_influence
]
# EQUATION ####################################################################
for city in self.populated_cities:
distance = np.sqrt((self.cell_width *
(self.settlement_positions[0][city] -
self.coordinates[0]))**2 +
(self.cell_height *
(self.settlement_positions[1][city] -
self.coordinates[1]))**2)
stencil = distance <= self.area_of_influence[city]
self.cells_in_influence[city] = self.coordinates[:, stencil]
self.number_cells_in_influence = [
len(x[0]) for x in self.cells_in_influence
]
return
def get_cropped_cells(self, bca):
"""
Updates the cropped cells for each city with positive population.
Calculates the utility for each cell (depending on distance from
        the respective city). If population per cropped cell is lower than
min_people_per_cropped_cell, cells are abandoned.
Cells with negative utility are also abandoned.
If population per cropped cell is higher than
max_people_per_cropped_cell, new cells are cropped.
Newly cropped cells are chosen such that they have highest utility
"""
abandoned = 0
sown = 0
# for each settlement: how many cells are currently cropped ?
self.number_cropped_cells = np.array(
[len(x[0]) for x in self.cropped_cells])
# agricultural population density (people per cropped land)
# determines the number of cells that can be cropped.
ag_pop_density = [
p / (self.number_cropped_cells[c] * self.area)
if self.number_cropped_cells[c] > 0 else 0.
for c, p in enumerate(self.population)
]
# occupied_cells is a mask of all occupied cells calculated as the
# unification of the cropped cells of all settlements.
if len(self.cropped_cells) > 0:
occup = np.concatenate(self.cropped_cells, axis=1).astype('int')
if False:
print('population of cities without agriculture:')
print(
np.array(self.population)[self.number_cropped_cells == 0])
print('pt. migration from cities without agriculture:')
print(np.array(self.out_mig)[self.number_cropped_cells == 0])
print('out migration from cities without agriculture:')
print(np.array(self.migrants)[self.number_cropped_cells == 0])
for index in range(len(occup[0])):
self.occupied_cells[occup[0, index], occup[1, index]] = 1
# the age of settlements is increased here.
self.age = [x + 1 for x in self.age]
# for each settlement: which cells to crop ?
# calculate utility first! This can be accelerated, if calculations
# are only done in 40 km radius.
for city in self.populated_cities:
cells = list(
zip(self.cells_in_influence[city][0],
self.cells_in_influence[city][1]))
# EQUATION ########################################################
utility = [
bca[x, y] - self.estab_cost - (self.ag_travel_cost * np.sqrt(
(self.cell_width * (self.settlement_positions[0][city] -
self.coordinates[0][x, y]))**2 +
(self.cell_height * (self.settlement_positions[1][city] -
self.coordinates[1][x, y]))**2)) /
np.sqrt(self.population[city]) for (x, y) in cells
]
# EQUATION ########################################################
available = [
True if self.occupied_cells[x, y] == 0 else False
for (x, y) in cells
]
# jointly sort utilities, availability and cells such that cells
# with highest utility are first.
sorted_utility, sorted_available, sorted_cells = \
list(zip(*sorted(list(zip(utility, available, cells)),
reverse=True)))
# of these sorted lists, sort filter only available cells
available_util = list(
compress(list(sorted_utility), list(sorted_available)))
available_cells = list(
compress(list(sorted_cells), list(sorted_available)))
# save local copy of all cropped cells
cropped_cells = list(zip(*self.cropped_cells[city]))
# select utilities for these cropped cells
cropped_utils = [
utility[cells.index(cell)] if cell in cells else -1
for cell in cropped_cells
]
# sort utilitites and cropped cells to lowest utilities first
city_has_crops = True if len(cropped_cells) > 0 else False
if city_has_crops:
occupied_util, occupied_cells = \
zip(*sorted(list(zip(cropped_utils, cropped_cells))))
# 1.) include new cells if population exceeds a threshold
# calculate number of new cells to crop
number_of_new_cells = np.floor(ag_pop_density[city]
/ self.max_people_per_cropped_cell) \
.astype('int')
# and crop them by selecting cells with positive utility from the
# beginning of the list
for n in range(min([number_of_new_cells, len(available_util)])):
if available_util[n] > 0:
self.occupied_cells[available_cells[n]] = 1
for dim in range(2):
self.cropped_cells[city][dim] \
.append(available_cells[n][dim])
if city_has_crops:
# 2.) abandon cells if population too low
# after cities age > 5 years
if (ag_pop_density[city] < self.min_people_per_cropped_cell
and self.age[city] > 5):
# There are some inconsistencies here. Cells are abandoned,
# if the 'people per cropped land' is lower then a
# threshold for 'people per cropped cells. Then the
# number of cells to abandon is calculated as 30/people
# per cropped land. Why?! (check the original version!)
number_of_lost_cells = np.ceil(
30 / ag_pop_density[city]).astype('int')
# TO DO: recycle utility and cell list to do this faster.
# therefore, filter cropped cells from utility list
# and delete last n cells.
for n in range(
min([number_of_lost_cells,
len(occupied_cells)])):
dropped_cell = occupied_cells[n]
self.occupied_cells[dropped_cell] = 0
for dim in range(2):
self.cropped_cells[city][dim] \
.remove(dropped_cell[dim])
abandoned += 1
# 3.) abandon cells with utility <= 0
# find cells that have negative utility and belong
# to city under consideration,
useless_cropped_cells = [
occupied_cells[i] for i in range(len(occupied_cells))
if occupied_util[i] < 0
and occupied_cells[i] in zip(*self.cropped_cells[city])
]
# and release them.
for useless_cropped_cell in useless_cropped_cells:
self.occupied_cells[useless_cropped_cell] = 0
for dim in range(2):
try:
self.cropped_cells[city][dim] \
.remove(useless_cropped_cell[dim])
except ValueError:
print('ERROR: Useless cell gone already')
abandoned += 1
# Finally, update list of lists containing cropped cells for each city
# with positive population.
self.number_cropped_cells = [
len(self.cropped_cells[city][0])
for city in range(len(self.population))
]
return abandoned, sown
def get_pop_mig(self):
# gives population and out-migration
# print("number of settlements", len(self.population))
# death rate correlates inversely with real income per capita
death_rate_diff = self.max_death_rate - self.min_death_rate
self.death_rate = [
-death_rate_diff * self.real_income_pc[i] + self.max_death_rate
for i in range(len(self.real_income_pc))
]
self.death_rate = list(
np.clip(self.death_rate, self.min_death_rate, self.max_death_rate))
# if population control,
# birth rate negatively correlates with population size
if self.population_control:
birth_rate_diff = self.max_birth_rate - self.min_birth_rate
self.birth_rate = [
-birth_rate_diff / 10000. * value +
self.shift if value > 5000 else self.birth_rate_parameter
for value in self.population
]
# population grows according to effective growth rate
self.population = [
int((1. + self.birth_rate[i] - self.death_rate[i]) * value)
for i, value in enumerate(self.population)
]
self.population = [
value if value > 0 else 0 for value in self.population
]
mig_rate_diffe = self.max_mig_rate - self.min_mig_rate
# outmigration rate also correlates
# inversely with real income per capita
self.mig_rate = [
-mig_rate_diffe * self.real_income_pc[i] + self.max_mig_rate
for i in range(len(self.real_income_pc))
]
self.mig_rate = list(
np.clip(self.mig_rate, self.min_mig_rate, self.max_mig_rate))
self.out_mig = [
int(self.mig_rate[i] * self.population[i])
for i in range(len(self.population))
]
self.out_mig = [value if value > 0 else 0 for value in self.out_mig]
return
# impact of sociosphere on ecosphere
def update_pop_gradient(self):
# pop gradient quantifies the disturbance of the forest by population
self.pop_gradient = np.zeros((self.rows, self.columns))
for city in self.populated_cities:
distance = np.sqrt(self.area * (
(self.settlement_positions[0][city] - self.coordinates[0])**2 +
(self.settlement_positions[1][city] - self.coordinates[1])**2))
# EQUATION ###################################################################
self.pop_gradient[self.cells_in_influence[city][0],
self.cells_in_influence[city][1]] += \
self.population[city] \
/ (300 * (1 + distance[self.cells_in_influence[city][0],
self.cells_in_influence[city][1]]))
# EQUATION ###################################################################
self.pop_gradient[self.pop_gradient > 15] = 15
def evolve_soil_deg(self):
# soil degrades for cropped cells
cropped = np.concatenate(self.cropped_cells, axis=1).astype('int')
self.soil_deg[cropped[0], cropped[1]] += self.deg_rate
self.soil_deg[self.forest_state == 3] -= self.reg_rate
self.soil_deg[self.soil_deg < 0] = 0
def get_rank(self):
# depending on population ranks are assigned
# attention: ranks are reverted with respect to Netlogo MayaSim !
# 1 => 3 ; 2 => 2 ; 3 => 1
self.rank = [
3
if value > self.thresh_rank_3 else 2 if value > self.thresh_rank_2
else 1 if value > self.thresh_rank_1 else 0
for index, value in enumerate(self.population)
]
return
@property
def build_routes(self):
adj = self.adjacency.copy()
adj[adj == -1] = 0
built_links = 0
lost_links = 0
g = nx.from_numpy_matrix(adj, create_using=nx.DiGraph())
self.degree = g.out_degree()
# cities with rank>0 are traders and establish links to neighbours
for city in self.populated_cities:
if self.degree[city] < self.rank[city]:
distances = \
(np.sqrt(self.area * (+ (self.settlement_positions[0][city]
- self.settlement_positions[0]) ** 2
+ (self.settlement_positions[1][city]
- self.settlement_positions[1]) ** 2
)))
if self.rank[city] == 3:
treshold = 31. * (
self.thresh_rank_3 / self.thresh_rank_3 * 0.5 + 1.)
elif self.rank[city] == 2:
treshold = 31. * (
self.thresh_rank_2 / self.thresh_rank_3 * 0.5 + 1.)
elif self.rank[city] == 1:
treshold = 31. * (
self.thresh_rank_1 / self.thresh_rank_3 * 0.5 + 1.)
else:
treshold = 0
                # don't choose yourself as nearest neighbor
distances[city] = 2 * treshold
# collect close enough neighbors and omit those that are
# already connected.
a = distances <= treshold
b = self.adjacency[city] == 0
nearby = np.array(list(map(operator.and_, a, b)))
# if there are traders nearby,
# connect to the one with highest population
if sum(nearby) != 0:
try:
new_partner = np.nanargmax(self.population * nearby)
self.adjacency[city, new_partner] = 1
self.adjacency[new_partner, city] = -1
built_links += 1
except ValueError:
print('ERROR in new partner')
print(np.shape(self.population),
np.shape(self.settlement_positions[0]))
sys.exit(-1)
            # cities that can't maintain their trade links lose them:
elif self.degree[city] > self.rank[city]:
# get neighbors of node
neighbors = g.successors(city)
# find smallest of neighbors
smallest_neighbor = self.population.index(
min([self.population[nb] for nb in neighbors]))
# cut link with him
self.adjacency[city, smallest_neighbor] = 0
self.adjacency[smallest_neighbor, city] = 0
lost_links += 1
return (built_links, lost_links)
def get_comps(self):
# convert adjacency matrix to compressed sparse row format
adjacency_csr = sparse.csr_matrix(np.absolute(self.adjacency))
# extract data vector, row index vector and index pointer vector
a = adjacency_csr.data
# add one to make indexing compatible to fortran
# (where indices start counting with 1)
j_a = adjacency_csr.indices + 1
i_c = adjacency_csr.indptr + 1
# determine length of data vectors
l_a = np.shape(a)[0]
l_ic = np.shape(i_c)[0]
# if data vector is not empty, pass data to fortran routine.
# else, just fill the centrality vector with ones.
if l_a > 0:
tmp_comp_size, tmp_degree = \
f90routines.f90sparsecomponents(i_c, a, j_a,
self.number_settlements,
l_ic, l_a)
self.comp_size, self.degree = list(tmp_comp_size), list(tmp_degree)
elif l_a == 0:
self.comp_size, self.degree = [0] * (l_ic - 1), [0] * (l_ic - 1)
return
def get_centrality(self):
# convert adjacency matrix to compressed sparse row format
adjacency_csr = sparse.csr_matrix(np.absolute(self.adjacency))
# extract data vector, row index vector and index pointer vector
a = adjacency_csr.data
# add one to make indexing compatible to fortran
# (where indices start counting with 1)
j_a = adjacency_csr.indices + 1
i_c = adjacency_csr.indptr + 1
# determine length of data vectors
l_a = np.shape(a)[0]
l_ic = np.shape(i_c)[0]
# print('number of trade links:', sum(a) / 2)
# if data vector is not empty, pass data to fortran routine.
# else, just fill the centrality vector with ones.
if l_a > 0:
tmp_centrality = f90routines \
.f90sparsecentrality(i_c, a, j_a,
self.number_settlements,
l_ic, l_a)
self.centrality = list(tmp_centrality)
elif l_a == 0:
self.centrality = [1] * (l_ic - 1)
return
def get_crop_income(self, bca):
# agricultural benefit of cropping
for city in self.populated_cities:
crops = bca[self.cropped_cells[city][0], self.
cropped_cells[city][1]]
# EQUATION #
if self.crop_income_mode == "mean":
self.crop_yield[city] = self.r_bca_mean \
* np.nanmean(crops[crops > 0])
elif self.crop_income_mode == "sum":
self.crop_yield[city] = self.r_bca_sum \
* np.nansum(crops[crops > 0])
self.crop_yield = [
0 if np.isnan(self.crop_yield[index]) else self.crop_yield[index]
for index in range(len(self.crop_yield))
]
return
def get_eco_income(self, es):
# benefit from ecosystem services of cells in influence
# ##EQUATION###################################################################
for city in self.populated_cities:
if self.eco_income_mode == "mean":
self.eco_benefit[city] = self.r_es_mean \
* np.nanmean(es[self.cells_in_influence[city]])
elif self.eco_income_mode == "sum":
self.eco_benefit[city] = self.r_es_sum \
* np.nansum(es[self.cells_in_influence[city]])
self.s_es_ag[city] = self.r_es_sum \
* np.nansum(self.es_ag[self.cells_in_influence[city]])
self.s_es_wf[city] = self.r_es_sum \
* np.nansum(self.es_wf[self.cells_in_influence[city]])
self.s_es_fs[city] = self.r_es_sum \
* np.nansum(self.es_fs[self.cells_in_influence[city]])
self.s_es_sp[city] = self.r_es_sum \
* np.nansum(self.es_sp[self.cells_in_influence[city]])
self.s_es_pg[city] = self.r_es_sum \
* np.nansum(self.es_pg[self.cells_in_influence[city]])
try:
self.eco_benefit[self.population == 0] = 0
except IndexError:
self.print_variable_lengths()
# ##EQUATION###################################################################
return
def get_trade_income(self):
# ##EQUATION###################################################################
self.trade_income = [
1. / 30. * (1 + self.comp_size[i] / self.centrality[i])**0.9
for i in range(len(self.centrality))
]
self.trade_income = [
self.r_trade if value > 1 else 0 if
(value < 0 or self.degree[index] == 0) else self.r_trade * value
for index, value in enumerate(self.trade_income)
]
# ##EQUATION###################################################################
return
def get_real_income_pc(self):
# combine agricultural, ecosystem service and trade benefit
# EQUATION #
self.real_income_pc = [
(self.crop_yield[index] + self.eco_benefit[index] +
self.trade_income[index]) /
self.population[index] if value > 0 else 0
for index, value in enumerate(self.population)
]
return
def migration(self, es):
# if outmigration rate exceeds threshold, found new settlement
self.migrants = [0] * self.number_settlements
new_settlements = 0
vacant_lands = np.isfinite(es)
influenced_cells = np.concatenate(self.cells_in_influence, axis=1)
vacant_lands[influenced_cells[0], influenced_cells[1]] = 0
vacant_lands = np.asarray(np.where(vacant_lands == 1))
for city in self.populated_cities:
rd = np.random.rand()
if (self.out_mig[city] > 400 and len(vacant_lands[0]) > 0
and np.random.rand() <= 0.5):
mig_pop = self.out_mig[city]
self.migrants[city] = mig_pop
self.population[city] -= mig_pop
self.pioneer_set = \
vacant_lands[:, np.random.choice(len(vacant_lands[0]), 75)]
travel_cost = np.sqrt(
self.area *
((self.settlement_positions[0][city] - self.coordinates[0])
**2 + (self.settlement_positions[1][city] -
self.coordinates[1])**2))
utility = self.mig_ES_pref * es \
+ self.mig_TC_pref * travel_cost
utofpio = utility[self.pioneer_set[0], self.pioneer_set[1]]
new_loc = self.pioneer_set[:, np.nanargmax(utofpio)]
neighbours = \
(np.sqrt(self.area * ((new_loc[0]
- self.settlement_positions[0]) ** 2 +
(new_loc[1]
- self.settlement_positions[1]) ** 2
))) <= 7.5
summe = np.sum(neighbours)
if summe == 0:
self.spawn_city(new_loc[0], new_loc[1], mig_pop)
index = (vacant_lands[0, :] == new_loc[0]) \
& (vacant_lands[1, :] == new_loc[1])
                    vacant_lands = np.delete(vacant_lands,
                                             int(np.where(index)[0]), 1)
new_settlements += 1
return new_settlements
def kill_cities(self):
        # BUG: cities can be added twice
        # if they have neither population nor cropped cells.
        # This might lead to unexpected consequences. See what happens
        # when, after adding all cities, only unique ones are kept.
killed_cities = 0
# kill cities if they have either no crops or no inhabitants:
dead_city_indices = [
i for i in range(len(self.population))
if self.population[i] <= self.min_city_size
]
if self.kill_cities_without_crops:
dead_city_indices += [
i for i in range(len(self.population))
if (len(self.cropped_cells[i][0]) <= 0)
]
# the following expression only keeps the unique entries.
# might solve the problem.
dead_city_indices = list(set(dead_city_indices))
# remove entries from variables
# simple lists that can be deleted elementwise
for index in sorted(dead_city_indices, reverse=True):
self.number_settlements -= 1
self.failed += 1
del self.age[index]
del self.birth_rate[index]
del self.death_rate[index]
del self.population[index]
del self.mig_rate[index]
del self.out_mig[index]
del self.number_cells_in_influence[index]
del self.area_of_influence[index]
del self.number_cropped_cells[index]
del self.crop_yield[index]
del self.eco_benefit[index]
del self.rank[index]
del self.degree[index]
del self.comp_size[index]
del self.centrality[index]
del self.trade_income[index]
del self.real_income_pc[index]
del self.cells_in_influence[index]
del self.cropped_cells[index]
del self.s_es_ag[index]
del self.s_es_wf[index]
del self.s_es_fs[index]
del self.s_es_sp[index]
del self.s_es_pg[index]
del self.migrants[index]
killed_cities += 1
# special cases:
self.settlement_positions = \
np.delete(self.settlement_positions,
dead_city_indices, axis=1)
self.adjacency = \
np.delete(np.delete(self.adjacency,
dead_city_indices, axis=0),
dead_city_indices, axis=1)
# update list of indices for populated and dead cities
# a) update list of populated cities
self.populated_cities = [
index for index, value in enumerate(self.population) if value > 0
]
# b) update list of dead cities
self.dead_cities = [
index for index, value in enumerate(self.population) if value == 0
]
return killed_cities
def spawn_city(self, x, y, mig_pop):
"""
Spawn a new city at given location with
given population and append it to all necessary lists.
Parameters
----------
x: int
x location of new city on map
y: int
y location of new city on map
mig_pop: int
initial population of new city
"""
# extend all variables to include new city
self.number_settlements += 1
self.settlement_positions = np.append(self.settlement_positions,
[[x], [y]], 1)
self.cells_in_influence.append([[x], [y]])
self.cropped_cells.append([[x], [y]])
n = len(self.adjacency)
self.adjacency = np.append(self.adjacency, [[0] * n], 0)
self.adjacency = np.append(self.adjacency, [[0]] * (n + 1), 1)
self.age.append(0)
self.birth_rate.append(self.birth_rate_parameter)
self.death_rate.append(0.1 + 0.05 * np.random.rand())
self.population.append(mig_pop)
self.mig_rate.append(0)
self.out_mig.append(0)
self.number_cells_in_influence.append(0)
self.area_of_influence.append(0)
self.number_cropped_cells.append(1)
self.crop_yield.append(0)
self.eco_benefit.append(0)
self.rank.append(0)
self.degree.append(0)
self.trade_income.append(0)
self.real_income_pc.append(0)
self.s_es_ag.append(0)
self.s_es_wf.append(0)
self.s_es_fs.append(0)
self.s_es_sp.append(0)
self.s_es_pg.append(0)
self.migrants.append(0)
def run(self, t_max=1):
"""
Run the model for a given number of steps.
If no number of steps is given, the model is integrated for one step
Parameters
----------
t_max: int
number of steps to integrate the model
"""
# initialize time step
t = 0
# print update about output state
if self.debug:
print('output of settlement and geodata is {} and {}'.format(
self.output_settlement_data, self.output_geographic_data))
# initialize variables
# net primary productivity
npp = np.zeros((self.rows, self.columns))
# water flow
        wf = np.zeros((self.rows, self.columns))
# agricultural productivity
ag = np.zeros((self.rows, self.columns))
# ecosystem services
es = np.zeros((self.rows, self.columns))
# benefit cost map for agriculture
bca = np.zeros((self.rows, self.columns))
self.init_output()
while t <= t_max:
t += 1
if self.debug:
print(f"time = {t}, population = {sum(self.population)}")
# evolve subselfs
# ecosystem
self.update_precipitation(t)
npp = self.net_primary_prod()
self.forest_evolve(npp)
# this is curious: only waterflow is used,
# water level is abandoned.
wf = self.get_waterflow()[1]
ag = self.get_ag(npp, wf)
es = self.get_ecoserv(ag, wf)
bca = self.benefit_cost(ag)
# society
if len(self.population) > 0:
self.get_cells_in_influence()
abandoned, sown = self.get_cropped_cells(bca)
self.get_crop_income(bca)
self.get_eco_income(es)
self.evolve_soil_deg()
self.update_pop_gradient()
self.get_rank()
(built, lost) = self.build_routes
self.get_comps()
self.get_centrality()
self.get_trade_income()
self.get_real_income_pc()
self.get_pop_mig()
new_settlements = self.migration(es)
killed_settlements = self.kill_cities()
else:
                abandoned = sown = built = lost = 0
                new_settlements = killed_settlements = 0
self.step_output(t, npp, wf, ag, es, bca, abandoned, sown, built,
lost, new_settlements, killed_settlements)
def init_output(self):
"""initializes data output for trajectory, settlements and geography depending on settings"""
if self.output_trajectory:
self.init_trajectory_output()
self.init_traders_trajectory_output()
if self.output_geographic_data or self.output_settlement_data:
# If output data location is needed and does not exist, create it.
if not os.path.exists(self.output_data_location):
os.makedirs(self.output_data_location)
if not self.output_data_location.endswith('/'):
self.output_data_location += '/'
if self.output_settlement_data:
settlement_init_data = {'shape': (self.rows, self.columns)}
with open(self.settlement_output_path(0), 'wb') as f:
pkl.dump(settlement_init_data, f)
if self.output_geographic_data:
pass
def step_output(self, t, npp, wf, ag, es, bca, abandoned, sown, built,
lost, new_settlements, killed_settlements):
"""
call different data saving routines depending on settings.
Parameters
----------
t: int
Timestep number to append to save file path
npp: numpy array
Net Primary Productivity on cell basis
wf: numpy array
Water flow through cell
ag: numpy array
Agricultural productivity of cell
es: numpy array
Ecosystem services of cell (that are summed and weighted to
calculate ecosystems service income)
bca: numpy array
Benefit cost analysis of agriculture on cell.
        abandoned: int
            Number of cells that were abandoned in the previous time step
        sown: int
            Number of cells that were newly cropped in the previous time step
built : int
number of trade links built in this timestep
lost : int
number of trade links lost in this timestep
new_settlements : int
            number of new settlements that were spawned during the preceding
timestep
killed_settlements : int
            number of settlements that were killed during the preceding
timestep
"""
# append stuff to trajectory
if self.output_trajectory:
self.update_trajectory_output(t, [npp, wf, ag, es, bca], built,
lost, new_settlements,
killed_settlements)
self.update_traders_trajectory_output(t)
# save maps of spatial data
if self.output_geographic_data:
self.save_geographic_output(t, npp, wf, ag, es, bca, abandoned,
sown)
# save data on settlement basis
if self.output_settlement_data:
self.save_settlement_output(t)
def save_settlement_output(self, t):
"""
Organize settlement based data in Pandas Dataframe
and save to file.
Parameters
----------
t: int
Timestep number to append to save file path
"""
        columns = [
'population', 'real income', 'ag income', 'es income',
'trade income', 'x position', 'y position', 'out migration',
'degree'
]
data = [
self.population, self.real_income_pc, self.crop_yield,
self.eco_benefit, self.trade_income,
list(self.settlement_positions[0]),
list(self.settlement_positions[1]), self.migrants,
[self.degree[city] for city in self.populated_cities]
]
data = list(map(list, zip(*data)))
        data_frame = pandas.DataFrame(columns=columns, data=data)
with open(self.settlement_output_path(t), 'wb') as f:
pkl.dump(data_frame, f)
def save_geographic_output(self, t, npp, wf, ag, es, bca, abandoned, sown):
"""
Organize Geographic data in dictionary (for separate layers
of data) and save to file.
Parameters
----------
t: int
Timestep number to append to save file path
npp: numpy array
Net Primary Productivity on cell basis
wf: numpy array
Water flow through cell
ag: numpy array
Agricultural productivity of cell
es: numpy array
Ecosystem services of cell (that are summed and weighted to
calculate ecosystems service income)
bca: numpy array
Benefit cost analysis of agriculture on cell.
        abandoned: int
            Number of cells that were abandoned in the previous time step
        sown: int
            Number of cells that were newly cropped in the previous time step
"""
tmpforest = self.forest_state.copy()
tmpforest[np.isnan(self.elev)] = 0
data = {
'forest': tmpforest,
'waterflow': wf,
'cells in influence': self.cells_in_influence,
'number of cells in influence': self.number_cells_in_influence,
'cropped cells': self.cropped_cells,
'number of cropped cells': self.number_cropped_cells,
'abandoned sown': np.array([abandoned, sown]),
'soil degradation': self.soil_deg,
'population gradient': self.pop_gradient,
'adjacency': self.adjacency,
'x positions': list(self.settlement_positions[0]),
'y positions': list(self.settlement_positions[1]),
'population': self.population,
'elev': self.elev,
'rank': self.rank
}
with open(self.geographic_output_path(t), 'wb') as f:
pkl.dump(data, f)
def init_trajectory_output(self):
self.trajectory.append([
'time', 'total_population', 'max_settlement_population',
'total_migrants', 'total_settlements', 'total_agriculture_cells',
'total_cells_in_influence', 'total_trade_links',
'mean_cluster_size', 'max_cluster_size', 'new_settlements',
'killed_settlements', 'built_trade_links', 'lost_trade_links',
'total_income_agriculture', 'total_income_ecosystem',
'total_income_trade', 'mean_soil_degradation',
'forest_state_3_cells', 'forest_state_2_cells',
'forest_state_1_cells', 'es_income_forest', 'es_income_waterflow',
'es_income_agricultural_productivity', 'es_income_precipitation',
'es_income_pop_density', 'MAP', 'max_npp', 'mean_waterflow',
'max_AG', 'max_ES', 'max_bca', 'max_soil_deg', 'max_pop_grad'
])
def init_traders_trajectory_output(self):
self.traders_trajectory.append([
'time', 'total_population', 'total_migrants', 'total_traders',
'total_settlements', 'total_agriculture_cells',
'total_cells_in_influence', 'total_trade_links',
'total_income_agriculture', 'total_income_ecosystem',
'total_income_trade', 'es_income_forest', 'es_income_waterflow',
'es_income_agricultural_productivity', 'es_income_precipitation',
'es_income_pop_density'
])
def update_trajectory_output(self, time, args, built, lost,
new_settlements, killed_settlements):
# args = [npp, wf, ag, es, bca]
total_population = sum(self.population)
try:
max_population = np.nanmax(self.population)
        except ValueError:
max_population = float('nan')
        total_migrants = sum(self.migrants)
total_settlements = len(self.population)
total_trade_links = sum(self.degree) / 2
income_agriculture = sum(self.crop_yield)
income_ecosystem = sum(self.eco_benefit)
income_trade = sum(self.trade_income)
number_of_components = float(
sum([1 if value > 0 else 0 for value in self.comp_size]))
mean_cluster_size = float(sum(self.comp_size)) / number_of_components \
if number_of_components > 0 else 0
try:
max_cluster_size = max(self.comp_size)
        except ValueError:
max_cluster_size = 0
self.max_cluster_size = max_cluster_size
total_agriculture_cells = sum(self.number_cropped_cells)
total_cells_in_influence = sum(self.number_cells_in_influence)
self.trajectory.append([
            time, total_population, max_population, total_migrants,
total_settlements, total_agriculture_cells,
total_cells_in_influence, total_trade_links, mean_cluster_size,
max_cluster_size, new_settlements, killed_settlements, built, lost,
income_agriculture, income_ecosystem, income_trade,
np.nanmean(self.soil_deg),
np.sum(self.forest_state == 3),
np.sum(self.forest_state == 2),
np.sum(self.forest_state == 1),
np.sum(self.s_es_fs),
np.sum(self.s_es_wf),
np.sum(self.s_es_ag),
np.sum(self.s_es_sp),
np.sum(self.s_es_pg),
np.nanmean(self.spaciotemporal_precipitation),
np.nanmax(args[0]),
np.nanmean(args[1]),
np.nanmax(args[2]),
np.nanmax(args[3]),
np.nanmax(args[4]),
np.nanmax(self.soil_deg),
np.nanmax(self.pop_gradient)
])
def update_traders_trajectory_output(self, time):
traders = np.where(np.array(self.degree) > 0)[0]
total_population = sum([self.population[c] for c in traders])
total_migrants = sum([self.migrants[c] for c in traders])
total_settlements = len(self.population)
total_traders = len(traders)
total_trade_links = sum(self.degree) / 2
income_agriculture = sum([self.crop_yield[c] for c in traders])
income_ecosystem = sum([self.eco_benefit[c] for c in traders])
income_trade = sum([self.trade_income[c] for c in traders])
income_es_fs = sum([self.s_es_fs[c] for c in traders])
income_es_wf = sum([self.s_es_wf[c] for c in traders])
income_es_ag = sum([self.s_es_ag[c] for c in traders])
income_es_sp = sum([self.s_es_sp[c] for c in traders])
income_es_pg = sum([self.s_es_pg[c] for c in traders])
number_of_components = float(
sum([1 if value > 0 else 0 for value in self.comp_size]))
mean_cluster_size = (float(sum(self.comp_size)) / number_of_components
if number_of_components > 0 else 0)
try:
max_cluster_size = max(self.comp_size)
        except ValueError:
max_cluster_size = 0
total_agriculture_cells = \
sum([self.number_cropped_cells[c] for c in traders])
total_cells_in_influence = \
sum([self.number_cells_in_influence[c] for c in traders])
self.traders_trajectory.append([
time, total_population, total_migrants, total_traders,
total_settlements, total_agriculture_cells,
total_cells_in_influence, total_trade_links, income_agriculture,
income_ecosystem, income_trade, income_es_fs, income_es_wf,
income_es_ag, income_es_sp, income_es_pg
])
def get_trajectory(self):
try:
trj = np.array(self.trajectory)
columns = trj[0, :]
df = pandas.DataFrame(trj[1:, :], columns=columns)
        except (IOError, IndexError):
            print('trajectory mode must be turned on')
            df = None
        return df
def get_traders_trajectory(self):
try:
trj = self.traders_trajectory
columns = trj.pop(0)
df = pandas.DataFrame(trj, columns=columns)
        except (IOError, IndexError):
            print('trajectory mode must be turned on')
            df = None
        return df
def run_test(self, timesteps=5):
import shutil
N = 50
# define saving location
comment = "testing_version"
now = datetime.datetime.now()
location = "output_data/" \
+ "Output_" + comment + '/'
if os.path.exists(location):
shutil.rmtree(location)
os.makedirs(location)
# initialize Model
model = ModelCore(n=N,
debug=True,
output_trajectory=True,
output_settlement_data=True,
output_geographic_data=True,
output_data_location=location)
# run Model
model.crop_income_mode = 'sum'
model.r_es_sum = 0.0001
model.r_bca_sum = 0.1
        model.population_control = False
model.run(timesteps)
trj = model.get_trajectory()
plot = trj.plot()
return 1
def print_variable_lengths(self):
for var in dir(self):
if not var.startswith('__') and not callable(getattr(self, var)):
try:
if len(getattr(self, var)) != 432:
print(var, len(getattr(self, var)))
                except TypeError:
pass
if __name__ == "__main__":
import matplotlib.pyplot as plt
import shutil
N = 10
# define saving location
comment = "testing_version"
now = datetime.datetime.now()
location = "output_data/" \
+ "Output_" + comment + '/'
if os.path.exists(location):
shutil.rmtree(location)
# os.makedirs(location)
# initialize Model
model = ModelCore(n=N,
debug=True,
output_trajectory=True,
output_settlement_data=True,
output_geographic_data=True,
output_data_location=location)
# run Model
timesteps = 300
model.crop_income_mode = 'sum'
model.r_es_sum = 0.0001
model.r_bca_sum = 0.25
    model.population_control = False
model.run(timesteps)
trj = model.get_trajectory()
plot = trj[[
        'total_population', 'total_settlements', 'total_migrants'
]].plot()
plt.show()
    plt.savefig(location + 'plot.png')
| gpl-3.0 |
tapomayukh/projects_in_python | rapid_categorization/haptic_map/outlier/hmm_crossvalidation_force.py | 1 | 19066 | # Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import unittest
import ghmm
import ghmmwrapper
import random
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_length')
from data_variable_length_force import Fmat_original
if __name__ == '__main__' or __name__ != '__main__':  # always true: run both when executed directly and when imported
print "Inside outlier HMM model training file"
Fmat = Fmat_original
# Getting mean / covariance
i = 0
number_states = 10
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < 35):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 0:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
#print np.shape(state_1)
#print np.shape(feature_1_final_data[j])
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_rf_force = np.zeros((number_states,1))
sigma_rf = np.zeros((number_states,1))
while (j < number_states):
mu_rf_force[j] = np.mean(feature_1_final_data[j])
sigma_rf[j] = scp.std(feature_1_final_data[j])
j = j+1
i = 35
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < 70):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 35:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_rm_force = np.zeros((number_states,1))
sigma_rm = np.zeros((number_states,1))
while (j < number_states):
mu_rm_force[j] = np.mean(feature_1_final_data[j])
sigma_rm[j] = scp.std(feature_1_final_data[j])
j = j+1
i = 70
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < 105):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 70:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_sf_force = np.zeros((number_states,1))
sigma_sf = np.zeros((number_states,1))
while (j < number_states):
mu_sf_force[j] = np.mean(feature_1_final_data[j])
sigma_sf[j] = scp.std(feature_1_final_data[j])
j = j+1
i = 105
feature_1_final_data = [0.0]*number_states
state_1 = [0.0]
while (i < 140):
data_length = len(Fmat[i])
feature_length = data_length/1
sample_length = feature_length/number_states
Feature_1 = Fmat[i][0:feature_length]
if i == 105:
j = 0
while (j < number_states):
feature_1_final_data[j] = Feature_1[sample_length*j:sample_length*(j+1)]
j=j+1
else:
j = 0
while (j < number_states):
state_1 = Feature_1[sample_length*j:sample_length*(j+1)]
feature_1_final_data[j] = feature_1_final_data[j]+state_1
j=j+1
i = i+1
j = 0
mu_sm_force = np.zeros((number_states,1))
sigma_sm = np.zeros((number_states,1))
while (j < number_states):
mu_sm_force[j] = np.mean(feature_1_final_data[j])
sigma_sm[j] = scp.std(feature_1_final_data[j])
j = j+1
# HMM - Implementation:
# 10 Hidden States
# Max. Force(For now), Contact Area(Not now), and Contact Motion(Not Now) as Continuous Gaussian Observations from each hidden state
# Four HMM-Models for Rigid-Fixed, Soft-Fixed, Rigid-Movable, Soft-Movable
# Transition probabilities obtained as upper diagonal matrix (to be trained using Baum_Welch)
    # New objects are classified according to which model represents them the closest.
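    # Sketch of that classification step (illustrative only; the actual
    # cross-validation loop follows below). A new force profile would be
    # assigned to the model with the highest log-likelihood, e.g.:
    #   test_seq = ghmm.EmissionSequence(F, unknown_force_profile)
    #   scores = [m.loglikelihood(test_seq)
    #             for m in (model_rf, model_rm, model_sf, model_sm)]
    #   predicted_class = np.argmax(scores)
    # where 'unknown_force_profile' is a placeholder for a test sequence.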
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
if number_states == 3:
A = [[0.2, 0.5, 0.3],
[0.0, 0.5, 0.5],
[0.0, 0.0, 1.0]]
elif number_states == 5:
A = [[0.2, 0.35, 0.2, 0.15, 0.1],
[0.0, 0.2, 0.45, 0.25, 0.1],
[0.0, 0.0, 0.2, 0.55, 0.25],
[0.0, 0.0, 0.0, 0.2, 0.8],
[0.0, 0.0, 0.0, 0.0, 1.0]]
elif number_states == 10:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
elif number_states == 15:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.15, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.30, 0.10, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.10, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.15, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.20, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 1.00]]
elif number_states == 20:
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.03, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.09, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.15, 0.04, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.10, 0.05, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.10, 0.05, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.05, 0.05, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.1, 0.30, 0.20, 0.15, 0.10, 0.05, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.1, 0.30, 0.30, 0.10, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.1, 0.40, 0.30, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.20, 0.40, 0.20, 0.10, 0.04, 0.02, 0.02, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.03, 0.02],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.20, 0.40, 0.20, 0.10, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.20, 0.40, 0.20, 0.10, 0.10],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.20, 0.40, 0.20, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.30, 0.50, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.40, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf = [0.0]*number_states
B_rm = [0.0]*number_states
B_sf = [0.0]*number_states
B_sm = [0.0]*number_states
for num_states in range(number_states):
B_rf[num_states] = [mu_rf_force[num_states][0],sigma_rf[num_states][0]]
B_rm[num_states] = [mu_rm_force[num_states][0],sigma_rm[num_states][0]]
B_sf[num_states] = [mu_sf_force[num_states][0],sigma_sf[num_states][0]]
B_sm[num_states] = [mu_sm_force[num_states][0],sigma_sm[num_states][0]]
#print B_sm
#print mu_sm_motion
# pi - initial probabilities per state
if number_states == 3:
pi = [1./3.] * 3
elif number_states == 5:
pi = [0.2] * 5
elif number_states == 10:
pi = [0.1] * 10
elif number_states == 15:
pi = [1./15.] * 15
elif number_states == 20:
pi = [0.05] * 20
# generate RF, RM, SF, SM models from parameters
model_rf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf, pi) # Will be Trained
model_rm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm, pi) # Will be Trained
model_sf = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf, pi) # Will be Trained
model_sm = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm, pi) # Will be Trained
trial_number = 1
rf_final = np.matrix(np.zeros((28,1)))
rm_final = np.matrix(np.zeros((28,1)))
sf_final = np.matrix(np.zeros((28,1)))
sm_final = np.matrix(np.zeros((28,1)))
total_seq = Fmat
for i in range(140):
total_seq[i][:] = sum(total_seq[i][:],[])
while (trial_number < 6):
# For Training
if (trial_number == 1):
j = 5
total_seq_rf = total_seq[1:5]
total_seq_rm = total_seq[36:40]
total_seq_sf = total_seq[71:75]
total_seq_sm = total_seq[106:110]
#print total_seq_rf
while (j < 35):
total_seq_rf = total_seq_rf+total_seq[j+1:j+5]
total_seq_rm = total_seq_rm+total_seq[j+36:j+40]
total_seq_sf = total_seq_sf+total_seq[j+71:j+75]
total_seq_sm = total_seq_sm+total_seq[j+106:j+110]
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = [total_seq[0]]+total_seq[2:5]
total_seq_rm = [total_seq[35]]+total_seq[37:40]
total_seq_sf = [total_seq[70]]+total_seq[72:75]
total_seq_sm = [total_seq[105]]+total_seq[107:110]
#print total_seq_rf
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+0]]+total_seq[j+2:j+5]
total_seq_rm = total_seq_rm+[total_seq[j+35]]+total_seq[j+37:j+40]
total_seq_sf = total_seq_sf+[total_seq[j+70]]+total_seq[j+72:j+75]
total_seq_sm = total_seq_sm+[total_seq[j+105]]+total_seq[j+107:j+110]
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = total_seq[0:2]+total_seq[3:5]
total_seq_rm = total_seq[35:37]+total_seq[38:40]
total_seq_sf = total_seq[70:72]+total_seq[73:75]
total_seq_sm = total_seq[105:107]+total_seq[108:110]
while (j < 35):
total_seq_rf = total_seq_rf+total_seq[j+0:j+2]+total_seq[j+3:j+5]
total_seq_rm = total_seq_rm+total_seq[j+35:j+37]+total_seq[j+38:j+40]
total_seq_sf = total_seq_sf+total_seq[j+70:j+72]+total_seq[j+73:j+75]
total_seq_sm = total_seq_sm+total_seq[j+105:j+107]+total_seq[j+108:j+110]
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = total_seq[0:3]+total_seq[4:5]
total_seq_rm = total_seq[35:38]+total_seq[39:40]
total_seq_sf = total_seq[70:73]+total_seq[74:75]
total_seq_sm = total_seq[105:108]+total_seq[109:110]
while (j < 35):
total_seq_rf = total_seq_rf+total_seq[j+0:j+3]+total_seq[j+4:j+5]
total_seq_rm = total_seq_rm+total_seq[j+35:j+38]+total_seq[j+39:j+40]
total_seq_sf = total_seq_sf+total_seq[j+70:j+73]+total_seq[j+74:j+75]
total_seq_sm = total_seq_sm+total_seq[j+105:j+108]+total_seq[j+109:j+110]
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = total_seq[0:4]
total_seq_rm = total_seq[35:39]
total_seq_sf = total_seq[70:74]
total_seq_sm = total_seq[105:109]
while (j < 35):
total_seq_rf = total_seq_rf+total_seq[j+0:j+4]
total_seq_rm = total_seq_rm+total_seq[j+35:j+39]
total_seq_sf = total_seq_sf+total_seq[j+70:j+74]
total_seq_sm = total_seq_sm+total_seq[j+105:j+109]
j = j+5
train_seq_rf = total_seq_rf
train_seq_rm = total_seq_rm
train_seq_sf = total_seq_sf
train_seq_sm = total_seq_sm
#print train_seq_rf[27]
final_ts_rf = ghmm.SequenceSet(F,train_seq_rf)
final_ts_rm = ghmm.SequenceSet(F,train_seq_rm)
final_ts_sf = ghmm.SequenceSet(F,train_seq_sf)
final_ts_sm = ghmm.SequenceSet(F,train_seq_sm)
model_rf.baumWelch(final_ts_rf)
model_rm.baumWelch(final_ts_rm)
model_sf.baumWelch(final_ts_sf)
model_sm.baumWelch(final_ts_sm)
# For Testing
if (trial_number == 1):
j = 5
total_seq_rf = [total_seq[0]]
total_seq_rm = [total_seq[35]]
total_seq_sf = [total_seq[70]]
total_seq_sm = [total_seq[105]]
#print np.shape(total_seq_rf)
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j]]
total_seq_rm = total_seq_rm+[total_seq[j+35]]
total_seq_sf = total_seq_sf+[total_seq[j+70]]
total_seq_sm = total_seq_sm+[total_seq[j+105]]
j = j+5
if (trial_number == 2):
j = 5
total_seq_rf = [total_seq[1]]
total_seq_rm = [total_seq[36]]
total_seq_sf = [total_seq[71]]
total_seq_sm = [total_seq[106]]
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+1]]
total_seq_rm = total_seq_rm+[total_seq[j+36]]
total_seq_sf = total_seq_sf+[total_seq[j+71]]
total_seq_sm = total_seq_sm+[total_seq[j+106]]
j = j+5
if (trial_number == 3):
j = 5
total_seq_rf = [total_seq[2]]
total_seq_rm = [total_seq[37]]
total_seq_sf = [total_seq[72]]
total_seq_sm = [total_seq[107]]
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+2]]
total_seq_rm = total_seq_rm+[total_seq[j+37]]
total_seq_sf = total_seq_sf+[total_seq[j+72]]
total_seq_sm = total_seq_sm+[total_seq[j+107]]
j = j+5
if (trial_number == 4):
j = 5
total_seq_rf = [total_seq[3]]
total_seq_rm = [total_seq[38]]
total_seq_sf = [total_seq[73]]
total_seq_sm = [total_seq[108]]
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+3]]
total_seq_rm = total_seq_rm+[total_seq[j+38]]
total_seq_sf = total_seq_sf+[total_seq[j+73]]
total_seq_sm = total_seq_sm+[total_seq[j+108]]
j = j+5
if (trial_number == 5):
j = 5
total_seq_rf = [total_seq[4]]
total_seq_rm = [total_seq[39]]
total_seq_sf = [total_seq[74]]
total_seq_sm = [total_seq[109]]
while (j < 35):
total_seq_rf = total_seq_rf+[total_seq[j+4]]
total_seq_rm = total_seq_rm+[total_seq[j+39]]
total_seq_sf = total_seq_sf+[total_seq[j+74]]
total_seq_sm = total_seq_sm+[total_seq[j+109]]
j = j+5
trial_number = trial_number + 1
print "Outlier HMM model trained"
| mit |
dav-stott/phd-thesis | spectra_thesis_ais.py | 1 | 70177 | # -*- coding: utf-8 -*-
"""
Created on Fri Jul 25 08:48:28 2014
@author: david
"""
#*************** IMPORT DEPENDANCIES*******************************************
import numpy as np
#import spec_gdal4 as spg
from osgeo import gdal
import os
import csv
#import h5py
import datetime
import numpy.ma as ma
#from StringIO import StringIO
#import shapely
#import r2py
from osgeo import gdal_array
from osgeo import gdalconst
from osgeo.gdalconst import *
from osgeo import ogr
from osgeo import osr
from scipy.spatial import ConvexHull
from scipy.signal import find_peaks_cwt
from scipy.signal import savgol_filter
from scipy import interpolate
import matplotlib.pyplot as plt
#from shapely.geometry import LineString
################# Functions ###################################################
'''These are helper functions that are not part of any specific class; they
are used by the data import classes for operations such as smoothing'''
def smoothing(perc_out, block_start, block_end, kparam, weight, sparam):
    # fit a univariate smoothing spline to one block of the spectrum
sm_spline_block = perc_out[block_start:block_end,:]
sm_x = sm_spline_block[:,0]
sm_y = sm_spline_block[:,1]
sm_len = sm_x.shape
sm_weights = np.zeros(sm_len)+weight
sm_spline = interpolate.UnivariateSpline(sm_x,
sm_y,
k=kparam,
w=sm_weights,
s=sparam)
spline = sm_spline(sm_x)
spline = np.column_stack((sm_x,spline))
return spline
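# Illustrative usage of the smoothing helper (parameter values are examples
# only): fit a cubic spline to rows 0-200 of a (wavelength, value) array.
#   spline_block = smoothing(perc_out, block_start=0, block_end=200,
#                            kparam=3, weight=1.0, sparam=0.01)
# The result is an (n, 2) array of (wavelength, smoothed value) pairs.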
def interpolate_gaps(array1, array2):
array_end = array1.shape[0]-1
array1_endx = array1[array_end, 0]
#get the start point of the second array
array2_start = array2[0,0]
#get the length of the area to be interpolated
x_len = array2_start-array1_endx+1
#generate x values to use for the array
xvals = np.linspace(array1_endx, array2_start, num=x_len)
#y val for the start of the interpolated area
yval_array1 = array1[array_end,1]
# y val for the end of interpolated area
yval_array2 = array2[0,1]
#stack the values into a new array
xin = np.append(array1_endx, array2_start)
yin = np.append(yval_array1, yval_array2)
#numpy.interp(x, xp, fp)
gap_filling = np.interp(xvals, xin, yin)
filled_x = np.column_stack((xvals, gap_filling))
print (filled_x.shape)
return filled_x
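# Illustrative usage: given two (wavelength, value) segments separated by a
# gap in coverage (e.g. one ending at 1350 nm, the next starting at 1460 nm),
#   filled = interpolate_gaps(array1, array2)
# returns linearly interpolated (wavelength, value) pairs spanning the gap.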
class absorption_feature():
    '''this class is used for the characterisation of spectral absorption features,
and their investigation using continuum removal'''
def __init__(self, spectra, feat_start, feat_end, feat_centre):
self.wl = spectra[:,0]
self.values = spectra[:,1]
print ('CALL TO ABSORPTION FEATURE')
# start of absorption feature
self.feat_start = feat_start
# end of absorption feature
self.feat_end = feat_end
# approximate 'centre' of feature
self.feat_centre = feat_centre
#get the range of the data
self.min_wl = self.wl[0]
self.max_wl = self.wl[-1]
print ('Absorption feature',self.feat_start,self.feat_end)
#define feature name
self.feat_name = str(self.feat_start)+'_'+str(self.feat_end)
'''# if the feature is within the range of the sensor, do stuff
if self.feat_start > self.min_wl and self.feat_end < self.max_wl:
print 'can do stuff with this data'
try:
self.abs_feature()
print ('Absorption feature analysis sussceful')
except:
print ('ERROR analysing absorption feature', self.feat_name)
pass
else:
print ('Cannot define feature: Out of range')'''
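        # Hypothetical usage sketch (the wavelengths are placeholders):
        #   feat = absorption_feature(spectrum, 550, 750, 670)
        #   ft_def, ft_hdr, cont_rem = feat.abs_feature()
        # where 'spectrum' is an (n, 2) array of (wavelength, reflectance).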
########## Methods ##################################################
def abs_feature(self):
print ('Call to abs_feature made')
        # Method to calculate the end points of the absorption feature.
        # Does this using the Qhull algorithm from scipy.spatial.
        # use the initial definition of the absorption feature as a starting point
# get the indices for these
cont_rem_stacked = None
ft_def_stacked = None
start_point = np.argmin(np.abs(self.wl-self.feat_start))
end_point = np.argmin(np.abs(self.wl-self.feat_end))
centre = np.argmin(np.abs(self.wl-self.feat_centre))
#find the index minima of reflectance
minima = np.argmin(self.values[start_point:end_point])+start_point
# if the minima = the start point then the start point is the minima
if minima == start_point:
left = minima
#if not then the left side of the feature is the maixima on the left of the minima
elif minima <= centre:
left = start_point+np.argmax(self.values[start_point:centre])
else:
left = start_point+np.argmax(self.values[start_point:minima])
#right is the maxima on the right of the absorption feature
if minima == end_point:
right = minima
else:
right = minima+np.argmax(self.values[minima:end_point])
# use left and right to create a 2D array of points
hull_in = np.column_stack((self.wl[left:right],self.values[left:right]))
#determine the minima of the points
hull_min = minima-left
if hull_min <= 0:
hull_min=0
#find the wavelength at minima
hull_min_wl = hull_in[hull_min,0]
# define the wavelength ranges we'll use to select simplices
ft_left_wl = hull_min_wl-((hull_min_wl-hull_in[0,0])/2)
ft_right_wl = hull_min_wl+((hull_in[-1,0]-hull_min_wl)/2)
#use scipy.spatial convex hull to determine the convex hull of the points
hull = ConvexHull(hull_in)
# get the simplex tuples from the convex hull
simplexes = hull.simplices
# create an empty list to store simplices potentially related to our feature
feat_pos = []
#iterate through the simplices
for simplex in simplexes:
#extract vertices from simplices
vertex1 = simplex[0]
vertex2 = simplex[1]
#print 'VERT!',hull_in[vertex1,0],hull_in[vertex2,0]
''' We're only interested in the upper hull. Qhull moves counter-
clockwise. Therefore we're only interested in those points where
vertex 1 is greater than vertex 2'''
'''The above may be total bollocks'''
if not vertex1 < vertex2:
'''We then use the wavelength ranges to determine which simplices
relate to our absorption feature'''
if hull_in[vertex2,0] <= ft_left_wl and \
hull_in[vertex2,0] >= self.wl[left] and \
hull_in[vertex1,0] >= ft_right_wl and \
hull_in[vertex1,0] <= self.wl[right]:
# append the vertices to the list
print (hull_in[vertex2,0])
print (hull_in[vertex1,0])
feat_pos.append((vertex2,vertex1))
print ('feat_pos length:',len(feat_pos), type(feat_pos))
#print feat_pos[0],feat_pos[1]
else:
continue
'''We only want one feature here. If there's more than one or less
than one we're not interested as we're probably not dealing with
vegetation'''
# If there's less than one feature...
if len(feat_pos) < 1:
print ('Absorption feature cannot be defined:less than one feature')
ft_def_stacked = None
ft_def_hdr = None
cont_rem_stacked = None
elif len(feat_pos) == 1:
feat_pos=feat_pos[0]
print ('£££££',feat_pos, type(feat_pos))
else:
            # if there's more than one, find the widest one. This is not optimal.
if len(feat_pos) >1:
feat_width = []
for pair in feat_pos:
feat_width.append(pair[1]-pair[0])
print ('feat width:', feat_width)
#feat_width = np.asarray(feat_width)
print (feat_width)
f_max = feat_width.index(max(feat_width))
print (f_max)
feat_pos = feat_pos[f_max]
print (type(feat_pos))
        if feat_pos is not None:
feat_pos = feat_pos[0], feat_pos[1]
print ('DOES MY FEAT_POS CONVERSION WORK?', feat_pos)
print ('Analysing absorption feature')
#slice
feature = hull_in[feat_pos[0]:feat_pos[1],:]
print ('Feature shape',feature.shape,'start:',feature[0,0],'end:',feature[-1,0])
#get the minima in the slice
minima_pos = np.argmin(feature[:,1])
#continuum removal
contrem = self.continuum_removal(feature,minima_pos)
# set up single value outputs
# start of feature
refined_start = feature[0,0]
# end of feature
refined_end = feature[-1,0]
# wavelength at minima
minima_WL = feature[minima_pos,0]
# reflectance at minima
minima_R = feature[minima_pos,1]
# area of absorption feature
feat_area = contrem[4]
# two band normalised index of minima and start of feature
left_tbvi = (refined_start-minima_R)/(refined_start+minima_R)
# two band normalised index of minima and right of feature
right_tbvi = (refined_end-minima_R)/(refined_end+minima_R)
# gradient of the continuum line
cont_gradient = np.mean(np.gradient(contrem[0]))
# area of continuum removed absorption feature
cont_rem_area = contrem[3]
# maxima of continuum removed absorption feature
cont_rem_maxima = np.max(contrem[1])
# wavelength of maxima of continuum removed absorption feature
cont_rem_maxima_wl = feature[np.argmax(contrem[1]),0]
#area of left part of continuum removed feature
cont_area_l = contrem[5]
            if cont_area_l is None:
cont_area_l=0
#are aof right part of continuum removed feature
cont_area_r = contrem[6]
#stack these into a lovely array
ft_def_stacked = np.column_stack((refined_start,
refined_end,
minima_WL,
minima_R,
feat_area,
left_tbvi,
right_tbvi,
cont_gradient,
cont_rem_area,
cont_rem_maxima,
cont_rem_maxima_wl,
cont_area_l,
cont_area_r))
ft_def_hdr = str('"Refined start",'+
'"Refined end",'+
'"Minima Wavelenght",'+
'"Minima Reflectance",'+
'"Feature Area",'+
'"Left TBVI",'+
'"Right TBVI",'+
'"Continuum Gradient",'+
'"Continuum Removed Area",'+
'"Continuum Removed Maxima",'+
'"Continuum Removed Maxima WL",'+
'"Continuum Removed Area Left",'+
'"Continuum Removed Area Right",')
#print ft_def_stacked.shape #save the stacked outputs as hdf
# stack the 2d continuum removed outputs
cont_rem_stacked = np.column_stack((feature[:,0],
feature[:,1],
contrem[0],
contrem[1],
contrem[2]))
print ('CREM', cont_rem_stacked.shape)
return ft_def_stacked, ft_def_hdr, cont_rem_stacked
def continuum_removal(self,feature,minima):
        # method to perform continuum removal
        # pull out endmembers
end_memb = np.vstack((feature[0,:],feature[-1,:]))
#interpolate between the endmembers using x intervals
continuum_line = np.interp(feature[:,0], end_memb[:,0], end_memb[:,1])
#continuum removal
continuum_removed = continuum_line/feature[:,1]
#stack into coord pairs so we can measure the area of the feature
ft_coords = np.vstack((feature,
np.column_stack((feature[:,0],continuum_line))))
#get the area
area = self.area(ft_coords)
#get the area of the continuum removed feature
cont_rem_2d = np.column_stack((feature[:,0],continuum_removed))
cont_r_area = self.area(cont_rem_2d)
#band-normalised by area continuum removal
cont_BNA = (1-(feature[:,1]/continuum_line))/area
#continuum removed area on left of minima
cont_area_left = self.area(cont_rem_2d[0:minima,:])
#continuum removed area on right of minima
cont_area_right = self.area(cont_rem_2d[minima:,:])
return (continuum_line,
continuum_removed,
cont_BNA,
cont_r_area,
area,
cont_area_left,
cont_area_right)
#define area of 2d polygon- using shoelace formula
def area(self, coords2d):
#setup counter
total = 0.0
        # get the number of coordinate pairs
N = coords2d.shape[0]
#iterate through these
for i in range(N):
#define the first coordinate pair
vertex1 = coords2d[i]
#do the second
vertex2 = coords2d[(i+1) % N]
            # accumulate the cross product of this vertex pair into the total
total += vertex1[0]*vertex2[1] - vertex1[1]*vertex2[0]
#return area
return abs(total/2)
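    # Worked example of the shoelace formula above: for a unit square with
    # vertices (0,0), (1,0), (1,1), (0,1) the cross products sum to 2, so
    # abs(total/2) gives an area of 1.0.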
class Indices():
#class that does vegetation indices
def __init__(self,spectra):
self.wl = spectra[:,0]
self.values = spectra[:,1]
self.range = (np.min(self.wl),np.max(self.wl))
        '''The visnir, nir_swir and swir methods below check the range of the
        sensor and run the appropriate indices within that range. The indices
        are all defined as methods of this class'''
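        # Hypothetical usage sketch:
        #   idx = Indices(spectrum)   # (n, 2) array of (wavelength, value)
        #   vis_nir, vis_nir_hdr = idx.visnir()
        #   nir_swir, nir_swir_hdr = idx.nir_swir()
        # Each method returns (None, None) if the sensor range is unsuitable.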
def visnir(self):
# Sensor range VIS-NIR
if self.range[0] >= 350 and \
self.range[0] <= 500 and \
self.range[1] >= 900:
vis_nir = np.column_stack((self.sr700_800(),
self.ndvi694_760(),
self.ndvi695_805(),
self.ndvi700_800(),
self.ndvi705_750(),
self.rdvi(),
self.savi(),
self.msavi2(),
self.msr(),
self.msrvi(),
self.mdvi(),
self.tvi(),
self.mtvi(),
self.mtvi2(),
self.vog1vi(),
self.vog2(),
self.prsi(),
self.privi(),
self.sipi(),
self.mcari(),
self.mcari1(),
self.mcari2(),
self.npci(),
self.npqi(),
self.cri1(),
self.cri2(),
self.ari1(),
self.ari2(),
self.wbi()))
vis_nir_hdr=str('"sr700_800",'+
'"ndvi694_760",'+
'"ndvi695_805",'+
'"ndvi700_800",'+
'"ndvi705_750",'+
'"rdvi",'+
'"savi",'+
'"msavi2",'+
'"msr",'+
'"msrvi",'+
'"mdvi",'+
'"tvi",'+
'"mtvi",'+
'"mtvi2",'+
'"vog1vi",'+
'"vog2",'+
                            '"prsi",'+
'"privi",'+
'"sipi",'+
'"mcari",'+
'"mcari1",'+
'"mcari2",'+
'"npci",'+
'"npqi",'+
'"cri1",'+
'"cri2",'+
'"ari1",'+
'"ari2",'+
'"wbi"')
else:
vis_nir = None
vis_nir_hdr = None
return vis_nir,vis_nir_hdr
#Range NIR-SWIR
def nir_swir(self):
if self.range[0] <= 900 and self.range[1] >=2000:
nir_swir = np.column_stack((self.ndwi(),
self.msi(),
self.ndii()))
nir_swir_hdr = str('"ndwi",'+
'"msi",'+
'"ndii"')
else:
#continue
print ('not nir-swir')
nir_swir=None
nir_swir_hdr=None
return nir_swir, nir_swir_hdr
#range SWIR
def swir(self):
if self.range[1] >=2000:
swir = np.column_stack((self.ndni(),
self.ndli()))
swir_hdr=str('"ndni",'+
'"ndli"')
else:
print ('swir-nir')
swir = None
swir_hdr = None
#continue
return swir,swir_hdr
#||||||||||||||||||||| Methods |||||||||||||||||||||||||||||||||||||||||||||||
# function to run every permutation of the NDVI type index across the Red / IR
# ...... VIS / NIR methods ....
def multi_tbvi (self, red_start=650, red_end=750, ir_start=700, ir_end=850):
# get the indicies of the regions we're going to use.
# we've added default values here, but they can happily be overidden
#start of red
red_l =np.argmin(np.abs(self.wl-red_start))
#end of red
red_r = np.argmin(np.abs(self.wl-red_end))
#start of ir
ir_l = np.argmin(np.abs(self.wl-ir_start))
#end of ir
ir_r = np.argmin(np.abs(self.wl-ir_end))
#slice
left = self.values[red_l:red_r]
right = self.values[ir_l:ir_r]
#set up output
values = np.empty(3)
#set up counter
l = 0
#loop throught the values in the red
for lvalue in left:
l_wl = self.wl[l+red_l]
r = 0
l = l+1
#then calculate the index with each wl in the NIR
for rvalue in right:
value = (rvalue-lvalue)/(rvalue+lvalue)
r_wl = self.wl[r+ir_l]
out = np.column_stack((l_wl,r_wl,value))
values = np.vstack((values, out))
out = None
r = r+1
return values[1:,:]
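    # With the default bands, multi_tbvi() evaluates every two-band normalised
    # index (r - l) / (r + l) for l in 650-750 nm and r in 700-850 nm and
    # returns rows of (left wavelength, right wavelength, index value).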
def sr700_800 (self, x=700, y=800):
index = self.values[np.argmin(np.abs(self.wl-x))]/self.values[np.argmin(np.abs(self.wl-y))]
return index
def ndvi705_750 (self, x=705, y=750):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def ndvi700_800 (self, x=700, y=800):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def ndvi694_760 (self, x=694, y=760):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def ndvi695_805 (self, x=695, y=805):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def npci (self, x=430, y=680):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
def npqi (self, x=415, y=435):
index = (self.values[np.argmin(np.abs(self.wl-y))]-self.values[np.argmin(np.abs(self.wl-x))])/\
(self.values[np.argmin(np.abs(self.wl-y))]+self.values[np.argmin(np.abs(self.wl-x))])
return index
#mSRvi
#= (750-445)/(705+445)
def msrvi (self):
x = 750
y = 445
z = 705
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
msrvi_val = (x_val-y_val)/(z_val+y_val)
return msrvi_val
#Vogelmann Red Edge 1
#740/720
def vog1vi (self):
x = 740
y = 720
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
vog1vi_val = (x_val/y_val)
return vog1vi_val
#Vogelmann Red Edge 2
#= (734-747)/(715+726)
def vog2 (self):
v = 734
x = 747
y = 715
z = 726
v_val = self.values[np.argmin(np.abs(self.wl-v))]
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
vog2_val = (v_val-x_val)/(y_val+z_val)
return vog2_val
#PRI
# (531-570)/(531+570)
def privi (self):
x = 531
y = 570
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
privi_val = (x_val-y_val)/(x_val+y_val)
return privi_val
#SIPI
#(800-445)/(800-680)
def sipi (self):
x = 800
y = 445
z = 680
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        sipi_val = (x_val-y_val)/(x_val-z_val)
return sipi_val
#Water band index
# WBI = 900/700
def wbi (self):
x = 900
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
wbi_val = (x_val/y_val)
return wbi_val
#mNDVI
#= (750-705)/((750+705)-(445))
def mdvi (self):
x = 750
y = 705
z = 445
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
mdvi_val = (x_val-y_val)/((x_val+y_val)-z_val)
return mdvi_val
#Carotenid Reflectance Index
#CRI1 = (1/510)-(1/550)
def cri1 (self):
x = 510
y = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
cri1_val = (1/x_val)-(1/y_val)
return cri1_val
#CRI2 = (1/510)-(1/700)
def cri2 (self):
x = 510
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
cri2_val = (1/x_val)-(1/y_val)
return cri2_val
#Anthocyanin
#ARI1 = (1/550)-(1/700)
def ari1 (self):
x = 550
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ari1_val = (1/x_val)-(1/y_val)
return ari1_val
    #ARI2 = 800*((1/550)-(1/700))
    def ari2 (self):
        x = 550
y = 700
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ari2_val = 800*((1/x_val)-(1/y_val))
return ari2_val
#MSR
#=((800/670)-1)/SQRT(800+670)
def msr (self):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
msr_val = ((x_val/y_val)-1)/(np.sqrt(x_val+y_val))
return msr_val
#SAVI
#= (1+l)(800-670)/(800+670+l)
def savi (self, l=0.5):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
savi_val = ((1+l)*(x_val-y_val))/(x_val+y_val+l)
return savi_val
#MSAVI
    #= ((2*800+1)-sqrt((2*800+1)^2-8*(800-670)))/2
def msavi2 (self):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
msavi2_top1 = (2*x_val+1)
msavi2_top2 = (np.sqrt(np.square(2*x_val+1)-(8*(x_val-y_val))))
msavi2_top = msavi2_top1-msavi2_top2
msavi2_val = msavi2_top/2
return msavi2_val
    #Modified chlorophyll absorption index
#MCARI = ((700-670)-0.2*(700-550))*(700/670)
def mcari (self):
x = 700
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mcari_val = ((x_val-y_val)-(0.2*(x_val-z_val)))*(x_val/y_val)
return mcari_val
#Triangular vegetation index
#TVI 0.5*(120*(750-550))-(200*(670-550))
def tvi (self):
x = 750
y = 550
z = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        tvi_val = 0.5*((120*(x_val-y_val))-(200*(z_val-y_val)))
return tvi_val
    #MCARI1 = 1.2*((2.5*(800-670))-(1.3*(800-550)))
def mcari1 (self):
x = 800
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mcari1_val = 1.2*((2.5*(x_val-y_val))-(1.3*(x_val-z_val)))
return mcari1_val
#MTVI1
#=1.2*((1.2*(800-550))-(2.5(670-550)))
def mtvi (self):
x = 800
y = 550
z = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mtvi_val = 1.2*((1.2*(x_val-y_val))-(2.5*(z_val-y_val)))
return mtvi_val
def mcari2 (self):
x = 800
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mcari2_top = 1.5*((2.5*(x_val-y_val))-(1.3*(x_val-z_val)))
        mcari2_btm = np.sqrt(np.square((2*x_val)+1)-((6*x_val)-(5*(np.sqrt(y_val))))-0.5)
mcari2_val = mcari2_top/mcari2_btm
return mcari2_val
    #MTVI2 = 1.5*((1.2*(800-550))-(2.5*(670-550)))/sqrt((2*800+1)^2-((6*800)-(5*sqrt(670)))-0.5)
def mtvi2 (self):
x = 800
y = 670
z = 550
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        mtvi2_top = 1.5*((1.2*(x_val-z_val))-(2.5*(y_val-z_val)))
        mtvi2_btm = np.sqrt(np.square((2*x_val)+1)-((6*x_val)-(5*(np.sqrt(y_val))))-0.5)
mtvi2_val = mtvi2_top/mtvi2_btm
return mtvi2_val
#Renormalised DVI
#RDVI = (800-670)/sqrt(800+670)
def rdvi (self):
x = 800
y = 670
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
rdvi_val = (x_val-y_val)/np.sqrt(x_val+y_val)
return rdvi_val
#Plant senescance reflectance index
#PRSI = (680-500)/750
def prsi (self):
x = 680
y = 500
z = 750
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
prsi_val = (x_val-y_val)/z_val
return prsi_val
#||||||||||||||||||||||| SWIR methods ||||||||||||||||||||||||||||||||||||
#Cellulose Absorption Index
    #CAI = 0.5*(2000+2200)-2100
def cai (self):
x = 2000
y = 2200
z = 2100
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
z_val = self.values[np.argmin(np.abs(self.wl-z))]
        cai_val = 0.5*(x_val+y_val)-z_val
return cai_val
#Normalized Lignin Difference
#NDLI = (log(1/1754)-log(1/1680))/(log(1/1754)+log(1/1680))
def ndli (self):
x = 1754
        y = 1680
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndli_val = (np.log(1/x_val)-np.log(1/y_val))/(np.log(1/x_val)+np.log(1/y_val))
return ndli_val
#Canopy N
#NDNI =(log(1/1510)-log(1/1680))/(log(1/1510)+log(1/1680))
def ndni (self):
x = 1510
y = 1680
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndni_val = (np.log(1/x_val)-np.log(1/y_val))/(np.log(1/x_val)+np.log(1/y_val))
return ndni_val
#|||||||||||||||||||||| Full spectrum (VIS-SWIR)||||||||||||||||||||||||||||
#Normalised Difference IR index
    #NDII = (819-1649)/(819+1649)
def ndii (self):
x = 819
y = 1649
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndii_val = (x_val-y_val)/(x_val+y_val)
return ndii_val
#Moisture Stress Index
    #MSI = 1599/819
def msi (self):
x = 1599
        y = 819
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
msi_val = (x_val/y_val)
return msi_val
#NDWI
#(857-1241)/(857+1241)
def ndwi (self):
x = 857
y = 1241
x_val = self.values[np.argmin(np.abs(self.wl-x))]
y_val = self.values[np.argmin(np.abs(self.wl-y))]
ndwi_val = (x_val-y_val)/(x_val+y_val)
return ndwi_val
class red_edge():
'''Class to derive red edge position using a number of different methods'''
def __init__(self, spectra):
self.wl = spectra[:,0]
self.values = spectra[:,1]
self.range = (np.min(self.wl),np.max(self.wl))
        '''Again, the method that initialises this class uses the range of the
        sensor to check whether it falls within the red-edge region. If so,
        it will derive the red edge using the different methods and save these
        as separate hdf5 datasets in the appropriate group'''
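        #Assumed usage note (not from the original source): after
        #construction, redge_vals is a 1x3 array holding the (linear,
        #lagrangian, extrapolated) red-edge positions when the sensor covers
        #roughly 670-750 nm, and None otherwise.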
if self.range[0] <= 670 and self.range[1] >=750:
self.redge_vals = np.column_stack((self.redge_linear(),
self.redge_lagrange(),
self.redge_linear_extrapolation()))
print (self.redge_vals)
print (self.redge_linear,self.redge_lagrange,self.redge_linear_extrapolation)
self.redge_hdr = str('"linear",'+
'"lagrange",'+
'"extrapolated"')
else:
print ('red_edge out of range')
self.redge_vals = None
self.redge_hdr = None
##################### METHODS #########################################
#linear- defined by clevers et al 1994:
def redge_linear(self):
r670 = self.values[np.argmin(np.abs(self.wl-670))]
r780 = self.values[np.argmin(np.abs(self.wl-780))]
r700 = self.values[np.argmin(np.abs(self.wl-700))]
r740 = self.values[np.argmin(np.abs(self.wl-740))]
r_edge = (r670+r780)/2
lin_rep =700+40*((r_edge-r700)/(r740-r700))
print ('REDGE_LINEAR',lin_rep)
return lin_rep
#Lagrangian method, after Dawson & Curran 1998
def redge_lagrange(self):
        #select the red edge region of the first derivative and associate this
#with wavelength
x = 680
y = 730
first_diff = np.diff(self.values, 1)
spec_in = np.column_stack((self.wl[1:], first_diff))
l680 = np.argmin(np.abs(spec_in[:,0]-x))
r680 = spec_in[l680,0]
l730 = np.argmin(np.abs(spec_in[:,0]-y))
r730 = spec_in[l730,0]
redge_region_sel = np.where(np.logical_and(spec_in[:,0]>r680-1,
spec_in[:,0]<r730+1))
redge_region = spec_in[redge_region_sel]
#find the maximum first derivative, return index
dif_max = np.argmax(redge_region[:,1], axis=0)
#find band with the max derivative -1, return index
dif_max_less = (np.argmax(redge_region[:,1], axis=0))-1
#find band with the max derivative +1, return index
dif_max_more = (np.argmax(redge_region[:,1], axis=0))+1
if dif_max_more >= redge_region.shape[0]:
dif_max_more = redge_region.shape[0]-1
        #use these indices to slice the array
rmax = redge_region[dif_max]
rmax_less =redge_region[dif_max_less]
rmax_more =redge_region[dif_max_more]
#lagrangian interpolation with three points
#this has been expanded to make the syntax easier
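        #For reference, a, b and c below are the three-point Lagrange
        #coefficients, and the red-edge position is
        #  REP = (a*(wl_0+wl_+1) + b*(wl_-1+wl_+1) + c*(wl_-1+wl_0)) / (2*(a+b+c))
        #where wl_0 is the wavelength of the maximum first derivative and
        #wl_-1, wl_+1 the wavelengths either side of it (Dawson & Curran 1998).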
        a = rmax_less[1]/((rmax_less[0]-rmax[0])*(rmax_less[0]-rmax_more[0]))
        b = rmax[1]/((rmax[0]-rmax_less[0])*(rmax[0]-rmax_more[0]))
        c = rmax_more[1]/((rmax_more[0]-rmax_less[0])*(rmax_more[0]-rmax[0]))
d = a*(rmax[0]+rmax_more[0])
e = b*(rmax_less[0]+rmax_more[0])
f = c*(rmax_less[0]+rmax[0])
lg_rep = (d+e+f)/(2*(a+b+c))
print ('Lagrangian', lg_rep)
return lg_rep
#Linear extrapolation- after Cho & Skidmore 2006, Cho et al 2007
def redge_linear_extrapolation(self):
diff = np.diff(self.values)
d680 = diff[np.argmin(np.abs(self.wl-680+1))]
d694 = diff[np.argmin(np.abs(self.wl-694+1))]
d724 = diff[np.argmin(np.abs(self.wl-724+1))]
d760 = diff[np.argmin(np.abs(self.wl-760+1))]
red_slope = ((d694-d680)/(694-680))
ir_slope = ((d760-d724)/(760-724))
red_inter = d680-(red_slope*680)
ir_inter = d724-(ir_slope*724)
wl = (ir_inter-red_inter)/(ir_slope-red_slope)
print ('^!!!!!!!!! Linear:',wl)
return np.abs(wl)
class fluorescence():
    '''this class is intended to look for evidence of photosynthetic fluorescence.
    Currently this is limited to simple reflectance indices. This should be
    expanded to take in other more complex methods to investigate fluorescence'''
def __init__(self, spectra):
self.wl = spectra[:,0]
self.values = spectra[:,1]
self.range = (np.min(self.wl),np.max(self.wl))
print ('call to fluor')
'''The init method checks the range to establish if it overlaps with
        the region of chlorophyll fluorescence. If so it will perform the
analysis methods and output to hdf5'''
def wl_selector(self, x):
        '''this method returns the reflectance value at the wavelength
        closest to that specified'''
value = self.values[np.argmin(np.abs(self.wl-x))]
return value
def d_wl_selector(self, x):
        '''this method returns the first-derivative value at the wavelength
        closest to that specified'''
diff = np.diff(self.values)
value = diff[np.argmin(np.abs(self.wl-x))+1]
return value
def wl_max_d(self):
'''method to extract wavelength of the maxima of the first derivative
and return this'''
start = np.argmin(np.abs(self.wl-650))
end = np.argmin(np.abs(self.wl-760))
diff = np.diff(self.values[start:end])
maxdiff = np.argmax(diff)
maxdiffwl = self.wl[maxdiff+start+1]
return maxdiffwl, diff[maxdiff]
def simple_ratios(self):
        ''' This method runs fluorescence index ratios and returns them as a
stacked numpy array'''
#r680/r630
r680r630 = self.wl_selector(680)/self.wl_selector(630)
print (r680r630)
#r685/r630
r685r630 = self.wl_selector(685)/self.wl_selector(630)
print (r685r630)
#r685/r655
r685r655 = self.wl_selector(685)/self.wl_selector(655)
print (r685r655)
#r687/r630
r687r630 = self.wl_selector(687)/self.wl_selector(630)
print (r687r630)
#r690/r630
r690r630 = self.wl_selector(690)/self.wl_selector(630)
print (r690r630)
#r750/r800
r750r800 = self.wl_selector(750)/self.wl_selector(800)
print (r750r800)
#sq(r685)/(r675-r690)
sqr685 = np.square(self.wl_selector(685))/(self.wl_selector(675)-self.wl_selector(690))
print (sqr685)
#(r675-r690)/sq(r683) Zarco-Tejada 2000
r675r690divsq683 = (self.wl_selector(675)-self.wl_selector(690))/np.square(self.wl_selector(683))
print (r675r690divsq683)
#d705/d722
d705d722 = self.d_wl_selector(705)/self.d_wl_selector(722)
print (d705d722)
#d730/d706
d730d706 = self.d_wl_selector(730)/self.d_wl_selector(706)
print (d730d706)
#(d688-d710)/sq(d697)
d686d710sq697 = (self.d_wl_selector(688)-self.d_wl_selector(710))\
/np.square(self.d_wl_selector(697))
print (d686d710sq697)
#wl at max d / d720
maxdd720 = self.wl_max_d()[1]/self.d_wl_selector(720)
print (maxdd720)
#wl at max d / d703
maxdd703 = self.wl_max_d()[1]/self.d_wl_selector(703)
print (maxdd703)
#wl at max d / d(max d+12)
print (self.wl_max_d()[0])
maxd12 = self.wl_max_d()[1]/self.d_wl_selector(self.wl_max_d()[0]+12)
print (maxd12)
combined = np.vstack((r680r630,
r685r630,
r685r655,
r687r630,
r690r630,
r750r800,
sqr685,
r675r690divsq683,
d705d722,
d730d706,
d686d710sq697,
maxdd720,
maxdd703,
maxd12))
fluo_hdr = str('"r680r630",'+
'"r685r630",'+
'"r685r655",'+
'"r687r630",'+
'"r690r630",'+
'"r750r800",'+
'"sqr685",'+
'"r675r690divsq683",'+
'"d705d722",'+
'"d730d706",'+
'"d686d710sq697",'+
'"maxdd720",'+
'"maxdd703",'+
'"maxd12"')
return combined, fluo_hdr
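    # An assumed usage sketch (not from the original source), mirroring the
    # way load_asd saves these ratios:
    #     ratios, hdr = fluorescence(spec_smoothed).simple_ratios()
    #     np.savetxt('fluo.txt', np.transpose(ratios), header=hdr, delimiter=',')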
def dual_peak(self):
        '''This function looks for a dual peak in the red-edge region. If it's
        there it measures the depth of the feature between the two peaks.
        UNTESTED. Assumes find_peaks_cwt is imported from scipy.signal at
        module level.'''
        #use array indices (not reflectance values) to slice the window
        start = np.argmin(np.abs(self.wl-640))
        end = np.argmin(np.abs(self.wl-740))
        d1_region = np.diff(self.values[start:end])
        #d2_region = np.diff(self.values[start:end], n=2)
        peak_finder = find_peaks_cwt(d1_region, np.arange(3,10))
        #wavelengths of the detected peaks (np.diff shifts indices by one)
        peak_wl = self.wl[start+1:end][peak_finder]
        fluor_peaks = []
        for peak, wl in zip(peak_finder, peak_wl):
            #match peaks to the expected wavelengths within a 1 nm tolerance
            if np.abs(wl-668) < 1:
                print ('found fluorescence peak at 668nm')
                fluor_peaks.append(peak)
            elif np.abs(wl-735) < 1:
                print ('found fluorescence peak at 735nm')
                fluor_peaks.append(peak)
            else:
                print ('unknown peak')
        '''if len(fluor_peaks) == 2:
            something = 'something'''
class load_asd():
def __init__(self, indir, output_dir):
data_list = os.listdir(indir)
print (data_list)
#output_dir = os.path.join(indir,'output')
if not os.path.exists(output_dir):
            os.mkdir(output_dir)
for directory in data_list:
parent = os.path.join(indir, directory)
spectra_dir = os.path.join(parent, 'raw_spectra')
reading_info_dir = os.path.join(parent, 'reading_info')
sensor_name = 'ASD FieldSpec Pro'
sensor_type = 'SPR'
sensor_units = 'nm'
sensor_range = [350,2500]
os.chdir(reading_info_dir)
reading_info_file = open('reading_atributes.txt','rb')
reading_info = csv.DictReader(reading_info_file)
reading_info_array = np.empty(12)
readings_list = [row for row in reading_info]
for reading in readings_list[:]:
reading_filename = str(reading['reading_id']+'.txt')
reading_info_line = np.column_stack((reading['reading_id'],
reading['dartField'],
reading['transect'],
reading['transectPosition'],
reading['reading_type'],
reading['reading_coord_osgb_x'],
reading['reading_coord_osgb_y'],
reading['dateOfAcquisition'],
reading['timeOfAcquisition'],
reading['instrument_number'],
reading['dark_current'],
reading['white_ref']))
#print reading_info_line
if reading['reading_type']== 'REF':
reading_info_array = np.vstack((reading_info_array,reading_info_line))
#print reading_info_array
print ('*********** Loading File', reading_filename, '***********')
os.chdir(spectra_dir)
spec = np.genfromtxt(reading_filename,
delimiter=', ',
                                     skip_header=30)
spec = np.column_stack((spec[:,0],spec[:,1]*100))
nir_start = 0
nir_end = 990
nir_weight = 3.5
nir_k = 4.9
nir_s =45
swir1_start = 1080
swir1_end = 1438
swir1_weight = 8.5
swir1_k = 3.5
swir1_s = 35
swir2_start = 1622
swir2_end = 2149
swir2_weight = 1.2
swir2_s = 92
swir2_k = 2.8
#smoothing(perc_out, block_start, block_end, kparam, weight, sparam)
nir_smoothed = smoothing(spec, nir_start, nir_end, nir_k, nir_weight, nir_s)
swir1_smoothed = smoothing(spec, swir1_start, swir1_end, swir1_k, swir1_weight, swir1_s)
swir2_smoothed = smoothing(spec, swir2_start, swir2_end, swir2_k, swir2_weight, swir2_s)
print ('Smoothed array shape', nir_smoothed.shape,swir1_smoothed.shape,swir2_smoothed.shape)
nir_swir_gap = interpolate_gaps(nir_smoothed,swir1_smoothed)
swir2_gap = interpolate_gaps(swir1_smoothed,swir2_smoothed)
spec_smoothed = np.vstack((nir_smoothed,
nir_swir_gap,
swir1_smoothed,
swir2_gap,
swir2_smoothed))
print ('Spec SHAPE:', spec.shape)
survey_dir = os.path.join(output_dir, directory)
if not os.path.exists(survey_dir):
os.mkdir(survey_dir)
os.chdir(survey_dir)
try:
abs470 = absorption_feature(spec_smoothed,400,518,484)
print (abs470.abs_feature()[0])
abs470_ftdef = abs470.abs_feature()[0]
print (abs470_ftdef)
abs470_crem = abs470.abs_feature()[2]
                    if abs470_ftdef is not None:
np.savetxt(reading_filename[0:-4]+'_abs470_ftdef.txt',
abs470_ftdef,
header=abs470.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs470_crem.txt',
abs470_crem,
delimiter=',')
except:
pass
try:
abs670 = absorption_feature(spec_smoothed,548,800,670)
abs670_ftdef = abs670.abs_feature()[0]
abs670_crem = abs670.abs_feature()[2]
                    if abs670_ftdef is not None:
np.savetxt(reading_filename[0:-4]+'_abs670_ftdef.txt',
abs670_ftdef,
header=abs670.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs670_crem.txt',
abs670_crem,
delimiter=',')
except:
pass
try:
abs970 = absorption_feature(spec_smoothed,880,1115,970)
abs970_ftdef = abs970.abs_feature()[0]
abs970_crem = abs970.abs_feature()[2]
                    if abs970_ftdef is not None:
np.savetxt(reading_filename[0:-4]+'_abs970_ftdef.txt',
abs970_ftdef,
header=abs970.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs970_crem.txt',
abs970_crem,
delimiter=',')
except:
pass
try:
abs1200 = absorption_feature(spec_smoothed,1080,1300,1190)
abs1200_ftdef = abs1200.abs_feature()[0]
abs1200_crem = abs1200.abs_feature()[2]
                    if abs1200_ftdef is not None:
np.savetxt(reading_filename[0:-4]+'_abs1200_ftdef.txt',
abs1200_ftdef,
header=abs1200.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs1200_crem.txt',
abs1200_crem,
delimiter=',')
except:
pass
try:
abs1730 = absorption_feature(spec_smoothed,1630,1790,1708)
abs1730_ftdef = abs1730.abs_feature()[0]
abs1730_crem = abs1730.abs_feature()[2]
                    if abs1730_ftdef is not None:
np.savetxt(reading_filename[0:-4]+'_abs1730_ftdef.txt',
abs1730_ftdef,
header=abs1730.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs1730_crem.txt',
abs1730_crem,
delimiter=',')
except:
pass
print (spec_smoothed.shape)
try:
abs2100 = absorption_feature(spec_smoothed,2001,2196,2188)
abs2100_ftdef = abs2100.abs_feature()[0]
abs2100_crem = abs2100.abs_feature()[2]
                    if abs2100_ftdef is not None:
np.savetxt(reading_filename[0:-4]+'_abs2100_ftdef.txt',
                                   abs2100_ftdef,
header=abs2100.abs_feature()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_abs2100_crem.txt',
abs2100_crem,
delimiter=',')
except:
pass
veg_indices = Indices(spec_smoothed)
indices = np.column_stack((veg_indices.visnir()[0],
veg_indices.nir_swir()[0],
veg_indices.swir()[0]))
print (veg_indices.visnir()[1],veg_indices.nir_swir()[1],veg_indices.swir()[1])
hdr = str(veg_indices.visnir()[1]+','+veg_indices.nir_swir()[1]+','+veg_indices.swir()[1])
np.savetxt(reading_filename[0:-4]+'_indices.txt',
indices,
header=hdr,
delimiter=',')
mtbvi = veg_indices.multi_tbvi()
np.savetxt(reading_filename[0:-4]+'_mtbvi.txt',
mtbvi,
delimiter=',')
redge = red_edge(spec_smoothed)
print (redge.redge_vals.shape)
print (redge.redge_vals)
np.savetxt(reading_filename[0:-4]+'_redge.txt',
redge.redge_vals,
delimiter=',')
fluo = fluorescence(spec_smoothed)
np.savetxt(reading_filename[0:-4]+'_flou.txt',
np.transpose(fluo.simple_ratios()[0]),
header = fluo.simple_ratios()[1],
delimiter=',')
np.savetxt(reading_filename[0:-4]+'_spec.txt',
spec_smoothed,
delimiter=',')
class load_image():
    def __init__(self, wavelengths_dir,image_dir,out_dir):
os.chdir(wavelengths_dir)
wavelengths = np.genfromtxt('wavelengths.txt')
print ('wavelengths array', wavelengths)
os.chdir(image_dir)
image_list = os.listdir(image_dir)
for image in image_list:
import_image = self.get_image(image)
image_name = image[:-4]
print ('IMAGE NAME:', image_name)
row = 1
img_array = import_image[0]
print ('Image_array', img_array)
projection = import_image[1]
print ('Projection',projection)
x_size = import_image[2]
print ('Xdim',x_size)
y_size = import_image[3]
print ('Ydim', y_size)
spatial = import_image[4]
print (spatial)
x_top_left = spatial[0]
ew_pix_size = spatial[1]
rotation_ew = spatial[2]
y_top_left = spatial[3]
rotation_y = spatial[4]
ns_pixel_size = spatial[5]
print ('Spatial', x_top_left,ew_pix_size,rotation_ew,y_top_left,rotation_y,ns_pixel_size)
print ('IMAGE ARRAY SHAPE',img_array.shape)
img_dims = img_array.shape
print (img_dims[0],'/',img_dims[1])
#indices+29
indices_out = np.zeros((img_dims[0],img_dims[1],29), dtype=np.float32)
#print indices_out
#redge=3
redge_out = np.zeros((img_dims[0],img_dims[1]),dtype=np.float32)
#fluo=14
fluo_out=np.zeros((img_dims[0],img_dims[1],14), dtype=np.float32)
print ('fluo out', fluo_out.shape)
ft470_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32)
ft670_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32)
ft970_out = np.zeros((img_dims[0],img_dims[1],13), dtype=np.float32)
x470 = np.argmin(np.abs(wavelengths-400))
y470 = np.argmin(np.abs(wavelengths-518))
len470 = y470-x470
cr470_out = np.zeros((img_dims[0],img_dims[1],len470), dtype=np.float32)
x670 = np.argmin(np.abs(wavelengths-548))
y670 = np.argmin(np.abs(wavelengths-800))
len670 = y670-x670
cr670_out = np.zeros((img_dims[0],img_dims[1],len670), dtype=np.float32)
print (cr670_out)
x970 = np.argmin(np.abs(wavelengths-880))
y970 = np.argmin(np.abs(wavelengths-1000))
len970 = y970-x970
cr970_out = np.zeros((img_dims[0],img_dims[1],len970), dtype=np.float32)
#print cr970_out
print (wavelengths)
row = 0
print ('***', row, img_dims[0])
for i in range(0,img_dims[0]):
print (i)
column = 0
#print 'COL',column
for j in range(0,img_dims[1]):
print ('COLUMN',column)
#print 'Pixel',pixel
name = '%s_pix-%s_%s' % (image_name,row,column)
print ('NAME',name)
pixel = img_array[row,column,:]
#smoothed = savgol_filter(pixel,5,2)
#spec_smoothed = np.column_stack((wavelengths,smoothed))
spec_smoothed = np.column_stack((wavelengths,pixel))
print (spec_smoothed)
veg_indices = Indices(spec_smoothed)
indices = veg_indices.visnir()[0]
print ('(*&)(*)(*&&^)^)^)*&^)*^)*&', indices)
indices_out[row,column,:]=indices
fluo = fluorescence(spec_smoothed)
fluo_out[row,column,:]=np.transpose(fluo.simple_ratios()[0])
redge = red_edge(spec_smoothed)
print (redge.redge_vals.shape)
redge_out[row,column]= redge.redge_vals[0,2]
try:
abs470 = absorption_feature(spec_smoothed,400,518,484)
abs470_ftdef = abs470.abs_feature()[0]
abs470_crem = abs470.abs_feature()[2]
abs470_crem = np.column_stack((abs470_crem[:,0],abs470_crem[:,4]))
print ('!*!*!*!*!&!*!*', abs470_crem)
crem470_fill = self.crem_fill(x470,y470,abs470_crem,wavelengths)
ft470_out[row,column,:]=abs470_ftdef
cr470_out[row,column,:]=crem470_fill
except:
pass
try:
abs670 = absorption_feature(spec_smoothed,548,800,670)
abs670_ftdef = abs670.abs_feature()[0]
abs670_crem = abs670.abs_feature()[2]
abs670_crem = np.column_stack((abs670_crem[:,0],abs670_crem[:,4]))
ft670_out[row,column,:]=abs670_ftdef
crem670_fill = self.crem_fill(x670,y670,abs670_crem,wavelengths)
cr670_out[row,column,:]=crem670_fill
except:
pass
try:
abs970 = absorption_feature(spec_smoothed,880,1000,970)
abs970_ftdef = abs970.abs_feature()[0]
abs970_crem = abs970.abs_feature()[2]
abs970_crem = np.column_stack((abs970_crem[:,0],abs970_crem[:,4]))
crem970_fill = self.crem_fill(x970,y970,abs970_crem,wavelengths)
ft970_out[row,column,:]=abs970_ftdef
cr970_out[row,column,:]=crem970_fill
except:
pass
column = column+1
print (pixel.shape)
row = row+1
self.writeimage(out_dir,image+'_indices.tif',indices_out,spatial)
self.writeimage(out_dir,image+'_fluo.tif',fluo_out,spatial)
self.writeimage(out_dir,image+'_redge.tif',redge_out,spatial)
self.writeimage(out_dir,image+'_ft470.tif',ft470_out,spatial)
self.writeimage(out_dir,image+'_cr470.tif',cr470_out,spatial)
self.writeimage(out_dir,image+'_ft670.tif',ft670_out,spatial)
self.writeimage(out_dir,image+'_cr670.tif',cr670_out,spatial)
self.writeimage(out_dir,image+'_ft970.tif',ft970_out,spatial)
self.writeimage(out_dir,image+'_cr970.tif',cr970_out,spatial)
def crem_fill(self,xwl,ywl,bna,wavelengths):
bna_out=np.zeros((ywl-xwl))
bna_wvl = bna[:,0]
bna_refl= bna[:,1]
full_wl = wavelengths[xwl:ywl]
index = np.argmin(np.abs(wavelengths-bna_wvl[0]))
bna_out[index:]=bna_refl
return bna_out
def get_image(self, image):
print ('call to get_image')
# open the dataset
dataset = gdal.Open(image, GA_ReadOnly)
print ('Dataset',dataset)
        # if there's nothing there print error
if dataset is None:
print ('BORK: Could not load file: %s' %(image))
# otherwise do stuff
else:
#get the format
driver = dataset.GetDriver().ShortName
#get the x dimension
xsize = dataset.RasterXSize
#get the y dimension
ysize = dataset.RasterYSize
#get the projection
proj = dataset.GetProjection()
#get the number of bands
bands = dataset.RasterCount
            #get the geotransform. GetGeoTransform() returns a 6-element tuple in standard GDAL ordering:
#spatial[0] = top left x
#spatial[1] = w-e pixel size
#spatial[2] = rotation (should be 0)
#spatial[3] = top left y
#spatial[4] = rotation (should be 0)
#spatial[5] = n-s pixel size
spatial = dataset.GetGeoTransform()
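            #For example (illustrative only), the map coordinates of the
            #centre of pixel (row, col) follow from this tuple as
            #  x_geo = spatial[0] + (col+0.5)*spatial[1] + (row+0.5)*spatial[2]
            #  y_geo = spatial[3] + (col+0.5)*spatial[4] + (row+0.5)*spatial[5]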
#print some stuff to console to show we're paying attention
print ('Found raster in %s format. Raster has %s bands' %(driver,bands))
print ('Projected as %s' %(proj))
print ('Dimensions: %s x %s' %(xsize,ysize))
#instantiate a counter
count = 1
            #OK. This is the bit that actually loads the bands in a while loop
# Loop through bands as long as count is equal to or less than total
while (count<=bands):
#show that your computer's fans are whining for a reason
print ('Loading band: %s of %s' %(count,bands))
#get the band
band = dataset.GetRasterBand(count)
# load this as a numpy array
data_array = band.ReadAsArray()
'''data_array = ma.masked_where(data_array == 0, data_array)
data_array = data_array.filled(-999)'''
data_array = data_array.astype(np.float32, copy=False)
# close the band object
band = None
#this bit stacks the bands into a combined numpy array
#if it's the first band copy the array directly to the combined one
if count == 1:
stacked = data_array
#else combine these
else:
stacked = np.dstack((stacked,data_array))
#stacked = stacked.filled(-999)
#just to check it's working
#print stacked.shape
# increment the counter
count = count+1
#stacked = stacked.astype(np.float32, copy=False)
return stacked,proj,xsize,ysize,spatial
def writeimage(self,
outpath,
outname,
image,
spatial):
data_out = image
print ('ROWS,COLS',image.shape)
print ('Call to write image')
os.chdir(outpath)
print ('OUTPATH',outpath)
print ('OUTNAME',outname)
#load the driver for the format of choice
driver = gdal.GetDriverByName("Gtiff")
#create an empty output file
#get the number of bands we'll need
try:
bands = image.shape[2]
except:
bands=1
print ('BANDS OUT', bands)
#file name, x columns, y columns, bands, dtype
out = driver.Create(outname, image.shape[1], image.shape[0], bands, gdal.GDT_Float32)
#define the location using coords of top-left corner
# minimum x, e-w pixel size, rotation, maximum y, n-s pixel size, rotation
out.SetGeoTransform(spatial)
srs = osr.SpatialReference()
        #get the coordinate system using the EPSG code (27700 = British National Grid)
        srs.ImportFromEPSG(27700)
        #set projection of output file
out.SetProjection(srs.ExportToWkt())
band = 1
if bands == 1:
out.GetRasterBand(band).WriteArray(data_out)
#set the no data value
out.GetRasterBand(band).SetNoDataValue(-999)
            #append the statistics to the dataset
out.GetRasterBand(band).GetStatistics(0,1)
print ('Saving %s/%s' % (band,bands))
else:
while (band<=bands):
data = data_out[:,:,band-1]
#write values to empty array
out.GetRasterBand(band).WriteArray( data )
#set the no data value
out.GetRasterBand(band).SetNoDataValue(-999)
                #append the statistics to the dataset
out.GetRasterBand(band).GetStatistics(0,1)
print ('Saving %s/%s' % (band,bands))
band = band+1
out = None
print ('Processing of %s complete' % (outname))
return outname
if __name__ == "__main__":
#dir_path = os.path.dirname(os.path.abspath('...'))
#data_root = os.path.join(dir_path, 'data')
data_root = '/home/dav/data/temp/test/test_spec'
for folder in os.listdir(data_root):
input_dir = os.path.join(data_root,folder)
print (input_dir)
surveys_list = os.listdir(input_dir)
print (surveys_list)
for survey_dir in surveys_list:
print (survey_dir)
site_dir=os.path.join(input_dir,survey_dir)
print (site_dir)
image_path = os.path.join(site_dir, 'image')
print (image_path)
wavelengths_dir = os.path.join(site_dir, 'wavelengths')
print (wavelengths_dir)
out_dir = os.path.join(site_dir,'output')
if not os.path.exists(out_dir):
os.mkdir(out_dir)
load_image(wavelengths_dir,image_path,out_dir) | mit |
omanor/MUSiCC | tests/test_musicc.py | 1 | 5038 | #!/usr/bin/env python
"""
This is the testing unit for MUSiCC
"""
# to comply with both Py2 and Py3
from __future__ import absolute_import, division, print_function
import unittest
import os
import pandas as pd
import musicc
from musicc.core import correct_and_normalize
class MUSiCCTestCase(unittest.TestCase):
"""Tests for `musicc.py`."""
path_to_data = os.path.dirname(musicc.__file__)
def test_is_output_correct_for_normalization_only(self):
"""Does MUSiCC produce the correct output for normalization of the example case?"""
print(MUSiCCTestCase.path_to_data)
# define the arguments needed by MUSiCC
musicc_args = {'input_file': MUSiCCTestCase.path_to_data + '/examples/simulated_ko_relative_abundance.tab',
'output_file': MUSiCCTestCase.path_to_data + '/examples/test1.tab',
'input_format': 'tab', 'output_format': 'tab', 'musicc_inter': True,
'musicc_intra': 'None', 'compute_scores': True, 'verbose': False}
# run the MUSiCC correction
correct_and_normalize(musicc_args)
# assert that the result is equal to the example (up to small difference due to OS/Other)
example = pd.read_table(MUSiCCTestCase.path_to_data + '/examples/simulated_ko_MUSiCC_Normalized.tab', index_col=0)
output = pd.read_table(MUSiCCTestCase.path_to_data + '/examples/test1.tab', index_col=0)
example_vals = example.values
output_vals = output.values
self.assertTrue(example_vals.shape[0] == output_vals.shape[0])
self.assertTrue(example_vals.shape[1] == output_vals.shape[1])
for i in range(example_vals.shape[0]):
for j in range(example_vals.shape[1]):
self.assertTrue(abs(example_vals[i, j] - output_vals[i, j]) < 1)
os.remove(MUSiCCTestCase.path_to_data + '/examples/test1.tab')
def test_is_output_correct_for_normalization_correction_use_generic(self):
"""Does MUSiCC produce the correct output for normalization and correction of the example case?"""
# define the arguments needed by MUSiCC
musicc_args = {'input_file': MUSiCCTestCase.path_to_data + '/examples/simulated_ko_relative_abundance.tab',
'output_file': MUSiCCTestCase.path_to_data + '/examples/test2.tab',
'input_format': 'tab', 'output_format': 'tab', 'musicc_inter': True,
'musicc_intra': 'use_generic', 'compute_scores': True, 'verbose': False}
# run the MUSiCC correction
correct_and_normalize(musicc_args)
# assert that the result is equal to the example (up to small difference due to OS/Other)
example = pd.read_table(MUSiCCTestCase.path_to_data + '/examples/simulated_ko_MUSiCC_Normalized_Corrected_use_generic.tab', index_col=0)
output = pd.read_table(MUSiCCTestCase.path_to_data + '/examples/test2.tab', index_col=0)
example_vals = example.values
output_vals = output.values
self.assertTrue(example_vals.shape[0] == output_vals.shape[0])
self.assertTrue(example_vals.shape[1] == output_vals.shape[1])
for i in range(example_vals.shape[0]):
for j in range(example_vals.shape[1]):
self.assertTrue(abs(example_vals[i, j] - output_vals[i, j]) < 1)
os.remove(MUSiCCTestCase.path_to_data + '/examples/test2.tab')
def test_is_output_correct_for_normalization_correction_learn_model(self):
"""Does MUSiCC produce the correct output for normalization and correction of the example case?"""
# define the arguments needed by MUSiCC
musicc_args = {'input_file': MUSiCCTestCase.path_to_data + '/examples/simulated_ko_relative_abundance.tab',
'output_file': MUSiCCTestCase.path_to_data + '/examples/test3.tab',
'input_format': 'tab', 'output_format': 'tab', 'musicc_inter': True,
'musicc_intra': 'learn_model', 'compute_scores': True, 'verbose': False}
# run the MUSiCC correction
correct_and_normalize(musicc_args)
# assert that the result is equal to the example (up to small difference due to de novo learning)
example = pd.read_table(MUSiCCTestCase.path_to_data + '/examples/simulated_ko_MUSiCC_Normalized_Corrected_learn_model.tab', index_col=0)
output = pd.read_table(MUSiCCTestCase.path_to_data + '/examples/test3.tab', index_col=0)
example_vals = example.values
output_vals = output.values
self.assertTrue(example_vals.shape[0] == output_vals.shape[0])
self.assertTrue(example_vals.shape[1] == output_vals.shape[1])
for i in range(example_vals.shape[0]):
for j in range(example_vals.shape[1]):
self.assertTrue(abs(example_vals[i, j] - output_vals[i, j]) < 1)
os.remove(MUSiCCTestCase.path_to_data + '/examples/test3.tab')
################################################
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |