repo_name (stringlengths 7-92) | path (stringlengths 5-149) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 911-693k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
xhochy/arrow | python/pyarrow/tests/test_hdfs.py | 1 | 13325 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import pickle
import pytest
import random
import unittest
from io import BytesIO
from os.path import join as pjoin
import numpy as np
import pyarrow as pa
import pyarrow.tests.test_parquet as test_parquet
from pyarrow.pandas_compat import _pandas_api
from pyarrow.tests import util
from pyarrow.util import guid
# ----------------------------------------------------------------------
# HDFS tests
def hdfs_test_client():
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'default')
user = os.environ.get('ARROW_HDFS_TEST_USER', None)
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
with pytest.warns(DeprecationWarning):
return pa.hdfs.connect(host, port, user)
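# Usage sketch (not part of the original tests): the client above is configured
# entirely through environment variables; the host, port and user below are
# made-up values.
#
#   export ARROW_HDFS_TEST_HOST=namenode.example.com
#   export ARROW_HDFS_TEST_PORT=8020
#   export ARROW_HDFS_TEST_USER=hdfs
#
# hdfs_test_client() then returns a (deprecated) pyarrow HDFS client connected
# to that cluster, which the test cases below reuse.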
@pytest.mark.hdfs
class HdfsTestCases:
def _make_test_file(self, hdfs, test_name, test_path, test_data):
base_path = pjoin(self.tmp_path, test_name)
hdfs.mkdir(base_path)
full_path = pjoin(base_path, test_path)
with hdfs.open(full_path, 'wb') as f:
f.write(test_data)
return full_path
@classmethod
def setUpClass(cls):
cls.check_driver()
cls.hdfs = hdfs_test_client()
cls.tmp_path = '/tmp/pyarrow-test-{}'.format(random.randint(0, 1000))
cls.hdfs.mkdir(cls.tmp_path)
@classmethod
def tearDownClass(cls):
cls.hdfs.delete(cls.tmp_path, recursive=True)
cls.hdfs.close()
def test_pickle(self):
s = pickle.dumps(self.hdfs)
h2 = pickle.loads(s)
assert h2.is_open
assert h2.host == self.hdfs.host
assert h2.port == self.hdfs.port
assert h2.user == self.hdfs.user
assert h2.kerb_ticket == self.hdfs.kerb_ticket
# smoketest unpickled client works
h2.ls(self.tmp_path)
def test_cat(self):
path = pjoin(self.tmp_path, 'cat-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
contents = self.hdfs.cat(path)
assert contents == data
def test_capacity_space(self):
capacity = self.hdfs.get_capacity()
space_used = self.hdfs.get_space_used()
disk_free = self.hdfs.df()
assert capacity > 0
assert capacity > space_used
assert disk_free == (capacity - space_used)
def test_close(self):
client = hdfs_test_client()
assert client.is_open
client.close()
assert not client.is_open
with pytest.raises(Exception):
client.ls('/')
def test_mkdir(self):
path = pjoin(self.tmp_path, 'test-dir/test-dir')
parent_path = pjoin(self.tmp_path, 'test-dir')
self.hdfs.mkdir(path)
assert self.hdfs.exists(path)
self.hdfs.delete(parent_path, recursive=True)
assert not self.hdfs.exists(path)
def test_mv_rename(self):
path = pjoin(self.tmp_path, 'mv-test')
new_path = pjoin(self.tmp_path, 'mv-new-test')
data = b'foobarbaz'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
assert self.hdfs.exists(path)
self.hdfs.mv(path, new_path)
assert not self.hdfs.exists(path)
assert self.hdfs.exists(new_path)
assert self.hdfs.cat(new_path) == data
self.hdfs.rename(new_path, path)
assert self.hdfs.cat(path) == data
def test_info(self):
path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(path, 'ex')
self.hdfs.mkdir(path)
data = b'foobarbaz'
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
path_info = self.hdfs.info(path)
file_path_info = self.hdfs.info(file_path)
assert path_info['kind'] == 'directory'
assert file_path_info['kind'] == 'file'
assert file_path_info['size'] == len(data)
def test_exists_isdir_isfile(self):
dir_path = pjoin(self.tmp_path, 'info-base')
file_path = pjoin(dir_path, 'ex')
missing_path = pjoin(dir_path, 'this-path-is-missing')
self.hdfs.mkdir(dir_path)
with self.hdfs.open(file_path, 'wb') as f:
f.write(b'foobarbaz')
assert self.hdfs.exists(dir_path)
assert self.hdfs.exists(file_path)
assert not self.hdfs.exists(missing_path)
assert self.hdfs.isdir(dir_path)
assert not self.hdfs.isdir(file_path)
assert not self.hdfs.isdir(missing_path)
assert not self.hdfs.isfile(dir_path)
assert self.hdfs.isfile(file_path)
assert not self.hdfs.isfile(missing_path)
def test_disk_usage(self):
path = pjoin(self.tmp_path, 'disk-usage-base')
p1 = pjoin(path, 'p1')
p2 = pjoin(path, 'p2')
subdir = pjoin(path, 'subdir')
p3 = pjoin(subdir, 'p3')
if self.hdfs.exists(path):
self.hdfs.delete(path, True)
self.hdfs.mkdir(path)
self.hdfs.mkdir(subdir)
data = b'foobarbaz'
for file_path in [p1, p2, p3]:
with self.hdfs.open(file_path, 'wb') as f:
f.write(data)
assert self.hdfs.disk_usage(path) == len(data) * 3
def test_ls(self):
base_path = pjoin(self.tmp_path, 'ls-test')
self.hdfs.mkdir(base_path)
dir_path = pjoin(base_path, 'a-dir')
f1_path = pjoin(base_path, 'a-file-1')
self.hdfs.mkdir(dir_path)
f = self.hdfs.open(f1_path, 'wb')
f.write(b'a' * 10)
contents = sorted(self.hdfs.ls(base_path, False))
assert contents == [dir_path, f1_path]
def test_chmod_chown(self):
path = pjoin(self.tmp_path, 'chmod-test')
with self.hdfs.open(path, 'wb') as f:
f.write(b'a' * 10)
def test_download_upload(self):
base_path = pjoin(self.tmp_path, 'upload-test')
data = b'foobarbaz'
buf = BytesIO(data)
buf.seek(0)
self.hdfs.upload(base_path, buf)
out_buf = BytesIO()
self.hdfs.download(base_path, out_buf)
out_buf.seek(0)
assert out_buf.getvalue() == data
def test_file_context_manager(self):
path = pjoin(self.tmp_path, 'ctx-manager')
data = b'foo'
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
assert f.size() == 3
result = f.read(10)
assert result == data
def test_open_not_exist_error_message(self):
# ARROW-226
path = pjoin(self.tmp_path, 'does-not-exist-123')
try:
self.hdfs.open(path)
except Exception as e:
assert 'file does not exist' in e.args[0].lower()
def test_read_whole_file(self):
path = pjoin(self.tmp_path, 'read-whole-file')
data = b'foo' * 1000
with self.hdfs.open(path, 'wb') as f:
f.write(data)
with self.hdfs.open(path, 'rb') as f:
result = f.read()
assert result == data
def _write_multiple_hdfs_pq_files(self, tmpdir):
import pyarrow.parquet as pq
nfiles = 10
size = 5
test_data = []
for i in range(nfiles):
df = test_parquet._test_dataframe(size, seed=i)
df['index'] = np.arange(i * size, (i + 1) * size)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
path = pjoin(tmpdir, '{}.parquet'.format(i))
table = pa.Table.from_pandas(df, preserve_index=False)
with self.hdfs.open(path, 'wb') as f:
pq.write_table(table, f)
test_data.append(table)
expected = pa.concat_tables(test_data)
return expected
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files(self):
tmpdir = pjoin(self.tmp_path, 'multi-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
result = self.hdfs.read_parquet(tmpdir)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_multiple_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'multi-parquet-uri-' + guid())
self.hdfs.mkdir(tmpdir)
expected = self._write_multiple_hdfs_pq_files(tmpdir)
path = _get_hdfs_uri(tmpdir)
# TODO: passing this argument should not be needed for a URI
result = pq.read_table(path, use_legacy_dataset=True)
_pandas_api.assert_frame_equal(result.to_pandas()
.sort_values(by='index')
.reset_index(drop=True),
expected.to_pandas())
@pytest.mark.pandas
@pytest.mark.parquet
def test_read_write_parquet_files_with_uri(self):
import pyarrow.parquet as pq
tmpdir = pjoin(self.tmp_path, 'uri-parquet-' + guid())
self.hdfs.mkdir(tmpdir)
path = _get_hdfs_uri(pjoin(tmpdir, 'test.parquet'))
size = 5
df = test_parquet._test_dataframe(size, seed=0)
# Hack so that we don't have a dtype cast in v1 files
df['uint32'] = df['uint32'].astype(np.int64)
table = pa.Table.from_pandas(df, preserve_index=False)
pq.write_table(table, path, filesystem=self.hdfs)
result = pq.read_table(
path, filesystem=self.hdfs, use_legacy_dataset=True
).to_pandas()
_pandas_api.assert_frame_equal(result, df)
@pytest.mark.parquet
@pytest.mark.pandas
def test_read_common_metadata_files(self):
tmpdir = pjoin(self.tmp_path, 'common-metadata-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_read_common_metadata_files(self.hdfs, tmpdir)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_to_dataset_with_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_with_partitions(
tmpdir, filesystem=self.hdfs)
@pytest.mark.parquet
@pytest.mark.pandas
def test_write_to_dataset_no_partitions(self):
tmpdir = pjoin(self.tmp_path, 'write-no_partitions-' + guid())
self.hdfs.mkdir(tmpdir)
test_parquet._test_write_to_dataset_no_partitions(
tmpdir, filesystem=self.hdfs)
class TestLibHdfs(HdfsTestCases, unittest.TestCase):
@classmethod
def check_driver(cls):
if not pa.have_libhdfs():
message = 'No libhdfs available on system'
if os.environ.get('PYARROW_HDFS_TEST_LIBHDFS_REQUIRE'):
pytest.fail(message)
else:
pytest.skip(message)
def test_orphaned_file(self):
hdfs = hdfs_test_client()
file_path = self._make_test_file(hdfs, 'orphaned_file_test', 'fname',
b'foobarbaz')
f = hdfs.open(file_path)
hdfs = None
f = None # noqa
def _get_hdfs_uri(path):
host = os.environ.get('ARROW_HDFS_TEST_HOST', 'localhost')
try:
port = int(os.environ.get('ARROW_HDFS_TEST_PORT', 0))
except ValueError:
raise ValueError('Env variable ARROW_HDFS_TEST_PORT was not '
'an integer')
uri = "hdfs://{}:{}{}".format(host, port, path)
return uri
@pytest.mark.hdfs
@pytest.mark.pandas
@pytest.mark.parquet
@pytest.mark.fastparquet
def test_fastparquet_read_with_hdfs():
from pandas.testing import assert_frame_equal
try:
import snappy # noqa
except ImportError:
pytest.skip('fastparquet test requires snappy')
import pyarrow.parquet as pq
fastparquet = pytest.importorskip('fastparquet')
fs = hdfs_test_client()
df = util.make_dataframe()
table = pa.Table.from_pandas(df)
path = '/tmp/testing.parquet'
with fs.open(path, 'wb') as f:
pq.write_table(table, f)
parquet_file = fastparquet.ParquetFile(path, open_with=fs.open)
result = parquet_file.to_pandas()
assert_frame_equal(result, df)
| apache-2.0 |
spallavolu/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
ASU-CodeDevils/DemonHacks2017 | gameFiles/Python/helpers.py | 1 | 7120 | from __future__ import print_function
from __future__ import division
import numpy as np
from datetime import datetime
from scipy.stats import norm
from scipy.optimize import minimize
def acq_max(ac, gp, y_max, bounds, random_state):
"""
A function to find the maximum of the acquisition function
It uses a combination of random sampling (cheap) and the 'L-BFGS-B'
optimization method: first it samples 1e5 points at random, and then
runs L-BFGS-B from 250 random starting points.
Parameters
----------
:param ac:
The acquisition function object that return its point-wise value.
:param gp:
A gaussian process fitted to the relevant data.
:param y_max:
The current maximum known value of the target function.
:param bounds:
The variables bounds to limit the search of the acq max.
Returns
-------
:return: x_max, The arg max of the acquisition function.
"""
# Warm up with random points
x_tries = random_state.uniform(bounds[:, 0], bounds[:, 1],
size=(100000, bounds.shape[0]))
ys = ac(x_tries, gp=gp, y_max=y_max)
x_max = x_tries[ys.argmax()]
max_acq = ys.max()
# Explore the parameter space more thoroughly
x_seeds = random_state.uniform(bounds[:, 0], bounds[:, 1],
size=(250, bounds.shape[0]))
for x_try in x_seeds:
# Find the minimum of minus the acquisition function
res = minimize(lambda x: -ac(x.reshape(1, -1), gp=gp, y_max=y_max),
x_try.reshape(1, -1),
bounds=bounds,
method="L-BFGS-B")
# Store it if better than the previous minimum (i.e. the acquisition maximum).
if max_acq is None or -res.fun[0] >= max_acq:
x_max = res.x
max_acq = -res.fun[0]
# Clip output to make sure it lies within the bounds. Due to floating
# point technicalities this is not always the case.
return np.clip(x_max, bounds[:, 0], bounds[:, 1])
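# Minimal usage sketch (not part of the original module). It assumes the `gp`
# argument is a fitted scikit-learn GaussianProcessRegressor, as the docstring
# above implies; the training data, bounds and kappa value are made up.
def _example_acq_max():
    from sklearn.gaussian_process import GaussianProcessRegressor
    rng = np.random.RandomState(0)
    X = rng.uniform(0.0, 1.0, size=(5, 1))
    y = np.sin(3.0 * X).ravel()
    gp = GaussianProcessRegressor().fit(X, y)

    def ucb(x, gp, y_max, kappa=2.576):
        # Simple upper-confidence-bound acquisition.
        mean, std = gp.predict(x, return_std=True)
        return mean + kappa * std

    bounds = np.array([[0.0, 1.0]])
    # Returns the point within `bounds` that maximises the acquisition.
    return acq_max(ac=ucb, gp=gp, y_max=y.max(),
                   bounds=bounds, random_state=rng)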
class UtilityFunction(object):
"""
An object to compute the acquisition functions.
"""
def __init__(self, kind, kappa, xi):
"""
If UCB is to be used, a constant kappa is needed.
"""
self.kappa = kappa
self.xi = xi
if kind not in ['ucb', 'ei', 'poi']:
err = "The utility function " \
"{} has not been implemented, " \
"please choose one of ucb, ei, or poi.".format(kind)
raise NotImplementedError(err)
else:
self.kind = kind
def utility(self, x, gp, y_max):
if self.kind == 'ucb':
return self._ucb(x, gp, self.kappa)
if self.kind == 'ei':
return self._ei(x, gp, y_max, self.xi)
if self.kind == 'poi':
return self._poi(x, gp, y_max, self.xi)
@staticmethod
def _ucb(x, gp, kappa):
mean, std = gp.predict(x, return_std=True)
return mean + kappa * std
@staticmethod
def _ei(x, gp, y_max, xi):
mean, std = gp.predict(x, return_std=True)
z = (mean - y_max - xi)/std
return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)
@staticmethod
def _poi(x, gp, y_max, xi):
mean, std = gp.predict(x, return_std=True)
z = (mean - y_max - xi)/std
return norm.cdf(z)
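# Worked sketch (not part of the original module): with mean=1.0, std=0.5,
# y_max=0.8 and xi=0.0, the expected-improvement branch above gives z = 0.4 and
# EI = 0.2 * norm.cdf(0.4) + 0.5 * norm.pdf(0.4), which is about 0.315.
def _example_expected_improvement():
    mean, std, y_max, xi = 1.0, 0.5, 0.8, 0.0
    z = (mean - y_max - xi) / std
    return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z)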
def unique_rows(a):
"""
A function to trim repeated rows that may appear when optimizing.
This is necessary to prevent the sklearn GP object from breaking.
:param a: array to trim repeated rows from
:return: mask of unique rows
"""
# Sort the array and keep track of where things should go back to
order = np.lexsort(a.T)
reorder = np.argsort(order)
a = a[order]
diff = np.diff(a, axis=0)
ui = np.ones(len(a), 'bool')
ui[1:] = (diff != 0).any(axis=1)
return ui[reorder]
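# Minimal sketch (not part of the original module) of what unique_rows returns:
# a boolean mask that keeps one representative of each distinct row.
def _example_unique_rows():
    a = np.array([[1.0, 2.0], [1.0, 2.0], [3.0, 4.0]])
    mask = unique_rows(a)   # array([ True, False,  True])
    return a[mask]          # array([[1., 2.], [3., 4.]])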
class BColours(object):
BLUE = '\033[94m'
CYAN = '\033[36m'
GREEN = '\033[32m'
MAGENTA = '\033[35m'
RED = '\033[31m'
ENDC = '\033[0m'
class PrintLog(object):
def __init__(self, params):
self.ymax = None
self.xmax = None
self.params = params
self.ite = 1
self.start_time = datetime.now()
self.last_round = datetime.now()
# Column widths based on the parameter name lengths (minimum 7 characters)
self.sizes = [max(len(ps), 7) for ps in params]
# Sorted indexes to access parameters
self.sorti = sorted(range(len(self.params)),
key=self.params.__getitem__)
def reset_timer(self):
self.start_time = datetime.now()
self.last_round = datetime.now()
def print_header(self, initialization=True):
if initialization:
print("{}Initialization{}".format(BColours.RED,
BColours.ENDC))
else:
print("{}Bayesian Optimization{}".format(BColours.RED,
BColours.ENDC))
print(BColours.BLUE + "-" * (29 + sum([s + 5 for s in self.sizes])) +
BColours.ENDC)
print("{0:>{1}}".format("Step", 5), end=" | ")
print("{0:>{1}}".format("Time", 6), end=" | ")
print("{0:>{1}}".format("Value", 10), end=" | ")
for index in self.sorti:
print("{0:>{1}}".format(self.params[index],
self.sizes[index] + 2),
end=" | ")
print('')
def print_step(self, x, y, warning=False):
print("{:>5d}".format(self.ite), end=" | ")
m, s = divmod((datetime.now() - self.last_round).total_seconds(), 60)
print("{:>02d}m{:>02d}s".format(int(m), int(s)), end=" | ")
if self.ymax is None or self.ymax < y:
self.ymax = y
self.xmax = x
print("{0}{2: >10.5f}{1}".format(BColours.MAGENTA,
BColours.ENDC,
y),
end=" | ")
for index in self.sorti:
print("{0}{2: >{3}.{4}f}{1}".format(
BColours.GREEN, BColours.ENDC,
x[index],
self.sizes[index] + 2,
min(self.sizes[index] - 3, 6 - 2)
),
end=" | ")
else:
print("{: >10.5f}".format(y), end=" | ")
for index in self.sorti:
print("{0: >{1}.{2}f}".format(x[index],
self.sizes[index] + 2,
min(self.sizes[index] - 3, 6 - 2)),
end=" | ")
if warning:
print("{}Warning: Test point chose at "
"random due to repeated sample.{}".format(BColours.RED,
BColours.ENDC))
print()
self.last_round = datetime.now()
self.ite += 1
def print_summary(self):
pass
| mit |
muLAn-project/muLAn | muLAn/instruments.py | 1 | 5376 | # -*-coding:Utf-8 -*
# ====================================================================
# Packages
# ====================================================================
import configparser as cp
import copy
import glob
import muLAn
import muLAn.packages.general_tools as gtools
import muLAn.packages.algebra as algebra
import numpy as np
import os
import pandas as pd
import sys
import tables
class Instrument:
"""Class to store properties of each instrument
Args:
properties (list): list with shape (1, 2), where properties[0] is
the ID of the observatory (str), and properties[1] is a string
describing the properties of the observatory, as follows:
"Label, HTML color, Passband[=Gamma], Type, Location[, list of int]".
`Label` is the name of the instrument; the color should be written
without the `#`, Gamma is the linear limb darkening coefficient
(default: 0.0), `Type` is `Magnitude` or `Flux` depending of the
input data files used, `Location` is a name referring to the
position of the instrument (a file `Location.dat` with JPL ephemeris
is required), and the optional list of int corresponds to the data IDs
to remove before the fit and the plots.
Examples for properties[1]:
OGLE-I, 000000, I=0.64, Magnitude, Earth
OGLE-V, 000000, V=0.5, Magnitude, Earth, 11, 60, 68, 73, 78, 121, 125, 128, 135
OGLE Passband I, 000000, I, Magnitude, Earth, 11, 60-68
In the third example, the data points from 60 to 68 (included) will
be removed. A file Earth.dat should be in the Data/ directory.
Attributes:
id (str): instrument ID.
label (str): instrument label.
color (str): HTML color with #.
passband (str): passband name.
gamma (float): linear limb-darkening coefficient Gamma.
type (str): whether the input data are in flux or magnitude units.
location (str): key word corresponding to the ephemeris file.
reject (:obj:`numpy.array`): array of the data ID to remove.
"""
def __init__(self, properties):
self._extract_properties(properties)
def _extract_properties(self, prop):
properties = dict()
properties['id'] = prop[0]
props = prop[1].split(',')
n_opts = len(props)
if n_opts > 4:
keywd = 'label color band type location'.split(' ')
for i in range(5):
properties.update({keywd[i]: props[i].strip()})
if n_opts > 5:
properties.update({'reject': props[5:]})
self.reject = self._rejection_list(properties['reject'])
else:
txt = 'Syntax error or not enough properties provided for an instrument.'
sys.exit(txt)
self.id = properties['id']
self.label = properties['label']
self.color = '#{:s}'.format(properties['color'])
band = self._extract_band(properties['band'])
self.passband = band[0]
self.gamma = band[1]
self.type = properties['type']
self.location = properties['location']
def _rejection_list(self, string):
string = [a.strip() for a in string]
nb = len(string)
to_reject = np.array([], dtype=np.int)
for i in range(nb):
substring = string[i].split('-')
if len(substring) == 1:
to_reject = np.append(to_reject, int(substring[0]))
elif len(substring) == 2:
a = int(substring[0].strip())
b = int(substring[1].strip())
n = b - a + 1
ids = np.linspace(a, b, n, dtype=np.int)
to_reject = np.append(to_reject, ids)
return to_reject
def _extract_band(self, string):
string = string.split('=')
string = [a.strip() for a in string]
if len(string) == 2:
return string[0].strip(), float(string[1])
else:
return string[0].strip(), 0.0
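# Illustrative sketch (not part of the original module): building an Instrument
# from one (ID, properties-string) pair in the documented format; the ID and
# property values below are made up.
def _example_instrument():
    instr = Instrument(['ogle_i',
                        'OGLE-I, 000000, I=0.64, Magnitude, Earth, 11, 60-68'])
    # instr.label == 'OGLE-I', instr.color == '#000000',
    # instr.passband == 'I', instr.gamma == 0.64, and instr.reject
    # contains 11 plus the data IDs 60..68 (inclusive).
    return instr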
class InstrumentsList(Instrument):
"""Class to store a list of instruments (observatories).
Args:
input (str): file that defines instrument properties.
Attributes:
to_dict(dict): dictionary with keys corresponding to each instrument
ID, and values corresponding to a :obj:`muLAn.instruments.Instrument`
object.
"""
def __init__(self, input):
if isinstance(input, str):
self.file = input
self._load_from_file(input)
self._create_instruments_list()
def _load_from_file(self, fname):
cfgobs = cp.ConfigParser()
cfgobs.read(fname)
self.parser = cfgobs
def _create_instruments_list(self):
item = self.parser.items('ObservatoriesDetails')
n = len(item)
instruments_list = dict()
for i in range(n):
tmp = Instrument(list(item[i]))
instruments_list.update({tmp.id: tmp})
setattr(self, tmp.id, tmp)
self._instruments_list = instruments_list
self.len = n
def to_dict(self):
return self._instruments_list
def prop(self, val):
return self._instruments_list[val[0]]
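# Illustrative sketch (not part of the original module): the kind of input file
# InstrumentsList expects, matching the 'ObservatoriesDetails' section parsed
# above (the file name, observatory IDs and colours are made up):
#
#   [ObservatoriesDetails]
#   ogle_i = OGLE-I, 000000, I=0.64, Magnitude, Earth
#   ogle_v = OGLE-V, 1f77b4, V=0.5, Magnitude, Earth, 11, 60-68
#
#   instruments = InstrumentsList('observatories.ini')
#   instruments.to_dict()   # {'ogle_i': <Instrument>, 'ogle_v': <Instrument>}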
if (__name__ == "__main__"):
pass
| mit |
imaculate/scikit-learn | examples/preprocessing/plot_function_transformer.py | 158 | 1993 | """
=========================================================
Using FunctionTransformer to select columns
=========================================================
Shows how to use a function transformer in a pipeline. If you know your
dataset's first principal component is irrelevant for a classification task,
you can use the FunctionTransformer to select all but the first column of the
PCA transformed data.
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import FunctionTransformer
def _generate_vector(shift=0.5, noise=15):
return np.arange(1000) + (np.random.rand(1000) - shift) * noise
def generate_dataset():
"""
This dataset is two lines with a slope ~ 1, where one has
a y offset of ~100
"""
return np.vstack((
np.vstack((
_generate_vector(),
_generate_vector() + 100,
)).T,
np.vstack((
_generate_vector(),
_generate_vector(),
)).T,
)), np.hstack((np.zeros(1000), np.ones(1000)))
def all_but_first_column(X):
return X[:, 1:]
def drop_first_component(X, y):
"""
Create a pipeline with PCA and the column selector and use it to
transform the dataset.
"""
pipeline = make_pipeline(
PCA(), FunctionTransformer(all_but_first_column),
)
X_train, X_test, y_train, y_test = train_test_split(X, y)
pipeline.fit(X_train, y_train)
return pipeline.transform(X_test), y_test
if __name__ == '__main__':
X, y = generate_dataset()
lw = 0
plt.figure()
plt.scatter(X[:, 0], X[:, 1], c=y, lw=lw)
plt.figure()
X_transformed, y_transformed = drop_first_component(*generate_dataset())
plt.scatter(
X_transformed[:, 0],
np.zeros(len(X_transformed)),
c=y_transformed,
lw=lw,
s=60
)
plt.show()
| bsd-3-clause |
Axelrod-Python/Axelrod-fingerprint | update_fingerprints.py | 1 | 8815 | """
A script to obtain the Ashlock Fingerprints of all strategies in the Axelrod
library.
This writes a hash of the source code of each strategy to file: db.csv.
If the source code of a strategy changes **or** a new strategy is introduced
then the fingerprint is regenerated for that strategy.
"""
import inspect
import hashlib
import csv
import string
import numpy as np
import matplotlib.pyplot as plt
import axelrod as axl
def hash_strategy(strategy):
"""
Hash the source code of a strategy
"""
try:
source_code = "".join(inspect.getsourcelines(strategy)[0])
except OSError: # Some classes are dynamically created
source_code = "".join(inspect.getsourcelines(strategy.strategy)[0])
hash_object = hashlib.md5(source_code.encode("utf-8"))
hashed_source = hash_object.hexdigest()
return hashed_source
def write_strategy_to_db(strategy, filename="db.csv", fingerprint="Ashlock"):
"""
Write the hash of a strategy to the db
"""
hashed_source = hash_strategy(strategy)
with open(filename, "a") as db:
try:
db.write(
"{},{},{}\n".format(
strategy.original_name, fingerprint, hashed_source
)
)
except AttributeError:
db.write(
"{},{},{}\n".format(strategy.name, fingerprint, hashed_source)
)
def read_db(filename="db.csv"):
"""
Read filename and return a dictionary mapping (strategy name, fingerprint
type) pairs to the hash of the strategy's source code
"""
with open(filename, "r") as db:
csvreader = csv.reader(db)
str_to_hash = {(row[0], row[1]): row[2] for row in csvreader}
return str_to_hash
def create_db(filename="db.csv"):
"""
Creates an empty db.csv file
"""
with open(filename, "w"):
pass
def write_data_to_file(fp, filename):
"""
Write the fingerprint data to a file.
"""
columns = ["x", "y", "score"]
with open(filename, "w") as f:
w = csv.writer(f)
w.writerow(columns)
for key, value in fp.data.items():
w.writerow([key.x, key.y, value])
def obtain_fingerprint(
strategy, turns, repetitions, probe=axl.TitForTat, processes=1
):
"""
Obtain the fingerprint for a given strategy and save the figure to the
assets dir
"""
fp = axl.AshlockFingerprint(strategy, probe)
fp.fingerprint(
turns=turns,
repetitions=repetitions,
progress_bar=False,
processes=processes,
)
plt.figure()
fp.plot()
try:
name = strategy.original_name
except AttributeError:
name = strategy.name
plt.tight_layout()
plt.savefig(
"assets/{}.png".format(format_filename(name)), bbox_inches="tight"
)
write_data_to_file(fp, "assets/{}.csv".format(format_filename(name)))
def obtain_transitive_fingerprint(strategy, turns, repetitions, processes=1):
"""
Obtain the transitive fingerprint
for a given strategy and save the figure to the assets dir
"""
fp = axl.TransitiveFingerprint(strategy, number_of_opponents=30)
fp.fingerprint(
turns=turns,
repetitions=repetitions,
progress_bar=False,
processes=processes,
)
plt.figure()
fp.plot()
try:
name = strategy.original_name
except AttributeError:
name = strategy.name
plt.tight_layout()
plt.savefig(
"assets/transitive_{}.png".format(format_filename(name)),
bbox_inches="tight",
)
np.savetxt(
"assets/transitive_{}.csv".format(format_filename(name)), fp.data
)
def obtain_transitive_fingerprint_v_short(
strategy, turns, repetitions, processes=1
):
"""
Obtain the transitive fingerprint against the short run time strategies
for a given strategy and save the figure to the assets dir
"""
short_run_time = [s() for s in axl.short_run_time_strategies]
fp = axl.TransitiveFingerprint(strategy, opponents=short_run_time)
fp.fingerprint(
turns=turns,
repetitions=repetitions,
progress_bar=False,
processes=processes,
)
plt.figure()
fp.plot(display_names=True)
try:
name = strategy.original_name
except AttributeError:
name = strategy.name
plt.tight_layout()
plt.savefig(
"assets/transitive_v_short_{}.png".format(format_filename(name)),
bbox_inches="tight",
)
np.savetxt(
"assets/transitive_v_short_{}.csv".format(format_filename(name)),
fp.data,
)
def format_filename(s):
"""
Take a string and return a valid filename constructed from the string.
Uses a whitelist approach: any characters not present in valid_chars are
removed. Also spaces are replaced with underscores.
Note: this method may produce invalid filenames such as ``, `.` or `..`
When I use this method I prepend a date string like '2009_01_15_19_46_32_'
and append a file extension like '.txt', so I avoid the potential of using
an invalid filename.
Borrowed from https://gist.github.com/seanh/93666
"""
valid_chars = "-_.() {}{}".format(string.ascii_letters, string.digits)
filename = "".join(c for c in s if c in valid_chars)
filename = filename.replace(" ", "_")
return filename
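# Small usage sketch (not part of the original script), following the pattern
# described in the docstring above; the timestamp prefix and extension are
# made-up values.
def _example_format_filename():
    name = format_filename("Tit For Tat: v2?")      # -> 'Tit_For_Tat_v2'
    return "2009_01_15_19_46_32_" + name + ".txt"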
def write_markdown(strategy):
"""
Write a markdown section of a strategy.
"""
try:
name = strategy.original_name
except AttributeError:
name = strategy.name
markdown = """
## {0}
![fingerprint of {0}](./assets/{1}.png)
[data (csv)](./assets/{1}.csv)
![Transitive fingerprint of {0}](./assets/transitive_{1}.png)
[data (csv)](./assets/transitive_{1}.csv)
![Transitive fingerprint of {0} against short run time](./assets/transitive_v_short_{1}.png)
[data (csv)](./assets/transitive_v_short_{1}.csv)
""".format(
name, format_filename(name)
)
return markdown
def main(
turns,
repetitions,
transitive_turns,
transitive_repetitions,
transitive_v_short_turns,
transitive_v_short_repetitions,
processes,
):
"""
Fingerprint all strategies; if a strategy has already been fingerprinted it
is not rerun.
"""
version = axl.__version__
markdown = """# Ashlock and transitive fingerprints
See:
[axelrod.readthedocs.io/en/latest/tutorials/further_topics/fingerprinting.html#fingerprinting](http://axelrod.readthedocs.io/en/latest/tutorials/further_topics/fingerprinting.html#fingerprinting)
All strategies included from Axelrod version {}.
This README.md file is autogenerated by running:
```
$ python update_fingerprints.py
```
Each individual fingerprint can be obtained by running:
```python
import axelrod as axl
fp = axl.AshlockFingerprint(strategy, probe)
fp.fingerprint(turns={}, repetitions={})
fp.plot()
```
# Axelrod library fingerprints
""".format(
version, turns, repetitions
)
try:
db = read_db()
except FileNotFoundError:
create_db()
db = read_db()
for strategy in axl.short_run_time_strategies:
name = strategy.name
signature = hash_strategy(strategy)
fp = "Ashlock"
if (name, fp) not in db or db[name, fp] != signature:
obtain_fingerprint(
strategy, turns, repetitions, processes=processes
)
write_strategy_to_db(strategy, fingerprint=fp)
fp = "Transitive"
if (name, fp) not in db or db[name, fp] != signature:
obtain_transitive_fingerprint(
strategy,
transitive_turns,
transitive_repetitions,
processes=processes,
)
write_strategy_to_db(strategy, fingerprint=fp)
fp = "Transitive_v_short"
if (name, fp) not in db or db[name, fp] != signature:
obtain_transitive_fingerprint_v_short(
strategy,
transitive_v_short_turns,
transitive_v_short_repetitions,
processes=processes,
)
write_strategy_to_db(strategy, fingerprint=fp)
markdown += write_markdown(strategy)
with open("README.md", "w") as outfile:
outfile.write(markdown)
if __name__ == "__main__":
turns, repetitions = 200, 20
transitive_turns, transitive_repetitions = 200, 20
transitive_v_short_turns, transitive_v_short_repetitions = 200, 20
processes = 20
main(
turns=turns,
repetitions=repetitions,
transitive_turns=transitive_turns,
transitive_repetitions=transitive_repetitions,
transitive_v_short_turns=transitive_v_short_turns,
transitive_v_short_repetitions=transitive_v_short_repetitions,
processes=processes,
)
| mit |
hwp-kiel/opencali | src/ui/mplwidget.py | 1 | 1403 | # Python Qt4 bindings for GUI objects
from PyQt4 import QtGui
# import the Qt4Agg FigureCanvas object, which binds Figure to the
# Qt4Agg backend. It also inherits from QWidget
from matplotlib.backends.backend_qt4agg \
import FigureCanvasQTAgg as FigureCanvas
# Matplotlib Figure object
from matplotlib.figure import Figure
class MplCanvas(FigureCanvas):
"""Class to represent the FigureCanvas widget"""
def __init__(self):
# setup Matplotlib Figure and Axis
self.fig = Figure()
self.ax = self.fig.add_subplot(111)
# initialization of the canvas
FigureCanvas.__init__(self, self.fig)
# we define the widget as expandable
FigureCanvas.setSizePolicy(self,
QtGui.QSizePolicy.Expanding,
QtGui.QSizePolicy.Expanding)
# notify the system of updated policy
FigureCanvas.updateGeometry(self)
class MplWidget(QtGui.QWidget):
"""Widget defined in Qt Designer"""
def __init__(self, parent = None):
# initialization of Qt MainWindow widget
QtGui.QWidget.__init__(self, parent)
# set the canvas to the Matplotlib widget
self.canvas = MplCanvas()
# create a vertical box layout
self.vbl = QtGui.QVBoxLayout()
# add mpl widget to vertical box
self.vbl.addWidget(self.canvas)
# set the layout to the vertical box
self.setLayout(self.vbl)
| gpl-2.0 |
Evensgn/MNIST-learning | mnist_svm.py | 1 | 1201 | import numpy as np
import matplotlib.pyplot as plt
GRAY_SCALE_RANGE = 255
import pickle
data_filename = 'data_deskewed.pkl'
print('Loading data from file \'' + data_filename + '\' ...')
with open(data_filename, 'rb') as f:
train_labels = pickle.load(f)
train_images = pickle.load(f)
test_labels = pickle.load(f)
test_images = pickle.load(f)
num_pixel = pickle.load(f)
print('Data loading complete.')
train_images = np.array(train_images)
train_images.resize(train_images.size // num_pixel, num_pixel)
test_images = np.array(test_images)
test_images.resize(test_images.size // num_pixel, num_pixel)
test_labels = np.array(test_labels)
train_labels = np.array(train_labels)
## normalization
train_images = train_images / GRAY_SCALE_RANGE
test_images = test_images / GRAY_SCALE_RANGE
from sklearn import svm, metrics
# clf = svm.SVC(gamma = 0.001)
clf = svm.SVC(kernel = 'linear')
clf.fit(train_images[:1000], train_labels[:1000])
prediction = clf.predict(test_images)
print("Classification report for classifier %s:\n%s\n"
% (clf, metrics.classification_report(test_labels, prediction)))
print("Confusion matrix:\n%s" % metrics.confusion_matrix(test_labels, prediction)) | mit |
dr-jpk/saltefficiency | weekly/weekly_summary_plots.py | 1 | 8536 | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 10:06:20 2015
@author: jpk
ToDo: automate the subsystems check. A query that checks all the subsystems in
case things change in the future should prevent issues with the pie chart
colours
"""
import sys
import os
import pandas as pd
import pandas.io.sql as psql
import MySQLdb
import matplotlib.pyplot as pl
import report_queries as rq
import numpy as np
import matplotlib.dates as mdates
def priority_breakdown_pie_chart(x, ds, dirname='./logs/'):
'''
make a pie chart from the dataframe
'''
temp = list(x['Priority'])
no_blocks = map(int, list(x['No. Blocks']))
labels = ['P'+str(temp[i])+' - ' + str(no_blocks[i]) for i in range(0,len(temp))]
values = list(x['Tsec'])
# set colours for the priorities
colours = ['b','c','g','m','r']
fig = pl.figure(facecolor='w', figsize=[5, 5])
ax = fig.add_subplot(111)
ax.set_aspect=1
pie_wedge_collection = ax.pie(values,
colors=colours,
pctdistance=0.8,
radius = 0.95,
autopct='%1.1f%%',
textprops = {'fontsize':10,
'color':'w'},
wedgeprops = {'edgecolor':'white'})
ax.legend(labels=labels, frameon=False, loc=(-0.15,0.7), fontsize=8)
title_txt = 'Weekly Priority Breakdown - ' + str(int(x['No. Blocks'].sum())) + ' Blocks Total' + '\n {}'.format(ds)
ax.set_title(title_txt, fontsize=12)
filename = dirname+'priority_breakdown_pie_chart_' +'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(filename, dpi=100)
# pl.show()
def weekly_total_time_breakdown_pie_chart(x, ds, dirname='./logs/'):
labels = ['Science - {}'.format(x['ScienceTime'][0]),
'Engineering - {}'.format(x['EngineeringTime'][0]),
'Weather - {}'.format(x['TimeLostToWeather'][0]),
'Problems - {}'.format(x['TimeLostToProblems'][0])]
values = [int(x['Science']),
int(x['Engineering']),
int(x['Weather']),
int(x['Problems'])]
colours = ['b','c','g','r']
fig = pl.figure(facecolor='w', figsize=[5, 5])
ax = fig.add_subplot(111)
ax.set_aspect=1
pie_wedge_collection = ax.pie(values,
colors=colours,
pctdistance=0.8,
radius = 0.95,
autopct='%1.1f%%',
textprops = {'fontsize':10,
'color':'w'},
wedgeprops = {'edgecolor':'white'})
ax.legend(labels=labels, frameon=False, loc=(-0.15,0.8), fontsize=8)
title_txt = 'Weekly Time Breakdown - {} Total\n{}'.format(x['NightLength'][0], ds)
ax.set_title(title_txt, fontsize=12)
filename = 'weekly_total_time_breakdown_pie_chart_' + '-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(dirname+filename, dpi=100)
# pl.show()
def weekly_subsystem_breakdown_pie_chart(x, y, col_dict, ds, dirname='./logs/'):
subsystem = list(x['SaltSubsystem'])
time = list(x['TotalTime'])
labels = [subsystem[i] + ' - ' + time[i] for i in range(0,len(subsystem))]
values = list(x['Time'])
colours = [col_dict[i] for i in subsystem]
fig = pl.figure(facecolor='w', figsize=[5, 5])
ax = fig.add_subplot(111)
ax.set_aspect=1
pie_wedge_collection = ax.pie(values,
colors=colours,
pctdistance=0.8,
radius = 0.95,
autopct='%1.1f%%',
textprops = {'fontsize':10,
'color':'k'},
wedgeprops = {'edgecolor':'white'})
ax.legend(labels=labels, frameon=False, loc=(-0.15,0.65), fontsize=8)
title_txt = 'Weekly Problems Breakdown - {}\n{}'.format(y['TotalTime'][0], ds)
ax.set_title(title_txt, fontsize=12)
filename = 'weekly_subsystem_breakdown_pie_chart_'+'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(dirname+filename, dpi=100)
# pl.show()
def weekly_time_breakdown(x, ds, dirname='./logs/'):
'''
produce a stacked bar chart of the time breakdown per day for the
past week.
'''
fig = pl.figure(figsize=(10,4),facecolor='w')
ax = fig.add_subplot(111)
width = 0.55
ax.grid(which='major', axis='y')
# science time per day
s = ax.bar(x['Date'],
x['Science'],
width,
color = 'b',
edgecolor='w')
# engineering time per day
e = ax.bar(x['Date'],
x['Engineering'],
width,
bottom = x['Science'],
color = 'c',
edgecolor='w')
# weather time per day
w = ax.bar(x['Date'],
x['Weather'],
width,
bottom = x['Science'] + x['Engineering'],
color = 'g',
edgecolor='w')
# problem time per day
p = ax.bar(x['Date'],
x['Problems'],
width,
bottom = x['Science'] + x['Engineering'] + x['Weather'],
color = 'r',
edgecolor='w')
ax.set_ylabel('Hours', fontsize=11)
ax.set_xlabel('Date', fontsize=11)
fig.legend((s[0], e[0], w[0], p[0]),
('Science Time',
'Engineering Time',
'Time lost to Weather',
'Time lost to Problems'),
frameon=False,
fontsize=10,
loc=(0.0,0.70))
title_txt = 'Weekly Time Breakdown - {}'.format(ds)
ax.set_title(title_txt, fontsize=11)
ax.xaxis_date()
date_formatter = mdates.DateFormatter('%a \n %Y-%m-%d')
ax.xaxis.set_major_formatter(date_formatter)
for tick in ax.xaxis.get_major_ticks():
tick.label.set_fontsize(8)
for tick in ax.yaxis.get_major_ticks():
tick.label.set_fontsize(8)
fig.autofmt_xdate(rotation=0, ha = 'left')
fig.subplots_adjust(left=0.22, bottom=0.20, right=0.96, top=None,
wspace=None, hspace=None)
pl.autoscale()
filename = 'weekly_time_breakdown_'+'-'.join([ds.split()[0].replace('-',''), ds.split()[2].replace('-','')])+'.png'
pl.savefig(dirname+filename, dpi=100)
# pl.show()
if __name__=='__main__':
# set the colours for all the subsystems:
subsystems_list = ['BMS', 'DOME', 'TC', 'PMAS', 'SCAM', 'TCS', 'STRUCT',
'TPC', 'HRS', 'PFIS','Proposal', 'Operations',
'ELS', 'ESKOM']
cmap = pl.cm.jet
colour_map = cmap(np.linspace(0.0, 1.0, len(subsystems_list)))
col_dict = {}
for i in range(0, len(subsystems_list)):
col_dict[subsystems_list[i]] = colour_map[i]
# open mysql connection to the sdb
mysql_con = MySQLdb.connect(host='sdb.cape.saao.ac.za',
port=3306,user=os.environ['SDBUSER'],
passwd=os.environ['SDBPASS'], db='sdb')
obsdate = sys.argv[1]
date = '{}-{}-{}'.format(obsdate[0:4], obsdate[4:6], obsdate[6:8])
interval = sys.argv[2]
# use the connection to get the required data: _d
dr_d = rq.date_range(mysql_con, date, interval=interval)
wpb_d = rq.weekly_priority_breakdown(mysql_con, date, interval=interval)
wtb_d = rq.weekly_time_breakdown(mysql_con, date, interval=interval)
wttb_d = rq.weekly_total_time_breakdown(mysql_con, date, interval=interval)
wsb_d = rq.weekly_subsystem_breakdown(mysql_con, date, interval=interval)
wsbt_d = rq.weekly_subsystem_breakdown_total(mysql_con, date, interval=interval)
wtb_d = rq.weekly_time_breakdown(mysql_con, date, interval=interval)
date_string = '{} - {}'.format(dr_d['StartDate'][0], dr_d['EndDate'][0])
# testing the pie_chart method
priority_breakdown_pie_chart(wpb_d, date_string)
weekly_total_time_breakdown_pie_chart(wttb_d, date_string)
weekly_subsystem_breakdown_pie_chart(wsb_d, wsbt_d, col_dict, date_string)
weekly_time_breakdown(wtb_d, date_string)
mysql_con.close()
| bsd-3-clause |
dhalleine/tensorflow | tensorflow/examples/skflow/iris.py | 1 | 1465 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import metrics, cross_validation
import tensorflow as tf
from tensorflow.contrib import learn
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
classifier = learn.DNNClassifier(hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
score = metrics.accuracy_score(y_test, classifier.predict(x_test))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
amueller/advanced_training | plots/plot_interactive_tree.py | 1 | 2695 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_blobs
from sklearn.tree import DecisionTreeClassifier
from sklearn.externals.six import StringIO # doctest: +SKIP
from sklearn.tree import export_graphviz
from scipy.misc import imread
from scipy import ndimage
import os
import re
GRAPHVIS_PATH = r"C:\Program Files (x86)\Graphviz2.38\bin"
if GRAPHVIS_PATH not in os.environ['PATH']:
os.environ['PATH'] += ";" + GRAPHVIS_PATH
X, y = make_blobs(centers=[[0, 0], [1, 1]], random_state=61526, n_samples=50)
def tree_image(tree, fout=None):
try:
import graphviz
except ImportError:
# make a hacky white plot
x = np.ones((10, 10))
x[0, 0] = 0
return x
dot_data = StringIO()
export_graphviz(tree, out_file=dot_data, max_depth=3, impurity=False)
data = dot_data.getvalue()
#data = re.sub(r"gini = 0\.[0-9]+\\n", "", dot_data.getvalue())
data = re.sub(r"samples = [0-9]+\\n", "", data)
data = re.sub(r"\\nsamples = [0-9]+", "", data)
data = re.sub(r"value", "counts", data)
graph = graphviz.Source(data, format="png")
if fout is None:
fout = "tmp"
graph.render(fout)
return imread(fout + ".png")
def plot_tree(max_depth=1):
fig, ax = plt.subplots(1, 2, figsize=(15, 7))
h = 0.02
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
if max_depth != 0:
tree = DecisionTreeClassifier(max_depth=max_depth, random_state=1).fit(X, y)
Z = tree.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
Z = Z.reshape(xx.shape)
faces = tree.tree_.apply(np.c_[xx.ravel(), yy.ravel()].astype(np.float32))
faces = faces.reshape(xx.shape)
border = ndimage.laplace(faces) != 0
ax[0].contourf(xx, yy, Z, alpha=.4)
ax[0].scatter(xx[border], yy[border], marker='.', s=1)
ax[0].set_title("max_depth = %d" % max_depth)
img = tree_image(tree)
if img is not None:
ax[1].imshow(img)
ax[1].axis("off")
else:
ax[1].set_visible(False)
else:
ax[0].set_title("data set")
ax[1].set_visible(False)
ax[0].scatter(X[:, 0], X[:, 1], c=np.array(['b', 'r'])[y], s=60)
ax[0].set_xlim(x_min, x_max)
ax[0].set_ylim(y_min, y_max)
ax[0].set_xticks(())
ax[0].set_yticks(())
def plot_tree_interactive():
from IPython.html.widgets import interactive, IntSlider
slider = IntSlider(min=0, max=8, step=1, value=0)
return interactive(plot_tree, max_depth=slider)
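# Usage sketch (not part of the original module): in a Jupyter/IPython notebook
# the widget is displayed by evaluating the return value in a cell, e.g.
#
#   from plot_interactive_tree import plot_tree_interactive
#   plot_tree_interactive()
#
# (the import path assumes the plots/ directory is on sys.path).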
| bsd-2-clause |
rsheftel/pandas_market_calendars | tests/test_bse_calendar.py | 1 | 1192 | import datetime
import pandas as pd
import pytz
from pandas_market_calendars.exchange_calendar_bse import BSEExchangeCalendar, BSEClosedDay
def test_time_zone():
assert BSEExchangeCalendar().tz == pytz.timezone('Asia/Calcutta')
assert BSEExchangeCalendar().name == 'BSE'
def test_holidays():
bse_calendar = BSEExchangeCalendar()
trading_days = bse_calendar.valid_days(pd.Timestamp('2004-01-01'), pd.Timestamp('2018-12-31'))
for session_label in BSEClosedDay:
assert session_label not in trading_days
def test_open_close_time():
bse_calendar = BSEExchangeCalendar()
india_time_zone = pytz.timezone('Asia/Calcutta')
bse_schedule = bse_calendar.schedule(
start_date=india_time_zone.localize(datetime.datetime(2015, 1, 14)),
end_date=india_time_zone.localize(datetime.datetime(2015, 1, 16))
)
assert BSEExchangeCalendar.open_at_time(
schedule=bse_schedule,
timestamp=india_time_zone.localize(datetime.datetime(2015, 1, 14, 11, 0))
)
assert not BSEExchangeCalendar.open_at_time(
schedule=bse_schedule,
timestamp=india_time_zone.localize(datetime.datetime(2015, 1, 9, 12, 0))
)
| mit |
dipanjanS/text-analytics-with-python | Old-First-Edition/Ch04_Text_Classification/classifier_evaluation_demo.py | 1 | 3277 | # -*- coding: utf-8 -*-
"""
Created on Fri Sep 02 12:36:55 2016
@author: DIP
"""
from sklearn import metrics
import numpy as np
import pandas as pd
from collections import Counter
actual_labels = ['spam', 'ham', 'spam', 'spam', 'spam',
'ham', 'ham', 'spam', 'ham', 'spam',
'spam', 'ham', 'ham', 'ham', 'spam',
'ham', 'ham', 'spam', 'spam', 'ham']
predicted_labels = ['spam', 'spam', 'spam', 'ham', 'spam',
'spam', 'ham', 'ham', 'spam', 'spam',
'ham', 'ham', 'spam', 'ham', 'ham',
'ham', 'spam', 'ham', 'spam', 'spam']
ac = Counter(actual_labels)
pc = Counter(predicted_labels)
print 'Actual counts:', ac.most_common()
print 'Predicted counts:', pc.most_common()
cm = metrics.confusion_matrix(y_true=actual_labels,
y_pred=predicted_labels,
labels=['spam','ham'])
print pd.DataFrame(data=cm,
columns=pd.MultiIndex(levels=[['Predicted:'],
['spam','ham']],
labels=[[0,0],[0,1]]),
index=pd.MultiIndex(levels=[['Actual:'],
['spam','ham']],
labels=[[0,0],[0,1]]))
positive_class = 'spam'
true_positive = 5.
false_positive = 6.
false_negative = 5.
true_negative = 4.
accuracy = np.round(
metrics.accuracy_score(y_true=actual_labels,
y_pred=predicted_labels),2)
accuracy_manual = np.round(
(true_positive + true_negative) /
(true_positive + true_negative +
false_negative + false_positive),2)
print 'Accuracy:', accuracy
print 'Manually computed accuracy:', accuracy_manual
precision = np.round(
metrics.precision_score(y_true=actual_labels,
y_pred=predicted_labels,
pos_label=positive_class),2)
precision_manual = np.round(
(true_positive) /
(true_positive + false_positive),2)
print 'Precision:', precision
print 'Manually computed precision:', precision_manual
recall = np.round(
metrics.recall_score(y_true=actual_labels,
y_pred=predicted_labels,
pos_label=positive_class),2)
recall_manual = np.round(
(true_positive) /
(true_positive + false_negative),2)
print 'Recall:', recall
print 'Manually computed recall:', recall_manual
f1_score = np.round(
metrics.f1_score(y_true=actual_labels,
y_pred=predicted_labels,
pos_label=positive_class),2)
f1_score_manual = np.round(
(2 * precision * recall) /
(precision + recall),2)
print 'F1 score:', f1_score
print 'Manually computed F1 score:', f1_score_manual
| apache-2.0 |
Averroes/statsmodels | statsmodels/sandbox/examples/try_multiols.py | 33 | 1243 | # -*- coding: utf-8 -*-
"""
Created on Sun May 26 13:23:40 2013
Author: Josef Perktold, based on Enrico Giampieri's multiOLS
"""
#import numpy as np
import pandas as pd
import statsmodels.api as sm
from statsmodels.sandbox.multilinear import multiOLS, multigroup
data = sm.datasets.longley.load_pandas()
df = data.exog
df['TOTEMP'] = data.endog
#This will perform the specified linear model on all the
#other columns of the dataframe
res0 = multiOLS('GNP + 1', df)
#This selects only a certain subset of the columns
res = multiOLS('GNP + 0', df, ['GNPDEFL', 'TOTEMP', 'POP'])
print(res.to_string())
url = "http://vincentarelbundock.github.com/"
url = url + "Rdatasets/csv/HistData/Guerry.csv"
df = pd.read_csv(url, index_col=1) #'dept')
#evaluate the relationship between the various parameters with the Wealth
pvals = multiOLS('Wealth', df)['adj_pvals', '_f_test']
#define the groups
groups = {}
groups['crime'] = ['Crime_prop', 'Infanticide',
'Crime_parents', 'Desertion', 'Crime_pers']
groups['religion'] = ['Donation_clergy', 'Clergy', 'Donations']
groups['wealth'] = ['Commerce', 'Lottery', 'Instruction', 'Literacy']
#do the analysis of the significance
res3 = multigroup(pvals < 0.05, groups)
print(res3)
| bsd-3-clause |
nlhepler/freetype-py3 | examples/glyph-vector-2.py | 1 | 3414 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
#
# FreeType high-level python API - Copyright 2011 Nicolas P. Rougier
# Distributed under the terms of the new BSD license.
#
# -----------------------------------------------------------------------------
'''
Show how to access glyph outline description.
'''
from freetype import *
if __name__ == '__main__':
import numpy
import matplotlib.pyplot as plt
from matplotlib.path import Path
import matplotlib.patches as patches
face = Face(b'./Vera.ttf')
face.set_char_size( 32*64 )
face.load_char('g')
slot = face.glyph
bitmap = face.glyph.bitmap
width = face.glyph.bitmap.width
rows = face.glyph.bitmap.rows
pitch = face.glyph.bitmap.pitch
data = []
for i in range(rows):
data.extend(bitmap.buffer[i*pitch:i*pitch+width])
Z = numpy.array(data,dtype=numpy.ubyte).reshape(rows, width)
outline = slot.outline
points = numpy.array(outline.points, dtype=[('x',float), ('y',float)])
x, y = points['x'], points['y']
figure = plt.figure(figsize=(8,10))
axis = figure.add_subplot(111)
#axis.scatter(points['x'], points['y'], alpha=.25)
start, end = 0, 0
VERTS, CODES = [], []
# Iterate over each contour
for i in range(len(outline.contours)):
end = outline.contours[i]
points = outline.points[start:end+1]
points.append(points[0])
tags = outline.tags[start:end+1]
tags.append(tags[0])
segments = [ [points[0],], ]
for j in range(1, len(points) ):
segments[-1].append(points[j])
if tags[j] & (1 << 0) and j < (len(points)-1):
segments.append( [points[j],] )
verts = [points[0], ]
codes = [Path.MOVETO,]
for segment in segments:
if len(segment) == 2:
verts.extend(segment[1:])
codes.extend([Path.LINETO])
elif len(segment) == 3:
verts.extend(segment[1:])
codes.extend([Path.CURVE3, Path.CURVE3])
else:
verts.append(segment[1])
codes.append(Path.CURVE3)
for i in range(1,len(segment)-2):
A,B = segment[i], segment[i+1]
C = ((A[0]+B[0])/2.0, (A[1]+B[1])/2.0)
verts.extend([ C, B ])
codes.extend([ Path.CURVE3, Path.CURVE3])
verts.append(segment[-1])
codes.append(Path.CURVE3)
VERTS.extend(verts)
CODES.extend(codes)
start = end+1
# Draw glyph
path = Path(VERTS, CODES)
glyph = patches.PathPatch(path, fill = True, facecolor=(0.8,0.5,0.8), alpha=.25, lw=0)
glyph_outline = patches.PathPatch(path, fill = False, edgecolor='black', lw=3)
plt.imshow(Z, extent=[x.min(), x.max(),y.min(), y.max()],
interpolation='nearest', cmap = plt.cm.gray_r, vmin=0, vmax=400)
plt.xticks(numpy.linspace(x.min(), x.max(), Z.shape[1]+1), ())
plt.yticks(numpy.linspace(y.min(), y.max(), Z.shape[0]+1), ())
plt.grid(color='k', linewidth=1, linestyle='-')
axis.add_patch(glyph)
axis.add_patch(glyph_outline)
axis.set_xlim(x.min(), x.max())
axis.set_ylim(y.min(), y.max())
plt.savefig('test.pdf')
plt.show()
| bsd-3-clause |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/dask/dataframe/__init__.py | 2 | 1278 | from __future__ import print_function, division, absolute_import
try:
from .core import (DataFrame, Series, Index, _Frame, map_partitions,
repartition, to_datetime, to_timedelta)
from .groupby import Aggregation
from .io import (from_array, from_pandas, from_bcolz,
from_dask_array, read_hdf, read_sql_table,
from_delayed, read_csv, to_csv, read_table,
demo, to_hdf, to_records, to_bag, read_json, to_json)
from .optimize import optimize
from .multi import merge, concat
from . import rolling
from ..base import compute
from .reshape import get_dummies, pivot_table, melt
from .utils import assert_eq
from .io.orc import read_orc
try:
from .io import read_parquet, to_parquet
except ImportError:
pass
try:
from .core import isna
except ImportError:
pass
except ImportError as e:
msg = ("Dask dataframe requirements are not installed.\n\n"
"Please either conda or pip install as follows:\n\n"
" conda install dask # either conda install\n"
" pip install dask[dataframe] --upgrade # or pip install")
raise ImportError(str(e) + '\n\n' + msg)
| gpl-3.0 |
lifei96/Medium-crawler-with-data-analyzer | User_Crawler/get_data.py | 2 | 1601 | # -*- coding: utf-8 -*-
import pandas as pd
import json
import datetime
import os
def read_users():
users = list()
file_in = open('./username_list.txt', 'r')
username_list = str(file_in.read()).split(' ')
file_in.close()
num = 0
for username in username_list:
if not username:
continue
if not os.path.exists('./data/Users/%s.json' % username):
continue
if not os.path.exists('./data/Twitter/%s_t.json' % username):
continue
try:
file_in = open('./data/Users/%s.json' % username, 'r')
raw_data = json.loads(str(file_in.read()))
file_in.close()
user = dict()
user['followers_count'] = raw_data['profile']['user']['socialStats']['usersFollowedByCount']
user['following_count'] = raw_data['profile']['user']['socialStats']['usersFollowedCount']
file_in = open('./data/Twitter/%s_t.json' % username, 'r')
raw_data = json.loads(str(file_in.read()))
file_in.close()
user['t_following_count'] = raw_data['profile_user']['friends_count']
user['t_followers_count'] = raw_data['profile_user']['followers_count']
users.append(user)
except:
continue
num += 1
print(username)
print(num)
return pd.read_json(json.dumps(users))
if __name__ == '__main__':
if not os.path.exists('./result'):
os.mkdir('./result')
users_data = read_users()
users_data.to_csv('./result/twitter.csv', sep='\t', encoding='utf-8')
| mit |
MechCoder/scikit-learn | sklearn/metrics/__init__.py | 8 | 3701 | """
The :mod:`sklearn.metrics` module includes score functions, performance metrics
and pairwise metrics and distance computations.
"""
from .ranking import auc
from .ranking import average_precision_score
from .ranking import coverage_error
from .ranking import label_ranking_average_precision_score
from .ranking import label_ranking_loss
from .ranking import precision_recall_curve
from .ranking import roc_auc_score
from .ranking import roc_curve
from .ranking import dcg_score
from .ranking import ndcg_score
from .classification import accuracy_score
from .classification import classification_report
from .classification import cohen_kappa_score
from .classification import confusion_matrix
from .classification import f1_score
from .classification import fbeta_score
from .classification import hamming_loss
from .classification import hinge_loss
from .classification import jaccard_similarity_score
from .classification import log_loss
from .classification import matthews_corrcoef
from .classification import precision_recall_fscore_support
from .classification import precision_score
from .classification import recall_score
from .classification import zero_one_loss
from .classification import brier_score_loss
from . import cluster
from .cluster import adjusted_mutual_info_score
from .cluster import adjusted_rand_score
from .cluster import completeness_score
from .cluster import consensus_score
from .cluster import homogeneity_completeness_v_measure
from .cluster import homogeneity_score
from .cluster import mutual_info_score
from .cluster import normalized_mutual_info_score
from .cluster import fowlkes_mallows_score
from .cluster import silhouette_samples
from .cluster import silhouette_score
from .cluster import calinski_harabaz_score
from .cluster import v_measure_score
from .pairwise import euclidean_distances
from .pairwise import pairwise_distances
from .pairwise import pairwise_distances_argmin
from .pairwise import pairwise_distances_argmin_min
from .pairwise import pairwise_kernels
from .regression import explained_variance_score
from .regression import mean_absolute_error
from .regression import mean_squared_error
from .regression import mean_squared_log_error
from .regression import median_absolute_error
from .regression import r2_score
from .scorer import make_scorer
from .scorer import SCORERS
from .scorer import get_scorer
__all__ = [
'accuracy_score',
'adjusted_mutual_info_score',
'adjusted_rand_score',
'auc',
'average_precision_score',
'classification_report',
'cluster',
'completeness_score',
'confusion_matrix',
'consensus_score',
'coverage_error',
'euclidean_distances',
'explained_variance_score',
'f1_score',
'fbeta_score',
'get_scorer',
'hamming_loss',
'hinge_loss',
'homogeneity_completeness_v_measure',
'homogeneity_score',
'jaccard_similarity_score',
'label_ranking_average_precision_score',
'label_ranking_loss',
'log_loss',
'make_scorer',
'matthews_corrcoef',
'mean_absolute_error',
'mean_squared_error',
'mean_squared_log_error',
'median_absolute_error',
'mutual_info_score',
'normalized_mutual_info_score',
'pairwise_distances',
'pairwise_distances_argmin',
'pairwise_distances_argmin_min',
'pairwise_kernels',
'precision_recall_curve',
'precision_recall_fscore_support',
'precision_score',
'r2_score',
'recall_score',
'roc_auc_score',
'roc_curve',
'SCORERS',
'silhouette_samples',
'silhouette_score',
'v_measure_score',
'zero_one_loss',
'brier_score_loss',
'dcg_score',
'ndcg_score'
]
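# Illustrative usage sketch (doctest-style, not exhaustive):
#
#     >>> from sklearn.metrics import accuracy_score, confusion_matrix
#     >>> accuracy_score([0, 1, 1, 0], [0, 1, 0, 0])
#     0.75
#     >>> confusion_matrix([0, 1, 1, 0], [0, 1, 0, 0])
#     array([[2, 0],
#            [1, 1]])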
| bsd-3-clause |
willyd/fast-rcnn | tools/demo.py | 22 | 5446 | #!/usr/bin/env python
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
"""
Demo script showing detections in sample images.
See README.md for installation instructions before running.
"""
import _init_paths
from fast_rcnn.config import cfg
from fast_rcnn.test import im_detect
from utils.cython_nms import nms
from utils.timer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.io as sio
import caffe, os, sys, cv2
import argparse
CLASSES = ('__background__',
'aeroplane', 'bicycle', 'bird', 'boat',
'bottle', 'bus', 'car', 'cat', 'chair',
'cow', 'diningtable', 'dog', 'horse',
'motorbike', 'person', 'pottedplant',
'sheep', 'sofa', 'train', 'tvmonitor')
NETS = {'vgg16': ('VGG16',
'vgg16_fast_rcnn_iter_40000.caffemodel'),
'vgg_cnn_m_1024': ('VGG_CNN_M_1024',
'vgg_cnn_m_1024_fast_rcnn_iter_40000.caffemodel'),
'caffenet': ('CaffeNet',
'caffenet_fast_rcnn_iter_40000.caffemodel')}
def vis_detections(im, class_name, dets, thresh=0.5):
"""Draw detected bounding boxes."""
inds = np.where(dets[:, -1] >= thresh)[0]
if len(inds) == 0:
return
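    # cv2.imread loads images in BGR channel order; reverse the channels to
    # RGB so matplotlib displays the colors correctly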
im = im[:, :, (2, 1, 0)]
fig, ax = plt.subplots(figsize=(12, 12))
ax.imshow(im, aspect='equal')
for i in inds:
bbox = dets[i, :4]
score = dets[i, -1]
ax.add_patch(
plt.Rectangle((bbox[0], bbox[1]),
bbox[2] - bbox[0],
bbox[3] - bbox[1], fill=False,
edgecolor='red', linewidth=3.5)
)
ax.text(bbox[0], bbox[1] - 2,
'{:s} {:.3f}'.format(class_name, score),
bbox=dict(facecolor='blue', alpha=0.5),
fontsize=14, color='white')
ax.set_title(('{} detections with '
'p({} | box) >= {:.1f}').format(class_name, class_name,
thresh),
fontsize=14)
plt.axis('off')
plt.tight_layout()
plt.draw()
def demo(net, image_name, classes):
"""Detect object classes in an image using pre-computed object proposals."""
    # Load pre-computed Selective Search object proposals
box_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo',
image_name + '_boxes.mat')
obj_proposals = sio.loadmat(box_file)['boxes']
# Load the demo image
im_file = os.path.join(cfg.ROOT_DIR, 'data', 'demo', image_name + '.jpg')
im = cv2.imread(im_file)
# Detect all object classes and regress object bounds
timer = Timer()
timer.tic()
scores, boxes = im_detect(net, im, obj_proposals)
timer.toc()
print ('Detection took {:.3f}s for '
'{:d} object proposals').format(timer.total_time, boxes.shape[0])
# Visualize detections for each class
CONF_THRESH = 0.8
NMS_THRESH = 0.3
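    # For each class: keep boxes scoring at least CONF_THRESH, then run
    # non-maximum suppression with overlap threshold NMS_THRESH to drop
    # near-duplicate detections before plotting.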
for cls in classes:
cls_ind = CLASSES.index(cls)
cls_boxes = boxes[:, 4*cls_ind:4*(cls_ind + 1)]
cls_scores = scores[:, cls_ind]
keep = np.where(cls_scores >= CONF_THRESH)[0]
cls_boxes = cls_boxes[keep, :]
cls_scores = cls_scores[keep]
dets = np.hstack((cls_boxes,
cls_scores[:, np.newaxis])).astype(np.float32)
keep = nms(dets, NMS_THRESH)
dets = dets[keep, :]
print 'All {} detections with p({} | box) >= {:.1f}'.format(cls, cls,
CONF_THRESH)
vis_detections(im, cls, dets, thresh=CONF_THRESH)
def parse_args():
"""Parse input arguments."""
    parser = argparse.ArgumentParser(description='Fast R-CNN demo')
parser.add_argument('--gpu', dest='gpu_id', help='GPU device id to use [0]',
default=0, type=int)
parser.add_argument('--cpu', dest='cpu_mode',
help='Use CPU mode (overrides --gpu)',
action='store_true')
parser.add_argument('--net', dest='demo_net', help='Network to use [vgg16]',
choices=NETS.keys(), default='vgg16')
args = parser.parse_args()
return args
if __name__ == '__main__':
args = parse_args()
prototxt = os.path.join(cfg.ROOT_DIR, 'models', NETS[args.demo_net][0],
'test.prototxt')
caffemodel = os.path.join(cfg.ROOT_DIR, 'data', 'fast_rcnn_models',
NETS[args.demo_net][1])
if not os.path.isfile(caffemodel):
raise IOError(('{:s} not found.\nDid you run ./data/scripts/'
'fetch_fast_rcnn_models.sh?').format(caffemodel))
if args.cpu_mode:
caffe.set_mode_cpu()
else:
caffe.set_mode_gpu()
caffe.set_device(args.gpu_id)
net = caffe.Net(prototxt, caffemodel, caffe.TEST)
print '\n\nLoaded network {:s}'.format(caffemodel)
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/000004.jpg'
demo(net, '000004', ('car',))
print '~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~'
print 'Demo for data/demo/001551.jpg'
demo(net, '001551', ('sofa', 'tvmonitor'))
plt.show()
| mit |
clawpack/clawpack-4.x | doc/sphinx/example-acoustics-1d/setplot_2.py | 2 | 2095 |
"""
Single figure and axes with two items
=======================================
Only the pressure q[0] is plotted.
In this example the line and points are plotted in different colors by
specifying a second item on the same axes.
"""
#--------------------------
def setplot(plotdata):
#--------------------------
"""
Specify what is to be plotted at each frame.
Input: plotdata, an instance of pyclaw.plotters.data.ClawPlotData.
Output: a modified version of plotdata.
"""
plotdata.clearfigures() # clear any old figures,axes,items data
# Figure for q[0]
plotfigure = plotdata.new_plotfigure(name='Pressure', figno=1)
# Set up for axes in this figure:
plotaxes = plotfigure.new_plotaxes()
plotaxes.xlimits = 'auto'
plotaxes.ylimits = [-.5,1.1]
plotaxes.title = 'Pressure'
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(name='line', plot_type='1d')
plotitem.plot_var = 0
plotitem.plotstyle = '-'
plotitem.color = 'b'
# Set up for item on these axes:
plotitem = plotaxes.new_plotitem(name='points', plot_type='1d')
plotitem.plot_var = 0
plotitem.plotstyle = 'o'
plotitem.color = '#ff00ff' # any color supported by matplotlib
# Parameters used only when creating html and/or latex hardcopy
# e.g., via pyclaw.plotters.frametools.printframes:
plotdata.printfigs = True # print figures
plotdata.print_format = 'png' # file format
plotdata.print_framenos = 'all' # list of frames to print
plotdata.print_fignos = 'all' # list of figures to print
plotdata.html = True # create html files of plots?
plotdata.html_homelink = '../README.html'# pointer for index page
plotdata.latex = True # create latex file of plots?
plotdata.latex_figsperline = 1 # layout of plots
plotdata.latex_framesperline = 2 # layout of plots
plotdata.latex_makepdf = True # also run pdflatex?
return plotdata
| bsd-3-clause |
dingocuster/scikit-learn | sklearn/naive_bayes.py | 70 | 28476 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.naive_bayes` module implements Naive Bayes algorithms. These
are supervised learning methods based on applying Bayes' theorem with strong
(naive) feature independence assumptions.
"""
# Author: Vincent Michel <vincent.michel@inria.fr>
# Minor fixes by Fabian Pedregosa
# Amit Aides <amitibo@tx.technion.ac.il>
# Yehuda Finkelstein <yehudaf@tx.technion.ac.il>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# (parts based on earlier work by Mathieu Blondel)
#
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from .base import BaseEstimator, ClassifierMixin
from .preprocessing import binarize
from .preprocessing import LabelBinarizer
from .preprocessing import label_binarize
from .utils import check_X_y, check_array
from .utils.extmath import safe_sparse_dot, logsumexp
from .utils.multiclass import _check_partial_fit_first_call
from .utils.fixes import in1d
from .utils.validation import check_is_fitted
from .externals import six
__all__ = ['BernoulliNB', 'GaussianNB', 'MultinomialNB']
class BaseNB(six.with_metaclass(ABCMeta, BaseEstimator, ClassifierMixin)):
"""Abstract base class for naive Bayes estimators"""
@abstractmethod
def _joint_log_likelihood(self, X):
"""Compute the unnormalized posterior log probability of X
I.e. ``log P(c) + log P(x|c)`` for all rows x of X, as an array-like of
shape [n_classes, n_samples].
Input is passed to _joint_log_likelihood as-is by predict,
predict_proba and predict_log_proba.
"""
def predict(self, X):
"""
Perform classification on an array of test vectors X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = [n_samples]
Predicted target values for X
"""
jll = self._joint_log_likelihood(X)
return self.classes_[np.argmax(jll, axis=1)]
def predict_log_proba(self, X):
"""
Return log-probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
jll = self._joint_log_likelihood(X)
# normalize by P(x) = P(f_1, ..., f_n)
log_prob_x = logsumexp(jll, axis=1)
return jll - np.atleast_2d(log_prob_x).T
def predict_proba(self, X):
"""
Return probability estimates for the test vector X.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array-like, shape = [n_samples, n_classes]
Returns the probability of the samples for each class in
the model. The columns correspond to the classes in sorted
order, as they appear in the attribute `classes_`.
"""
return np.exp(self.predict_log_proba(X))
class GaussianNB(BaseNB):
"""
Gaussian Naive Bayes (GaussianNB)
Can perform online updates to model parameters via `partial_fit` method.
For details on algorithm used to update feature means and variance online,
see Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Read more in the :ref:`User Guide <gaussian_naive_bayes>`.
Attributes
----------
class_prior_ : array, shape (n_classes,)
probability of each class.
class_count_ : array, shape (n_classes,)
number of training samples observed in each class.
theta_ : array, shape (n_classes, n_features)
mean of each feature per class
sigma_ : array, shape (n_classes, n_features)
variance of each feature per class
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> Y = np.array([1, 1, 1, 2, 2, 2])
>>> from sklearn.naive_bayes import GaussianNB
>>> clf = GaussianNB()
>>> clf.fit(X, Y)
GaussianNB()
>>> print(clf.predict([[-0.8, -1]]))
[1]
>>> clf_pf = GaussianNB()
>>> clf_pf.partial_fit(X, Y, np.unique(Y))
GaussianNB()
>>> print(clf_pf.predict([[-0.8, -1]]))
[1]
"""
def fit(self, X, y, sample_weight=None):
"""Fit Gaussian Naive Bayes according to X, y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
return self._partial_fit(X, y, np.unique(y), _refit=True,
sample_weight=sample_weight)
@staticmethod
def _update_mean_variance(n_past, mu, var, X, sample_weight=None):
"""Compute online update of Gaussian mean and variance.
Given starting sample count, mean, and variance, a new set of
points X, and optionally sample weights, return the updated mean and
variance. (NB - each dimension (column) in X is treated as independent
-- you get variance, not covariance).
Can take scalar mean and variance, or vector mean and variance to
simultaneously update a number of independent Gaussians.
See Stanford CS tech report STAN-CS-79-773 by Chan, Golub, and LeVeque:
http://i.stanford.edu/pub/cstr/reports/cs/tr/79/773/CS-TR-79-773.pdf
Parameters
----------
n_past : int
Number of samples represented in old mean and variance. If sample
weights were given, this should contain the sum of sample
weights represented in old mean and variance.
mu : array-like, shape (number of Gaussians,)
Means for Gaussians in original set.
var : array-like, shape (number of Gaussians,)
            Variances for Gaussians in original set.
        X : array-like, shape (n_samples, n_features)
            New data points to fold into the running mean and variance.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
total_mu : array-like, shape (number of Gaussians,)
Updated mean for each Gaussian over the combined set.
total_var : array-like, shape (number of Gaussians,)
Updated variance for each Gaussian over the combined set.
"""
if X.shape[0] == 0:
return mu, var
# Compute (potentially weighted) mean and variance of new datapoints
if sample_weight is not None:
n_new = float(sample_weight.sum())
new_mu = np.average(X, axis=0, weights=sample_weight / n_new)
new_var = np.average((X - new_mu) ** 2, axis=0,
weights=sample_weight / n_new)
else:
n_new = X.shape[0]
new_var = np.var(X, axis=0)
new_mu = np.mean(X, axis=0)
if n_past == 0:
return new_mu, new_var
n_total = float(n_past + n_new)
# Combine mean of old and new data, taking into consideration
# (weighted) number of observations
total_mu = (n_new * new_mu + n_past * mu) / n_total
# Combine variance of old and new data, taking into consideration
# (weighted) number of observations. This is achieved by combining
# the sum-of-squared-differences (ssd)
old_ssd = n_past * var
new_ssd = n_new * new_var
total_ssd = (old_ssd + new_ssd +
(n_past / float(n_new * n_total)) *
(n_new * mu - n_new * new_mu) ** 2)
total_var = total_ssd / n_total
return total_mu, total_var
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance and numerical stability overhead,
hence it is better to call partial_fit on chunks of data that are
        as large as possible (as long as they fit in the memory budget) to
hide the overhead.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
return self._partial_fit(X, y, classes, _refit=False,
sample_weight=sample_weight)
def _partial_fit(self, X, y, classes=None, _refit=False,
sample_weight=None):
"""Actual implementation of Gaussian NB fitting.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target values.
classes : array-like, shape (n_classes,)
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
_refit: bool
If true, act as though this were the first time we called
_partial_fit (ie, throw away any past fitting and start over).
sample_weight : array-like, shape (n_samples,), optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y)
epsilon = 1e-9
if _refit:
self.classes_ = None
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_features = X.shape[1]
n_classes = len(self.classes_)
self.theta_ = np.zeros((n_classes, n_features))
self.sigma_ = np.zeros((n_classes, n_features))
self.class_prior_ = np.zeros(n_classes)
self.class_count_ = np.zeros(n_classes)
else:
if X.shape[1] != self.theta_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (X.shape[1], self.theta_.shape[1]))
# Put epsilon back in each time
self.sigma_[:, :] -= epsilon
classes = self.classes_
unique_y = np.unique(y)
unique_y_in_classes = in1d(unique_y, classes)
if not np.all(unique_y_in_classes):
raise ValueError("The target label(s) %s in y do not exist in the "
"initial classes %s" %
(y[~unique_y_in_classes], classes))
for y_i in unique_y:
i = classes.searchsorted(y_i)
X_i = X[y == y_i, :]
if sample_weight is not None:
sw_i = sample_weight[y == y_i]
N_i = sw_i.sum()
else:
sw_i = None
N_i = X_i.shape[0]
new_theta, new_sigma = self._update_mean_variance(
self.class_count_[i], self.theta_[i, :], self.sigma_[i, :],
X_i, sw_i)
self.theta_[i, :] = new_theta
self.sigma_[i, :] = new_sigma
self.class_count_[i] += N_i
self.sigma_[:, :] += epsilon
self.class_prior_[:] = self.class_count_ / np.sum(self.class_count_)
return self
def _joint_log_likelihood(self, X):
check_is_fitted(self, "classes_")
X = check_array(X)
joint_log_likelihood = []
for i in range(np.size(self.classes_)):
jointi = np.log(self.class_prior_[i])
n_ij = - 0.5 * np.sum(np.log(2. * np.pi * self.sigma_[i, :]))
n_ij -= 0.5 * np.sum(((X - self.theta_[i, :]) ** 2) /
(self.sigma_[i, :]), 1)
joint_log_likelihood.append(jointi + n_ij)
joint_log_likelihood = np.array(joint_log_likelihood).T
return joint_log_likelihood
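# Illustrative sketch (the helper below is only a demonstration, not part of
# the estimator API): the incremental update in
# GaussianNB._update_mean_variance reproduces the plain batch mean/variance
# when a dataset is fed in chunks, which is what makes partial_fit exact.
def _demo_online_mean_variance(n_chunks=3, n_samples=30, n_features=4):
    """Check the online mean/variance update against a batch computation."""
    rng = np.random.RandomState(0)
    X = rng.randn(n_samples, n_features)
    mu = np.zeros(n_features)
    var = np.zeros(n_features)
    n_past = 0
    for chunk in np.array_split(X, n_chunks):
        mu, var = GaussianNB._update_mean_variance(n_past, mu, var, chunk)
        n_past += chunk.shape[0]
    assert np.allclose(mu, X.mean(axis=0))
    assert np.allclose(var, X.var(axis=0))
    return mu, var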
class BaseDiscreteNB(BaseNB):
"""Abstract base class for naive Bayes on discrete/categorical data
Any estimator based on this class should provide:
__init__
_joint_log_likelihood(X) as per BaseNB
"""
def _update_class_log_prior(self, class_prior=None):
n_classes = len(self.classes_)
if class_prior is not None:
if len(class_prior) != n_classes:
raise ValueError("Number of priors must match number of"
" classes.")
self.class_log_prior_ = np.log(class_prior)
elif self.fit_prior:
# empirical prior, with sample_weight taken into account
self.class_log_prior_ = (np.log(self.class_count_)
- np.log(self.class_count_.sum()))
else:
self.class_log_prior_ = np.zeros(n_classes) - np.log(n_classes)
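    # Example: with class_count_ = [3, 1] and fit_prior=True the empirical
    # branch gives log([0.75, 0.25]); with fit_prior=False (and no explicit
    # class_prior) the uniform branch gives log([0.5, 0.5]).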
def partial_fit(self, X, y, classes=None, sample_weight=None):
"""Incremental fit on a batch of samples.
This method is expected to be called several times consecutively
on different chunks of a dataset so as to implement out-of-core
or online learning.
This is especially useful when the whole dataset is too big to fit in
memory at once.
This method has some performance overhead hence it is better to call
partial_fit on chunks of data that are as large as possible
        (as long as they fit in the memory budget) to hide the overhead.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
classes : array-like, shape = [n_classes]
List of all the classes that can possibly appear in the y vector.
Must be provided at the first call to partial_fit, can be omitted
in subsequent calls.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X = check_array(X, accept_sparse='csr', dtype=np.float64)
_, n_features = X.shape
if _check_partial_fit_first_call(self, classes):
# This is the first call to partial_fit:
# initialize various cumulative counters
n_effective_classes = len(classes) if len(classes) > 1 else 2
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
elif n_features != self.coef_.shape[1]:
msg = "Number of features %d does not match previous data %d."
raise ValueError(msg % (n_features, self.coef_.shape[-1]))
Y = label_binarize(y, classes=self.classes_)
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
n_samples, n_classes = Y.shape
if X.shape[0] != Y.shape[0]:
msg = "X.shape[0]=%d and y.shape[0]=%d are incompatible."
raise ValueError(msg % (X.shape[0], y.shape[0]))
# label_binarize() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
self._count(X, Y)
# XXX: OPTIM: we could introduce a public finalization method to
# be called by the user explicitly just once after several consecutive
# calls to partial_fit and prior any call to predict[_[log_]proba]
# to avoid computing the smooth log probas at each call to partial fit
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
def fit(self, X, y, sample_weight=None):
"""Fit Naive Bayes classifier according to X, y
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training vectors, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target values.
sample_weight : array-like, shape = [n_samples], optional
Weights applied to individual samples (1. for unweighted).
Returns
-------
self : object
Returns self.
"""
X, y = check_X_y(X, y, 'csr')
_, n_features = X.shape
labelbin = LabelBinarizer()
Y = labelbin.fit_transform(y)
self.classes_ = labelbin.classes_
if Y.shape[1] == 1:
Y = np.concatenate((1 - Y, Y), axis=1)
# LabelBinarizer().fit_transform() returns arrays with dtype=np.int64.
# We convert it to np.float64 to support sample_weight consistently;
# this means we also don't have to cast X to floating point
Y = Y.astype(np.float64)
if sample_weight is not None:
sample_weight = np.atleast_2d(sample_weight)
Y *= check_array(sample_weight).T
class_prior = self.class_prior
# Count raw events from data before updating the class log prior
# and feature log probas
n_effective_classes = Y.shape[1]
self.class_count_ = np.zeros(n_effective_classes, dtype=np.float64)
self.feature_count_ = np.zeros((n_effective_classes, n_features),
dtype=np.float64)
self._count(X, Y)
self._update_feature_log_prob()
self._update_class_log_prior(class_prior=class_prior)
return self
# XXX The following is a stopgap measure; we need to set the dimensions
# of class_log_prior_ and feature_log_prob_ correctly.
def _get_coef(self):
return (self.feature_log_prob_[1:]
if len(self.classes_) == 2 else self.feature_log_prob_)
def _get_intercept(self):
return (self.class_log_prior_[1:]
if len(self.classes_) == 2 else self.class_log_prior_)
coef_ = property(_get_coef)
intercept_ = property(_get_intercept)
class MultinomialNB(BaseDiscreteNB):
"""
Naive Bayes classifier for multinomial models
The multinomial Naive Bayes classifier is suitable for classification with
discrete features (e.g., word counts for text classification). The
multinomial distribution normally requires integer feature counts. However,
in practice, fractional counts such as tf-idf may also work.
Read more in the :ref:`User Guide <multinomial_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size (n_classes,)
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape (n_classes, )
Smoothed empirical log probability for each class.
intercept_ : property
Mirrors ``class_log_prior_`` for interpreting MultinomialNB
as a linear model.
feature_log_prob_ : array, shape (n_classes, n_features)
Empirical log probability of features
given a class, ``P(x_i|y)``.
coef_ : property
Mirrors ``feature_log_prob_`` for interpreting MultinomialNB
as a linear model.
class_count_ : array, shape (n_classes,)
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape (n_classes, n_features)
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(5, size=(6, 100))
>>> y = np.array([1, 2, 3, 4, 5, 6])
>>> from sklearn.naive_bayes import MultinomialNB
>>> clf = MultinomialNB()
>>> clf.fit(X, y)
MultinomialNB(alpha=1.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
Notes
-----
For the rationale behind the names `coef_` and `intercept_`, i.e.
naive Bayes as a linear classifier, see J. Rennie et al. (2003),
Tackling the poor assumptions of naive Bayes text classifiers, ICML.
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html
"""
def __init__(self, alpha=1.0, fit_prior=True, class_prior=None):
self.alpha = alpha
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if np.any((X.data if issparse(X) else X) < 0):
raise ValueError("Input X must be non-negative")
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = smoothed_fc.sum(axis=1)
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
return (safe_sparse_dot(X, self.feature_log_prob_.T)
+ self.class_log_prior_)
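# Note: the joint log-likelihood above is linear in X -- log P(c) plus
# X dot log P(x_i|c) -- which is why the ``coef_`` and ``intercept_``
# properties can simply expose ``feature_log_prob_`` and ``class_log_prior_``
# (see the Notes section of the MultinomialNB docstring).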
class BernoulliNB(BaseDiscreteNB):
"""Naive Bayes classifier for multivariate Bernoulli models.
Like MultinomialNB, this classifier is suitable for discrete data. The
difference is that while MultinomialNB works with occurrence counts,
BernoulliNB is designed for binary/boolean features.
Read more in the :ref:`User Guide <bernoulli_naive_bayes>`.
Parameters
----------
alpha : float, optional (default=1.0)
Additive (Laplace/Lidstone) smoothing parameter
(0 for no smoothing).
binarize : float or None, optional
        Threshold for binarizing (mapping to booleans) sample features.
If None, input is presumed to already consist of binary vectors.
fit_prior : boolean
Whether to learn class prior probabilities or not.
If false, a uniform prior will be used.
class_prior : array-like, size=[n_classes,]
Prior probabilities of the classes. If specified the priors are not
adjusted according to the data.
Attributes
----------
class_log_prior_ : array, shape = [n_classes]
Log probability of each class (smoothed).
feature_log_prob_ : array, shape = [n_classes, n_features]
Empirical log probability of features given a class, P(x_i|y).
class_count_ : array, shape = [n_classes]
Number of samples encountered for each class during fitting. This
value is weighted by the sample weight when provided.
feature_count_ : array, shape = [n_classes, n_features]
Number of samples encountered for each (class, feature)
during fitting. This value is weighted by the sample weight when
provided.
Examples
--------
>>> import numpy as np
>>> X = np.random.randint(2, size=(6, 100))
>>> Y = np.array([1, 2, 3, 4, 4, 5])
>>> from sklearn.naive_bayes import BernoulliNB
>>> clf = BernoulliNB()
>>> clf.fit(X, Y)
BernoulliNB(alpha=1.0, binarize=0.0, class_prior=None, fit_prior=True)
>>> print(clf.predict(X[2:3]))
[3]
References
----------
C.D. Manning, P. Raghavan and H. Schuetze (2008). Introduction to
Information Retrieval. Cambridge University Press, pp. 234-265.
http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
A. McCallum and K. Nigam (1998). A comparison of event models for naive
Bayes text classification. Proc. AAAI/ICML-98 Workshop on Learning for
Text Categorization, pp. 41-48.
V. Metsis, I. Androutsopoulos and G. Paliouras (2006). Spam filtering with
naive Bayes -- Which naive Bayes? 3rd Conf. on Email and Anti-Spam (CEAS).
"""
def __init__(self, alpha=1.0, binarize=.0, fit_prior=True,
class_prior=None):
self.alpha = alpha
self.binarize = binarize
self.fit_prior = fit_prior
self.class_prior = class_prior
def _count(self, X, Y):
"""Count and smooth feature occurrences."""
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
self.feature_count_ += safe_sparse_dot(Y.T, X)
self.class_count_ += Y.sum(axis=0)
def _update_feature_log_prob(self):
"""Apply smoothing to raw counts and recompute log probabilities"""
smoothed_fc = self.feature_count_ + self.alpha
smoothed_cc = self.class_count_ + self.alpha * 2
self.feature_log_prob_ = (np.log(smoothed_fc)
- np.log(smoothed_cc.reshape(-1, 1)))
def _joint_log_likelihood(self, X):
"""Calculate the posterior log probability of the samples X"""
check_is_fitted(self, "classes_")
X = check_array(X, accept_sparse='csr')
if self.binarize is not None:
X = binarize(X, threshold=self.binarize)
n_classes, n_features = self.feature_log_prob_.shape
n_samples, n_features_X = X.shape
if n_features_X != n_features:
raise ValueError("Expected input with %d features, got %d instead"
% (n_features, n_features_X))
neg_prob = np.log(1 - np.exp(self.feature_log_prob_))
# Compute neg_prob · (1 - X).T as ∑neg_prob - X · neg_prob
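        # For binary x_i this uses the identity
        #   log P(x|c) = sum_i [x_i log p_i + (1 - x_i) log(1 - p_i)]
        #              = sum_i log(1 - p_i) + sum_i x_i (log p_i - log(1 - p_i)),
        # so the dense matrix (1 - X) never needs to be materialized.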
jll = safe_sparse_dot(X, (self.feature_log_prob_ - neg_prob).T)
jll += self.class_log_prior_ + neg_prob.sum(axis=1)
return jll
| bsd-3-clause |
jhamman/xray | xarray/tests/test_combine.py | 1 | 15860 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from copy import deepcopy
import numpy as np
import pandas as pd
from xarray import Dataset, DataArray, auto_combine, concat, Variable
from xarray.core.pycompat import iteritems, OrderedDict
from . import TestCase, InaccessibleArray, requires_dask
from .test_dataset import create_test_data
class TestConcatDataset(TestCase):
def test_concat(self):
# TODO: simplify and split this test case
# drop the third dimension to keep things relatively understandable
data = create_test_data()
for k in list(data):
if 'dim3' in data[k].dims:
del data[k]
split_data = [data.isel(dim1=slice(3)),
data.isel(dim1=slice(3, None))]
self.assertDatasetIdentical(data, concat(split_data, 'dim1'))
def rectify_dim_order(dataset):
# return a new dataset with all variable dimensions transposed into
# the order in which they are found in `data`
return Dataset(dict((k, v.transpose(*data[k].dims))
for k, v in iteritems(dataset.data_vars)),
dataset.coords, attrs=dataset.attrs)
for dim in ['dim1', 'dim2']:
datasets = [g for _, g in data.groupby(dim, squeeze=False)]
self.assertDatasetIdentical(data, concat(datasets, dim))
dim = 'dim2'
self.assertDatasetIdentical(
data, concat(datasets, data[dim]))
self.assertDatasetIdentical(
data, concat(datasets, data[dim], coords='minimal'))
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
concat_over = [k for k, v in iteritems(data.coords)
if dim in v.dims and k != dim]
actual = concat(datasets, data[dim], coords=concat_over)
self.assertDatasetIdentical(data, rectify_dim_order(actual))
actual = concat(datasets, data[dim], coords='different')
self.assertDatasetIdentical(data, rectify_dim_order(actual))
# make sure the coords argument behaves as expected
data.coords['extra'] = ('dim4', np.arange(3))
for dim in ['dim1', 'dim2']:
datasets = [g for _, g in data.groupby(dim, squeeze=True)]
actual = concat(datasets, data[dim], coords='all')
expected = np.array([data['extra'].values
for _ in range(data.dims[dim])])
self.assertArrayEqual(actual['extra'].values, expected)
actual = concat(datasets, data[dim], coords='different')
self.assertDataArrayEqual(data['extra'], actual['extra'])
actual = concat(datasets, data[dim], coords='minimal')
self.assertDataArrayEqual(data['extra'], actual['extra'])
# verify that the dim argument takes precedence over
# concatenating dataset variables of the same name
dim = (2 * data['dim1']).rename('dim1')
datasets = [g for _, g in data.groupby('dim1', squeeze=False)]
expected = data.copy()
expected['dim1'] = dim
self.assertDatasetIdentical(expected, concat(datasets, dim))
def test_concat_data_vars(self):
data = Dataset({'foo': ('x', np.random.randn(10))})
objs = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
for data_vars in ['minimal', 'different', 'all', [], ['foo']]:
actual = concat(objs, dim='x', data_vars=data_vars)
self.assertDatasetIdentical(data, actual)
def test_concat_coords(self):
data = Dataset({'foo': ('x', np.random.randn(10))})
expected = data.assign_coords(c=('x', [0] * 5 + [1] * 5))
objs = [data.isel(x=slice(5)).assign_coords(c=0),
data.isel(x=slice(5, None)).assign_coords(c=1)]
for coords in ['different', 'all', ['c']]:
actual = concat(objs, dim='x', coords=coords)
self.assertDatasetIdentical(expected, actual)
for coords in ['minimal', []]:
with self.assertRaisesRegexp(ValueError, 'not equal across'):
concat(objs, dim='x', coords=coords)
def test_concat_constant_index(self):
# GH425
ds1 = Dataset({'foo': 1.5}, {'y': 1})
ds2 = Dataset({'foo': 2.5}, {'y': 1})
expected = Dataset({'foo': ('y', [1.5, 2.5]), 'y': [1, 1]})
for mode in ['different', 'all', ['foo']]:
actual = concat([ds1, ds2], 'y', data_vars=mode)
self.assertDatasetIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'not equal across datasets'):
concat([ds1, ds2], 'y', data_vars='minimal')
def test_concat_size0(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(0, 0)), data]
actual = concat(split_data, 'dim1')
self.assertDatasetIdentical(data, actual)
actual = concat(split_data[::-1], 'dim1')
self.assertDatasetIdentical(data, actual)
def test_concat_autoalign(self):
ds1 = Dataset({'foo': DataArray([1, 2], coords=[('x', [1, 2])])})
ds2 = Dataset({'foo': DataArray([1, 2], coords=[('x', [1, 3])])})
actual = concat([ds1, ds2], 'y')
expected = Dataset({'foo': DataArray([[1, 2, np.nan], [1, np.nan, 2]],
dims=['y', 'x'],
coords={'x': [1, 2, 3]})})
self.assertDatasetIdentical(expected, actual)
def test_concat_errors(self):
data = create_test_data()
split_data = [data.isel(dim1=slice(3)),
data.isel(dim1=slice(3, None))]
with self.assertRaisesRegexp(ValueError, 'must supply at least one'):
concat([], 'dim1')
with self.assertRaisesRegexp(ValueError, 'are not coordinates'):
concat([data, data], 'new_dim', coords=['not_found'])
with self.assertRaisesRegexp(ValueError, 'global attributes not'):
data0, data1 = deepcopy(split_data)
data1.attrs['foo'] = 'bar'
concat([data0, data1], 'dim1', compat='identical')
self.assertDatasetIdentical(
data, concat([data0, data1], 'dim1', compat='equals'))
with self.assertRaisesRegexp(ValueError, 'encountered unexpected'):
data0, data1 = deepcopy(split_data)
data1['foo'] = ('bar', np.random.randn(10))
concat([data0, data1], 'dim1')
with self.assertRaisesRegexp(ValueError, 'compat.* invalid'):
concat(split_data, 'dim1', compat='foobar')
with self.assertRaisesRegexp(ValueError, 'unexpected value for'):
concat([data, data], 'new_dim', coords='foobar')
with self.assertRaisesRegexp(
ValueError, 'coordinate in some datasets but not others'):
concat([Dataset({'x': 0}), Dataset({'x': [1]})], dim='z')
with self.assertRaisesRegexp(
ValueError, 'coordinate in some datasets but not others'):
concat([Dataset({'x': 0}), Dataset({}, {'x': 1})], dim='z')
with self.assertRaisesRegexp(ValueError, 'no longer a valid'):
concat([data, data], 'new_dim', mode='different')
with self.assertRaisesRegexp(ValueError, 'no longer a valid'):
concat([data, data], 'new_dim', concat_over='different')
def test_concat_promote_shape(self):
# mixed dims within variables
objs = [Dataset({}, {'x': 0}), Dataset({'x': [1]})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1]})
self.assertDatasetIdentical(actual, expected)
objs = [Dataset({'x': [0]}), Dataset({}, {'x': 1})]
actual = concat(objs, 'x')
self.assertDatasetIdentical(actual, expected)
# mixed dims between variables
objs = [Dataset({'x': [2], 'y': 3}), Dataset({'x': [4], 'y': 5})]
actual = concat(objs, 'x')
expected = Dataset({'x': [2, 4], 'y': ('x', [3, 5])})
self.assertDatasetIdentical(actual, expected)
# mixed dims in coord variable
objs = [Dataset({'x': [0]}, {'y': -1}),
Dataset({'x': [1]}, {'y': ('x', [-2])})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1]}, {'y': ('x', [-1, -2])})
self.assertDatasetIdentical(actual, expected)
# scalars with mixed lengths along concat dim -- values should repeat
objs = [Dataset({'x': [0]}, {'y': -1}),
Dataset({'x': [1, 2]}, {'y': -2})]
actual = concat(objs, 'x')
expected = Dataset({'x': [0, 1, 2]}, {'y': ('x', [-1, -2, -2])})
self.assertDatasetIdentical(actual, expected)
# broadcast 1d x 1d -> 2d
objs = [Dataset({'z': ('x', [-1])}, {'x': [0], 'y': [0]}),
Dataset({'z': ('y', [1])}, {'x': [1], 'y': [0]})]
actual = concat(objs, 'x')
expected = Dataset({'z': (('x', 'y'), [[-1], [1]])},
{'x': [0, 1], 'y': [0]})
self.assertDatasetIdentical(actual, expected)
def test_concat_do_not_promote(self):
# GH438
objs = [Dataset({'y': ('t', [1])}, {'x': 1, 't': [0]}),
Dataset({'y': ('t', [2])}, {'x': 1, 't': [0]})]
expected = Dataset({'y': ('t', [1, 2])}, {'x': 1, 't': [0, 0]})
actual = concat(objs, 't')
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'y': ('t', [1])}, {'x': 1, 't': [0]}),
Dataset({'y': ('t', [2])}, {'x': 2, 't': [0]})]
with self.assertRaises(ValueError):
concat(objs, 't', coords='minimal')
def test_concat_dim_is_variable(self):
objs = [Dataset({'x': 0}), Dataset({'x': 1})]
coord = Variable('y', [3, 4])
expected = Dataset({'x': ('y', [0, 1]), 'y': [3, 4]})
actual = concat(objs, coord)
self.assertDatasetIdentical(actual, expected)
def test_concat_multiindex(self):
x = pd.MultiIndex.from_product([[1, 2, 3], ['a', 'b']])
expected = Dataset({'x': x})
actual = concat([expected.isel(x=slice(2)),
expected.isel(x=slice(2, None))], 'x')
assert expected.equals(actual)
assert isinstance(actual.x.to_index(), pd.MultiIndex)
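# A minimal illustration of the invariant most of the tests above rely on:
# concatenating datasets along an existing dimension stacks their indexes,
# e.g. concat([Dataset({'x': [0]}), Dataset({'x': [1]})], 'x') is identical
# to Dataset({'x': [0, 1]}).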
class TestConcatDataArray(TestCase):
def test_concat(self):
ds = Dataset({'foo': (['x', 'y'], np.random.random((2, 3))),
'bar': (['x', 'y'], np.random.random((2, 3)))},
{'x': [0, 1]})
foo = ds['foo']
bar = ds['bar']
# from dataset array:
expected = DataArray(np.array([foo.values, bar.values]),
dims=['w', 'x', 'y'], coords={'x': [0, 1]})
actual = concat([foo, bar], 'w')
self.assertDataArrayEqual(expected, actual)
# from iteration:
grouped = [g for _, g in foo.groupby('x')]
stacked = concat(grouped, ds['x'])
self.assertDataArrayIdentical(foo, stacked)
# with an index as the 'dim' argument
stacked = concat(grouped, ds.indexes['x'])
self.assertDataArrayIdentical(foo, stacked)
actual = concat([foo[0], foo[1]], pd.Index([0, 1])).reset_coords(drop=True)
expected = foo[:2].rename({'x': 'concat_dim'})
self.assertDataArrayIdentical(expected, actual)
actual = concat([foo[0], foo[1]], [0, 1]).reset_coords(drop=True)
expected = foo[:2].rename({'x': 'concat_dim'})
self.assertDataArrayIdentical(expected, actual)
with self.assertRaisesRegexp(ValueError, 'not identical'):
concat([foo, bar], dim='w', compat='identical')
with self.assertRaisesRegexp(ValueError, 'not a valid argument'):
concat([foo, bar], dim='w', data_vars='minimal')
@requires_dask
def test_concat_lazy(self):
import dask.array as da
arrays = [DataArray(
da.from_array(InaccessibleArray(np.zeros((3, 3))), 3),
dims=['x', 'y']) for _ in range(2)]
# should not raise
combined = concat(arrays, dim='z')
self.assertEqual(combined.shape, (2, 3, 3))
self.assertEqual(combined.dims, ('z', 'x', 'y'))
class TestAutoCombine(TestCase):
@requires_dask # only for toolz
def test_auto_combine(self):
objs = [Dataset({'x': [0]}), Dataset({'x': [1]})]
actual = auto_combine(objs)
expected = Dataset({'x': [0, 1]})
self.assertDatasetIdentical(expected, actual)
actual = auto_combine([actual])
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'x': [0, 1]}), Dataset({'x': [2]})]
actual = auto_combine(objs)
expected = Dataset({'x': [0, 1, 2]})
self.assertDatasetIdentical(expected, actual)
# ensure auto_combine handles non-sorted variables
objs = [Dataset(OrderedDict([('x', ('a', [0])), ('y', ('a', [0]))])),
Dataset(OrderedDict([('y', ('a', [1])), ('x', ('a', [1]))]))]
actual = auto_combine(objs)
expected = Dataset({'x': ('a', [0, 1]), 'y': ('a', [0, 1])})
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'x': [0], 'y': [0]}), Dataset({'y': [1], 'x': [1]})]
with self.assertRaisesRegexp(ValueError, 'too many .* dimensions'):
auto_combine(objs)
objs = [Dataset({'x': 0}), Dataset({'x': 1})]
with self.assertRaisesRegexp(ValueError, 'cannot infer dimension'):
auto_combine(objs)
objs = [Dataset({'x': [0], 'y': [0]}), Dataset({'x': [0]})]
with self.assertRaises(KeyError):
auto_combine(objs)
@requires_dask # only for toolz
def test_auto_combine_previously_failed(self):
        # In this scenario, one file is missing, containing the data for
        # one year for one variable.
datasets = [Dataset({'a': ('x', [0]), 'x': [0]}),
Dataset({'b': ('x', [0]), 'x': [0]}),
Dataset({'a': ('x', [1]), 'x': [1]})]
expected = Dataset({'a': ('x', [0, 1]), 'b': ('x', [0, np.nan])},
{'x': [0, 1]})
actual = auto_combine(datasets)
self.assertDatasetIdentical(expected, actual)
# Your data includes "time" and "station" dimensions, and each year's
# data has a different set of stations.
datasets = [Dataset({'a': ('x', [2, 3]), 'x': [1, 2]}),
Dataset({'a': ('x', [1, 2]), 'x': [0, 1]})]
expected = Dataset({'a': (('t', 'x'),
[[np.nan, 2, 3], [1, 2, np.nan]])},
{'x': [0, 1, 2]})
actual = auto_combine(datasets, concat_dim='t')
self.assertDatasetIdentical(expected, actual)
@requires_dask # only for toolz
def test_auto_combine_still_fails(self):
# concat can't handle new variables (yet):
# https://github.com/pydata/xarray/issues/508
datasets = [Dataset({'x': 0}, {'y': 0}),
Dataset({'x': 1}, {'y': 1, 'z': 1})]
with self.assertRaises(ValueError):
auto_combine(datasets, 'y')
@requires_dask # only for toolz
def test_auto_combine_no_concat(self):
objs = [Dataset({'x': 0}), Dataset({'y': 1})]
actual = auto_combine(objs)
expected = Dataset({'x': 0, 'y': 1})
self.assertDatasetIdentical(expected, actual)
objs = [Dataset({'x': 0, 'y': 1}), Dataset({'y': np.nan, 'z': 2})]
actual = auto_combine(objs)
expected = Dataset({'x': 0, 'y': 1, 'z': 2})
self.assertDatasetIdentical(expected, actual)
data = Dataset({'x': 0})
actual = auto_combine([data, data, data], concat_dim=None)
self.assertDatasetIdentical(data, actual)
| apache-2.0 |
spallavolu/scikit-learn | examples/linear_model/plot_logistic.py | 312 | 1426 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Logit function
=========================================================
Shown in the plot is how logistic regression would, in this
synthetic dataset, classify values as either 0 or 1,
i.e. class one or two, using the logit curve.
"""
print(__doc__)
# Code source: Gael Varoquaux
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import linear_model
# this is our test set, it's just a straight line with some
# Gaussian noise
xmin, xmax = -5, 5
n_samples = 100
np.random.seed(0)
X = np.random.normal(size=n_samples)
y = (X > 0).astype(np.float)
X[X > 0] *= 4
X += .3 * np.random.normal(size=n_samples)
X = X[:, np.newaxis]
# run the classifier
clf = linear_model.LogisticRegression(C=1e5)
clf.fit(X, y)
# and plot the result
plt.figure(1, figsize=(4, 3))
plt.clf()
plt.scatter(X.ravel(), y, color='black', zorder=20)
X_test = np.linspace(-5, 10, 300)
def model(x):
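    # the logistic (sigmoid) function, squashing the linear score into (0, 1)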
return 1 / (1 + np.exp(-x))
loss = model(X_test * clf.coef_ + clf.intercept_).ravel()
plt.plot(X_test, loss, color='blue', linewidth=3)
ols = linear_model.LinearRegression()
ols.fit(X, y)
plt.plot(X_test, ols.coef_ * X_test + ols.intercept_, linewidth=1)
plt.axhline(.5, color='.5')
plt.ylabel('y')
plt.xlabel('X')
plt.xticks(())
plt.yticks(())
plt.ylim(-.25, 1.25)
plt.xlim(-4, 10)
plt.show()
| bsd-3-clause |
chungjjang80/FRETBursts | fretbursts/tests/test_burstlib.py | 1 | 40546 | #
# FRETBursts - A single-molecule FRET burst analysis toolkit.
#
# Copyright (C) 2014 Antonino Ingargiola <tritemio@gmail.com>
#
"""
Module containing automated unit tests for FRETBursts.
Running the tests requires `py.test`.
"""
from __future__ import division
from builtins import range, zip
from collections import namedtuple
import pytest
import numpy as np
try:
import matplotlib
except ImportError:
has_matplotlib = False # OK to run tests without matplotlib
else:
has_matplotlib = True
matplotlib.use('Agg') # but if matplotlib is installed, use Agg
try:
import numba
except ImportError:
has_numba = False
else:
has_numba = True
import fretbursts.background as bg
import fretbursts.burstlib as bl
import fretbursts.burstlib_ext as bext
from fretbursts import loader
from fretbursts import select_bursts
from fretbursts.ph_sel import Ph_sel
from fretbursts.phtools import phrates
if has_matplotlib:
import fretbursts.burst_plot as bplt
# data subdir in the notebook folder
DATASETS_DIR = u'notebooks/data/'
def _alex_process(d):
loader.alex_apply_period(d)
d.calc_bg(bg.exp_fit, time_s=30, tail_min_us=300)
d.burst_search(L=10, m=10, F=7)
def load_dataset_1ch(process=True):
fn = "0023uLRpitc_NTP_20dT_0.5GndCl.hdf5"
fname = DATASETS_DIR + fn
d = loader.photon_hdf5(fname)
if process:
_alex_process(d)
return d
def load_dataset_8ch():
fn = "12d_New_30p_320mW_steer_3.hdf5"
fname = DATASETS_DIR + fn
d = loader.photon_hdf5(fname)
d.calc_bg(bg.exp_fit, time_s=30, tail_min_us=300)
d.burst_search(L=10, m=10, F=7)
return d
def load_fake_pax():
fn = "0023uLRpitc_NTP_20dT_0.5GndCl.hdf5"
fname = DATASETS_DIR + fn
d = loader.photon_hdf5(fname)
d.add(ALEX=False, meas_type='PAX')
loader.alex_apply_period(d)
d.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto')
d.burst_search(L=10, m=10, F=6)
return d
@pytest.fixture(scope="module", params=[
load_dataset_1ch,
load_dataset_8ch,
])
def data(request):
load_func = request.param
d = load_func()
return d
@pytest.fixture(scope="module")
def data_8ch(request):
d = load_dataset_8ch()
return d
@pytest.fixture(scope="module")
def data_1ch(request):
d = load_dataset_1ch()
return d
##
# List comparison functions
#
def list_equal(list1, list2):
"""Test numerical equality of all the elements in the two lists.
"""
return np.all([val1 == val2 for val1, val2 in zip(list1, list2)])
def list_array_equal(list1, list2):
"""Test numerical equality between two lists of arrays.
"""
return np.all([np.all(arr1 == arr2) for arr1, arr2 in zip(list1, list2)])
def list_array_allclose(list1, list2):
"""Test float closeness (np.allclose) between two lists of arrays.
"""
return np.all([np.allclose(arr1, arr2) for arr1, arr2 in zip(list1, list2)])
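# Example (illustrative): these helpers compare per-channel lists of arrays,
#     list_array_equal([np.array([1, 2])], [np.array([1, 2])]) -> True
#     list_array_allclose([np.array([0.1 + 0.2])], [np.array([0.3])]) -> True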
##
# Test functions
#
def test_bg_compatlayer_for_obsolete_attrs():
d = load_dataset_1ch(process=False)
attrs = ('bg_dd', 'bg_ad', 'bg_da', 'bg_aa',
'rate_m', 'rate_dd', 'rate_ad', 'rate_da', 'rate_aa')
for attr in attrs:
with pytest.raises(RuntimeError):
getattr(d, attr)
_alex_process(d)
for attr in attrs:
assert isinstance(getattr(d, attr), list)
def test_ph_times_compact(data_1ch):
"""Test calculation of ph_times_compact."""
def isinteger(x):
return np.equal(np.mod(x, 1), 0)
ich = 0
d = data_1ch
ph_d = d.get_ph_times(ph_sel=Ph_sel(Dex='DAem'))
ph_a = d.get_ph_times(ph_sel=Ph_sel(Aex='DAem'))
ph_dc = d.get_ph_times(ph_sel=Ph_sel(Dex='DAem'), compact=True)
ph_ac = d.get_ph_times(ph_sel=Ph_sel(Aex='DAem'), compact=True)
# Test that the difference of ph and ph_compact is multiple of
# the complementary excitation period duration
Dex_void = bl._excitation_width(d._D_ON_multich[ich], d.alex_period)
Aex_void = bl._excitation_width(d._A_ON_multich[ich], d.alex_period)
assert isinteger((ph_d - ph_dc) / Dex_void).all()
assert isinteger((ph_a - ph_ac) / Aex_void).all()
# Test that alternation histogram does not have "gaps" for ph_compact
bins = np.linspace(0, d.alex_period, num=101)
hist_dc, _ = np.histogram(ph_dc % d.alex_period, bins=bins)
hist_ac, _ = np.histogram(ph_ac % d.alex_period, bins=bins)
assert (hist_dc > 0).all()
assert (hist_ac > 0).all()
def test_time_min_max():
"""Test time_min and time_max for ALEX data."""
d = load_dataset_1ch(process=False)
ich = 0
assert d.time_max == d.ph_times_t[ich].max() * d.clk_p
assert d.time_min == d.ph_times_t[ich].min() * d.clk_p
del d._time_max, d._time_min
_alex_process(d)
assert d.time_max == d.ph_times_m[ich][-1] * d.clk_p
assert d.time_min == d.ph_times_m[ich][0] * d.clk_p
d.delete('ph_times_m')
del d._time_max, d._time_min
assert d.time_max == d.mburst[0].stop[-1] * d.clk_p
assert d.time_min == d.mburst[0].start[0] * d.clk_p
def test_time_min_max_multispot(data_8ch):
"""Test time_min and time_max for multi-spot data."""
d = data_8ch
assert d.time_max == max(t[-1] for t in d.ph_times_m) * d.clk_p
assert d.time_min == min(t[0] for t in d.ph_times_m) * d.clk_p
def test_aex_dex_ratio(data_1ch):
"""Test methods computing relative D and A alternation periods durations.
"""
d = data_1ch
Dx, Ax = d.D_ON, d.A_ON
a1 = d._aex_fraction()
a2 = (Ax[1] - Ax[0]) / (Ax[1] - Ax[0] + Dx[1] - Dx[0])
assert a1 == a2
r1 = d._aex_dex_ratio()
r2 = (Ax[1] - Ax[0]) / (Dx[1] - Dx[0])
assert r1 == r2
assert (a1 / (1 - a1)) == r1
def test_burst_size_pax():
d = load_fake_pax()
aex_dex_ratio, alpha_d = d._aex_dex_ratio(), 1 - d._aex_fraction()
nd, na = d.nd[0], d.na[0]
nda = d.nda[0]
naa = d.naa[0] - d.nar[0] * aex_dex_ratio
# Test burst size during Dex
b1 = d.burst_sizes_pax_ich(add_aex=False)
b2 = d.burst_sizes_ich(add_naa=False)
b3 = nd + na
assert (b1 == b2).all()
assert (b1 == b3).all()
# Test naa
naa2 = d.get_naa_corrected()
naa3 = d._get_naa_ich()
assert (naa == naa2).all()
assert (naa == naa3).all()
# Test add_naa
b1 = d.burst_sizes_ich(add_naa=True)
b2 = nd + na + naa
assert (b1 == b2).all()
# Test add_aex with no duty-cycle correction
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=False)
b2 = nd + na + nda + d.naa[0]
b3 = nd + na + nda + naa + na * aex_dex_ratio
assert np.allclose(b1, b2)
assert np.allclose(b1, b3)
# Test add_aex with duty-cycle correction
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True)
b2 = nd + na + nda + na * aex_dex_ratio + naa / alpha_d
assert np.allclose(b1, b2)
# Test add_aex with duty-cycle correction, donor_ref
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, donor_ref=True)
b2 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True, donor_ref=False)
assert np.allclose(b1, b2)
# Test add_aex with duty-cycle correction, gamma, beta
gamma = 0.7
beta = 0.85
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=True)
b2 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=False)
assert np.allclose(b1 * gamma, b2)
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=False)
b2 = (gamma * (nd + nda) + na * (1 + aex_dex_ratio) +
naa / (alpha_d * beta))
assert np.allclose(b1, b2)
d.leakage = 0.1
nd, na = d.nd[0], d.na[0]
nda = d.nda[0]
naa = d.naa[0] - d.nar[0] * aex_dex_ratio
# Test add_aex with duty-cycle correction, gamma, beta
gamma = 0.7
beta = 0.85
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=True)
b2 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=False)
assert np.allclose(b1 * gamma, b2)
b1 = d.burst_sizes_pax_ich(add_aex=True, aex_corr=True,
gamma=gamma, beta=beta, donor_ref=False)
b2 = (gamma * (nd + nda) + na * (1 + aex_dex_ratio) +
naa / (alpha_d * beta))
assert np.allclose(b1, b2)
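# Summary of the invariant exercised above (taken from the b2/b3 expressions):
# with add_aex=True and aex_corr=True the PAX burst size reduces to
#     gamma * (nd + nda) + na * (1 + aex_dex_ratio) + naa / (alpha_d * beta)
# where naa has already been corrected by subtracting nar * aex_dex_ratio.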
def test_bg_calc(data):
"""Smoke test bg_calc() and test deletion of bg fields.
"""
data.calc_bg(bg.exp_fit, time_s=30, tail_min_us=300)
assert 'bg_auto_th_us0' not in data
assert 'bg_auto_F_bg' not in data
assert 'bg_th_us_user' in data
data.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto', F_bg=1.7)
assert 'bg_auto_th_us0' in data
assert 'bg_auto_F_bg' in data
assert 'bg_th_us_user' not in data
data.calc_bg(bg.exp_fit, time_s=30, tail_min_us='auto', F_bg=1.7,
fit_allph=False)
streams = [s for s in data.ph_streams if s != Ph_sel('all')]
    bg_t = [sum(data.bg[s][ich] for s in streams) for ich in range(data.nch)]
assert list_array_equal(data.bg[Ph_sel('all')], bg_t)
def test_ph_streams(data):
sel = [Ph_sel('all'), Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
if data.alternated:
sel.extend([Ph_sel(Aex='Aem'), Ph_sel(Aex='Dem')])
for s in sel:
assert s in data.ph_streams
def test_bg_from(data):
"""Test the method .bg_from() for all the ph_sel combinations.
"""
d = data
for sel in d.ph_streams:
bg = d.bg_from(ph_sel=sel)
assert list_array_equal(bg, d.bg[sel])
if not (data.alternated):
assert list_array_equal(d.bg_from(Ph_sel('all')),
d.bg_from(Ph_sel(Dex='DAem')))
return
bg_dd = d.bg_from(ph_sel=Ph_sel(Dex='Dem'))
bg_ad = d.bg_from(ph_sel=Ph_sel(Dex='Aem'))
bg = d.bg_from(ph_sel=Ph_sel(Dex='DAem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_dd, bg_ad)])
bg_aa = d.bg_from(ph_sel=Ph_sel(Aex='Aem'))
bg_da = d.bg_from(ph_sel=Ph_sel(Aex='Dem'))
bg = d.bg_from(ph_sel=Ph_sel(Aex='DAem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_aa, bg_da)])
bg = d.bg_from(ph_sel=Ph_sel(Dex='Dem', Aex='Dem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_dd, bg_da)])
bg = d.bg_from(ph_sel=Ph_sel(Dex='Aem', Aex='Aem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_ad, bg_aa)])
bg = d.bg_from(ph_sel=Ph_sel(Dex='DAem'))
assert list_array_equal(bg, [b1 + b2 for b1, b2 in zip(bg_dd, bg_ad)])
bg = d.bg_from(ph_sel=Ph_sel(Dex='DAem', Aex='Aem'))
bg2 = [b1 + b2 + b3 for b1, b2, b3 in zip(bg_dd, bg_ad, bg_aa)]
assert list_array_equal(bg, bg2)
def test_iter_ph_times(data):
"""Test method .iter_ph_times() for all the ph_sel combinations.
"""
# TODO add all the ph_sel combinations like in test_bg_from()
d = data
assert list_array_equal(d.ph_times_m, d.iter_ph_times())
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Dem'))):
if d.alternated:
assert (ph == d.ph_times_m[ich][d.D_em[ich] * d.D_ex[ich]]).all()
else:
assert (ph == d.ph_times_m[ich][~d.A_em[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Aem'))):
if d.alternated:
assert (ph == d.ph_times_m[ich][d.A_em[ich] * d.D_ex[ich]]).all()
else:
assert (ph == d.ph_times_m[ich][d.A_em[ich]]).all()
if d.alternated:
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Aex='Dem'))):
assert (ph == d.ph_times_m[ich][d.D_em[ich] * d.A_ex[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Aex='Aem'))):
assert (ph == d.ph_times_m[ich][d.A_em[ich] * d.A_ex[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='DAem'))):
assert (ph == d.ph_times_m[ich][d.D_ex[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Aex='DAem'))):
assert (ph == d.ph_times_m[ich][d.A_ex[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Dem', Aex='Dem'))):
assert (ph == d.ph_times_m[ich][d.D_em[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(Ph_sel(Dex='Aem', Aex='Aem'))):
assert (ph == d.ph_times_m[ich][d.A_em[ich]]).all()
for ich, ph in enumerate(d.iter_ph_times(
Ph_sel(Dex='DAem', Aex='Aem'))):
mask = d.D_ex[ich] + d.A_em[ich] * d.A_ex[ich]
assert (ph == d.ph_times_m[ich][mask]).all()
else:
assert list_array_equal(d.iter_ph_times(),
d.iter_ph_times(Ph_sel(Dex='DAem')))
def test_get_ph_times_period(data):
for ich in range(data.nch):
data.get_ph_times_period(0, ich=ich)
data.get_ph_times_period(0, ich=ich, ph_sel=Ph_sel(Dex='Dem'))
def test_iter_ph_times_period(data):
d = data
for ich in range(data.nch):
for period, ph_period in enumerate(d.iter_ph_times_period(ich=ich)):
istart, iend = d.Lim[ich][period]
assert (ph_period == d.ph_times_m[ich][istart : iend + 1]).all()
ph_sel = Ph_sel(Dex='Dem')
mask = d.get_ph_mask(ich=ich, ph_sel=ph_sel)
for period, ph_period in enumerate(
d.iter_ph_times_period(ich=ich, ph_sel=ph_sel)):
istart, iend = d.Lim[ich][period]
ph_period_test = d.ph_times_m[ich][istart : iend + 1]
ph_period_test = ph_period_test[mask[istart : iend + 1]]
assert (ph_period == ph_period_test).all()
def test_burst_search_py_cy(data):
"""Test python and cython burst search with background-dependent threshold.
"""
data.burst_search(pure_python=True)
mburst1 = [b.copy() for b in data.mburst]
num_bursts1 = data.num_bursts
data.burst_search(pure_python=False)
assert np.all(num_bursts1 == data.num_bursts)
assert mburst1 == data.mburst
data.burst_search(L=30, pure_python=True)
mburst1 = [b.copy() for b in data.mburst]
num_bursts1 = data.num_bursts
data.burst_search(L=30, pure_python=False)
assert np.all(num_bursts1 == data.num_bursts)
assert mburst1 == data.mburst
def test_burst_search_constant_rates(data):
"""Test python and cython burst search with constant threshold."""
data.burst_search(min_rate_cps=50e3, pure_python=True)
assert (data.num_bursts > 0).all()
mburst1 = [b.copy() for b in data.mburst]
num_bursts1 = data.num_bursts
data.burst_search(min_rate_cps=50e3, pure_python=False)
assert (data.num_bursts > 0).all()
assert np.all(num_bursts1 == data.num_bursts)
assert mburst1 == data.mburst
def test_burst_search_L(data):
"""Test burst search with different L arguments."""
data.burst_search(L=10)
for bursts in data.mburst:
assert (bursts.counts >= 10).all()
num_bursts1 = data.num_bursts
data.burst_search(L=30)
for bursts in data.mburst:
assert (bursts.counts >= 30).all()
assert np.all(num_bursts1 > data.num_bursts)
def test_burst_search_with_no_bursts(data):
"""Smoke test burst search when some periods have no bursts."""
# F=600 results in periods with no bursts for the us-ALEX measurement
# and in no bursts at all for the multi-spot measurements
data.burst_search(m=10, F=600)
data.fuse_bursts(ms=1)
if has_matplotlib:
def test_stale_fitter_after_burst_search(data):
"""Test that E/S_fitter attributes are deleted on burst search."""
data.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel(Dex='Dem'))
bplt.dplot(data, bplt.hist_fret) # create E_fitter attribute
if data.alternated:
bplt.dplot(data, bplt.hist_S) # create S_fitter attribute
data.burst_search(L=10, m=10, F=7, ph_sel=Ph_sel(Dex='Aem'))
assert not hasattr(data, 'E_fitter')
if data.alternated:
assert not hasattr(data, 'S_fitter')
bplt.dplot(data, bplt.hist_fret) # create E_fitter attribute
if data.alternated:
bplt.dplot(data, bplt.hist_S) # create S_fitter attribute
data.calc_fret()
assert not hasattr(data, 'E_fitter')
if data.alternated:
assert not hasattr(data, 'S_fitter')
def test_burst_search(data):
"""Smoke test and bg_bs check."""
streams = [Ph_sel(Dex='Dem'), Ph_sel(Dex='Aem')]
if data.alternated:
streams.extend([Ph_sel(Dex='Aem', Aex='Aem'), Ph_sel(Dex='DAem')])
for sel in streams:
data.burst_search(L=10, m=10, F=7, ph_sel=sel)
assert list_equal(data.bg_bs, data.bg_from(sel))
if data.alternated:
data.burst_search(m=10, F=7, ph_sel=Ph_sel(Dex='DAem'), compact=True)
data.burst_search(L=10, m=10, F=7)
def test_burst_search_and_gate(data_1ch):
"""Test consistency of burst search and gate."""
d = data_1ch
assert d.alternated
# Smoke tests
bext.burst_search_and_gate(d, F=(6, 8))
bext.burst_search_and_gate(d, m=(12, 8))
bext.burst_search_and_gate(d, min_rate_cps=(60e3, 40e3))
if d.nch > 1:
mr1 = 35e3 + np.arange(d.nch) * 1e3
mr2 = 30e3 + np.arange(d.nch) * 1e3
bext.burst_search_and_gate(d, min_rate_cps=(mr1, mr2))
# Consistency test
d_dex = d.copy()
d_dex.burst_search(ph_sel=Ph_sel(Dex='DAem'))
d_aex = d.copy()
d_aex.burst_search(ph_sel=Ph_sel(Aex='Aem'))
d_and = bext.burst_search_and_gate(d)
for bursts_dex, bursts_aex, bursts_and, ph in zip(
d_dex.mburst, d_aex.mburst, d_and.mburst, d.iter_ph_times()):
ph_b_mask_dex = bl.ph_in_bursts_mask(ph.size, bursts_dex)
ph_b_mask_aex = bl.ph_in_bursts_mask(ph.size, bursts_aex)
ph_b_mask_and = bl.ph_in_bursts_mask(ph.size, bursts_and)
assert (ph_b_mask_and == ph_b_mask_dex * ph_b_mask_aex).all()
def test_mch_count_ph_num_py_c(data):
na_py = bl.bslib.mch_count_ph_in_bursts_py(data.mburst, data.A_em)
na_c = bl.bslib.mch_count_ph_in_bursts_c(data.mburst, data.A_em)
assert list_array_equal(na_py, na_c)
assert na_py[0].dtype == np.float64
def test_burst_sizes(data):
"""Test for .burst_sizes_ich() and burst_sizes()"""
# Smoke test
plain_sizes = data.burst_sizes()
assert len(plain_sizes) == data.nch
# Test gamma and donor_ref arguments
bs1 = data.burst_sizes_ich(gamma=0.5, donor_ref=True)
bs2 = data.burst_sizes_ich(gamma=0.5, donor_ref=False)
assert np.allclose(bs1, bs2 / 0.5)
# Test add_naa
if data.alternated:
bs_no_naa = data.burst_sizes_ich(add_naa=False)
bs_naa = data.burst_sizes_ich(add_naa=True)
assert np.allclose(bs_no_naa + data.naa[0], bs_naa)
# Test beta and donor_ref arguments with gamma=1
naa1 = data.get_naa_corrected(beta=0.8, donor_ref=True)
naa2 = data.get_naa_corrected(beta=0.8, donor_ref=False)
assert np.allclose(naa1, naa2)
# Test beta and donor_ref arguments with gamma=0.5
naa1 = data.get_naa_corrected(gamma=0.5, beta=0.8, donor_ref=True)
naa2 = data.get_naa_corrected(gamma=0.5, beta=0.8, donor_ref=False)
assert np.allclose(naa1 * 0.5, naa2)
def test_leakage(data):
"""
Test setting leakage before and after burst search
"""
# burst search, then set leakage
data.burst_search()
data.leakage = 0.04
na1 = list(data.na)
# set leakage, then burst search
data.burst_search()
na2 = list(data.na)
assert list_array_equal(na1, na2)
def test_gamma(data):
"""
Test setting gamma before and after burst search
"""
# burst search, then set gamma
data.burst_search()
E0 = list(data.E)
data.gamma = 0.5
E1 = list(data.E)
assert not list_array_equal(E0, E1)
# burst search after setting gamma
data.burst_search()
E2 = list(data.E)
assert list_array_equal(E1, E2)
def test_dir_ex(data_1ch):
"""
Test setting dir_ex before and after burst search
"""
data = data_1ch
# burst search, then set dir_ex
data.burst_search()
na0 = list(data.na)
data.dir_ex = 0.05
na1 = list(data.na)
assert not list_array_equal(na0, na1)
# burst search after setting dir_ex
data.burst_search()
na2 = list(data.na)
assert list_array_equal(na1, na2)
def test_beta(data_1ch):
"""
Test setting beta before and after burst search
"""
data = data_1ch
# burst search, then set beta
data.burst_search()
S0 = list(data.S)
data.beta = 0.7
S1 = list(data.S)
assert not list_array_equal(S0, S1)
# burst search after setting beta
data.burst_search()
S2 = list(data.S)
assert list_array_equal(S1, S2)
def test_bursts_interface(data):
d = data
for b in d.mburst:
assert (b.start == b.data[:, b._i_start]).all()
assert (b.stop == b.data[:, b._i_stop]).all()
assert (b.istart == b.data[:, b._i_istart]).all()
assert (b.istop == b.data[:, b._i_istop]).all()
rate = 1.*b.counts/b.width
assert (b.ph_rate == rate).all()
separation = b.start[1:] - b.stop[:-1]
assert (b.separation == separation).all()
assert (b.stop > b.start).all()
def test_burst_stop_istop(data):
"""Test coherence between b_end() and b_iend()"""
d = data
for ph, bursts in zip(d.ph_times_m, d.mburst):
assert (ph[bursts.istop] == bursts.stop).all()
def test_monotonic_burst_start(data):
"""Test for monotonic burst start times."""
d = data
for i in range(d.nch):
assert (np.diff(d.mburst[i].start) > 0).all()
def test_monotonic_burst_stop(data):
"""Test for monotonic burst stop times."""
d = data
for bursts in d.mburst:
assert (np.diff(bursts.stop) > 0).all()
def test_burst_istart_iend_size(data):
"""Test consistency between burst istart, istop and counts (i.e. size)"""
d = data
for bursts in d.mburst:
counts = bursts.istop - bursts.istart + 1
assert (counts == bursts.counts).all()
def test_burst_recompute_times(data):
"""Test Bursts.recompute_times method."""
d = data
for times, bursts in zip(d.ph_times_m, d.mburst):
newbursts = bursts.recompute_times(times)
assert newbursts == bursts
def test_burst_recompute_index(data):
"""Test Bursts.recompute_index_* methods."""
d = data
ph_sel = Ph_sel(Dex='Dem')
d.burst_search(ph_sel=ph_sel, index_allph=True)
d_sel = d.copy()
d_sel.burst_search(ph_sel=ph_sel, index_allph=False)
for times_sel, mask_sel, bursts_sel, times_allph, bursts_allph in zip(
d.iter_ph_times(ph_sel=ph_sel),
d.iter_ph_masks(ph_sel=ph_sel),
d_sel.mburst,
d.iter_ph_times(),
d.mburst):
assert (times_sel[bursts_sel.istart] == bursts_sel.start).all()
assert (times_sel[bursts_sel.istop] == bursts_sel.stop).all()
assert (times_allph[bursts_allph.istart] == bursts_allph.start).all()
assert (times_allph[bursts_allph.istop] == bursts_allph.stop).all()
# Test individual methods
bursts_allph2 = bursts_sel.recompute_index_expand(mask_sel)
assert bursts_allph2 == bursts_allph
assert (times_allph[bursts_allph2.istart] == bursts_allph2.start).all()
assert (times_allph[bursts_allph2.istop] == bursts_allph2.stop).all()
bursts_sel2 = bursts_allph.recompute_index_reduce(times_sel)
assert (times_sel[bursts_sel2.istart] == bursts_sel2.start).all()
assert (times_sel[bursts_sel2.istop] == bursts_sel2.stop).all()
assert bursts_sel2 == bursts_sel
# Test round-trip
bursts_allph3 = bursts_sel2.recompute_index_expand(mask_sel)
assert bursts_allph3 == bursts_allph2
assert (times_allph[bursts_allph3.istart] == bursts_allph3.start).all()
assert (times_allph[bursts_allph3.istop] == bursts_allph3.stop).all()
## This test is only used to develop alternative implementations of
## Bursts.recompute_index_reduce() and is normally disabled as it is very slow.
#def test_burst_recompute_index_reduce(data):
# """Test different versions of Bursts.recompute_index_reduce methods.
#
# This test is very slow so it's normally disabled.
# """
# d = data
# ph_sel = Ph_sel(Dex='Aem')
# d.burst_search(ph_sel=ph_sel)
# d_sel = d.copy()
# d_sel.burst_search(ph_sel=ph_sel, index_allph=False)
# for times_sel, bursts_sel, times_allph, bursts_allph in zip(
# d.iter_ph_times(ph_sel=ph_sel),
# d_sel.mburst,
# d.iter_ph_times(),
# d.mburst):
# assert (times_allph[bursts_allph.istart] == bursts_allph.start).all()
# assert (times_allph[bursts_allph.istop] == bursts_allph.stop).all()
#
# bursts_sel1 = bursts_allph.recompute_index_reduce(times_sel)
# bursts_sel2 = bursts_allph.recompute_index_reduce2(times_sel)
# assert bursts_sel1 == bursts_sel2
# assert bursts_sel == bursts_sel1
def test_phrates_mtuple(data):
d = data
m = 10
max_num_ph = 20001
for ph in d.iter_ph_times():
phc = ph[:max_num_ph]
rates = phrates.mtuple_rates(phc, m)
delays = phrates.mtuple_delays(phc, m)
t_rates = 0.5 * (phc[m-1:] + phc[:-m+1])
assert phrates.mtuple_rates_max(phc, m) == rates.max()
assert phrates.mtuple_delays_min(phc, m) == delays.min()
assert phrates.default_c == 1
assert (rates == (m - 1 - phrates.default_c) / delays).all()
assert (phrates.mtuple_rates_t(phc, m) == t_rates).all()
if has_numba:
def test_phrates_kde(data):
d = data
tau = 5000 # 5000 * 12.5ns = 6.25 us
for ph in d.iter_ph_times():
# Test consistency of kde_laplace_nph and (kde_laplace, kde_rect)
rates = phrates.kde_laplace(ph, tau)
nrect = phrates.kde_rect(ph, tau*10)
ratesl, nph = phrates.nb.kde_laplace_nph(ph, tau)
assert (rates == ratesl).all()
assert (nph == nrect).all()
# Test consistency of kde_laplace and _kde_laplace_self_numba
ratesl2, nph2 = phrates.nb.kde_laplace_self_numba(ph, tau)
assert (nph2 == nrect).all()
assert (ratesl2 == rates).all()
# Smoke test laplace, gaussian, rect with time_axis
ratesl = phrates.kde_laplace(ph, tau, time_axis=ph+1)
assert ((ratesl >= 0) * (ratesl < 5e6)).all()
ratesg = phrates.kde_gaussian(ph, tau, time_axis=ph+1)
assert ((ratesg >= 0) * (ratesg < 5e6)).all()
ratesr = phrates.kde_rect(ph, tau, time_axis=ph+1)
assert ((ratesr >= 0) * (ratesr < 5e6)).all()
def test_phrates_kde_cy(data):
d = data
tau = 5000 # 5000 * 12.5ns = 6.25 us
for ph in d.iter_ph_times():
# Test consistency of kde_laplace_nph and (kde_laplace, kde_rect)
ratesg = phrates.nb.kde_gaussian_numba(ph, tau)
ratesl = phrates.nb.kde_laplace_numba(ph, tau)
ratesr = phrates.nb.kde_rect_numba(ph, tau)
ratesgc = phrates.cy.kde_gaussian_cy(ph, tau)
rateslc = phrates.cy.kde_laplace_cy(ph, tau)
ratesrc = phrates.cy.kde_rect_cy(ph, tau)
assert (ratesg == ratesgc).all()
assert (ratesl == rateslc).all()
assert (ratesr == ratesrc).all()
def test_burst_ph_data_functions(data):
"""Tests the functions that iterate or operate on per-burst "ph-data".
"""
d = data
for bursts, ph, mask in zip(d.mburst, d.iter_ph_times(),
d.iter_ph_masks(Ph_sel(Dex='Dem'))):
bstart = bursts.start
bend = bursts.stop
for i, (start, stop) in enumerate(bl.iter_bursts_start_stop(bursts)):
assert ph[start] == bstart[i]
assert ph[stop-1] == bend[i]
for i, burst_ph in enumerate(bl.iter_bursts_ph(ph, bursts)):
assert burst_ph[0] == bstart[i]
assert burst_ph[-1] == bend[i]
for i, burst_ph in enumerate(bl.iter_bursts_ph(ph, bursts, mask=mask)):
if burst_ph.size > 0:
assert burst_ph[0] >= bstart[i]
assert burst_ph[-1] <= bend[i]
stats = bl.burst_ph_stats(ph, bursts, mask=mask)
assert (stats[~np.isnan(stats)] >= bstart[~np.isnan(stats)]).all()
assert (stats[~np.isnan(stats)] <= bend[~np.isnan(stats)]).all()
bistart = bursts.istart
biend = bursts.istop
bursts_mask = bl.ph_in_bursts_mask(ph.size, bursts)
for i, (start, stop) in enumerate(bl.iter_bursts_start_stop(bursts)):
assert bursts_mask[start:stop].all()
if start > 0:
if i > 0 and biend[i-1] < bistart[i] - 1:
assert not bursts_mask[start - 1]
if stop < ph.size:
if i < bistart.size-1 and bistart[i+1] > biend[i] + 1:
assert not bursts_mask[stop]
def test_ph_in_bursts_ich(data):
"""Tests the ph_in_bursts_ich method.
"""
d = data
for ich in range(d.nch):
ph_in_bursts = d.ph_in_bursts_ich(ich)
ph_in_bursts_dd = d.ph_in_bursts_ich(ich, ph_sel=Ph_sel(Dex='Dem'))
assert ph_in_bursts_dd.size < ph_in_bursts.size
def test_burst_fuse(data):
"""Test 2 independent implementations of fuse_bursts for consistency.
"""
d = data
for bursts in d.mburst:
new_mbursti = bl.fuse_bursts_iter(bursts, ms=1)
new_mburstd = bl.fuse_bursts_direct(bursts, ms=1)
assert new_mbursti == new_mburstd
def test_burst_fuse_0ms(data):
"""Test that after fusing with ms=0 the sum of bursts sizes is that same
as the number of ph in bursts (via burst selection).
"""
d = data
if d.nch == 8:
d.burst_search(L=10, m=10, F=7, computefret=False)
d.mburst[1] = bl.bslib.Bursts.empty() # Make one channel with no bursts
d._calc_burst_period()
d.calc_fret(count_ph=True)
df = d.fuse_bursts(ms=0)
for ich, bursts in enumerate(df.mburst):
mask = bl.ph_in_bursts_mask(df.ph_data_sizes[ich], bursts)
assert mask.sum() == bursts.counts.sum()
df.calc_fret(count_ph=True)
assert len(df.mburst) == len(d.mburst)
assert len(df.mburst) == d.nch
def test_burst_fuse_separation(data):
"""Test that after fusing bursts the minimum separation is equal
to the threshold used during fusing.
"""
d = data
fuse_ms = 2
df = d.fuse_bursts(ms=fuse_ms)
for bursts in df.mburst:
separation = bursts.separation * df.clk_p
if bursts.num_bursts > 0:
assert separation.min() >= fuse_ms * 1e-3
def test_calc_sbr(data):
"""Smoke test Data.calc_sbr()"""
data.calc_sbr()
def test_calc_max_rate(data):
"""Smoke test for Data.calc_max_rate()"""
data.calc_max_rate(m=10)
if data.alternated:
data.calc_max_rate(m=10, ph_sel=Ph_sel(Dex='DAem'), compact=True)
def test_burst_data(data):
"""Test for bext.burst_data()"""
bext.burst_data(data, include_bg=True, include_ph_index=True)
bext.burst_data(data, include_bg=False, include_ph_index=True)
bext.burst_data(data, include_bg=True, include_ph_index=False)
bext.burst_data(data, include_bg=False, include_ph_index=False)
def test_print_burst_stats(data):
"""Smoke test for burstlib.print_burst_stats()"""
bl.print_burst_stats(data)
def test_expand(data):
"""Test method `expand()` for `Data()`."""
d = data
for ich, bursts in enumerate(d.mburst):
if bursts.num_bursts == 0:
continue # if no bursts skip this ch
nd, na, bg_d, bg_a, width = d.expand(ich, width=True)
width2 = bursts.width * d.clk_p
period = d.bp[ich]
bg_d2 = d.bg_from(Ph_sel(Dex='Dem'))[ich][period] * width2
bg_a2 = d.bg_from(Ph_sel(Dex='Aem'))[ich][period] * width2
assert (width == width2).all()
assert (nd == d.nd[ich]).all() and (na == d.na[ich]).all()
assert (bg_d == bg_d2).all() and (bg_a == bg_a2).all()
def test_burst_data_ich(data):
"""Test method `Data.burst_data_ich()`."""
d = data
for ich, bursts in enumerate(d.mburst):
if bursts.num_bursts == 0:
continue # if no bursts skip this ch
burst_dict = d.burst_data_ich(ich=ich)
assert (burst_dict['size_raw'] == bursts.counts).all()
assert (burst_dict['t_start'] == bursts.start * d.clk_p).all()
assert (burst_dict['t_stop'] == bursts.stop * d.clk_p).all()
assert (burst_dict['i_start'] == bursts.istart).all()
assert (burst_dict['i_stop'] == bursts.istop).all()
assert (burst_dict['bg_period'] == d.bp[ich]).all()
nd, na, bg_d, bg_a, width = d.expand(ich, width=True)
width_ms = width * 1e3
assert (width_ms == burst_dict['width_ms']).all()
assert (nd == burst_dict['nd']).all()
assert (na == burst_dict['na']).all()
assert (bg_d == burst_dict['bg_dd']).all()
assert (bg_a == burst_dict['bg_ad']).all()
if d.alternated:
period = d.bp[ich]
bg_da = d.bg_from(Ph_sel(Aex='Dem'))[ich][period] * width
bg_aa = d.bg_from(Ph_sel(Aex='Aem'))[ich][period] * width
assert (bg_da == burst_dict['bg_da']).all()
assert (bg_aa == burst_dict['bg_aa']).all()
def test_burst_corrections(data):
"""Test background and bleed-through corrections."""
d = data
d.calc_ph_num(alex_all=True)
d.corrections()
leakage = d.get_leakage_array()
for ich, bursts in enumerate(d.mburst):
if bursts.num_bursts == 0: continue # if no bursts skip this ch
nd, na, bg_d, bg_a, width = d.expand(ich, width=True)
burst_size_raw = bursts.counts
lk = leakage[ich]
if d.alternated:
nda, naa = d.nda[ich], d.naa[ich]
period = d.bp[ich]
bg_da = d.bg_from(Ph_sel(Aex='Dem'))[ich][period]*width
bg_aa = d.bg_from(Ph_sel(Aex='Aem'))[ich][period]*width
burst_size_raw2 = (nd + na + bg_d + bg_a + lk*nd + nda + naa +
bg_da + bg_aa)
assert np.allclose(burst_size_raw, burst_size_raw2)
else:
burst_size_raw2 = nd + na + bg_d + bg_a + lk*nd
assert np.allclose(burst_size_raw, burst_size_raw2)
def test_burst_search_consistency(data):
"""Test consistency of burst data array
"""
d = data
for mb, ph in zip(d.mburst, d.iter_ph_times()):
tot_size = mb.counts
istart, istop = mb.istart, mb.istop
assert np.all(tot_size == istop - istart + 1)
start, stop, width = mb.start, mb.stop, mb.width
assert np.all(width == stop - start)
df = d.fuse_bursts(ms=0)
for mb, ph in zip(df.mburst, df.iter_ph_times()):
tot_size = mb.counts
istart, istop = mb.istart, mb.istop
assert np.all(tot_size == istop - istart + 1)
start, stop, width = mb.start, mb.stop, mb.width
assert np.all(width == stop - start)
df = d.fuse_bursts(ms=1)
for mb, ph in zip(df.mburst, df.iter_ph_times()):
tot_size = mb.counts
istart, istop = mb.istart, mb.istop
assert np.all(tot_size <= istop - istart + 1)
start, stop, width = mb.start, mb.stop, mb.width
assert np.all(width <= stop - start)
def test_E_and_S_with_corrections(data):
d = data
gamma = 0.5
beta = 0.7
d.gamma = gamma
d.beta = beta
for i, (E, nd, na) in enumerate(zip(d.E, d.nd, d.na)):
assert (E == na / (nd * gamma + na)).all()
if d.alternated:
naa = d.naa[i]
if 'PAX' in data.meas_type:
naa = d.naa[i] - d.nar[i]
assert (d.S[i] == (gamma * nd + na) /
(gamma * nd + na + naa / beta)).all()
def test_burst_size_da(data):
"""Test that nd + na with no corrections is equal to b_size(mburst).
"""
d = data
d.calc_ph_num(alex_all=True)
if d.alternated:
for mb, nd, na, naa, nda in zip(d.mburst, d.nd, d.na, d.naa, d.nda):
tot_size = mb.counts
tot_size2 = nd + na + naa + nda
assert np.allclose(tot_size, tot_size2)
else:
for mb, nd, na in zip(d.mburst, d.nd, d.na):
tot_size = mb.counts
assert (tot_size == nd + na).all()
def test_burst_selection(data):
"""Smoke test for burst selection methods.
"""
d = data
d.select_bursts(select_bursts.size, th1=20, th2=100, add_naa=True)
d.select_bursts(select_bursts.size, th1=20, th2=100, gamma=0.5)
M1 = d.select_bursts_mask(select_bursts.consecutive, th1=1e-3, th2=1e4,
kind='first')
M2 = d.select_bursts_mask(select_bursts.consecutive, th1=1e-3, th2=1e4,
kind='second')
Mb = d.select_bursts_mask(select_bursts.consecutive, th1=1e-3, th2=1e4,
kind='both')
Mb2 = [m1 + m2 for m1, m2 in zip(M1, M2)]
assert list_array_equal(Mb, Mb2)
def test_burst_selection_nocorrections(data):
"""Test burst selection with uncorrected bursts.
"""
d = data
d.burst_search(computefret=False)
d.calc_fret(count_ph=True, corrections=False)
ds1 = d.select_bursts(select_bursts.size, th1=20, th2=100,
computefret=False)
ds2 = d.select_bursts(select_bursts.size, th1=20, th2=100)
ds2.calc_ph_num()
ds2.calc_fret(corrections=False)
assert list_array_equal(ds1.nd, ds2.nd)
assert list_array_equal(ds1.na, ds2.na)
assert list_array_equal(ds1.E, ds2.E)
if d.alternated:
assert list_array_equal(ds1.naa, ds2.naa)
assert list_array_equal(ds1.E, ds2.E)
def test_burst_selection_ranges(data):
"""Test selection functions having a min-max range.
"""
d = data
d.burst_search()
d.calc_max_rate(m=10, ph_sel=Ph_sel(Dex='DAem'))
Range = namedtuple('Range', ['min', 'max', 'getter'])
sel_functions = dict(
E=Range(0.5, 1, None), nd=Range(30, 40, None), na=Range(30, 40, None),
time=Range(1, 61, lambda d, ich: d.mburst[ich].start * d.clk_p),
width=Range(0.5, 1.5, lambda d, ich: d.mburst[ich].width * d.clk_p*1e3),
peak_phrate=Range(50e3, 150e3, lambda d, ich: d.max_rate[ich]))
if d.alternated:
sel_functions.update(naa=Range(30, 40, None), S=Range(0.3, 0.7, None))
for func_name, range_ in sel_functions.items():
func = getattr(select_bursts, func_name)
getter = range_.getter
if getter is None:
getter = lambda d, ich: d[func_name][ich]
ds = d.select_bursts(func, args=(range_.min, range_.max))
for ich in range(d.nch):
selected = getter(ds, ich)
assert ((selected >= range_.min) * (selected <= range_.max)).all()
def test_join_data(data):
"""Smoke test for bext.join_data() function.
"""
d = data
dj = bext.join_data([d, d.copy()])
assert (dj.num_bursts == 2 * d.num_bursts).all()
for bursts in dj.mburst:
assert (np.diff(bursts.start) > 0).all()
def test_collapse(data_8ch):
"""Test the .collapse() method that joins the ch.
"""
d = data_8ch
dc1 = d.collapse()
bursts1 = dc1.mburst[0]
bursts2 = bl.bslib.Bursts.merge(d.mburst, sort=True)
assert bursts1 == bursts2
bursts2 = bl.bslib.Bursts.merge(d.mburst, sort=False)
indexsort_stop = bursts2.stop.argsort()
bursts3 = bursts2[indexsort_stop]
indexsort_start = bursts3.start.argsort()
bursts4 = bursts3[indexsort_start]
assert bursts1 == bursts4
indexsort = np.lexsort((bursts2.stop, bursts2.start))
for name in d.burst_fields:
if name not in d or name == 'mburst':
continue
newfield = np.hstack(d[name])[indexsort]
assert np.allclose(dc1[name][0], newfield)
dc2 = d.collapse(update_gamma=False)
for name in d.burst_fields:
if name not in d: continue
if name == 'mburst':
assert dc1.mburst[0] == dc2.mburst[0]
else:
assert np.allclose(dc1[name][0], dc2[name][0])
if __name__ == '__main__':
pytest.main("-x -v fretbursts/tests/test_burstlib.py")
| gpl-2.0 |
kylerbrown/scikit-learn | benchmarks/bench_20newsgroups.py | 377 | 3555 | from __future__ import print_function, division
from time import time
import argparse
import numpy as np
from sklearn.dummy import DummyClassifier
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.metrics import accuracy_score
from sklearn.utils.validation import check_array
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
ESTIMATORS = {
"dummy": DummyClassifier(),
"random_forest": RandomForestClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"extra_trees": ExtraTreesClassifier(n_estimators=100,
max_features="sqrt",
min_samples_split=10),
"logistic_regression": LogisticRegression(),
"naive_bayes": MultinomialNB(),
"adaboost": AdaBoostClassifier(n_estimators=10),
}
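# Example invocation (editor's note, inferred from the argparse setup below):
#
#     python bench_20newsgroups.py -e logistic_regression naive_bayes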
###############################################################################
# Data
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-e', '--estimators', nargs="+", required=True,
choices=ESTIMATORS)
args = vars(parser.parse_args())
data_train = fetch_20newsgroups_vectorized(subset="train")
data_test = fetch_20newsgroups_vectorized(subset="test")
X_train = check_array(data_train.data, dtype=np.float32,
accept_sparse="csc")
X_test = check_array(data_test.data, dtype=np.float32, accept_sparse="csr")
y_train = data_train.target
y_test = data_test.target
print("20 newsgroups")
print("=============")
print("X_train.shape = {0}".format(X_train.shape))
print("X_train.format = {0}".format(X_train.format))
print("X_train.dtype = {0}".format(X_train.dtype))
print("X_train density = {0}"
"".format(X_train.nnz / np.product(X_train.shape)))
print("y_train {0}".format(y_train.shape))
print("X_test {0}".format(X_test.shape))
print("X_test.format = {0}".format(X_test.format))
print("X_test.dtype = {0}".format(X_test.dtype))
print("y_test {0}".format(y_test.shape))
print()
print("Classifier Training")
print("===================")
accuracy, train_time, test_time = {}, {}, {}
for name in sorted(args["estimators"]):
clf = ESTIMATORS[name]
try:
clf.set_params(random_state=0)
except (TypeError, ValueError):
pass
print("Training %s ... " % name, end="")
t0 = time()
clf.fit(X_train, y_train)
train_time[name] = time() - t0
t0 = time()
y_pred = clf.predict(X_test)
test_time[name] = time() - t0
accuracy[name] = accuracy_score(y_test, y_pred)
print("done")
print()
print("Classification performance:")
print("===========================")
print()
print("%s %s %s %s" % ("Classifier ", "train-time", "test-time",
"Accuracy"))
print("-" * 44)
for name in sorted(accuracy, key=accuracy.get):
print("%s %s %s %s" % (name.ljust(16),
("%.4fs" % train_time[name]).center(10),
("%.4fs" % test_time[name]).center(10),
("%.4f" % accuracy[name]).center(10)))
print()
| bsd-3-clause |
paultcochrane/bokeh | examples/charts/file/stocks_timeseries.py | 33 | 1230 | from collections import OrderedDict
import pandas as pd
from bokeh.charts import TimeSeries, show, output_file
# read in some stock data from the Yahoo Finance API
AAPL = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=AAPL&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
MSFT = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=MSFT&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
IBM = pd.read_csv(
"http://ichart.yahoo.com/table.csv?s=IBM&a=0&b=1&c=2000&d=0&e=1&f=2010",
parse_dates=['Date'])
xyvalues = OrderedDict(
AAPL=AAPL['Adj Close'],
Date=AAPL['Date'],
MSFT=MSFT['Adj Close'],
IBM=IBM['Adj Close'],
)
# any of the following commented lines are valid TimeSeries inputs
#xyvalues = pd.DataFrame(xyvalues)
#lindex = xyvalues.pop('Date')
#lxyvalues = list(xyvalues.values())
#lxyvalues = np.array(xyvalues.values())
TOOLS="resize,pan,wheel_zoom,box_zoom,reset,previewsave"
output_file("stocks_timeseries.html")
ts = TimeSeries(
xyvalues, index='Date', legend=True,
title="Timeseries", tools=TOOLS, ylabel='Stock Prices')
# usage with iterable index
#ts = TimeSeries(
# lxyvalues, index=lindex,
# title="timeseries, pd_input", ylabel='Stock Prices')
show(ts)
| bsd-3-clause |
gerddie/nipype | nipype/algorithms/rapidart.py | 9 | 30137 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""
The rapidart module provides routines for artifact detection and region of
interest analysis.
These functions include:
* ArtifactDetect: performs artifact detection on functional images
* StimulusCorrelation: determines correlation between stimuli
schedule and movement/intensity parameters
Change directory to provide relative paths for doctests
>>> import os
>>> filepath = os.path.dirname( os.path.realpath( __file__ ) )
>>> datadir = os.path.realpath(os.path.join(filepath, '../testing/data'))
>>> os.chdir(datadir)
"""
import os
from copy import deepcopy
from warnings import warn
from nibabel import load, funcs, Nifti1Image
import numpy as np
from scipy import signal
import scipy.io as sio
from nipype.external import six
from ..interfaces.base import (BaseInterface, traits, InputMultiPath,
OutputMultiPath, TraitedSpec, File,
BaseInterfaceInputSpec, isdefined)
from ..utils.filemanip import filename_to_list, save_json, split_filename
from ..utils.misc import find_indices
from .. import logging, config
iflogger = logging.getLogger('interface')
def _get_affine_matrix(params, source):
"""Return affine matrix given a set of translation and rotation parameters
    params : np.array (up to 12 long) in native package format
source : the package that generated the parameters
supports SPM, AFNI, FSFAST, FSL, NIPY
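    Example (editor's illustration, not part of the original module): all-zero
    parameters reduce to the identity transform, shown here for source='SPM'.
    >>> import numpy as np
    >>> bool(np.allclose(_get_affine_matrix(np.zeros(6), 'SPM'), np.eye(4)))
    True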
"""
if source == 'FSL':
params = params[[3, 4, 5, 0, 1, 2]]
elif source in ('AFNI', 'FSFAST'):
params = params[np.asarray([4, 5, 3, 1, 2, 0]) + (len(params) > 6)]
params[3:] = params[3:] * np.pi / 180.
if source == 'NIPY':
# nipy does not store typical euler angles, use nipy to convert
from nipy.algorithms.registration import to_matrix44
return to_matrix44(params)
    # process for FSL, SPM, AFNI and FSFAST
rotfunc = lambda x: np.array([[np.cos(x), np.sin(x)],
[-np.sin(x), np.cos(x)]])
q = np.array([0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0])
if len(params) < 12:
params = np.hstack((params, q[len(params):]))
params.shape = (len(params),)
# Translation
T = np.eye(4)
T[0:3, -1] = params[0:3]
# Rotation
Rx = np.eye(4)
Rx[1:3, 1:3] = rotfunc(params[3])
Ry = np.eye(4)
Ry[(0, 0, 2, 2), (0, 2, 0, 2)] = rotfunc(params[4]).ravel()
Rz = np.eye(4)
Rz[0:2, 0:2] = rotfunc(params[5])
# Scaling
S = np.eye(4)
S[0:3, 0:3] = np.diag(params[6:9])
# Shear
Sh = np.eye(4)
Sh[(0, 0, 1), (1, 2, 2)] = params[9:12]
if source in ('AFNI', 'FSFAST'):
return np.dot(T, np.dot(Ry, np.dot(Rx, np.dot(Rz, np.dot(S, Sh)))))
return np.dot(T, np.dot(Rx, np.dot(Ry, np.dot(Rz, np.dot(S, Sh)))))
def _calc_norm(mc, use_differences, source, brain_pts=None):
"""Calculates the maximum overall displacement of the midpoints
of the faces of a cube due to translation and rotation.
Parameters
----------
mc : motion parameter estimates
[3 translation, 3 rotation (radians)]
use_differences : boolean
brain_pts : [4 x n_points] of coordinates
Returns
-------
norm : at each time point
displacement : euclidean distance (mm) of displacement at each coordinate
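    Examples
    --------
    Editor's illustration (not part of the original module): zero motion yields
    a zero norm and, without brain_pts, no displacement map.
    >>> import numpy as np
    >>> norm, disp = _calc_norm(np.zeros((4, 6)), False, 'SPM')
    >>> bool(np.allclose(norm, 0)), disp is None
    (True, True)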
"""
if brain_pts is None:
respos = np.diag([70, 70, 75])
resneg = np.diag([-70, -110, -45])
all_pts = np.vstack((np.hstack((respos, resneg)), np.ones((1, 6))))
displacement = None
else:
all_pts = brain_pts
n_pts = all_pts.size - all_pts.shape[1]
newpos = np.zeros((mc.shape[0], n_pts))
if brain_pts is not None:
displacement = np.zeros((mc.shape[0], n_pts / 3))
for i in range(mc.shape[0]):
affine = _get_affine_matrix(mc[i, :], source)
newpos[i, :] = np.dot(affine,
all_pts)[0:3, :].ravel()
if brain_pts is not None:
displacement[i, :] = \
np.sqrt(np.sum(np.power(np.reshape(newpos[i, :],
(3, all_pts.shape[1])) -
all_pts[0:3, :],
2),
axis=0))
# np.savez('displacement.npz', newpos=newpos, pts=all_pts)
normdata = np.zeros(mc.shape[0])
if use_differences:
newpos = np.concatenate((np.zeros((1, n_pts)),
np.diff(newpos, n=1, axis=0)), axis=0)
for i in range(newpos.shape[0]):
normdata[i] = \
np.max(np.sqrt(np.sum(np.reshape(np.power(np.abs(newpos[i, :]), 2),
(3, all_pts.shape[1])), axis=0)))
else:
newpos = np.abs(signal.detrend(newpos, axis=0, type='constant'))
normdata = np.sqrt(np.mean(np.power(newpos, 2), axis=1))
return normdata, displacement
def _nanmean(a, axis=None):
"""Return the mean excluding items that are nan
>>> a = [1, 2, np.nan]
>>> _nanmean(a)
1.5
"""
if axis:
return np.nansum(a, axis) / np.sum(1 - np.isnan(a), axis)
else:
return np.nansum(a) / np.sum(1 - np.isnan(a))
class ArtifactDetectInputSpec(BaseInterfaceInputSpec):
realigned_files = InputMultiPath(File(exists=True),
desc="Names of realigned functional data files",
mandatory=True)
realignment_parameters = InputMultiPath(File(exists=True), mandatory=True,
desc=("Names of realignment parameters"
"corresponding to the functional data files"))
parameter_source = traits.Enum("SPM", "FSL", "AFNI", "NiPy", "FSFAST",
desc="Source of movement parameters",
mandatory=True)
use_differences = traits.ListBool([True, False], minlen=2, maxlen=2,
usedefault=True,
desc=("Use differences between successive motion (first element)"
"and intensity paramter (second element) estimates in order"
"to determine outliers. (default is [True, False])"))
use_norm = traits.Bool(True, requires=['norm_threshold'],
desc=("Uses a composite of the motion parameters in "
"order to determine outliers."),
usedefault=True)
norm_threshold = traits.Float(desc=("Threshold to use to detect motion-rela"
"ted outliers when composite motion is "
"being used"), mandatory=True,
xor=['rotation_threshold',
'translation_threshold'])
rotation_threshold = traits.Float(mandatory=True, xor=['norm_threshold'],
desc=("Threshold (in radians) to use to detect rotation-related "
"outliers"))
translation_threshold = traits.Float(mandatory=True, xor=['norm_threshold'],
desc=("Threshold (in mm) to use to detect translation-related "
"outliers"))
zintensity_threshold = traits.Float(mandatory=True,
desc=("Intensity Z-threshold use to detection images that deviate "
"from the mean"))
mask_type = traits.Enum('spm_global', 'file', 'thresh',
desc=("Type of mask that should be used to mask the functional "
"data. *spm_global* uses an spm_global like calculation to "
"determine the brain mask. *file* specifies a brain mask "
"file (should be an image file consisting of 0s and 1s). "
"*thresh* specifies a threshold to use. By default all voxels"
"are used, unless one of these mask types are defined."),
mandatory=True)
mask_file = File(exists=True,
desc="Mask file to be used if mask_type is 'file'.")
mask_threshold = traits.Float(desc=("Mask threshold to be used if mask_type"
" is 'thresh'."))
intersect_mask = traits.Bool(True,
desc=("Intersect the masks when computed from "
"spm_global."))
save_plot = traits.Bool(True, desc="save plots containing outliers",
usedefault=True)
plot_type = traits.Enum('png', 'svg', 'eps', 'pdf',
desc="file type of the outlier plot",
usedefault=True)
bound_by_brainmask = traits.Bool(False, desc=("use the brain mask to "
"determine bounding box"
"for composite norm (works"
"for SPM and Nipy - currently"
"inaccurate for FSL, AFNI"),
usedefault=True)
global_threshold = traits.Float(8.0, desc=("use this threshold when mask "
"type equal's spm_global"),
usedefault=True)
class ArtifactDetectOutputSpec(TraitedSpec):
outlier_files = OutputMultiPath(File(exists=True),
desc=("One file for each functional run containing a list of "
"0-based indices corresponding to outlier volumes"))
intensity_files = OutputMultiPath(File(exists=True),
desc=("One file for each functional run containing the global "
"intensity values determined from the brainmask"))
norm_files = OutputMultiPath(File,
desc=("One file for each functional run containing the composite "
"norm"))
statistic_files = OutputMultiPath(File(exists=True),
desc=("One file for each functional run containing information "
"about the different types of artifacts and if design info is"
" provided then details of stimulus correlated motion and a "
"listing or artifacts by event type."))
plot_files = OutputMultiPath(File,
desc=("One image file for each functional run containing the "
"detected outliers"))
mask_files = OutputMultiPath(File,
desc=("One image file for each functional run containing the mask"
"used for global signal calculation"))
displacement_files = OutputMultiPath(File,
desc=("One image file for each functional run containing the voxel"
"displacement timeseries"))
class ArtifactDetect(BaseInterface):
"""Detects outliers in a functional imaging series
Uses intensity and motion parameters to infer outliers. If `use_norm` is
    True, it computes the movement of the center of each face of a cuboid centered
around the head and returns the maximal movement across the centers.
Examples
--------
>>> ad = ArtifactDetect()
>>> ad.inputs.realigned_files = 'functional.nii'
>>> ad.inputs.realignment_parameters = 'functional.par'
>>> ad.inputs.parameter_source = 'FSL'
>>> ad.inputs.norm_threshold = 1
>>> ad.inputs.use_differences = [True, False]
>>> ad.inputs.zintensity_threshold = 3
>>> ad.run() # doctest: +SKIP
"""
input_spec = ArtifactDetectInputSpec
output_spec = ArtifactDetectOutputSpec
def __init__(self, **inputs):
super(ArtifactDetect, self).__init__(**inputs)
def _get_output_filenames(self, motionfile, output_dir):
"""Generate output files based on motion filenames
Parameters
----------
motionfile: file/string
Filename for motion parameter file
output_dir: string
output directory in which the files will be generated
"""
if isinstance(motionfile, six.string_types):
infile = motionfile
elif isinstance(motionfile, list):
infile = motionfile[0]
else:
raise Exception("Unknown type of file")
_, filename, ext = split_filename(infile)
artifactfile = os.path.join(output_dir, ''.join(('art.', filename,
'_outliers.txt')))
intensityfile = os.path.join(output_dir, ''.join(('global_intensity.',
filename, '.txt')))
statsfile = os.path.join(output_dir, ''.join(('stats.', filename,
'.txt')))
normfile = os.path.join(output_dir, ''.join(('norm.', filename,
'.txt')))
plotfile = os.path.join(output_dir, ''.join(('plot.', filename, '.',
self.inputs.plot_type)))
displacementfile = os.path.join(output_dir, ''.join(('disp.',
filename, ext)))
maskfile = os.path.join(output_dir, ''.join(('mask.', filename, ext)))
return (artifactfile, intensityfile, statsfile, normfile, plotfile,
displacementfile, maskfile)
def _list_outputs(self):
outputs = self._outputs().get()
outputs['outlier_files'] = []
outputs['intensity_files'] = []
outputs['statistic_files'] = []
outputs['mask_files'] = []
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
outputs['norm_files'] = []
if self.inputs.bound_by_brainmask:
outputs['displacement_files'] = []
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
outputs['plot_files'] = []
for i, f in enumerate(filename_to_list(self.inputs.realigned_files)):
(outlierfile, intensityfile, statsfile, normfile, plotfile,
displacementfile, maskfile) = \
self._get_output_filenames(f, os.getcwd())
outputs['outlier_files'].insert(i, outlierfile)
outputs['intensity_files'].insert(i, intensityfile)
outputs['statistic_files'].insert(i, statsfile)
outputs['mask_files'].insert(i, maskfile)
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
outputs['norm_files'].insert(i, normfile)
if self.inputs.bound_by_brainmask:
outputs['displacement_files'].insert(i, displacementfile)
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
outputs['plot_files'].insert(i, plotfile)
return outputs
def _plot_outliers_with_wave(self, wave, outliers, name):
import matplotlib.pyplot as plt
plt.plot(wave)
plt.ylim([wave.min(), wave.max()])
plt.xlim([0, len(wave) - 1])
if len(outliers):
plt.plot(np.tile(outliers[:, None], (1, 2)).T,
np.tile([wave.min(), wave.max()], (len(outliers), 1)).T,
'r')
plt.xlabel('Scans - 0-based')
plt.ylabel(name)
def _detect_outliers_core(self, imgfile, motionfile, runidx, cwd=None):
"""
Core routine for detecting outliers
"""
if not cwd:
cwd = os.getcwd()
# read in functional image
if isinstance(imgfile, six.string_types):
nim = load(imgfile)
elif isinstance(imgfile, list):
if len(imgfile) == 1:
nim = load(imgfile[0])
else:
images = [load(f) for f in imgfile]
nim = funcs.concat_images(images)
# compute global intensity signal
(x, y, z, timepoints) = nim.get_shape()
data = nim.get_data()
affine = nim.get_affine()
g = np.zeros((timepoints, 1))
masktype = self.inputs.mask_type
if masktype == 'spm_global': # spm_global like calculation
iflogger.debug('art: using spm global')
intersect_mask = self.inputs.intersect_mask
if intersect_mask:
mask = np.ones((x, y, z), dtype=bool)
for t0 in range(timepoints):
vol = data[:, :, :, t0]
# Use an SPM like approach
mask_tmp = vol > \
(_nanmean(vol) / self.inputs.global_threshold)
mask = mask * mask_tmp
for t0 in range(timepoints):
vol = data[:, :, :, t0]
g[t0] = _nanmean(vol[mask])
if len(find_indices(mask)) < (np.prod((x, y, z)) / 10):
intersect_mask = False
g = np.zeros((timepoints, 1))
if not intersect_mask:
iflogger.info('not intersect_mask is True')
mask = np.zeros((x, y, z, timepoints))
for t0 in range(timepoints):
vol = data[:, :, :, t0]
mask_tmp = vol > \
(_nanmean(vol) / self.inputs.global_threshold)
mask[:, :, :, t0] = mask_tmp
g[t0] = np.nansum(vol * mask_tmp)/np.nansum(mask_tmp)
elif masktype == 'file': # uses a mask image to determine intensity
maskimg = load(self.inputs.mask_file)
mask = maskimg.get_data()
affine = maskimg.get_affine()
mask = mask > 0.5
for t0 in range(timepoints):
vol = data[:, :, :, t0]
g[t0] = _nanmean(vol[mask])
elif masktype == 'thresh': # uses a fixed signal threshold
for t0 in range(timepoints):
vol = data[:, :, :, t0]
mask = vol > self.inputs.mask_threshold
g[t0] = _nanmean(vol[mask])
else:
mask = np.ones((x, y, z))
g = _nanmean(data[mask > 0, :], 1)
# compute normalized intensity values
gz = signal.detrend(g, axis=0) # detrend the signal
if self.inputs.use_differences[1]:
gz = np.concatenate((np.zeros((1, 1)), np.diff(gz, n=1, axis=0)),
axis=0)
gz = (gz - np.mean(gz)) / np.std(gz) # normalize the detrended signal
iidx = find_indices(abs(gz) > self.inputs.zintensity_threshold)
# read in motion parameters
mc_in = np.loadtxt(motionfile)
mc = deepcopy(mc_in)
(artifactfile, intensityfile, statsfile, normfile, plotfile,
displacementfile, maskfile) = self._get_output_filenames(imgfile, cwd)
mask_img = Nifti1Image(mask.astype(np.uint8), affine)
mask_img.to_filename(maskfile)
if self.inputs.use_norm:
brain_pts = None
if self.inputs.bound_by_brainmask:
voxel_coords = np.nonzero(mask)
coords = np.vstack((voxel_coords[0],
np.vstack((voxel_coords[1],
voxel_coords[2])))).T
brain_pts = np.dot(affine,
np.hstack((coords,
np.ones((coords.shape[0], 1)))).T)
# calculate the norm of the motion parameters
normval, displacement = _calc_norm(mc,
self.inputs.use_differences[0],
self.inputs.parameter_source,
brain_pts=brain_pts)
tidx = find_indices(normval > self.inputs.norm_threshold)
ridx = find_indices(normval < 0)
if displacement is not None:
dmap = np.zeros((x, y, z, timepoints), dtype=np.float)
for i in range(timepoints):
dmap[voxel_coords[0],
voxel_coords[1],
voxel_coords[2], i] = displacement[i, :]
dimg = Nifti1Image(dmap, affine)
dimg.to_filename(displacementfile)
else:
if self.inputs.use_differences[0]:
mc = np.concatenate((np.zeros((1, 6)),
np.diff(mc_in, n=1, axis=0)),
axis=0)
traval = mc[:, 0:3] # translation parameters (mm)
rotval = mc[:, 3:6] # rotation parameters (rad)
tidx = find_indices(np.sum(abs(traval) >
self.inputs.translation_threshold, 1)
> 0)
ridx = find_indices(np.sum(abs(rotval) >
self.inputs.rotation_threshold, 1) > 0)
outliers = np.unique(np.union1d(iidx, np.union1d(tidx, ridx)))
# write output to outputfile
np.savetxt(artifactfile, outliers, fmt='%d', delimiter=' ')
np.savetxt(intensityfile, g, fmt='%.2f', delimiter=' ')
if self.inputs.use_norm:
np.savetxt(normfile, normval, fmt='%.4f', delimiter=' ')
if isdefined(self.inputs.save_plot) and self.inputs.save_plot:
import matplotlib
matplotlib.use(config.get("execution", "matplotlib_backend"))
import matplotlib.pyplot as plt
fig = plt.figure()
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
plt.subplot(211)
else:
plt.subplot(311)
self._plot_outliers_with_wave(gz, iidx, 'Intensity')
if isdefined(self.inputs.use_norm) and self.inputs.use_norm:
plt.subplot(212)
self._plot_outliers_with_wave(normval, np.union1d(tidx, ridx),
'Norm (mm)')
else:
diff = ''
if self.inputs.use_differences[0]:
diff = 'diff'
plt.subplot(312)
self._plot_outliers_with_wave(traval, tidx,
'Translation (mm)' + diff)
plt.subplot(313)
self._plot_outliers_with_wave(rotval, ridx,
'Rotation (rad)' + diff)
plt.savefig(plotfile)
plt.close(fig)
motion_outliers = np.union1d(tidx, ridx)
stats = [{'motion_file': motionfile,
'functional_file': imgfile},
{'common_outliers': len(np.intersect1d(iidx, motion_outliers)),
'intensity_outliers': len(np.setdiff1d(iidx,
motion_outliers)),
'motion_outliers': len(np.setdiff1d(motion_outliers, iidx)),
},
{'motion': [{'using differences': self.inputs.use_differences[0]},
{'mean': np.mean(mc_in, axis=0).tolist(),
'min': np.min(mc_in, axis=0).tolist(),
'max': np.max(mc_in, axis=0).tolist(),
'std': np.std(mc_in, axis=0).tolist()},
]},
{'intensity': [{'using differences': self.inputs.use_differences[1]},
{'mean': np.mean(gz, axis=0).tolist(),
'min': np.min(gz, axis=0).tolist(),
'max': np.max(gz, axis=0).tolist(),
'std': np.std(gz, axis=0).tolist()},
]},
]
if self.inputs.use_norm:
stats.insert(3, {'motion_norm':
{'mean': np.mean(normval, axis=0).tolist(),
'min': np.min(normval, axis=0).tolist(),
'max': np.max(normval, axis=0).tolist(),
'std': np.std(normval, axis=0).tolist(),
}})
save_json(statsfile, stats)
def _run_interface(self, runtime):
"""Execute this module.
"""
funcfilelist = filename_to_list(self.inputs.realigned_files)
motparamlist = filename_to_list(self.inputs.realignment_parameters)
for i, imgf in enumerate(funcfilelist):
self._detect_outliers_core(imgf, motparamlist[i], i,
cwd=os.getcwd())
return runtime
class StimCorrInputSpec(BaseInterfaceInputSpec):
realignment_parameters = InputMultiPath(File(exists=True), mandatory=True,
desc=('Names of realignment parameters corresponding to the functional '
'data files'))
intensity_values = InputMultiPath(File(exists=True), mandatory=True,
desc='Name of file containing intensity values')
spm_mat_file = File(exists=True, mandatory=True,
desc='SPM mat file (use pre-estimate SPM.mat file)')
concatenated_design = traits.Bool(mandatory=True,
desc='state if the design matrix contains concatenated sessions')
class StimCorrOutputSpec(TraitedSpec):
stimcorr_files = OutputMultiPath(File(exists=True),
desc='List of files containing correlation values')
class StimulusCorrelation(BaseInterface):
"""Determines if stimuli are correlated with motion or intensity
parameters.
Currently this class supports an SPM generated design matrix and requires
intensity parameters. This implies that one must run
:ref:`ArtifactDetect <nipype.algorithms.rapidart.ArtifactDetect>`
and :ref:`Level1Design <nipype.interfaces.spm.model.Level1Design>` prior to running this or
provide an SPM.mat file and intensity parameters through some other means.
Examples
--------
>>> sc = StimulusCorrelation()
>>> sc.inputs.realignment_parameters = 'functional.par'
>>> sc.inputs.intensity_values = 'functional.rms'
>>> sc.inputs.spm_mat_file = 'SPM.mat'
>>> sc.inputs.concatenated_design = False
>>> sc.run() # doctest: +SKIP
"""
input_spec = StimCorrInputSpec
output_spec = StimCorrOutputSpec
def _get_output_filenames(self, motionfile, output_dir):
"""Generate output files based on motion filenames
Parameters
----------
motionfile: file/string
Filename for motion parameter file
output_dir: string
output directory in which the files will be generated
"""
(_, filename) = os.path.split(motionfile)
(filename, _) = os.path.splitext(filename)
corrfile = os.path.join(output_dir, ''.join(('qa.', filename,
'_stimcorr.txt')))
return corrfile
def _stimcorr_core(self, motionfile, intensityfile, designmatrix, cwd=None):
"""
Core routine for determining stimulus correlation
"""
if not cwd:
cwd = os.getcwd()
# read in motion parameters
mc_in = np.loadtxt(motionfile)
g_in = np.loadtxt(intensityfile)
g_in.shape = g_in.shape[0], 1
dcol = designmatrix.shape[1]
mccol = mc_in.shape[1]
concat_matrix = np.hstack((np.hstack((designmatrix, mc_in)), g_in))
cm = np.corrcoef(concat_matrix, rowvar=0)
corrfile = self._get_output_filenames(motionfile, cwd)
# write output to outputfile
file = open(corrfile, 'w')
file.write("Stats for:\n")
file.write("Stimulus correlated motion:\n%s\n" % motionfile)
for i in range(dcol):
file.write("SCM.%d:" % i)
for v in cm[i, dcol + np.arange(mccol)]:
file.write(" %.2f" % v)
file.write('\n')
file.write("Stimulus correlated intensity:\n%s\n" % intensityfile)
for i in range(dcol):
file.write("SCI.%d: %.2f\n" % (i, cm[i, -1]))
file.close()
def _get_spm_submatrix(self, spmmat, sessidx, rows=None):
"""
Parameters
----------
spmmat: scipy matlab object
full SPM.mat file loaded into a scipy object
sessidx: int
index to session that needs to be extracted.
"""
designmatrix = spmmat['SPM'][0][0].xX[0][0].X
U = spmmat['SPM'][0][0].Sess[0][sessidx].U[0]
if rows is None:
rows = spmmat['SPM'][0][0].Sess[0][sessidx].row[0] - 1
cols = spmmat['SPM'][0][0].Sess[0][sessidx].col[0][range(len(U))] - 1
outmatrix = designmatrix.take(rows.tolist(), axis=0).take(cols.tolist(),
axis=1)
return outmatrix
def _run_interface(self, runtime):
"""Execute this module.
"""
motparamlist = self.inputs.realignment_parameters
intensityfiles = self.inputs.intensity_values
spmmat = sio.loadmat(self.inputs.spm_mat_file, struct_as_record=False)
nrows = []
for i in range(len(motparamlist)):
sessidx = i
rows = None
if self.inputs.concatenated_design:
sessidx = 0
mc_in = np.loadtxt(motparamlist[i])
rows = np.sum(nrows) + np.arange(mc_in.shape[0])
nrows.append(mc_in.shape[0])
matrix = self._get_spm_submatrix(spmmat, sessidx, rows)
self._stimcorr_core(motparamlist[i], intensityfiles[i],
matrix, os.getcwd())
return runtime
def _list_outputs(self):
outputs = self._outputs().get()
files = []
for i, f in enumerate(self.inputs.realignment_parameters):
files.insert(i, self._get_output_filenames(f, os.getcwd()))
if files:
outputs['stimcorr_files'] = files
return outputs
| bsd-3-clause |
tedunderwood/GenreProject | python/piketty/fifteenwordsnippets.py | 1 | 11248 | # fifteenwordsnippets.py
# A script that searches a HathiTrust corpus of 6,942 volumes (1700-1923), plus Hoyt & Richard's
# corpus of 808 vols (1923-1950), for words related to money. It takes seven words on either
# side of those words to create a snippet.
# In cases where the possibly-monetary word is ambiguous, e.g. "pounds," it runs the central
# seven words of the snippet through a regularized logistic model (created by model_contexts)
# in order to make a prediction about the likelihood that this word refers to money. The
# model I used is based on 700 manually-tagged snippets; it's about 87% accurate,
# five-fold crossvalidated.
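# Illustrative sketch (editor's note, not part of the original pipeline): with
# WINDOWRADIUS = 7 each keyword hit becomes a 15-word snippet with the keyword
# at index 7, and is_money() below only feeds the central seven words,
# snippet[WINDOWRADIUS - 3 : WINDOWRADIUS + 4], to the logistic model.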
import modelingcounter
import os, sys
import SonicScrewdriver as utils
import csv
import pickle
from bagofwords import WordVector, StandardizingVector
from sklearn.linear_model import LogisticRegression
# We start with a number of functions borrowed from other scripts; these were used to
# generate the logistic model, so we also use them here to clean and normalize snippets.
punctuple = ('.', ',', '?', '!', ';', '"', '“', '”', ':', '--', '—', ')', '(', "'", "`", "[", "]", "{", "}")
def all_nonalphanumeric(astring):
nonalphanum = True
for character in astring:
if character.isalpha() or character.isdigit():
nonalphanum = False
break
return nonalphanum
def strip_punctuation(astring):
global punctuple
keepclipping = True
suffix = ""
while keepclipping == True and len(astring) > 1:
keepclipping = False
if astring.endswith(punctuple):
suffix = astring[-1:] + suffix
astring = astring[:-1]
keepclipping = True
keepclipping = True
prefix = ""
while keepclipping == True and len(astring) > 1:
keepclipping = False
if astring.startswith(punctuple):
prefix = prefix + astring[:1]
astring = astring[1:]
keepclipping = True
return(prefix, astring, suffix)
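# Editor's illustration of the helper above (behaviour inferred from the code):
# strip_punctuation('"pounds,"') returns ('"', 'pounds', ',"').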
def as_wordlist(line):
''' Converts a line into a list of words, splitting
tokens brutally and unreflectively at punctuation.
One of the effects will be to split possessives into noun
and s. But this might not be a bad thing for current
purposes.
'''
line = line.replace('”', ' ')
line = line.replace(':', ' ')
line = line.replace(';', ' ')
line = line.replace('—', ' ')
line = line.replace('--', ' ')
line = line.replace('.', ' ')
line = line.replace(',', ' ')
line = line.replace('-', ' ')
line = line.replace('—', ' ')
line = line.replace("'", ' ')
line = line.replace('"', ' ')
# That's not the most efficient way to do this computationally,
# but it prevents me from having to look up the .translate
# method.
words = line.split(' ')
wordlist = list()
for word in words:
word = word.lower()
prefix, word, suffix = strip_punctuation(word)
# In case we missed anything.
if len(word) > 0 and not all_nonalphanumeric(word):
wordlist.append(word)
return wordlist
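# Editor's illustration of the helper above (behaviour inferred from the code):
# as_wordlist('Ten pounds, sir!') returns ['ten', 'pounds', 'sir'].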
def is_money(wordlist, WINDOWRADIUS, model, features, standardizer):
# We're getting a wordlist generated by WINDOWRADIUS, but
# we only want the central seven words for our model.
startindex = WINDOWRADIUS - 3
endindex = WINDOWRADIUS + 4
modelsegment = ' '.join(wordlist[startindex : endindex])
# You'd think we could just take a list of words, but in
# generating the model we ran strings through a particular
# tokenizing process, and we should replicate that here.
normalizedlist = as_wordlist(modelsegment)
vector = WordVector(normalizedlist)
vector.selectfeatures(features)
# This turns a sparse dictionary into an array with zeroes
# for missing features.
vector.normalizefrequencies()
# raw counts are divided by total counts.
vector.standardizefrequencies(standardizer)
# features are now centered on the means, and divided by
# standard deviations, calculated on the training set
classlabel = model.predict(vector.features)[0]
if classlabel == 1:
return True
elif classlabel == 0:
return False
else:
print("ANOMALY!")
print(classlabel)
return False
# Cause that's how I do error handling.
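# Editor's sketch of how is_money() is called in the loops below (the model,
# feature list and standardizer are the pickled objects loaded just after this):
#
#     snippet = ['he', 'paid', 'me', 'forty', 'or', 'fifty', 'thousand',
#                'pounds', 'for', 'the', 'estate', 'in', 'ready', 'money', 'now']
#     is_money(snippet, 7, logisticmodel, features, standardizer)  # True or False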
# Main script.
# Let's load the model.
modelfolder = "/Volumes/TARDIS/work/moneycontext/"
modelpath = modelfolder + "logisticmodel.p"
with open(modelpath, mode = 'rb') as f:
logisticmodel = pickle.load(f)
standardizerpath = modelfolder + 'standardizer.p'
with open(standardizerpath, mode = 'rb') as f:
standardizer = pickle.load(f)
featurepath = modelfolder + 'featurelist.p'
with open(featurepath, mode = 'rb') as f:
features = pickle.load(f)
# Now load HathiTrust metadata.
rows, columns, table = utils.readtsv('/Volumes/TARDIS/work/metadata/MergedMonographs.tsv')
ambiguouswords = {'crown', 'crowns', 'guinea', 'guineas', 'nickel', 'sovereign', 'sovereigns', 'pound', 'pounds', 'quid'}
moneywords = {'dollar', 'dollars', 'dime', 'dimes', 'nickel', 'nickels', 'pound', 'pounds', 'shilling', 'shillings', 'sovereign', 'sovereigns','cent', 'cents', 'centime', 'centimes', 'crown', 'crowns', 'halfcrown', 'half-crown','penny', 'pennies', 'pence', 'farthing', 'farthings', 'franc', 'francs', 'guilder', 'guilders', 'florin', 'florins', 'guinea', 'guineas', "ha'penny", 'tuppence', 'twopence', 'sixpence', '|arabicprice|', '|price|', 'quid'}
# Words I explicitly decided not to include: 'quarter', 'quarters', 'mark', 'marks.' Monetary uses
# seemed rare enough relative to others that they'd be more likely to introduce noise than to help.
# |arabicprice| is a code the tokenizer in modelingcounter produces whenever it encounters
# a number connected to £, $, ¢, s, or d. In the output we convert that to |price|, for no very
# good reason.
wealthwords = {'fortune', 'fortunes', 'wealth', 'rich', 'riches', 'money', 'moneys', 'fund', 'funds', 'sum', 'sums', 'price', 'prices', 'priced'}
# This is by no means an exhaustive list. Owe, loan, borrowed, etc.
# If we really want to get at the full range of words potentially
# associated with money, topic modeling would be an appropriate lever.
# We can perhaps enumerate currency terms intuitively, but not these.
alltargetwords = moneywords
sourcedir = "/Volumes/TARDIS/work/moneytexts/"
filelist = os.listdir(sourcedir)
filelist = [x for x in filelist if x.endswith(".txt")]
contexts = []
WINDOWRADIUS = 7
ctr = 0
for filename in filelist:
htid = utils.pairtreelabel(filename.replace('.fic.txt', ''))
if htid not in rows:
print(htid)
continue
else:
date = utils.simple_date(htid, table)
filepath = os.path.join(sourcedir, filename)
with open(filepath, encoding = 'utf-8') as f:
filelines = f.readlines()
pagelist = [filelines]
# The wordcounter module expects a list of pages, each of which is a list of lines.
# Ebooks have no pages -- at least as I currently receive them -- so we treat it
# all as one giant page.
tokenstream = modelingcounter.makestream(pagelist)
newcontexts = modelingcounter.extract_snippets(tokenstream, WINDOWRADIUS, alltargetwords)
approvedcontexts = []
for snippet, snippettomodel in newcontexts:
keyword = snippettomodel[WINDOWRADIUS]
keyword = keyword.lower()
prefix, keyword, suffix = strip_punctuation(keyword)
if keyword in wealthwords:
category = 'wealth'
elif keyword in ambiguouswords:
currency = is_money(snippettomodel, WINDOWRADIUS, logisticmodel, features, standardizer)
if currency:
category = 'money'
else:
category = "notmoney"
elif keyword in moneywords:
category = 'money'
else:
print('ANOMALY: ' + keyword)
# Cause that's how I do error handling.
category = 'null'
if category == 'money':
approvedcontexts.append((htid, date, snippet, keyword, category))
print(ctr)
ctr += 1
outfile = "/Volumes/TARDIS/work/moneycontext/twentyfivesnippets.tsv"
with open(outfile, mode='a', encoding='utf-8') as f:
for context in approvedcontexts:
htid, date, alist, keyword, category = context
snippet = " ".join(alist)
snippet = snippet.replace('\t', '')
# Because we don't want stray tabs in our tab-separated values.
f.write(htid + '\t' + str(date) + '\t' + keyword + '\t' + category + '\t' + snippet + '\n')
sourcedir = "/Volumes/TARDIS/work/US_NOVELS_1923-1950/"
filelist = os.listdir(sourcedir)
fileset = set([x for x in filelist if x.endswith(".txt")])
filelist = list(fileset)
metafile = os.path.join(sourcedir, "US_NOVELS_1923-1950_META.txt")
datedict = dict()
dateset = set()
with open(metafile, newline='', encoding = 'utf-8') as f:
reader = csv.reader(f)
for fields in reader:
idcode = fields[0]
date = int(fields[8])
datedict[idcode] = date
dateset.add(date)
for filename in filelist:
htid = utils.pairtreelabel(filename.replace('.txt', ''))
if htid not in datedict:
print(htid)
continue
else:
date = datedict[htid]
filepath = os.path.join(sourcedir, filename)
with open(filepath, encoding = 'utf-8') as f:
filelines = f.readlines()
pagelist = [filelines]
# The wordcounter module expects a list of pages, each of which is a list of lines.
# Ebooks have no pages -- at least as I currently receive them -- so we treat it
# all as one giant page.
tokenstream = modelingcounter.makestream(pagelist)
newcontexts = modelingcounter.extract_snippets(tokenstream, WINDOWRADIUS, alltargetwords)
approvedcontexts = []
for snippet, snippettomodel in newcontexts:
keyword = snippettomodel[WINDOWRADIUS]
keyword = keyword.lower()
prefix, keyword, suffix = strip_punctuation(keyword)
if keyword in wealthwords:
category = 'wealth'
elif keyword in ambiguouswords:
currency = is_money(snippettomodel, WINDOWRADIUS, logisticmodel, features, standardizer)
if currency:
category = 'money'
else:
category = "notmoney"
elif keyword in moneywords:
category = 'money'
else:
print('ANOMALY: ' + keyword)
# Cause that's how I do error handling.
category = 'null'
if category == 'money':
approvedcontexts.append((htid, date, snippet, keyword, category))
outfile = "/Volumes/TARDIS/work/moneycontext/twentyfivesnippets.tsv"
with open(outfile, mode='a', encoding='utf-8') as f:
for context in approvedcontexts:
htid, date, alist, keyword, category = context
snippet = " ".join(alist)
snippet = snippet.replace('\t', '')
# Because we don't want stray tabs in our tab-separated values.
f.write(htid + '\t' + str(date) + '\t' + keyword + '\t' + category + '\t' + snippet + '\n')
| mit |
alesaccoia/TF_SoundClassification | as_sound/exec/train_vad_ann_5FCL_classifier.py | 1 | 1585 | import numpy as np
import as_classification.ann_models
import as_sound.features.extractFeatures as ef
import as_classification.utilities
import os
import matplotlib.pyplot as plt
# -------------------------------
# CREATE MODEL
# -------------------------------
model = as_classification.ann_models.ANN_5FCL()
model.initialize(15,2)
# -------------------------------
# READ AUDIO FILES
# -------------------------------
speech_data = ef.computeSupervectorForFile(os.path.dirname(os.path.realpath(__file__)) + '/data/Speech.wav', 8000, 2048, 2049)
noise_data = ef.computeSupervectorForFile(os.path.dirname(os.path.realpath(__file__)) + '/data/Noise.wav', 8000, 2048, 2049)
whole_data = np.hstack((speech_data, noise_data))
whole_data = np.swapaxes(whole_data,0,1)
whole_labels = np.zeros((whole_data.shape[0], 2))
whole_labels[:speech_data.shape[1],0] = 1
whole_labels[speech_data.shape[1]:,1] = 1
training_data = {"labels": whole_labels,
"data": whole_data}
training_data, test_data = as_classification.utilities.divideTrainingData(training_data, 0.6)
# -------------------------------
# TRAIN
# -------------------------------
model.train(training_data, test_data, 10, 20, 100)
model.saveCheckpoint(os.path.dirname(os.path.realpath(__file__)) + '/data/vadModel_ANN_5FCL.chkp')
#xp = np.arange(0,prediction.shape[0])
#plt.plot(xp, test_data[:,14], '-b', label='RMS')
#plt.plot(xp, prediction[:,0], '-r', label='ANN Output')
#plt.legend(loc='upper left')
#plt.show()
#feat = as_sound.features.extractFeatures.computeSupervector(normalized_data)
| mit |
blaze/dask | dask/dataframe/tests/test_indexing.py | 2 | 19852 | import pandas as pd
import numpy as np
import pytest
import dask
import dask.dataframe as dd
from dask.dataframe._compat import tm, PANDAS_GT_100
from dask.dataframe.indexing import _coerce_loc_index
from dask.dataframe.utils import assert_eq, make_meta, PANDAS_VERSION
dsk = {
("x", 0): pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=[0, 1, 3]),
("x", 1): pd.DataFrame({"a": [4, 5, 6], "b": [3, 2, 1]}, index=[5, 6, 8]),
("x", 2): pd.DataFrame({"a": [7, 8, 9], "b": [0, 0, 0]}, index=[9, 9, 9]),
}
meta = make_meta({"a": "i8", "b": "i8"}, index=pd.Index([], "i8"))
d = dd.DataFrame(dsk, "x", meta, [0, 5, 9, 9])
full = d.compute()
CHECK_FREQ = {}
if dd._compat.PANDAS_GT_110:
CHECK_FREQ["check_freq"] = False
def test_loc():
assert d.loc[3:8].divisions[0] == 3
assert d.loc[3:8].divisions[-1] == 8
assert d.loc[5].divisions == (5, 5)
assert_eq(d.loc[5], full.loc[5:5])
assert_eq(d.loc[3:8], full.loc[3:8])
assert_eq(d.loc[:8], full.loc[:8])
assert_eq(d.loc[3:], full.loc[3:])
assert_eq(d.loc[[5]], full.loc[[5]])
expected_warning = FutureWarning
if not PANDAS_GT_100:
# removed in pandas 1.0
with pytest.warns(expected_warning):
assert_eq(d.loc[[3, 4, 1, 8]], full.loc[[3, 4, 1, 8]])
with pytest.warns(expected_warning):
assert_eq(d.loc[[3, 4, 1, 9]], full.loc[[3, 4, 1, 9]])
with pytest.warns(expected_warning):
assert_eq(d.loc[np.array([3, 4, 1, 9])], full.loc[np.array([3, 4, 1, 9])])
assert_eq(d.a.loc[5], full.a.loc[5:5])
assert_eq(d.a.loc[3:8], full.a.loc[3:8])
assert_eq(d.a.loc[:8], full.a.loc[:8])
assert_eq(d.a.loc[3:], full.a.loc[3:])
assert_eq(d.a.loc[[5]], full.a.loc[[5]])
if not PANDAS_GT_100:
# removed in pandas 1.0
with pytest.warns(expected_warning):
assert_eq(d.a.loc[[3, 4, 1, 8]], full.a.loc[[3, 4, 1, 8]])
with pytest.warns(expected_warning):
assert_eq(d.a.loc[[3, 4, 1, 9]], full.a.loc[[3, 4, 1, 9]])
with pytest.warns(expected_warning):
assert_eq(
d.a.loc[np.array([3, 4, 1, 9])], full.a.loc[np.array([3, 4, 1, 9])]
)
assert_eq(d.a.loc[[]], full.a.loc[[]])
assert_eq(d.a.loc[np.array([])], full.a.loc[np.array([])])
pytest.raises(KeyError, lambda: d.loc[1000])
assert_eq(d.loc[1000:], full.loc[1000:])
assert_eq(d.loc[-2000:-1000], full.loc[-2000:-1000])
assert sorted(d.loc[5].dask) == sorted(d.loc[5].dask)
assert sorted(d.loc[5].dask) != sorted(d.loc[6].dask)
def test_loc_non_informative_index():
df = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 30, 40])
ddf = dd.from_pandas(df, npartitions=2, sort=True)
ddf.divisions = (None,) * 3
assert not ddf.known_divisions
ddf.loc[20:30].compute(scheduler="sync")
assert_eq(ddf.loc[20:30], df.loc[20:30])
df = pd.DataFrame({"x": [1, 2, 3, 4]}, index=[10, 20, 20, 40])
ddf = dd.from_pandas(df, npartitions=2, sort=True)
assert_eq(ddf.loc[20], df.loc[20:20])
def test_loc_with_text_dates():
A = dd._compat.makeTimeSeries().iloc[:5]
B = dd._compat.makeTimeSeries().iloc[5:]
s = dd.Series(
{("df", 0): A, ("df", 1): B},
"df",
A,
[A.index.min(), B.index.min(), B.index.max()],
)
assert s.loc["2000":"2010"].divisions == s.divisions
assert_eq(s.loc["2000":"2010"], s)
assert len(s.loc["2000-01-03":"2000-01-05"].compute()) == 3
def test_loc_with_series():
assert_eq(d.loc[d.a % 2 == 0], full.loc[full.a % 2 == 0])
assert sorted(d.loc[d.a % 2].dask) == sorted(d.loc[d.a % 2].dask)
assert sorted(d.loc[d.a % 2].dask) != sorted(d.loc[d.a % 3].dask)
def test_loc_with_array():
assert_eq(d.loc[(d.a % 2 == 0).values], full.loc[(full.a % 2 == 0).values])
assert sorted(d.loc[(d.a % 2).values].dask) == sorted(d.loc[(d.a % 2).values].dask)
assert sorted(d.loc[(d.a % 2).values].dask) != sorted(d.loc[(d.a % 3).values].dask)
def test_loc_with_function():
assert_eq(d.loc[lambda df: df["a"] > 3, :], full.loc[lambda df: df["a"] > 3, :])
def _col_loc_fun(_df):
return _df.columns.str.contains("b")
assert_eq(d.loc[:, _col_loc_fun], full.loc[:, _col_loc_fun])
def test_loc_with_array_different_partition():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("ABCDE"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc[(ddf.A > 0).values], df.loc[(df.A > 0).values])
with pytest.raises(ValueError):
ddf.loc[(ddf.A > 0).repartition(["a", "g", "k", "o", "t"]).values]
def test_loc_with_series_different_partition():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("ABCDE"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc[ddf.A > 0], df.loc[df.A > 0])
assert_eq(
ddf.loc[(ddf.A > 0).repartition(["a", "g", "k", "o", "t"])], df.loc[df.A > 0]
)
def test_loc2d():
# index indexer is always regarded as slice for duplicated values
assert_eq(d.loc[5, "a"], full.loc[5:5, "a"])
# assert_eq(d.loc[[5], 'a'], full.loc[[5], 'a'])
assert_eq(d.loc[5, ["a"]], full.loc[5:5, ["a"]])
# assert_eq(d.loc[[5], ['a']], full.loc[[5], ['a']])
assert_eq(d.loc[3:8, "a"], full.loc[3:8, "a"])
assert_eq(d.loc[:8, "a"], full.loc[:8, "a"])
assert_eq(d.loc[3:, "a"], full.loc[3:, "a"])
assert_eq(d.loc[[8], "a"], full.loc[[8], "a"])
assert_eq(d.loc[3:8, ["a"]], full.loc[3:8, ["a"]])
assert_eq(d.loc[:8, ["a"]], full.loc[:8, ["a"]])
assert_eq(d.loc[3:, ["a"]], full.loc[3:, ["a"]])
# 3d
with pytest.raises(pd.core.indexing.IndexingError):
d.loc[3, 3, 3]
# Series should raise
with pytest.raises(pd.core.indexing.IndexingError):
d.a.loc[3, 3]
with pytest.raises(pd.core.indexing.IndexingError):
d.a.loc[3:, 3]
with pytest.raises(pd.core.indexing.IndexingError):
d.a.loc[d.a % 2 == 0, 3]
@pytest.mark.skip(PANDAS_GT_100, reason="Removed in pandas 1.0")
def test_loc2d_some_missing():
with pytest.warns(FutureWarning):
assert_eq(d.loc[[3, 4, 3], ["a"]], full.loc[[3, 4, 3], ["a"]])
def test_loc2d_with_known_divisions():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("ABCDE"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc["a", "A"], df.loc[["a"], "A"])
assert_eq(ddf.loc["a", ["A"]], df.loc[["a"], ["A"]])
assert_eq(ddf.loc["a":"o", "A"], df.loc["a":"o", "A"])
assert_eq(ddf.loc["a":"o", ["A"]], df.loc["a":"o", ["A"]])
assert_eq(ddf.loc[["n"], ["A"]], df.loc[["n"], ["A"]])
assert_eq(ddf.loc[["a", "c", "n"], ["A"]], df.loc[["a", "c", "n"], ["A"]])
assert_eq(ddf.loc[["t", "b"], ["A"]], df.loc[["t", "b"], ["A"]])
assert_eq(
ddf.loc[["r", "r", "c", "g", "h"], ["A"]],
df.loc[["r", "r", "c", "g", "h"], ["A"]],
)
def test_loc2d_with_unknown_divisions():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("ABCDE"),
)
ddf = dd.from_pandas(df, 3)
ddf.divisions = (None,) * len(ddf.divisions)
assert ddf.known_divisions is False
assert_eq(ddf.loc["a", "A"], df.loc[["a"], "A"])
assert_eq(ddf.loc["a", ["A"]], df.loc[["a"], ["A"]])
assert_eq(ddf.loc["a":"o", "A"], df.loc["a":"o", "A"])
assert_eq(ddf.loc["a":"o", ["A"]], df.loc["a":"o", ["A"]])
def test_loc2d_duplicated_columns():
df = pd.DataFrame(
np.random.randn(20, 5),
index=list("abcdefghijklmnopqrst"),
columns=list("AABCD"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf.loc["a", "A"], df.loc[["a"], "A"])
assert_eq(ddf.loc["a", ["A"]], df.loc[["a"], ["A"]])
assert_eq(ddf.loc["j", "B"], df.loc[["j"], "B"])
assert_eq(ddf.loc["j", ["B"]], df.loc[["j"], ["B"]])
assert_eq(ddf.loc["a":"o", "A"], df.loc["a":"o", "A"])
assert_eq(ddf.loc["a":"o", ["A"]], df.loc["a":"o", ["A"]])
assert_eq(ddf.loc["j":"q", "B"], df.loc["j":"q", "B"])
assert_eq(ddf.loc["j":"q", ["B"]], df.loc["j":"q", ["B"]])
assert_eq(ddf.loc["a":"o", "B":"D"], df.loc["a":"o", "B":"D"])
assert_eq(ddf.loc["a":"o", "B":"D"], df.loc["a":"o", "B":"D"])
assert_eq(ddf.loc["j":"q", "B":"A"], df.loc["j":"q", "B":"A"])
assert_eq(ddf.loc["j":"q", "B":"A"], df.loc["j":"q", "B":"A"])
assert_eq(ddf.loc[ddf.B > 0, "B"], df.loc[df.B > 0, "B"])
assert_eq(ddf.loc[ddf.B > 0, ["A", "C"]], df.loc[df.B > 0, ["A", "C"]])
def test_getitem():
df = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"B": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"C": [True, False, True] * 3,
},
columns=list("ABC"),
)
ddf = dd.from_pandas(df, 2)
assert_eq(ddf["A"], df["A"])
# check cache consistency
tm.assert_series_equal(ddf["A"]._meta, ddf._meta["A"])
assert_eq(ddf[["A", "B"]], df[["A", "B"]])
tm.assert_frame_equal(ddf[["A", "B"]]._meta, ddf._meta[["A", "B"]])
assert_eq(ddf[ddf.C], df[df.C])
tm.assert_series_equal(ddf.C._meta, ddf._meta.C)
assert_eq(ddf[ddf.C.repartition([0, 2, 5, 8])], df[df.C])
pytest.raises(KeyError, lambda: df["X"])
pytest.raises(KeyError, lambda: df[["A", "X"]])
pytest.raises(AttributeError, lambda: df.X)
# not str/unicode
df = pd.DataFrame(np.random.randn(10, 5))
ddf = dd.from_pandas(df, 2)
assert_eq(ddf[0], df[0])
assert_eq(ddf[[1, 2]], df[[1, 2]])
pytest.raises(KeyError, lambda: df[8])
pytest.raises(KeyError, lambda: df[[1, 8]])
def test_getitem_slice():
df = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6, 7, 8, 9],
"B": [9, 8, 7, 6, 5, 4, 3, 2, 1],
"C": [True, False, True] * 3,
},
index=list("abcdefghi"),
)
ddf = dd.from_pandas(df, 3)
assert_eq(ddf["a":"e"], df["a":"e"])
assert_eq(ddf["a":"b"], df["a":"b"])
assert_eq(ddf["f":], df["f":])
def test_getitem_integer_slice():
df = pd.DataFrame({"A": range(6)})
ddf = dd.from_pandas(df, 2)
# integer slicing is iloc based
with pytest.raises(NotImplementedError):
ddf[1:3]
df = pd.DataFrame({"A": range(6)}, index=[1.0, 2.0, 3.0, 5.0, 10.0, 11.0])
ddf = dd.from_pandas(df, 2)
# except for float dtype indexes
assert_eq(ddf[2:8], df[2:8])
assert_eq(ddf[2:], df[2:])
assert_eq(ddf[:8], df[:8])
def test_loc_on_numpy_datetimes():
df = pd.DataFrame(
{"x": [1, 2, 3]}, index=list(map(np.datetime64, ["2014", "2015", "2016"]))
)
a = dd.from_pandas(df, 2)
a.divisions = list(map(np.datetime64, a.divisions))
assert_eq(a.loc["2014":"2015"], a.loc["2014":"2015"])
def test_loc_on_pandas_datetimes():
df = pd.DataFrame(
{"x": [1, 2, 3]}, index=list(map(pd.Timestamp, ["2014", "2015", "2016"]))
)
a = dd.from_pandas(df, 2)
a.divisions = list(map(pd.Timestamp, a.divisions))
assert_eq(a.loc["2014":"2015"], a.loc["2014":"2015"])
def test_loc_datetime_no_freq():
# https://github.com/dask/dask/issues/2389
datetime_index = pd.date_range("2016-01-01", "2016-01-31", freq="12h")
datetime_index.freq = None # FORGET FREQUENCY
df = pd.DataFrame({"num": range(len(datetime_index))}, index=datetime_index)
ddf = dd.from_pandas(df, npartitions=1)
slice_ = slice("2016-01-03", "2016-01-05")
result = ddf.loc[slice_, :]
expected = df.loc[slice_, :]
assert_eq(result, expected)
def test_coerce_loc_index():
for t in [pd.Timestamp, np.datetime64]:
assert isinstance(_coerce_loc_index([t("2014")], "2014"), t)
def test_loc_timestamp_str():
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df.loc["2011-01-02"], ddf.loc["2011-01-02"])
assert_eq(df.loc["2011-01-02":"2011-01-10"], ddf.loc["2011-01-02":"2011-01-10"])
# same reso, dask result is always DataFrame
assert_eq(
df.loc["2011-01-02 10:00"].to_frame().T,
ddf.loc["2011-01-02 10:00"],
**CHECK_FREQ
)
# series
assert_eq(df.A.loc["2011-01-02"], ddf.A.loc["2011-01-02"], **CHECK_FREQ)
assert_eq(
df.A.loc["2011-01-02":"2011-01-10"],
ddf.A.loc["2011-01-02":"2011-01-10"],
**CHECK_FREQ
)
# slice with timestamp (dask result must be DataFrame)
assert_eq(
df.loc[pd.Timestamp("2011-01-02")].to_frame().T,
ddf.loc[pd.Timestamp("2011-01-02")],
**CHECK_FREQ
)
assert_eq(
df.loc[pd.Timestamp("2011-01-02") : pd.Timestamp("2011-01-10")],
ddf.loc[pd.Timestamp("2011-01-02") : pd.Timestamp("2011-01-10")],
**CHECK_FREQ
)
assert_eq(
df.loc[pd.Timestamp("2011-01-02 10:00")].to_frame().T,
ddf.loc[pd.Timestamp("2011-01-02 10:00")],
**CHECK_FREQ
)
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="M", periods=100),
)
ddf = dd.from_pandas(df, 50)
assert_eq(df.loc["2011-01"], ddf.loc["2011-01"])
assert_eq(df.loc["2011"], ddf.loc["2011"])
assert_eq(df.loc["2011-01":"2012-05"], ddf.loc["2011-01":"2012-05"])
assert_eq(df.loc["2011":"2015"], ddf.loc["2011":"2015"])
# series
assert_eq(df.B.loc["2011-01"], ddf.B.loc["2011-01"])
assert_eq(df.B.loc["2011"], ddf.B.loc["2011"])
assert_eq(df.B.loc["2011-01":"2012-05"], ddf.B.loc["2011-01":"2012-05"])
assert_eq(df.B.loc["2011":"2015"], ddf.B.loc["2011":"2015"])
def test_getitem_timestamp_str():
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df["2011-01-02"], ddf["2011-01-02"])
    assert_eq(df["2011-01-02":"2011-01-10"], ddf["2011-01-02":"2011-01-10"])
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="D", periods=100),
)
ddf = dd.from_pandas(df, 50)
assert_eq(df["2011-01"], ddf["2011-01"])
assert_eq(df["2011"], ddf["2011"])
assert_eq(df["2011-01":"2012-05"], ddf["2011-01":"2012-05"])
assert_eq(df["2011":"2015"], ddf["2011":"2015"])
def test_loc_period_str():
# .loc with PeriodIndex doesn't support partial string indexing
# https://github.com/pydata/pandas/issues/13429
pass
def test_getitem_period_str():
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.period_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
# partial string slice
assert_eq(df["2011-01-02"], ddf["2011-01-02"])
    assert_eq(df["2011-01-02":"2011-01-10"], ddf["2011-01-02":"2011-01-10"])
# same reso, dask result is always DataFrame
df = pd.DataFrame(
{"A": np.random.randn(100), "B": np.random.randn(100)},
index=pd.period_range("2011-01-01", freq="D", periods=100),
)
ddf = dd.from_pandas(df, 50)
assert_eq(df["2011-01"], ddf["2011-01"])
assert_eq(df["2011"], ddf["2011"])
assert_eq(df["2011-01":"2012-05"], ddf["2011-01":"2012-05"])
assert_eq(df["2011":"2015"], ddf["2011":"2015"])
def test_to_series():
# Test for time index
df = pd.DataFrame(
{"A": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_series(), ddf.index.to_series())
# Test for numerical index
df = pd.DataFrame({"A": np.random.randn(100)}, index=range(100))
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_series(), ddf.index.to_series())
def test_to_frame():
# Test for time index
df = pd.DataFrame(
{"A": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_frame(), ddf.index.to_frame())
# Test for numerical index
df = pd.DataFrame({"A": np.random.randn(100)}, index=range(100))
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_frame(), ddf.index.to_frame())
@pytest.mark.skipif(PANDAS_VERSION < "0.24.0", reason="No renaming for index")
def test_to_frame_name():
# Test for time index
df = pd.DataFrame(
{"A": np.random.randn(100)},
index=pd.date_range("2011-01-01", freq="H", periods=100),
)
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_frame(name="foo"), ddf.index.to_frame(name="foo"))
# Test for numerical index
df = pd.DataFrame({"A": np.random.randn(100)}, index=range(100))
ddf = dd.from_pandas(df, 10)
assert_eq(df.index.to_frame(name="bar"), ddf.index.to_frame(name="bar"))
@pytest.mark.parametrize("indexer", [0, [0], [0, 1], [1, 0], [False, True, True]])
def test_iloc(indexer):
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
result = ddf.iloc[:, indexer]
expected = df.iloc[:, indexer]
assert_eq(result, expected)
def test_iloc_series():
s = pd.Series([1, 2, 3])
ds = dd.from_pandas(s, 2)
with pytest.raises(AttributeError):
ds.iloc[:]
def test_iloc_raises():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
with pytest.raises(NotImplementedError):
ddf.iloc[[0, 1], :]
with pytest.raises(NotImplementedError):
ddf.iloc[[0, 1], [0, 1]]
with pytest.raises(ValueError):
ddf.iloc[[0, 1], [0, 1], [1, 2]]
with pytest.raises(IndexError):
ddf.iloc[:, [5, 6]]
def test_iloc_duplicate_columns():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
df.columns = ["A", "A", "C"]
ddf.columns = ["A", "A", "C"]
selection = ddf.iloc[:, 2]
# Check that `iloc` is called instead of getitem
assert any([key.startswith("iloc") for key in selection.dask.layers.keys()])
select_first = ddf.iloc[:, 1]
assert_eq(select_first, df.iloc[:, 1])
select_zeroth = ddf.iloc[:, 0]
assert_eq(select_zeroth, df.iloc[:, 0])
select_list_cols = ddf.iloc[:, [0, 2]]
assert_eq(select_list_cols, df.iloc[:, [0, 2]])
select_negative = ddf.iloc[:, -1:-3:-1]
assert_eq(select_negative, df.iloc[:, -1:-3:-1])
def test_iloc_dispatch_to_getitem():
df = pd.DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
ddf = dd.from_pandas(df, 2)
selection = ddf.iloc[:, 2]
assert all([not key.startswith("iloc") for key in selection.dask.layers.keys()])
assert any([key.startswith("getitem") for key in selection.dask.layers.keys()])
select_first = ddf.iloc[:, 1]
assert_eq(select_first, df.iloc[:, 1])
select_zeroth = ddf.iloc[:, 0]
assert_eq(select_zeroth, df.iloc[:, 0])
select_list_cols = ddf.iloc[:, [0, 2]]
assert_eq(select_list_cols, df.iloc[:, [0, 2]])
select_negative = ddf.iloc[:, -1:-3:-1]
assert_eq(select_negative, df.iloc[:, -1:-3:-1])
def test_iloc_out_of_order_selection():
df = pd.DataFrame({"A": [1] * 100, "B": [2] * 100, "C": [3] * 100, "D": [4] * 100})
ddf = dd.from_pandas(df, 2)
ddf = ddf[["C", "A", "B"]]
a = ddf.iloc[:, 0]
b = ddf.iloc[:, 1]
c = ddf.iloc[:, 2]
assert a.name == "C"
assert b.name == "A"
assert c.name == "B"
a1, b1, c1 = dask.compute(a, b, c)
assert a1.name == "C"
assert b1.name == "A"
assert c1.name == "B"
| bsd-3-clause |
merenlab/anvio | anvio/drivers/sourmash.py | 2 | 4700 | # coding: utf-8
"""Interface to sourmash"""
import os
import numpy as np
import pandas as pd
import shutil
import anvio
import anvio.utils as utils
import anvio.terminal as terminal
import anvio.filesnpaths as filesnpaths
from scipy.stats import entropy, skew, kurtosis
from anvio.errors import ConfigError
__author__ = "Developers of anvi'o (see AUTHORS.txt)"
__copyright__ = "Copyleft 2015-2019, the Meren Lab (http://merenlab.org/)"
__credits__ = []
__license__ = "GPL 3.0"
__version__ = anvio.__version__
__maintainer__ = "Mahmoud Yousef"
__email__ = "mahmoudyousef@uchicago.edu"
class Sourmash:
"""This calculates a single kmer signature, and computes similarities.
Feel free to buff this to suit your needs
"""
def __init__(self, args={}, run=terminal.Run(), progress=terminal.Progress(), program_name='sourmash'):
self.run = run
self.progress = progress
self.program_name = program_name
self.check_program()
self.results = {}
A = lambda x: args.__dict__[x] if x in args.__dict__ else None
self.log_file_path = os.path.abspath(A('log_file') or filesnpaths.get_temp_file_path())
self.num_threads = A('num_threads') or 1
self.kmer_size = A('kmer_size') or 51
self.scale = A('scale') or 1000
self.run.warning("Anvi'o will use 'sourmash' by Brown et al. (DOI: 10.21105/joss.00027) to compute kmer sequences and determine mash distances. "
"If you publish your findings, please do not forget to properly credit their work",
lc='green', header="CITATION")
if self.num_threads != 1:
self.num_threads = 1
self.run.warning("Anvi'o speaking: sourmash currently doesn't support multithreading. "
"Anvi'o will have to reduce your number of threads to one :(")
self.run.info('[sourmash] Log file path', self.log_file_path, nl_after=1)
def check_program(self):
utils.is_program_exists(self.program_name)
def process(self, input_path, fasta_files):
self.run.info('[sourmash] Kmer size', self.kmer_size, nl_before=1)
self.run.info('[sourmash] Compression ratio', self.scale)
report_name = 'kmer_%d_mash_similarity' % self.kmer_size
# backup the old working directory before changing the directory
old_wd = os.getcwd()
os.chdir(input_path)
        if not os.path.exists('output'):
            os.mkdir('output')
self.progress.new('Sourmash')
self.progress.update('Computing fasta signatures for kmer=%d, scale=%d' % (self.kmer_size, self.scale))
scale = '--scaled=%i' % self.scale
compute_command = [self.program_name, 'compute',
'-k', self.kmer_size,
'-f', scale]
compute_command.extend(fasta_files)
exit_code = utils.run_command(compute_command, self.log_file_path, remove_log_file_if_exists=False)
if int(exit_code):
self.progress.end()
raise ConfigError("sourmash returned with non-zero exit code, there may be some errors. "
"Please check the log file `%s` for details. Offending command: "
"`%s` ..." % (self.log_file_path, ' '.join([str(x) for x in compute_command[:7]])))
self.progress.update('Computing similarity matrix for kmer=%d, scale=%d' % (self.kmer_size, self.scale))
compare_command = [self.program_name, 'compare',
'-k', self.kmer_size,
'--csv', os.path.join('output', report_name + '.txt')]
for f in fasta_files:
compare_command.append(f + ".sig")
exit_code = utils.run_command(compare_command, self.log_file_path, remove_log_file_if_exists=False)
if int(exit_code):
self.progress.end()
            raise ConfigError("sourmash returned with non-zero exit code, there may be some errors. "
                              "Please check the log file `%s` for details. Offending command: "
                              "`%s` ..." % (self.log_file_path, ' '.join([str(x) for x in compare_command[:7]])))
self.results[report_name] = utils.get_TAB_delimited_file_as_dictionary(os.path.join('output', report_name + '.txt'),
indexing_field=-1,
separator=',')
self.progress.end()
# restore old working directory
os.chdir(old_wd)
return self.results
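# A hedged usage sketch (the work directory and fasta names are
# hypothetical, and the `sourmash` binary must be on the PATH):
#
#     import argparse
#     args = argparse.Namespace(kmer_size=31, scale=1000, num_threads=1,
#                               log_file=None)
#     driver = Sourmash(args)
#     results = driver.process('/tmp/sourmash_work',
#                              ['genome_a.fa', 'genome_b.fa'])
#     # results['kmer_31_mash_similarity'] holds the pairwise similarities
#     # parsed from the CSV written by `sourmash compare`.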
| gpl-3.0 |
JsNoNo/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 254 | 2795 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.grid_search import GridSearchCV
from sklearn.datasets import load_files
from sklearn.cross_validation import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the cross-validated scores for the each parameters set
# explored by the grid search
print(grid_search.grid_scores_)
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
BigDataforYou/movie_recommendation_workshop_1 | big_data_4_you_demo_1/venv/lib/python2.7/site-packages/pandas/stats/tests/test_math.py | 9 | 1836 | import nose
from datetime import datetime
from numpy.random import randn
import numpy as np
from pandas.core.api import Series, DataFrame, date_range
import pandas.util.testing as tm
import pandas.stats.math as pmath
from pandas import ols
N, K = 100, 10
_have_statsmodels = True
try:
import statsmodels.api as sm
except ImportError:
try:
import scikits.statsmodels.api as sm # noqa
except ImportError:
_have_statsmodels = False
class TestMath(tm.TestCase):
_nan_locs = np.arange(20, 40)
_inf_locs = np.array([])
def setUp(self):
arr = randn(N)
arr[self._nan_locs] = np.NaN
self.arr = arr
self.rng = date_range(datetime(2009, 1, 1), periods=N)
self.series = Series(arr.copy(), index=self.rng)
self.frame = DataFrame(randn(N, K), index=self.rng,
columns=np.arange(K))
def test_rank_1d(self):
self.assertEqual(1, pmath.rank(self.series))
self.assertEqual(0, pmath.rank(Series(0, self.series.index)))
def test_solve_rect(self):
if not _have_statsmodels:
raise nose.SkipTest("no statsmodels")
b = Series(np.random.randn(N), self.frame.index)
result = pmath.solve(self.frame, b)
with tm.assert_produces_warning(FutureWarning, check_stacklevel=False):
expected = ols(y=b, x=self.frame, intercept=False).beta
self.assertTrue(np.allclose(result, expected))
def test_inv_illformed(self):
singular = DataFrame(np.array([[1, 1], [2, 2]]))
rs = pmath.inv(singular)
expected = np.array([[0.1, 0.2], [0.1, 0.2]])
self.assertTrue(np.allclose(rs, expected))
if __name__ == '__main__':
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
trachelr/mne-python | mne/time_frequency/tfr.py | 2 | 48373 | """A module which implements the time frequency estimation.
Morlet code inspired by Matlab code from Sheraz Khan & Brainstorm & SPM
"""
# Authors : Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Hari Bharadwaj <hari@nmr.mgh.harvard.edu>
#
# License : BSD (3-clause)
import warnings
from math import sqrt
from copy import deepcopy
import numpy as np
from scipy import linalg
from scipy.fftpack import fftn, ifftn
from ..fixes import partial
from ..baseline import rescale
from ..parallel import parallel_func
from ..utils import logger, verbose, _time_mask
from ..channels.channels import ContainsMixin, UpdateChannelsMixin
from ..io.pick import pick_info, pick_types
from ..utils import check_fname
from .multitaper import dpss_windows
from .._hdf5 import write_hdf5, read_hdf5
def _get_data(inst, return_itc):
"""Get data from Epochs or Evoked instance as epochs x ch x time"""
from ..epochs import _BaseEpochs
from ..evoked import Evoked
if not isinstance(inst, (_BaseEpochs, Evoked)):
raise TypeError('inst must be Epochs or Evoked')
if isinstance(inst, _BaseEpochs):
data = inst.get_data()
else:
if return_itc:
raise ValueError('return_itc must be False for evoked data')
data = inst.data[np.newaxis, ...].copy()
return data
def morlet(sfreq, freqs, n_cycles=7, sigma=None, zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency
freqs : array
frequency range of interest (1 x Frequencies)
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
sigma : float, (optional)
        It controls the width of the wavelet, i.e. its temporal
        resolution. If sigma is None the temporal resolution
        is adapted with the frequency, as for any wavelet transform:
        the higher the frequency, the shorter the wavelet.
        If sigma is fixed, the temporal resolution is fixed,
        as for the short-time Fourier transform, and the number
        of oscillations increases with the frequency.
zero_mean : bool
Make sure the wavelet is zero mean
Returns
-------
Ws : list of array
Wavelets time series
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
Ws = list()
n_cycles = np.atleast_1d(n_cycles)
if (n_cycles.size != 1) and (n_cycles.size != len(freqs)):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
# fixed or scale-dependent window
if sigma is None:
sigma_t = this_n_cycles / (2.0 * np.pi * f)
else:
sigma_t = this_n_cycles / (2.0 * np.pi * sigma)
# this scaling factor is proportional to (Tallon-Baudry 98):
# (sigma_t*sqrt(pi))^(-1/2);
t = np.arange(0., 5. * sigma_t, 1.0 / sfreq)
t = np.r_[-t[::-1], t[1:]]
oscillation = np.exp(2.0 * 1j * np.pi * f * t)
gaussian_enveloppe = np.exp(-t ** 2 / (2.0 * sigma_t ** 2))
if zero_mean: # to make it zero mean
real_offset = np.exp(- 2 * (np.pi * f * sigma_t) ** 2)
oscillation -= real_offset
W = oscillation * gaussian_enveloppe
W /= sqrt(0.5) * linalg.norm(W.ravel())
Ws.append(W)
return Ws
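# A minimal, illustrative sketch: wavelets for a 1 kHz recording at three
# frequencies, seven cycles each. Lower frequencies yield longer wavelets.
#
#     freqs = np.array([10., 20., 40.])
#     Ws = morlet(sfreq=1000., freqs=freqs, n_cycles=7, zero_mean=True)
#     # len(Ws) == 3; each entry is a complex-valued 1D array.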
def _dpss_wavelet(sfreq, freqs, n_cycles=7, time_bandwidth=4.0,
zero_mean=False):
"""Compute Wavelets for the given frequency range
Parameters
----------
sfreq : float
Sampling Frequency.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
Defaults to 7.
time_bandwidth : float, (optional)
Time x Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1).
Default is 4.0, giving 3 good tapers.
Returns
-------
Ws : list of array
Wavelets time series
"""
Ws = list()
if time_bandwidth < 2.0:
raise ValueError("time_bandwidth should be >= 2.0 for good tapers")
n_taps = int(np.floor(time_bandwidth - 1))
n_cycles = np.atleast_1d(n_cycles)
if n_cycles.size != 1 and n_cycles.size != len(freqs):
raise ValueError("n_cycles should be fixed or defined for "
"each frequency.")
for m in range(n_taps):
Wm = list()
for k, f in enumerate(freqs):
if len(n_cycles) != 1:
this_n_cycles = n_cycles[k]
else:
this_n_cycles = n_cycles[0]
t_win = this_n_cycles / float(f)
t = np.arange(0., t_win, 1.0 / sfreq)
# Making sure wavelets are centered before tapering
oscillation = np.exp(2.0 * 1j * np.pi * f * (t - t_win / 2.))
# Get dpss tapers
tapers, conc = dpss_windows(t.shape[0], time_bandwidth / 2.,
n_taps)
Wk = oscillation * tapers[m]
if zero_mean: # to make it zero mean
real_offset = Wk.mean()
Wk -= real_offset
Wk /= sqrt(0.5) * linalg.norm(Wk.ravel())
Wm.append(Wk)
Ws.append(Wm)
return Ws
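# Illustrative sketch: with the default time_bandwidth of 4.0 the function
# keeps floor(4.0 - 1) = 3 low-bias tapers, so the result is a list of
# three taper-specific wavelet lists, one wavelet per requested frequency.
#
#     Ws = _dpss_wavelet(sfreq=1000., freqs=np.array([15., 30.]),
#                        n_cycles=7, time_bandwidth=4.0)
#     # len(Ws) == 3 (tapers); len(Ws[0]) == 2 (frequencies)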
def _centered(arr, newsize):
"""Aux Function to center data"""
# Return the center newsize portion of the array.
newsize = np.asarray(newsize)
currsize = np.array(arr.shape)
startind = (currsize - newsize) // 2
endind = startind + newsize
myslice = [slice(startind[k], endind[k]) for k in range(len(endind))]
return arr[tuple(myslice)]
def _cwt_fft(X, Ws, mode="same"):
"""Compute cwt with fft based convolutions
Return a generator over signals.
"""
X = np.asarray(X)
# Precompute wavelets for given frequency range to save time
n_signals, n_times = X.shape
n_freqs = len(Ws)
Ws_max_size = max(W.size for W in Ws)
size = n_times + Ws_max_size - 1
# Always use 2**n-sized FFT
fsize = 2 ** int(np.ceil(np.log2(size)))
# precompute FFTs of Ws
fft_Ws = np.empty((n_freqs, fsize), dtype=np.complex128)
for i, W in enumerate(Ws):
if len(W) > n_times:
raise ValueError('Wavelet is too long for such a short signal. '
'Reduce the number of cycles.')
fft_Ws[i] = fftn(W, [fsize])
for k, x in enumerate(X):
if mode == "full":
tfr = np.zeros((n_freqs, fsize), dtype=np.complex128)
elif mode == "same" or mode == "valid":
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
fft_x = fftn(x, [fsize])
for i, W in enumerate(Ws):
ret = ifftn(fft_x * fft_Ws[i])[:n_times + W.size - 1]
if mode == "valid":
sz = abs(W.size - n_times) + 1
                offset = (n_times - sz) // 2
tfr[i, offset:(offset + sz)] = _centered(ret, sz)
else:
tfr[i, :] = _centered(ret, n_times)
yield tfr
def _cwt_convolve(X, Ws, mode='same'):
"""Compute time freq decomposition with temporal convolutions
Return a generator over signals.
"""
X = np.asarray(X)
n_signals, n_times = X.shape
n_freqs = len(Ws)
# Compute convolutions
for x in X:
tfr = np.zeros((n_freqs, n_times), dtype=np.complex128)
for i, W in enumerate(Ws):
ret = np.convolve(x, W, mode=mode)
if len(W) > len(x):
raise ValueError('Wavelet is too long for such a short '
'signal. Reduce the number of cycles.')
if mode == "valid":
sz = abs(W.size - n_times) + 1
                offset = (n_times - sz) // 2
tfr[i, offset:(offset + sz)] = ret
else:
tfr[i] = ret
yield tfr
def cwt_morlet(X, sfreq, freqs, use_fft=True, n_cycles=7.0, zero_mean=False):
"""Compute time freq decomposition with Morlet wavelets
This function operates directly on numpy arrays. Consider using
`tfr_morlet` to process `Epochs` or `Evoked` instances.
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
sfreq : float
sampling Frequency
freqs : array
Array of frequencies of interest
use_fft : bool
        Compute the convolution with FFT or temporal convolution.
n_cycles: float | array of float
Number of cycles. Fixed number or one per frequency.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
tfr.cwt : Compute time-frequency decomposition with user-provided wavelets
"""
mode = 'same'
# mode = "valid"
n_signals, n_times = X.shape
n_frequencies = len(freqs)
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, freqs, n_cycles=n_cycles, zero_mean=zero_mean)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr
return tfrs
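# A hedged sketch on synthetic data: decompose five random 1 s signals into
# a complex (n_signals, n_freqs, n_times) array and derive power from it.
#
#     X = np.random.randn(5, 1000)
#     tfr = cwt_morlet(X, sfreq=1000., freqs=np.array([8., 12., 30.]),
#                      use_fft=True, n_cycles=5)
#     power = (tfr * tfr.conj()).real  # tfr.shape == (5, 3, 1000)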
def cwt(X, Ws, use_fft=True, mode='same', decim=1):
"""Compute time freq decomposition with continuous wavelet transform
Parameters
----------
X : array of shape [n_signals, n_times]
signals (one per line)
Ws : list of array
Wavelets time series
use_fft : bool
Use FFT for convolutions
mode : 'same' | 'valid' | 'full'
Convention for convolution
decim : int
Temporal decimation factor
Returns
-------
tfr : 3D array
Time Frequency Decompositions (n_signals x n_frequencies x n_times)
See Also
--------
mne.time_frequency.cwt_morlet : Compute time-frequency decomposition
with Morlet wavelets
"""
n_signals, n_times = X[:, ::decim].shape
n_frequencies = len(Ws)
if use_fft:
coefs = _cwt_fft(X, Ws, mode)
else:
coefs = _cwt_convolve(X, Ws, mode)
tfrs = np.empty((n_signals, n_frequencies, n_times), dtype=np.complex)
for k, tfr in enumerate(coefs):
tfrs[k] = tfr[..., ::decim]
return tfrs
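# Sketch combining the helpers above: precompute the wavelets once, then
# convolve and decimate the output in time by a factor of 4.
#
#     Ws = morlet(sfreq=1000., freqs=np.array([10., 20.]), n_cycles=7)
#     tfr = cwt(np.random.randn(3, 2000), Ws, use_fft=True, decim=4)
#     # tfr.shape == (3, 2, 500)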
def _time_frequency(X, Ws, use_fft, decim):
"""Aux of time_frequency for parallel computing over channels
"""
n_epochs, n_times = X.shape
n_times = n_times // decim + bool(n_times % decim)
n_frequencies = len(Ws)
psd = np.zeros((n_frequencies, n_times)) # PSD
plf = np.zeros((n_frequencies, n_times), np.complex) # phase lock
mode = 'same'
if use_fft:
tfrs = _cwt_fft(X, Ws, mode)
else:
tfrs = _cwt_convolve(X, Ws, mode)
for tfr in tfrs:
tfr = tfr[:, ::decim]
tfr_abs = np.abs(tfr)
psd += tfr_abs ** 2
plf += tfr / tfr_abs
psd /= n_epochs
plf = np.abs(plf) / n_epochs
return psd, plf
@verbose
def single_trial_power(data, sfreq, frequencies, use_fft=True, n_cycles=7,
baseline=None, baseline_mode='ratio', times=None,
decim=1, n_jobs=1, zero_mean=False, verbose=None):
"""Compute time-frequency power on single epochs
Parameters
----------
data : array of shape [n_epochs, n_channels, n_times]
The epochs
sfreq : float
Sampling rate
frequencies : array-like
The frequencies
use_fft : bool
Use the FFT for convolutions or not.
n_cycles : float | array of float
Number of cycles in the Morlet wavelet. Fixed number
or one per frequency.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
        If baseline is equal to (None, None) all the time
interval is used.
baseline_mode : None | 'ratio' | 'zscore'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
times : array
Required to define baseline
decim : int
Temporal decimation factor
n_jobs : int
The number of epochs to process at the same time
zero_mean : bool
Make sure the wavelets are zero mean.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : 4D array
Power estimate (Epochs x Channels x Frequencies x Timepoints).
"""
mode = 'same'
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
parallel, my_cwt, _ = parallel_func(cwt, n_jobs)
logger.info("Computing time-frequency power on single epochs...")
power = np.empty((n_epochs, n_channels, n_frequencies, n_times),
dtype=np.float)
# Package arguments for `cwt` here to minimize omissions where only one of
# the two calls below is updated with new function arguments.
cwt_kw = dict(Ws=Ws, use_fft=use_fft, mode=mode, decim=decim)
if n_jobs == 1:
for k, e in enumerate(data):
x = cwt(e, **cwt_kw)
power[k] = (x * x.conj()).real
else:
# Precompute tf decompositions in parallel
tfrs = parallel(my_cwt(e, **cwt_kw) for e in data)
for k, tfr in enumerate(tfrs):
power[k] = (tfr * tfr.conj()).real
# Run baseline correction. Be sure to decimate the times array as well if
# needed.
if times is not None:
times = times[::decim]
power = rescale(power, times, baseline, baseline_mode, copy=False)
return power
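# Illustrative sketch on synthetic epochs: per-epoch power with a ratio
# baseline taken over the first 200 ms (all values are made up).
#
#     data = np.random.randn(10, 4, 1000)        # epochs x channels x times
#     times = np.arange(1000) / 1000.
#     power = single_trial_power(data, sfreq=1000.,
#                                frequencies=np.array([10., 20.]),
#                                n_cycles=3, baseline=(0., 0.2),
#                                baseline_mode='ratio', times=times)
#     # power.shape == (10, 4, 2, 1000)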
def _induced_power_cwt(data, sfreq, frequencies, use_fft=True, n_cycles=7,
decim=1, n_jobs=1, zero_mean=False):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with Morlet wavelets
Parameters
----------
data : array
3D array of shape [n_epochs, n_channels, n_times]
sfreq : float
sampling Frequency
frequencies : array
Array of frequencies of interest
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions.
n_cycles : float | array of float
Number of cycles. Fixed number or one per frequency.
decim: int
Temporal decimation factor
n_jobs : int
The number of CPUs used in parallel. All CPUs are used in -1.
Requires joblib package.
zero_mean : bool
Make sure the wavelets are zero mean.
Returns
-------
    power : 3D array
Induced power (Channels x Frequencies x Timepoints).
Squared amplitude of time-frequency coefficients.
    phase_lock : 3D array
Phase locking factor in [0, 1] (Channels x Frequencies x Timepoints)
"""
n_frequencies = len(frequencies)
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
# Precompute wavelets for given frequency range to save time
Ws = morlet(sfreq, frequencies, n_cycles=n_cycles, zero_mean=zero_mean)
psd = np.empty((n_channels, n_frequencies, n_times))
plf = np.empty((n_channels, n_frequencies, n_times))
# Separate to save memory for n_jobs=1
parallel, my_time_frequency, _ = parallel_func(_time_frequency, n_jobs)
psd_plf = parallel(my_time_frequency(data[:, c, :], Ws, use_fft, decim)
for c in range(n_channels))
for c, (psd_c, plf_c) in enumerate(psd_plf):
psd[c, :, :], plf[c, :, :] = psd_c, plf_c
return psd, plf
def _preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB):
"""Aux Function to prepare tfr computation"""
from ..viz.utils import _setup_vmin_vmax
if mode is not None and baseline is not None:
logger.info("Applying baseline correction '%s' during %s" %
(mode, baseline))
data = rescale(data.copy(), times, baseline, mode)
# crop time
itmin, itmax = None, None
idx = np.where(_time_mask(times, tmin, tmax))[0]
if tmin is not None:
itmin = idx[0]
if tmax is not None:
itmax = idx[-1] + 1
times = times[itmin:itmax]
# crop freqs
ifmin, ifmax = None, None
idx = np.where(_time_mask(freqs, fmin, fmax))[0]
if fmin is not None:
ifmin = idx[0]
if fmax is not None:
ifmax = idx[-1] + 1
freqs = freqs[ifmin:ifmax]
# crop data
data = data[:, ifmin:ifmax, itmin:itmax]
times *= 1e3
if dB:
data = 10 * np.log10((data * data.conj()).real)
vmin, vmax = _setup_vmin_vmax(data, vmin, vmax)
return data, times, freqs, vmin, vmax
class AverageTFR(ContainsMixin, UpdateChannelsMixin):
"""Container for Time-Frequency data
Can for example store induced power at sensor level or intertrial
coherence.
Parameters
----------
info : Info
The measurement info.
data : ndarray, shape (n_channels, n_freqs, n_times)
The data.
times : ndarray, shape (n_times,)
The time values in seconds.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
nave : int
The number of averaged TFRs.
comment : str | None
Comment on the data, e.g., the experimental condition.
Defaults to None.
method : str | None
Comment on the method used to compute the data, e.g., morlet wavelet.
Defaults to None.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Attributes
----------
ch_names : list
The names of the channels.
"""
@verbose
def __init__(self, info, data, times, freqs, nave, comment=None,
method=None, verbose=None):
self.info = info
if data.ndim != 3:
raise ValueError('data should be 3d. Got %d.' % data.ndim)
n_channels, n_freqs, n_times = data.shape
if n_channels != len(info['chs']):
raise ValueError("Number of channels and data size don't match"
" (%d != %d)." % (n_channels, len(info['chs'])))
if n_freqs != len(freqs):
raise ValueError("Number of frequencies and data size don't match"
" (%d != %d)." % (n_freqs, len(freqs)))
if n_times != len(times):
raise ValueError("Number of times and data size don't match"
" (%d != %d)." % (n_times, len(times)))
self.data = data
self.times = times
self.freqs = freqs
self.nave = nave
self.comment = comment
self.method = method
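    # Illustrative construction sketch (synthetic values, assuming the usual
    # mne.create_info helper): data must be (n_channels, n_freqs, n_times)
    # and consistent with info, freqs and times, otherwise ValueError.
    #
    #     info = mne.create_info(['MEG 0113'], sfreq=1000., ch_types='grad')
    #     data = np.random.rand(1, 5, 200)
    #     tfr = AverageTFR(info, data, times=np.arange(200) / 1000.,
    #                      freqs=np.linspace(8., 12., 5), nave=30)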
@property
def ch_names(self):
return self.info['ch_names']
def crop(self, tmin=None, tmax=None, copy=False):
"""Crop data to a given time interval
Parameters
----------
tmin : float | None
Start time of selection in seconds.
tmax : float | None
End time of selection in seconds.
copy : bool
If False epochs is cropped in place.
"""
inst = self if not copy else self.copy()
mask = _time_mask(inst.times, tmin, tmax)
inst.times = inst.times[mask]
inst.data = inst.data[..., mask]
return inst
@verbose
def plot(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
cmap='RdBu_r', dB=False, colorbar=True, show=True,
title=None, axes=None, verbose=None):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value on the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value on the color scale. If vmax is None, the data
maximum value is used.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot. For user defined axes,
the colorbar cannot be drawn. Defaults to True.
show : bool
Call pyplot.show() at the end.
title : str | None
String for title. Defaults to None (blank/no title).
axes : instance of Axes | list | None
The axes to plot to. If list, the list must be a list of Axes of
the same length as the number of channels. If instance of Axes,
there must be only one channel plotted.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr
import matplotlib.pyplot as plt
times, freqs = self.times.copy(), self.freqs.copy()
data = self.data[picks]
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax, mode,
baseline, vmin, vmax, dB)
tmin, tmax = times[0], times[-1]
if isinstance(axes, plt.Axes):
axes = [axes]
if isinstance(axes, list) and len(axes) != len(picks):
raise RuntimeError('There must be an axes for each picked '
'channel.')
if colorbar:
logger.warning('Cannot draw colorbar for user defined axes.')
for idx in range(len(data)):
if axes is None:
fig = plt.figure()
ax = fig.add_subplot(111)
else:
ax = axes[idx]
fig = ax.get_figure()
_imshow_tfr(ax, 0, tmin, tmax, vmin, vmax, ylim=None,
tfr=data[idx: idx + 1], freq=freqs,
x_label='Time (ms)', y_label='Frequency (Hz)',
colorbar=False, picker=False, cmap=cmap)
if title:
fig.suptitle(title)
if show:
plt.show()
return fig
def plot_topo(self, picks=None, baseline=None, mode='mean', tmin=None,
tmax=None, fmin=None, fmax=None, vmin=None, vmax=None,
layout=None, cmap='RdBu_r', title=None, dB=False,
colorbar=True, layout_scale=0.945, show=True,
border='none', fig_facecolor='k', font_color='w'):
"""Plot TFRs in a topography with images
Parameters
----------
picks : array-like of int | None
The indices of the channels to plot. If None all available
channels are displayed.
baseline : None (default) or tuple of length 2
The time interval to apply baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
            If baseline is equal to (None, None) all the time
interval is used.
mode : None | 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or zscore (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline)).
If None no baseline correction is applied.
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
vmin : float | None
            The minimum value on the color scale. If vmin is None, the data
            minimum value is used.
        vmax : float | None
            The maximum value on the color scale. If vmax is None, the data
maximum value is used.
layout : Layout | None
Layout instance specifying sensor positions. If possible, the
correct layout is inferred from the data.
cmap : matplotlib colormap | str
The colormap to use. Defaults to 'RdBu_r'.
title : str
Title of the figure.
dB : bool
If True, 20*log10 is applied to the data to get dB.
colorbar : bool
If true, colorbar will be added to the plot
layout_scale : float
Scaling factor for adjusting the relative size of the layout
on the canvas.
show : bool
Call pyplot.show() at the end.
border : str
matplotlib borders style to be used for each sensor plot.
fig_facecolor : str | obj
The figure face color. Defaults to black.
font_color: str | obj
The color of tick labels in the colorbar. Defaults to white.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz.topo import _imshow_tfr, _plot_topo
import matplotlib.pyplot as plt
times = self.times.copy()
freqs = self.freqs
data = self.data
info = self.info
if picks is not None:
data = data[picks]
info = pick_info(info, picks)
data, times, freqs, vmin, vmax = \
_preproc_tfr(data, times, freqs, tmin, tmax, fmin, fmax,
mode, baseline, vmin, vmax, dB)
if layout is None:
from mne import find_layout
layout = find_layout(self.info)
imshow = partial(_imshow_tfr, tfr=data, freq=freqs, cmap=cmap)
fig = _plot_topo(info=info, times=times,
show_func=imshow, layout=layout,
colorbar=colorbar, vmin=vmin, vmax=vmax, cmap=cmap,
layout_scale=layout_scale, title=title, border=border,
x_label='Time (ms)', y_label='Frequency (Hz)',
fig_facecolor=fig_facecolor,
font_color=font_color)
if show:
plt.show()
return fig
def _check_compat(self, tfr):
"""checks that self and tfr have the same time-frequency ranges"""
assert np.all(tfr.times == self.times)
assert np.all(tfr.freqs == self.freqs)
def __add__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data += tfr.data
return out
def __iadd__(self, tfr):
self._check_compat(tfr)
self.data += tfr.data
return self
def __sub__(self, tfr):
self._check_compat(tfr)
out = self.copy()
out.data -= tfr.data
return out
def __isub__(self, tfr):
self._check_compat(tfr)
self.data -= tfr.data
return self
def copy(self):
"""Return a copy of the instance."""
return deepcopy(self)
def __repr__(self):
s = "time : [%f, %f]" % (self.times[0], self.times[-1])
s += ", freq : [%f, %f]" % (self.freqs[0], self.freqs[-1])
s += ", nave : %d" % self.nave
s += ', channels : %d' % self.data.shape[0]
return "<AverageTFR | %s>" % s
def apply_baseline(self, baseline, mode='mean'):
"""Baseline correct the data
Parameters
----------
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
"""
self.data = rescale(self.data, self.times, baseline, mode, copy=False)
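    # Hedged example: z-score each channel/frequency against the
    # pre-stimulus interval (assumes the TFR spans negative times).
    #
    #     tfr.apply_baseline(baseline=(-0.5, 0.), mode='zscore')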
def plot_topomap(self, tmin=None, tmax=None, fmin=None, fmax=None,
ch_type=None, baseline=None, mode='mean',
layout=None, vmin=None, vmax=None, cmap='RdBu_r',
sensors=True, colorbar=True, unit=None, res=64, size=2,
cbar_fmt='%1.1e', show_names=False, title=None,
axes=None, show=True, outlines='head', head_pos=None):
"""Plot topographic maps of time-frequency intervals of TFR data
Parameters
----------
tmin : None | float
The first time instant to display. If None the first time point
available is used.
tmax : None | float
The last time instant to display. If None the last time point
available is used.
fmin : None | float
The first frequency to display. If None the first frequency
available is used.
fmax : None | float
The last frequency to display. If None the last frequency
available is used.
ch_type : 'mag' | 'grad' | 'planar1' | 'planar2' | 'eeg' | None
The channel type to plot. For 'grad', the gradiometers are
collected in pairs and the RMS for each pair is plotted.
If None, then channels are chosen in the order given above.
baseline : tuple or list of length 2
The time interval to apply rescaling / baseline correction.
If None do not apply it. If baseline is (a, b)
the interval is between "a (s)" and "b (s)".
If a is None the beginning of the data is used
and if b is None then b is set to the end of the interval.
If baseline is equal to (None, None) all the time
interval is used.
mode : 'logratio' | 'ratio' | 'zscore' | 'mean' | 'percent'
Do baseline correction with ratio (power is divided by mean
power during baseline) or z-score (power is divided by standard
deviation of power during baseline after subtracting the mean,
power = [power - mean(power_baseline)] / std(power_baseline))
            If None, no baseline correction will be performed.
layout : None | Layout
Layout instance specifying sensor positions (does not need to
be specified for Neuromag data). If possible, the correct layout
file is inferred from the data; if no appropriate layout file was
found, the layout is automatically generated from the sensor
locations.
vmin : float | callable
            The value specifying the lower bound of the color range.
If None, and vmax is None, -vmax is used. Else np.min(data).
If callable, the output equals vmin(data).
vmax : float | callable
            The value specifying the upper bound of the color range.
If None, the maximum absolute value is used. If vmin is None,
but vmax is not, defaults to np.min(data).
If callable, the output equals vmax(data).
cmap : matplotlib colormap
Colormap. For magnetometers and eeg defaults to 'RdBu_r', else
'Reds'.
sensors : bool | str
Add markers for sensor locations to the plot. Accepts matplotlib
plot format string (e.g., 'r+' for red plusses). If True, a circle
will be used (via .add_artist). Defaults to True.
colorbar : bool
Plot a colorbar.
unit : dict | str | None
The unit of the channel type used for colorbar label. If
scale is None the unit is automatically determined.
res : int
The resolution of the topomap image (n pixels along each side).
size : float
Side length per topomap in inches.
cbar_fmt : str
String format for colorbar values.
show_names : bool | callable
If True, show channel names on top of the map. If a callable is
passed, channel names will be formatted using the callable; e.g.,
to delete the prefix 'MEG ' from all channel names, pass the
function lambda x: x.replace('MEG ', ''). If `mask` is not None,
only significant sensors will be shown.
title : str | None
Title. If None (default), no title is displayed.
axes : instance of Axes | None
The axes to plot to. If None the axes is defined automatically.
show : bool
Call pyplot.show() at the end.
outlines : 'head' | dict | None
The outlines to be drawn. If 'head', a head scheme will be drawn.
If dict, each key refers to a tuple of x and y positions.
The values in 'mask_pos' will serve as image mask. If None, nothing
will be drawn. Defaults to 'head'. If dict, the 'autoshrink' (bool)
field will trigger automated shrinking of the positions due to
points outside the outline. Moreover, a matplotlib patch object can
be passed for advanced masking options, either directly or as a
function that returns patches (required for multi-axis plots).
head_pos : dict | None
If None (default), the sensors are positioned such that they span
the head circle. If dict, can have entries 'center' (tuple) and
'scale' (tuple) for what the center and scale of the head should be
relative to the electrode locations.
Returns
-------
fig : matplotlib.figure.Figure
The figure containing the topography.
"""
from ..viz import plot_tfr_topomap
return plot_tfr_topomap(self, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, ch_type=ch_type, baseline=baseline,
mode=mode, layout=layout, vmin=vmin, vmax=vmax,
cmap=cmap, sensors=sensors, colorbar=colorbar,
unit=unit, res=res, size=size,
cbar_fmt=cbar_fmt, show_names=show_names,
title=title, axes=axes, show=show,
outlines=outlines, head_pos=head_pos)
def save(self, fname, overwrite=False):
"""Save TFR object to hdf5 file
Parameters
----------
fname : str
The file name, which should end with -tfr.h5 .
overwrite : bool
            If True, overwrite file (if it exists). Defaults to False.
"""
write_tfrs(fname, self, overwrite=overwrite)
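# A minimal sketch of saving an AverageTFR with the method above; ``power``
# is a hypothetical AverageTFR instance, and the file name follows the
# -tfr.h5 convention expected by read_tfrs:
#
# >>> power.save('sample-tfr.h5', overwrite=True)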
def _prepare_write_tfr(tfr, condition):
"""Aux function"""
return (condition, dict(times=tfr.times, freqs=tfr.freqs,
data=tfr.data, info=tfr.info, nave=tfr.nave,
comment=tfr.comment, method=tfr.method))
def write_tfrs(fname, tfr, overwrite=False):
"""Write a TFR dataset to hdf5.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5
tfr : AverageTFR instance, or list of AverageTFR instances
The TFR dataset, or list of TFR datasets, to save in one file.
        Note. If .comment is None, a name will be generated on the fly,
        based on the order in which the TFR objects are passed.
overwrite : bool
If True, overwrite file (if it exists). Defaults to False.
See Also
--------
read_tfrs
Notes
-----
.. versionadded:: 0.9.0
"""
out = []
if not isinstance(tfr, (list, tuple)):
tfr = [tfr]
for ii, tfr_ in enumerate(tfr):
comment = ii if tfr_.comment is None else tfr_.comment
out.append(_prepare_write_tfr(tfr_, condition=comment))
write_hdf5(fname, out, overwrite=overwrite)
def read_tfrs(fname, condition=None):
"""
Read TFR datasets from hdf5 file.
Parameters
----------
fname : string
The file name, which should end with -tfr.h5 .
condition : int or str | list of int or str | None
The condition to load. If None, all conditions will be returned.
Defaults to None.
See Also
--------
write_tfrs
Returns
-------
tfrs : list of instances of AverageTFR | instance of AverageTFR
Depending on `condition` either the TFR object or a list of multiple
TFR objects.
Notes
-----
.. versionadded:: 0.9.0
"""
check_fname(fname, 'tfr', ('-tfr.h5',))
logger.info('Reading %s ...' % fname)
tfr_data = read_hdf5(fname)
if condition is not None:
tfr_dict = dict(tfr_data)
if condition not in tfr_dict:
keys = ['%s' % k for k in tfr_dict]
raise ValueError('Cannot find condition ("{0}") in this file. '
                             'I can give you "{1}"'
.format(condition, " or ".join(keys)))
out = AverageTFR(**tfr_dict[condition])
else:
out = [AverageTFR(**d) for d in list(zip(*tfr_data))[1]]
return out
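# A minimal round-trip sketch for write_tfrs / read_tfrs; ``tfr_a`` and
# ``tfr_b`` are hypothetical AverageTFR instances whose .comment attributes
# serve as condition names:
#
# >>> write_tfrs('conditions-tfr.h5', [tfr_a, tfr_b], overwrite=True)
# >>> tfrs = read_tfrs('conditions-tfr.h5')              # list of AverageTFR
# >>> tfr_a2 = read_tfrs('conditions-tfr.h5', condition=tfr_a.comment)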
def tfr_morlet(inst, freqs, n_cycles, use_fft=False,
return_itc=True, decim=1, n_jobs=1):
"""Compute Time-Frequency Representation (TFR) using Morlet wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
use_fft : bool
        Use FFT-based convolution if True, temporal convolution otherwise.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Must be ``False`` for evoked data.
decim : int
        The decimation factor on the time axis, used to reduce memory usage.
n_jobs : int
The number of jobs to run in parallel.
Returns
-------
power : instance of AverageTFR
The averaged power.
itc : instance of AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
tfr_multitaper, tfr_stockwell
"""
data = _get_data(inst, return_itc)
picks = pick_types(inst.info, meg=True, eeg=True)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
power, itc = _induced_power_cwt(data, sfreq=info['sfreq'],
frequencies=freqs,
n_cycles=n_cycles, n_jobs=n_jobs,
use_fft=use_fft, decim=decim,
zero_mean=True)
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave, method='morlet-power')
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
method='morlet-itc'))
return out
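# A minimal sketch of calling tfr_morlet on epoched or evoked data;
# ``epochs`` and ``evoked`` are hypothetical Epochs/Evoked instances and the
# frequency grid is arbitrary:
#
# >>> freqs = np.arange(6., 30., 3.)
# >>> power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=freqs / 2.,
# ...                         decim=3, return_itc=True)
# >>> power = tfr_morlet(evoked, freqs=freqs, n_cycles=2., return_itc=False)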
@verbose
def _induced_power_mtm(data, sfreq, frequencies, time_bandwidth=4.0,
use_fft=True, n_cycles=7, decim=1, n_jobs=1,
zero_mean=True, verbose=None):
"""Compute time induced power and inter-trial phase-locking factor
The time frequency decomposition is done with DPSS wavelets
Parameters
----------
data : np.ndarray, shape (n_epochs, n_channels, n_times)
The input data.
sfreq : float
        The sampling frequency.
frequencies : np.ndarray, shape (n_frequencies,)
Array of frequencies of interest
time_bandwidth : float
Time x (Full) Bandwidth product.
The number of good tapers (low-bias) is chosen automatically based on
this to equal floor(time_bandwidth - 1). Default is 4.0 (3 tapers).
use_fft : bool
Compute transform with fft based convolutions or temporal
convolutions. Defaults to True.
n_cycles : float | np.ndarray shape (n_frequencies,)
Number of cycles. Fixed number or one per frequency. Defaults to 7.
    decim : int
        Temporal decimation factor. Defaults to 1.
    n_jobs : int
        The number of CPUs used in parallel. If -1, all CPUs are used.
Requires joblib package. Defaults to 1.
zero_mean : bool
Make sure the wavelets are zero mean. Defaults to True.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
power : np.ndarray, shape (n_channels, n_frequencies, n_times)
Induced power. Squared amplitude of time-frequency coefficients.
itc : np.ndarray, shape (n_channels, n_frequencies, n_times)
Phase locking value.
"""
n_epochs, n_channels, n_times = data[:, :, ::decim].shape
logger.info('Data is %d trials and %d channels', n_epochs, n_channels)
n_frequencies = len(frequencies)
logger.info('Multitaper time-frequency analysis for %d frequencies',
n_frequencies)
# Precompute wavelets for given frequency range to save time
Ws = _dpss_wavelet(sfreq, frequencies, n_cycles=n_cycles,
time_bandwidth=time_bandwidth, zero_mean=zero_mean)
n_taps = len(Ws)
logger.info('Using %d tapers', n_taps)
n_times_wavelets = Ws[0][0].shape[0]
if n_times <= n_times_wavelets:
warnings.warn("Time windows are as long or longer than the epoch. "
"Consider reducing n_cycles.")
psd = np.zeros((n_channels, n_frequencies, n_times))
itc = np.zeros((n_channels, n_frequencies, n_times))
parallel, my_time_frequency, _ = parallel_func(_time_frequency,
n_jobs)
for m in range(n_taps):
psd_itc = parallel(my_time_frequency(data[:, c, :],
Ws[m], use_fft, decim)
for c in range(n_channels))
for c, (psd_c, itc_c) in enumerate(psd_itc):
psd[c, :, :] += psd_c
itc[c, :, :] += itc_c
psd /= n_taps
itc /= n_taps
return psd, itc
def tfr_multitaper(inst, freqs, n_cycles, time_bandwidth=4.0, use_fft=True,
return_itc=True, decim=1, n_jobs=1):
"""Compute Time-Frequency Representation (TFR) using DPSS wavelets
Parameters
----------
inst : Epochs | Evoked
The epochs or evoked object.
freqs : ndarray, shape (n_freqs,)
The frequencies in Hz.
n_cycles : float | ndarray, shape (n_freqs,)
The number of cycles globally or for each frequency.
The time-window length is thus T = n_cycles / freq.
time_bandwidth : float, (optional)
Time x (Full) Bandwidth product. Should be >= 2.0.
Choose this along with n_cycles to get desired frequency resolution.
The number of good tapers (least leakage from far away frequencies)
is chosen automatically based on this to floor(time_bandwidth - 1).
Default is 4.0 (3 good tapers).
E.g., With freq = 20 Hz and n_cycles = 10, we get time = 0.5 s.
If time_bandwidth = 4., then frequency smoothing is (4 / time) = 8 Hz.
use_fft : bool
        Use FFT-based convolution if True, temporal convolution otherwise.
Defaults to True.
return_itc : bool
Return intertrial coherence (ITC) as well as averaged power.
Defaults to True.
decim : int
        The decimation factor on the time axis, used to reduce memory usage.
        Note that this is brute-force decimation; no anti-aliasing is done.
Defaults to 1.
n_jobs : int
The number of jobs to run in parallel. Defaults to 1.
Returns
-------
power : AverageTFR
The averaged power.
itc : AverageTFR
The intertrial coherence (ITC). Only returned if return_itc
is True.
See Also
--------
    tfr_morlet, tfr_stockwell
Notes
-----
.. versionadded:: 0.9.0
"""
data = _get_data(inst, return_itc)
picks = pick_types(inst.info, meg=True, eeg=True)
info = pick_info(inst.info, picks)
data = data[:, picks, :]
power, itc = _induced_power_mtm(data, sfreq=info['sfreq'],
frequencies=freqs, n_cycles=n_cycles,
time_bandwidth=time_bandwidth,
use_fft=use_fft, decim=decim,
n_jobs=n_jobs, zero_mean=True,
verbose='INFO')
times = inst.times[::decim].copy()
nave = len(data)
out = AverageTFR(info, power, times, freqs, nave,
                     method='multitaper-power')
if return_itc:
out = (out, AverageTFR(info, itc, times, freqs, nave,
                               method='multitaper-itc'))
return out
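# A minimal sketch of the time/frequency trade-off described above;
# ``epochs`` is a hypothetical Epochs instance. With freq = 20 Hz and
# n_cycles = 10 the analysis window is T = 10 / 20 = 0.5 s, so
# time_bandwidth = 4.0 yields a frequency smoothing of 4.0 / 0.5 = 8 Hz:
#
# >>> freqs = np.arange(5., 50., 2.)
# >>> power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
# ...                             time_bandwidth=4.0, return_itc=True)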
| bsd-3-clause |
Peratham/tensorlib | doc/sphinxext/gen_rst.py | 11 | 38957 | """
Example generation for scikit-learn
Generate the rst files for the examples by iterating over the python
example files.
Files that generate images should start with 'plot'
"""
from __future__ import division, print_function
from time import time
import ast
import os
import re
import shutil
import traceback
import glob
import sys
import gzip
import posixpath
import subprocess
import warnings
from textwrap import dedent
# Try Python 2 first, otherwise load from Python 3
try:
from StringIO import StringIO
import cPickle as pickle
import urllib2 as urllib
from urllib2 import HTTPError, URLError
except ImportError:
from io import StringIO
import pickle
import urllib.request
import urllib.error
import urllib.parse
from urllib.error import HTTPError, URLError
try:
# Python 2 built-in
execfile
except NameError:
def execfile(filename, global_vars=None, local_vars=None):
with open(filename) as f:
code = compile(f.read(), filename, 'exec')
exec(code, global_vars, local_vars)
try:
basestring
except NameError:
basestring = str
import token
import tokenize
import numpy as np
try:
# make sure that the Agg backend is set before importing any
# matplotlib
import matplotlib
matplotlib.use('Agg')
except ImportError:
# this script can be imported by nosetest to find tests to run: we should not
# impose the matplotlib requirement in that case.
pass
from sklearn.externals import joblib
###############################################################################
# A tee object to redirect streams to multiple outputs
class Tee(object):
def __init__(self, file1, file2):
self.file1 = file1
self.file2 = file2
def write(self, data):
self.file1.write(data)
self.file2.write(data)
def flush(self):
self.file1.flush()
self.file2.flush()
###############################################################################
# Documentation link resolver objects
def _get_data(url):
"""Helper function to get data over http or from a local file"""
if url.startswith('http://'):
# Try Python 2, use Python 3 on exception
try:
resp = urllib.urlopen(url)
encoding = resp.headers.dict.get('content-encoding', 'plain')
except AttributeError:
resp = urllib.request.urlopen(url)
encoding = resp.headers.get('content-encoding', 'plain')
data = resp.read()
if encoding == 'plain':
pass
elif encoding == 'gzip':
data = StringIO(data)
data = gzip.GzipFile(fileobj=data).read()
else:
raise RuntimeError('unknown encoding')
else:
with open(url, 'r') as fid:
data = fid.read()
fid.close()
return data
mem = joblib.Memory(cachedir='_build')
get_data = mem.cache(_get_data)
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
def _select_block(str_in, start_tag, end_tag):
"""Select first block delimited by start_tag and end_tag"""
start_pos = str_in.find(start_tag)
if start_pos < 0:
raise ValueError('start_tag not found')
depth = 0
for pos in range(start_pos, len(str_in)):
if str_in[pos] == start_tag:
depth += 1
elif str_in[pos] == end_tag:
depth -= 1
if depth == 0:
break
sel = str_in[start_pos + 1:pos]
return sel
def _parse_dict_recursive(dict_str):
"""Parse a dictionary from the search index"""
dict_out = dict()
pos_last = 0
pos = dict_str.find(':')
while pos >= 0:
key = dict_str[pos_last:pos]
if dict_str[pos + 1] == '[':
# value is a list
pos_tmp = dict_str.find(']', pos + 1)
if pos_tmp < 0:
raise RuntimeError('error when parsing dict')
value = dict_str[pos + 2: pos_tmp].split(',')
# try to convert elements to int
for i in range(len(value)):
try:
value[i] = int(value[i])
except ValueError:
pass
elif dict_str[pos + 1] == '{':
# value is another dictionary
subdict_str = _select_block(dict_str[pos:], '{', '}')
value = _parse_dict_recursive(subdict_str)
pos_tmp = pos + len(subdict_str)
else:
raise ValueError('error when parsing dict: unknown elem')
key = key.strip('"')
if len(key) > 0:
dict_out[key] = value
pos_last = dict_str.find(',', pos_tmp)
if pos_last < 0:
break
pos_last += 1
pos = dict_str.find(':', pos_last)
return dict_out
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
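# A minimal usage sketch for the parser above; the searchindex.js path is
# hypothetical and assumes a locally built Sphinx project:
#
# >>> sindex = get_data('_build/html/searchindex.js')
# >>> filenames, objects = parse_sphinx_searchindex(sindex)
# >>> len(filenames), len(objects)    # number of indexed pages and objects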
class SphinxDocLinkResolver(object):
""" Resolve documentation links using searchindex.js generated by Sphinx
Parameters
----------
doc_url : str
The base URL of the project website.
searchindex : str
Filename of searchindex, relative to doc_url.
extra_modules_test : list of str
List of extra module names to test.
relative : bool
Return relative links (only useful for links to documentation of this
package).
"""
def __init__(self, doc_url, searchindex='searchindex.js',
extra_modules_test=None, relative=False):
self.doc_url = doc_url
self.relative = relative
self._link_cache = {}
self.extra_modules_test = extra_modules_test
self._page_cache = {}
if doc_url.startswith('http://'):
if relative:
raise ValueError('Relative links are only supported for local '
                                 'URLs (doc_url cannot start with "http://")')
searchindex_url = doc_url + '/' + searchindex
else:
searchindex_url = os.path.join(doc_url, searchindex)
# detect if we are using relative links on a Windows system
if os.name.lower() == 'nt' and not doc_url.startswith('http://'):
if not relative:
raise ValueError('You have to use relative=True for the local'
' package on a Windows system.')
self._is_windows = True
else:
self._is_windows = False
# download and initialize the search index
sindex = get_data(searchindex_url)
filenames, objects = parse_sphinx_searchindex(sindex)
self._searchindex = dict(filenames=filenames, objects=objects)
def _get_link(self, cobj):
"""Get a valid link, False if not found"""
fname_idx = None
full_name = cobj['module_short'] + '.' + cobj['name']
if full_name in self._searchindex['objects']:
value = self._searchindex['objects'][full_name]
if isinstance(value, dict):
value = value[next(iter(value.keys()))]
fname_idx = value[0]
elif cobj['module_short'] in self._searchindex['objects']:
value = self._searchindex['objects'][cobj['module_short']]
if cobj['name'] in value.keys():
fname_idx = value[cobj['name']][0]
if fname_idx is not None:
fname = self._searchindex['filenames'][fname_idx] + '.html'
if self._is_windows:
fname = fname.replace('/', '\\')
link = os.path.join(self.doc_url, fname)
else:
link = posixpath.join(self.doc_url, fname)
if hasattr(link, 'decode'):
link = link.decode('utf-8', 'replace')
if link in self._page_cache:
html = self._page_cache[link]
else:
html = get_data(link)
self._page_cache[link] = html
# test if cobj appears in page
comb_names = [cobj['module_short'] + '.' + cobj['name']]
if self.extra_modules_test is not None:
for mod in self.extra_modules_test:
comb_names.append(mod + '.' + cobj['name'])
url = False
if hasattr(html, 'decode'):
# Decode bytes under Python 3
html = html.decode('utf-8', 'replace')
for comb_name in comb_names:
if hasattr(comb_name, 'decode'):
# Decode bytes under Python 3
comb_name = comb_name.decode('utf-8', 'replace')
if comb_name in html:
url = link + u'#' + comb_name
link = url
else:
link = False
return link
def resolve(self, cobj, this_url):
"""Resolve the link to the documentation, returns None if not found
Parameters
----------
cobj : dict
Dict with information about the "code object" for which we are
resolving a link.
            cobj['name'] : function or class name (str)
cobj['module_short'] : shortened module name (str)
cobj['module'] : module name (str)
this_url: str
URL of the current page. Needed to construct relative URLs
(only used if relative=True in constructor).
Returns
-------
link : str | None
The link (URL) to the documentation.
"""
full_name = cobj['module_short'] + '.' + cobj['name']
link = self._link_cache.get(full_name, None)
if link is None:
# we don't have it cached
link = self._get_link(cobj)
# cache it for the future
self._link_cache[full_name] = link
if link is False or link is None:
# failed to resolve
return None
if self.relative:
link = os.path.relpath(link, start=this_url)
if self._is_windows:
                # replace '\' with '/' so it works on the web
link = link.replace('\\', '/')
# for some reason, the relative link goes one directory too high up
link = link[3:]
return link
###############################################################################
rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
"""
plot_rst_template = """
.. _example_%(short_fname)s:
%(docstring)s
%(image_list)s
%(stdout)s
**Python source code:** :download:`%(fname)s <%(fname)s>`
.. literalinclude:: %(fname)s
:lines: %(end_row)s-
**Total running time of the example:** %(time_elapsed) .2f seconds
(%(time_m) .0f minutes %(time_s) .2f seconds)
"""
# The following strings are used when we have several pictures: we use
# an html div tag that our CSS uses to turn the lists into horizontal
# lists.
HLIST_HEADER = """
.. rst-class:: horizontal
"""
HLIST_IMAGE_TEMPLATE = """
*
.. image:: images/%s
:scale: 47
"""
SINGLE_IMAGE = """
.. image:: images/%s
:align: center
"""
# The following dictionary contains the information used to create the
# thumbnails for the front page of the scikit-learn home page.
# key: first image in set
# values: (number of plot in set, height of thumbnail)
carousel_thumbs = {'plot_single_localization_001.png': (1, 250),
'plot_multiple_localization_001.png': (1, 250),
'plot_overfeat_layer1_filters_001.png': (1, 250),
'plot_mnist_generator_001.png': (1, 250),
'plot_asirra_dataset_001.png': (1, 250),
}
def extract_docstring(filename, ignore_heading=False):
""" Extract a module-level docstring, if any
"""
lines = open(filename).readlines()
start_row = 0
if lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
docstring = ''
first_par = ''
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
for tok_type, tok_content, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif tok_type == 'STRING':
docstring = eval(tok_content)
# If the docstring is formatted with several paragraphs, extract
# the first one:
paragraphs = '\n'.join(
line.rstrip() for line
in docstring.split('\n')).split('\n\n')
if paragraphs:
if ignore_heading:
if len(paragraphs) > 1:
first_par = re.sub('\n', ' ', paragraphs[1])
first_par = ((first_par[:95] + '...')
if len(first_par) > 95 else first_par)
else:
raise ValueError("Docstring not found by gallery",
"Please check your example's layout",
" and make sure it's correct")
else:
first_par = paragraphs[0]
break
return docstring, first_par, erow + 1 + start_row
def generate_example_rst(app):
""" Generate the list of examples, as well as the contents of
examples.
"""
root_dir = os.path.join(app.builder.srcdir, 'auto_examples')
example_dir = os.path.abspath(os.path.join(app.builder.srcdir, '..',
'examples'))
generated_dir = os.path.abspath(os.path.join(app.builder.srcdir,
'modules', 'generated'))
try:
plot_gallery = eval(app.builder.config.plot_gallery)
except TypeError:
plot_gallery = bool(app.builder.config.plot_gallery)
if not os.path.exists(example_dir):
os.makedirs(example_dir)
if not os.path.exists(root_dir):
os.makedirs(root_dir)
if not os.path.exists(generated_dir):
os.makedirs(generated_dir)
# we create an index.rst with all examples
fhindex = open(os.path.join(root_dir, 'index.rst'), 'w')
# Note: The sidebar button has been removed from the examples page for now
# due to how it messes up the layout. Will be fixed at a later point
fhindex.write("""\
.. raw:: html
<style type="text/css">
div#sidebarbutton {
/* hide the sidebar collapser, while ensuring vertical arrangement */
width: 0px;
overflow: hidden;
}
</style>
Examples
========
.. _examples-index:
""")
# Here we don't use an os.walk, but we recurse only twice: flat is
# better than nested.
seen_backrefs = set()
generate_dir_rst('.', fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
for dir in sorted(os.listdir(example_dir)):
if os.path.isdir(os.path.join(example_dir, dir)):
generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs)
fhindex.flush()
def extract_line_count(filename, target_dir):
# Extract the line count of a file
example_file = os.path.join(target_dir, filename)
lines = open(example_file).readlines()
start_row = 0
if lines and lines[0].startswith('#!'):
lines.pop(0)
start_row = 1
line_iterator = iter(lines)
tokens = tokenize.generate_tokens(lambda: next(line_iterator))
check_docstring = True
erow_docstring = 0
for tok_type, _, _, (erow, _), _ in tokens:
tok_type = token.tok_name[tok_type]
if tok_type in ('NEWLINE', 'COMMENT', 'NL', 'INDENT', 'DEDENT'):
continue
elif ((tok_type == 'STRING') and check_docstring):
erow_docstring = erow
check_docstring = False
return erow_docstring+1+start_row, erow+1+start_row
def line_count_sort(file_list, target_dir):
# Sort the list of examples by line-count
new_list = [x for x in file_list if x.endswith('.py')]
unsorted = np.zeros(shape=(len(new_list), 2))
unsorted = unsorted.astype(np.object)
for count, exmpl in enumerate(new_list):
docstr_lines, total_lines = extract_line_count(exmpl, target_dir)
unsorted[count][1] = total_lines - docstr_lines
unsorted[count][0] = exmpl
index = np.lexsort((unsorted[:, 0].astype(np.str),
unsorted[:, 1].astype(np.float)))
if not len(unsorted):
return []
return np.array(unsorted[index][:, 0]).tolist()
def _thumbnail_div(subdir, full_dir, fname, snippet):
"""Generates RST to place a thumbnail in a gallery"""
thumb = os.path.join(full_dir, 'images', 'thumb', fname[:-3] + '.png')
link_name = os.path.join(full_dir, fname).replace(os.path.sep, '_')
ref_name = os.path.join(subdir, fname).replace(os.path.sep, '_')
if ref_name.startswith('._'):
ref_name = ref_name[2:]
out = []
out.append("""
.. raw:: html
<div class="thumbnailContainer">
<div class="docstringWrapper">
""")
out.append('.. figure:: %s\n' % thumb)
if link_name.startswith('._'):
link_name = link_name[2:]
if full_dir != '.':
out.append(' :target: ./%s/%s.html\n\n' % (full_dir, fname[:-3]))
else:
out.append(' :target: ./%s.html\n\n' % link_name[:-3])
out.append(""" :ref:`example_%s`
.. raw:: html
<p>%s
</p></div>
</div>
""" % (ref_name, snippet))
return ''.join(out)
def generate_dir_rst(dir, fhindex, example_dir, root_dir, plot_gallery, seen_backrefs):
""" Generate the rst file for an example directory.
"""
if not dir == '.':
target_dir = os.path.join(root_dir, dir)
src_dir = os.path.join(example_dir, dir)
else:
target_dir = root_dir
src_dir = example_dir
if not os.path.exists(os.path.join(src_dir, 'README.txt')):
raise ValueError('Example directory %s does not have a README.txt' %
src_dir)
fhindex.write("""
%s
""" % open(os.path.join(src_dir, 'README.txt')).read())
if not os.path.exists(target_dir):
os.makedirs(target_dir)
sorted_listdir = line_count_sort(os.listdir(src_dir),
src_dir)
if not os.path.exists(os.path.join(dir, 'images', 'thumb')):
os.makedirs(os.path.join(dir, 'images', 'thumb'))
for fname in sorted_listdir:
if fname.endswith('py'):
backrefs = generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery)
new_fname = os.path.join(src_dir, fname)
_, snippet, _ = extract_docstring(new_fname, True)
fhindex.write(_thumbnail_div(dir, dir, fname, snippet))
fhindex.write("""
.. toctree::
:hidden:
%s/%s
""" % (dir, fname[:-3]))
for backref in backrefs:
include_path = os.path.join(root_dir, '../modules/generated/%s.examples' % backref)
seen = backref in seen_backrefs
with open(include_path, 'a' if seen else 'w') as ex_file:
if not seen:
# heading
print(file=ex_file)
print('Examples using ``%s``' % backref, file=ex_file)
print('-----------------%s--' % ('-' * len(backref)),
file=ex_file)
print(file=ex_file)
rel_dir = os.path.join('../../auto_examples', dir)
ex_file.write(_thumbnail_div(dir, rel_dir, fname, snippet))
seen_backrefs.add(backref)
fhindex.write("""
.. raw:: html
<div class="clearer"></div>
""") # clear at the end of the section
# modules for which we embed links into example code
DOCMODULES = ['sklearn', 'matplotlib', 'numpy', 'scipy']
def make_thumbnail(in_fname, out_fname, width, height):
"""Make a thumbnail with the same aspect ratio centered in an
image with a given width and height
"""
# local import to avoid testing dependency on PIL:
try:
from PIL import Image
except ImportError:
import Image
img = Image.open(in_fname)
width_in, height_in = img.size
scale_w = width / float(width_in)
scale_h = height / float(height_in)
if height_in * scale_w <= height:
scale = scale_w
else:
scale = scale_h
width_sc = int(round(scale * width_in))
height_sc = int(round(scale * height_in))
# resize the image
img.thumbnail((width_sc, height_sc), Image.ANTIALIAS)
# insert centered
thumb = Image.new('RGB', (width, height), (255, 255, 255))
pos_insert = ((width - width_sc) // 2, (height - height_sc) // 2)
thumb.paste(img, pos_insert)
thumb.save(out_fname)
# Use optipng to perform lossless compression on the resized image if
# software is installed
if os.environ.get('SKLEARN_DOC_OPTIPNG', False):
try:
subprocess.call(["optipng", "-quiet", "-o", "9", out_fname])
except Exception:
warnings.warn('Install optipng to reduce the size of the generated images')
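# A minimal sketch of how make_thumbnail is typically called; the paths are
# hypothetical. The source image is scaled to fit inside the requested box
# and pasted centered on a white background:
#
# >>> make_thumbnail('auto_examples/images/plot_example_001.png',
# ...                'auto_examples/images/thumb/plot_example.png',
# ...                width=400, height=280)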
def get_short_module_name(module_name, obj_name):
""" Get the shortest possible module name """
parts = module_name.split('.')
short_name = module_name
for i in range(len(parts) - 1, 0, -1):
short_name = '.'.join(parts[:i])
try:
exec('from %s import %s' % (short_name, obj_name))
except ImportError:
# get the last working module name
short_name = '.'.join(parts[:(i + 1)])
break
return short_name
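# A minimal sketch of the shortening performed above; the result depends on
# what the installed package re-exports, so the value shown is indicative:
#
# >>> get_short_module_name('sklearn.cluster.mean_shift_', 'MeanShift')
# 'sklearn.cluster'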
class NameFinder(ast.NodeVisitor):
"""Finds the longest form of variable names and their imports in code
Only retains names from imported modules.
"""
def __init__(self):
super(NameFinder, self).__init__()
self.imported_names = {}
self.accessed_names = set()
def visit_Import(self, node, prefix=''):
for alias in node.names:
local_name = alias.asname or alias.name
self.imported_names[local_name] = prefix + alias.name
def visit_ImportFrom(self, node):
self.visit_Import(node, node.module + '.')
def visit_Name(self, node):
self.accessed_names.add(node.id)
def visit_Attribute(self, node):
attrs = []
while isinstance(node, ast.Attribute):
attrs.append(node.attr)
node = node.value
if isinstance(node, ast.Name):
# This is a.b, not e.g. a().b
attrs.append(node.id)
self.accessed_names.add('.'.join(reversed(attrs)))
else:
# need to get a in a().b
self.visit(node)
def get_mapping(self):
for name in self.accessed_names:
local_name = name.split('.', 1)[0]
remainder = name[len(local_name):]
if local_name in self.imported_names:
# Join import path to relative path
full_name = self.imported_names[local_name] + remainder
yield name, full_name
def identify_names(code):
"""Builds a codeobj summary by identifying and resovles used names
>>> code = '''
... from a.b import c
... import d as e
... print(c)
... e.HelloWorld().f.g
... '''
>>> for name, o in sorted(identify_names(code).items()):
... print(name, o['name'], o['module'], o['module_short'])
c c a.b a.b
e.HelloWorld HelloWorld d d
"""
finder = NameFinder()
finder.visit(ast.parse(code))
example_code_obj = {}
for name, full_name in finder.get_mapping():
# name is as written in file (e.g. np.asarray)
# full_name includes resolved import path (e.g. numpy.asarray)
module, attribute = full_name.rsplit('.', 1)
# get shortened module name
module_short = get_short_module_name(module, attribute)
cobj = {'name': attribute, 'module': module,
'module_short': module_short}
example_code_obj[name] = cobj
return example_code_obj
def generate_file_rst(fname, target_dir, src_dir, root_dir, plot_gallery):
""" Generate the rst file for a given example.
Returns the set of sklearn functions/classes imported in the example.
"""
base_image_name = os.path.splitext(fname)[0]
image_fname = '%s_%%03d.png' % base_image_name
this_template = rst_template
last_dir = os.path.split(src_dir)[-1]
# to avoid leading . in file names, and wrong names in links
if last_dir == '.' or last_dir == 'examples':
last_dir = ''
else:
last_dir += '_'
short_fname = last_dir + fname
src_file = os.path.join(src_dir, fname)
example_file = os.path.join(target_dir, fname)
shutil.copyfile(src_file, example_file)
# The following is a list containing all the figure names
figure_list = []
image_dir = os.path.join(target_dir, 'images')
thumb_dir = os.path.join(image_dir, 'thumb')
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if not os.path.exists(thumb_dir):
os.makedirs(thumb_dir)
image_path = os.path.join(image_dir, image_fname)
stdout_path = os.path.join(image_dir,
'stdout_%s.txt' % base_image_name)
time_path = os.path.join(image_dir,
'time_%s.txt' % base_image_name)
thumb_file = os.path.join(thumb_dir, fname[:-3] + '.png')
time_elapsed = 0
time_m = 0
time_s = 0
if plot_gallery and fname.startswith('plot'):
# generate the plot as png image if file name
# starts with plot and if it is more recent than an
# existing image.
first_image_file = image_path % 1
if os.path.exists(stdout_path):
stdout = open(stdout_path).read()
else:
stdout = ''
if os.path.exists(time_path):
time_elapsed = float(open(time_path).read())
if not os.path.exists(first_image_file) or \
os.stat(first_image_file).st_mtime <= os.stat(src_file).st_mtime:
# We need to execute the code
print('plotting %s' % fname)
t0 = time()
import matplotlib.pyplot as plt
plt.close('all')
cwd = os.getcwd()
try:
# First CD in the original example dir, so that any file
# created by the example get created in this directory
orig_stdout = sys.stdout
os.chdir(os.path.dirname(src_file))
my_buffer = StringIO()
my_stdout = Tee(sys.stdout, my_buffer)
sys.stdout = my_stdout
my_globals = {'pl': plt}
execfile(os.path.basename(src_file), my_globals)
time_elapsed = time() - t0
sys.stdout = orig_stdout
my_stdout = my_buffer.getvalue()
if '__doc__' in my_globals:
# The __doc__ is often printed in the example, we
                    # don't wish to echo it
my_stdout = my_stdout.replace(
my_globals['__doc__'],
'')
my_stdout = my_stdout.strip()
if my_stdout:
stdout = '**Script output**::\n\n %s\n\n' % (
'\n '.join(my_stdout.split('\n')))
open(stdout_path, 'w').write(stdout)
open(time_path, 'w').write('%f' % time_elapsed)
os.chdir(cwd)
# In order to save every figure we have two solutions :
# * iterate from 1 to infinity and call plt.fignum_exists(n)
# (this requires the figures to be numbered
# incrementally: 1, 2, 3 and not 1, 2, 5)
# * iterate over [fig_mngr.num for fig_mngr in
# matplotlib._pylab_helpers.Gcf.get_all_fig_managers()]
fig_managers = matplotlib._pylab_helpers.Gcf.get_all_fig_managers()
for fig_mngr in fig_managers:
# Set the fig_num figure as the current figure as we can't
# save a figure that's not the current figure.
plt.figure(fig_mngr.num)
plt.savefig(image_path % fig_mngr.num)
figure_list.append(image_fname % fig_mngr.num)
except:
print(80 * '_')
print('%s is not compiling:' % fname)
traceback.print_exc()
print(80 * '_')
finally:
os.chdir(cwd)
sys.stdout = orig_stdout
print(" - time elapsed : %.2g sec" % time_elapsed)
else:
figure_list = [f[len(image_dir):]
for f in glob.glob(image_path.replace("%03d",
'[0-9][0-9][0-9]'))]
figure_list.sort()
# generate thumb file
this_template = plot_rst_template
car_thumb_path = os.path.join(os.path.split(root_dir)[0], '_build/html/dev/_images/')
        # Note: normally, make_thumbnail is used to write to the path contained in `thumb_file`
        # which is within `auto_examples/../images/thumbs` depending on the example.
        # Because the carousel has different dimensions than those of the examples gallery,
        # I did not simply reuse them all as some contained whitespace due to their default gallery
        # thumbnail size. Below, for a few cases, separate thumbnails are created (the originals can't
        # just be overwritten with the carousel dimensions as it messes up the examples gallery layout).
        # The special carousel thumbnails are written directly to
        # _build/html/dev/_images/,
        # as for some reason unknown to me, Sphinx refuses to copy my 'extra' thumbnails from the
        # auto examples gallery to the _build folder. This works fine as is, but it would be cleaner to
        # have it happen with the rest. Ideally they should be written to 'thumb_file' as well, and then
        # copied to the _images folder during the `Copying Downloadable Files` step like the rest.
if not os.path.exists(car_thumb_path):
os.makedirs(car_thumb_path)
if os.path.exists(first_image_file):
# We generate extra special thumbnails for the carousel
carousel_tfile = os.path.join(car_thumb_path, fname[:-3] + '_carousel.png')
first_img = image_fname % 1
if first_img in carousel_thumbs:
make_thumbnail((image_path % carousel_thumbs[first_img][0]),
carousel_tfile, carousel_thumbs[first_img][1], 190)
make_thumbnail(first_image_file, thumb_file, 400, 280)
if not os.path.exists(thumb_file):
# create something to replace the thumbnail
make_thumbnail('images/no_image.png', thumb_file, 200, 140)
docstring, short_desc, end_row = extract_docstring(example_file)
# Depending on whether we have one or more figures, we're using a
# horizontal list or a single rst call to 'image'.
if len(figure_list) == 1:
figure_name = figure_list[0]
image_list = SINGLE_IMAGE % figure_name.lstrip('/')
else:
image_list = HLIST_HEADER
for figure_name in figure_list:
image_list += HLIST_IMAGE_TEMPLATE % figure_name.lstrip('/')
time_m, time_s = divmod(time_elapsed, 60)
f = open(os.path.join(target_dir, fname[:-2] + 'rst'), 'w')
f.write(this_template % locals())
f.flush()
# save variables so we can later add links to the documentation
example_code_obj = identify_names(open(example_file).read())
if example_code_obj:
codeobj_fname = example_file[:-3] + '_codeobj.pickle'
with open(codeobj_fname, 'wb') as fid:
pickle.dump(example_code_obj, fid, pickle.HIGHEST_PROTOCOL)
backrefs = set('{module_short}.{name}'.format(**entry)
for entry in example_code_obj.values()
if entry['module'].startswith('sklearn'))
return backrefs
def embed_code_links(app, exception):
"""Embed hyperlinks to documentation into example code"""
try:
if exception is not None:
return
print('Embedding documentation hyperlinks in examples..')
# Add resolvers for the packages for which we want to show links
doc_resolvers = {}
doc_resolvers['sklearn'] = SphinxDocLinkResolver(app.builder.outdir,
relative=True)
doc_resolvers['matplotlib'] = SphinxDocLinkResolver(
'http://matplotlib.org')
doc_resolvers['numpy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/numpy-1.6.0')
doc_resolvers['scipy'] = SphinxDocLinkResolver(
'http://docs.scipy.org/doc/scipy-0.11.0/reference')
example_dir = os.path.join(app.builder.srcdir, 'auto_examples')
html_example_dir = os.path.abspath(os.path.join(app.builder.outdir,
'auto_examples'))
# patterns for replacement
link_pattern = '<a href="%s">%s</a>'
orig_pattern = '<span class="n">%s</span>'
period = '<span class="o">.</span>'
for dirpath, _, filenames in os.walk(html_example_dir):
for fname in filenames:
print('\tprocessing: %s' % fname)
full_fname = os.path.join(html_example_dir, dirpath, fname)
subpath = dirpath[len(html_example_dir) + 1:]
pickle_fname = os.path.join(example_dir, subpath,
fname[:-5] + '_codeobj.pickle')
if os.path.exists(pickle_fname):
# we have a pickle file with the objects to embed links for
with open(pickle_fname, 'rb') as fid:
example_code_obj = pickle.load(fid)
fid.close()
str_repl = {}
# generate replacement strings with the links
for name, cobj in example_code_obj.items():
this_module = cobj['module'].split('.')[0]
if this_module not in doc_resolvers:
continue
link = doc_resolvers[this_module].resolve(cobj,
full_fname)
if link is not None:
parts = name.split('.')
name_html = period.join(orig_pattern % part
for part in parts)
str_repl[name_html] = link_pattern % (link, name_html)
# do the replacement in the html file
# ensure greediness
names = sorted(str_repl, key=len, reverse=True)
expr = re.compile(r'(?<!\.)\b' + # don't follow . or word
'|'.join(re.escape(name)
for name in names))
def substitute_link(match):
return str_repl[match.group()]
if len(str_repl) > 0:
with open(full_fname, 'rb') as fid:
lines_in = fid.readlines()
with open(full_fname, 'wb') as fid:
for line in lines_in:
line = line.decode('utf-8')
line = expr.sub(substitute_link, line)
fid.write(line.encode('utf-8'))
except HTTPError as e:
print("The following HTTP Error has occurred:\n")
print(e.code)
except URLError as e:
print("\n...\n"
"Warning: Embedding the documentation hyperlinks requires "
"internet access.\nPlease check your network connection.\n"
"Unable to continue embedding due to a URL Error: \n")
print(e.args)
print('[done]')
def setup(app):
app.connect('builder-inited', generate_example_rst)
app.add_config_value('plot_gallery', True, 'html')
# embed links after build is finished
app.connect('build-finished', embed_code_links)
# Sphinx hack: sphinx copies generated images to the build directory
# each time the docs are made. If the desired image name already
# exists, it appends a digit to prevent overwrites. The problem is,
# the directory is never cleared. This means that each time you build
# the docs, the number of images in the directory grows.
#
# This question has been asked on the sphinx development list, but there
# was no response: http://osdir.com/ml/sphinx-dev/2011-02/msg00123.html
#
# The following is a hack that prevents this behavior by clearing the
# image build directory each time the docs are built. If sphinx
# changes their layout between versions, this will not work (though
# it should probably not cause a crash). Tested successfully
# on Sphinx 1.0.7
build_image_dir = '_build/html/_images'
if os.path.exists(build_image_dir):
filelist = os.listdir(build_image_dir)
for filename in filelist:
if filename.endswith('png'):
os.remove(os.path.join(build_image_dir, filename))
def setup_module():
# HACK: Stop nosetests running setup() above
pass
| bsd-3-clause |
JackKelly/neuralnilm_prototype | scripts/e129.py | 2 | 5359 | from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, SubsampleLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer
from lasagne.updates import adagrad, nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
"""
def exp_a(name):
source = RealApplianceSource(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[300, 500, 200, 2500, 2400],
on_power_thresholds=[20, 20, 20, 20, 20],
max_input_power=None,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-06-01", "2014-07-01"),
seq_length=1500,
output_one_appliance=False,
boolean_targets=False,
train_buildings=[1],
validation_buildings=[1],
skip_probability=0.7,
n_seq_per_batch=50
)
net = Net(
experiment_name=name,
source=source,
save_plot_interval=SAVE_PLOT_INTERVAL,
loss_function=crossentropy,
updates=partial(nesterov_momentum, learning_rate=1.0),
layers_config=[
{
'type': BLSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(25),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
],
layer_changes={
2001: {
'remove_from': -3,
'new_layers':
[
{
'type': BLSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
},
4001: {
'remove_from': -3,
'new_layers':
[
{
'type': BLSTMLayer,
'num_units': 50,
'W_in_to_cell': Uniform(5),
'gradient_steps': GRADIENT_STEPS,
'peepholes': False
},
{
'type': DenseLayer,
'num_units': source.n_outputs,
'nonlinearity': sigmoid
}
]
}
}
)
return net
def init_experiment(experiment):
full_exp_name = NAME + experiment
func_call = 'exp_{:s}(full_exp_name)'.format(experiment)
print("***********************************")
print("Preparing", full_exp_name, "...")
net = eval(func_call)
return net
def main():
for experiment in list('a'):
full_exp_name = NAME + experiment
path = os.path.join(PATH, full_exp_name)
try:
net = init_experiment(experiment)
run_experiment(net, path, epochs=5000)
except KeyboardInterrupt:
break
except TrainingError as e:
print("EXCEPTION:", e)
if __name__ == "__main__":
main()
| mit |
jseabold/statsmodels | examples/python/contrasts.py | 5 | 9020 | # coding: utf-8
# DO NOT EDIT
# Autogenerated from the notebook contrasts.ipynb.
# Edit the notebook and then sync the output with this file.
#
# flake8: noqa
# DO NOT EDIT
# # Contrasts Overview
import numpy as np
import statsmodels.api as sm
# This document is based heavily on this excellent resource from UCLA
# http://www.ats.ucla.edu/stat/r/library/contrast_coding.htm
# A categorical variable of K categories, or levels, usually enters a
# regression as a sequence of K-1 dummy variables. This amounts to a linear
# hypothesis on the level means. That is, each test statistic for these
# variables amounts to testing whether the mean for that level is
# statistically significantly different from the mean of the base category.
# This dummy coding is called Treatment coding in R parlance, and we will
# follow this convention. There are, however, different coding methods that
# amount to different sets of linear hypotheses.
#
# In fact, the dummy coding is not technically a contrast coding. This is
# because the dummy variables add to one and are not functionally
# independent of the model's intercept. On the other hand, a set of
# *contrasts* for a categorical variable with `k` levels is a set of `k-1`
# functionally independent linear combinations of the factor level means
# that are also independent of the sum of the dummy variables. The dummy
# coding is not wrong *per se*. It captures all of the coefficients, but it
# complicates matters when the model assumes independence of the
# coefficients such as in ANOVA. Linear regression models do not assume
# independence of the coefficients and thus dummy coding is often the only
# coding that is taught in this context.
#
# To have a look at the contrast matrices in Patsy, we will use data from
# UCLA ATS. First let's load the data.
# #### Example Data
import pandas as pd
url = 'https://stats.idre.ucla.edu/stat/data/hsb2.csv'
hsb2 = pd.read_table(url, delimiter=",")
hsb2.head(10)
# It will be instructive to look at the mean of the dependent variable,
# write, for each level of race (1 = Hispanic, 2 = Asian, 3 = African
# American and 4 = Caucasian).
hsb2.groupby('race')['write'].mean()
# #### Treatment (Dummy) Coding
# Dummy coding is likely the most well known coding scheme. It compares
# each level of the categorical variable to a base reference level. The base
# reference level is the value of the intercept. It is the default contrast
# in Patsy for unordered categorical factors. The Treatment contrast matrix
# for race would be
from patsy.contrasts import Treatment
levels = [1, 2, 3, 4]
contrast = Treatment(reference=0).code_without_intercept(levels)
print(contrast.matrix)
# Here we used `reference=0`, which implies that the first level,
# Hispanic, is the reference category against which the other level effects
# are measured. As mentioned above, the columns do not sum to zero and are
# thus not independent of the intercept. To be explicit, let's look at how
# this would encode the `race` variable.
hsb2.race.head(10)
print(contrast.matrix[hsb2.race - 1, :][:20])
sm.categorical(hsb2.race.values)
# This is a bit of a trick, as the `race` category conveniently maps to
# zero-based indices. If it does not, this conversion happens under the
# hood, so this will not work in general but nonetheless is a useful exercise
# to fix ideas. The below illustrates the output using the three contrasts
# above
from statsmodels.formula.api import ols
mod = ols("write ~ C(race, Treatment)", data=hsb2)
res = mod.fit()
print(res.summary())
# We explicitly gave the contrast for race; however, since Treatment is
# the default, we could have omitted this.
# ### Simple Coding
# Like Treatment Coding, Simple Coding compares each level to a fixed
# reference level. However, with simple coding, the intercept is the grand
# mean of all the levels of the factors. Patsy does not have the Simple
# contrast included, but you can easily define your own contrasts. To do so,
# write a class that contains a code_with_intercept and a
# code_without_intercept method that returns a patsy.contrast.ContrastMatrix
# instance
from patsy.contrasts import ContrastMatrix
def _name_levels(prefix, levels):
return ["[%s%s]" % (prefix, level) for level in levels]
class Simple(object):
def _simple_contrast(self, levels):
nlevels = len(levels)
contr = -1. / nlevels * np.ones((nlevels, nlevels - 1))
contr[1:][np.diag_indices(nlevels - 1)] = (nlevels - 1.) / nlevels
return contr
def code_with_intercept(self, levels):
contrast = np.column_stack((np.ones(len(levels)),
self._simple_contrast(levels)))
return ContrastMatrix(contrast, _name_levels("Simp.", levels))
def code_without_intercept(self, levels):
contrast = self._simple_contrast(levels)
return ContrastMatrix(contrast, _name_levels("Simp.", levels[:-1]))
hsb2.groupby('race')['write'].mean().mean()
contrast = Simple().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Simple)", data=hsb2)
res = mod.fit()
print(res.summary())
# ### Sum (Deviation) Coding
# Sum coding compares the mean of the dependent variable for a given level
# to the overall mean of the dependent variable over all the levels. That
# is, it uses contrasts between each of the first k-1 levels and level k In
# this example, level 1 is compared to all the others, level 2 to all the
# others, and level 3 to all the others.
from patsy.contrasts import Sum
contrast = Sum().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Sum)", data=hsb2)
res = mod.fit()
print(res.summary())
# This corresponds to a parameterization that forces all the coefficients
# to sum to zero. Notice that the intercept here is the grand mean where the
# grand mean is the mean of means of the dependent variable by each level.
hsb2.groupby('race')['write'].mean().mean()
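# As a quick check of this parameterization (the variable name below is only
# for illustration), the effect of the omitted fourth level equals minus the
# sum of the reported effects:
level_means = hsb2.groupby('race')['write'].mean()
print(level_means[4] - level_means.mean())  # deviation of level 4 from the grand mean
print(-res.params[1:].sum())  # the same quantity recovered from the Sum-coded fit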
# ### Backward Difference Coding
# In backward difference coding, the mean of the dependent variable for a
# level is compared with the mean of the dependent variable for the prior
# level. This type of coding may be useful for a nominal or an ordinal
# variable.
from patsy.contrasts import Diff
contrast = Diff().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Diff)", data=hsb2)
res = mod.fit()
print(res.summary())
# For example, here the coefficient on level 1 is the mean of `write` at
# level 2 compared with the mean at level 1. Ie.,
res.params["C(race, Diff)[D.1]"]
hsb2.groupby('race').mean()["write"][2] - hsb2.groupby(
'race').mean()["write"][1]
# ### Helmert Coding
# Our version of Helmert coding is sometimes referred to as Reverse
# Helmert Coding. The mean of the dependent variable for a level is compared
# to the mean of the dependent variable over all previous levels. Hence, the
# name 'reverse' being sometimes applied to differentiate from forward
# Helmert coding. This comparison does not make much sense for a nominal
# variable such as race, but we would use the Helmert contrast like so:
from patsy.contrasts import Helmert
contrast = Helmert().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(race, Helmert)", data=hsb2)
res = mod.fit()
print(res.summary())
# To illustrate, the comparison on level 4 is the mean of the dependent
# variable at the previous three levels taken from the mean at level 4
grouped = hsb2.groupby('race')
grouped.mean()["write"][4] - grouped.mean()["write"][:3].mean()
# As you can see, these are only equal up to a constant. Other versions of
# the Helmert contrast give the actual difference in means. Regardless, the
# hypothesis tests are the same.
k = 4
1. / k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k - 1].mean())
k = 3
1. / k * (grouped.mean()["write"][k] - grouped.mean()["write"][:k - 1].mean())
# ### Orthogonal Polynomial Coding
# The coefficients taken on by polynomial coding for `k=4` levels are the
# linear, quadratic, and cubic trends in the categorical variable. The
# categorical variable here is assumed to be represented by an underlying,
# equally spaced numeric variable. Therefore, this type of encoding is used
# only for ordered categorical variables with equal spacing. In general, the
# polynomial contrast produces polynomials of order `k-1`. Since `race` is
# not an ordered factor variable let's use `read` as an example. First we
# need to create an ordered categorical from `read`.
hsb2['readcat'] = np.asarray(pd.cut(hsb2.read, bins=3))
hsb2.groupby('readcat').mean()['write']
from patsy.contrasts import Poly
levels = hsb2.readcat.unique().tolist()
contrast = Poly().code_without_intercept(levels)
print(contrast.matrix)
mod = ols("write ~ C(readcat, Poly)", data=hsb2)
res = mod.fit()
print(res.summary())
# As you can see, readcat has a significant linear effect on the dependent
# variable `write` but not a significant quadratic or cubic effect.
| bsd-3-clause |
rahul-c1/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 19 | 2844 | """
Testing for mean shift clustering methods
"""
import numpy as np
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
"""Test estimate_bandwidth"""
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
""" Test MeanShift algorithm """
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
cluster_centers = ms.cluster_centers_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_meanshift_predict():
"""Test MeanShift.predict"""
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_unfitted():
"""Non-regression: before fit, there should be not fitted attributes."""
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
"""
Test the bin seeding technique which can be used in the mean shift
algorithm
"""
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.5, 1.5], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
test_bins = get_bin_seeds(X, 0.01, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(test_result) == 6)
| bsd-3-clause |
jingxiang-li/kaggle-yelp | archive/preprocess.py | 1 | 1280 | from __future__ import division, absolute_import
from __future__ import print_function, unicode_literals
import pandas as pd
import numpy as np
from transfer_features import *
# process training data
biz2label = pd.read_csv("rawdata/train.csv", index_col=0)
photo2biz = pd.read_csv("rawdata/train_photo_to_biz_ids.csv", index_col=0)
biz2label.sort_index(inplace=True)
for biz_id, biz_label in biz2label.iterrows():
photo_ids = photo2biz[photo2biz["business_id"] == biz_id].index
batch_size = len(photo_ids)
img_list = ['rawdata/train_photos/' + str(id) + '.jpg' for id in photo_ids]
# pprint(img_list)
out_file = 'features/inception-21k-global/' + str(biz_id) + '.npy'
X = get_features(img_list, 'models/inception-21k/Inception', 9)
np.save(out_file, X)
print(out_file, 'finished!!')
# process test data
photo2biz = pd.read_csv("rawdata/test_photo_to_biz.csv")
photo_ids = photo2biz["photo_id"]
photo_ids = np.unique(photo_ids)
f = open("features/inception-21k-global-test.csv", 'w')
for photo_id in photo_ids:
img_list = ['rawdata/test_photos/' + str(photo_id) + '.jpg']
X = get_features(img_list, 'models/inception-21k/Inception', 9)[0, :]
f.write(str(photo_id) + ',')
f.write(",".join(X.astype(str)) + '\n')
print(photo_id, 'finished!!')
| mit |
zak-k/iris | lib/iris/tests/test_quickplot.py | 4 | 7721 | # (C) British Crown Copyright 2010 - 2016, Met Office
#
# This file is part of Iris.
#
# Iris is free software: you can redistribute it and/or modify it under
# the terms of the GNU Lesser General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Iris is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Iris. If not, see <http://www.gnu.org/licenses/>.
"""
Tests the high-level plotting interface.
"""
from __future__ import (absolute_import, division, print_function)
from six.moves import (filter, input, map, range, zip) # noqa
# import iris tests first so that some things can be initialised before importing anything else
import iris.tests as tests
import iris.tests.test_plot as test_plot
import iris
# Run tests in no graphics mode if matplotlib is not available.
if tests.MPL_AVAILABLE:
import matplotlib.pyplot as plt
import iris.plot as iplt
import iris.quickplot as qplt
# Caches _load_theta so subsequent calls are faster
def cache(fn, cache={}):
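    # The mutable default argument `cache` persists across calls, so it acts as a
    # simple memo table holding a single "result" entry.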
def inner(*args, **kwargs):
key = "result"
if not cache:
cache[key] = fn(*args, **kwargs)
return cache[key]
return inner
@cache
def _load_theta():
path = tests.get_data_path(('PP', 'COLPEX', 'theta_and_orog_subset.pp'))
theta = iris.load_cube(path, 'air_potential_temperature')
# Improve the unit
theta.units = 'K'
return theta
@tests.skip_data
@tests.skip_plot
class TestQuickplotCoordinatesGiven(test_plot.TestPlotCoordinatesGiven):
def setUp(self):
tests.GraphicsTest.setUp(self)
filename = tests.get_data_path(('PP', 'COLPEX', 'theta_and_orog_subset.pp'))
self.cube = test_plot.load_cube_once(filename, 'air_potential_temperature')
self.draw_module = iris.quickplot
self.contourf = test_plot.LambdaStr('iris.quickplot.contourf', lambda cube, *args, **kwargs:
iris.quickplot.contourf(cube, *args, **kwargs))
self.contour = test_plot.LambdaStr('iris.quickplot.contour', lambda cube, *args, **kwargs:
iris.quickplot.contour(cube, *args, **kwargs))
self.points = test_plot.LambdaStr('iris.quickplot.points', lambda cube, *args, **kwargs:
iris.quickplot.points(cube, c=cube.data, *args, **kwargs))
self.plot = test_plot.LambdaStr('iris.quickplot.plot', lambda cube, *args, **kwargs:
iris.quickplot.plot(cube, *args, **kwargs))
self.results = {'yx': (
[self.contourf, ['grid_latitude', 'grid_longitude']],
[self.contourf, ['grid_longitude', 'grid_latitude']],
[self.contour, ['grid_latitude', 'grid_longitude']],
[self.contour, ['grid_longitude', 'grid_latitude']],
[self.points, ['grid_latitude', 'grid_longitude']],
[self.points, ['grid_longitude', 'grid_latitude']],
),
'zx': (
[self.contourf, ['model_level_number', 'grid_longitude']],
[self.contourf, ['grid_longitude', 'model_level_number']],
[self.contour, ['model_level_number', 'grid_longitude']],
[self.contour, ['grid_longitude', 'model_level_number']],
[self.points, ['model_level_number', 'grid_longitude']],
[self.points, ['grid_longitude', 'model_level_number']],
),
'tx': (
[self.contourf, ['time', 'grid_longitude']],
[self.contourf, ['grid_longitude', 'time']],
[self.contour, ['time', 'grid_longitude']],
[self.contour, ['grid_longitude', 'time']],
[self.points, ['time', 'grid_longitude']],
[self.points, ['grid_longitude', 'time']],
),
'x': (
[self.plot, ['grid_longitude']],
),
'y': (
[self.plot, ['grid_latitude']],
),
}
@tests.skip_data
@tests.skip_plot
class TestLabels(tests.GraphicsTest):
def setUp(self):
super(TestLabels, self).setUp()
self.theta = _load_theta()
def _slice(self, coords):
"""Returns the first cube containing the requested coordinates."""
for cube in self.theta.slices(coords):
break
return cube
def _small(self):
# Use a restricted size so we can make out the detail
cube = self._slice(['model_level_number', 'grid_longitude'])
return cube[:5, :5]
def test_contour(self):
qplt.contour(self._small())
self.check_graphic()
qplt.contourf(self._small(), coords=['model_level_number', 'grid_longitude'])
self.check_graphic()
def test_contourf(self):
qplt.contourf(self._small())
cube = self._small()
iplt.orography_at_points(cube)
self.check_graphic()
qplt.contourf(self._small(), coords=['model_level_number', 'grid_longitude'])
self.check_graphic()
qplt.contourf(self._small(), coords=['grid_longitude', 'model_level_number'])
self.check_graphic()
def test_contourf_nameless(self):
cube = self._small()
cube.standard_name = None
qplt.contourf(cube, coords=['grid_longitude', 'model_level_number'])
self.check_graphic()
def test_pcolor(self):
qplt.pcolor(self._small())
self.check_graphic()
def test_pcolormesh(self):
qplt.pcolormesh(self._small())
#cube = self._small()
#iplt.orography_at_bounds(cube)
self.check_graphic()
def test_map(self):
cube = self._slice(['grid_latitude', 'grid_longitude'])
qplt.contour(cube)
self.check_graphic()
# check that the result of adding 360 to the data is *almost* identically the same result
lon = cube.coord('grid_longitude')
lon.points = lon.points + 360
qplt.contour(cube)
self.check_graphic()
def test_alignment(self):
cube = self._small()
qplt.contourf(cube)
#qplt.outline(cube)
qplt.points(cube)
self.check_graphic()
@tests.skip_data
@tests.skip_plot
class TestTimeReferenceUnitsLabels(tests.GraphicsTest):
def setUp(self):
super(TestTimeReferenceUnitsLabels, self).setUp()
path = tests.get_data_path(('PP', 'aPProt1', 'rotatedMHtimecube.pp'))
self.cube = iris.load_cube(path)[:, 0, 0]
def test_reference_time_units(self):
# units should not be displayed for a reference time
qplt.plot(self.cube.coord('time'), self.cube)
plt.gcf().autofmt_xdate()
self.check_graphic()
def test_not_reference_time_units(self):
# units should be displayed for other time coordinates
qplt.plot(self.cube.coord('forecast_period'), self.cube)
self.check_graphic()
if __name__ == "__main__":
tests.main()
| gpl-3.0 |
Chaparqanatoos/kaggle-knowledge | src/main/python/titanic.py | 1 | 2524 | import pandas as pd
import numpy as np
def pre_process(df):
df['Gender'] = df['Sex'].map({'female': 0, 'male': 1}).astype(int)
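    # Impute missing ages with the median age of each (gender, passenger class) cell.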
median_ages = np.zeros((2, 3))
for i in range(0, 2):
for j in range(0, 3):
median_ages[i, j] = df[(df['Gender'] == i) & (df['Pclass'] == j + 1)]['Age'].dropna().median()
df['AgeFill'] = df['Age']
for i in range(0, 2):
for j in range(0, 3):
df.loc[ (df.Age.isnull()) & (df.Gender == i) & (df.Pclass == j + 1), 'AgeFill'] = median_ages[i, j]
df['AgeIsNull'] = pd.isnull(df.Age).astype(int)
df['FamilySize'] = df['SibSp'] + df['Parch']
df['Age*Class'] = df.AgeFill * df.Pclass
df = df.drop(['Name', 'Sex', 'Ticket', 'Cabin', 'Embarked'], axis=1)
df = df.drop(['Age'], axis=1)
df.loc[df.Fare.isnull(), 'Fare'] = df['Fare'].dropna().median()
return df.values
# For .read_csv, always use header=0 when you know row 0 is the header row
train_df = pd.read_csv('/home/namukhtar/Datasets/kaggle/titanic/train.csv', header=0)
# print train_df.head(10)
train_data = pre_process(train_df)
test_df = pd.read_csv('/home/namukhtar/Datasets/kaggle/titanic/test.csv', header=0)
# print test_df.head(10)
test_data = pre_process(test_df)
# Import the random forest package
from sklearn.ensemble import RandomForestClassifier
# Create the random forest object which will include all the parameters
# for the fit
forest = RandomForestClassifier(n_estimators=100)
# Fit the training data to the Survived labels and create the decision trees
forest = forest.fit(train_data[0::, 2::], train_data[0::, 1])
# Take the same decision trees and run it on the test data
output = forest.predict(test_data[0::, 1::])
out_df = pd.DataFrame({'PassengerId' : test_data[0::, 0], 'Survived' : output})
out_df["PassengerId"] = out_df["PassengerId"].astype("int")
out_df["Survived"] = out_df["Survived"].astype("int")
out_df.to_csv('titanic-randomforest.csv', index=False)
from sklearn import svm
svc = svm.SVC(kernel='linear')
svc = svc.fit(train_data[0::, 2::], train_data[0::, 1])
# Take the same decision trees and run it on the test data
output = svc.predict(test_data[0::, 1::])
out_df = pd.DataFrame({'PassengerId' : test_data[0::, 0], 'Survived' : output})
out_df["PassengerId"] = out_df["PassengerId"].astype("int")
out_df["Survived"] = out_df["Survived"].astype("int")
out_df.to_csv('titanic-svm.csv', index=False)
| apache-2.0 |
mlperf/training_results_v0.7 | Fujitsu/benchmarks/resnet/implementations/implementation_open/mxnet/example/deep-embedded-clustering/dec.py | 6 | 7768 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
from __future__ import print_function
import os
import logging
import numpy as np
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
import mxnet as mx
import data
import model
from autoencoder import AutoEncoderModel
from solver import Solver, Monitor
def cluster_acc(Y_pred, Y):
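    # Unsupervised clustering accuracy: build the cluster-vs-label contingency
    # table w, then use the Hungarian algorithm (linear_assignment) to pick the
    # cluster-to-label mapping that matches the largest number of samples.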
from sklearn.utils.linear_assignment_ import linear_assignment
assert Y_pred.size == Y.size
D = max(Y_pred.max(), Y.max())+1
w = np.zeros((D, D), dtype=np.int64)
for i in range(Y_pred.size):
w[Y_pred[i], int(Y[i])] += 1
ind = linear_assignment(w.max() - w)
return sum([w[i, j] for i, j in ind])*1.0/Y_pred.size, w
class DECModel(model.MXModel):
class DECLoss(mx.operator.NumpyOp):
def __init__(self, num_centers, alpha):
super(DECModel.DECLoss, self).__init__(need_top_grad=False)
self.num_centers = num_centers
self.alpha = alpha
def forward(self, in_data, out_data):
z = in_data[0]
mu = in_data[1]
q = out_data[0]
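            # Soft assignment: similarity of each embedded point z_i to each
            # cluster centre mu_j under a Student's t kernel with `alpha`
            # degrees of freedom, normalised so that every row of q sums to one.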
self.mask = 1.0/(1.0+cdist(z, mu)**2/self.alpha)
q[:] = self.mask**((self.alpha+1.0)/2.0)
q[:] = (q.T/q.sum(axis=1)).T
def backward(self, out_grad, in_data, out_data, in_grad):
q = out_data[0]
z = in_data[0]
mu = in_data[1]
p = in_data[2]
dz = in_grad[0]
dmu = in_grad[1]
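            # Gradients of the clustering loss with respect to the embeddings z
            # and the cluster centres mu, driven by the gap between the target
            # distribution p and the soft assignment q.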
self.mask *= (self.alpha+1.0)/self.alpha*(p-q)
dz[:] = (z.T*self.mask.sum(axis=1)).T - self.mask.dot(mu)
dmu[:] = (mu.T*self.mask.sum(axis=0)).T - self.mask.T.dot(z)
def infer_shape(self, in_shape):
assert len(in_shape) == 3
assert len(in_shape[0]) == 2
input_shape = in_shape[0]
label_shape = (input_shape[0], self.num_centers)
mu_shape = (self.num_centers, input_shape[1])
out_shape = (input_shape[0], self.num_centers)
return [input_shape, mu_shape, label_shape], [out_shape]
def list_arguments(self):
return ['data', 'mu', 'label']
def setup(self, X, num_centers, alpha, save_to='dec_model'):
sep = X.shape[0]*9//10
X_train = X[:sep]
X_val = X[sep:]
ae_model = AutoEncoderModel(self.xpu, [X.shape[1], 500, 500, 2000, 10], pt_dropout=0.2)
if not os.path.exists(save_to+'_pt.arg'):
ae_model.layerwise_pretrain(X_train, 256, 50000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1))
ae_model.finetune(X_train, 256, 100000, 'sgd', l_rate=0.1, decay=0.0,
lr_scheduler=mx.lr_scheduler.FactorScheduler(20000, 0.1))
ae_model.save(save_to+'_pt.arg')
logging.log(logging.INFO, "Autoencoder Training error: %f"%ae_model.eval(X_train))
logging.log(logging.INFO, "Autoencoder Validation error: %f"%ae_model.eval(X_val))
else:
ae_model.load(save_to+'_pt.arg')
self.ae_model = ae_model
self.dec_op = DECModel.DECLoss(num_centers, alpha)
label = mx.sym.Variable('label')
self.feature = self.ae_model.encoder
self.loss = self.dec_op(data=self.ae_model.encoder, label=label, name='dec')
self.args.update({k: v for k, v in self.ae_model.args.items() if k in self.ae_model.encoder.list_arguments()})
self.args['dec_mu'] = mx.nd.empty((num_centers, self.ae_model.dims[-1]), ctx=self.xpu)
self.args_grad.update({k: mx.nd.empty(v.shape, ctx=self.xpu) for k, v in self.args.items()})
self.args_mult.update({k: k.endswith('bias') and 2.0 or 1.0 for k in self.args})
self.num_centers = num_centers
def cluster(self, X, y=None, update_interval=None):
N = X.shape[0]
if not update_interval:
update_interval = N
batch_size = 256
test_iter = mx.io.NDArrayIter({'data': X}, batch_size=batch_size, shuffle=False,
last_batch_handle='pad')
args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
kmeans = KMeans(self.num_centers, n_init=20)
kmeans.fit(z)
args['dec_mu'][:] = kmeans.cluster_centers_
solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)
def ce(label, pred):
return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
solver.set_metric(mx.metric.CustomMetric(ce))
label_buff = np.zeros((X.shape[0], self.num_centers))
train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff}, batch_size=batch_size,
shuffle=False, last_batch_handle='roll_over')
self.y_pred = np.zeros((X.shape[0]))
def refresh(i):
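            # Every `update_interval` iterations: recompute the soft assignments,
            # report clustering accuracy (when labels are available), rebuild the
            # sharpened self-training target distribution p, and signal
            # convergence once fewer than 0.1% of the predicted labels change.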
if i%update_interval == 0:
z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
p = np.zeros((z.shape[0], self.num_centers))
self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
y_pred = p.argmax(axis=1)
print(np.std(np.bincount(y_pred)), np.bincount(y_pred))
print(np.std(np.bincount(y.astype(np.int))), np.bincount(y.astype(np.int)))
if y is not None:
print(cluster_acc(y_pred, y)[0])
weight = 1.0/p.sum(axis=0)
weight *= self.num_centers/weight.sum()
p = (p**2)*weight
train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
print(np.sum(y_pred != self.y_pred), 0.001*y_pred.shape[0])
if np.sum(y_pred != self.y_pred) < 0.001*y_pred.shape[0]:
self.y_pred = y_pred
return True
self.y_pred = y_pred
solver.set_iter_start_callback(refresh)
solver.set_monitor(Monitor(50))
solver.solve(self.xpu, self.loss, args, self.args_grad, None,
train_iter, 0, 1000000000, {}, False)
self.end_args = args
if y is not None:
return cluster_acc(self.y_pred, y)[0]
else:
return -1
def mnist_exp(xpu):
X, Y = data.get_mnist()
if not os.path.isdir('data'):
os.makedirs('data')
dec_model = DECModel(xpu, X, 10, 1.0, 'data/mnist')
acc = []
for i in [10*(2**j) for j in range(9)]:
acc.append(dec_model.cluster(X, Y, i))
logging.log(logging.INFO, 'Clustering Acc: %f at update interval: %d'%(acc[-1], i))
logging.info(str(acc))
logging.info('Best Clustering ACC: %f at update_interval: %d'%(np.max(acc), 10*(2**np.argmax(acc))))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
mnist_exp(mx.gpu(0))
| apache-2.0 |
waynenilsen/statsmodels | statsmodels/sandbox/examples/ex_kaplan_meier.py | 33 | 2838 | #An example for the Kaplan-Meier estimator
from __future__ import print_function
from statsmodels.compat.python import lrange
import statsmodels.api as sm
import matplotlib.pyplot as plt
import numpy as np
from statsmodels.sandbox.survival2 import KaplanMeier
#Getting the strike data as an array
dta = sm.datasets.strikes.load()
print('basic data')
print('\n')
dta = list(dta.values())[-1]
print(dta[lrange(5),:])
print('\n')
#Create the KaplanMeier object and fit the model
km = KaplanMeier(dta,0)
km.fit()
#show the results
km.plot()
print('basic model')
print('\n')
km.summary()
print('\n')
#Mutiple survival curves
km2 = KaplanMeier(dta,0,exog=1)
km2.fit()
print('more than one curve')
print('\n')
km2.summary()
print('\n')
km2.plot()
#with censoring
censoring = np.ones_like(dta[:,0])
censoring[dta[:,0] > 80] = 0
dta = np.c_[dta,censoring]
print('with censoring')
print('\n')
print(dta[lrange(5),:])
print('\n')
km3 = KaplanMeier(dta,0,exog=1,censoring=2)
km3.fit()
km3.summary()
print('\n')
km3.plot()
#Test for difference of survival curves
log_rank = km3.test_diff([0.0645,-0.03957])
print('log rank test')
print('\n')
print(log_rank)
print('\n')
#The zeroth element of log_rank is the chi-square test statistic
#for the difference between the survival curves for exog = 0.0645
#and exog = -0.03957, the index one element is the degrees of freedom for
#the test, and the index two element is the p-value for the test
wilcoxon = km3.test_diff([0.0645,-0.03957], rho=1)
print('Wilcoxon')
print('\n')
print(wilcoxon)
print('\n')
#Same info as log_rank, but for Peto and Peto modification to the
#Gehan-Wilcoxon test
#User specified functions for tests
#A wider range of rates can be accessed by using the 'weight' parameter
#for the test_diff method
#For example, if the desire weights are S(t)*(1-S(t)), where S(t) is a pooled
#estimate for the survival function, this could be computed by doing
def weights(t):
    #must accept one argument, even though it is not used here
s = KaplanMeier(dta,0,censoring=2)
s.fit()
s = s.results[0][0]
s = s * (1 - s)
return s
#KaplanMeier provides an array of times to the weighting function
#internally, so the weighting function must accept one argument
test = km3.test_diff([0.0645,-0.03957], weight=weights)
print('user specified weights')
print('\n')
print(test)
print('\n')
#Groups with nan names
#These can be handled by passing the data to KaplanMeier as an array of strings
groups = np.ones_like(dta[:,1])
groups = groups.astype('S4')
groups[dta[:,1] > 0] = 'high'
groups[dta[:,1] <= 0] = 'low'
dta = dta.astype('S4')
dta[:,1] = groups
print('with nan group names')
print('\n')
print(dta[lrange(5),:])
print('\n')
km4 = KaplanMeier(dta,0,exog=1,censoring=2)
km4.fit()
km4.summary()
print('\n')
km4.plot()
#show all the plots
plt.show()
| bsd-3-clause |
IshankGulati/scikit-learn | sklearn/linear_model/tests/test_sgd.py | 34 | 47824 | import pickle
import unittest
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import raises
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false, assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import ignore_warnings
from sklearn import linear_model, datasets, metrics
from sklearn.base import clone
from sklearn.linear_model import SGDClassifier, SGDRegressor
from sklearn.preprocessing import LabelEncoder, scale, MinMaxScaler
from sklearn.linear_model import sgd_fast
class SparseSGDClassifier(SGDClassifier):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).fit(X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).partial_fit(X, y, *args, **kw)
def decision_function(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).decision_function(X)
def predict_proba(self, X):
X = sp.csr_matrix(X)
return super(SparseSGDClassifier, self).predict_proba(X)
class SparseSGDRegressor(SGDRegressor):
def fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.fit(self, X, y, *args, **kw)
def partial_fit(self, X, y, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.partial_fit(self, X, y, *args, **kw)
def decision_function(self, X, *args, **kw):
X = sp.csr_matrix(X)
return SGDRegressor.decision_function(self, X, *args, **kw)
# Test Data
# test sample 1
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y = [1, 1, 1, 2, 2, 2]
T = np.array([[-1, -1], [2, 2], [3, 2]])
true_result = [1, 2, 2]
# test sample 2; string class labels
X2 = np.array([[-1, 1], [-0.75, 0.5], [-1.5, 1.5],
[1, 1], [0.75, 0.5], [1.5, 1.5],
[-1, -1], [0, -0.5], [1, -1]])
Y2 = ["one"] * 3 + ["two"] * 3 + ["three"] * 3
T2 = np.array([[-1.5, 0.5], [1, 2], [0, -2]])
true_result2 = ["one", "two", "three"]
# test sample 3
X3 = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1], [0, 0, 0, 0, 1, 1],
[0, 0, 0, 1, 0, 0], [0, 0, 0, 1, 0, 0]])
Y3 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
# test sample 4 - two more or less redundant feature groups
X4 = np.array([[1, 0.9, 0.8, 0, 0, 0], [1, .84, .98, 0, 0, 0],
[1, .96, .88, 0, 0, 0], [1, .91, .99, 0, 0, 0],
[0, 0, 0, .89, .91, 1], [0, 0, 0, .79, .84, 1],
[0, 0, 0, .91, .95, 1], [0, 0, 0, .93, 1, 1]])
Y4 = np.array([1, 1, 1, 1, 2, 2, 2, 2])
iris = datasets.load_iris()
# test sample 5 - test sample 1 as binary classification problem
X5 = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
Y5 = [1, 1, 1, 2, 2, 2]
true_result5 = [0, 1, 1]
# Classification Test Case
class CommonTest(object):
def factory(self, **kwargs):
if "random_state" not in kwargs:
kwargs["random_state"] = 42
return self.factory_class(**kwargs)
# a simple implementation of ASGD to use for testing
# uses squared loss to find the gradient
def asgd(self, X, y, eta, alpha, weight_init=None, intercept_init=0.0):
if weight_init is None:
weights = np.zeros(X.shape[1])
else:
weights = weight_init
average_weights = np.zeros(X.shape[1])
intercept = intercept_init
average_intercept = 0.0
decay = 1.0
# sparse data has a fixed decay of .01
if (isinstance(self, SparseSGDClassifierTestCase) or
isinstance(self, SparseSGDRegressorTestCase)):
decay = .01
for i, entry in enumerate(X):
p = np.dot(entry, weights)
p += intercept
gradient = p - y[i]
weights *= 1.0 - (eta * alpha)
weights += -(eta * gradient * entry)
intercept += -(eta * gradient) * decay
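            # Incremental running average over the i + 1 samples seen so far:
            # new_avg = (old_avg * i + current_value) / (i + 1).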
average_weights *= i
average_weights += weights
average_weights /= i + 1.0
average_intercept *= i
average_intercept += intercept
average_intercept /= i + 1.0
return average_weights, average_intercept
def _test_warm_start(self, X, Y, lr):
# Test that explicit warm restart...
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf.fit(X, Y)
clf2 = self.factory(alpha=0.001, eta0=0.01, n_iter=5, shuffle=False,
learning_rate=lr)
clf2.fit(X, Y,
coef_init=clf.coef_.copy(),
intercept_init=clf.intercept_.copy())
# ... and implicit warm restart are equivalent.
clf3 = self.factory(alpha=0.01, eta0=0.01, n_iter=5, shuffle=False,
warm_start=True, learning_rate=lr)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf.t_)
assert_array_almost_equal(clf3.coef_, clf.coef_)
clf3.set_params(alpha=0.001)
clf3.fit(X, Y)
assert_equal(clf3.t_, clf2.t_)
assert_array_almost_equal(clf3.coef_, clf2.coef_)
def test_warm_start_constant(self):
self._test_warm_start(X, Y, "constant")
def test_warm_start_invscaling(self):
self._test_warm_start(X, Y, "invscaling")
def test_warm_start_optimal(self):
self._test_warm_start(X, Y, "optimal")
def test_input_format(self):
# Input format tests.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
Y_ = np.array(Y)[:, np.newaxis]
Y_ = np.c_[Y_, Y_]
assert_raises(ValueError, clf.fit, X, Y_)
def test_clone(self):
# Test whether clone works ok.
clf = self.factory(alpha=0.01, n_iter=5, penalty='l1')
clf = clone(clf)
clf.set_params(penalty='l2')
clf.fit(X, Y)
clf2 = self.factory(alpha=0.01, n_iter=5, penalty='l2')
clf2.fit(X, Y)
assert_array_equal(clf.coef_, clf2.coef_)
def test_plain_has_no_average_attr(self):
clf = self.factory(average=True, eta0=.01)
clf.fit(X, Y)
assert_true(hasattr(clf, 'average_coef_'))
assert_true(hasattr(clf, 'average_intercept_'))
assert_true(hasattr(clf, 'standard_intercept_'))
assert_true(hasattr(clf, 'standard_coef_'))
clf = self.factory()
clf.fit(X, Y)
assert_false(hasattr(clf, 'average_coef_'))
assert_false(hasattr(clf, 'average_intercept_'))
assert_false(hasattr(clf, 'standard_intercept_'))
assert_false(hasattr(clf, 'standard_coef_'))
def test_late_onset_averaging_not_reached(self):
clf1 = self.factory(average=600)
clf2 = self.factory()
for _ in range(100):
if isinstance(clf1, SGDClassifier):
clf1.partial_fit(X, Y, classes=np.unique(Y))
clf2.partial_fit(X, Y, classes=np.unique(Y))
else:
clf1.partial_fit(X, Y)
clf2.partial_fit(X, Y)
assert_array_almost_equal(clf1.coef_, clf2.coef_, decimal=16)
assert_almost_equal(clf1.intercept_, clf2.intercept_, decimal=16)
def test_late_onset_averaging_reached(self):
eta0 = .001
alpha = .0001
Y_encode = np.array(Y)
Y_encode[Y_encode == 1] = -1.0
Y_encode[Y_encode == 2] = 1.0
clf1 = self.factory(average=7, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=2, shuffle=False)
clf2 = self.factory(average=0, learning_rate="constant",
loss='squared_loss', eta0=eta0,
alpha=alpha, n_iter=1, shuffle=False)
clf1.fit(X, Y_encode)
clf2.fit(X, Y_encode)
average_weights, average_intercept = \
self.asgd(X, Y_encode, eta0, alpha,
weight_init=clf2.coef_.ravel(),
intercept_init=clf2.intercept_)
assert_array_almost_equal(clf1.coef_.ravel(),
average_weights.ravel(),
decimal=16)
assert_almost_equal(clf1.intercept_, average_intercept, decimal=16)
@raises(ValueError)
def test_sgd_bad_alpha_for_optimal_learning_rate(self):
# Check whether expected ValueError on bad alpha, i.e. 0
# since alpha is used to compute the optimal learning rate
self.factory(alpha=0, learning_rate="optimal")
class DenseSGDClassifierTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDClassifier
def test_sgd(self):
# Check that SGD gives any results :-)
for loss in ("hinge", "squared_hinge", "log", "modified_huber"):
clf = self.factory(penalty='l2', alpha=0.01, fit_intercept=True,
loss=loss, n_iter=10, shuffle=True)
clf.fit(X, Y)
# assert_almost_equal(clf.coef_[0], clf.coef_[1], decimal=7)
assert_array_equal(clf.predict(T), true_result)
@raises(ValueError)
def test_sgd_bad_l1_ratio(self):
# Check whether expected ValueError on bad l1_ratio
self.factory(l1_ratio=1.1)
@raises(ValueError)
def test_sgd_bad_learning_rate_schedule(self):
# Check whether expected ValueError on bad learning_rate
self.factory(learning_rate="<unknown>")
@raises(ValueError)
def test_sgd_bad_eta0(self):
# Check whether expected ValueError on bad eta0
self.factory(eta0=0, learning_rate="constant")
@raises(ValueError)
def test_sgd_bad_alpha(self):
# Check whether expected ValueError on bad alpha
self.factory(alpha=-.1)
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
@raises(ValueError)
def test_sgd_n_iter_param(self):
# Test parameter validity check
self.factory(n_iter=-10000)
@raises(ValueError)
def test_sgd_shuffle_param(self):
# Test parameter validity check
self.factory(shuffle="false")
@raises(TypeError)
def test_argument_coef(self):
# Checks coef_init not allowed as model argument (only fit)
# Provided coef_ does not match dataset.
self.factory(coef_init=np.zeros((3,))).fit(X, Y)
@raises(ValueError)
def test_provide_coef(self):
# Checks coef_init shape for the warm starts
# Provided coef_ does not match dataset.
self.factory().fit(X, Y, coef_init=np.zeros((3,)))
@raises(ValueError)
def test_set_intercept(self):
# Checks intercept_ shape for the warm starts
# Provided intercept_ does not match dataset.
self.factory().fit(X, Y, intercept_init=np.zeros((3,)))
def test_set_intercept_binary(self):
# Checks intercept_ shape for the warm starts in binary case
self.factory().fit(X5, Y5, intercept_init=0)
def test_average_binary_computed_correctly(self):
# Checks the SGDClassifier correctly computes the average weights
eta = .1
alpha = 2.
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
# simple linear function without noise
y = np.dot(X, w)
y = np.sign(y)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
average_weights = average_weights.reshape(1, -1)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=14)
assert_almost_equal(clf.intercept_, average_intercept, decimal=14)
def test_set_intercept_to_intercept(self):
# Checks intercept_ shape consistency for the warm starts
# Inconsistent intercept_ shape.
clf = self.factory().fit(X5, Y5)
self.factory().fit(X5, Y5, intercept_init=clf.intercept_)
clf = self.factory().fit(X, Y)
self.factory().fit(X, Y, intercept_init=clf.intercept_)
@raises(ValueError)
def test_sgd_at_least_two_labels(self):
# Target must have at least two labels
self.factory(alpha=0.01, n_iter=20).fit(X2, np.ones(9))
def test_partial_fit_weight_class_balanced(self):
        # partial_fit with class_weight='balanced' is not supported
assert_raises_regexp(ValueError,
"class_weight 'balanced' is not supported for "
"partial_fit. In order to use 'balanced' weights, "
"use compute_class_weight\('balanced', classes, y\). "
"In place of y you can us a large enough sample "
"of the full training set target to properly "
"estimate the class frequency distributions. "
"Pass the resulting weights as the class_weight "
"parameter.",
self.factory(class_weight='balanced').partial_fit,
X, Y, classes=np.unique(Y))
def test_sgd_multiclass(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_average(self):
eta = .001
alpha = .01
# Multi-class average test case
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
np_Y2 = np.array(Y2)
clf.fit(X2, np_Y2)
classes = np.unique(np_Y2)
for i, cl in enumerate(classes):
y_i = np.ones(np_Y2.shape[0])
y_i[np_Y2 != cl] = -1
average_coef, average_intercept = self.asgd(X2, y_i, eta, alpha)
assert_array_almost_equal(average_coef, clf.coef_[i], decimal=16)
assert_almost_equal(average_intercept,
clf.intercept_[i],
decimal=16)
def test_sgd_multiclass_with_init_coef(self):
# Multi-class test case
clf = self.factory(alpha=0.01, n_iter=20)
clf.fit(X2, Y2, coef_init=np.zeros((3, 2)),
intercept_init=np.zeros(3))
assert_equal(clf.coef_.shape, (3, 2))
assert_true(clf.intercept_.shape, (3,))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_sgd_multiclass_njobs(self):
# Multi-class test case with multi-core support
clf = self.factory(alpha=0.01, n_iter=20, n_jobs=2).fit(X2, Y2)
assert_equal(clf.coef_.shape, (3, 2))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
pred = clf.predict(T2)
assert_array_equal(pred, true_result2)
def test_set_coef_multiclass(self):
# Checks coef_init and intercept_init shape for multi-class
# problems
# Provided coef_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2, coef_init=np.zeros((2, 2)))
# Provided coef_ does match dataset
clf = self.factory().fit(X2, Y2, coef_init=np.zeros((3, 2)))
# Provided intercept_ does not match dataset
clf = self.factory()
assert_raises(ValueError, clf.fit, X2, Y2,
intercept_init=np.zeros((1,)))
# Provided intercept_ does match dataset.
clf = self.factory().fit(X2, Y2, intercept_init=np.zeros((3,)))
def test_sgd_proba(self):
# Check SGD.predict_proba
# Hinge loss does not allow for conditional prob estimate.
# We cannot use the factory here, because it defines predict_proba
# anyway.
clf = SGDClassifier(loss="hinge", alpha=0.01, n_iter=10).fit(X, Y)
assert_false(hasattr(clf, "predict_proba"))
assert_false(hasattr(clf, "predict_log_proba"))
# log and modified_huber losses can output probability estimates
# binary case
for loss in ["log", "modified_huber"]:
clf = self.factory(loss=loss, alpha=0.01, n_iter=10)
clf.fit(X, Y)
p = clf.predict_proba([[3, 2]])
assert_true(p[0, 1] > 0.5)
p = clf.predict_proba([[-1, -1]])
assert_true(p[0, 1] < 0.5)
p = clf.predict_log_proba([[3, 2]])
assert_true(p[0, 1] > p[0, 0])
p = clf.predict_log_proba([[-1, -1]])
assert_true(p[0, 1] < p[0, 0])
# log loss multiclass probability estimates
clf = self.factory(loss="log", alpha=0.01, n_iter=10).fit(X2, Y2)
d = clf.decision_function([[.1, -.1], [.3, .2]])
p = clf.predict_proba([[.1, -.1], [.3, .2]])
assert_array_equal(np.argmax(p, axis=1), np.argmax(d, axis=1))
assert_almost_equal(p[0].sum(), 1)
assert_true(np.all(p[0] >= 0))
p = clf.predict_proba([[-1, -1]])
d = clf.decision_function([[-1, -1]])
assert_array_equal(np.argsort(p[0]), np.argsort(d[0]))
l = clf.predict_log_proba([[3, 2]])
p = clf.predict_proba([[3, 2]])
assert_array_almost_equal(np.log(p), l)
l = clf.predict_log_proba([[-1, -1]])
p = clf.predict_proba([[-1, -1]])
assert_array_almost_equal(np.log(p), l)
# Modified Huber multiclass probability estimates; requires a separate
# test because the hard zero/one probabilities may destroy the
# ordering present in decision_function output.
clf = self.factory(loss="modified_huber", alpha=0.01, n_iter=10)
clf.fit(X2, Y2)
d = clf.decision_function([[3, 2]])
p = clf.predict_proba([[3, 2]])
if not isinstance(self, SparseSGDClassifierTestCase):
assert_equal(np.argmax(d, axis=1), np.argmax(p, axis=1))
else: # XXX the sparse test gets a different X2 (?)
assert_equal(np.argmin(d, axis=1), np.argmin(p, axis=1))
# the following sample produces decision_function values < -1,
# which would cause naive normalization to fail (see comment
# in SGDClassifier.predict_proba)
x = X.mean(axis=0)
d = clf.decision_function([x])
if np.all(d < -1): # XXX not true in sparse test case (why?)
p = clf.predict_proba([x])
assert_array_almost_equal(p[0], [1 / 3.] * 3)
def test_sgd_l1(self):
# Test L1 regularization
n = len(X4)
rng = np.random.RandomState(13)
idx = np.arange(n)
rng.shuffle(idx)
X = X4[idx, :]
Y = Y4[idx]
clf = self.factory(penalty='l1', alpha=.2, fit_intercept=False,
n_iter=2000, shuffle=False)
clf.fit(X, Y)
assert_array_equal(clf.coef_[0, 1:-1], np.zeros((4,)))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# test sparsify with dense inputs
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
# pickle and unpickle with sparse coef_
clf = pickle.loads(pickle.dumps(clf))
assert_true(sp.issparse(clf.coef_))
pred = clf.predict(X)
assert_array_equal(pred, Y)
def test_class_weights(self):
# Test class weights.
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight=None)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False,
class_weight={1: 0.001})
clf.fit(X, y)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
def test_equal_class_weight(self):
# Test if equal class weights approx. equals no class weights.
X = [[1, 0], [1, 0], [0, 1], [0, 1]]
y = [0, 0, 1, 1]
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=None)
clf.fit(X, y)
X = [[1, 0], [0, 1]]
y = [0, 1]
clf_weighted = self.factory(alpha=0.1, n_iter=1000,
class_weight={0: 0.5, 1: 0.5})
clf_weighted.fit(X, y)
# should be similar up to some epsilon due to learning rate schedule
assert_almost_equal(clf.coef_, clf_weighted.coef_, decimal=2)
@raises(ValueError)
def test_wrong_class_weight_label(self):
# ValueError due to not existing class label.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight={0: 0.5})
clf.fit(X, Y)
@raises(ValueError)
def test_wrong_class_weight_format(self):
# ValueError due to wrong class_weight argument type.
clf = self.factory(alpha=0.1, n_iter=1000, class_weight=[0.5])
clf.fit(X, Y)
def test_weights_multiplied(self):
# Tests that class_weight and sample_weight are multiplicative
class_weights = {1: .6, 2: .3}
rng = np.random.RandomState(0)
sample_weights = rng.random_sample(Y4.shape[0])
multiplied_together = np.copy(sample_weights)
multiplied_together[Y4 == 1] *= class_weights[1]
multiplied_together[Y4 == 2] *= class_weights[2]
clf1 = self.factory(alpha=0.1, n_iter=20, class_weight=class_weights)
clf2 = self.factory(alpha=0.1, n_iter=20)
clf1.fit(X4, Y4, sample_weight=sample_weights)
clf2.fit(X4, Y4, sample_weight=multiplied_together)
assert_almost_equal(clf1.coef_, clf2.coef_)
def test_balanced_weight(self):
        # Test class weights for imbalanced data
# compute reference metrics on iris dataset that is quite balanced by
# default
X, y = iris.data, iris.target
X = scale(X)
idx = np.arange(X.shape[0])
rng = np.random.RandomState(6)
rng.shuffle(idx)
X = X[idx]
y = y[idx]
clf = self.factory(alpha=0.0001, n_iter=1000,
class_weight=None, shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf.predict(X), average='weighted'), 0.96,
decimal=1)
# make the same prediction using balanced class_weight
clf_balanced = self.factory(alpha=0.0001, n_iter=1000,
class_weight="balanced",
shuffle=False).fit(X, y)
assert_almost_equal(metrics.f1_score(y, clf_balanced.predict(X), average='weighted'), 0.96,
decimal=1)
# Make sure that in the balanced case it does not change anything
# to use "balanced"
assert_array_almost_equal(clf.coef_, clf_balanced.coef_, 6)
        # build a very imbalanced dataset out of the iris data
X_0 = X[y == 0, :]
y_0 = y[y == 0]
X_imbalanced = np.vstack([X] + [X_0] * 10)
y_imbalanced = np.concatenate([y] + [y_0] * 10)
# fit a model on the imbalanced data without class weight info
clf = self.factory(n_iter=1000, class_weight=None, shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_less(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit a model with balanced class_weight enabled
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
# fit another using a fit parameter override
clf = self.factory(n_iter=1000, class_weight="balanced", shuffle=False)
clf.fit(X_imbalanced, y_imbalanced)
y_pred = clf.predict(X)
assert_greater(metrics.f1_score(y, y_pred, average='weighted'), 0.96)
def test_sample_weights(self):
# Test weights on individual samples
X = np.array([[-1.0, -1.0], [-1.0, 0], [-.8, -1.0],
[1.0, 1.0], [1.0, 0.0]])
y = [1, 1, 1, -1, -1]
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
clf.fit(X, y)
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([1]))
# we give a small weights to class 1
clf.fit(X, y, sample_weight=[0.001] * 3 + [1] * 2)
# now the hyperplane should rotate clock-wise and
# the prediction on this point should shift
assert_array_equal(clf.predict([[0.2, -1.0]]), np.array([-1]))
@raises(ValueError)
def test_wrong_sample_weights(self):
# Test if ValueError is raised if sample_weight has wrong shape
clf = self.factory(alpha=0.1, n_iter=1000, fit_intercept=False)
# provided sample_weight too long
clf.fit(X, Y, sample_weight=np.arange(7))
@raises(ValueError)
def test_partial_fit_exception(self):
clf = self.factory(alpha=0.01)
# classes was not specified
clf.partial_fit(X3, Y3)
def test_partial_fit_binary(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y)
clf.partial_fit(X[:third], Y[:third], classes=classes)
assert_equal(clf.coef_.shape, (1, X.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
y_pred = clf.predict(T)
assert_array_equal(y_pred, true_result)
def test_partial_fit_multiclass(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01)
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
assert_equal(clf.decision_function([[0, 0]]).shape, (1, 3))
id1 = id(clf.coef_.data)
clf.partial_fit(X2[third:], Y2[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
def test_partial_fit_multiclass_average(self):
third = X2.shape[0] // 3
clf = self.factory(alpha=0.01, average=X2.shape[0])
classes = np.unique(Y2)
clf.partial_fit(X2[:third], Y2[:third], classes=classes)
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
clf.partial_fit(X2[third:], Y2[third:])
assert_equal(clf.coef_.shape, (3, X2.shape[1]))
assert_equal(clf.intercept_.shape, (3,))
def test_fit_then_partial_fit(self):
# Partial_fit should work after initial fit in the multiclass case.
# Non-regression test for #2496; fit would previously produce a
# Fortran-ordered coef_ that subsequent partial_fit couldn't handle.
clf = self.factory()
clf.fit(X2, Y2)
clf.partial_fit(X2, Y2) # no exception here
def _test_partial_fit_equal_fit(self, lr):
for X_, Y_, T_ in ((X, Y, T), (X2, Y2, T2)):
clf = self.factory(alpha=0.01, eta0=0.01, n_iter=2,
learning_rate=lr, shuffle=False)
clf.fit(X_, Y_)
y_pred = clf.decision_function(T_)
t = clf.t_
classes = np.unique(Y_)
clf = self.factory(alpha=0.01, eta0=0.01, learning_rate=lr,
shuffle=False)
for i in range(2):
clf.partial_fit(X_, Y_, classes=classes)
y_pred2 = clf.decision_function(T_)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_regression_losses(self):
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant",
eta0=0.1, loss="squared_epsilon_insensitive")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, loss="huber")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
clf = self.factory(alpha=0.01, learning_rate="constant", eta0=0.01,
loss="squared_loss")
clf.fit(X, Y)
assert_equal(1.0, np.mean(clf.predict(X) == Y))
def test_warm_start_multiclass(self):
self._test_warm_start(X2, Y2, "optimal")
def test_multiple_fit(self):
# Test multiple calls of fit w/ different shaped inputs.
clf = self.factory(alpha=0.01, n_iter=5,
shuffle=False)
clf.fit(X, Y)
assert_true(hasattr(clf, "coef_"))
# Non-regression test: try fitting with a different label set.
y = [["ham", "spam"][i] for i in LabelEncoder().fit_transform(Y)]
clf.fit(X[:, :-1], y)
class SparseSGDClassifierTestCase(DenseSGDClassifierTestCase):
"""Run exactly the same tests using the sparse representation variant"""
factory_class = SparseSGDClassifier
###############################################################################
# Regression Test Case
class DenseSGDRegressorTestCase(unittest.TestCase, CommonTest):
"""Test suite for the dense representation variant of SGD"""
factory_class = SGDRegressor
def test_sgd(self):
# Check that SGD gives any results.
clf = self.factory(alpha=0.1, n_iter=2,
fit_intercept=False)
clf.fit([[0, 0], [1, 1], [2, 2]], [0, 1, 2])
assert_equal(clf.coef_[0], clf.coef_[1])
@raises(ValueError)
def test_sgd_bad_penalty(self):
# Check whether expected ValueError on bad penalty
self.factory(penalty='foobar', l1_ratio=0.85)
@raises(ValueError)
def test_sgd_bad_loss(self):
# Check whether expected ValueError on bad loss
self.factory(loss="foobar")
def test_sgd_averaged_computed_correctly(self):
# Tests the average regressor matches the naive implementation
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.fit(X, y)
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_averaged_partial_fit(self):
# Tests whether the partial fit yields the same average as the fit
eta = .001
alpha = .01
n_samples = 20
n_features = 10
rng = np.random.RandomState(0)
X = rng.normal(size=(n_samples, n_features))
w = rng.normal(size=n_features)
# simple linear function without noise
y = np.dot(X, w)
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
clf.partial_fit(X[:int(n_samples / 2)][:], y[:int(n_samples / 2)])
clf.partial_fit(X[int(n_samples / 2):][:], y[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X, y, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_[0], average_intercept, decimal=16)
def test_average_sparse(self):
# Checks the average weights on data with 0s
eta = .001
alpha = .01
clf = self.factory(loss='squared_loss',
learning_rate='constant',
eta0=eta, alpha=alpha,
fit_intercept=True,
n_iter=1, average=True, shuffle=False)
n_samples = Y3.shape[0]
clf.partial_fit(X3[:int(n_samples / 2)][:], Y3[:int(n_samples / 2)])
clf.partial_fit(X3[int(n_samples / 2):][:], Y3[int(n_samples / 2):])
average_weights, average_intercept = self.asgd(X3, Y3, eta, alpha)
assert_array_almost_equal(clf.coef_,
average_weights,
decimal=16)
assert_almost_equal(clf.intercept_, average_intercept, decimal=16)
def test_sgd_least_squares_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='squared_loss', alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_sgd_epsilon_insensitive(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss='epsilon_insensitive', epsilon=0.01,
alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_true(score > 0.5)
def test_sgd_huber_fit(self):
xmin, xmax = -5, 5
n_samples = 100
rng = np.random.RandomState(0)
X = np.linspace(xmin, xmax, n_samples).reshape(n_samples, 1)
# simple linear function without noise
y = 0.5 * X.ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.99)
# simple linear function with noise
y = 0.5 * X.ravel() + rng.randn(n_samples, 1).ravel()
clf = self.factory(loss="huber", epsilon=0.1, alpha=0.1, n_iter=20,
fit_intercept=False)
clf.fit(X, y)
score = clf.score(X, y)
assert_greater(score, 0.5)
def test_elasticnet_convergence(self):
# Check that the SGD output is consistent with coordinate descent
n_samples, n_features = 1000, 5
rng = np.random.RandomState(0)
X = rng.randn(n_samples, n_features)
# ground_truth linear model that generate y from X and to which the
# models should converge if the regularizer would be set to 0.0
ground_truth_coef = rng.randn(n_features)
y = np.dot(X, ground_truth_coef)
# XXX: alpha = 0.1 seems to cause convergence problems
for alpha in [0.01, 0.001]:
for l1_ratio in [0.5, 0.8, 1.0]:
cd = linear_model.ElasticNet(alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
cd.fit(X, y)
sgd = self.factory(penalty='elasticnet', n_iter=50,
alpha=alpha, l1_ratio=l1_ratio,
fit_intercept=False)
sgd.fit(X, y)
err_msg = ("cd and sgd did not converge to comparable "
"results for alpha=%f and l1_ratio=%f"
% (alpha, l1_ratio))
assert_almost_equal(cd.coef_, sgd.coef_, decimal=2,
err_msg=err_msg)
@ignore_warnings
def test_partial_fit(self):
third = X.shape[0] // 3
clf = self.factory(alpha=0.01)
clf.partial_fit(X[:third], Y[:third])
assert_equal(clf.coef_.shape, (X.shape[1], ))
assert_equal(clf.intercept_.shape, (1,))
assert_equal(clf.predict([[0, 0]]).shape, (1, ))
id1 = id(clf.coef_.data)
clf.partial_fit(X[third:], Y[third:])
id2 = id(clf.coef_.data)
# check that coef_ haven't been re-allocated
assert_true(id1, id2)
def _test_partial_fit_equal_fit(self, lr):
clf = self.factory(alpha=0.01, n_iter=2, eta0=0.01,
learning_rate=lr, shuffle=False)
clf.fit(X, Y)
y_pred = clf.predict(T)
t = clf.t_
clf = self.factory(alpha=0.01, eta0=0.01,
learning_rate=lr, shuffle=False)
for i in range(2):
clf.partial_fit(X, Y)
y_pred2 = clf.predict(T)
assert_equal(clf.t_, t)
assert_array_almost_equal(y_pred, y_pred2, decimal=2)
def test_partial_fit_equal_fit_constant(self):
self._test_partial_fit_equal_fit("constant")
def test_partial_fit_equal_fit_optimal(self):
self._test_partial_fit_equal_fit("optimal")
def test_partial_fit_equal_fit_invscaling(self):
self._test_partial_fit_equal_fit("invscaling")
def test_loss_function_epsilon(self):
clf = self.factory(epsilon=0.9)
clf.set_params(epsilon=0.1)
assert clf.loss_functions['huber'][1] == 0.1
class SparseSGDRegressorTestCase(DenseSGDRegressorTestCase):
# Run exactly the same tests using the sparse representation variant
factory_class = SparseSGDRegressor
def test_l1_ratio():
# Test if l1 ratio extremes match L1 and L2 penalty settings.
X, y = datasets.make_classification(n_samples=1000,
n_features=100, n_informative=20,
random_state=1234)
# test if elasticnet with l1_ratio near 1 gives same result as pure l1
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.9999999999, random_state=42).fit(X, y)
est_l1 = SGDClassifier(alpha=0.001, penalty='l1', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l1.coef_)
# test if elasticnet with l1_ratio near 0 gives same result as pure l2
est_en = SGDClassifier(alpha=0.001, penalty='elasticnet',
l1_ratio=0.0000000001, random_state=42).fit(X, y)
est_l2 = SGDClassifier(alpha=0.001, penalty='l2', random_state=42).fit(X, y)
assert_array_almost_equal(est_en.coef_, est_l2.coef_)
def test_underflow_or_overlow():
with np.errstate(all='raise'):
# Generate some weird data with hugely unscaled features
rng = np.random.RandomState(0)
n_samples = 100
n_features = 10
X = rng.normal(size=(n_samples, n_features))
X[:, :2] *= 1e300
assert_true(np.isfinite(X).all())
# Use MinMaxScaler to scale the data without introducing a numerical
# instability (computing the standard deviation naively is not possible
# on this data)
X_scaled = MinMaxScaler().fit_transform(X)
assert_true(np.isfinite(X_scaled).all())
# Define a ground truth on the scaled data
ground_truth = rng.normal(size=n_features)
y = (np.dot(X_scaled, ground_truth) > 0.).astype(np.int32)
assert_array_equal(np.unique(y), [0, 1])
model = SGDClassifier(alpha=0.1, loss='squared_hinge', n_iter=500)
# smoke test: model is stable on scaled data
model.fit(X_scaled, y)
assert_true(np.isfinite(model.coef_).all())
# model is numerically unstable on unscaled data
msg_regxp = (r"Floating-point under-/overflow occurred at epoch #.*"
" Scaling input data with StandardScaler or MinMaxScaler"
" might help.")
assert_raises_regexp(ValueError, msg_regxp, model.fit, X, y)
def test_numerical_stability_large_gradient():
# Non regression test case for numerical stability on scaled problems
# where the gradient can still explode with some losses
model = SGDClassifier(loss='squared_hinge', n_iter=10, shuffle=True,
penalty='elasticnet', l1_ratio=0.3, alpha=0.01,
eta0=0.001, random_state=0)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_true(np.isfinite(model.coef_).all())
def test_large_regularization():
# Non regression tests for numerical stability issues caused by large
# regularization parameters
for penalty in ['l2', 'l1', 'elasticnet']:
model = SGDClassifier(alpha=1e5, learning_rate='constant', eta0=0.1,
n_iter=5, penalty=penalty, shuffle=False)
with np.errstate(all='raise'):
model.fit(iris.data, iris.target)
assert_array_almost_equal(model.coef_, np.zeros_like(model.coef_))
def _test_gradient_common(loss_function, cases):
# Test gradient of different loss functions
# cases is a list of (p, y, expected)
for p, y, expected in cases:
assert_almost_equal(loss_function.dloss(p, y), expected)
def test_gradient_hinge():
# Test Hinge (hinge / perceptron)
# hinge
loss = sgd_fast.Hinge(1.0)
cases = [
# (p, y, expected)
(1.1, 1.0, 0.0), (-2.0, -1.0, 0.0),
(1.0, 1.0, -1.0), (-1.0, -1.0, 1.0), (0.5, 1.0, -1.0),
(2.0, -1.0, 1.0), (-0.5, -1.0, 1.0), (0.0, 1.0, -1.0)
]
_test_gradient_common(loss, cases)
# perceptron
loss = sgd_fast.Hinge(0.0)
cases = [
# (p, y, expected)
(1.0, 1.0, 0.0), (-0.1, -1.0, 0.0),
(0.0, 1.0, -1.0), (0.0, -1.0, 1.0), (0.5, -1.0, 1.0),
(2.0, -1.0, 1.0), (-0.5, 1.0, -1.0), (-1.0, 1.0, -1.0),
]
_test_gradient_common(loss, cases)
def test_gradient_squared_hinge():
# Test SquaredHinge
loss = sgd_fast.SquaredHinge(1.0)
cases = [
# (p, y, expected)
(1.0, 1.0, 0.0), (-2.0, -1.0, 0.0), (1.0, -1.0, 4.0),
(-1.0, 1.0, -4.0), (0.5, 1.0, -1.0), (0.5, -1.0, 3.0)
]
_test_gradient_common(loss, cases)
def test_gradient_log():
# Test Log (logistic loss)
loss = sgd_fast.Log()
cases = [
# (p, y, expected)
(1.0, 1.0, -1.0 / (np.exp(1.0) + 1.0)),
(1.0, -1.0, 1.0 / (np.exp(-1.0) + 1.0)),
(-1.0, -1.0, 1.0 / (np.exp(1.0) + 1.0)),
(-1.0, 1.0, -1.0 / (np.exp(-1.0) + 1.0)),
(0.0, 1.0, -0.5), (0.0, -1.0, 0.5),
(17.9, -1.0, 1.0), (-17.9, 1.0, -1.0),
]
_test_gradient_common(loss, cases)
assert_almost_equal(loss.dloss(18.1, 1.0), np.exp(-18.1) * -1.0, 16)
assert_almost_equal(loss.dloss(-18.1, -1.0), np.exp(-18.1) * 1.0, 16)
def test_gradient_squared_loss():
# Test SquaredLoss
loss = sgd_fast.SquaredLoss()
cases = [
# (p, y, expected)
(0.0, 0.0, 0.0), (1.0, 1.0, 0.0), (1.0, 0.0, 1.0),
(0.5, -1.0, 1.5), (-2.5, 2.0, -4.5)
]
_test_gradient_common(loss, cases)
def test_gradient_huber():
# Test Huber
loss = sgd_fast.Huber(0.1)
cases = [
# (p, y, expected)
(0.0, 0.0, 0.0), (0.1, 0.0, 0.1), (0.0, 0.1, -0.1),
(3.95, 4.0, -0.05), (5.0, 2.0, 0.1), (-1.0, 5.0, -0.1)
]
_test_gradient_common(loss, cases)
def test_gradient_modified_huber():
# Test ModifiedHuber
loss = sgd_fast.ModifiedHuber()
cases = [
# (p, y, expected)
(1.0, 1.0, 0.0), (-1.0, -1.0, 0.0), (2.0, 1.0, 0.0),
(0.0, 1.0, -2.0), (-1.0, 1.0, -4.0), (0.5, -1.0, 3.0),
(0.5, -1.0, 3.0), (-2.0, 1.0, -4.0), (-3.0, 1.0, -4.0)
]
_test_gradient_common(loss, cases)
def test_gradient_epsilon_insensitive():
# Test EpsilonInsensitive
loss = sgd_fast.EpsilonInsensitive(0.1)
cases = [
(0.0, 0.0, 0.0), (0.1, 0.0, 0.0), (-2.05, -2.0, 0.0),
(3.05, 3.0, 0.0), (2.2, 2.0, 1.0), (2.0, -1.0, 1.0),
(2.0, 2.2, -1.0), (-2.0, 1.0, -1.0)
]
_test_gradient_common(loss, cases)
def test_gradient_squared_epsilon_insensitive():
# Test SquaredEpsilonInsensitive
loss = sgd_fast.SquaredEpsilonInsensitive(0.1)
cases = [
(0.0, 0.0, 0.0), (0.1, 0.0, 0.0), (-2.05, -2.0, 0.0),
(3.05, 3.0, 0.0), (2.2, 2.0, 0.2), (2.0, -1.0, 5.8),
(2.0, 2.2, -0.2), (-2.0, 1.0, -5.8)
]
_test_gradient_common(loss, cases)
| bsd-3-clause |
hasecbinusr/pysal | pysal/contrib/pdio/dbf.py | 7 | 6661 | """miscellaneous file manipulation utilities
"""
import numpy as np
import pysal as ps
import pandas as pd
def check_dups(li):
"""checks duplicates in list of ID values
ID values must be read in as a list
__author__ = "Luc Anselin <luc.anselin@asu.edu> "
Arguments
---------
li : list of ID values
Returns
-------
a list with the duplicate IDs
"""
return list(set([x for x in li if li.count(x) > 1]))
def dbfdups(dbfpath,idvar):
"""checks duplicates in a dBase file
ID variable must be specified correctly
__author__ = "Luc Anselin <luc.anselin@asu.edu> "
Arguments
---------
dbfpath : file path to dBase file
idvar : ID variable in dBase file
Returns
-------
a list with the duplicate IDs
"""
    db = ps.open(dbfpath,'r')
    li = db.by_col(idvar)
    db.close()
    return list(set([x for x in li if li.count(x) > 1]))
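# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of how check_dups and dbfdups might be used; the
# file path "counties.dbf" and the ID column "FIPS" are hypothetical.
def _example_check_duplicates():
    ids = [1, 2, 2, 3, 3, 3]
    dups = check_dups(ids)            # -> [2, 3] (order not guaranteed)
    # for a dBase file on disk the equivalent call would be:
    # dups = dbfdups("counties.dbf", "FIPS")
    return dups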
def df2dbf(df, dbf_path, my_specs=None):
'''
Convert a pandas.DataFrame into a dbf.
__author__ = "Dani Arribas-Bel <darribas@asu.edu>, Luc Anselin <luc.anselin@asu.edu>"
...
Arguments
---------
df : DataFrame
Pandas dataframe object to be entirely written out to a dbf
dbf_path : str
Path to the output dbf. It is also returned by the function
my_specs : list
List with the field_specs to use for each column.
Defaults to None and applies the following scheme:
                 * int: ('N', 20, 0) - for all ints
                 * float: ('N', 36, 15) - for all floats
* str: ('C', 14, 0) - for string, object and category
with all variants for different type sizes
    Note: use of dtypes.name may not be fully robust, but the preferred approach of using
    isinstance seems too clumsy
'''
if my_specs:
specs = my_specs
else:
"""
type2spec = {int: ('N', 20, 0),
np.int64: ('N', 20, 0),
np.int32: ('N', 20, 0),
np.int16: ('N', 20, 0),
np.int8: ('N', 20, 0),
float: ('N', 36, 15),
np.float64: ('N', 36, 15),
np.float32: ('N', 36, 15),
str: ('C', 14, 0)
}
types = [type(df[i].iloc[0]) for i in df.columns]
"""
# new approach using dtypes.name to avoid numpy name issue in type
type2spec = {'int': ('N', 20, 0),
'int8': ('N', 20, 0),
'int16': ('N', 20, 0),
'int32': ('N', 20, 0),
'int64': ('N', 20, 0),
'float': ('N', 36, 15),
'float32': ('N', 36, 15),
'float64': ('N', 36, 15),
'str': ('C', 14, 0),
'object': ('C', 14, 0),
'category': ('C', 14, 0)
}
types = [df[i].dtypes.name for i in df.columns]
specs = [type2spec[t] for t in types]
db = ps.open(dbf_path, 'w')
db.header = list(df.columns)
db.field_spec = specs
for i, row in df.T.iteritems():
db.write(row)
db.close()
return dbf_path
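# --- Illustrative usage sketch (not part of the original module) ---
# Writing a small DataFrame out with df2dbf and the default field specs; the
# output path "tmp_example.dbf" is a hypothetical placeholder.
def _example_df2dbf():
    df = pd.DataFrame({"ID": [1, 2, 3], "NAME": ["a", "b", "c"]})
    return df2dbf(df, "tmp_example.dbf")   # returns the output path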
def dbf2df(dbf_path, index=None, cols=False, incl_index=False):
'''
Read a dbf file as a pandas.DataFrame, optionally selecting the index
variable and which columns are to be loaded.
__author__ = "Dani Arribas-Bel <darribas@asu.edu> "
...
Arguments
---------
dbf_path : str
Path to the DBF file to be read
index : str
Name of the column to be used as the index of the DataFrame
cols : list
List with the names of the columns to be read into the
DataFrame. Defaults to False, which reads the whole dbf
incl_index : Boolean
If True index is included in the DataFrame as a
column too. Defaults to False
Returns
-------
df : DataFrame
pandas.DataFrame object created
'''
db = ps.open(dbf_path)
if cols:
if incl_index:
cols.append(index)
vars_to_read = cols
else:
vars_to_read = db.header
data = dict([(var, db.by_col(var)) for var in vars_to_read])
if index:
index = db.by_col(index)
db.close()
return pd.DataFrame(data, index=index, columns=vars_to_read)
else:
db.close()
return pd.DataFrame(data,columns=vars_to_read)
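# --- Illustrative usage sketch (not part of the original module) ---
# Reading a dbf back into a DataFrame, selecting columns and an index; the
# path and column names are hypothetical and assume the sketch above ran.
def _example_dbf2df():
    return dbf2df("tmp_example.dbf", index="ID", cols=["NAME"],
                  incl_index=True)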
def dbfjoin(dbf1_path,dbf2_path,out_path,joinkey1,joinkey2):
'''
Wrapper function to merge two dbf files into a new dbf file.
__author__ = "Luc Anselin <luc.anselin@asu.edu> "
Uses dbf2df and df2dbf to read and write the dbf files into a pandas
DataFrame. Uses all default settings for dbf2df and df2dbf (see docs
for specifics).
...
Arguments
---------
dbf1_path : str
Path to the first (left) dbf file
dbf2_path : str
Path to the second (right) dbf file
out_path : str
Path to the output dbf file (returned by the function)
joinkey1 : str
Variable name for the key in the first dbf. Must be specified.
Key must take unique values.
joinkey2 : str
Variable name for the key in the second dbf. Must be specified.
Key must take unique values.
Returns
-------
dbfpath : path to output file
'''
df1 = dbf2df(dbf1_path,index=joinkey1)
df2 = dbf2df(dbf2_path,index=joinkey2)
dfbig = pd.merge(df1,df2,left_on=joinkey1,right_on=joinkey2,sort=False)
dp = df2dbf(dfbig,out_path)
return dp
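# --- Illustrative usage sketch (not part of the original module) ---
# Merging two dbf files on a shared key; every path and key name here is a
# hypothetical placeholder.
def _example_dbfjoin():
    return dbfjoin("counties.dbf", "population.dbf", "merged.dbf",
                   "FIPS", "FIPS")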
def dta2dbf(dta_path,dbf_path):
"""
Wrapper function to convert a stata dta file into a dbf file.
__author__ = "Luc Anselin <luc.anselin@asu.edu> "
Uses df2dbf to write the dbf files from a pandas
DataFrame. Uses all default settings for df2dbf (see docs
for specifics).
...
Arguments
---------
dta_path : str
Path to the Stata dta file
dbf_path : str
Path to the output dbf file
Returns
-------
dbf_path : path to output file
"""
db = pd.read_stata(dta_path)
dp = df2dbf(db,dbf_path)
return dp
| bsd-3-clause |
rubennj/pvlib-python | docs/sphinx/sphinxext/numpydoc/docscrape_sphinx.py | 41 | 9437 | from __future__ import division, absolute_import, print_function
import sys, re, inspect, textwrap, pydoc
import sphinx
import collections
from .docscrape import NumpyDocString, FunctionDoc, ClassDoc
if sys.version_info[0] >= 3:
sixu = lambda s: s
else:
sixu = lambda s: unicode(s, 'unicode_escape')
class SphinxDocString(NumpyDocString):
def __init__(self, docstring, config={}):
NumpyDocString.__init__(self, docstring, config=config)
self.load_config(config)
def load_config(self, config):
self.use_plots = config.get('use_plots', False)
self.class_members_toctree = config.get('class_members_toctree', True)
# string conversion routines
def _str_header(self, name, symbol='`'):
return ['.. rubric:: ' + name, '']
def _str_field_list(self, name):
return [':' + name + ':']
def _str_indent(self, doc, indent=4):
out = []
for line in doc:
out += [' '*indent + line]
return out
def _str_signature(self):
return ['']
if self['Signature']:
return ['``%s``' % self['Signature']] + ['']
else:
return ['']
def _str_summary(self):
return self['Summary'] + ['']
def _str_extended_summary(self):
return self['Extended Summary'] + ['']
def _str_returns(self):
out = []
if self['Returns']:
out += self._str_field_list('Returns')
out += ['']
for param, param_type, desc in self['Returns']:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent([param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
def _str_param_list(self, name):
out = []
if self[name]:
out += self._str_field_list(name)
out += ['']
for param, param_type, desc in self[name]:
if param_type:
out += self._str_indent(['**%s** : %s' % (param.strip(),
param_type)])
else:
out += self._str_indent(['**%s**' % param.strip()])
if desc:
out += ['']
out += self._str_indent(desc, 8)
out += ['']
return out
@property
def _obj(self):
if hasattr(self, '_cls'):
return self._cls
elif hasattr(self, '_f'):
return self._f
return None
def _str_member_list(self, name):
"""
        Generate a member listing: an autosummary:: table where possible,
        and a plain table where not.
"""
out = []
if self[name]:
out += ['.. rubric:: %s' % name, '']
prefix = getattr(self, '_name', '')
if prefix:
prefix = '~%s.' % prefix
autosum = []
others = []
for param, param_type, desc in self[name]:
param = param.strip()
# Check if the referenced member can have a docstring or not
param_obj = getattr(self._obj, param, None)
if not (callable(param_obj)
or isinstance(param_obj, property)
or inspect.isgetsetdescriptor(param_obj)):
param_obj = None
if param_obj and (pydoc.getdoc(param_obj) or not desc):
# Referenced object has a docstring
autosum += [" %s%s" % (prefix, param)]
else:
others.append((param, param_type, desc))
if autosum:
out += ['.. autosummary::']
if self.class_members_toctree:
out += [' :toctree:']
out += [''] + autosum
if others:
maxlen_0 = max(3, max([len(x[0]) for x in others]))
hdr = sixu("=")*maxlen_0 + sixu(" ") + sixu("=")*10
fmt = sixu('%%%ds %%s ') % (maxlen_0,)
out += ['', hdr]
for param, param_type, desc in others:
desc = sixu(" ").join(x.strip() for x in desc).strip()
if param_type:
desc = "(%s) %s" % (param_type, desc)
out += [fmt % (param.strip(), desc)]
out += [hdr]
out += ['']
return out
def _str_section(self, name):
out = []
if self[name]:
out += self._str_header(name)
out += ['']
content = textwrap.dedent("\n".join(self[name])).split("\n")
out += content
out += ['']
return out
def _str_see_also(self, func_role):
out = []
if self['See Also']:
see_also = super(SphinxDocString, self)._str_see_also(func_role)
out = ['.. seealso::', '']
out += self._str_indent(see_also[2:])
return out
def _str_warnings(self):
out = []
if self['Warnings']:
out = ['.. warning::', '']
out += self._str_indent(self['Warnings'])
return out
def _str_index(self):
idx = self['index']
out = []
if len(idx) == 0:
return out
out += ['.. index:: %s' % idx.get('default','')]
for section, references in idx.items():
if section == 'default':
continue
elif section == 'refguide':
out += [' single: %s' % (', '.join(references))]
else:
out += [' %s: %s' % (section, ','.join(references))]
return out
def _str_references(self):
out = []
if self['References']:
out += self._str_header('References')
if isinstance(self['References'], str):
self['References'] = [self['References']]
out.extend(self['References'])
out += ['']
# Latex collects all references to a separate bibliography,
# so we need to insert links to it
if sphinx.__version__ >= "0.6":
out += ['.. only:: latex','']
else:
out += ['.. latexonly::','']
items = []
for line in self['References']:
m = re.match(r'.. \[([a-z0-9._-]+)\]', line, re.I)
if m:
items.append(m.group(1))
out += [' ' + ", ".join(["[%s]_" % item for item in items]), '']
return out
def _str_examples(self):
examples_str = "\n".join(self['Examples'])
if (self.use_plots and 'import matplotlib' in examples_str
and 'plot::' not in examples_str):
out = []
out += self._str_header('Examples')
out += ['.. plot::', '']
out += self._str_indent(self['Examples'])
out += ['']
return out
else:
return self._str_section('Examples')
def __str__(self, indent=0, func_role="obj"):
out = []
out += self._str_signature()
out += self._str_index() + ['']
out += self._str_summary()
out += self._str_extended_summary()
out += self._str_param_list('Parameters')
out += self._str_returns()
for param_list in ('Other Parameters', 'Raises', 'Warns'):
out += self._str_param_list(param_list)
out += self._str_warnings()
out += self._str_see_also(func_role)
out += self._str_section('Notes')
out += self._str_references()
out += self._str_examples()
for param_list in ('Attributes', 'Methods'):
out += self._str_member_list(param_list)
out = self._str_indent(out,indent)
return '\n'.join(out)
class SphinxFunctionDoc(SphinxDocString, FunctionDoc):
def __init__(self, obj, doc=None, config={}):
self.load_config(config)
FunctionDoc.__init__(self, obj, doc=doc, config=config)
class SphinxClassDoc(SphinxDocString, ClassDoc):
def __init__(self, obj, doc=None, func_doc=None, config={}):
self.load_config(config)
ClassDoc.__init__(self, obj, doc=doc, func_doc=None, config=config)
class SphinxObjDoc(SphinxDocString):
def __init__(self, obj, doc=None, config={}):
self._f = obj
self.load_config(config)
SphinxDocString.__init__(self, doc, config=config)
def get_doc_object(obj, what=None, doc=None, config={}):
if what is None:
if inspect.isclass(obj):
what = 'class'
elif inspect.ismodule(obj):
what = 'module'
elif isinstance(obj, collections.Callable):
what = 'function'
else:
what = 'object'
if what == 'class':
return SphinxClassDoc(obj, func_doc=SphinxFunctionDoc, doc=doc,
config=config)
elif what in ('function', 'method'):
return SphinxFunctionDoc(obj, doc=doc, config=config)
else:
if doc is None:
doc = pydoc.getdoc(obj)
return SphinxObjDoc(obj, doc, config=config)
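# --- Illustrative usage sketch (not part of the original module) ---
# A minimal demonstration of rendering a NumPy-style docstring to Sphinx
# reST with the classes defined above; the docstring text itself is made up.
def _example_render_numpy_docstring():
    text = "\n".join([
        "Scale a value.",
        "",
        "Parameters",
        "----------",
        "x : float",
        "    Value to scale.",
        "factor : float, optional",
        "    Multiplier applied to ``x``.",
        "",
        "Returns",
        "-------",
        "float",
        "    The scaled value.",
        "",
    ])
    return str(SphinxDocString(text))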
| bsd-3-clause |
Garrett-R/scikit-learn | sklearn/metrics/tests/test_ranking.py | 4 | 33878 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import auc
from sklearn.metrics import auc_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import roc_curve
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import roc_auc_score
from sklearn.metrics.base import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
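# --- Illustrative sketch (not part of the original test module) ---
# Hand-checkable example of the pairwise-ranking view of ROC AUC used by
# _auc above: 3 of the 4 positive/negative pairs are ordered correctly,
# so the AUC is 0.75.
def _example_auc_by_hand():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    assert_almost_equal(_auc(y_true, y_score), 0.75)
    assert_almost_equal(roc_auc_score(y_true, y_score), 0.75)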
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
            # i.e., the percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
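# --- Illustrative sketch (not part of the original test module) ---
# Hand-checkable value for _average_precision above: the two relevant items
# are ranked 1st and 3rd, so the score is (1/1 + 2/3) / 2.
def _example_average_precision_by_hand():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    assert_almost_equal(_average_precision(y_true, y_score),
                        (1.0 + 2.0 / 3.0) / 2)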
def test_roc_curve():
"""Test Area under Receiver Operating Characteristic (ROC) curve"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
roc_auc = auc(fpr, tpr)
expected_auc = _auc(y_true, probas_pred)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_almost_equal(roc_auc,
ignore_warnings(auc_score)(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at
    # 1 even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
"""Test whether the returned threshold matches up with tpr"""
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
"""Test to ensure that we don't return spurious repeating thresholds.
Duplicated thresholds can arise due to machine precision issues.
"""
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
"""roc_curve not applicable for multi-class problems"""
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
"""roc_curve for confidence scores"""
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
"""roc_curve for hard decisions"""
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
    # Binary classification
    # NOTE: roc_curve returns (fpr, tpr, thresholds); the unpacking below
    # swaps those two names, and the expected values are written accordingly.
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
tpr, fpr, _ = roc_curve(y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_auc():
"""Test Area Under Curve (AUC) computation"""
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
"""Test that roc_auc_score function returns an error when trying
to compute AUC for non-binary class values.
"""
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
"""Test Precision-Recall and aread under PR curve"""
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
f = ignore_warnings(auc_score)
roc_auc = f(y_true, probas_pred)
roc_auc_scaled = f(y_true, 100 * probas_pred)
roc_auc_shifted = f(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
"""Check on several small example that it works """
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
    # Check that y_true.shape != y_score.shape raises the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
"""Check tie handling in score"""
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
# Check for growing number of consecutive relevant
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
""" Check that Label ranking average precision works for various"""
# Basic check with increasing label space size and decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for growing number of consecutive relevant labels
        for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
# Rank need to be corrected to take into account ties
        # ex: rank 1 ex aequo means that both labels are ranked 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
            # Let's count the number of relevant labels with a better rank
# (smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
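# --- Illustrative sketch (not part of the original test module) ---
# Hand-checkable LRAP value: the single relevant label of the first sample
# is ranked 2nd (-> 1/2) and that of the second sample 3rd (-> 1/3), so the
# score is (1/2 + 1/3) / 2 = 5/12.
def _example_lrap_by_hand():
    y_true = np.array([[1, 0, 0], [0, 0, 1]])
    y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
    assert_almost_equal(_my_lrap(y_true, y_score), 5.0 / 12.0)
    assert_almost_equal(
        label_ranking_average_precision_score(y_true, y_score), 5.0 / 12.0)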
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
return_indicator=True,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
| bsd-3-clause |
bgris/ODL_bgris | lib/python3.5/site-packages/matplotlib/patches.py | 6 | 148732 | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map, zip
import math
import matplotlib as mpl
import numpy as np
import matplotlib.cbook as cbook
import matplotlib.artist as artist
from matplotlib.artist import allow_rasterization
import matplotlib.colors as colors
from matplotlib import docstring
import matplotlib.transforms as transforms
from matplotlib.path import Path
import matplotlib.lines as mlines
from matplotlib.bezier import split_bezier_intersecting_with_closedpath
from matplotlib.bezier import get_intersection, inside_circle, get_parallels
from matplotlib.bezier import make_wedged_bezier2
from matplotlib.bezier import split_path_inout, get_cos_sin
from matplotlib.bezier import make_path_regular, concatenate_paths
# these are not available for the object inspector until after the
# class is built so we define an initial set here for the init
# function and they will be overridden after object definition
docstring.interpd.update(Patch="""
================= ==============================================
Property Description
================= ==============================================
alpha float
animated [True | False]
antialiased or aa [True | False]
capstyle ['butt' | 'round' | 'projecting']
clip_box a matplotlib.transform.Bbox instance
clip_on [True | False]
edgecolor or ec any matplotlib color
facecolor or fc any matplotlib color
figure a matplotlib.figure.Figure instance
fill [True | False]
hatch unknown
joinstyle ['miter' | 'round' | 'bevel']
label any string
linewidth or lw float
lod [True | False]
transform a matplotlib.transform transformation instance
visible [True | False]
zorder any number
================= ==============================================
""")
_patch_alias_map = {
'antialiased': ['aa'],
'edgecolor': ['ec'],
'facecolor': ['fc'],
'linewidth': ['lw'],
'linestyle': ['ls']
}
class Patch(artist.Artist):
"""
A patch is a 2D artist with a face color and an edge color.
If any of *edgecolor*, *facecolor*, *linewidth*, or *antialiased*
are *None*, they default to their rc params setting.
"""
zorder = 1
validCap = ('butt', 'round', 'projecting')
validJoin = ('miter', 'round', 'bevel')
# Whether to draw an edge by default. Set on a
# subclass-by-subclass basis.
_edge_default = False
def __str__(self):
return str(self.__class__).split('.')[-1]
def __init__(self,
edgecolor=None,
facecolor=None,
color=None,
linewidth=None,
linestyle=None,
antialiased=None,
hatch=None,
fill=True,
capstyle=None,
joinstyle=None,
**kwargs):
"""
The following kwarg properties are supported
%(Patch)s
"""
artist.Artist.__init__(self)
if linewidth is None:
linewidth = mpl.rcParams['patch.linewidth']
if linestyle is None:
linestyle = "solid"
if capstyle is None:
capstyle = 'butt'
if joinstyle is None:
joinstyle = 'miter'
if antialiased is None:
antialiased = mpl.rcParams['patch.antialiased']
self._fill = True # needed for set_facecolor call
if color is not None:
if (edgecolor is not None or facecolor is not None):
import warnings
warnings.warn("Setting the 'color' property will override"
"the edgecolor or facecolor properties. ")
self.set_color(color)
else:
self.set_edgecolor(edgecolor)
self.set_facecolor(facecolor)
# unscaled dashes. Needed to scale dash patterns by lw
self._us_dashes = None
self._linewidth = 0
self.set_fill(fill)
self.set_linestyle(linestyle)
self.set_linewidth(linewidth)
self.set_antialiased(antialiased)
self.set_hatch(hatch)
self.set_capstyle(capstyle)
self.set_joinstyle(joinstyle)
self._combined_transform = transforms.IdentityTransform()
if len(kwargs):
self.update(kwargs)
def get_verts(self):
"""
Return a copy of the vertices used in this patch
If the patch contains Bezier curves, the curves will be
interpolated by line segments. To access the curves as
curves, use :meth:`get_path`.
"""
trans = self.get_transform()
path = self.get_path()
polygons = path.to_polygons(trans)
if len(polygons):
return polygons[0]
return []
def _process_radius(self, radius):
if radius is not None:
return radius
if cbook.is_numlike(self._picker):
_radius = self._picker
else:
if self.get_edgecolor()[3] == 0:
_radius = 0
else:
_radius = self.get_linewidth()
return _radius
def contains(self, mouseevent, radius=None):
"""Test whether the mouse event occurred in the patch.
Returns T/F, {}
"""
if six.callable(self._contains):
return self._contains(self, mouseevent)
radius = self._process_radius(radius)
inside = self.get_path().contains_point(
(mouseevent.x, mouseevent.y), self.get_transform(), radius)
return inside, {}
def contains_point(self, point, radius=None):
"""
Returns *True* if the given point is inside the path
(transformed with its transform attribute).
"""
radius = self._process_radius(radius)
return self.get_path().contains_point(point,
self.get_transform(),
radius)
def update_from(self, other):
"""
Updates this :class:`Patch` from the properties of *other*.
"""
artist.Artist.update_from(self, other)
# For some properties we don't need or don't want to go through the
# getters/setters, so we just copy them directly.
self._edgecolor = other._edgecolor
self._facecolor = other._facecolor
self._fill = other._fill
self._hatch = other._hatch
# copy the unscaled dash pattern
self._us_dashes = other._us_dashes
self.set_linewidth(other._linewidth) # also sets dash properties
self.set_transform(other.get_data_transform())
def get_extents(self):
"""
Return a :class:`~matplotlib.transforms.Bbox` object defining
the axis-aligned extents of the :class:`Patch`.
"""
return self.get_path().get_extents(self.get_transform())
def get_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` applied
to the :class:`Patch`.
"""
return self.get_patch_transform() + artist.Artist.get_transform(self)
def get_data_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
maps data coordinates to physical coordinates.
"""
return artist.Artist.get_transform(self)
def get_patch_transform(self):
"""
Return the :class:`~matplotlib.transforms.Transform` instance which
takes patch coordinates to data coordinates.
For example, one may define a patch of a circle which represents a
radius of 5 by providing coordinates for a unit circle, and a
transform which scales the coordinates (the patch coordinate) by 5.
"""
return transforms.IdentityTransform()
def get_antialiased(self):
"""
Returns True if the :class:`Patch` is to be drawn with antialiasing.
"""
return self._antialiased
get_aa = get_antialiased
def get_edgecolor(self):
"""
Return the edge color of the :class:`Patch`.
"""
return self._edgecolor
get_ec = get_edgecolor
def get_facecolor(self):
"""
Return the face color of the :class:`Patch`.
"""
return self._facecolor
get_fc = get_facecolor
def get_linewidth(self):
"""
Return the line width in points.
"""
return self._linewidth
get_lw = get_linewidth
def get_linestyle(self):
"""
Return the linestyle. Will be one of ['solid' | 'dashed' |
'dashdot' | 'dotted']
"""
return self._linestyle
get_ls = get_linestyle
def set_antialiased(self, aa):
"""
Set whether to use antialiased rendering
ACCEPTS: [True | False] or None for default
"""
if aa is None:
aa = mpl.rcParams['patch.antialiased']
self._antialiased = aa
self.stale = True
def set_aa(self, aa):
"""alias for set_antialiased"""
return self.set_antialiased(aa)
def _set_edgecolor(self, color):
if color is None:
if (mpl.rcParams['patch.force_edgecolor'] or
not self._fill or self._edge_default):
color = mpl.rcParams['patch.edgecolor']
else:
color = 'none'
self._edgecolor = colors.to_rgba(color, self._alpha)
self.stale = True
def set_edgecolor(self, color):
"""
Set the patch edge color
ACCEPTS: mpl color spec, None, 'none', or 'auto'
"""
self._original_edgecolor = color
self._set_edgecolor(color)
def set_ec(self, color):
"""alias for set_edgecolor"""
return self.set_edgecolor(color)
def _set_facecolor(self, color):
if color is None:
color = mpl.rcParams['patch.facecolor']
alpha = self._alpha if self._fill else 0
self._facecolor = colors.to_rgba(color, alpha)
self.stale = True
def set_facecolor(self, color):
"""
Set the patch face color
ACCEPTS: mpl color spec, or None for default, or 'none' for no color
"""
self._original_facecolor = color
self._set_facecolor(color)
def set_fc(self, color):
"""alias for set_facecolor"""
return self.set_facecolor(color)
def set_color(self, c):
"""
Set both the edgecolor and the facecolor.
ACCEPTS: matplotlib color spec
.. seealso::
:meth:`set_facecolor`, :meth:`set_edgecolor`
For setting the edge or face color individually.
"""
self.set_facecolor(c)
self.set_edgecolor(c)
def set_alpha(self, alpha):
"""
        Set the alpha transparency of the patch.
ACCEPTS: float or None
"""
if alpha is not None:
try:
float(alpha)
except TypeError:
raise TypeError('alpha must be a float or None')
artist.Artist.set_alpha(self, alpha)
self._set_facecolor(self._original_facecolor)
self._set_edgecolor(self._original_edgecolor)
# stale is already True
def set_linewidth(self, w):
"""
Set the patch linewidth in points
ACCEPTS: float or None for default
"""
if w is None:
w = mpl.rcParams['patch.linewidth']
if w is None:
w = mpl.rcParams['axes.linewidth']
self._linewidth = float(w)
# scale the dash pattern by the linewidth
offset, ls = self._us_dashes
self._dashoffset, self._dashes = mlines._scale_dashes(
offset, ls, self._linewidth)
self.stale = True
def set_lw(self, lw):
"""alias for set_linewidth"""
return self.set_linewidth(lw)
def set_linestyle(self, ls):
"""
Set the patch linestyle
=========================== =================
linestyle description
=========================== =================
``'-'`` or ``'solid'`` solid line
``'--'`` or ``'dashed'`` dashed line
``'-.'`` or ``'dashdot'`` dash-dotted line
``':'`` or ``'dotted'`` dotted line
=========================== =================
Alternatively a dash tuple of the following form can be provided::
(offset, onoffseq),
where ``onoffseq`` is an even length tuple of on and off ink
in points.
ACCEPTS: ['solid' | 'dashed', 'dashdot', 'dotted' |
(offset, on-off-dash-seq) |
``'-'`` | ``'--'`` | ``'-.'`` | ``':'`` | ``'None'`` |
``' '`` | ``''``]
Parameters
----------
ls : { '-', '--', '-.', ':'} and more see description
The line style.
"""
if ls is None:
ls = "solid"
self._linestyle = ls
        # get the unscaled dash pattern
offset, ls = self._us_dashes = mlines._get_dash_pattern(ls)
# scale the dash pattern by the linewidth
self._dashoffset, self._dashes = mlines._scale_dashes(
offset, ls, self._linewidth)
self.stale = True
def set_ls(self, ls):
"""alias for set_linestyle"""
return self.set_linestyle(ls)
def set_fill(self, b):
"""
Set whether to fill the patch
ACCEPTS: [True | False]
"""
self._fill = bool(b)
self._set_facecolor(self._original_facecolor)
self._set_edgecolor(self._original_edgecolor)
self.stale = True
def get_fill(self):
'return whether fill is set'
return self._fill
# Make fill a property so as to preserve the long-standing
# but somewhat inconsistent behavior in which fill was an
# attribute.
fill = property(get_fill, set_fill)
def set_capstyle(self, s):
"""
Set the patch capstyle
ACCEPTS: ['butt' | 'round' | 'projecting']
"""
s = s.lower()
if s not in self.validCap:
raise ValueError('set_capstyle passed "%s";\n' % (s,) +
'valid capstyles are %s' % (self.validCap,))
self._capstyle = s
self.stale = True
def get_capstyle(self):
"Return the current capstyle"
return self._capstyle
def set_joinstyle(self, s):
"""
Set the patch joinstyle
ACCEPTS: ['miter' | 'round' | 'bevel']
"""
s = s.lower()
if s not in self.validJoin:
raise ValueError('set_joinstyle passed "%s";\n' % (s,) +
'valid joinstyles are %s' % (self.validJoin,))
self._joinstyle = s
self.stale = True
def get_joinstyle(self):
"Return the current joinstyle"
return self._joinstyle
def set_hatch(self, hatch):
"""
Set the hatching pattern
*hatch* can be one of::
/ - diagonal hatching
\ - back diagonal
| - vertical
- - horizontal
+ - crossed
x - crossed diagonal
o - small circle
O - large circle
. - dots
* - stars
Letters can be combined, in which case all the specified
hatchings are done. If same letter repeats, it increases the
density of hatching of that pattern.
Hatching is supported in the PostScript, PDF, SVG and Agg
backends only.
ACCEPTS: ['/' | '\\\\' | '|' | '-' | '+' | 'x' | 'o' | 'O' | '.' | '*']
"""
self._hatch = hatch
self.stale = True
def get_hatch(self):
'Return the current hatching pattern'
return self._hatch
@allow_rasterization
def draw(self, renderer):
'Draw the :class:`Patch` to the given *renderer*.'
if not self.get_visible():
return
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_dashes(0, self._dashes)
gc.set_capstyle(self._capstyle)
gc.set_joinstyle(self._joinstyle)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_url(self._url)
gc.set_snap(self.get_snap())
rgbFace = self._facecolor
if rgbFace[3] == 0:
rgbFace = None # (some?) renderers expect this as no-fill signal
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
path = self.get_path()
transform = self.get_transform()
tpath = transform.transform_path_non_affine(path)
affine = transform.get_affine()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
renderer.draw_path(gc, tpath, affine, rgbFace)
gc.restore()
renderer.close_group('patch')
self.stale = False
def get_path(self):
"""
Return the path of this patch
"""
raise NotImplementedError('Derived must override')
def get_window_extent(self, renderer=None):
return self.get_path().get_extents(self.get_transform())
patchdoc = artist.kwdoc(Patch)
for k in ('Rectangle', 'Circle', 'RegularPolygon', 'Polygon', 'Wedge', 'Arrow',
'FancyArrow', 'YAArrow', 'CirclePolygon', 'Ellipse', 'Arc',
'FancyBboxPatch', 'Patch'):
docstring.interpd.update({k: patchdoc})
# define Patch.__init__ docstring after the class has been added to interpd
docstring.dedent_interpd(Patch.__init__)
class Shadow(Patch):
def __str__(self):
return "Shadow(%s)" % (str(self.patch))
@docstring.dedent_interpd
def __init__(self, patch, ox, oy, props=None, **kwargs):
"""
Create a shadow of the given *patch* offset by *ox*, *oy*.
*props*, if not *None*, is a patch property update dictionary.
        If *None*, the shadow will have the same color as the face,
but darkened.
kwargs are
%(Patch)s
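        A minimal usage sketch (illustrative only; ``ax`` is assumed to be
        an existing :class:`~matplotlib.axes.Axes`)::
            rect = Rectangle((0.2, 0.2), 0.5, 0.3, facecolor='lightblue')
            shadow = Shadow(rect, ox=3, oy=-3)  # offset is given in points
            ax.add_patch(shadow)                # draw the shadow first
            ax.add_patch(rect)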
"""
Patch.__init__(self)
self.patch = patch
self.props = props
self._ox, self._oy = ox, oy
self._shadow_transform = transforms.Affine2D()
self._update()
def _update(self):
self.update_from(self.patch)
if self.props is not None:
self.update(self.props)
else:
r, g, b, a = colors.to_rgba(self.patch.get_facecolor())
rho = 0.3
r = rho * r
g = rho * g
b = rho * b
self.set_facecolor((r, g, b, 0.5))
self.set_edgecolor((r, g, b, 0.5))
self.set_alpha(0.5)
def _update_transform(self, renderer):
ox = renderer.points_to_pixels(self._ox)
oy = renderer.points_to_pixels(self._oy)
self._shadow_transform.clear().translate(ox, oy)
def _get_ox(self):
return self._ox
def _set_ox(self, ox):
self._ox = ox
def _get_oy(self):
return self._oy
def _set_oy(self, oy):
self._oy = oy
def get_path(self):
return self.patch.get_path()
def get_patch_transform(self):
return self.patch.get_patch_transform() + self._shadow_transform
def draw(self, renderer):
self._update_transform(renderer)
Patch.draw(self, renderer)
class Rectangle(Patch):
"""
Draw a rectangle with lower left at *xy* = (*x*, *y*) with
specified *width* and *height*.
"""
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y, self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*angle*
rotation in degrees (anti-clockwise)
*fill* is a boolean indicating whether to fill the rectangle
Valid kwargs are:
%(Patch)s
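        A minimal usage sketch (illustrative only; ``ax`` is assumed to be
        an existing :class:`~matplotlib.axes.Axes`)::
            rect = Rectangle((0.1, 0.1), width=0.5, height=0.3, angle=30.0,
                             facecolor='none', edgecolor='k')
            ax.add_patch(rect)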
"""
Patch.__init__(self, **kwargs)
self._x = float(xy[0])
self._y = float(xy[1])
self._width = float(width)
self._height = float(height)
self._angle = float(angle)
# Note: This cannot be calculated until this is added to an Axes
self._rect_transform = transforms.IdentityTransform()
def get_path(self):
"""
        Return the path of the rectangle
"""
return Path.unit_rectangle()
def _update_patch_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
                 makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
x = self.convert_xunits(self._x)
y = self.convert_yunits(self._y)
width = self.convert_xunits(self._width)
height = self.convert_yunits(self._height)
bbox = transforms.Bbox.from_bounds(x, y, width, height)
rot_trans = transforms.Affine2D()
rot_trans.rotate_deg_around(x, y, self._angle)
self._rect_transform = transforms.BboxTransformTo(bbox)
self._rect_transform += rot_trans
def get_patch_transform(self):
self._update_patch_transform()
return self._rect_transform
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_xy(self):
"Return the left and bottom coords of the rectangle"
return self._x, self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
self.stale = True
def set_xy(self, xy):
"""
Set the left and bottom coords of the rectangle
ACCEPTS: 2-item sequence
"""
self._x, self._y = xy
self.stale = True
def set_width(self, w):
"""
        Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
self.stale = True
def set_height(self, h):
"""
        Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
self.stale = True
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
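        Both call forms below are equivalent (illustrative)::
            rect.set_bounds(0.0, 0.0, 2.0, 1.0)
            rect.set_bounds((0.0, 0.0, 2.0, 1.0))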
"""
        if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
self.stale = True
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
xy = property(get_xy, set_xy)
class RegularPolygon(Patch):
"""
A regular polygon patch.
"""
def __str__(self):
return "Poly%d(%g,%g)" % (self._numVertices, self._xy[0], self._xy[1])
@docstring.dedent_interpd
def __init__(self, xy, numVertices, radius=5, orientation=0,
**kwargs):
"""
Constructor arguments:
*xy*
A length 2 tuple (*x*, *y*) of the center.
*numVertices*
the number of vertices.
*radius*
The distance from the center to each of the vertices.
*orientation*
rotates the polygon (in radians).
Valid kwargs are:
%(Patch)s
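        A minimal usage sketch (illustrative only; ``ax`` is assumed to be
        an existing Axes and numpy is assumed to be imported as ``np``)::
            hexagon = RegularPolygon((0.5, 0.5), numVertices=6, radius=0.2,
                                     orientation=np.pi / 6)
            ax.add_patch(hexagon)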
"""
self._xy = xy
self._numVertices = numVertices
self._orientation = orientation
self._radius = radius
self._path = Path.unit_regular_polygon(numVertices)
self._poly_transform = transforms.Affine2D()
self._update_transform()
Patch.__init__(self, **kwargs)
def _update_transform(self):
self._poly_transform.clear() \
.scale(self.radius) \
.rotate(self.orientation) \
.translate(*self.xy)
def _get_xy(self):
return self._xy
def _set_xy(self, xy):
self._xy = xy
self._update_transform()
xy = property(_get_xy, _set_xy)
def _get_orientation(self):
return self._orientation
def _set_orientation(self, orientation):
self._orientation = orientation
self._update_transform()
orientation = property(_get_orientation, _set_orientation)
def _get_radius(self):
return self._radius
def _set_radius(self, radius):
self._radius = radius
self._update_transform()
radius = property(_get_radius, _set_radius)
def _get_numvertices(self):
return self._numVertices
def _set_numvertices(self, numVertices):
self._numVertices = numVertices
numvertices = property(_get_numvertices, _set_numvertices)
def get_path(self):
return self._path
def get_patch_transform(self):
self._update_transform()
return self._poly_transform
class PathPatch(Patch):
"""
A general polycurve path patch.
"""
_edge_default = True
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
@docstring.dedent_interpd
def __init__(self, path, **kwargs):
"""
*path* is a :class:`matplotlib.path.Path` object.
Valid kwargs are:
%(Patch)s
.. seealso::
:class:`Patch`
For additional kwargs
"""
Patch.__init__(self, **kwargs)
self._path = path
def get_path(self):
return self._path
class Polygon(Patch):
"""
A general polygon patch.
"""
def __str__(self):
return "Poly((%g, %g) ...)" % tuple(self._path.vertices[0])
@docstring.dedent_interpd
def __init__(self, xy, closed=True, **kwargs):
"""
*xy* is a numpy array with shape Nx2.
If *closed* is *True*, the polygon will be closed so the
starting and ending points are the same.
Valid kwargs are:
%(Patch)s
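        A minimal usage sketch (illustrative only; ``ax`` is assumed to be
        an existing Axes; a plain sequence of (x, y) pairs is converted to
        the required Nx2 array)::
            triangle = Polygon([(0.1, 0.1), (0.9, 0.1), (0.5, 0.8)],
                               closed=True, facecolor='0.8')
            ax.add_patch(triangle)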
.. seealso::
:class:`Patch`
For additional kwargs
"""
Patch.__init__(self, **kwargs)
self._closed = closed
self.set_xy(xy)
def get_path(self):
"""
Get the path of the polygon
Returns
-------
path : Path
The :class:`~matplotlib.path.Path` object for
the polygon
"""
return self._path
def get_closed(self):
"""
Returns if the polygon is closed
Returns
-------
closed : bool
If the path is closed
"""
return self._closed
def set_closed(self, closed):
"""
Set if the polygon is closed
Parameters
----------
closed : bool
True if the polygon is closed
"""
if self._closed == bool(closed):
return
self._closed = bool(closed)
self.set_xy(self.get_xy())
self.stale = True
def get_xy(self):
"""
Get the vertices of the path
Returns
-------
vertices : numpy array
The coordinates of the vertices as a Nx2
ndarray.
"""
return self._path.vertices
def set_xy(self, xy):
"""
Set the vertices of the polygon
Parameters
----------
xy : numpy array or iterable of pairs
The coordinates of the vertices as a Nx2
ndarray or iterable of pairs.
"""
xy = np.asarray(xy)
if self._closed:
if len(xy) and (xy[0] != xy[-1]).any():
xy = np.concatenate([xy, [xy[0]]])
else:
if len(xy) > 2 and (xy[0] == xy[-1]).all():
xy = xy[:-1]
self._path = Path(xy, closed=self._closed)
self.stale = True
_get_xy = get_xy
_set_xy = set_xy
xy = property(
get_xy, set_xy, None,
"""Set/get the vertices of the polygon. This property is
provided for backward compatibility with matplotlib 0.91.x
only. New code should use
:meth:`~matplotlib.patches.Polygon.get_xy` and
:meth:`~matplotlib.patches.Polygon.set_xy` instead.""")
class Wedge(Patch):
"""
Wedge shaped patch.
"""
def __str__(self):
return "Wedge(%g,%g)" % (self.theta1, self.theta2)
@docstring.dedent_interpd
def __init__(self, center, r, theta1, theta2, width=None, **kwargs):
"""
Draw a wedge centered at *x*, *y* center with radius *r* that
sweeps *theta1* to *theta2* (in degrees). If *width* is given,
then a partial wedge is drawn from inner radius *r* - *width*
to outer radius *r*.
Valid kwargs are:
%(Patch)s
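        A minimal usage sketch (illustrative only; ``ax`` is assumed to be
        an existing Axes)::
            full = Wedge((0.3, 0.5), r=0.2, theta1=0, theta2=270)
            ring = Wedge((0.7, 0.5), r=0.2, theta1=0, theta2=270, width=0.05)
            ax.add_patch(full)
            ax.add_patch(ring)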
"""
Patch.__init__(self, **kwargs)
self.center = center
self.r, self.width = r, width
self.theta1, self.theta2 = theta1, theta2
self._patch_transform = transforms.IdentityTransform()
self._recompute_path()
def _recompute_path(self):
# Inner and outer rings are connected unless the annulus is complete
if abs((self.theta2 - self.theta1) - 360) <= 1e-12:
theta1, theta2 = 0, 360
connector = Path.MOVETO
else:
theta1, theta2 = self.theta1, self.theta2
connector = Path.LINETO
# Form the outer ring
arc = Path.arc(theta1, theta2)
if self.width is not None:
# Partial annulus needs to draw the outer ring
# followed by a reversed and scaled inner ring
v1 = arc.vertices
v2 = arc.vertices[::-1] * float(self.r - self.width) / self.r
v = np.vstack([v1, v2, v1[0, :], (0, 0)])
c = np.hstack([arc.codes, arc.codes, connector, Path.CLOSEPOLY])
c[len(arc.codes)] = connector
else:
# Wedge doesn't need an inner ring
v = np.vstack([arc.vertices, [(0, 0), arc.vertices[0, :], (0, 0)]])
c = np.hstack([arc.codes, [connector, connector, Path.CLOSEPOLY]])
# Shift and scale the wedge to the final location.
v *= self.r
v += np.asarray(self.center)
self._path = Path(v, c)
def set_center(self, center):
self._path = None
self.center = center
self.stale = True
def set_radius(self, radius):
self._path = None
self.r = radius
self.stale = True
def set_theta1(self, theta1):
self._path = None
self.theta1 = theta1
self.stale = True
def set_theta2(self, theta2):
self._path = None
self.theta2 = theta2
self.stale = True
def set_width(self, width):
self._path = None
self.width = width
self.stale = True
def get_path(self):
if self._path is None:
self._recompute_path()
return self._path
# COVERAGE NOTE: Not used internally or from examples
class Arrow(Patch):
"""
An arrow patch.
"""
def __str__(self):
return "Arrow()"
_path = Path([
[0.0, 0.1], [0.0, -0.1],
[0.8, -0.1], [0.8, -0.3],
[1.0, 0.0], [0.8, 0.3],
[0.8, 0.1], [0.0, 0.1]],
closed=True)
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=1.0, **kwargs):
"""
        Draws an arrow, starting at (*x*, *y*), with direction and length
        given by (*dx*, *dy*). The width of the arrow is scaled by *width*.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
L = np.hypot(dx, dy)
if L != 0:
cx = float(dx) / L
sx = float(dy) / L
else:
# Account for division by zero
cx, sx = 0, 1
trans1 = transforms.Affine2D().scale(L, width)
trans2 = transforms.Affine2D.from_values(cx, sx, -sx, cx, 0.0, 0.0)
trans3 = transforms.Affine2D().translate(x, y)
trans = trans1 + trans2 + trans3
self._patch_transform = trans.frozen()
def get_path(self):
return self._path
def get_patch_transform(self):
return self._patch_transform
class FancyArrow(Polygon):
"""
Like Arrow, but lets you set head width and head height independently.
"""
_edge_default = True
def __str__(self):
return "FancyArrow()"
@docstring.dedent_interpd
def __init__(self, x, y, dx, dy, width=0.001, length_includes_head=False,
head_width=None, head_length=None, shape='full', overhang=0,
head_starts_at_zero=False, **kwargs):
"""
Constructor arguments
*width*: float (default: 0.001)
width of full arrow tail
*length_includes_head*: [True | False] (default: False)
True if head is to be counted in calculating the length.
*head_width*: float or None (default: 3*width)
total width of the full arrow head
*head_length*: float or None (default: 1.5 * head_width)
length of arrow head
*shape*: ['full', 'left', 'right'] (default: 'full')
draw the left-half, right-half, or full arrow
*overhang*: float (default: 0)
fraction that the arrow is swept back (0 overhang means
triangular shape). Can be negative or greater than one.
*head_starts_at_zero*: [True | False] (default: False)
if True, the head starts being drawn at coordinate 0
instead of ending at coordinate 0.
Other valid kwargs (inherited from :class:`Patch`) are:
%(Patch)s
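        A minimal usage sketch (illustrative only; ``ax`` is assumed to be
        an existing Axes)::
            arrow = FancyArrow(0.1, 0.1, dx=0.6, dy=0.3, width=0.02,
                               head_width=0.06, length_includes_head=True)
            ax.add_patch(arrow)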
"""
if head_width is None:
head_width = 3 * width
if head_length is None:
head_length = 1.5 * head_width
distance = np.hypot(dx, dy)
if length_includes_head:
length = distance
else:
length = distance + head_length
if not length:
verts = [] # display nothing if empty
else:
# start by drawing horizontal arrow, point at (0,0)
hw, hl, hs, lw = head_width, head_length, overhang, width
left_half_arrow = np.array([
[0.0, 0.0], # tip
[-hl, -hw / 2.0], # leftmost
[-hl * (1 - hs), -lw / 2.0], # meets stem
[-length, -lw / 2.0], # bottom left
[-length, 0],
])
# if we're not including the head, shift up by head length
if not length_includes_head:
left_half_arrow += [head_length, 0]
# if the head starts at 0, shift up by another head length
if head_starts_at_zero:
left_half_arrow += [head_length / 2.0, 0]
# figure out the shape, and complete accordingly
if shape == 'left':
coords = left_half_arrow
else:
right_half_arrow = left_half_arrow * [1, -1]
if shape == 'right':
coords = right_half_arrow
elif shape == 'full':
# The half-arrows contain the midpoint of the stem,
# which we can omit from the full arrow. Including it
# twice caused a problem with xpdf.
coords = np.concatenate([left_half_arrow[:-2],
right_half_arrow[-2::-1]])
else:
raise ValueError("Got unknown shape: %s" % shape)
if distance != 0:
cx = float(dx) / distance
sx = float(dy) / distance
else:
                # Account for division by zero
cx, sx = 0, 1
M = np.array([[cx, sx], [-sx, cx]])
verts = np.dot(coords, M) + (x + dx, y + dy)
Polygon.__init__(self, list(map(tuple, verts)), closed=True, **kwargs)
docstring.interpd.update({"FancyArrow": FancyArrow.__init__.__doc__})
docstring.interpd.update({"FancyArrow": FancyArrow.__init__.__doc__})
class YAArrow(Patch):
"""
Yet another arrow class.
This is an arrow that is defined in display space and has a tip at
*x1*, *y1* and a base at *x2*, *y2*.
"""
def __str__(self):
return "YAArrow()"
@docstring.dedent_interpd
def __init__(self, figure, xytip, xybase,
width=4, frac=0.1, headwidth=12, **kwargs):
"""
Constructor arguments:
*xytip*
(*x*, *y*) location of arrow tip
*xybase*
(*x*, *y*) location the arrow base mid point
*figure*
The :class:`~matplotlib.figure.Figure` instance
(fig.dpi)
*width*
The width of the arrow in points
*frac*
The fraction of the arrow length occupied by the head
*headwidth*
The width of the base of the arrow head in points
Valid kwargs are:
%(Patch)s
"""
self.xytip = xytip
self.xybase = xybase
self.width = width
self.frac = frac
self.headwidth = headwidth
Patch.__init__(self, **kwargs)
# Set self.figure after Patch.__init__, since it sets self.figure to
# None
self.figure = figure
def get_path(self):
# Since this is dpi dependent, we need to recompute the path
# every time.
# the base vertices
x1, y1 = self.xytip
x2, y2 = self.xybase
k1 = self.width * self.figure.dpi / 72. / 2.
k2 = self.headwidth * self.figure.dpi / 72. / 2.
xb1, yb1, xb2, yb2 = self.getpoints(x1, y1, x2, y2, k1)
# a point on the segment 20% of the distance from the tip to the base
theta = math.atan2(y2 - y1, x2 - x1)
r = math.sqrt((y2 - y1) ** 2. + (x2 - x1) ** 2.)
xm = x1 + self.frac * r * math.cos(theta)
ym = y1 + self.frac * r * math.sin(theta)
xc1, yc1, xc2, yc2 = self.getpoints(x1, y1, xm, ym, k1)
xd1, yd1, xd2, yd2 = self.getpoints(x1, y1, xm, ym, k2)
xs = self.convert_xunits([xb1, xb2, xc2, xd2, x1, xd1, xc1, xb1])
ys = self.convert_yunits([yb1, yb2, yc2, yd2, y1, yd1, yc1, yb1])
return Path(list(zip(xs, ys)), closed=True)
def get_patch_transform(self):
return transforms.IdentityTransform()
def getpoints(self, x1, y1, x2, y2, k):
"""
        For the line segment defined by (*x1*, *y1*) and (*x2*, *y2*),
        return the two points on the line through (*x2*, *y2*) that is
        perpendicular to the segment and whose distance from (*x2*,
        *y2*) is *k*.
x1, y1, x2, y2, k = list(map(float, (x1, y1, x2, y2, k)))
if y2 - y1 == 0:
return x2, y2 + k, x2, y2 - k
elif x2 - x1 == 0:
return x2 + k, y2, x2 - k, y2
m = (y2 - y1) / (x2 - x1)
pm = -1. / m
a = 1
b = -2 * y2
c = y2 ** 2. - k ** 2. * pm ** 2. / (1. + pm ** 2.)
y3a = (-b + math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
x3a = (y3a - y2) / pm + x2
y3b = (-b - math.sqrt(b ** 2. - 4 * a * c)) / (2. * a)
x3b = (y3b - y2) / pm + x2
return x3a, y3a, x3b, y3b
class CirclePolygon(RegularPolygon):
"""
A polygon-approximation of a circle patch.
"""
def __str__(self):
return "CirclePolygon(%d,%d)" % self.center
@docstring.dedent_interpd
def __init__(self, xy, radius=5,
resolution=20, # the number of vertices
** kwargs):
"""
Create a circle at *xy* = (*x*, *y*) with given *radius*.
This circle is approximated by a regular polygon with
*resolution* sides. For a smoother circle drawn with splines,
see :class:`~matplotlib.patches.Circle`.
Valid kwargs are:
%(Patch)s
"""
RegularPolygon.__init__(self, xy,
resolution,
radius,
orientation=0,
**kwargs)
class Ellipse(Patch):
"""
A scale-free ellipse.
"""
def __str__(self):
return "Ellipse(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0, **kwargs):
"""
*xy*
center of ellipse
*width*
total length (diameter) of horizontal axis
*height*
total length (diameter) of vertical axis
*angle*
rotation in degrees (anti-clockwise)
Valid kwargs are:
%(Patch)s
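        A minimal usage sketch (illustrative only; ``ax`` is assumed to be
        an existing Axes)::
            ellipse = Ellipse((0.5, 0.5), width=0.6, height=0.3, angle=15)
            ax.add_patch(ellipse)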
"""
Patch.__init__(self, **kwargs)
self.center = xy
self.width, self.height = width, height
self.angle = angle
self._path = Path.unit_circle()
# Note: This cannot be calculated until this is added to an Axes
self._patch_transform = transforms.IdentityTransform()
def _recompute_transform(self):
"""NOTE: This cannot be called until after this has been added
to an Axes, otherwise unit conversion will fail. This
                 makes it very important to call the accessor method and
not directly access the transformation member variable.
"""
center = (self.convert_xunits(self.center[0]),
self.convert_yunits(self.center[1]))
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
self._patch_transform = transforms.Affine2D() \
.scale(width * 0.5, height * 0.5) \
.rotate_deg(self.angle) \
.translate(*center)
def get_path(self):
"""
        Return the path of the ellipse
"""
return self._path
def get_patch_transform(self):
self._recompute_transform()
return self._patch_transform
class Circle(Ellipse):
"""
A circle patch.
"""
def __str__(self):
return "Circle((%g,%g),r=%g)" % (self.center[0],
self.center[1],
self.radius)
@docstring.dedent_interpd
def __init__(self, xy, radius=5, **kwargs):
"""
Create true circle at center *xy* = (*x*, *y*) with given
*radius*. Unlike :class:`~matplotlib.patches.CirclePolygon`
which is a polygonal approximation, this uses Bézier splines
and is much closer to a scale-free circle.
Valid kwargs are:
%(Patch)s
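        A minimal usage sketch (illustrative only; ``ax`` is assumed to be
        an existing Axes)::
            smooth = Circle((0.5, 0.5), radius=0.25)         # spline-based
            coarse = CirclePolygon((0.5, 0.5), radius=0.25)  # 20-gon
            ax.add_patch(smooth)
            ax.add_patch(coarse)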
"""
Ellipse.__init__(self, xy, radius * 2, radius * 2, **kwargs)
self.radius = radius
def set_radius(self, radius):
"""
Set the radius of the circle
ACCEPTS: float
"""
self.width = self.height = 2 * radius
self.stale = True
def get_radius(self):
'return the radius of the circle'
return self.width / 2.
radius = property(get_radius, set_radius)
class Arc(Ellipse):
"""
An elliptical arc. Because it performs various optimizations, it
can not be filled.
The arc must be used in an :class:`~matplotlib.axes.Axes`
instance---it can not be added directly to a
:class:`~matplotlib.figure.Figure`---because it is optimized to
only render the segments that are inside the axes bounding box
with high resolution.
"""
def __str__(self):
return "Arc(%s,%s;%sx%s)" % (self.center[0], self.center[1],
self.width, self.height)
@docstring.dedent_interpd
def __init__(self, xy, width, height, angle=0.0,
theta1=0.0, theta2=360.0, **kwargs):
"""
The following args are supported:
*xy*
center of ellipse
*width*
length of horizontal axis
*height*
length of vertical axis
*angle*
rotation in degrees (anti-clockwise)
*theta1*
starting angle of the arc in degrees
*theta2*
ending angle of the arc in degrees
If *theta1* and *theta2* are not provided, the arc will form a
complete ellipse.
Valid kwargs are:
%(Patch)s
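        A minimal usage sketch (illustrative only; ``ax`` is assumed to be
        an existing Axes, which the arc requires in order to draw)::
            arc = Arc((0.5, 0.5), width=0.6, height=0.4, angle=30,
                      theta1=0, theta2=120)
            ax.add_patch(arc)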
"""
fill = kwargs.setdefault('fill', False)
if fill:
raise ValueError("Arc objects can not be filled")
Ellipse.__init__(self, xy, width, height, angle, **kwargs)
self.theta1 = theta1
self.theta2 = theta2
self._path = Path.arc(self.theta1, self.theta2)
@allow_rasterization
def draw(self, renderer):
"""
Ellipses are normally drawn using an approximation that uses
eight cubic bezier splines. The error of this approximation
is 1.89818e-6, according to this unverified source:
Lancaster, Don. Approximating a Circle or an Ellipse Using
Four Bezier Cubic Splines.
http://www.tinaja.com/glib/ellipse4.pdf
There is a use case where very large ellipses must be drawn
with very high accuracy, and it is too expensive to render the
entire ellipse with enough segments (either splines or line
segments). Therefore, in the case where either radius of the
ellipse is large enough that the error of the spline
approximation will be visible (greater than one pixel offset
from the ideal), a different technique is used.
In that case, only the visible parts of the ellipse are drawn,
with each visible arc using a fixed number of spline segments
(8). The algorithm proceeds as follows:
1. The points where the ellipse intersects the axes bounding
           box are located. (This is done by performing an inverse
transformation on the axes bbox such that it is relative
to the unit circle -- this makes the intersection
calculation much easier than doing rotated ellipse
intersection directly).
This uses the "line intersecting a circle" algorithm
from:
Vince, John. Geometry for Computer Graphics: Formulae,
Examples & Proofs. London: Springer-Verlag, 2005.
2. The angles of each of the intersection points are
calculated.
3. Proceeding counterclockwise starting in the positive
x-direction, each of the visible arc-segments between the
pairs of vertices are drawn using the bezier arc
approximation technique implemented in
:meth:`matplotlib.path.Path.arc`.
"""
if not hasattr(self, 'axes'):
raise RuntimeError('Arcs can only be used in Axes instances')
self._recompute_transform()
# Get the width and height in pixels
width = self.convert_xunits(self.width)
height = self.convert_yunits(self.height)
width, height = self.get_transform().transform_point(
(width, height))
inv_error = (1.0 / 1.89818e-6) * 0.5
if width < inv_error and height < inv_error:
# self._path = Path.arc(self.theta1, self.theta2)
return Patch.draw(self, renderer)
def iter_circle_intersect_on_line(x0, y0, x1, y1):
dx = x1 - x0
dy = y1 - y0
dr2 = dx * dx + dy * dy
D = x0 * y1 - x1 * y0
D2 = D * D
discrim = dr2 - D2
# Single (tangential) intersection
if discrim == 0.0:
x = (D * dy) / dr2
y = (-D * dx) / dr2
yield x, y
elif discrim > 0.0:
# The definition of "sign" here is different from
# np.sign: we never want to get 0.0
if dy < 0.0:
sign_dy = -1.0
else:
sign_dy = 1.0
sqrt_discrim = np.sqrt(discrim)
for sign in (1., -1.):
x = (D * dy + sign * sign_dy * dx * sqrt_discrim) / dr2
y = (-D * dx + sign * np.abs(dy) * sqrt_discrim) / dr2
yield x, y
def iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
epsilon = 1e-9
if x1 < x0:
x0e, x1e = x1, x0
else:
x0e, x1e = x0, x1
if y1 < y0:
y0e, y1e = y1, y0
else:
y0e, y1e = y0, y1
x0e -= epsilon
y0e -= epsilon
x1e += epsilon
y1e += epsilon
for x, y in iter_circle_intersect_on_line(x0, y0, x1, y1):
if x >= x0e and x <= x1e and y >= y0e and y <= y1e:
yield x, y
# Transforms the axes box_path so that it is relative to the unit
# circle in the same way that it is relative to the desired
# ellipse.
box_path = Path.unit_rectangle()
box_path_transform = transforms.BboxTransformTo(self.axes.bbox) + \
self.get_transform().inverted()
box_path = box_path.transformed(box_path_transform)
PI = np.pi
TWOPI = PI * 2.0
RAD2DEG = 180.0 / PI
DEG2RAD = PI / 180.0
theta1 = self.theta1
theta2 = self.theta2
thetas = {}
# For each of the point pairs, there is a line segment
for p0, p1 in zip(box_path.vertices[:-1], box_path.vertices[1:]):
x0, y0 = p0
x1, y1 = p1
for x, y in iter_circle_intersect_on_line_seg(x0, y0, x1, y1):
theta = np.arccos(x)
if y < 0:
theta = TWOPI - theta
                # Convert radians to degrees
theta *= RAD2DEG
if theta > theta1 and theta < theta2:
thetas[theta] = None
thetas = list(six.iterkeys(thetas))
thetas.sort()
thetas.append(theta2)
last_theta = theta1
theta1_rad = theta1 * DEG2RAD
inside = box_path.contains_point((np.cos(theta1_rad),
np.sin(theta1_rad)))
# save original path
path_original = self._path
for theta in thetas:
if inside:
                self._path = Path.arc(last_theta, theta, 8)
Patch.draw(self, renderer)
inside = False
else:
inside = True
last_theta = theta
# restore original path
self._path = path_original
def bbox_artist(artist, renderer, props=None, fill=True):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
*props* is a dict of rectangle props with the additional property
'pad' that sets the padding around the bbox in points.
"""
if props is None:
props = {}
props = props.copy() # don't want to alter the pad externally
pad = props.pop('pad', 4)
pad = renderer.points_to_pixels(pad)
bbox = artist.get_window_extent(renderer)
l, b, w, h = bbox.bounds
l -= pad / 2.
b -= pad / 2.
w += pad
h += pad
r = Rectangle(xy=(l, b),
width=w,
height=h,
fill=fill,
)
r.set_transform(transforms.IdentityTransform())
r.set_clip_on(False)
r.update(props)
r.draw(renderer)
def draw_bbox(bbox, renderer, color='k', trans=None):
"""
This is a debug function to draw a rectangle around the bounding
box returned by
:meth:`~matplotlib.artist.Artist.get_window_extent` of an artist,
to test whether the artist is returning the correct bbox.
"""
l, b, w, h = bbox.bounds
r = Rectangle(xy=(l, b),
width=w,
height=h,
edgecolor=color,
fill=False,
)
if trans is not None:
r.set_transform(trans)
r.set_clip_on(False)
r.draw(renderer)
def _pprint_table(_table, leadingspace=2):
"""
    Given a list of lists of strings, return a string in reST table format.
"""
if leadingspace:
pad = ' ' * leadingspace
else:
pad = ''
columns = [[] for cell in _table[0]]
for row in _table:
for column, cell in zip(columns, row):
column.append(cell)
col_len = [max([len(cell) for cell in column]) for column in columns]
lines = []
table_formatstr = pad + ' '.join([('=' * cl) for cl in col_len])
lines.append('')
lines.append(table_formatstr)
lines.append(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(_table[0], col_len)]))
lines.append(table_formatstr)
lines.extend([(pad + ' '.join([cell.ljust(cl)
for cell, cl
in zip(row, col_len)]))
for row in _table[1:]])
lines.append(table_formatstr)
lines.append('')
return "\n".join(lines)
def _pprint_styles(_styles):
"""
A helper function for the _Style class. Given the dictionary of
(stylename : styleclass), return a formatted string listing all the
styles. Used to update the documentation.
"""
names, attrss, clss = [], [], []
import inspect
_table = [["Class", "Name", "Attrs"]]
for name, cls in sorted(_styles.items()):
if six.PY2:
args, varargs, varkw, defaults = inspect.getargspec(cls.__init__)
else:
(args, varargs, varkw, defaults, kwonlyargs, kwonlydefs,
annotations) = inspect.getfullargspec(cls.__init__)
if defaults:
args = [(argname, argdefault)
for argname, argdefault in zip(args[1:], defaults)]
else:
args = None
if args is None:
argstr = 'None'
else:
argstr = ",".join([("%s=%s" % (an, av))
for an, av
in args])
# adding ``quotes`` since - and | have special meaning in reST
_table.append([cls.__name__, "``%s``" % name, argstr])
return _pprint_table(_table)
def _simpleprint_styles(_styles):
"""
A helper function for the _Style class. Given the dictionary of
(stylename : styleclass), return a string rep of the list of keys.
Used to update the documentation.
"""
styles = "[ \'"
styles += "\' | \'".join(str(i) for i in sorted(_styles.keys()))
styles += "\' ]"
return styles
class _Style(object):
"""
A base class for the Styles. It is meant to be a container class,
    where actual styles are declared as subclasses of it, and it
provides some helper functions.
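    For example, a concrete container such as :class:`BoxStyle` (defined
    below) resolves a style-name string to the registered subclass, so the
    following two calls are equivalent (illustrative)::
        BoxStyle("round", pad=0.2)
        BoxStyle("round, pad=0.2")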
"""
def __new__(self, stylename, **kw):
"""
        Return an instance of the subclass with the given style name.
        """
        # The "class" should have the _style_list attribute, which is
        # a dictionary of stylename, style class pairs.
_list = stylename.replace(" ", "").split(",")
_name = _list[0].lower()
try:
_cls = self._style_list[_name]
except KeyError:
raise ValueError("Unknown style : %s" % stylename)
try:
_args_pair = [cs.split("=") for cs in _list[1:]]
_args = dict([(k, float(v)) for k, v in _args_pair])
except ValueError:
raise ValueError("Incorrect style argument : %s" % stylename)
_args.update(kw)
return _cls(**_args)
@classmethod
def get_styles(klass):
"""
A class method which returns a dictionary of available styles.
"""
return klass._style_list
@classmethod
def pprint_styles(klass):
"""
A class method which returns a string of the available styles.
"""
return _pprint_styles(klass._style_list)
@classmethod
def register(klass, name, style):
"""
Register a new style.
"""
if not issubclass(style, klass._Base):
raise ValueError("%s must be a subclass of %s" % (style,
klass._Base))
klass._style_list[name] = style
class BoxStyle(_Style):
"""
:class:`BoxStyle` is a container class which defines several
boxstyle classes, which are used for :class:`FancyBboxPatch`.
A style object can be created as::
BoxStyle.Round(pad=0.2)
or::
BoxStyle("Round", pad=0.2)
or::
BoxStyle("Round, pad=0.2")
Following boxstyle classes are defined.
%(AvailableBoxstyles)s
    An instance of any boxstyle class is a callable object,
whose call signature is::
__call__(self, x0, y0, width, height, mutation_size, aspect_ratio=1.)
and returns a :class:`Path` instance. *x0*, *y0*, *width* and
*height* specify the location and size of the box to be
    drawn. *mutation_size* determines the overall size of the
    mutation (by which I mean the transformation of the rectangle to
    the fancy box). *aspect_ratio* determines the aspect ratio of
    the mutation.
.. plot:: mpl_examples/pylab_examples/fancybox_demo2.py
"""
_style_list = {}
class _Base(object):
"""
:class:`BBoxTransmuterBase` and its derivatives are used to make a
fancy box around a given rectangle. The :meth:`__call__` method
returns the :class:`~matplotlib.path.Path` of the fancy box. This
class is not an artist and actual drawing of the fancy box is done
by the :class:`FancyBboxPatch` class.
"""
# The derived classes are required to be able to be initialized
# w/o arguments, i.e., all its argument (except self) must have
# the default values.
def __init__(self):
"""
            Initialization.
"""
super(BoxStyle._Base, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
"""
            The transmute method is the very core of the
            :class:`BboxTransmuter` class and must be overridden in the
subclasses. It receives the location and size of the
rectangle, and the mutation_size, with which the amount of
padding and etc. will be scaled. It returns a
:class:`~matplotlib.path.Path` instance.
"""
raise NotImplementedError('Derived must override')
def __call__(self, x0, y0, width, height, mutation_size,
aspect_ratio=1.):
"""
Given the location and size of the box, return the path of
the box around it.
- *x0*, *y0*, *width*, *height* : location and size of the box
- *mutation_size* : a reference scale for the mutation.
            - *aspect_ratio* : aspect ratio for the mutation.
"""
# The __call__ method is a thin wrapper around the transmute method
            # and takes care of the aspect ratio.
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
y0, height = y0 / aspect_ratio, height / aspect_ratio
# call transmute method with squeezed height.
path = self.transmute(x0, y0, width, height, mutation_size)
vertices, codes = path.vertices, path.codes
# Restore the height
vertices[:, 1] = vertices[:, 1] * aspect_ratio
return Path(vertices, codes)
else:
return self.transmute(x0, y0, width, height, mutation_size)
def __reduce__(self):
            # because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(BoxStyle, self.__class__.__name__),
self.__dict__
)
class Square(_Base):
"""
A simple square box.
"""
def __init__(self, pad=0.3):
"""
*pad*
amount of padding
"""
self.pad = pad
super(BoxStyle.Square, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2*pad, height + 2*pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
vertices = [(x0, y0), (x1, y0), (x1, y1), (x0, y1), (x0, y0)]
codes = [Path.MOVETO] + [Path.LINETO] * 3 + [Path.CLOSEPOLY]
return Path(vertices, codes)
_style_list["square"] = Square
class Circle(_Base):
"""A simple circle box."""
def __init__(self, pad=0.3):
"""
Parameters
----------
pad : float
The amount of padding around the original box.
"""
self.pad = pad
super(BoxStyle.Circle, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
pad = mutation_size * self.pad
width, height = width + 2 * pad, height + 2 * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
return Path.circle((x0 + width/2., y0 + height/2.),
(max([width, height]) / 2.))
_style_list["circle"] = Circle
class LArrow(_Base):
"""
(left) Arrow Box
"""
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.LArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
width, height = width + 2. * pad, height + 2. * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0) / 2.
dxx = dx * .5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0 + dxx, y0), (x1, y0), (x1, y1), (x0 + dxx, y1),
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # arrow
(x0 + dxx, y0), (x0 + dxx, y0)]
com = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO, Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["larrow"] = LArrow
class RArrow(LArrow):
"""
(right) Arrow Box
"""
def __init__(self, pad=0.3):
super(BoxStyle.RArrow, self).__init__(pad)
def transmute(self, x0, y0, width, height, mutation_size):
p = BoxStyle.LArrow.transmute(self, x0, y0,
width, height, mutation_size)
p.vertices[:, 0] = 2 * x0 + width - p.vertices[:, 0]
return p
_style_list["rarrow"] = RArrow
class DArrow(_Base):
"""
(Double) Arrow Box
"""
# This source is copied from LArrow,
# modified to add a right arrow to the bbox.
def __init__(self, pad=0.3):
self.pad = pad
super(BoxStyle.DArrow, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# width and height with padding added.
# The width is padded by the arrows, so we don't need to pad it.
height = height + 2. * pad
# boundary of the padded box
x0, y0 = x0 - pad, y0 - pad
x1, y1 = x0 + width, y0 + height
dx = (y1 - y0)/2.
dxx = dx * .5
# adjust x0. 1.4 <- sqrt(2)
x0 = x0 + pad / 1.4
cp = [(x0 + dxx, y0), (x1, y0), # bot-segment
(x1, y0 - dxx), (x1 + dx + dxx, y0 + dx),
(x1, y1 + dxx), # right-arrow
(x1, y1), (x0 + dxx, y1), # top-segment
(x0 + dxx, y1 + dxx), (x0 - dx, y0 + dx),
(x0 + dxx, y0 - dxx), # left-arrow
(x0 + dxx, y0), (x0 + dxx, y0)] # close-poly
com = [Path.MOVETO, Path.LINETO,
Path.LINETO, Path.LINETO,
Path.LINETO,
Path.LINETO, Path.LINETO,
Path.LINETO, Path.LINETO,
Path.LINETO,
Path.LINETO, Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list['darrow'] = DArrow
class Round(_Base):
"""
A box with round corners.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
rounding radius of corners. *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
            # size of the rounding corner
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad
width, height = width + 2. * pad, height + 2. * pad
x0, y0 = x0 - pad, y0 - pad,
x1, y1 = x0 + width, y0 + height
# Round corners are implemented as quadratic bezier. e.g.,
# [(x0, y0-dr), (x0, y0), (x0+dr, y0)] for lower left corner.
cp = [(x0 + dr, y0),
(x1 - dr, y0),
(x1, y0), (x1, y0 + dr),
(x1, y1 - dr),
(x1, y1), (x1 - dr, y1),
(x0 + dr, y1),
(x0, y1), (x0, y1 - dr),
(x0, y0 + dr),
(x0, y0), (x0 + dr, y0),
(x0 + dr, y0)]
com = [Path.MOVETO,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.LINETO,
Path.CURVE3, Path.CURVE3,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round"] = Round
class Round4(_Base):
"""
Another box with round edges.
"""
def __init__(self, pad=0.3, rounding_size=None):
"""
*pad*
amount of padding
*rounding_size*
              rounding size of edges; half of *pad* if None
"""
self.pad = pad
self.rounding_size = rounding_size
super(BoxStyle.Round4, self).__init__()
def transmute(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
            # rounding size; use half of the pad if not set
if self.rounding_size:
dr = mutation_size * self.rounding_size
else:
dr = pad / 2.
width, height = (width + 2. * pad - 2 * dr,
height + 2. * pad - 2 * dr)
x0, y0 = x0 - pad + dr, y0 - pad + dr,
x1, y1 = x0 + width, y0 + height
cp = [(x0, y0),
(x0 + dr, y0 - dr), (x1 - dr, y0 - dr), (x1, y0),
(x1 + dr, y0 + dr), (x1 + dr, y1 - dr), (x1, y1),
(x1 - dr, y1 + dr), (x0 + dr, y1 + dr), (x0, y1),
(x0 - dr, y1 - dr), (x0 - dr, y0 + dr), (x0, y0),
(x0, y0)]
com = [Path.MOVETO,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CURVE4, Path.CURVE4, Path.CURVE4,
Path.CLOSEPOLY]
path = Path(cp, com)
return path
_style_list["round4"] = Round4
class Sawtooth(_Base):
"""
A sawtooth box.
"""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
              size of the sawtooth; half of *pad* if None
"""
self.pad = pad
self.tooth_size = tooth_size
super(BoxStyle.Sawtooth, self).__init__()
def _get_sawtooth_vertices(self, x0, y0, width, height, mutation_size):
# padding
pad = mutation_size * self.pad
# size of sawtooth
if self.tooth_size is None:
tooth_size = self.pad * .5 * mutation_size
else:
tooth_size = self.tooth_size * mutation_size
tooth_size2 = tooth_size / 2.
width, height = (width + 2. * pad - tooth_size,
height + 2. * pad - tooth_size)
# the sizes of the vertical and horizontal sawtooth are
# separately adjusted to fit the given box size.
dsx_n = int(np.round((width - tooth_size) / (tooth_size * 2))) * 2
dsx = (width - tooth_size) / dsx_n
dsy_n = int(np.round((height - tooth_size) / (tooth_size * 2))) * 2
dsy = (height - tooth_size) / dsy_n
x0, y0 = x0 - pad + tooth_size2, y0 - pad + tooth_size2
x1, y1 = x0 + width, y0 + height
bottom_saw_x = [x0] + \
[x0 + tooth_size2 + dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x1 - tooth_size2]
bottom_saw_y = [y0] + \
[y0 - tooth_size2, y0,
y0 + tooth_size2, y0] * dsx_n + \
[y0 - tooth_size2]
right_saw_x = [x1] + \
[x1 + tooth_size2,
x1,
x1 - tooth_size2,
x1] * dsx_n + \
[x1 + tooth_size2]
right_saw_y = [y0] + \
[y0 + tooth_size2 + dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y1 - tooth_size2]
top_saw_x = [x1] + \
[x1 - tooth_size2 - dsx * .5 * i
for i
in range(dsx_n * 2)] + \
[x0 + tooth_size2]
top_saw_y = [y1] + \
[y1 + tooth_size2,
y1,
y1 - tooth_size2,
y1] * dsx_n + \
[y1 + tooth_size2]
left_saw_x = [x0] + \
[x0 - tooth_size2,
x0,
x0 + tooth_size2,
x0] * dsy_n + \
[x0 - tooth_size2]
left_saw_y = [y1] + \
[y1 - tooth_size2 - dsy * .5 * i
for i
in range(dsy_n * 2)] + \
[y0 + tooth_size2]
saw_vertices = (list(zip(bottom_saw_x, bottom_saw_y)) +
list(zip(right_saw_x, right_saw_y)) +
list(zip(top_saw_x, top_saw_y)) +
list(zip(left_saw_x, left_saw_y)) +
[(bottom_saw_x[0], bottom_saw_y[0])])
return saw_vertices
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0, width,
height, mutation_size)
path = Path(saw_vertices, closed=True)
return path
_style_list["sawtooth"] = Sawtooth
class Roundtooth(Sawtooth):
"""A rounded tooth box."""
def __init__(self, pad=0.3, tooth_size=None):
"""
*pad*
amount of padding
*tooth_size*
              size of the sawtooth; half of *pad* if None
"""
super(BoxStyle.Roundtooth, self).__init__(pad, tooth_size)
def transmute(self, x0, y0, width, height, mutation_size):
saw_vertices = self._get_sawtooth_vertices(x0, y0,
width, height,
mutation_size)
# Add a trailing vertex to allow us to close the polygon correctly
saw_vertices = np.concatenate([np.array(saw_vertices),
[saw_vertices[0]]], axis=0)
codes = ([Path.MOVETO] +
[Path.CURVE3, Path.CURVE3] * ((len(saw_vertices)-1)//2) +
[Path.CLOSEPOLY])
return Path(saw_vertices, codes)
_style_list["roundtooth"] = Roundtooth
if __doc__: # __doc__ could be None if -OO optimization is enabled
__doc__ = cbook.dedent(__doc__) % \
{"AvailableBoxstyles": _pprint_styles(_style_list)}
docstring.interpd.update(
AvailableBoxstyles=_pprint_styles(BoxStyle._style_list),
ListBoxstyles=_simpleprint_styles(BoxStyle._style_list))
class FancyBboxPatch(Patch):
"""
Draw a fancy box around a rectangle with lower left at *xy*=(*x*,
*y*) with specified width and height.
:class:`FancyBboxPatch` class is similar to :class:`Rectangle`
class, but it draws a fancy box around the rectangle. The
transformation of the rectangle box to the fancy box is delegated
to the :class:`BoxTransmuterBase` and its derived classes.
"""
_edge_default = True
def __str__(self):
return self.__class__.__name__ \
+ "(%g,%g;%gx%g)" % (self._x, self._y,
self._width, self._height)
@docstring.dedent_interpd
def __init__(self, xy, width, height,
boxstyle="round",
bbox_transmuter=None,
mutation_scale=1.,
mutation_aspect=None,
**kwargs):
"""
*xy* = lower left corner
*width*, *height*
*boxstyle* determines what kind of fancy box will be drawn. It
        can be a string of the style name with comma-separated
        attributes, or an instance of :class:`BoxStyle`. The following
        box styles are available.
%(AvailableBoxstyles)s
*mutation_scale* : a value with which attributes of boxstyle
(e.g., pad) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
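        A minimal usage sketch (illustrative only; ``ax`` is assumed to be
        an existing Axes)::
            fancy = FancyBboxPatch((0.2, 0.2), width=0.6, height=0.3,
                                   boxstyle="round,pad=0.05",
                                   mutation_scale=1.0)
            ax.add_patch(fancy)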
"""
Patch.__init__(self, **kwargs)
self._x = xy[0]
self._y = xy[1]
self._width = width
self._height = height
if boxstyle == "custom":
if bbox_transmuter is None:
raise ValueError("bbox_transmuter argument is needed with "
"custom boxstyle")
self._bbox_transmuter = bbox_transmuter
else:
self.set_boxstyle(boxstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self.stale = True
@docstring.dedent_interpd
def set_boxstyle(self, boxstyle=None, **kw):
"""
Set the box style.
*boxstyle* can be a string with boxstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords::
set_boxstyle("round,pad=0.2")
set_boxstyle("round", pad=0.2)
        Old attrs are simply forgotten.
Without argument (or with *boxstyle* = None), it returns
available box styles.
The following boxstyles are available:
%(AvailableBoxstyles)s
ACCEPTS: %(ListBoxstyles)s
"""
if boxstyle is None:
return BoxStyle.pprint_styles()
if isinstance(boxstyle, BoxStyle._Base):
self._bbox_transmuter = boxstyle
elif six.callable(boxstyle):
self._bbox_transmuter = boxstyle
else:
self._bbox_transmuter = BoxStyle(boxstyle, **kw)
self.stale = True
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale = scale
self.stale = True
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect = aspect
self.stale = True
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_boxstyle(self):
"Return the boxstyle object"
return self._bbox_transmuter
def get_path(self):
"""
Return the mutated path of the rectangle
"""
_path = self.get_boxstyle()(self._x, self._y,
self._width, self._height,
self.get_mutation_scale(),
self.get_mutation_aspect())
return _path
# Following methods are borrowed from the Rectangle class.
def get_x(self):
"Return the left coord of the rectangle"
return self._x
def get_y(self):
"Return the bottom coord of the rectangle"
return self._y
def get_width(self):
"Return the width of the rectangle"
return self._width
def get_height(self):
"Return the height of the rectangle"
return self._height
def set_x(self, x):
"""
Set the left coord of the rectangle
ACCEPTS: float
"""
self._x = x
self.stale = True
def set_y(self, y):
"""
Set the bottom coord of the rectangle
ACCEPTS: float
"""
self._y = y
self.stale = True
def set_width(self, w):
"""
        Set the width of the rectangle
ACCEPTS: float
"""
self._width = w
self.stale = True
def set_height(self, h):
"""
        Set the height of the rectangle
ACCEPTS: float
"""
self._height = h
self.stale = True
def set_bounds(self, *args):
"""
Set the bounds of the rectangle: l,b,w,h
ACCEPTS: (left, bottom, width, height)
"""
        if len(args) == 1:
l, b, w, h = args[0]
else:
l, b, w, h = args
self._x = l
self._y = b
self._width = w
self._height = h
self.stale = True
def get_bbox(self):
return transforms.Bbox.from_bounds(self._x, self._y,
self._width, self._height)
class ConnectionStyle(_Style):
"""
:class:`ConnectionStyle` is a container class which defines
several connectionstyle classes, which is used to create a path
between two points. These are mainly used with
:class:`FancyArrowPatch`.
    A connectionstyle object can be created either as::
ConnectionStyle.Arc3(rad=0.2)
or::
ConnectionStyle("Arc3", rad=0.2)
or::
ConnectionStyle("Arc3, rad=0.2")
The following classes are defined
%(AvailableConnectorstyles)s
    An instance of any connection style class is a callable object,
whose call signature is::
__call__(self, posA, posB,
patchA=None, patchB=None,
shrinkA=2., shrinkB=2.)
and it returns a :class:`Path` instance. *posA* and *posB* are
tuples of x,y coordinates of the two points to be
    connected. If *patchA* (or *patchB*) is given, the returned path is
    clipped so that it starts (or ends) at the boundary of the
    patch. The path is further shrunk by *shrinkA* (or *shrinkB*),
    which are given in points.
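    For example (illustrative; the positions are typically display
    coordinates supplied by :class:`FancyArrowPatch`)::
        conn = ConnectionStyle("arc3,rad=0.2")
        path = conn((0, 0), (100, 40))   # a Path from posA to posB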
"""
_style_list = {}
class _Base(object):
"""
A base class for connectionstyle classes. The subclass needs
to implement a *connect* method whose call signature is::
connect(posA, posB)
where posA and posB are tuples of x, y coordinates to be
connected. The method needs to return a path connecting two
points. This base class defines a __call__ method, and a few
helper methods.
"""
class SimpleEvent:
def __init__(self, xy):
self.x, self.y = xy
def _clip(self, path, patchA, patchB):
"""
            Clip the path to the boundaries of patchA and patchB.
            The starting point of the path needs to be inside patchA
            and the end point inside patchB. The *contains* method of
            each patch object is used to test whether a point is
            inside the patch.
"""
if patchA:
def insideA(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchA.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideA)
except ValueError:
right = path
path = right
if patchB:
def insideB(xy_display):
xy_event = ConnectionStyle._Base.SimpleEvent(xy_display)
return patchB.contains(xy_event)[0]
try:
left, right = split_path_inout(path, insideB)
except ValueError:
left = path
path = left
return path
def _shrink(self, path, shrinkA, shrinkB):
"""
            Shrink the path by a fixed size (in points), given by
            shrinkA and shrinkB.
"""
if shrinkA:
x, y = path.vertices[0]
insideA = inside_circle(x, y, shrinkA)
try:
left, right = split_path_inout(path, insideA)
path = right
except ValueError:
pass
if shrinkB:
x, y = path.vertices[-1]
insideB = inside_circle(x, y, shrinkB)
try:
left, right = split_path_inout(path, insideB)
path = left
except ValueError:
pass
return path
def __call__(self, posA, posB,
shrinkA=2., shrinkB=2., patchA=None, patchB=None):
"""
Calls the *connect* method to create a path between *posA*
and *posB*. The path is clipped and shrunken.
"""
path = self.connect(posA, posB)
clipped_path = self._clip(path, patchA, patchB)
shrunk_path = self._shrink(clipped_path, shrinkA, shrinkB)
return shrunk_path
def __reduce__(self):
# because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ConnectionStyle, self.__class__.__name__),
self.__dict__
)
class Arc3(_Base):
"""
        Creates a simple quadratic Bezier curve between two
        points. The curve is created so that the middle control point
        (C1) is located at the same distance from the start (C0) and
        end (C2) points, and the distance of C1 from the line
        connecting C0 and C2 is *rad* times the distance between C0
        and C2.
"""
def __init__(self, rad=0.):
"""
*rad*
curvature of the curve.
"""
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
dx, dy = x2 - x1, y2 - y1
f = self.rad
cx, cy = x12 + f * dy, y12 - f * dx
vertices = [(x1, y1),
(cx, cy),
(x2, y2)]
codes = [Path.MOVETO,
Path.CURVE3,
Path.CURVE3]
return Path(vertices, codes)
_style_list["arc3"] = Arc3
class Angle3(_Base):
"""
        Creates a simple quadratic Bezier curve between two
        points. The middle control point is placed at the intersection
        of the two lines that pass through the start and end points at
        angles of *angleA* and *angleB*, respectively.
"""
def __init__(self, angleA=90, angleB=0):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
"""
self.angleA = angleA
self.angleB = angleB
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = (math.cos(self.angleA / 180. * math.pi),
math.sin(self.angleA / 180. * math.pi))
cosB, sinB = (math.cos(self.angleB / 180. * math.pi),
math.sin(self.angleB / 180. * math.pi))
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1), (cx, cy), (x2, y2)]
codes = [Path.MOVETO, Path.CURVE3, Path.CURVE3]
return Path(vertices, codes)
_style_list["angle3"] = Angle3
class Angle(_Base):
"""
        Creates a piecewise continuous quadratic Bezier path between
        two points. The path has one passing-through point placed at
        the intersection of the two lines that pass through the start
        and end points at angles of *angleA* and *angleB*,
        respectively. The connecting edges are rounded with *rad*.
"""
def __init__(self, angleA=90, angleB=0, rad=0.):
"""
*angleA*
starting angle of the path
*angleB*
ending angle of the path
*rad*
rounding radius of the edge
"""
self.angleA = angleA
self.angleB = angleB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
cosA, sinA = (math.cos(self.angleA / 180. * math.pi),
math.sin(self.angleA / 180. * math.pi))
cosB, sinB = (math.cos(self.angleB / 180. * math.pi),
math.sin(self.angleB / 180. * math.pi))
cx, cy = get_intersection(x1, y1, cosA, sinA,
x2, y2, cosB, sinB)
vertices = [(x1, y1)]
codes = [Path.MOVETO]
if self.rad == 0.:
vertices.append((cx, cy))
codes.append(Path.LINETO)
else:
dx1, dy1 = x1 - cx, y1 - cy
d1 = (dx1 ** 2 + dy1 ** 2) ** .5
f1 = self.rad / d1
dx2, dy2 = x2 - cx, y2 - cy
d2 = (dx2 ** 2 + dy2 ** 2) ** .5
f2 = self.rad / d2
vertices.extend([(cx + dx1 * f1, cy + dy1 * f1),
(cx, cy),
(cx + dx2 * f2, cy + dy2 * f2)])
codes.extend([Path.LINETO, Path.CURVE3, Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["angle"] = Angle
class Arc(_Base):
"""
        Creates a piecewise continuous quadratic Bezier path between
        two points. The path can have two passing-through points: one
        placed at distance *armA* and angle *angleA* from point A, and
        another placed analogously with respect to point B. The edges
        are rounded with *rad*.
"""
def __init__(self, angleA=0, angleB=0, armA=None, armB=None, rad=0.):
"""
*angleA* :
starting angle of the path
*angleB* :
ending angle of the path
*armA* :
length of the starting arm
*armB* :
length of the ending arm
*rad* :
rounding radius of the edges
"""
self.angleA = angleA
self.angleB = angleB
self.armA = armA
self.armB = armB
self.rad = rad
def connect(self, posA, posB):
x1, y1 = posA
x2, y2 = posB
vertices = [(x1, y1)]
rounded = []
codes = [Path.MOVETO]
if self.armA:
cosA = math.cos(self.angleA / 180. * math.pi)
sinA = math.sin(self.angleA / 180. * math.pi)
# x_armA, y_armB
d = self.armA - self.rad
rounded.append((x1 + d * cosA, y1 + d * sinA))
d = self.armA
rounded.append((x1 + d * cosA, y1 + d * sinA))
if self.armB:
cosB = math.cos(self.angleB / 180. * math.pi)
sinB = math.sin(self.angleB / 180. * math.pi)
x_armB, y_armB = x2 + self.armB * cosB, y2 + self.armB * sinB
if rounded:
xp, yp = rounded[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
else:
xp, yp = vertices[-1]
dx, dy = x_armB - xp, y_armB - yp
dd = (dx * dx + dy * dy) ** .5
d = dd - self.rad
rounded = [(xp + d * dx / dd, yp + d * dy / dd),
(x_armB, y_armB)]
if rounded:
xp, yp = rounded[-1]
dx, dy = x2 - xp, y2 - yp
dd = (dx * dx + dy * dy) ** .5
rounded.append((xp + self.rad * dx / dd,
yp + self.rad * dy / dd))
vertices.extend(rounded)
codes.extend([Path.LINETO,
Path.CURVE3,
Path.CURVE3])
vertices.append((x2, y2))
codes.append(Path.LINETO)
return Path(vertices, codes)
_style_list["arc"] = Arc
class Bar(_Base):
"""
        Connects point A and point B with a line at *angle*, using arms
        *armA* and *armB*. One of the arms is extended so that they are
        connected at a right angle. The length of armA is determined by
        (*armA* + *fraction* x AB distance); the same holds for armB.
"""
def __init__(self, armA=0., armB=0., fraction=0.3, angle=None):
"""
Parameters
----------
armA : float
minimum length of armA
armB : float
minimum length of armB
fraction : float
a fraction of the distance between two points that
will be added to armA and armB.
angle : float or None
angle of the connecting line (if None, parallel
to A and B)
"""
self.armA = armA
self.armB = armB
self.fraction = fraction
self.angle = angle
def connect(self, posA, posB):
x1, y1 = posA
x20, y20 = x2, y2 = posB
x12, y12 = (x1 + x2) / 2., (y1 + y2) / 2.
theta1 = math.atan2(y2 - y1, x2 - x1)
dx, dy = x2 - x1, y2 - y1
dd = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd, dy / dd
armA, armB = self.armA, self.armB
if self.angle is not None:
#angle = self.angle % 180.
#if angle < 0. or angle > 180.:
# angle
#theta0 = (self.angle%180.)/180.*math.pi
theta0 = self.angle / 180. * math.pi
#theta0 = (((self.angle+90)%180.) - 90.)/180.*math.pi
dtheta = theta1 - theta0
dl = dd * math.sin(dtheta)
dL = dd * math.cos(dtheta)
#x2, y2 = x2 + dl*ddy, y2 - dl*ddx
x2, y2 = x1 + dL * math.cos(theta0), y1 + dL * math.sin(theta0)
armB = armB - dl
# update
dx, dy = x2 - x1, y2 - y1
dd2 = (dx * dx + dy * dy) ** .5
ddx, ddy = dx / dd2, dy / dd2
else:
dl = 0.
#if armA > armB:
# armB = armA + dl
#else:
# armA = armB - dl
arm = max(armA, armB)
f = self.fraction * dd + arm
#fB = self.fraction*dd + armB
cx1, cy1 = x1 + f * ddy, y1 - f * ddx
cx2, cy2 = x2 + f * ddy, y2 - f * ddx
vertices = [(x1, y1),
(cx1, cy1),
(cx2, cy2),
(x20, y20)]
codes = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return Path(vertices, codes)
_style_list["bar"] = Bar
if __doc__:
__doc__ = cbook.dedent(__doc__) % \
{"AvailableConnectorstyles": _pprint_styles(_style_list)}
def _point_along_a_line(x0, y0, x1, y1, d):
"""
find a point along a line connecting (x0, y0) -- (x1, y1) whose
distance from (x0, y0) is d.
"""
dx, dy = x0 - x1, y0 - y1
ff = d / (dx * dx + dy * dy) ** .5
x2, y2 = x0 - ff * dx, y0 - ff * dy
return x2, y2
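# A minimal, illustrative check of the helper above (a sketch; it is not called
# anywhere in this module and the numbers are hypothetical). For the segment
# (0, 0) -- (3, 4), the point at distance 2.5 from (0, 0) is (1.5, 2.0).
def _demo_point_along_a_line():
    x2, y2 = _point_along_a_line(0., 0., 3., 4., 2.5)
    assert abs(x2 - 1.5) < 1e-12 and abs(y2 - 2.0) < 1e-12
    return x2, y2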
class ArrowStyle(_Style):
"""
:class:`ArrowStyle` is a container class which defines several
arrowstyle classes, which is used to create an arrow path along a
given path. These are mainly used with :class:`FancyArrowPatch`.
    An arrowstyle object can be created either as::
ArrowStyle.Fancy(head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy", head_length=.4, head_width=.4, tail_width=.4)
or::
ArrowStyle("Fancy, head_length=.4, head_width=.4, tail_width=.4")
The following classes are defined
%(AvailableArrowstyles)s
An instance of any arrow style class is a callable object,
whose call signature is::
__call__(self, path, mutation_size, linewidth, aspect_ratio=1.)
and it returns a tuple of a :class:`Path` instance and a boolean
value. *path* is a :class:`Path` instance along which the arrow
will be drawn. *mutation_size* and *aspect_ratio* have the same
meaning as in :class:`BoxStyle`. *linewidth* is a line width to be
stroked. This is meant to be used to correct the location of the
head so that it does not overshoot the destination point, but not all
classes support it.
.. plot:: mpl_examples/pylab_examples/fancyarrow_demo.py
"""
_style_list = {}
class _Base(object):
"""
Arrow Transmuter Base class
ArrowTransmuterBase and its derivatives are used to make a fancy
arrow around a given path. The __call__ method returns a path
(which will be used to create a PathPatch instance) and a boolean
        value indicating whether the path is open and therefore not
        fillable. This class is not an artist; the actual drawing of the
        fancy arrow is done by the FancyArrowPatch class.
"""
# The derived classes are required to be able to be initialized
        # w/o arguments, i.e., all of its arguments (except self) must
        # have default values.
def __init__(self):
super(ArrowStyle._Base, self).__init__()
@staticmethod
        def ensure_quadratic_bezier(path):
            """ Some ArrowStyle classes only work with a simple
            quadratic Bezier curve (created with the Arc3 or
            Angle3 connection styles). This static method checks
            whether the provided path is a simple quadratic Bezier
            curve and, if so, returns its six control-point
            coordinates (x0, y0, x1, y1, x2, y2).
"""
segments = list(path.iter_segments())
if ((len(segments) != 2) or (segments[0][1] != Path.MOVETO) or
(segments[1][1] != Path.CURVE3)):
                msg = "'path' is not a valid quadratic Bezier curve"
raise ValueError(msg)
return list(segments[0][0]) + list(segments[1][0])
def transmute(self, path, mutation_size, linewidth):
"""
The transmute method is the very core of the ArrowStyle
            class and must be overridden in the subclasses. It receives
the path object along which the arrow will be drawn, and
the mutation_size, with which the arrow head etc.
will be scaled. The linewidth may be used to adjust
the path so that it does not pass beyond the given
points. It returns a tuple of a Path instance and a
            boolean. The boolean value indicates whether the path can
            be filled or not. The return value can also be a list of paths
            and a list of booleans of the same length.
"""
raise NotImplementedError('Derived must override')
def __call__(self, path, mutation_size, linewidth,
aspect_ratio=1.):
"""
The __call__ method is a thin wrapper around the transmute method
            and takes care of the aspect ratio.
"""
path = make_path_regular(path)
if aspect_ratio is not None:
# Squeeze the given height by the aspect_ratio
vertices, codes = path.vertices[:], path.codes[:]
# Squeeze the height
vertices[:, 1] = vertices[:, 1] / aspect_ratio
path_shrunk = Path(vertices, codes)
# call transmute method with squeezed height.
                path_mutated, fillable = self.transmute(
                    path_shrunk, mutation_size, linewidth)
if cbook.iterable(fillable):
path_list = []
                    for p in path_mutated:
v, c = p.vertices, p.codes
# Restore the height
v[:, 1] = v[:, 1] * aspect_ratio
path_list.append(Path(v, c))
return path_list, fillable
else:
return path_mutated, fillable
else:
return self.transmute(path, mutation_size, linewidth)
def __reduce__(self):
            # because we have decided to nest these classes, we need to
# add some more information to allow instance pickling.
import matplotlib.cbook as cbook
return (cbook._NestedClassGetter(),
(ArrowStyle, self.__class__.__name__),
self.__dict__
)
class _Curve(_Base):
"""
A simple arrow which will work with any path instance. The
        returned path is simply a concatenation of the original path and at
        most two paths representing the arrow heads at the begin point and
        at the end point. The arrow heads can be either open or closed.
"""
def __init__(self, beginarrow=None, endarrow=None,
fillbegin=False, fillend=False,
head_length=.2, head_width=.1):
"""
The arrows are drawn if *beginarrow* and/or *endarrow* are
            true. *head_length* and *head_width* determine the size
of the arrow relative to the *mutation scale*. The
arrowhead at the begin (or end) is closed if fillbegin (or
fillend) is True.
"""
self.beginarrow, self.endarrow = beginarrow, endarrow
self.head_length, self.head_width = head_length, head_width
self.fillbegin, self.fillend = fillbegin, fillend
super(ArrowStyle._Curve, self).__init__()
def _get_arrow_wedge(self, x0, y0, x1, y1,
head_dist, cos_t, sin_t, linewidth
):
"""
Return the paths for arrow heads. Since arrow lines are
            drawn with capstyle=projected, the arrow goes beyond the
desired point. This method also returns the amount of the path
to be shrunken so that it does not overshoot.
"""
# arrow from x0, y0 to x1, y1
dx, dy = x0 - x1, y0 - y1
cp_distance = np.hypot(dx, dy)
            # pad_projected : amount of pad to account for the
            # overshooting of the projection of the wedge
pad_projected = (.5 * linewidth / sin_t)
# Account for division by zero
if cp_distance == 0:
cp_distance = 1
# apply pad for projected edge
ddx = pad_projected * dx / cp_distance
ddy = pad_projected * dy / cp_distance
# offset for arrow wedge
dx = dx / cp_distance * head_dist
dy = dy / cp_distance * head_dist
dx1, dy1 = cos_t * dx + sin_t * dy, -sin_t * dx + cos_t * dy
dx2, dy2 = cos_t * dx - sin_t * dy, sin_t * dx + cos_t * dy
vertices_arrow = [(x1 + ddx + dx1, y1 + ddy + dy1),
(x1 + ddx, y1 + ddy),
(x1 + ddx + dx2, y1 + ddy + dy2)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow, ddx, ddy
def transmute(self, path, mutation_size, linewidth):
head_length, head_width = self.head_length * mutation_size, \
self.head_width * mutation_size
head_dist = math.sqrt(head_length ** 2 + head_width ** 2)
cos_t, sin_t = head_length / head_dist, head_width / head_dist
# begin arrow
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
# If there is no room for an arrow and a line, then skip the arrow
has_begin_arrow = (self.beginarrow and
not ((x0 == x1) and (y0 == y1)))
if has_begin_arrow:
verticesA, codesA, ddxA, ddyA = \
self._get_arrow_wedge(x1, y1, x0, y0,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesA, codesA = [], []
ddxA, ddyA = 0., 0.
# end arrow
x2, y2 = path.vertices[-2]
x3, y3 = path.vertices[-1]
# If there is no room for an arrow and a line, then skip the arrow
has_end_arrow = (self.endarrow and not ((x2 == x3) and (y2 == y3)))
if has_end_arrow:
verticesB, codesB, ddxB, ddyB = \
self._get_arrow_wedge(x2, y2, x3, y3,
head_dist, cos_t, sin_t,
linewidth)
else:
verticesB, codesB = [], []
ddxB, ddyB = 0., 0.
            # this simple code will not work if ddx, ddy is greater than the
            # separation between vertices.
_path = [Path(np.concatenate([[(x0 + ddxA, y0 + ddyA)],
path.vertices[1:-1],
[(x3 + ddxB, y3 + ddyB)]]),
path.codes)]
_fillable = [False]
if has_begin_arrow:
if self.fillbegin:
p = np.concatenate([verticesA, [verticesA[0],
verticesA[0]], ])
c = np.concatenate([codesA, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
_fillable.append(True)
else:
_path.append(Path(verticesA, codesA))
_fillable.append(False)
if has_end_arrow:
if self.fillend:
_fillable.append(True)
p = np.concatenate([verticesB, [verticesB[0],
verticesB[0]], ])
c = np.concatenate([codesB, [Path.LINETO, Path.CLOSEPOLY]])
_path.append(Path(p, c))
else:
_fillable.append(False)
_path.append(Path(verticesB, codesB))
return _path, _fillable
class Curve(_Curve):
"""
A simple curve without any arrow head.
"""
def __init__(self):
super(ArrowStyle.Curve, self).__init__(
beginarrow=False, endarrow=False)
_style_list["-"] = Curve
class CurveA(_Curve):
"""
An arrow with a head at its begin point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveA, self).__init__(
beginarrow=True, endarrow=False,
head_length=head_length, head_width=head_width)
_style_list["<-"] = CurveA
class CurveB(_Curve):
"""
An arrow with a head at its end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveB, self).__init__(
beginarrow=False, endarrow=True,
head_length=head_length, head_width=head_width)
_style_list["->"] = CurveB
class CurveAB(_Curve):
"""
An arrow with heads both at the begin and the end point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveAB, self).__init__(
beginarrow=True, endarrow=True,
head_length=head_length, head_width=head_width)
_style_list["<->"] = CurveAB
class CurveFilledA(_Curve):
"""
An arrow with filled triangle head at the begin.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledA, self).__init__(
beginarrow=True, endarrow=False,
fillbegin=True, fillend=False,
head_length=head_length, head_width=head_width)
_style_list["<|-"] = CurveFilledA
class CurveFilledB(_Curve):
"""
An arrow with filled triangle head at the end.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledB, self).__init__(
beginarrow=False, endarrow=True,
fillbegin=False, fillend=True,
head_length=head_length, head_width=head_width)
_style_list["-|>"] = CurveFilledB
class CurveFilledAB(_Curve):
"""
An arrow with filled triangle heads both at the begin and the end
point.
"""
def __init__(self, head_length=.4, head_width=.2):
"""
*head_length*
length of the arrow head
*head_width*
width of the arrow head
"""
super(ArrowStyle.CurveFilledAB, self).__init__(
beginarrow=True, endarrow=True,
fillbegin=True, fillend=True,
head_length=head_length, head_width=head_width)
_style_list["<|-|>"] = CurveFilledAB
class _Bracket(_Base):
def __init__(self, bracketA=None, bracketB=None,
widthA=1., widthB=1.,
lengthA=0.2, lengthB=0.2,
angleA=None, angleB=None,
scaleA=None, scaleB=None):
self.bracketA, self.bracketB = bracketA, bracketB
self.widthA, self.widthB = widthA, widthB
self.lengthA, self.lengthB = lengthA, lengthB
self.angleA, self.angleB = angleA, angleB
self.scaleA, self.scaleB = scaleA, scaleB
def _get_bracket(self, x0, y0,
cos_t, sin_t, width, length):
# arrow from x0, y0 to x1, y1
from matplotlib.bezier import get_normal_points
x1, y1, x2, y2 = get_normal_points(x0, y0, cos_t, sin_t, width)
dx, dy = length * cos_t, length * sin_t
vertices_arrow = [(x1 + dx, y1 + dy),
(x1, y1),
(x2, y2),
(x2 + dx, y2 + dy)]
codes_arrow = [Path.MOVETO,
Path.LINETO,
Path.LINETO,
Path.LINETO]
return vertices_arrow, codes_arrow
def transmute(self, path, mutation_size, linewidth):
if self.scaleA is None:
scaleA = mutation_size
else:
scaleA = self.scaleA
if self.scaleB is None:
scaleB = mutation_size
else:
scaleB = self.scaleB
vertices_list, codes_list = [], []
if self.bracketA:
x0, y0 = path.vertices[0]
x1, y1 = path.vertices[1]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesA, codesA = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthA * scaleA,
self.lengthA * scaleA)
vertices_list.append(verticesA)
codes_list.append(codesA)
vertices_list.append(path.vertices)
codes_list.append(path.codes)
if self.bracketB:
x0, y0 = path.vertices[-1]
x1, y1 = path.vertices[-2]
cos_t, sin_t = get_cos_sin(x1, y1, x0, y0)
verticesB, codesB = self._get_bracket(x0, y0, cos_t, sin_t,
self.widthB * scaleB,
self.lengthB * scaleB)
vertices_list.append(verticesB)
codes_list.append(codesB)
vertices = np.concatenate(vertices_list)
codes = np.concatenate(codes_list)
p = Path(vertices, codes)
return p, False
class BracketAB(_Bracket):
"""
An arrow with a bracket(]) at both ends.
"""
def __init__(self,
widthA=1., lengthA=0.2, angleA=None,
widthB=1., lengthB=0.2, angleB=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketAB, self).__init__(
True, True, widthA=widthA, lengthA=lengthA,
angleA=angleA, widthB=widthB, lengthB=lengthB,
angleB=angleB)
_style_list["]-["] = BracketAB
class BracketA(_Bracket):
"""
        An arrow with a bracket (]) at its begin point.
"""
def __init__(self, widthA=1., lengthA=0.2, angleA=None):
"""
*widthA*
width of the bracket
*lengthA*
length of the bracket
*angleA*
angle between the bracket and the line
"""
super(ArrowStyle.BracketA, self).__init__(True, None,
widthA=widthA,
lengthA=lengthA,
angleA=angleA)
_style_list["]-"] = BracketA
class BracketB(_Bracket):
"""
An arrow with a bracket([) at its end.
"""
def __init__(self, widthB=1., lengthB=0.2, angleB=None):
"""
*widthB*
width of the bracket
*lengthB*
length of the bracket
*angleB*
angle between the bracket and the line
"""
super(ArrowStyle.BracketB, self).__init__(None, True,
widthB=widthB,
lengthB=lengthB,
angleB=angleB)
_style_list["-["] = BracketB
class BarAB(_Bracket):
"""
An arrow with a bar(|) at both ends.
"""
def __init__(self,
widthA=1., angleA=None,
widthB=1., angleB=None):
"""
            *widthA*
                width of the bar
            *angleA*
                angle between the bar and the line
            *widthB*
                width of the bar
            *angleB*
                angle between the bar and the line
"""
super(ArrowStyle.BarAB, self).__init__(
True, True, widthA=widthA, lengthA=0, angleA=angleA,
widthB=widthB, lengthB=0, angleB=angleB)
_style_list["|-|"] = BarAB
class Simple(_Base):
"""
A simple arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.5, head_width=.5, tail_width=.2):
"""
*head_length*
length of the arrow head
            *head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Simple, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
in_f = inside_circle(x2, y2, head_length)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
from .bezier import NonIntersectingPathException
try:
arrow_out, arrow_in = \
split_bezier_intersecting_with_closedpath(arrow_path,
in_f,
tolerence=0.01)
except NonIntersectingPathException:
# if this happens, make a straight line of the head_length
# long.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_in = [(x0, y0), (x1n, y1n), (x2, y2)]
arrow_out = None
# head
head_width = self.head_width * mutation_size
head_left, head_right = make_wedged_bezier2(arrow_in,
head_width / 2., wm=.5)
# tail
if arrow_out is not None:
tail_width = self.tail_width * mutation_size
tail_left, tail_right = get_parallels(arrow_out,
tail_width / 2.)
patch_path = [(Path.MOVETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_right[0]),
(Path.CLOSEPOLY, tail_right[0]),
]
else:
patch_path = [(Path.MOVETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.CLOSEPOLY, head_left[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["simple"] = Simple
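    # Note: "simple", and likewise "fancy" and "wedge" below, assume the input
    # path is a single quadratic Bezier segment (e.g. one produced by the "arc3"
    # or "angle3" connection styles); for any other path,
    # ensure_quadratic_bezier raises ValueError.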
class Fancy(_Base):
"""
A fancy arrow. Only works with a quadratic bezier curve.
"""
def __init__(self, head_length=.4, head_width=.4, tail_width=.4):
"""
*head_length*
length of the arrow head
            *head_width*
width of the arrow head
*tail_width*
width of the arrow tail
"""
self.head_length, self.head_width, self.tail_width = \
head_length, head_width, tail_width
super(ArrowStyle.Fancy, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
# divide the path into a head and a tail
head_length = self.head_length * mutation_size
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
from .bezier import NonIntersectingPathException
# path for head
in_f = inside_circle(x2, y2, head_length)
try:
path_out, path_in = \
split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01)
except NonIntersectingPathException:
# if this happens, make a straight line of the head_length
# long.
x0, y0 = _point_along_a_line(x2, y2, x1, y1, head_length)
x1n, y1n = 0.5 * (x0 + x2), 0.5 * (y0 + y2)
arrow_path = [(x0, y0), (x1n, y1n), (x2, y2)]
path_head = arrow_path
else:
path_head = path_in
            # path for the tail
in_f = inside_circle(x2, y2, head_length * .8)
path_out, path_in = split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01
)
path_tail = path_out
# head
head_width = self.head_width * mutation_size
head_l, head_r = make_wedged_bezier2(path_head,
head_width / 2.,
wm=.6)
# tail
tail_width = self.tail_width * mutation_size
tail_left, tail_right = make_wedged_bezier2(path_tail,
tail_width * .5,
w1=1., wm=0.6, w2=0.3)
            # locate the start of the tail
in_f = inside_circle(x0, y0, tail_width * .3)
path_in, path_out = split_bezier_intersecting_with_closedpath(
arrow_path,
in_f,
tolerence=0.01
)
tail_start = path_in[-1]
head_right, head_left = head_r, head_l
patch_path = [(Path.MOVETO, tail_start),
(Path.LINETO, tail_right[0]),
(Path.CURVE3, tail_right[1]),
(Path.CURVE3, tail_right[2]),
(Path.LINETO, head_right[0]),
(Path.CURVE3, head_right[1]),
(Path.CURVE3, head_right[2]),
(Path.CURVE3, head_left[1]),
(Path.CURVE3, head_left[0]),
(Path.LINETO, tail_left[2]),
(Path.CURVE3, tail_left[1]),
(Path.CURVE3, tail_left[0]),
(Path.LINETO, tail_start),
(Path.CLOSEPOLY, tail_start),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["fancy"] = Fancy
class Wedge(_Base):
"""
Wedge(?) shape. Only works with a quadratic bezier curve. The
begin point has a width of the tail_width and the end point has a
width of 0. At the middle, the width is shrink_factor*tail_width.
"""
def __init__(self, tail_width=.3, shrink_factor=0.5):
"""
*tail_width*
width of the tail
*shrink_factor*
fraction of the arrow width at the middle point
"""
self.tail_width = tail_width
self.shrink_factor = shrink_factor
super(ArrowStyle.Wedge, self).__init__()
def transmute(self, path, mutation_size, linewidth):
x0, y0, x1, y1, x2, y2 = self.ensure_quadratic_bezier(path)
arrow_path = [(x0, y0), (x1, y1), (x2, y2)]
b_plus, b_minus = make_wedged_bezier2(
arrow_path,
self.tail_width * mutation_size / 2.,
wm=self.shrink_factor)
patch_path = [(Path.MOVETO, b_plus[0]),
(Path.CURVE3, b_plus[1]),
(Path.CURVE3, b_plus[2]),
(Path.LINETO, b_minus[2]),
(Path.CURVE3, b_minus[1]),
(Path.CURVE3, b_minus[0]),
(Path.CLOSEPOLY, b_minus[0]),
]
path = Path([p for c, p in patch_path], [c for c, p in patch_path])
return path, True
_style_list["wedge"] = Wedge
if __doc__:
__doc__ = cbook.dedent(__doc__) % \
{"AvailableArrowstyles": _pprint_styles(_style_list)}
docstring.interpd.update(
AvailableArrowstyles=_pprint_styles(ArrowStyle._style_list),
AvailableConnectorstyles=_pprint_styles(ConnectionStyle._style_list),
)
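# A small, illustrative sketch (not exercised at import time) of how the style
# factories above are typically instantiated; the numeric values are arbitrary.
def _demo_style_factories():
    # Equivalent to ConnectionStyle.Arc3(rad=0.2); the name/kwargs form and the
    # single-string form ("arc3,rad=0.2") are interchangeable.
    connection = ConnectionStyle("arc3", rad=0.2)
    # "-|>" maps to ArrowStyle.CurveFilledB in the registry above.
    arrow = ArrowStyle("-|>", head_length=0.4, head_width=0.2)
    return connection, arrow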
class FancyArrowPatch(Patch):
"""
    A fancy arrow patch. It draws an arrow using the :class:`ArrowStyle`.
"""
_edge_default = True
def __str__(self):
if self._posA_posB is not None:
(x1, y1), (x2, y2) = self._posA_posB
return self.__class__.__name__ \
+ "(%g,%g->%g,%g)" % (x1, y1, x2, y2)
else:
return self.__class__.__name__ \
+ "(%s)" % (str(self._path_original),)
@docstring.dedent_interpd
def __init__(self, posA=None, posB=None,
path=None,
arrowstyle="simple",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=2.,
shrinkB=2.,
mutation_scale=1.,
mutation_aspect=None,
dpi_cor=1.,
**kwargs):
"""
        If *posA* and *posB* are given, a path connecting the two points is
        created according to the connection style. The path will be
        clipped with *patchA* and *patchB* and further shrunken by
        *shrinkA* and *shrinkB*. An arrow is drawn along this
        resulting path using the *arrowstyle* parameter. If *path* is
        provided, an arrow is drawn along this path and *patchA*,
        *patchB*, *shrinkA*, and *shrinkB* are ignored.
        The *connectionstyle* describes how *posA* and *posB* are
        connected. It can be an instance of the ConnectionStyle class
        (matplotlib.patches.ConnectionStyle) or a string of the
        connectionstyle name, with optional comma-separated
        attributes. The following connection styles are available.
%(AvailableConnectorstyles)s
        The *arrowstyle* describes how the fancy arrow will be
        drawn. It can be a string of one of the available arrowstyle names,
        with optional comma-separated attributes, or an
        ArrowStyle instance. The optional attributes are meant to be
        scaled with the *mutation_scale*. The following arrow styles are
        available.
%(AvailableArrowstyles)s
*mutation_scale* : a value with which attributes of arrowstyle
(e.g., head_length) will be scaled. default=1.
*mutation_aspect* : The height of the rectangle will be
squeezed by this value before the mutation and the mutated
box will be stretched by the inverse of it. default=None.
Valid kwargs are:
%(Patch)s
"""
Patch.__init__(self, **kwargs)
if posA is not None and posB is not None and path is None:
self._posA_posB = [posA, posB]
if connectionstyle is None:
connectionstyle = "arc3"
self.set_connectionstyle(connectionstyle)
elif posA is None and posB is None and path is not None:
self._posA_posB = None
            self._connector = None
else:
            raise ValueError(
                "either posA and posB, or path, needs to be provided")
self.patchA = patchA
self.patchB = patchB
self.shrinkA = shrinkA
self.shrinkB = shrinkB
self._path_original = path
self.set_arrowstyle(arrowstyle)
self._mutation_scale = mutation_scale
self._mutation_aspect = mutation_aspect
self.set_dpi_cor(dpi_cor)
#self._draw_in_display_coordinate = True
def set_dpi_cor(self, dpi_cor):
"""
dpi_cor is currently used for linewidth-related things and
shrink factor. Mutation scale is affected by this.
"""
self._dpi_cor = dpi_cor
self.stale = True
def get_dpi_cor(self):
"""
dpi_cor is currently used for linewidth-related things and
shrink factor. Mutation scale is affected by this.
"""
return self._dpi_cor
def set_positions(self, posA, posB):
""" set the begin and end positions of the connecting
path. Use current value if None.
"""
if posA is not None:
self._posA_posB[0] = posA
if posB is not None:
self._posA_posB[1] = posB
self.stale = True
def set_patchA(self, patchA):
""" set the begin patch.
"""
self.patchA = patchA
self.stale = True
    def set_patchB(self, patchB):
        """ Set the end patch.
        """
self.patchB = patchB
self.stale = True
def set_connectionstyle(self, connectionstyle, **kw):
"""
Set the connection style.
*connectionstyle* can be a string with connectionstyle name with
optional comma-separated attributes. Alternatively, the attrs can be
provided as keywords.
set_connectionstyle("arc,angleA=0,armA=30,rad=10")
set_connectionstyle("arc", angleA=0,armA=30,rad=10)
Old attrs simply are forgotten.
Without argument (or with connectionstyle=None), return
available styles as a list of strings.
"""
if connectionstyle is None:
return ConnectionStyle.pprint_styles()
if isinstance(connectionstyle, ConnectionStyle._Base):
self._connector = connectionstyle
elif six.callable(connectionstyle):
# we may need check the calling convention of the given function
self._connector = connectionstyle
else:
self._connector = ConnectionStyle(connectionstyle, **kw)
self.stale = True
def get_connectionstyle(self):
"""
Return the ConnectionStyle instance
"""
return self._connector
def set_arrowstyle(self, arrowstyle=None, **kw):
"""
Set the arrow style.
*arrowstyle* can be a string with arrowstyle name with optional
comma-separated attributes. Alternatively, the attrs can
be provided as keywords.
set_arrowstyle("Fancy,head_length=0.2")
set_arrowstyle("fancy", head_length=0.2)
Old attrs simply are forgotten.
Without argument (or with arrowstyle=None), return
        available arrow styles as a list of strings.
"""
if arrowstyle is None:
return ArrowStyle.pprint_styles()
if isinstance(arrowstyle, ArrowStyle._Base):
self._arrow_transmuter = arrowstyle
else:
self._arrow_transmuter = ArrowStyle(arrowstyle, **kw)
self.stale = True
def get_arrowstyle(self):
"""
Return the arrowstyle object
"""
return self._arrow_transmuter
def set_mutation_scale(self, scale):
"""
Set the mutation scale.
ACCEPTS: float
"""
self._mutation_scale = scale
self.stale = True
def get_mutation_scale(self):
"""
Return the mutation scale.
"""
return self._mutation_scale
def set_mutation_aspect(self, aspect):
"""
Set the aspect ratio of the bbox mutation.
ACCEPTS: float
"""
self._mutation_aspect = aspect
self.stale = True
def get_mutation_aspect(self):
"""
Return the aspect ratio of the bbox mutation.
"""
return self._mutation_aspect
def get_path(self):
"""
        Return the path of the arrow in data coordinates. Use the
        get_path_in_displaycoord() method to retrieve the arrow path
        in display coordinates.
"""
_path, fillable = self.get_path_in_displaycoord()
if cbook.iterable(fillable):
_path = concatenate_paths(_path)
return self.get_transform().inverted().transform_path(_path)
def get_path_in_displaycoord(self):
"""
        Return the mutated path of the arrow in display coordinates.
"""
dpi_cor = self.get_dpi_cor()
if self._posA_posB is not None:
posA = self.get_transform().transform_point(self._posA_posB[0])
posB = self.get_transform().transform_point(self._posA_posB[1])
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
else:
_path = self.get_transform().transform_path(self._path_original)
_path, fillable = self.get_arrowstyle()(
_path,
self.get_mutation_scale() * dpi_cor,
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
#if not fillable:
# self._fill = False
return _path, fillable
def draw(self, renderer):
if not self.get_visible():
return
renderer.open_group('patch', self.get_gid())
gc = renderer.new_gc()
gc.set_foreground(self._edgecolor, isRGBA=True)
lw = self._linewidth
if self._edgecolor[3] == 0:
lw = 0
gc.set_linewidth(lw)
gc.set_dashes(self._dashoffset, self._dashes)
gc.set_antialiased(self._antialiased)
self._set_gc_clip(gc)
gc.set_capstyle('round')
gc.set_snap(self.get_snap())
rgbFace = self._facecolor
if rgbFace[3] == 0:
rgbFace = None # (some?) renderers expect this as no-fill signal
gc.set_alpha(self._alpha)
if self._hatch:
gc.set_hatch(self._hatch)
if self.get_sketch_params() is not None:
gc.set_sketch_params(*self.get_sketch_params())
        # FIXME : dpi_cor is for the dpi-dependency of the
        # linewidth. There could be room for improvement.
#
#dpi_cor = renderer.points_to_pixels(1.)
self.set_dpi_cor(renderer.points_to_pixels(1.))
path, fillable = self.get_path_in_displaycoord()
if not cbook.iterable(fillable):
path = [path]
fillable = [fillable]
affine = transforms.IdentityTransform()
if self.get_path_effects():
from matplotlib.patheffects import PathEffectRenderer
renderer = PathEffectRenderer(self.get_path_effects(), renderer)
for p, f in zip(path, fillable):
if f:
renderer.draw_path(gc, p, affine, rgbFace)
else:
renderer.draw_path(gc, p, affine, None)
gc.restore()
renderer.close_group('patch')
self.stale = False
class ConnectionPatch(FancyArrowPatch):
"""
    A :class:`~matplotlib.patches.ConnectionPatch` is used to make
    connecting lines between two points (possibly in different axes).
"""
def __str__(self):
return "ConnectionPatch((%g,%g),(%g,%g))" % \
(self.xy1[0], self.xy1[1], self.xy2[0], self.xy2[1])
@docstring.dedent_interpd
def __init__(self, xyA, xyB, coordsA, coordsB=None,
axesA=None, axesB=None,
arrowstyle="-",
arrow_transmuter=None,
connectionstyle="arc3",
connector=None,
patchA=None,
patchB=None,
shrinkA=0.,
shrinkB=0.,
mutation_scale=10.,
mutation_aspect=None,
clip_on=False,
dpi_cor=1.,
**kwargs):
"""
Connect point *xyA* in *coordsA* with point *xyB* in *coordsB*
Valid keys are
=============== ======================================================
Key Description
=============== ======================================================
arrowstyle the arrow style
connectionstyle the connection style
relpos default is (0.5, 0.5)
patchA default is bounding box of the text
patchB default is None
shrinkA default is 2 points
shrinkB default is 2 points
mutation_scale default is text size (in points)
mutation_aspect default is 1.
? any key for :class:`matplotlib.patches.PathPatch`
=============== ======================================================
*coordsA* and *coordsB* are strings that indicate the
coordinates of *xyA* and *xyB*.
================= ===================================================
Property Description
================= ===================================================
'figure points' points from the lower left corner of the figure
'figure pixels' pixels from the lower left corner of the figure
        'figure fraction'  0,0 is lower left of figure and 1,1 is upper right
'axes points' points from lower left corner of axes
'axes pixels' pixels from lower left corner of axes
        'axes fraction'    0,0 is lower left of axes and 1,1 is upper right
'data' use the coordinate system of the object being
annotated (default)
'offset points' Specify an offset (in points) from the *xy* value
'polar' you can specify *theta*, *r* for the annotation,
even in cartesian plots. Note that if you
are using a polar axes, you do not need
to specify polar for the coordinate
system since that is the native "data" coordinate
system.
================= ===================================================
"""
if coordsB is None:
coordsB = coordsA
# we'll draw ourself after the artist we annotate by default
self.xy1 = xyA
self.xy2 = xyB
self.coords1 = coordsA
self.coords2 = coordsB
self.axesA = axesA
self.axesB = axesB
FancyArrowPatch.__init__(self,
posA=(0, 0), posB=(1, 1),
arrowstyle=arrowstyle,
arrow_transmuter=arrow_transmuter,
connectionstyle=connectionstyle,
connector=connector,
patchA=patchA,
patchB=patchB,
shrinkA=shrinkA,
shrinkB=shrinkB,
mutation_scale=mutation_scale,
mutation_aspect=mutation_aspect,
clip_on=clip_on,
dpi_cor=dpi_cor,
**kwargs)
# if True, draw annotation only if self.xy is inside the axes
self._annotation_clip = None
def _get_xy(self, x, y, s, axes=None):
"""
        Calculate the pixel position of the given point.
"""
if axes is None:
axes = self.axes
if s == 'data':
trans = axes.transData
x = float(self.convert_xunits(x))
y = float(self.convert_yunits(y))
return trans.transform_point((x, y))
elif s == 'offset points':
# convert the data point
dx, dy = self.xy
# prevent recursion
if self.xycoords == 'offset points':
return self._get_xy(dx, dy, 'data')
dx, dy = self._get_xy(dx, dy, self.xycoords)
# convert the offset
dpi = self.figure.get_dpi()
x *= dpi / 72.
y *= dpi / 72.
# add the offset to the data point
x += dx
y += dy
return x, y
elif s == 'polar':
theta, r = x, y
x = r * np.cos(theta)
y = r * np.sin(theta)
trans = axes.transData
return trans.transform_point((x, y))
elif s == 'figure points':
# points from the lower left corner of the figure
dpi = self.figure.dpi
l, b, w, h = self.figure.bbox.bounds
r = l + w
t = b + h
x *= dpi / 72.
y *= dpi / 72.
if x < 0:
x = r + x
if y < 0:
y = t + y
return x, y
elif s == 'figure pixels':
# pixels from the lower left corner of the figure
l, b, w, h = self.figure.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x
if y < 0:
y = t + y
return x, y
elif s == 'figure fraction':
# (0,0) is lower left, (1,1) is upper right of figure
trans = self.figure.transFigure
return trans.transform_point((x, y))
elif s == 'axes points':
# points from the lower left corner of the axes
dpi = self.figure.dpi
l, b, w, h = axes.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x * dpi / 72.
else:
x = l + x * dpi / 72.
if y < 0:
y = t + y * dpi / 72.
else:
y = b + y * dpi / 72.
return x, y
elif s == 'axes pixels':
#pixels from the lower left corner of the axes
l, b, w, h = axes.bbox.bounds
r = l + w
t = b + h
if x < 0:
x = r + x
else:
x = l + x
if y < 0:
y = t + y
else:
y = b + y
return x, y
elif s == 'axes fraction':
#(0,0) is lower left, (1,1) is upper right of axes
trans = axes.transAxes
return trans.transform_point((x, y))
def set_annotation_clip(self, b):
"""
set *annotation_clip* attribute.
* True: the annotation will only be drawn when self.xy is inside the
axes.
* False: the annotation will always be drawn regardless of its
position.
* None: the self.xy will be checked only if *xycoords* is "data"
"""
self._annotation_clip = b
self.stale = True
def get_annotation_clip(self):
"""
Return *annotation_clip* attribute.
See :meth:`set_annotation_clip` for the meaning of return values.
"""
return self._annotation_clip
def get_path_in_displaycoord(self):
"""
        Return the mutated path of the arrow in display coordinates.
"""
dpi_cor = self.get_dpi_cor()
x, y = self.xy1
posA = self._get_xy(x, y, self.coords1, self.axesA)
x, y = self.xy2
posB = self._get_xy(x, y, self.coords2, self.axesB)
_path = self.get_connectionstyle()(posA, posB,
patchA=self.patchA,
patchB=self.patchB,
shrinkA=self.shrinkA * dpi_cor,
shrinkB=self.shrinkB * dpi_cor
)
_path, fillable = self.get_arrowstyle()(
_path,
self.get_mutation_scale() * dpi_cor,
self.get_linewidth() * dpi_cor,
self.get_mutation_aspect()
)
return _path, fillable
def _check_xy(self, renderer):
"""
        Check whether the annotation needs to be drawn.
"""
b = self.get_annotation_clip()
if b or (b is None and self.coords1 == "data"):
x, y = self.xy1
xy_pixel = self._get_xy(x, y, self.coords1, self.axesA)
if not self.axes.contains_point(xy_pixel):
return False
if b or (b is None and self.coords2 == "data"):
x, y = self.xy2
xy_pixel = self._get_xy(x, y, self.coords2, self.axesB)
if self.axesB is None:
axes = self.axes
else:
axes = self.axesB
if not axes.contains_point(xy_pixel):
return False
return True
def draw(self, renderer):
"""
Draw.
"""
if renderer is not None:
self._renderer = renderer
if not self.get_visible():
return
if not self._check_xy(renderer):
return
FancyArrowPatch.draw(self, renderer)
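# An illustrative usage sketch for the two patch classes above, assuming a
# standard matplotlib/pyplot environment; it is kept in a helper that is never
# called at import time, and the coordinates are arbitrary.
def _demo_fancyarrow_usage():
    import matplotlib.pyplot as plt
    fig, (ax1, ax2) = plt.subplots(1, 2)
    # A FancyArrowPatch whose path comes from the "arc3" connection style and
    # whose head is drawn by the "-|>" arrow style.
    arrow = FancyArrowPatch((0.2, 0.2), (0.8, 0.8),
                            connectionstyle="arc3,rad=0.3",
                            arrowstyle="-|>", mutation_scale=20)
    ax1.add_patch(arrow)
    # A ConnectionPatch linking a data point in ax1 to a data point in ax2.
    con = ConnectionPatch(xyA=(0.5, 0.5), xyB=(0.5, 0.5),
                          coordsA="data", coordsB="data",
                          axesA=ax1, axesB=ax2, arrowstyle="->")
    ax2.add_artist(con)
    return fig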
| gpl-3.0 |
kcavagnolo/astroML | examples/learning/plot_neighbors_photoz.py | 3 | 2115 | """
K-Neighbors for Photometric Redshifts
-------------------------------------
Estimate redshifts from the colors of sdss galaxies and quasars.
This uses colors from a sample of 50,000 objects with SDSS photometry
and ugriz magnitudes. The example shows how far one can get with an
extremely simple machine learning approach to the photometric redshift
problem.
The function :func:`fetch_sdss_galaxy_colors` used below actually queries
the SDSS CASjobs server for the colors of the 50,000 galaxies.
"""
# Author: Jake VanderPlas <vanderplas@astro.washington.edu>
# License: BSD
# The figure is an example from astroML: see http://astroML.github.com
from __future__ import print_function, division
import numpy as np
from matplotlib import pyplot as plt
from sklearn.neighbors import KNeighborsRegressor
from astroML.datasets import fetch_sdss_galaxy_colors
from astroML.plotting import scatter_contour
n_neighbors = 1
data = fetch_sdss_galaxy_colors()
N = len(data)
# shuffle data
np.random.seed(0)
np.random.shuffle(data)
# put colors in a matrix
X = np.zeros((N, 4))
X[:, 0] = data['u'] - data['g']
X[:, 1] = data['g'] - data['r']
X[:, 2] = data['r'] - data['i']
X[:, 3] = data['i'] - data['z']
z = data['redshift']
# divide into training and testing data
Ntrain = N // 2
Xtrain = X[:Ntrain]
ztrain = z[:Ntrain]
Xtest = X[Ntrain:]
ztest = z[Ntrain:]
knn = KNeighborsRegressor(n_neighbors, weights='uniform')
zpred = knn.fit(Xtrain, ztrain).predict(Xtest)
axis_lim = np.array([-0.1, 2.5])
rms = np.sqrt(np.mean((ztest - zpred) ** 2))
print("RMS error = %.2g" % rms)
ax = plt.axes()
plt.scatter(ztest, zpred, c='k', lw=0, s=4)
plt.plot(axis_lim, axis_lim, '--k')
plt.plot(axis_lim, axis_lim + rms, ':k')
plt.plot(axis_lim, axis_lim - rms, ':k')
plt.xlim(axis_lim)
plt.ylim(axis_lim)
plt.text(0.99, 0.02, "RMS error = %.2g" % rms,
ha='right', va='bottom', transform=ax.transAxes,
bbox=dict(ec='w', fc='w'), fontsize=16)
plt.title('Photo-z: Nearest Neighbor Regression')
plt.xlabel(r'$\mathrm{z_{spec}}$', fontsize=14)
plt.ylabel(r'$\mathrm{z_{phot}}$', fontsize=14)
plt.show()
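# A small optional helper (hypothetical, not part of the original example):
# re-fit with a different number of neighbors to see how the RMS error changes,
# e.g. rms_for_n_neighbors(5).
def rms_for_n_neighbors(k):
    model = KNeighborsRegressor(k, weights='uniform')
    pred = model.fit(Xtrain, ztrain).predict(Xtest)
    return np.sqrt(np.mean((ztest - pred) ** 2))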
| bsd-2-clause |
iproduct/course-social-robotics | 11-dnn-keras/venv/Lib/site-packages/pandas/tests/reshape/merge/test_merge_ordered.py | 3 | 6370 | import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, merge_ordered
import pandas._testing as tm
class TestMergeOrdered:
def setup_method(self, method):
self.left = DataFrame({"key": ["a", "c", "e"], "lvalue": [1, 2.0, 3]})
self.right = DataFrame({"key": ["b", "c", "d", "f"], "rvalue": [1, 2, 3.0, 4]})
def test_basic(self):
result = merge_ordered(self.left, self.right, on="key")
expected = DataFrame(
{
"key": ["a", "b", "c", "d", "e", "f"],
"lvalue": [1, np.nan, 2, np.nan, 3, np.nan],
"rvalue": [np.nan, 1, 2, 3, np.nan, 4],
}
)
tm.assert_frame_equal(result, expected)
def test_ffill(self):
result = merge_ordered(self.left, self.right, on="key", fill_method="ffill")
expected = DataFrame(
{
"key": ["a", "b", "c", "d", "e", "f"],
"lvalue": [1.0, 1, 2, 2, 3, 3.0],
"rvalue": [np.nan, 1, 2, 3, 3, 4],
}
)
tm.assert_frame_equal(result, expected)
def test_multigroup(self):
left = pd.concat([self.left, self.left], ignore_index=True)
left["group"] = ["a"] * 3 + ["b"] * 3
result = merge_ordered(
left, self.right, on="key", left_by="group", fill_method="ffill"
)
expected = DataFrame(
{
"key": ["a", "b", "c", "d", "e", "f"] * 2,
"lvalue": [1.0, 1, 2, 2, 3, 3.0] * 2,
"rvalue": [np.nan, 1, 2, 3, 3, 4] * 2,
}
)
expected["group"] = ["a"] * 6 + ["b"] * 6
tm.assert_frame_equal(result, expected.loc[:, result.columns])
result2 = merge_ordered(
self.right, left, on="key", right_by="group", fill_method="ffill"
)
tm.assert_frame_equal(result, result2.loc[:, result.columns])
result = merge_ordered(left, self.right, on="key", left_by="group")
assert result["group"].notna().all()
def test_merge_type(self):
class NotADataFrame(DataFrame):
@property
def _constructor(self):
return NotADataFrame
nad = NotADataFrame(self.left)
result = nad.merge(self.right, on="key")
assert isinstance(result, NotADataFrame)
def test_empty_sequence_concat(self):
# GH 9157
empty_pat = "[Nn]o objects"
none_pat = "objects.*None"
test_cases = [
((), empty_pat),
([], empty_pat),
({}, empty_pat),
([None], none_pat),
([None, None], none_pat),
]
for df_seq, pattern in test_cases:
with pytest.raises(ValueError, match=pattern):
pd.concat(df_seq)
pd.concat([DataFrame()])
pd.concat([None, DataFrame()])
pd.concat([DataFrame(), None])
def test_doc_example(self):
left = DataFrame(
{
"group": list("aaabbb"),
"key": ["a", "c", "e", "a", "c", "e"],
"lvalue": [1, 2, 3] * 2,
}
)
right = DataFrame({"key": ["b", "c", "d"], "rvalue": [1, 2, 3]})
result = merge_ordered(left, right, fill_method="ffill", left_by="group")
expected = DataFrame(
{
"group": list("aaaaabbbbb"),
"key": ["a", "b", "c", "d", "e"] * 2,
"lvalue": [1, 1, 2, 2, 3] * 2,
"rvalue": [np.nan, 1, 2, 3, 3] * 2,
}
)
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"left, right, on, left_by, right_by, expected",
[
(
DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}),
DataFrame({"T": [2], "E": [1]}),
["T"],
["G", "H"],
None,
DataFrame(
{
"G": ["g"] * 3,
"H": ["h"] * 3,
"T": [1, 2, 3],
"E": [np.nan, 1.0, np.nan],
}
),
),
(
DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}),
DataFrame({"T": [2], "E": [1]}),
"T",
["G", "H"],
None,
DataFrame(
{
"G": ["g"] * 3,
"H": ["h"] * 3,
"T": [1, 2, 3],
"E": [np.nan, 1.0, np.nan],
}
),
),
(
DataFrame({"T": [2], "E": [1]}),
DataFrame({"G": ["g", "g"], "H": ["h", "h"], "T": [1, 3]}),
["T"],
None,
["G", "H"],
DataFrame(
{
"T": [1, 2, 3],
"E": [np.nan, 1.0, np.nan],
"G": ["g"] * 3,
"H": ["h"] * 3,
}
),
),
],
)
def test_list_type_by(self, left, right, on, left_by, right_by, expected):
# GH 35269
result = merge_ordered(
left=left,
right=right,
on=on,
left_by=left_by,
right_by=right_by,
)
tm.assert_frame_equal(result, expected)
def test_left_by_length_equals_to_right_shape0(self):
# GH 38166
left = DataFrame([["g", "h", 1], ["g", "h", 3]], columns=list("GHT"))
right = DataFrame([[2, 1]], columns=list("TE"))
result = merge_ordered(left, right, on="T", left_by=["G", "H"])
expected = DataFrame(
{"G": ["g"] * 3, "H": ["h"] * 3, "T": [1, 2, 3], "E": [np.nan, 1.0, np.nan]}
)
tm.assert_frame_equal(result, expected)
def test_elements_not_in_by_but_in_df(self):
# GH 38167
left = DataFrame([["g", "h", 1], ["g", "h", 3]], columns=list("GHT"))
right = DataFrame([[2, 1]], columns=list("TE"))
msg = r"\{'h'\} not found in left columns"
with pytest.raises(KeyError, match=msg):
merge_ordered(left, right, on="T", left_by=["G", "h"])
| gpl-2.0 |
vibhorag/scikit-learn | sklearn/tests/test_isotonic.py | 230 | 11087 | import numpy as np
import pickle
from sklearn.isotonic import (check_increasing, isotonic_regression,
IsotonicRegression)
from sklearn.utils.testing import (assert_raises, assert_array_equal,
assert_true, assert_false, assert_equal,
assert_array_almost_equal,
assert_warns_message, assert_no_warnings)
from sklearn.utils import shuffle
def test_permutation_invariance():
    # check that fit is permutation invariant.
# regression test of missing sorting of sample-weights
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
x_s, y_s, sample_weight_s = shuffle(x, y, sample_weight, random_state=0)
y_transformed = ir.fit_transform(x, y, sample_weight=sample_weight)
y_transformed_s = ir.fit(x_s, y_s, sample_weight=sample_weight_s).transform(x)
assert_array_equal(y_transformed, y_transformed_s)
def test_check_increasing_up():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1.5, 2.77, 8.99, 8.99, 50]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_up_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5]
# Check that we got increasing=True and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_true(is_increasing)
def test_check_increasing_down():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1.5, -2.77, -8.99, -8.99, -50]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_increasing_down_extreme():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, -2, -3, -4, -5]
# Check that we got increasing=False and no warnings
is_increasing = assert_no_warnings(check_increasing, x, y)
assert_false(is_increasing)
def test_check_ci_warn():
x = [0, 1, 2, 3, 4, 5]
y = [0, -1, 2, -3, 4, -5]
# Check that we got increasing=False and CI interval warning
is_increasing = assert_warns_message(UserWarning, "interval",
check_increasing,
x, y)
assert_false(is_increasing)
def test_isotonic_regression():
y = np.array([3, 7, 5, 9, 8, 7, 10])
y_ = np.array([3, 6, 6, 8, 8, 8, 10])
assert_array_equal(y_, isotonic_regression(y))
x = np.arange(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(ir.transform(x), ir.predict(x))
# check that it is immune to permutation
perm = np.random.permutation(len(y))
ir = IsotonicRegression(y_min=0., y_max=1.)
assert_array_equal(ir.fit_transform(x[perm], y[perm]),
ir.fit_transform(x, y)[perm])
assert_array_equal(ir.transform(x[perm]), ir.transform(x)[perm])
# check we don't crash when all x are equal:
ir = IsotonicRegression()
assert_array_equal(ir.fit_transform(np.ones(len(x)), y), np.mean(y))
def test_isotonic_regression_ties_min():
# Setup examples with ties on minimum
x = [0, 1, 1, 2, 3, 4, 5]
y = [0, 1, 2, 3, 4, 5, 6]
y_true = [0, 1.5, 1.5, 3, 4, 5, 6]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_max():
# Setup examples with ties on maximum
x = [1, 2, 3, 4, 5, 5]
y = [1, 2, 3, 4, 5, 6]
y_true = [1, 2, 3, 4, 5.5, 5.5]
# Check that we get identical results for fit/transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_equal(ir.fit(x, y).transform(x), ir.fit_transform(x, y))
assert_array_equal(y_true, ir.fit_transform(x, y))
def test_isotonic_regression_ties_secondary_():
"""
Test isotonic regression fit, transform and fit_transform
against the "secondary" ties method and "pituitary" data from R
"isotone" package, as detailed in: J. d. Leeuw, K. Hornik, P. Mair,
Isotone Optimization in R: Pool-Adjacent-Violators Algorithm
(PAVA) and Active Set Methods
Set values based on pituitary example and
the following R command detailed in the paper above:
> library("isotone")
> data("pituitary")
> res1 <- gpava(pituitary$age, pituitary$size, ties="secondary")
> res1$x
`isotone` version: 1.0-2, 2014-09-07
R version: R version 3.1.1 (2014-07-10)
"""
x = [8, 8, 8, 10, 10, 10, 12, 12, 12, 14, 14]
y = [21, 23.5, 23, 24, 21, 25, 21.5, 22, 19, 23.5, 25]
y_true = [22.22222, 22.22222, 22.22222, 22.22222, 22.22222, 22.22222,
22.22222, 22.22222, 22.22222, 24.25, 24.25]
# Check fit, transform and fit_transform
ir = IsotonicRegression()
ir.fit(x, y)
assert_array_almost_equal(ir.transform(x), y_true, 4)
assert_array_almost_equal(ir.fit_transform(x, y), y_true, 4)
def test_isotonic_regression_reversed():
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
y_ = IsotonicRegression(increasing=False).fit_transform(
np.arange(len(y)), y)
assert_array_equal(np.ones(y_[:-1].shape), ((y_[:-1] - y_[1:]) >= 0))
def test_isotonic_regression_auto_decreasing():
# Set y and x for decreasing
y = np.array([10, 9, 10, 7, 6, 6.1, 5])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship decreases
is_increasing = y_[0] < y_[-1]
assert_false(is_increasing)
def test_isotonic_regression_auto_increasing():
    # Set y and x for increasing
y = np.array([5, 6.1, 6, 7, 10, 9, 10])
x = np.arange(len(y))
# Create model and fit_transform
ir = IsotonicRegression(increasing='auto')
y_ = assert_no_warnings(ir.fit_transform, x, y)
# Check that relationship increases
is_increasing = y_[0] < y_[-1]
assert_true(is_increasing)
def test_assert_raises_exceptions():
ir = IsotonicRegression()
rng = np.random.RandomState(42)
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7, 3], [0.1, 0.6])
assert_raises(ValueError, ir.fit, [0, 1, 2], [5, 7])
assert_raises(ValueError, ir.fit, rng.randn(3, 10), [0, 1, 2])
assert_raises(ValueError, ir.transform, rng.randn(3, 10))
def test_isotonic_sample_weight_parameter_default_value():
# check if default value of sample_weight parameter is one
ir = IsotonicRegression()
# random test data
rng = np.random.RandomState(42)
n = 100
x = np.arange(n)
y = rng.randint(-50, 50, size=(n,)) + 50. * np.log(1 + np.arange(n))
# check if value is correctly used
weights = np.ones(n)
y_set_value = ir.fit_transform(x, y, sample_weight=weights)
y_default_value = ir.fit_transform(x, y)
assert_array_equal(y_set_value, y_default_value)
def test_isotonic_min_max_boundaries():
    # check if min/max values are used correctly
ir = IsotonicRegression(y_min=2, y_max=4)
n = 6
x = np.arange(n)
y = np.arange(n)
y_test = [2, 2, 2, 3, 4, 4]
y_result = np.round(ir.fit_transform(x, y))
assert_array_equal(y_result, y_test)
def test_isotonic_sample_weight():
ir = IsotonicRegression()
x = [1, 2, 3, 4, 5, 6, 7]
y = [1, 41, 51, 1, 2, 5, 24]
sample_weight = [1, 2, 3, 4, 5, 6, 7]
expected_y = [1, 13.95, 13.95, 13.95, 13.95, 13.95, 24]
received_y = ir.fit_transform(x, y, sample_weight=sample_weight)
assert_array_equal(expected_y, received_y)
def test_isotonic_regression_oob_raise():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
ir.fit(x, y)
# Check that an exception is thrown
assert_raises(ValueError, ir.predict, [min(x) - 10, max(x) + 10])
def test_isotonic_regression_oob_clip():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
# Predict from training and test x and check that min/max match.
y1 = ir.predict([min(x) - 10, max(x) + 10])
y2 = ir.predict(x)
assert_equal(max(y1), max(y2))
assert_equal(min(y1), min(y2))
def test_isotonic_regression_oob_nan():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="nan")
ir.fit(x, y)
# Predict from training and test x and check that we have two NaNs.
y1 = ir.predict([min(x) - 10, max(x) + 10])
assert_equal(sum(np.isnan(y1)), 2)
def test_isotonic_regression_oob_bad():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="xyz")
# Make sure that we throw an error for bad out_of_bounds value
assert_raises(ValueError, ir.fit, x, y)
def test_isotonic_regression_oob_bad_after():
# Set y and x
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="raise")
# Make sure that we throw an error for bad out_of_bounds value in transform
ir.fit(x, y)
ir.out_of_bounds = "xyz"
assert_raises(ValueError, ir.transform, x)
def test_isotonic_regression_pickle():
y = np.array([3, 7, 5, 9, 8, 7, 10])
x = np.arange(len(y))
# Create model and fit
ir = IsotonicRegression(increasing='auto', out_of_bounds="clip")
ir.fit(x, y)
ir_ser = pickle.dumps(ir, pickle.HIGHEST_PROTOCOL)
ir2 = pickle.loads(ir_ser)
np.testing.assert_array_equal(ir.predict(x), ir2.predict(x))
def test_isotonic_duplicate_min_entry():
x = [0, 0, 1]
y = [0, 0, 1]
ir = IsotonicRegression(increasing=True, out_of_bounds="clip")
ir.fit(x, y)
all_predictions_finite = np.all(np.isfinite(ir.predict(x)))
assert_true(all_predictions_finite)
def test_isotonic_zero_weight_loop():
# Test from @ogrisel's issue:
# https://github.com/scikit-learn/scikit-learn/issues/4297
# Get deterministic RNG with seed
rng = np.random.RandomState(42)
# Create regression and samples
regression = IsotonicRegression()
n_samples = 50
x = np.linspace(-3, 3, n_samples)
y = x + rng.uniform(size=n_samples)
# Get some random weights and zero out
w = rng.uniform(size=n_samples)
w[5:8] = 0
regression.fit(x, y, sample_weight=w)
# This will hang in failure case.
regression.fit(x, y, sample_weight=w)
| bsd-3-clause |
nelson-liu/scikit-learn | examples/ensemble/plot_adaboost_regression.py | 311 | 1529 | """
======================================
Decision Tree Regression with AdaBoost
======================================
A decision tree is boosted using the AdaBoost.R2 [1] algorithm on a 1D
sinusoidal dataset with a small amount of Gaussian noise.
299 boosts (300 decision trees) are compared with a single decision tree
regressor. As the number of boosts is increased, the regressor can fit more
detail.
.. [1] H. Drucker, "Improving Regressors using Boosting Techniques", 1997.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
# importing necessary libraries
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import AdaBoostRegressor
# Create the dataset
rng = np.random.RandomState(1)
X = np.linspace(0, 6, 100)[:, np.newaxis]
y = np.sin(X).ravel() + np.sin(6 * X).ravel() + rng.normal(0, 0.1, X.shape[0])
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=4)
regr_2 = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),
n_estimators=300, random_state=rng)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
y_1 = regr_1.predict(X)
y_2 = regr_2.predict(X)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="training samples")
plt.plot(X, y_1, c="g", label="n_estimators=1", linewidth=2)
plt.plot(X, y_2, c="r", label="n_estimators=300", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Boosted Decision Tree Regression")
plt.legend()
plt.show()
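# An optional numeric check (assumes sklearn.metrics is available): compare the
# training-set mean squared error of the single tree and the boosted ensemble.
from sklearn.metrics import mean_squared_error
print("Single tree MSE: %.4f" % mean_squared_error(y, y_1))
print("Boosted ensemble MSE: %.4f" % mean_squared_error(y, y_2))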
| bsd-3-clause |
arabenjamin/scikit-learn | sklearn/ensemble/tests/test_base.py | 284 | 1328 | """
Testing for the base module (sklearn.ensemble.base).
"""
# Authors: Gilles Louppe
# License: BSD 3 clause
from numpy.testing import assert_equal
from nose.tools import assert_true
from sklearn.utils.testing import assert_raise_message
from sklearn.datasets import load_iris
from sklearn.ensemble import BaggingClassifier
from sklearn.linear_model import Perceptron
def test_base():
# Check BaseEnsemble methods.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=3)
iris = load_iris()
ensemble.fit(iris.data, iris.target)
ensemble.estimators_ = [] # empty the list and create estimators manually
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator()
ensemble._make_estimator(append=False)
assert_equal(3, len(ensemble))
assert_equal(3, len(ensemble.estimators_))
assert_true(isinstance(ensemble[0], Perceptron))
def test_base_zero_n_estimators():
# Check that instantiating a BaseEnsemble with n_estimators<=0 raises
# a ValueError.
ensemble = BaggingClassifier(base_estimator=Perceptron(), n_estimators=0)
iris = load_iris()
assert_raise_message(ValueError,
"n_estimators must be greater than zero, got 0.",
ensemble.fit, iris.data, iris.target)
| bsd-3-clause |
allrod5/extra-trees | benchmarks/classification/decision_surface.py | 1 | 4826 | print(__doc__)
# Code source: Gaël Varoquaux
# Andreas Müller
# Modified for MCZA015-13 class project by Rodrigo Martins de Oliveira
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
from sklearn.datasets import make_circles
from sklearn.datasets import make_classification
from sklearn.datasets import make_moons
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import ExtraTreesClassifier as SKExtraTreesClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.gaussian_process import GaussianProcessClassifier
from sklearn.gaussian_process.kernels import RBF
from sklearn.model_selection import train_test_split
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier
from extra_trees.ensemble.forest import ExtraTreesClassifier
h = .02 # step size in the mesh
names = [
"Nearest Neighbors",
"Linear SVM",
"RBF SVM",
"Gaussian Process",
"Neural Net",
"Naive Bayes",
"QDA",
"AdaBoost",
"Decision Tree",
"Random Forest",
"ExtraTrees (SciKit)",
"ExtraTrees",
]
classifiers = [
KNeighborsClassifier(3),
SVC(kernel="linear", C=0.025),
SVC(gamma=2, C=1),
GaussianProcessClassifier(1.0 * RBF(1.0), warm_start=True),
MLPClassifier(alpha=1),
GaussianNB(),
QuadraticDiscriminantAnalysis(),
AdaBoostClassifier(),
DecisionTreeClassifier(max_depth=5),
RandomForestClassifier(max_depth=5, n_estimators=10, max_features=2),
SKExtraTreesClassifier(n_estimators=10, max_features=2),
ExtraTreesClassifier(n_estimators=10, max_features=2),
]
X, y = make_classification(
n_features=2, n_redundant=0, n_informative=2,
random_state=1, n_clusters_per_class=1)
rng = np.random.RandomState(2)
X += 2 * rng.uniform(size=X.shape)
linearly_separable = (X, y)
datasets = [
make_moons(noise=0.3, random_state=0),
make_circles(noise=0.2, factor=0.5, random_state=1),
linearly_separable
]
figure = plt.figure(figsize=(33, 11))
i = 1
# iterate over datasets
for ds_cnt, ds in enumerate(datasets):
# preprocess dataset, split into training and test part
X, y = ds
X = StandardScaler().fit_transform(X)
X_train, X_test, y_train, y_test = \
train_test_split(X, y, test_size=.4, random_state=42)
x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5
y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5
xx, yy = np.meshgrid(
np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
# just plot the dataset first
cm = plt.cm.RdBu
cm_bright = ListedColormap(['#FF0000', '#0000FF'])
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
if ds_cnt == 0:
ax.set_title("Input data")
# Plot the training points
ax.scatter(
X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(
X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright, alpha=0.6,
edgecolors='k')
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
i += 1
# iterate over classifiers
for name, clf in zip(names, classifiers):
ax = plt.subplot(len(datasets), len(classifiers) + 1, i)
clf.fit(X_train, y_train)
score = clf.score(X_test, y_test)
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
if hasattr(clf, "decision_function"):
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
else:
Z = clf.predict_proba(np.c_[xx.ravel(), yy.ravel()])[:, 1]
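        # either way Z holds one score per mesh point: a signed decision value when
        # decision_function is available, otherwise the predicted probability of class 1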
# Put the result into a color plot
Z = Z.reshape(xx.shape)
ax.contourf(xx, yy, Z, cmap=cm, alpha=.8)
# Plot also the training points
ax.scatter(
X_train[:, 0], X_train[:, 1], c=y_train, cmap=cm_bright,
edgecolors='k')
# and testing points
ax.scatter(
X_test[:, 0], X_test[:, 1], c=y_test, cmap=cm_bright,
edgecolors='k', alpha=0.6)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if ds_cnt == 0:
ax.set_title(name)
ax.text(
xx.max() - .3, yy.min() + .3, ('%.2f' % score).lstrip('0'),
size=15, horizontalalignment='right')
i += 1
plt.tight_layout()
plt.show()
| mit |
dadawang/tushare | tushare/stock/macro.py | 37 | 12728 | # -*- coding:utf-8 -*-
"""
Macroeconomic data interface
Created on 2015/01/24
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
import pandas as pd
import numpy as np
import re
import json
from tushare.stock import macro_vars as vs
from tushare.stock import cons as ct
try:
from urllib.request import urlopen, Request
except ImportError:
from urllib2 import urlopen, Request
def get_gdp_year():
"""
获取年度国内生产总值数据
Return
--------
DataFrame
year :统计年度
gdp :国内生产总值(亿元)
pc_gdp :人均国内生产总值(元)
gnp :国民生产总值(亿元)
pi :第一产业(亿元)
si :第二产业(亿元)
industry :工业(亿元)
cons_industry :建筑业(亿元)
ti :第三产业(亿元)
trans_industry :交通运输仓储邮电通信业(亿元)
lbdy :批发零售贸易及餐饮业(亿元)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 0, 70,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_YEAR_COLS)
df[df==0] = np.NaN
return df
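# Illustrative usage (requires network access to the Sina data source):
#   df = get_gdp_year()
#   df[['year', 'gdp', 'pc_gdp']].head()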
def get_gdp_quarter():
"""
获取季度国内生产总值数据
Return
--------
DataFrame
quarter :季度
gdp :国内生产总值(亿元)
gdp_yoy :国内生产总值同比增长(%)
pi :第一产业增加值(亿元)
pi_yoy:第一产业增加值同比增长(%)
si :第二产业增加值(亿元)
si_yoy :第二产业增加值同比增长(%)
ti :第三产业增加值(亿元)
ti_yoy :第三产业增加值同比增长(%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 1, 250,
rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_QUARTER_COLS)
df['quarter'] = df['quarter'].astype(object)
df[df==0] = np.NaN
return df
def get_gdp_for():
"""
获取三大需求对GDP贡献数据
Return
--------
DataFrame
year :统计年度
end_for :最终消费支出贡献率(%)
for_rate :最终消费支出拉动(百分点)
asset_for :资本形成总额贡献率(%)
asset_rate:资本形成总额拉动(百分点)
goods_for :货物和服务净出口贡献率(%)
goods_rate :货物和服务净出口拉动(百分点)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 4, 80, rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"','').replace('null','0')
js = json.loads(datastr)
df = pd.DataFrame(js,columns=vs.GDP_FOR_COLS)
df[df==0] = np.NaN
return df
def get_gdp_pull():
"""
获取三大产业对GDP拉动数据
Return
--------
DataFrame
year :统计年度
gdp_yoy :国内生产总值同比增长(%)
pi :第一产业拉动率(%)
si :第二产业拉动率(%)
industry:其中工业拉动(%)
ti :第三产业拉动率(%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[0], 5, 60, rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_PULL_COLS)
df[df==0] = np.NaN
return df
def get_gdp_contrib():
"""
获取三大产业贡献率数据
Return
--------
DataFrame
year :统计年度
gdp_yoy :国内生产总值
pi :第一产业献率(%)
si :第二产业献率(%)
industry:其中工业献率(%)
ti :第三产业献率(%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'], rdint,
vs.MACRO_TYPE[0], 6, 60, rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
datastr = datastr.replace('"', '').replace('null', '0')
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.GDP_CONTRIB_COLS)
df[df==0] = np.NaN
return df
def get_cpi():
"""
获取居民消费价格指数数据
Return
--------
DataFrame
month :统计月份
cpi :价格指数
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[1], 0, 600,
rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.CPI_COLS)
df['cpi'] = df['cpi'].astype(float)
return df
def get_ppi():
"""
获取工业品出厂价格指数数据
Return
--------
DataFrame
month :统计月份
ppiip :工业品出厂价格指数
ppi :生产资料价格指数
qm:采掘工业价格指数
rmi:原材料工业价格指数
pi:加工工业价格指数
cg:生活资料价格指数
food:食品类价格指数
clothing:衣着类价格指数
roeu:一般日用品价格指数
dcg:耐用消费品价格指数
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[1], 3, 600,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk') if ct.PY3 else text
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.PPI_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, np.NaN, x))
if i != 'month':
df[i] = df[i].astype(float)
return df
def get_deposit_rate():
"""
获取存款利率数据
Return
--------
DataFrame
date :变动日期
deposit_type :存款种类
rate:利率(%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 2, 600,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.DEPOSIT_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_loan_rate():
"""
获取贷款利率数据
Return
--------
DataFrame
date :执行日期
loan_type :存款种类
rate:利率(%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 3, 800,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.LOAN_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_rrr():
"""
获取存款准备金率数据
Return
--------
DataFrame
date :变动日期
before :调整前存款准备金率(%)
now:调整后存款准备金率(%)
changed:调整幅度(%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 4, 100,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.RRR_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_money_supply():
"""
获取货币供应量数据
Return
--------
DataFrame
month :统计时间
m2 :货币和准货币(广义货币M2)(亿元)
m2_yoy:货币和准货币(广义货币M2)同比增长(%)
m1:货币(狭义货币M1)(亿元)
m1_yoy:货币(狭义货币M1)同比增长(%)
m0:流通中现金(M0)(亿元)
m0_yoy:流通中现金(M0)同比增长(%)
cd:活期存款(亿元)
cd_yoy:活期存款同比增长(%)
qm:准货币(亿元)
qm_yoy:准货币同比增长(%)
ftd:定期存款(亿元)
ftd_yoy:定期存款同比增长(%)
sd:储蓄存款(亿元)
sd_yoy:储蓄存款同比增长(%)
rests:其他存款(亿元)
rests_yoy:其他存款同比增长(%)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 1, 600,
rdint))
text = urlopen(request, timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.MONEY_SUPPLY_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
def get_money_supply_bal():
"""
获取货币供应量(年底余额)数据
Return
--------
DataFrame
year :统计年度
m2 :货币和准货币(亿元)
m1:货币(亿元)
m0:流通中现金(亿元)
cd:活期存款(亿元)
qm:准货币(亿元)
ftd:定期存款(亿元)
sd:储蓄存款(亿元)
rests:其他存款(亿元)
"""
rdint = vs.random()
request = Request(vs.MACRO_URL%(vs.P_TYPE['http'], vs.DOMAINS['sina'],
rdint, vs.MACRO_TYPE[2], 0, 200,
rdint))
text = urlopen(request,timeout=10).read()
text = text.decode('gbk')
regSym = re.compile(r'\,count:(.*?)\}')
datastr = regSym.findall(text)
datastr = datastr[0]
datastr = datastr.split('data:')[1]
js = json.loads(datastr)
df = pd.DataFrame(js, columns=vs.MONEY_SUPPLY_BLA_COLS)
for i in df.columns:
df[i] = df[i].apply(lambda x:np.where(x is None, '--', x))
return df
| bsd-3-clause |
Remi-C/LOD_ordering_for_patches_of_points | script/test_octree_LOD.py | 1 | 7481 | # -*- coding: utf-8 -*-
"""
Created on Tue Dec 2 22:08:22 2014
@author: remi
"""
#trying to order points by octree (here a 2D quadtree) with python
from numpy import random, sqrt
import numpy as np
from sklearn import preprocessing
import matplotlib.pyplot as plt
#defining a dummy entry: a random 2D point cloud
pointcloud = random.rand(16*16,2);
index = np.arange(1,16*16+1)
#parameters
tot_level = 3 ;
#centering data so that the leftmost point is at x=0 and the bottommost point at y=0
pointcloud[:,0] = pointcloud[:,0]- np.amin(pointcloud[:,0]);
pointcloud[:,1] = pointcloud[:,1]- np.amin(pointcloud[:,1]);
#finding the max scaling, in X, Y or Z
max_r = max(np.amax(pointcloud[:,0])-np.amin(pointcloud[:,0]), np.amax(pointcloud[:,1])-np.amin(pointcloud[:,1]))
#dividing so the max extent is 1. Now the point cloud lies within [0,1] x [0,1]
pointcloud = pointcloud/ max_r ;
#we have to trick a little, so that for level 3, for instance, all values are between 0 and 7 inclusive, never reaching 8.
pointcloud_int = np.trunc(abs((pointcloud*pow(2,tot_level)-0.0001))).astype(int)
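#illustrative sanity check: the -0.0001 offset keeps a coordinate of exactly 1.0 inside
#the last cell (index 2**tot_level - 1) instead of spilling one cell too far
assert int(np.trunc(abs(1.0 * pow(2, tot_level) - 0.0001))) == pow(2, tot_level) - 1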
plt.plot(pointcloud[:,0],pointcloud[:,1], 'ro') ;
plt.plot(pointcloud_int[:,0],pointcloud_int[:,1], 'ro') ;
plt.axis([-1, 8, -1, 8]) ;
plt.show() ;
plt.close('all');
#rec and piv are filled by recursive_octree_ordering, defined further down
rec_ar = np.array(rec)
piv_ar = np.array(piv)
result_point = pointcloud_int[rec_ar[:,0]]
plt.plot(result_point[:,0],result_point[:,1], 'ro') ;
plt.plot(piv_ar[:,0], piv_ar[:,1], 'ro') ;
np.binary_repr(1)
def bin(s):
return str(s) if s<=1 else bin(s>>1) + str(s&1)
def testBit(int_type, offset):
mask = 1 << offset
return( (int_type & mask)>0 )
testBit(8,1)
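#e.g. with 6 == 0b110, testBit(6,1) and testBit(6,2) are True while testBit(6,0) is False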
pointcloud_bin = np.binary_repr(pointcloud_int)
pointcloud_int >> (tot_level-1) ;
#np.binary_repr(8)
( ((pointcloud_int >> 1 ) << 1) ) >> (tot_level-1) ;
testBit(pointcloud_int[:,1],3)
#cut the input point cloud into 8 based on l bit value starting form right to left
point_cloud_0_0_mask = np.logical_and((testBit(pointcloud_int[:,0],2)==0) , (testBit(pointcloud_int[:,1],2)==0) ) ;
pivot = np.array([pow(2,tot_level-1),pow(2,tot_level-1)])
pointcloud_centered = pointcloud_int - pivot
#coordinate to work :
toto = np.array([1,2,3])
testBit(toto,1)
(pointcloud_int >>1 )>>5
pow(2,4)
1<<4
#
# level 0
result = list() ;
pointcloud_int ;
index
pivot
cur_lev = 0
rec = [];
#find the 0 level point
min_point = np.argmin(np.sum(np.abs(pointcloud_int - pivot ),axis=1))
result.append(list((index[min_point],cur_lev)))
#compute the 4 sub parts
for b_x in list((0,1)) :
for b_y in list((0,1)) :
#looping on all 4 sub parts
        print(b_x, b_y)
rec.append (np.logical_and(
(testBit(pointcloud_int[:,0],2)>0)==b_x
,(testBit(pointcloud_int[:,1],2)>0)==b_y
)
)
testBit(pointcloud_int[:,0],2)
print (testBit(pointcloud_int[:,0],2)>0==b_x) ;
print (testBit(pointcloud_int[:,1],2)>0==b_y) ;
rec[b_x,b_y] = np.logical_and((testBit(pointcloud_int[:,0],2)>0==b_x)
,(testBit(pointcloud_int[:,1],2)>0==b_y) )
print(rec)
np.binary_repr(pointcloud_int[:,0] )
#given a point cloud
#compute the point closest to the center of each cell
def recursive_octree_ordering(point_array,index_array, center_point, level,tot_level, result,piv):
#importing necessary lib
import numpy as np;
#print for debug
# print '\n\n working on level : '+str(level);
# print 'input points: \n\t',point_array ;
# print 'index_array : \n\t',index_array;
# print 'center_point : \n\t',center_point;
# print 'level : \n\t',level;
# print 'tot_level : \n\t',tot_level;
# print 'result : \n\t',result;
    #stopping condition: no points left
    if len(point_array) == 0:
        return;
    #updating level
sub_part_level = level+1 ;
    print('level ', level, ', points remaining :', len(point_array))
    print(center_point)
piv.append(center_point);
#find the closest point to pivot
min_point = np.argmin(np.sum(np.abs(point_array - center_point ),axis=1))
result.append(list((index_array[min_point],level))) ;
#removing the found point from the array of points
#np.delete(point_array, min_point, axis=0) ;
#np.delete(index_array, min_point, axis=0) ;
    #stop if only one point remains: we won't divide further; same if we have reached max depth
if (len(point_array) ==1 )|(level >= tot_level):
return;
#compute the 4 sub parts
for b_x in list((0,1)) :
for b_y in list((0,1)) :
#looping on all 4 sub parts
            print((b_x*2-1), (b_y*2-1))
            update_to_pivot = np.asarray([ (b_x*2-1)*(pow(2,tot_level - level -2 ))
                ,(b_y*2-1)*(pow(2,tot_level - level -2 ))
                ]);
            sub_part_center_point = center_point + update_to_pivot;
            # we want to iterate on the sub parts
            # we need to update : point_array, index_array, center_point, level
            #update point_array and index_array : we need to find the points that are in the subparts
            #update center point : we need to add/subtract the appropriate power of two to the previous pivot
            #find the points concerned :
point_in_subpart_mask = np.logical_and(
testBit(point_array[:,0],tot_level - level-1) ==b_x
, testBit(point_array[:,1],tot_level - level -1) ==b_y ) ;
sub_part_points= point_array[point_in_subpart_mask];
sub_part_index = index_array[point_in_subpart_mask];
sub_part_center_point = center_point + np.asarray([
(b_x*2-1)*(pow(2,tot_level - level -2 ))
,(b_y*2-1)*(pow(2,tot_level - level -2 ))
]);
if len(sub_part_points)>=1:
recursive_octree_ordering(sub_part_points
,sub_part_index
, sub_part_center_point
, sub_part_level
, tot_level
, result
, piv);
continue;
else:
                print('at level', level, 'bx by:', b_x, b_y, 'refusing to go on,', len(sub_part_points), 'points remaining for this')
continue;
rec = [] ;
piv = [] ;
recursive_octree_ordering(pointcloud_int,index,pivot,0,3,rec, piv );
#recursive_octree_ordering(pointcloud_int,index, np.array([2,2]),1,3,rec, piv );
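#after the call, rec holds (original point index, octree level) pairs and piv the pivot used at each step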
piv_ar = np.array(piv)
plt.plot(piv_ar[:,0], piv_ar[:,1], 'ro') ;
plt.plot(pointcloud_int[:,0], pointcloud_int[:,1], marker='o', color='r', ls='')
plt.plot(pointcloud_int.T, marker='o', color='r', ls='')
plt.imsave('/')
from mpl_toolkits.mplot3d import Axes3D
plt.scatter(pointcloud[:,0], pointcloud[:,1],c='red');
plt.scatter(pointcloud_int[:,0], pointcloud_int[:,1],c='green');
plt.plot(pointcloud[:,0],pointcloud[:,1], 'ro')
plt.plot(pointcloud_int[:,0],pointcloud_int[:,1], 'ro')
plt.axis([-1, 8, -1, 8])
plt.show();
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(pointcloud_int[:,0], pointcloud_int[:,1]);
ax.scatter(pointcloud_int[:,0], pointcloud_int[:,1], pointcloud_int[:,0], zdir='z', c= 'red')
fig.show()
fig, axes = plt.subplots(1, 2, figsize=(12,3))
axes[0].scatter(pointcloud[:,0], pointcloud[:,1],c='red');
axes[1].scatter(pointcloud_int[:,0], pointcloud_int[:,1],c='green');
fig.show();
for f in list((0,1)):
(f*2-1)
import octree_ordering | lgpl-3.0 |
huobaowangxi/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 162 | 9771 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold._utils import _binary_search_perplexity
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, desired_perplexity, verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
fun = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[0]
grad = lambda params: _kl_divergence(params, P, alpha, n_samples,
n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
for init in ('random', 'pca'):
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
init=init, random_state=0)
X_embedded = tsne.fit_transform(X)
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0)
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
metric="precomputed", random_state=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca' or 'random'.
assert_raises_regexp(ValueError, "'init' must be either 'pca' or 'random'",
TSNE, init="not available")
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_verbose():
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
| bsd-3-clause |
Tong-Chen/scikit-learn | sklearn/datasets/base.py | 6 | 18276 | """
Base IO code for all datasets
"""
# Copyright (c) 2007 David Cournapeau <cournape@gmail.com>
# 2010 Fabian Pedregosa <fabian.pedregosa@inria.fr>
# 2010 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import csv
import shutil
import warnings
from os import environ
from os.path import dirname
from os.path import join
from os.path import exists
from os.path import expanduser
from os.path import isdir
from os import listdir
from os import makedirs
import numpy as np
from ..utils import check_random_state
class Bunch(dict):
"""Container object for datasets: dictionary-like object that
exposes its keys as attributes."""
def __init__(self, **kwargs):
dict.__init__(self, kwargs)
self.__dict__ = self
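# Illustrative behaviour: b = Bunch(a=1) exposes the same value as b['a'] and b.a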
def get_data_home(data_home=None):
"""Return the path of the scikit-learn data dir.
This folder is used by some large dataset loaders to avoid
downloading the data several times.
By default the data dir is set to a folder named 'scikit_learn_data'
in the user home folder.
Alternatively, it can be set by the 'SCIKIT_LEARN_DATA' environment
    variable or programmatically by giving an explicit folder path. The
'~' symbol is expanded to the user home folder.
If the folder does not already exist, it is automatically created.
"""
if data_home is None:
data_home = environ.get('SCIKIT_LEARN_DATA',
join('~', 'scikit_learn_data'))
data_home = expanduser(data_home)
if not exists(data_home):
makedirs(data_home)
return data_home
def clear_data_home(data_home=None):
"""Delete all the content of the data home cache."""
data_home = get_data_home(data_home)
shutil.rmtree(data_home)
def load_files(container_path, description=None, categories=None,
load_content=True, shuffle=True, encoding=None,
charset=None, charset_error=None,
decode_error='strict', random_state=0):
"""Load text files with categories as subfolder names.
    Individual samples are assumed to be files stored in a two-level folder
    structure such as the following:
container_folder/
category_1_folder/
file_1.txt
file_2.txt
...
file_42.txt
category_2_folder/
file_43.txt
file_44.txt
...
    The folder names are used as supervised signal label names. The
individual file names are not important.
This function does not try to extract features into a numpy array or
scipy sparse matrix. In addition, if load_content is false it
does not try to load the files in memory.
To use text files in a scikit-learn classification or clustering
algorithm, you will need to use the `sklearn.feature_extraction.text`
module to build a feature extraction transformer that suits your
problem.
If you set load_content=True, you should also specify the encoding of
the text using the 'encoding' parameter. For many modern text files,
'utf-8' will be the correct encoding. If you leave encoding equal to None,
then the content will be made of bytes instead of Unicode, and you will
not be able to use most functions in `sklearn.feature_extraction.text`.
Similar feature extractors should be built for other kind of unstructured
data input such as images, audio, video, ...
Parameters
----------
container_path : string or unicode
Path to the main folder holding one subfolder per category
description: string or unicode, optional (default=None)
A paragraph describing the characteristic of the dataset: its source,
reference, etc.
categories : A collection of strings or None, optional (default=None)
If None (default), load all the categories.
If not None, list of category names to load (other categories ignored).
load_content : boolean, optional (default=True)
Whether to load or not the content of the different files. If
true a 'data' attribute containing the text information is present
in the data structure returned. If not, a filenames attribute
gives the path to the files.
encoding : string or None (default is None)
If None, do not try to decode the content of the files (e.g. for
images or other non-text content).
If not None, encoding to use to decode text files to Unicode if
load_content is True.
decode_error: {'strict', 'ignore', 'replace'}, optional
Instruction on what to do if a byte sequence is given to analyze that
contains characters not of the given `encoding`. Passed as keyword
argument 'errors' to bytes.decode.
shuffle : bool, optional (default=True)
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state : int, RandomState instance or None, optional (default=0)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: either
data, the raw text data to learn, or 'filenames', the files
holding it, 'target', the classification labels (integer index),
'target_names', the meaning of the labels, and 'DESCR', the full
description of the dataset.
"""
if charset is not None:
warnings.warn("The charset parameter is deprecated as of version "
"0.14 and will be removed in 0.16. Use encode instead.",
DeprecationWarning)
encoding = charset
if charset_error is not None:
warnings.warn("The charset_error parameter is deprecated as of "
"version 0.14 and will be removed in 0.16. Use "
"decode_error instead.",
DeprecationWarning)
decode_error = charset_error
target = []
target_names = []
filenames = []
folders = [f for f in sorted(listdir(container_path))
if isdir(join(container_path, f))]
if categories is not None:
folders = [f for f in folders if f in categories]
for label, folder in enumerate(folders):
target_names.append(folder)
folder_path = join(container_path, folder)
documents = [join(folder_path, d)
for d in sorted(listdir(folder_path))]
target.extend(len(documents) * [label])
filenames.extend(documents)
# convert to array for fancy indexing
filenames = np.array(filenames)
target = np.array(target)
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(filenames.shape[0])
random_state.shuffle(indices)
filenames = filenames[indices]
target = target[indices]
if load_content:
data = [open(filename, 'rb').read() for filename in filenames]
if encoding is not None:
data = [d.decode(encoding, decode_error) for d in data]
return Bunch(data=data,
filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
return Bunch(filenames=filenames,
target_names=target_names,
target=target,
DESCR=description)
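# Illustrative call (the path below is a placeholder):
#   dataset = load_files('/path/to/container_folder', encoding='utf-8')
#   # dataset.target_names holds the subfolder names, dataset.target the integer labels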
def load_iris():
"""Load and return the iris dataset (classification).
The iris dataset is a classic and very easy multi-class classification
dataset.
================= ==============
Classes 3
Samples per class 50
Samples total 150
Dimensionality 4
Features real, positive
================= ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'target_names', the meaning of the labels, 'feature_names', the
meaning of the features, and 'DESCR', the
full description of the dataset.
Examples
--------
Let's say you are interested in the samples 10, 25, and 50, and want to
know their class name.
>>> from sklearn.datasets import load_iris
>>> data = load_iris()
>>> data.target[[10, 25, 50]]
array([0, 0, 1])
>>> list(data.target_names)
['setosa', 'versicolor', 'virginica']
"""
module_path = dirname(__file__)
data_file = csv.reader(open(join(module_path, 'data', 'iris.csv')))
fdescr = open(join(module_path, 'descr', 'iris.rst'))
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
target_names = np.array(temp[2:])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,), dtype=np.int)
for i, ir in enumerate(data_file):
data[i] = np.asarray(ir[:-1], dtype=np.float)
target[i] = np.asarray(ir[-1], dtype=np.int)
return Bunch(data=data, target=target,
target_names=target_names,
DESCR=fdescr.read(),
feature_names=['sepal length (cm)', 'sepal width (cm)',
'petal length (cm)', 'petal width (cm)'])
def load_digits(n_class=10):
"""Load and return the digits dataset (classification).
Each datapoint is a 8x8 image of a digit.
================= ==============
Classes 10
Samples per class ~180
Samples total 1797
Dimensionality 64
Features integers 0-16
================= ==============
Parameters
----------
n_class : integer, between 0 and 10, optional (default=10)
The number of classes to return.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'images', the images corresponding
to each sample, 'target', the classification labels for each
sample, 'target_names', the meaning of the labels, and 'DESCR',
the full description of the dataset.
Examples
--------
To load the data and visualize the images::
>>> from sklearn.datasets import load_digits
>>> digits = load_digits()
>>> print(digits.data.shape)
(1797, 64)
>>> import pylab as pl #doctest: +SKIP
>>> pl.gray() #doctest: +SKIP
>>> pl.matshow(digits.images[0]) #doctest: +SKIP
>>> pl.show() #doctest: +SKIP
"""
module_path = dirname(__file__)
data = np.loadtxt(join(module_path, 'data', 'digits.csv.gz'),
delimiter=',')
descr = open(join(module_path, 'descr', 'digits.rst')).read()
target = data[:, -1]
flat_data = data[:, :-1]
images = flat_data.view()
images.shape = (-1, 8, 8)
if n_class < 10:
idx = target < n_class
flat_data, target = flat_data[idx], target[idx]
images = images[idx]
return Bunch(data=flat_data,
target=target.astype(np.int),
target_names=np.arange(10),
images=images,
DESCR=descr)
def load_diabetes():
"""Load and return the diabetes dataset (regression).
============== ==================
Samples total 442
Dimensionality 10
Features real, -.2 < x < .2
Targets integer 25 - 346
============== ==================
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn and 'target', the regression target for each
sample.
"""
base_dir = join(dirname(__file__), 'data')
data = np.loadtxt(join(base_dir, 'diabetes_data.csv.gz'))
target = np.loadtxt(join(base_dir, 'diabetes_target.csv.gz'))
return Bunch(data=data, target=target)
def load_linnerud():
"""Load and return the linnerud dataset (multivariate regression).
Samples total: 20
Dimensionality: 3 for both data and targets
Features: integer
Targets: integer
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are: 'data' and
'targets', the two multivariate datasets, with 'data' corresponding to
the exercise and 'targets' corresponding to the physiological
measurements, as well as 'feature_names' and 'target_names'.
"""
base_dir = join(dirname(__file__), 'data/')
# Read data
data_exercise = np.loadtxt(base_dir + 'linnerud_exercise.csv', skiprows=1)
data_physiological = np.loadtxt(base_dir + 'linnerud_physiological.csv',
skiprows=1)
# Read header
with open(base_dir + 'linnerud_exercise.csv') as f:
header_exercise = f.readline().split()
with open(base_dir + 'linnerud_physiological.csv') as f:
header_physiological = f.readline().split()
with open(dirname(__file__) + '/descr/linnerud.rst') as f:
descr = f.read()
return Bunch(data=data_exercise, feature_names=header_exercise,
target=data_physiological,
target_names=header_physiological,
DESCR=descr)
def load_boston():
"""Load and return the boston house-prices dataset (regression).
============== ==============
Samples total 506
Dimensionality 13
Features real, positive
Targets real 5. - 50.
============== ==============
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the regression targets,
'target_names', the meaning of the labels, and 'DESCR', the
full description of the dataset.
Examples
--------
>>> from sklearn.datasets import load_boston
>>> boston = load_boston()
>>> print(boston.data.shape)
(506, 13)
"""
module_path = dirname(__file__)
data_file = csv.reader(open(join(module_path, 'data',
'boston_house_prices.csv')))
fdescr = open(join(module_path, 'descr', 'boston_house_prices.rst'))
temp = next(data_file)
n_samples = int(temp[0])
n_features = int(temp[1])
data = np.empty((n_samples, n_features))
target = np.empty((n_samples,))
temp = next(data_file) # names of features
feature_names = np.array(temp)
for i, d in enumerate(data_file):
data[i] = np.asarray(d[:-1], dtype=np.float)
target[i] = np.asarray(d[-1], dtype=np.float)
return Bunch(data=data,
target=target,
feature_names=feature_names,
DESCR=fdescr.read())
def load_sample_images():
"""Load sample images for image manipulation.
Loads both, ``china`` and ``flower``.
Returns
-------
data : Bunch
Dictionary-like object with the following attributes :
'images', the two sample images, 'filenames', the file
names for the images, and 'DESCR'
the full description of the dataset.
Examples
--------
To load the data and visualize the images:
>>> from sklearn.datasets import load_sample_images
>>> dataset = load_sample_images() #doctest: +SKIP
>>> len(dataset.images) #doctest: +SKIP
2
>>> first_img_data = dataset.images[0] #doctest: +SKIP
>>> first_img_data.shape #doctest: +SKIP
(427, 640, 3)
>>> first_img_data.dtype #doctest: +SKIP
dtype('uint8')
"""
# Try to import imread from scipy. We do this lazily here to prevent
# this module from depending on PIL.
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
raise ImportError("The Python Imaging Library (PIL) "
"is required to load data from jpeg files")
module_path = join(dirname(__file__), "images")
with open(join(module_path, 'README.txt')) as f:
descr = f.read()
filenames = [join(module_path, filename)
for filename in os.listdir(module_path)
if filename.endswith(".jpg")]
# Load image data for each image in the source folder.
images = [imread(filename) for filename in filenames]
return Bunch(images=images,
filenames=filenames,
DESCR=descr)
def load_sample_image(image_name):
"""Load the numpy array of a single sample image
Parameters
-----------
image_name: {`china.jpg`, `flower.jpg`}
The name of the sample image loaded
Returns
-------
img: 3D array
The image as a numpy array: height x width x color
Examples
---------
>>> from sklearn.datasets import load_sample_image
>>> china = load_sample_image('china.jpg') # doctest: +SKIP
>>> china.dtype # doctest: +SKIP
dtype('uint8')
>>> china.shape # doctest: +SKIP
(427, 640, 3)
>>> flower = load_sample_image('flower.jpg') # doctest: +SKIP
>>> flower.dtype # doctest: +SKIP
dtype('uint8')
>>> flower.shape # doctest: +SKIP
(427, 640, 3)
"""
images = load_sample_images()
index = None
for i, filename in enumerate(images.filenames):
if filename.endswith(image_name):
index = i
break
if index is None:
raise AttributeError("Cannot find sample image: %s" % image_name)
return images.images[index]
| bsd-3-clause |
jor-/scipy | scipy/optimize/minpack.py | 1 | 34280 | from __future__ import division, print_function, absolute_import
import threading
import warnings
from . import _minpack
import numpy as np
from numpy import (atleast_1d, dot, take, triu, shape, eye,
transpose, zeros, prod, greater, array,
all, where, isscalar, asarray, inf, abs,
finfo, inexact, issubdtype, dtype)
from scipy.linalg import svd, cholesky, solve_triangular, LinAlgError
from scipy._lib._util import _asarray_validated, _lazywhere
from .optimize import OptimizeResult, _check_unknown_options, OptimizeWarning
from ._lsq import least_squares
from ._lsq.common import make_strictly_feasible
from ._lsq.least_squares import prepare_bounds
error = _minpack.error
__all__ = ['fsolve', 'leastsq', 'fixed_point', 'curve_fit']
def _check_func(checker, argname, thefunc, x0, args, numinputs,
output_shape=None):
res = atleast_1d(thefunc(*((x0[:numinputs],) + args)))
if (output_shape is not None) and (shape(res) != output_shape):
if (output_shape[0] != 1):
if len(output_shape) > 1:
if output_shape[1] == 1:
return shape(res)
msg = "%s: there is a mismatch between the input and output " \
"shape of the '%s' argument" % (checker, argname)
func_name = getattr(thefunc, '__name__', None)
if func_name:
msg += " '%s'." % func_name
else:
msg += "."
msg += 'Shape should be %s but it is %s.' % (output_shape, shape(res))
raise TypeError(msg)
if issubdtype(res.dtype, inexact):
dt = res.dtype
else:
dt = dtype(float)
return shape(res), dt
def fsolve(func, x0, args=(), fprime=None, full_output=0,
col_deriv=0, xtol=1.49012e-8, maxfev=0, band=None,
epsfcn=None, factor=100, diag=None):
"""
Find the roots of a function.
Return the roots of the (non-linear) equations defined by
``func(x) = 0`` given a starting estimate.
Parameters
----------
func : callable ``f(x, *args)``
A function that takes at least one (possibly vector) argument,
and returns a value of the same length.
x0 : ndarray
The starting estimate for the roots of ``func(x) = 0``.
args : tuple, optional
Any extra arguments to `func`.
fprime : callable ``f(x, *args)``, optional
A function to compute the Jacobian of `func` with derivatives
across the rows. By default, the Jacobian will be estimated.
full_output : bool, optional
If True, return optional outputs.
col_deriv : bool, optional
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float, optional
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int, optional
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple, optional
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
epsfcn : float, optional
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`epsfcn` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence, optional
N positive entries that serve as a scale factors for the
variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for
an unsuccessful call).
infodict : dict
A dictionary of optional outputs with the keys:
``nfev``
number of function calls
``njev``
number of Jacobian calls
``fvec``
function evaluated at the output
``fjac``
the orthogonal matrix, q, produced by the QR
factorization of the final approximate Jacobian
matrix, stored column wise
``r``
upper triangular matrix produced by QR factorization
of the same matrix
``qtf``
the vector ``(transpose(q) * fvec)``
ier : int
An integer flag. Set to 1 if a solution was found, otherwise refer
to `mesg` for more information.
mesg : str
If no solution is found, `mesg` details the cause of failure.
See Also
--------
root : Interface to root finding algorithms for multivariate
functions. See the ``method=='hybr'`` in particular.
Notes
-----
``fsolve`` is a wrapper around MINPACK's hybrd and hybrj algorithms.
"""
options = {'col_deriv': col_deriv,
'xtol': xtol,
'maxfev': maxfev,
'band': band,
'eps': epsfcn,
'factor': factor,
'diag': diag}
res = _root_hybr(func, x0, args, jac=fprime, **options)
if full_output:
x = res['x']
info = dict((k, res.get(k))
for k in ('nfev', 'njev', 'fjac', 'r', 'qtf') if k in res)
info['fvec'] = res['fun']
return x, info, res['status'], res['message']
else:
status = res['status']
msg = res['message']
if status == 0:
raise TypeError(msg)
elif status == 1:
pass
elif status in [2, 3, 4, 5]:
warnings.warn(msg, RuntimeWarning)
else:
raise TypeError(msg)
return res['x']
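# Illustrative usage sketch for fsolve:
#   import numpy as np
#   root = fsolve(lambda x: x + 2 * np.cos(x), 0.3)
#   # root is a 1-element ndarray with root[0] + 2*cos(root[0]) ~= 0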
def _root_hybr(func, x0, args=(), jac=None,
col_deriv=0, xtol=1.49012e-08, maxfev=0, band=None, eps=None,
factor=100, diag=None, **unknown_options):
"""
Find the roots of a multivariate function using MINPACK's hybrd and
hybrj routines (modified Powell method).
Options
-------
col_deriv : bool
Specify whether the Jacobian function computes derivatives down
the columns (faster, because there is no transpose operation).
xtol : float
The calculation will terminate if the relative error between two
consecutive iterates is at most `xtol`.
maxfev : int
The maximum number of calls to the function. If zero, then
``100*(N+1)`` is the maximum where N is the number of elements
in `x0`.
band : tuple
If set to a two-sequence containing the number of sub- and
super-diagonals within the band of the Jacobi matrix, the
Jacobi matrix is considered banded (only for ``fprime=None``).
eps : float
A suitable step length for the forward-difference
approximation of the Jacobian (for ``fprime=None``). If
`eps` is less than the machine precision, it is assumed
that the relative errors in the functions are of the order of
the machine precision.
factor : float
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in the interval
``(0.1, 100)``.
diag : sequence
N positive entries that serve as a scale factors for the
variables.
"""
_check_unknown_options(unknown_options)
epsfcn = eps
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('fsolve', 'func', func, x0, args, n, (n,))
if epsfcn is None:
epsfcn = finfo(dtype).eps
Dfun = jac
if Dfun is None:
if band is None:
ml, mu = -10, -10
else:
ml, mu = band[:2]
if maxfev == 0:
maxfev = 200 * (n + 1)
retval = _minpack._hybrd(func, x0, args, 1, xtol, maxfev,
ml, mu, epsfcn, factor, diag)
else:
_check_func('fsolve', 'fprime', Dfun, x0, args, n, (n, n))
if (maxfev == 0):
maxfev = 100 * (n + 1)
retval = _minpack._hybrj(func, Dfun, x0, args, 1,
col_deriv, xtol, maxfev, factor, diag)
x, status = retval[0], retval[-1]
errors = {0: "Improper input parameters were entered.",
1: "The solution converged.",
2: "The number of calls to function has "
"reached maxfev = %d." % maxfev,
3: "xtol=%f is too small, no further improvement "
"in the approximate\n solution "
"is possible." % xtol,
4: "The iteration is not making good progress, as measured "
"by the \n improvement from the last five "
"Jacobian evaluations.",
5: "The iteration is not making good progress, "
"as measured by the \n improvement from the last "
"ten iterations.",
'unknown': "An error occurred."}
info = retval[1]
info['fun'] = info.pop('fvec')
sol = OptimizeResult(x=x, success=(status == 1), status=status)
sol.update(info)
try:
sol['message'] = errors[status]
except KeyError:
sol['message'] = errors['unknown']
return sol
LEASTSQ_SUCCESS = [1, 2, 3, 4]
LEASTSQ_FAILURE = [5, 6, 7, 8]
def leastsq(func, x0, args=(), Dfun=None, full_output=0,
col_deriv=0, ftol=1.49012e-8, xtol=1.49012e-8,
gtol=0.0, maxfev=0, epsfcn=None, factor=100, diag=None):
"""
Minimize the sum of squares of a set of equations.
::
x = arg min(sum(func(y)**2,axis=0))
y
Parameters
----------
func : callable
should take at least one (possibly length N vector) argument and
returns M floating point numbers. It must not return NaNs or
fitting might fail.
x0 : ndarray
The starting estimate for the minimization.
args : tuple, optional
Any extra arguments to func are placed in this tuple.
Dfun : callable, optional
A function or method to compute the Jacobian of func with derivatives
across the rows. If this is None, the Jacobian will be estimated.
full_output : bool, optional
non-zero to return all optional outputs.
col_deriv : bool, optional
non-zero to specify that the Jacobian function computes derivatives
down the columns (faster, because there is no transpose operation).
ftol : float, optional
Relative error desired in the sum of squares.
xtol : float, optional
Relative error desired in the approximate solution.
gtol : float, optional
Orthogonality desired between the function vector and the columns of
the Jacobian.
maxfev : int, optional
The maximum number of calls to the function. If `Dfun` is provided
then the default `maxfev` is 100*(N+1) where N is the number of elements
in x0, otherwise the default `maxfev` is 200*(N+1).
epsfcn : float, optional
A variable used in determining a suitable step length for the forward-
difference approximation of the Jacobian (for Dfun=None).
Normally the actual step length will be sqrt(epsfcn)*x
If epsfcn is less than the machine precision, it is assumed that the
relative errors are of the order of the machine precision.
factor : float, optional
A parameter determining the initial step bound
(``factor * || diag * x||``). Should be in interval ``(0.1, 100)``.
diag : sequence, optional
N positive entries that serve as a scale factors for the variables.
Returns
-------
x : ndarray
The solution (or the result of the last iteration for an unsuccessful
call).
cov_x : ndarray
The inverse of the Hessian. `fjac` and `ipvt` are used to construct an
estimate of the Hessian. A value of None indicates a singular matrix,
which means the curvature in parameters `x` is numerically flat. To
obtain the covariance matrix of the parameters `x`, `cov_x` must be
multiplied by the variance of the residuals -- see curve_fit.
infodict : dict
a dictionary of optional outputs with the keys:
``nfev``
The number of function calls
``fvec``
The function evaluated at the output
``fjac``
A permutation of the R matrix of a QR
factorization of the final approximate
Jacobian matrix, stored column wise.
Together with ipvt, the covariance of the
estimate can be approximated.
``ipvt``
An integer array of length N which defines
a permutation matrix, p, such that
fjac*p = q*r, where r is upper triangular
with diagonal elements of nonincreasing
magnitude. Column j of p is column ipvt(j)
of the identity matrix.
``qtf``
The vector (transpose(q) * fvec).
mesg : str
A string message giving information about the cause of failure.
ier : int
An integer flag. If it is equal to 1, 2, 3 or 4, the solution was
found. Otherwise, the solution was not found. In either case, the
optional output variable 'mesg' gives more information.
See Also
--------
least_squares : Newer interface to solve nonlinear least-squares problems
with bounds on the variables. See ``method=='lm'`` in particular.
Notes
-----
"leastsq" is a wrapper around MINPACK's lmdif and lmder algorithms.
cov_x is a Jacobian approximation to the Hessian of the least squares
objective function.
This approximation assumes that the objective function is based on the
difference between some observed target data (ydata) and a (non-linear)
function of the parameters `f(xdata, params)` ::
func(params) = ydata - f(xdata, params)
so that the objective function is ::
min sum((ydata - f(xdata, params))**2, axis=0)
params
The solution, `x`, is always a 1D array, regardless of the shape of `x0`,
or whether `x0` is a scalar.
"""
x0 = asarray(x0).flatten()
n = len(x0)
if not isinstance(args, tuple):
args = (args,)
shape, dtype = _check_func('leastsq', 'func', func, x0, args, n)
m = shape[0]
if n > m:
raise TypeError('Improper input: N=%s must not exceed M=%s' % (n, m))
if epsfcn is None:
epsfcn = finfo(dtype).eps
if Dfun is None:
if maxfev == 0:
maxfev = 200*(n + 1)
retval = _minpack._lmdif(func, x0, args, full_output, ftol, xtol,
gtol, maxfev, epsfcn, factor, diag)
else:
if col_deriv:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (n, m))
else:
_check_func('leastsq', 'Dfun', Dfun, x0, args, n, (m, n))
if maxfev == 0:
maxfev = 100 * (n + 1)
retval = _minpack._lmder(func, Dfun, x0, args, full_output,
col_deriv, ftol, xtol, gtol, maxfev,
factor, diag)
errors = {0: ["Improper input parameters.", TypeError],
1: ["Both actual and predicted relative reductions "
"in the sum of squares\n are at most %f" % ftol, None],
2: ["The relative error between two consecutive "
"iterates is at most %f" % xtol, None],
3: ["Both actual and predicted relative reductions in "
"the sum of squares\n are at most %f and the "
"relative error between two consecutive "
"iterates is at \n most %f" % (ftol, xtol), None],
4: ["The cosine of the angle between func(x) and any "
"column of the\n Jacobian is at most %f in "
"absolute value" % gtol, None],
5: ["Number of calls to function has reached "
"maxfev = %d." % maxfev, ValueError],
6: ["ftol=%f is too small, no further reduction "
"in the sum of squares\n is possible." % ftol,
ValueError],
7: ["xtol=%f is too small, no further improvement in "
"the approximate\n solution is possible." % xtol,
ValueError],
8: ["gtol=%f is too small, func(x) is orthogonal to the "
"columns of\n the Jacobian to machine "
"precision." % gtol, ValueError]}
# The FORTRAN return value (possible return values are >= 0 and <= 8)
info = retval[-1]
if full_output:
cov_x = None
if info in LEASTSQ_SUCCESS:
from numpy.dual import inv
perm = take(eye(n), retval[1]['ipvt'] - 1, 0)
r = triu(transpose(retval[1]['fjac'])[:n, :])
R = dot(r, perm)
try:
cov_x = inv(dot(transpose(R), R))
except (LinAlgError, ValueError):
pass
return (retval[0], cov_x) + retval[1:-1] + (errors[info][0], info)
else:
if info in LEASTSQ_FAILURE:
warnings.warn(errors[info][0], RuntimeWarning)
elif info == 0:
raise errors[info][1](errors[info][0])
return retval[0], info
def _wrap_func(func, xdata, ydata, transform):
if transform is None:
def func_wrapped(params):
return func(xdata, *params) - ydata
elif transform.ndim == 1:
def func_wrapped(params):
return transform * (func(xdata, *params) - ydata)
else:
# Chisq = (y - yd)^T C^{-1} (y-yd)
# transform = L such that C = L L^T
# C^{-1} = L^{-T} L^{-1}
# Chisq = (y - yd)^T L^{-T} L^{-1} (y-yd)
# Define (y-yd)' = L^{-1} (y-yd)
# by solving
# L (y-yd)' = (y-yd)
# and minimize (y-yd)'^T (y-yd)'
def func_wrapped(params):
return solve_triangular(transform, func(xdata, *params) - ydata, lower=True)
return func_wrapped
def _wrap_jac(jac, xdata, transform):
if transform is None:
def jac_wrapped(params):
return jac(xdata, *params)
elif transform.ndim == 1:
def jac_wrapped(params):
return transform[:, np.newaxis] * np.asarray(jac(xdata, *params))
else:
def jac_wrapped(params):
return solve_triangular(transform, np.asarray(jac(xdata, *params)), lower=True)
return jac_wrapped
def _initialize_feasible(lb, ub):
p0 = np.ones_like(lb)
lb_finite = np.isfinite(lb)
ub_finite = np.isfinite(ub)
mask = lb_finite & ub_finite
p0[mask] = 0.5 * (lb[mask] + ub[mask])
mask = lb_finite & ~ub_finite
p0[mask] = lb[mask] + 1
mask = ~lb_finite & ub_finite
p0[mask] = ub[mask] - 1
return p0
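# ---------------------------------------------------------------------------
# Hedged, illustrative sketch (not part of the original SciPy source): how
# the feasibility helper above chooses starting values for a few bound
# patterns.  The sample bounds are assumptions; the helper is never called.
def _example_initialize_feasible():
    import numpy as np
    lb = np.array([0.0, -np.inf, -np.inf, 2.0])
    ub = np.array([1.0, 5.0, np.inf, np.inf])
    # expected: [0.5 (midpoint), 4.0 (ub - 1), 1.0 (unbounded), 3.0 (lb + 1)]
    return _initialize_feasible(lb, ub)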
def curve_fit(f, xdata, ydata, p0=None, sigma=None, absolute_sigma=False,
check_finite=True, bounds=(-np.inf, np.inf), method=None,
jac=None, **kwargs):
"""
Use non-linear least squares to fit a function, f, to data.
Assumes ``ydata = f(xdata, *params) + eps``
Parameters
----------
f : callable
The model function, f(x, ...). It must take the independent
variable as the first argument and the parameters to fit as
separate remaining arguments.
xdata : array_like or object
The independent variable where the data is measured.
Should usually be an M-length sequence or a (k, M)-shaped array for
functions with k predictors, but can actually be any object.
ydata : array_like
The dependent data, a length M array - nominally ``f(xdata, ...)``.
p0 : array_like, optional
Initial guess for the parameters (length N). If None, then the
initial values will all be 1 (if the number of parameters for the
function can be determined using introspection, otherwise a
ValueError is raised).
sigma : None or M-length sequence or MxM array, optional
Determines the uncertainty in `ydata`. If we define residuals as
``r = ydata - f(xdata, *popt)``, then the interpretation of `sigma`
depends on its number of dimensions:
- A 1-d `sigma` should contain values of standard deviations of
errors in `ydata`. In this case, the optimized function is
``chisq = sum((r / sigma) ** 2)``.
- A 2-d `sigma` should contain the covariance matrix of
errors in `ydata`. In this case, the optimized function is
``chisq = r.T @ inv(sigma) @ r``.
.. versionadded:: 0.19
None (default) is equivalent to a 1-d `sigma` filled with ones.
absolute_sigma : bool, optional
If True, `sigma` is used in an absolute sense and the estimated parameter
covariance `pcov` reflects these absolute values.
If False, only the relative magnitudes of the `sigma` values matter.
The returned parameter covariance matrix `pcov` is based on scaling
`sigma` by a constant factor. This constant is set by demanding that the
reduced `chisq` for the optimal parameters `popt` when using the
*scaled* `sigma` equals unity. In other words, `sigma` is scaled to
match the sample variance of the residuals after the fit.
Mathematically,
``pcov(absolute_sigma=False) = pcov(absolute_sigma=True) * chisq(popt)/(M-N)``
check_finite : bool, optional
If True, check that the input arrays do not contain NaNs or infs,
and raise a ValueError if they do. Setting this parameter to
False may silently produce nonsensical results if the input arrays
do contain nans. Default is True.
bounds : 2-tuple of array_like, optional
Lower and upper bounds on parameters. Defaults to no bounds.
Each element of the tuple must be either an array with the length equal
to the number of parameters, or a scalar (in which case the bound is
taken to be the same for all parameters.) Use ``np.inf`` with an
appropriate sign to disable bounds on all or some parameters.
.. versionadded:: 0.17
method : {'lm', 'trf', 'dogbox'}, optional
Method to use for optimization. See `least_squares` for more details.
Default is 'lm' for unconstrained problems and 'trf' if `bounds` are
provided. The method 'lm' won't work when the number of observations
is less than the number of variables, use 'trf' or 'dogbox' in this
case.
.. versionadded:: 0.17
jac : callable, string or None, optional
Function with signature ``jac(x, ...)`` which computes the Jacobian
matrix of the model function with respect to parameters as a dense
array_like structure. It will be scaled according to provided `sigma`.
If None (default), the Jacobian will be estimated numerically.
String keywords for 'trf' and 'dogbox' methods can be used to select
a finite difference scheme, see `least_squares`.
.. versionadded:: 0.18
kwargs
Keyword arguments passed to `leastsq` for ``method='lm'`` or
`least_squares` otherwise.
Returns
-------
popt : array
Optimal values for the parameters so that the sum of the squared
residuals of ``f(xdata, *popt) - ydata`` is minimized
pcov : 2d array
The estimated covariance of popt. The diagonals provide the variance
of the parameter estimate. To compute one standard deviation errors
on the parameters use ``perr = np.sqrt(np.diag(pcov))``.
How the `sigma` parameter affects the estimated covariance
depends on `absolute_sigma` argument, as described above.
If the Jacobian matrix at the solution doesn't have full rank, then
the 'lm' method returns a matrix filled with ``np.inf``; the 'trf' and
'dogbox' methods, on the other hand, use the Moore-Penrose pseudoinverse
to compute the covariance matrix.
Raises
------
ValueError
if either `ydata` or `xdata` contain NaNs, or if incompatible options
are used.
RuntimeError
if the least-squares minimization fails.
OptimizeWarning
if covariance of the parameters can not be estimated.
See Also
--------
least_squares : Minimize the sum of squares of nonlinear functions.
scipy.stats.linregress : Calculate a linear least squares regression for
two sets of measurements.
Notes
-----
With ``method='lm'``, the algorithm uses the Levenberg-Marquardt algorithm
through `leastsq`. Note that this algorithm can only deal with
unconstrained problems.
Box constraints can be handled by methods 'trf' and 'dogbox'. Refer to
the docstring of `least_squares` for more information.
Examples
--------
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy.optimize import curve_fit
>>> def func(x, a, b, c):
... return a * np.exp(-b * x) + c
Define the data to be fit with some noise:
>>> xdata = np.linspace(0, 4, 50)
>>> y = func(xdata, 2.5, 1.3, 0.5)
>>> np.random.seed(1729)
>>> y_noise = 0.2 * np.random.normal(size=xdata.size)
>>> ydata = y + y_noise
>>> plt.plot(xdata, ydata, 'b-', label='data')
Fit for the parameters a, b, c of the function `func`:
>>> popt, pcov = curve_fit(func, xdata, ydata)
>>> popt
array([ 2.55423706, 1.35190947, 0.47450618])
>>> plt.plot(xdata, func(xdata, *popt), 'r-',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
Constrain the optimization to the region of ``0 <= a <= 3``,
``0 <= b <= 1`` and ``0 <= c <= 0.5``:
>>> popt, pcov = curve_fit(func, xdata, ydata, bounds=(0, [3., 1., 0.5]))
>>> popt
array([ 2.43708906, 1. , 0.35015434])
>>> plt.plot(xdata, func(xdata, *popt), 'g--',
... label='fit: a=%5.3f, b=%5.3f, c=%5.3f' % tuple(popt))
>>> plt.xlabel('x')
>>> plt.ylabel('y')
>>> plt.legend()
>>> plt.show()
"""
if p0 is None:
# determine number of parameters by inspecting the function
from scipy._lib._util import getargspec_no_self as _getargspec
args, varargs, varkw, defaults = _getargspec(f)
if len(args) < 2:
raise ValueError("Unable to determine number of fit parameters.")
n = len(args) - 1
else:
p0 = np.atleast_1d(p0)
n = p0.size
lb, ub = prepare_bounds(bounds, n)
if p0 is None:
p0 = _initialize_feasible(lb, ub)
bounded_problem = np.any((lb > -np.inf) | (ub < np.inf))
if method is None:
if bounded_problem:
method = 'trf'
else:
method = 'lm'
if method == 'lm' and bounded_problem:
raise ValueError("Method 'lm' only works for unconstrained problems. "
"Use 'trf' or 'dogbox' instead.")
# optimization may produce garbage for float32 inputs, cast them to float64
# NaNs can not be handled
if check_finite:
ydata = np.asarray_chkfinite(ydata, float)
else:
ydata = np.asarray(ydata, float)
if isinstance(xdata, (list, tuple, np.ndarray)):
# `xdata` is passed straight to the user-defined `f`, so allow
# non-array_like `xdata`.
if check_finite:
xdata = np.asarray_chkfinite(xdata, float)
else:
xdata = np.asarray(xdata, float)
if ydata.size == 0:
raise ValueError("`ydata` must not be empty!")
# Determine type of sigma
if sigma is not None:
sigma = np.asarray(sigma)
# if 1-d, sigma are errors, define transform = 1/sigma
if sigma.shape == (ydata.size, ):
transform = 1.0 / sigma
# if 2-d, sigma is the covariance matrix,
# define transform = L such that L L^T = C
elif sigma.shape == (ydata.size, ydata.size):
try:
# scipy.linalg.cholesky requires lower=True to return L L^T = A
transform = cholesky(sigma, lower=True)
except LinAlgError:
raise ValueError("`sigma` must be positive definite.")
else:
raise ValueError("`sigma` has incorrect shape.")
else:
transform = None
func = _wrap_func(f, xdata, ydata, transform)
if callable(jac):
jac = _wrap_jac(jac, xdata, transform)
elif jac is None and method != 'lm':
jac = '2-point'
if 'args' in kwargs:
# The specification for the model function `f` does not support
# additional arguments. Refer to the `curve_fit` docstring for
# acceptable call signatures of `f`.
raise ValueError("'args' is not a supported keyword argument.")
if method == 'lm':
# Remove full_output from kwargs, otherwise we're passing it in twice.
return_full = kwargs.pop('full_output', False)
res = leastsq(func, p0, Dfun=jac, full_output=1, **kwargs)
popt, pcov, infodict, errmsg, ier = res
ysize = len(infodict['fvec'])
cost = np.sum(infodict['fvec'] ** 2)
if ier not in [1, 2, 3, 4]:
raise RuntimeError("Optimal parameters not found: " + errmsg)
else:
# Rename maxfev (leastsq) to max_nfev (least_squares), if specified.
if 'max_nfev' not in kwargs:
kwargs['max_nfev'] = kwargs.pop('maxfev', None)
res = least_squares(func, p0, jac=jac, bounds=bounds, method=method,
**kwargs)
if not res.success:
raise RuntimeError("Optimal parameters not found: " + res.message)
ysize = len(res.fun)
cost = 2 * res.cost # res.cost is half sum of squares!
popt = res.x
# Do Moore-Penrose inverse discarding zero singular values.
_, s, VT = svd(res.jac, full_matrices=False)
threshold = np.finfo(float).eps * max(res.jac.shape) * s[0]
s = s[s > threshold]
VT = VT[:s.size]
pcov = np.dot(VT.T / s**2, VT)
return_full = False
warn_cov = False
if pcov is None:
# indeterminate covariance
pcov = zeros((len(popt), len(popt)), dtype=float)
pcov.fill(inf)
warn_cov = True
elif not absolute_sigma:
if ysize > p0.size:
s_sq = cost / (ysize - p0.size)
pcov = pcov * s_sq
else:
pcov.fill(inf)
warn_cov = True
if warn_cov:
warnings.warn('Covariance of the parameters could not be estimated',
category=OptimizeWarning)
if return_full:
return popt, pcov, infodict, errmsg, ier
else:
return popt, pcov
def check_gradient(fcn, Dfcn, x0, args=(), col_deriv=0):
"""Perform a simple check on the gradient for correctness.
"""
x = atleast_1d(x0)
n = len(x)
x = x.reshape((n,))
fvec = atleast_1d(fcn(x, *args))
m = len(fvec)
fvec = fvec.reshape((m,))
ldfjac = m
fjac = atleast_1d(Dfcn(x, *args))
fjac = fjac.reshape((m, n))
if col_deriv == 0:
fjac = transpose(fjac)
xp = zeros((n,), float)
err = zeros((m,), float)
fvecp = None
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 1, err)
fvecp = atleast_1d(fcn(xp, *args))
fvecp = fvecp.reshape((m,))
_minpack._chkder(m, n, x, fvec, fjac, ldfjac, xp, fvecp, 2, err)
good = (prod(greater(err, 0.5), axis=0))
return (good, err)
def _del2(p0, p1, d):
return p0 - np.square(p1 - p0) / d
def _relerr(actual, desired):
return (actual - desired) / desired
def _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel):
p0 = x0
for i in range(maxiter):
p1 = func(p0, *args)
if use_accel:
p2 = func(p1, *args)
d = p2 - 2.0 * p1 + p0
p = _lazywhere(d != 0, (p0, p1, d), f=_del2, fillvalue=p2)
else:
p = p1
relerr = _lazywhere(p0 != 0, (p, p0), f=_relerr, fillvalue=p)
if np.all(np.abs(relerr) < xtol):
return p
p0 = p
msg = "Failed to converge after %d iterations, value is %s" % (maxiter, p)
raise RuntimeError(msg)
def fixed_point(func, x0, args=(), xtol=1e-8, maxiter=500, method='del2'):
"""
Find a fixed point of the function.
Given a function of one or more variables and a starting point, find a
fixed-point of the function: i.e. where ``func(x0) == x0``.
Parameters
----------
func : function
Function to evaluate.
x0 : array_like
Fixed point of function.
args : tuple, optional
Extra arguments to `func`.
xtol : float, optional
Convergence tolerance, defaults to 1e-08.
maxiter : int, optional
Maximum number of iterations, defaults to 500.
method : {"del2", "iteration"}, optional
Method of finding the fixed-point, defaults to "del2"
which uses Steffensen's Method with Aitken's ``Del^2``
convergence acceleration [1]_. The "iteration" method simply iterates
the function until convergence is detected, without attempting to
accelerate the convergence.
References
----------
.. [1] Burden, Faires, "Numerical Analysis", 5th edition, pg. 80
Examples
--------
>>> from scipy import optimize
>>> def func(x, c1, c2):
... return np.sqrt(c1/(x+c2))
>>> c1 = np.array([10,12.])
>>> c2 = np.array([3, 5.])
>>> optimize.fixed_point(func, [1.2, 1.3], args=(c1,c2))
array([ 1.4920333 , 1.37228132])
"""
use_accel = {'del2': True, 'iteration': False}[method]
x0 = _asarray_validated(x0, as_inexact=True)
return _fixed_point_helper(func, x0, args, xtol, maxiter, use_accel)
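# ---------------------------------------------------------------------------
# Hedged, illustrative sketch (not part of the original SciPy source): the
# plain "iteration" method converges on the same contraction used in the
# docstring example above, only without Steffensen acceleration.  Never
# called by the module.
def _example_fixed_point_iteration_method():
    import numpy as np
    from scipy.optimize import fixed_point

    def func(x, c1, c2):
        return np.sqrt(c1 / (x + c2))

    c1 = np.array([10, 12.])
    c2 = np.array([3, 5.])
    # approximately array([1.4920333, 1.37228132]), as in the docstring
    return fixed_point(func, [1.2, 1.3], args=(c1, c2), method='iteration')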
| bsd-3-clause |
ARUNSOORAJPS/flipkart_gridlock | src/main.py | 1 | 2686 | # -*- coding: utf-8 -*-
# @Author: chandan
# @Date: 2017-07-08 00:32:09
# @Last Modified by: chandan
# @Last Modified time: 2017-07-08 11:13:46
from data_utils import read_file
from config import DATA_DIR, SCORE_COLUMNS
import os
from model import train_model, test_model
import pandas as pd
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
import os.path as osp
ACC_FILE = 'RAW_ACCELEROMETERS.txt'
GPS_FILE = 'RAW_GPS.txt'
VEHDET_FILE = 'PROC_VEHICLE_DETECTION.txt'
SCORE_FILE = 'SEMANTIC_ONLINE.txt'
def main():
# read acc, gps, veh det for multiple drivers, scenes
X_dfs, Y_dfs = [], []
driver_dir = 'D1'
for drive_dir in os.listdir(osp.join(DATA_DIR, driver_dir)):
drive_path = osp.join(DATA_DIR, driver_dir, drive_dir)
print drive_path
acc = read_file(osp.join(drive_path, ACC_FILE))
gps = read_file(osp.join(drive_path, GPS_FILE))
veh = read_file(osp.join(drive_path, VEHDET_FILE))
score = read_file(osp.join(drive_path, SCORE_FILE))
datasets = [acc, gps, veh, score]
n_rows = min(map(len, datasets))
# sample high frequency data to lowest frequency
for i in range(len(datasets)):
# drop time column
datasets[i].drop(0, 1, inplace=True)
if len(datasets[i]) > n_rows:
step = len(datasets[i]) / n_rows
ndx = xrange(0, n_rows * step, step)
datasets[i] = datasets[i].ix[ndx]
datasets[i] = datasets[i].reset_index(drop=True)
score_df = datasets[-1]
datasets = datasets[:-1]
Y_df = score.ix[:, SCORE_COLUMNS]
# create dataset
X_df = pd.concat(datasets, axis=1, ignore_index=True)
X_df.fillna(0, inplace=True)
print "X:", X_df.shape
print "Y:", score_df.shape
X_dfs.append(X_df)
Y_dfs.append(Y_df)
# preprocess
X_df = pd.concat(X_dfs, ignore_index=True)
X = X_df.values.astype('float32')
Y = pd.concat(Y_dfs, ignore_index=True).values
print "X shape:", X.shape
print "Y shape:", Y.shape
scaler = MinMaxScaler(feature_range=(0, 1))
X = scaler.fit_transform(X)
X_tr, X_ts, Y_tr, Y_ts = train_test_split(X, Y, test_size=0.2)
# train
print "X Train shape:", X_tr.shape
print "Y Train shape:", Y_tr.shape
print "X test shape:", X_ts.shape
print "Y test shape:", Y_ts.shape
seq_len = 16
X_tr_seq = X_to_seq(X, seq_len, 1)
Y_tr = Y_tr[seq_len:]
X_ts_seq = X_to_seq(X_ts, seq_len, 1)
Y_ts = Y_ts[seq_len:]
#train_model(X_tr, Y_tr)
loss = test_model(X_ts_seq, Y_ts)
print loss
def X_to_seq(X, seq_len=16, stride=1):
X_seqs = []
for start_ndx in range(0, len(X) - seq_len, stride):
X_seqs.append(X[start_ndx : start_ndx + seq_len])
return np.array(X_seqs)
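# Hedged, illustrative sketch (not part of the original script): what the
# sliding-window helper above produces for a tiny input.  The sample array
# is an assumption; the helper is never called by the script.
def _example_x_to_seq():
    import numpy as np
    X = np.arange(10).reshape(10, 1)            # 10 time steps, 1 feature
    seqs = X_to_seq(X, seq_len=4, stride=2)     # windows start at 0, 2, 4
    # seqs.shape == (3, 4, 1)
    return seqs.shape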
if __name__ == '__main__':
main() | mit |
sugiare418/tensorfx | build/tensorflow/scripts/003_lstm_keras_171203/model.py | 2 | 5565 | # -*- coding: utf-8 -*-
# Python 2.7.6
# tensorflow (0.7.1)
import numpy as np
import random
from param import *
import matplotlib
matplotlib.use('Agg')  # disable the GUI (use the non-interactive Agg backend)
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers.normalization import BatchNormalization
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping
# =================================================================
class Model:
'''Class that manages the machine-learning model definition
Also handles saving/loading of the TensorFlow Session and trained models
'''
def __init__(self):
''' Initialization (quasi-constructor) '''
self.__setup_model()
def __enter__(self):
''' Context manager (called when entering a with block) '''
return self
def __exit__(self, exc_type, exc_value, traceback):
''' Context manager (called when exiting a with block) '''
return False
def save(self, str_path):
"""Save the TensorFlow model
Args:
str_path: path to save the model to
Returns:
-
"""
def restore(self, str_path):
"""Load the TensorFlow model
Args:
str_path: path to load the model from
Returns:
-
"""
def __setup_model(self):
''' Define the NN model '''
def weight_variable(shape, name=None):
return np.random.normal(scale=.01, size=shape)
model = Sequential()
model.add(BatchNormalization(input_shape=(Param.IN_CYCLE_SIZE, Param.IN_COLUMN_SIZE)))
model.add(
LSTM(Param.HIDDEN_UNIT_SIZE,
kernel_initializer=weight_variable,
input_shape=(Param.IN_CYCLE_SIZE, Param.IN_COLUMN_SIZE), # seq_length, dim
dropout=Param.DROPOUT_KEEP_PROB,
recurrent_dropout=Param.DROPOUT_KEEP_PROB))
model.add(Dropout(Param.DROPOUT_KEEP_PROB))
model.add(Dense(Param.OUT_NODES_SIZE, kernel_initializer=weight_variable))
# Classification
# model.add(Activation('softmax'))
# optimizer = Adam(lr=0.001, beta_1=0.9, beta_2=0.999)
# model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=["accuracy"])
# Regression
model.add(Activation('linear'))
model.compile(loss='mean_squared_error', optimizer="rmsprop")
self.model = model
# =================================================================
class Trainer(Model):
'''
Training class
Runs TensorFlow training based on jiji trade data
'''
def train(self, steps, data):
'''
Train the model
'''
(ary_in, ary_actual, df_jiji) = data.train_data()
hist = self.model.fit(
ary_in,
ary_actual,
batch_size=Param.IN_BATCH_SIZE,
epochs=Param.TRANING_EPOCH_SIZE,
)
'''
Visualize the training results
'''
loss = hist.history['loss']
plt.rc('font', family='serif')
fig = plt.figure()
plt.plot(range(len(loss)), loss, label='loss', color='black')
plt.xlabel('epochs')
plt.show()
plt.savefig(__file__ + '.eps')
'''
Check against the test data
'''
(ary_in, ary_actual, df_jiji) = data.test_data()
acc_count = 0
for i in range(1000):
k = random.randrange(len(ary_in))
x = ary_in[k]
y = self.model.predict(np.array([x]), batch_size=1)
l = ary_actual[k]
close = df_jiji.loc[k]['n03_close']
sub_y = close - y
sub_l = close - l
if sub_y > 0 and sub_l > 0:
acc_count = acc_count + 1
if sub_y < 0 and sub_l < 0:
acc_count = acc_count + 1
print 'y : {} , l : {}, sub_y:{}, sub_l:{}'.format(y, l, sub_y ,sub_l)
print '=============================================='
print 'acc_count:{}'.format(acc_count)
# # score = self.model.evaluate(ary_in, ary_actual, batch_size=Param.IN_BATCH_SIZE)
# # print '\n evaluate : {} '.format(score)
# score = self.model.predict(ary_in, batch_size=Param.IN_BATCH_SIZE)
# print '\n predict : {} '.format(score)
# score = self.model.predict_classes(ary_in, batch_size=Param.IN_BATCH_SIZE)
# print '\n predict_classes : {} '.format(score)
# score = self.model.predict_proba(ary_in, batch_size=Param.IN_BATCH_SIZE)
# print '\n predict_proba : {} '.format(score)
# =================================================================
class Estimator(Model):
def estimate( self, data ):
output = self.session.run(self.out_softmax, feed_dict=self.estimate_feed_dict(data))
#loss = self.session.run(self.loss, feed_dict=self.estimate_feed_dict(data))
#print output
return 'up' if output[0][0] > output[0][1] else 'down'
#return self.session.run(tf.argmax(self.output,1), feed_dict=self.estimate_feed_dict(data))
def estimate_feed_dict(self, data):
return {
self.ph_trade_data: data,
self.ph_state_size: 1,
self.ph_dropout_keep_prob: 1.0,
} | mit |
nesterione/scikit-learn | sklearn/tests/test_learning_curve.py | 225 | 10791 | # Author: Alexander Fabisch <afabisch@informatik.uni-bremen.de>
#
# License: BSD 3 clause
import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import warnings
from sklearn.base import BaseEstimator
from sklearn.learning_curve import learning_curve, validation_curve
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.linear_model import PassiveAggressiveClassifier
class MockImprovingEstimator(BaseEstimator):
"""Dummy classifier to test the learning curve"""
def __init__(self, n_max_train_sizes):
self.n_max_train_sizes = n_max_train_sizes
self.train_sizes = 0
self.X_subset = None
def fit(self, X_subset, y_subset=None):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, Y=None):
# training score becomes worse (2 -> 1), test error better (0 -> 1)
if self._is_training_data(X):
return 2. - float(self.train_sizes) / self.n_max_train_sizes
else:
return float(self.train_sizes) / self.n_max_train_sizes
def _is_training_data(self, X):
return X is self.X_subset
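# Hedged, illustrative sketch (not part of the original test module): the
# mock's score moves linearly with the number of training samples seen, so
# the learning-curve tests get predictable values.  The name does not start
# with ``test_`` so it is never collected or called.
def _example_mock_improving_estimator():
    import numpy as np
    est = MockImprovingEstimator(n_max_train_sizes=10)
    X_train = np.zeros((5, 1))
    est.fit(X_train)
    # training-data score: 2 - 0.5 == 1.5; any other data: 0.5
    return est.score(X_train), est.score(np.zeros((3, 1)))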
class MockIncrementalImprovingEstimator(MockImprovingEstimator):
"""Dummy classifier that provides partial_fit"""
def __init__(self, n_max_train_sizes):
super(MockIncrementalImprovingEstimator,
self).__init__(n_max_train_sizes)
self.x = None
def _is_training_data(self, X):
return self.x in X
def partial_fit(self, X, y=None, **params):
self.train_sizes += X.shape[0]
self.x = X[0]
class MockEstimatorWithParameter(BaseEstimator):
"""Dummy classifier to test the validation curve"""
def __init__(self, param=0.5):
self.X_subset = None
self.param = param
def fit(self, X_subset, y_subset):
self.X_subset = X_subset
self.train_sizes = X_subset.shape[0]
return self
def predict(self, X):
raise NotImplementedError
def score(self, X=None, y=None):
return self.param if self._is_training_data(X) else 1 - self.param
def _is_training_data(self, X):
return X is self.X_subset
def test_learning_curve():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
with warnings.catch_warnings(record=True) as w:
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_equal(train_scores.shape, (10, 3))
assert_equal(test_scores.shape, (10, 3))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_verbose():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
train_sizes, train_scores, test_scores = \
learning_curve(estimator, X, y, cv=3, verbose=1)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[learning_curve]" in out)
def test_learning_curve_incremental_learning_not_possible():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
# The mockup does not have partial_fit()
estimator = MockImprovingEstimator(1)
assert_raises(ValueError, learning_curve, estimator, X, y,
exploit_incremental_learning=True)
def test_learning_curve_incremental_learning():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_incremental_learning_unsupervised():
X, _ = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockIncrementalImprovingEstimator(20)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y=None, cv=3, exploit_incremental_learning=True,
train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_learning_curve_batch_and_incremental_learning_are_equal():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
train_sizes = np.linspace(0.2, 1.0, 5)
estimator = PassiveAggressiveClassifier(n_iter=1, shuffle=False)
train_sizes_inc, train_scores_inc, test_scores_inc = \
learning_curve(
estimator, X, y, train_sizes=train_sizes,
cv=3, exploit_incremental_learning=True)
train_sizes_batch, train_scores_batch, test_scores_batch = \
learning_curve(
estimator, X, y, cv=3, train_sizes=train_sizes,
exploit_incremental_learning=False)
assert_array_equal(train_sizes_inc, train_sizes_batch)
assert_array_almost_equal(train_scores_inc.mean(axis=1),
train_scores_batch.mean(axis=1))
assert_array_almost_equal(test_scores_inc.mean(axis=1),
test_scores_batch.mean(axis=1))
def test_learning_curve_n_sample_range_out_of_bounds():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.0, 1.0])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0.1, 1.1])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[0, 20])
assert_raises(ValueError, learning_curve, estimator, X, y, cv=3,
train_sizes=[1, 21])
def test_learning_curve_remove_duplicate_sample_sizes():
X, y = make_classification(n_samples=3, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(2)
train_sizes, _, _ = assert_warns(
RuntimeWarning, learning_curve, estimator, X, y, cv=3,
train_sizes=np.linspace(0.33, 1.0, 3))
assert_array_equal(train_sizes, [1, 2])
def test_learning_curve_with_boolean_indices():
X, y = make_classification(n_samples=30, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
estimator = MockImprovingEstimator(20)
cv = KFold(n=30, n_folds=3)
train_sizes, train_scores, test_scores = learning_curve(
estimator, X, y, cv=cv, train_sizes=np.linspace(0.1, 1.0, 10))
assert_array_equal(train_sizes, np.linspace(2, 20, 10))
assert_array_almost_equal(train_scores.mean(axis=1),
np.linspace(1.9, 1.0, 10))
assert_array_almost_equal(test_scores.mean(axis=1),
np.linspace(0.1, 1.0, 10))
def test_validation_curve():
X, y = make_classification(n_samples=2, n_features=1, n_informative=1,
n_redundant=0, n_classes=2,
n_clusters_per_class=1, random_state=0)
param_range = np.linspace(0, 1, 10)
with warnings.catch_warnings(record=True) as w:
train_scores, test_scores = validation_curve(
MockEstimatorWithParameter(), X, y, param_name="param",
param_range=param_range, cv=2
)
if len(w) > 0:
raise RuntimeError("Unexpected warning: %r" % w[0].message)
assert_array_almost_equal(train_scores.mean(axis=1), param_range)
assert_array_almost_equal(test_scores.mean(axis=1), 1 - param_range)
| bsd-3-clause |
vighneshbirodkar/scikit-image | skimage/viewer/viewers/core.py | 33 | 13265 | """
ImageViewer class for viewing and interacting with images.
"""
import numpy as np
from ... import io, img_as_float
from ...util.dtype import dtype_range
from ...exposure import rescale_intensity
from ..qt import QtWidgets, Qt, Signal
from ..widgets import Slider
from ..utils import (dialogs, init_qtapp, figimage, start_qtapp,
update_axes_image)
from ..utils.canvas import BlitManager, EventManager
from ..plugins.base import Plugin
__all__ = ['ImageViewer', 'CollectionViewer']
def mpl_image_to_rgba(mpl_image):
"""Return RGB image from the given matplotlib image object.
Each image in a matplotlib figure has its own colormap and normalization
function. Return RGBA (RGB + alpha channel) image with float dtype.
Parameters
----------
mpl_image : matplotlib.image.AxesImage object
The image being converted.
Returns
-------
img : array of float, shape (M, N, 4)
An image of float values in [0, 1].
"""
image = mpl_image.get_array()
if image.ndim == 2:
input_range = (mpl_image.norm.vmin, mpl_image.norm.vmax)
image = rescale_intensity(image, in_range=input_range)
# cmap complains on bool arrays
image = mpl_image.cmap(img_as_float(image))
elif image.ndim == 3 and image.shape[2] == 3:
# add alpha channel if it's missing
image = np.dstack((image, np.ones_like(image)))
return img_as_float(image)
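# Hedged, illustrative sketch (not part of the original scikit-image source):
# how ``mpl_image_to_rgba`` might be exercised with a grayscale matplotlib
# image.  The figure set-up and sample data are assumptions for illustration
# only; the helper is never called by the module.
def _example_mpl_image_to_rgba():
    import numpy as np
    import matplotlib.pyplot as plt

    fig, ax = plt.subplots()
    # explicit vmin/vmax so the image norm is fully defined before drawing
    mpl_image = ax.imshow(np.random.rand(8, 8), cmap='gray', vmin=0, vmax=1)
    rgba = mpl_image_to_rgba(mpl_image)
    plt.close(fig)
    return rgba.shape   # (8, 8, 4), float values in [0, 1]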
class ImageViewer(QtWidgets.QMainWindow):
"""Viewer for displaying images.
This viewer is a simple container object that holds a Matplotlib axes
for showing images. `ImageViewer` doesn't subclass the Matplotlib axes (or
figure) because of the high probability of name collisions.
Subclasses and plugins will likely extend the `update_image` method to add
custom overlays or filter the displayed image.
Parameters
----------
image : array
Image being viewed.
Attributes
----------
canvas, fig, ax : Matplotlib canvas, figure, and axes
Matplotlib canvas, figure, and axes used to display image.
image : array
Image being viewed. Setting this value will update the displayed frame.
original_image : array
Plugins typically operate on (but don't change) the *original* image.
plugins : list
List of attached plugins.
Examples
--------
>>> from skimage import data
>>> image = data.coins()
>>> viewer = ImageViewer(image) # doctest: +SKIP
>>> viewer.show() # doctest: +SKIP
"""
dock_areas = {'top': Qt.TopDockWidgetArea,
'bottom': Qt.BottomDockWidgetArea,
'left': Qt.LeftDockWidgetArea,
'right': Qt.RightDockWidgetArea}
# Signal that the original image has been changed
original_image_changed = Signal(np.ndarray)
def __init__(self, image, useblit=True):
# Start main loop
init_qtapp()
super(ImageViewer, self).__init__()
#TODO: Add ImageViewer to skimage.io window manager
self.setAttribute(Qt.WA_DeleteOnClose)
self.setWindowTitle("Image Viewer")
self.file_menu = QtWidgets.QMenu('&File', self)
self.file_menu.addAction('Open file', self.open_file,
Qt.CTRL + Qt.Key_O)
self.file_menu.addAction('Save to file', self.save_to_file,
Qt.CTRL + Qt.Key_S)
self.file_menu.addAction('Quit', self.close,
Qt.CTRL + Qt.Key_Q)
self.menuBar().addMenu(self.file_menu)
self.main_widget = QtWidgets.QWidget()
self.setCentralWidget(self.main_widget)
if isinstance(image, Plugin):
plugin = image
image = plugin.filtered_image
plugin.image_changed.connect(self._update_original_image)
# When plugin is started, start
plugin._started.connect(self._show)
self.fig, self.ax = figimage(image)
self.canvas = self.fig.canvas
self.canvas.setParent(self)
self.ax.autoscale(enable=False)
self._tools = []
self.useblit = useblit
if useblit:
self._blit_manager = BlitManager(self.ax)
self._event_manager = EventManager(self.ax)
self._image_plot = self.ax.images[0]
self._update_original_image(image)
self.plugins = []
self.layout = QtWidgets.QVBoxLayout(self.main_widget)
self.layout.addWidget(self.canvas)
status_bar = self.statusBar()
self.status_message = status_bar.showMessage
sb_size = status_bar.sizeHint()
cs_size = self.canvas.sizeHint()
self.resize(cs_size.width(), cs_size.height() + sb_size.height())
self.connect_event('motion_notify_event', self._update_status_bar)
def __add__(self, plugin):
"""Add plugin to ImageViewer"""
plugin.attach(self)
self.original_image_changed.connect(plugin._update_original_image)
if plugin.dock:
location = self.dock_areas[plugin.dock]
dock_location = Qt.DockWidgetArea(location)
dock = QtWidgets.QDockWidget()
dock.setWidget(plugin)
dock.setWindowTitle(plugin.name)
self.addDockWidget(dock_location, dock)
horiz = (self.dock_areas['left'], self.dock_areas['right'])
dimension = 'width' if location in horiz else 'height'
self._add_widget_size(plugin, dimension=dimension)
return self
def _add_widget_size(self, widget, dimension='width'):
widget_size = widget.sizeHint()
viewer_size = self.frameGeometry()
dx = dy = 0
if dimension == 'width':
dx = widget_size.width()
elif dimension == 'height':
dy = widget_size.height()
w = viewer_size.width()
h = viewer_size.height()
self.resize(w + dx, h + dy)
def open_file(self, filename=None):
"""Open image file and display in viewer."""
if filename is None:
filename = dialogs.open_file_dialog()
if filename is None:
return
image = io.imread(filename)
self._update_original_image(image)
def update_image(self, image):
"""Update displayed image.
This method can be overridden or extended in subclasses and plugins to
react to image changes.
"""
self._update_original_image(image)
def _update_original_image(self, image):
self.original_image = image # update saved image
self.image = image.copy() # update displayed image
self.original_image_changed.emit(image)
def save_to_file(self, filename=None):
"""Save current image to file.
The current behavior is not ideal: It saves the image displayed on
screen, so all images will be converted to RGB, and the image size is
not preserved (resizing the viewer window will alter the size of the
saved image).
"""
if filename is None:
filename = dialogs.save_file_dialog()
if filename is None:
return
if len(self.ax.images) == 1:
io.imsave(filename, self.image)
else:
underlay = mpl_image_to_rgba(self.ax.images[0])
overlay = mpl_image_to_rgba(self.ax.images[1])
alpha = overlay[:, :, 3]
# alpha can be set by channel of array or by a scalar value.
# Prefer the alpha channel, but fall back to scalar value.
if np.all(alpha == 1):
alpha = np.ones_like(alpha) * self.ax.images[1].get_alpha()
alpha = alpha[:, :, np.newaxis]
composite = (overlay[:, :, :3] * alpha +
underlay[:, :, :3] * (1 - alpha))
io.imsave(filename, composite)
def closeEvent(self, event):
self.close()
def _show(self, x=0):
self.move(x, 0)
for p in self.plugins:
p.show()
super(ImageViewer, self).show()
self.activateWindow()
self.raise_()
def show(self, main_window=True):
"""Show ImageViewer and attached plugins.
This behaves much like `matplotlib.pyplot.show` and `QWidget.show`.
"""
self._show()
if main_window:
start_qtapp()
return [p.output() for p in self.plugins]
def redraw(self):
if self.useblit:
self._blit_manager.redraw()
else:
self.canvas.draw_idle()
@property
def image(self):
return self._img
@image.setter
def image(self, image):
self._img = image
update_axes_image(self._image_plot, image)
# update display (otherwise image doesn't fill the canvas)
h, w = image.shape[:2]
self.ax.set_xlim(0, w)
self.ax.set_ylim(h, 0)
# update color range
clim = dtype_range[image.dtype.type]
if clim[0] < 0 and image.min() >= 0:
clim = (0, clim[1])
self._image_plot.set_clim(clim)
if self.useblit:
self._blit_manager.background = None
self.redraw()
def reset_image(self):
self.image = self.original_image.copy()
def connect_event(self, event, callback):
"""Connect callback function to matplotlib event and return id."""
cid = self.canvas.mpl_connect(event, callback)
return cid
def disconnect_event(self, callback_id):
"""Disconnect callback by its id (returned by `connect_event`)."""
self.canvas.mpl_disconnect(callback_id)
def _update_status_bar(self, event):
if event.inaxes and event.inaxes.get_navigate():
self.status_message(self._format_coord(event.xdata, event.ydata))
else:
self.status_message('')
def add_tool(self, tool):
if self.useblit:
self._blit_manager.add_artists(tool.artists)
self._tools.append(tool)
self._event_manager.attach(tool)
def remove_tool(self, tool):
if tool not in self._tools:
return
if self.useblit:
self._blit_manager.remove_artists(tool.artists)
self._tools.remove(tool)
self._event_manager.detach(tool)
def _format_coord(self, x, y):
# callback function to format coordinate display in status bar
x = int(x + 0.5)
y = int(y + 0.5)
try:
return "%4s @ [%4s, %4s]" % (self.image[y, x], x, y)
except IndexError:
return ""
class CollectionViewer(ImageViewer):
"""Viewer for displaying image collections.
Select the displayed frame of the image collection using the slider or
with the following keyboard shortcuts:
left/right arrows
Previous/next image in collection.
number keys, 0--9
0% to 90% of collection. For example, "5" goes to the image in the
middle (i.e. 50%) of the collection.
home/end keys
First/last image in collection.
Parameters
----------
image_collection : list of images
List of images to be displayed.
update_on : {'move' | 'release'}
Control whether image is updated on slide or release of the image
slider. Using 'release' will give smoother behavior when displaying
large images or when writing a plugin/subclass that requires heavy
computation.
"""
def __init__(self, image_collection, update_on='move', **kwargs):
self.image_collection = image_collection
self.index = 0
self.num_images = len(self.image_collection)
first_image = image_collection[0]
super(CollectionViewer, self).__init__(first_image)
slider_kws = dict(value=0, low=0, high=self.num_images - 1)
slider_kws['update_on'] = update_on
slider_kws['callback'] = self.update_index
slider_kws['value_type'] = 'int'
self.slider = Slider('frame', **slider_kws)
self.layout.addWidget(self.slider)
#TODO: Adjust height to accommodate slider; the following doesn't work
# s_size = self.slider.sizeHint()
# cs_size = self.canvas.sizeHint()
# self.resize(cs_size.width(), cs_size.height() + s_size.height())
def update_index(self, name, index):
"""Select image on display using index into image collection."""
index = int(round(index))
if index == self.index:
return
# clip index value to collection limits
index = max(index, 0)
index = min(index, self.num_images - 1)
self.index = index
self.slider.val = index
self.update_image(self.image_collection[index])
def keyPressEvent(self, event):
if type(event) == QtWidgets.QKeyEvent:
key = event.key()
# Number keys (code: 0 = key 48, 9 = key 57) move to deciles
if 48 <= key < 58:
index = 0.1 * int(key - 48) * self.num_images
self.update_index('', index)
event.accept()
else:
event.ignore()
else:
event.ignore()
| bsd-3-clause |
chiu/vincent | examples/scatter_chart_examples.py | 9 | 2130 | # -*- coding: utf-8 -*-
"""
Vincent Scatter Examples
"""
#Build a Line Chart from scratch
from vincent import *
import pandas as pd
import pandas.io.data as web
import datetime
all_data = {}
date_start = datetime.datetime(2010, 1, 1)
date_end = datetime.datetime(2014, 1, 1)
for ticker in ['AAPL', 'IBM', 'YHOO', 'MSFT']:
all_data[ticker] = web.DataReader(ticker, 'yahoo', date_start, date_end)
price = pd.DataFrame({tic: data['Adj Close']
for tic, data in all_data.items()})
#Note that we're using timeseries, so x-scale type is "time". For non
#timeseries data, use "linear"
vis = Visualization(width=500, height=300)
vis.scales['x'] = Scale(name='x', type='time', range='width',
domain=DataRef(data='table', field="data.idx"))
vis.scales['y'] = Scale(name='y', range='height', type='linear', nice=True,
domain=DataRef(data='table', field="data.val"))
vis.scales['color'] = Scale(name='color', type='ordinal',
domain=DataRef(data='table', field='data.col'),
range='category20')
vis.axes.extend([Axis(type='x', scale='x'),
Axis(type='y', scale='y')])
#Marks
transform = MarkRef(data='table',
transform=[Transform(type='facet', keys=['data.col'])])
enter_props = PropertySet(x=ValueRef(scale='x', field="data.idx"),
y=ValueRef(scale='y', field="data.val"),
fill=ValueRef(scale='color', field='data.col'),
size=ValueRef(value=10))
mark = Mark(type='group', from_=transform,
marks=[Mark(type='symbol',
properties=MarkProperties(enter=enter_props))])
vis.marks.append(mark)
data = Data.from_pandas(price[['MSFT', 'AAPL']])
#Using a Vincent Keyed List here
vis.data['table'] = data
vis.axis_titles(x='Date', y='Price')
vis.legend(title='MSFT vs AAPL')
vis.to_json('vega.json')
#Convenience method
vis = Scatter(price[['MSFT', 'AAPL']])
vis.axis_titles(x='Date', y='Price')
vis.legend(title='MSFT vs AAPL')
vis.colors(brew='RdBu')
vis.to_json('vega.json')
| mit |
jia-kai/hearv | disp_freq.py | 1 | 1942 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# $File: disp_freq.py
# $Date: Sun Nov 23 12:45:25 2014 +0800
# $Author: jiakai <jia.kai66@gmail.com>
import matplotlib.pyplot as plt
import numpy as np
import argparse
import json
def main():
parser = argparse.ArgumentParser()
parser.add_argument('fpath', help='array json fpath')
parser.add_argument('--sample_rate', type=float, default=59.940)
parser.add_argument('--fl', type=float, default=5,
help='low cutoff')
parser.add_argument('--dmin', type=int, help='min index of data')
parser.add_argument('--dnr', type=int, help='number of data points used')
parser.add_argument('--no_shift_mean', action='store_true',
help='do not shift mean value to zero')
parser.add_argument('--clip', type=float,
help='clip all samples to be within range [-x, x]')
parser.add_argument('-o', '--output',
help='outpout the plot')
args = parser.parse_args()
with open(args.fpath) as fin:
vals = np.array(json.load(fin))
if not args.no_shift_mean:
vals -= np.mean(vals)
if args.clip:
vals = np.clip(vals, -args.clip, args.clip)
if args.dmin:
vals = vals[args.dmin:]
if args.dnr:
vals = vals[:args.dnr]
fig = plt.figure()
ax = fig.add_subplot(2, 1, 1)
ax.set_xlabel('sample number')
ax.set_ylabel('displacement')
ax.plot(vals)
fft = np.fft.fft(vals)[:len(vals) / 2]
freq = args.sample_rate / len(vals) * np.arange(1, len(fft) + 1)
if args.fl > 0:
fl = min(np.nonzero(freq >= args.fl)[0])
fft = fft[fl:]
freq = freq[fl:]
ax = fig.add_subplot(2, 1, 2)
ax.set_xlabel('freq')
ax.set_ylabel('amplitude')
ax.plot(freq, np.abs(fft))
if args.output:
fig.savefig(args.output)
plt.show()
if __name__ == '__main__':
main()
| unlicense |
samuel1208/scikit-learn | sklearn/decomposition/truncated_svd.py | 199 | 7744 | """Truncated SVD for sparse matrices, aka latent semantic analysis (LSA).
"""
# Author: Lars Buitinck <L.J.Buitinck@uva.nl>
# Olivier Grisel <olivier.grisel@ensta.org>
# Michael Becker <mike@beckerfuffle.com>
# License: 3-clause BSD.
import numpy as np
import scipy.sparse as sp
try:
from scipy.sparse.linalg import svds
except ImportError:
from ..utils.arpack import svds
from ..base import BaseEstimator, TransformerMixin
from ..utils import check_array, as_float_array, check_random_state
from ..utils.extmath import randomized_svd, safe_sparse_dot, svd_flip
from ..utils.sparsefuncs import mean_variance_axis
__all__ = ["TruncatedSVD"]
class TruncatedSVD(BaseEstimator, TransformerMixin):
"""Dimensionality reduction using truncated SVD (aka LSA).
This transformer performs linear dimensionality reduction by means of
truncated singular value decomposition (SVD). It is very similar to PCA,
but operates on sample vectors directly, instead of on a covariance matrix.
This means it can work with scipy.sparse matrices efficiently.
In particular, truncated SVD works on term count/tf-idf matrices as
returned by the vectorizers in sklearn.feature_extraction.text. In that
context, it is known as latent semantic analysis (LSA).
This estimator supports two algorithms: a fast randomized SVD solver, and
a "naive" algorithm that uses ARPACK as an eigensolver on (X * X.T) or
(X.T * X), whichever is more efficient.
Read more in the :ref:`User Guide <LSA>`.
Parameters
----------
n_components : int, default = 2
Desired dimensionality of output data.
Must be strictly less than the number of features.
The default value is useful for visualisation. For LSA, a value of
100 is recommended.
algorithm : string, default = "randomized"
SVD solver to use. Either "arpack" for the ARPACK wrapper in SciPy
(scipy.sparse.linalg.svds), or "randomized" for the randomized
algorithm due to Halko (2009).
n_iter : int, optional
Number of iterations for randomized SVD solver. Not used by ARPACK.
random_state : int or RandomState, optional
(Seed for) pseudo-random number generator. If not given, the
numpy.random singleton is used.
tol : float, optional
Tolerance for ARPACK. 0 means machine precision. Ignored by randomized
SVD solver.
Attributes
----------
components_ : array, shape (n_components, n_features)
explained_variance_ratio_ : array, [n_components]
Percentage of variance explained by each of the selected components.
explained_variance_ : array, [n_components]
The variance of the training samples transformed by a projection to
each component.
Examples
--------
>>> from sklearn.decomposition import TruncatedSVD
>>> from sklearn.random_projection import sparse_random_matrix
>>> X = sparse_random_matrix(100, 100, density=0.01, random_state=42)
>>> svd = TruncatedSVD(n_components=5, random_state=42)
>>> svd.fit(X) # doctest: +NORMALIZE_WHITESPACE
TruncatedSVD(algorithm='randomized', n_components=5, n_iter=5,
random_state=42, tol=0.0)
>>> print(svd.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.07825... 0.05528... 0.05445... 0.04997... 0.04134...]
>>> print(svd.explained_variance_ratio_.sum()) # doctest: +ELLIPSIS
0.27930...
See also
--------
PCA
RandomizedPCA
References
----------
Finding structure with randomness: Stochastic algorithms for constructing
approximate matrix decompositions
Halko, et al., 2009 (arXiv:0909.4061) http://arxiv.org/pdf/0909.4061
Notes
-----
SVD suffers from a problem called "sign indeterminacy", which means the
sign of the ``components_`` and the output from transform depend on the
algorithm and random state. To work around this, fit instances of this
class to data once, then keep the instance around to do transformations.
"""
def __init__(self, n_components=2, algorithm="randomized", n_iter=5,
random_state=None, tol=0.):
self.algorithm = algorithm
self.n_components = n_components
self.n_iter = n_iter
self.random_state = random_state
self.tol = tol
def fit(self, X, y=None):
"""Fit LSI model on training data X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
self : object
Returns the transformer object.
"""
self.fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit LSI model to X and perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = as_float_array(X, copy=False)
random_state = check_random_state(self.random_state)
# If sparse and not csr or csc, convert to csr
if sp.issparse(X) and X.getformat() not in ["csr", "csc"]:
X = X.tocsr()
if self.algorithm == "arpack":
U, Sigma, VT = svds(X, k=self.n_components, tol=self.tol)
# svds doesn't abide by scipy.linalg.svd/randomized_svd
# conventions, so reverse its outputs.
Sigma = Sigma[::-1]
U, VT = svd_flip(U[:, ::-1], VT[::-1])
elif self.algorithm == "randomized":
k = self.n_components
n_features = X.shape[1]
if k >= n_features:
raise ValueError("n_components must be < n_features;"
" got %d >= %d" % (k, n_features))
U, Sigma, VT = randomized_svd(X, self.n_components,
n_iter=self.n_iter,
random_state=random_state)
else:
raise ValueError("unknown algorithm %r" % self.algorithm)
self.components_ = VT
# Calculate explained variance & explained variance ratio
X_transformed = np.dot(U, np.diag(Sigma))
self.explained_variance_ = exp_var = np.var(X_transformed, axis=0)
if sp.issparse(X):
_, full_var = mean_variance_axis(X, axis=0)
full_var = full_var.sum()
else:
full_var = np.var(X, axis=0).sum()
self.explained_variance_ratio_ = exp_var / full_var
return X_transformed
def transform(self, X):
"""Perform dimensionality reduction on X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
New data.
Returns
-------
X_new : array, shape (n_samples, n_components)
Reduced version of X. This will always be a dense array.
"""
X = check_array(X, accept_sparse='csr')
return safe_sparse_dot(X, self.components_.T)
def inverse_transform(self, X):
"""Transform X back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data.
Returns
-------
X_original : array, shape (n_samples, n_features)
Note that this is always a dense array.
"""
X = check_array(X)
return np.dot(X, self.components_)
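# ---------------------------------------------------------------------------
# Hedged, illustrative sketch (not part of the original scikit-learn source):
# round-tripping dense data through ``fit_transform`` and
# ``inverse_transform`` yields a low-rank reconstruction.  The sample data
# are assumptions; the helper is never called by the module.
def _example_truncated_svd_roundtrip():
    import numpy as np
    rng = np.random.RandomState(0)
    X = rng.rand(20, 10)
    svd = TruncatedSVD(n_components=3, random_state=0)
    X_reduced = svd.fit_transform(X)              # shape (20, 3)
    X_approx = svd.inverse_transform(X_reduced)   # shape (20, 10), rank <= 3
    return X_reduced.shape, X_approx.shape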
| bsd-3-clause |
hainm/scikit-learn | sklearn/externals/joblib/parallel.py | 86 | 35087 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This setting was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
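# Hedged, illustrative sketch (not part of the original joblib source): a
# BatchedCalls object simply replays the recorded (func, args, kwargs)
# triples when called.  The sample batch is an assumption; the helper is
# never used by the library itself.
def _example_batched_calls():
    batch = BatchedCalls((pow, (2, i), {}) for i in range(3))
    # len(batch) == 3 and batch() == [1, 2, 4]
    return batch()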
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
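# Hedged, illustrative sketch (not part of the original joblib source): which
# task indices are actually reported (the filter returns False) for a given
# verbosity level -- the gaps widen as the index grows, following the
# square-root lag described in the docstring above.  The verbosity value is
# an assumption; the helper is never called by the library.
def _example_verbosity_filter():
    reported = [i for i in range(200) if not _verbosity_filter(i, 5)]
    return reported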
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
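# Hedged, illustrative sketch (not part of the original joblib source):
# ``delayed`` only records the call, it does not execute it.  Never called
# by the library itself.
def _example_delayed_capture():
    from math import sqrt
    func, args, kwargs = delayed(sqrt)(9)
    # func is math.sqrt, args == (9,), kwargs == {}
    return func(*args, **kwargs)   # -> 3.0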
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
It is used for progress reporting, to update estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
output data with the with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
          constructing lists of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
            - using 'n_jobs=1' makes it possible to turn off parallel
              computing for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
        * Interruption of multiprocess jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
    called 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto', temp_folder=None,
max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = None
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
        # This lock is used to coordinate the main thread of this process
        # with the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
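            # e.g. on an 8-core machine, n_jobs=-1 -> 8 workers and
            # n_jobs=-2 -> 7 workers (8 + 1 - 2), as described in the
            # class docstring.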
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
            # Prevent posix fork inside non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
        WARNING: this method is not thread-safe: it should only be called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
        The iterator consumption and dispatching are protected by the same
        lock, so calling this function should be thread-safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
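                # Worked example (illustrative, assuming a minimum ideal
                # duration of 0.2s): with old_batch_size=1 and a batch that
                # completed in 0.01s, ideal_batch_size == 20 and the next
                # dispatches use batches of 40 tasks.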
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print a message only about 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
            # We need to be careful: the job list can be filling up as
            # we empty it, and Python lists are not thread-safe by
            # default, hence the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
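                # e.g. the default '2 * n_jobs' evaluates to 8 when n_jobs=4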
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while
                # loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
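###############################################################################
# Illustrative usage sketch (not part of the original joblib source), tying
# the pieces above together; the numbers are arbitrary examples and the helper
# name is hypothetical.
def _parallel_usage_sketch():  # pragma: no cover
    from math import sqrt
    # Sequential mode (n_jobs=1): no pool is created and each batch is run
    # eagerly through ImmediateComputeBatch in the calling process.
    out = Parallel(n_jobs=1)(delayed(sqrt)(i ** 2) for i in range(5))
    assert out == [0.0, 1.0, 2.0, 3.0, 4.0]
    # Threading backend: batch_size='auto' degenerates to one task per batch.
    out = Parallel(n_jobs=2, backend='threading')(
        delayed(sqrt)(i ** 2) for i in range(5))
    assert out == [0.0, 1.0, 2.0, 3.0, 4.0]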
| bsd-3-clause |
gbugaisky/bimm_185_conotoxin | wip-scripts_data/kNNProc.py | 1 | 1321 | #!/usr/bin/env python
import numpy as np
import pylab as pl
from matplotlib.colors import ListedColormap
from sklearn import neighbors
def kNNGen(trainfile, testfile):
features = np.genfromtxt(trainfile, delimiter=' ', usecols=(0, 1, 2))
labels = np.genfromtxt(trainfile, delimiter=' ', usecols=(-1))
tests = np.genfromtxt(testfile, delimiter=' ', usecols=(0, 1, 2))
testlabels = np.genfromtxt(testfile, delimiter=' ', usecols=(-1))
n_neighbors = 10
h = 0.02
accuracyScores = []
for weights in ['uniform', 'distance']:
clf = neighbors.KNeighborsClassifier(n_neighbors, leaf_size=20, weights=weights)
clf.fit(features, labels)
accuracyScores.append(clf.score(tests, testlabels))
return accuracyScores
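# Illustrative usage (not part of the original script; the file names are
# hypothetical): kNNGen expects whitespace-delimited files whose first three
# columns are features and whose last column is the class label, and returns
# [uniform_accuracy, distance_accuracy], e.g.:
# scores = kNNGen("trainDataSet0.csv", "testDataSet0.csv")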
if __name__ == "__main__":
FILEPATH = ".\\SeparatedTrainTest\\"
accuracyVals = []
for i in range(0, 10):
accuracyVals.append(kNNGen(FILEPATH + "trainDataSet" + str(i) + ".csv", FILEPATH + "testDataSet" + str(i) + ".csv"))
uniformScore = 0
distanceScore = 0
with open("kNNAverageAccuracy.txt", 'w') as results:
for element in accuracyVals:
results.write(str(element) + '\n')
uniformScore += element[0]
distanceScore += element[1]
results.write("Uniform kNN Accuracy: " + str(uniformScore / 10.0) + '\n')
results.write("Distance kNN Accuracy: " + str(distanceScore / 10.0) + '\n') | gpl-2.0 |
sinhrks/scikit-learn | sklearn/tree/tests/test_tree.py | 32 | 52369 | """
Testing for the tree module (sklearn.tree).
"""
import pickle
from functools import partial
from itertools import product
import platform
import numpy as np
from scipy.sparse import csc_matrix
from scipy.sparse import csr_matrix
from scipy.sparse import coo_matrix
from sklearn.random_projection import sparse_random_matrix
from sklearn.metrics import accuracy_score
from sklearn.metrics import mean_squared_error
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_in
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_greater_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.validation import check_random_state
from sklearn.exceptions import NotFittedError
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.tree import ExtraTreeClassifier
from sklearn.tree import ExtraTreeRegressor
from sklearn import tree
from sklearn.tree.tree import SPARSE_SPLITTERS
from sklearn.tree._tree import TREE_LEAF
from sklearn import datasets
from sklearn.utils import compute_sample_weight
CLF_CRITERIONS = ("gini", "entropy")
REG_CRITERIONS = ("mse", )
CLF_TREES = {
"DecisionTreeClassifier": DecisionTreeClassifier,
"Presort-DecisionTreeClassifier": partial(DecisionTreeClassifier,
presort=True),
"ExtraTreeClassifier": ExtraTreeClassifier,
}
REG_TREES = {
"DecisionTreeRegressor": DecisionTreeRegressor,
"Presort-DecisionTreeRegressor": partial(DecisionTreeRegressor,
presort=True),
"ExtraTreeRegressor": ExtraTreeRegressor,
}
ALL_TREES = dict()
ALL_TREES.update(CLF_TREES)
ALL_TREES.update(REG_TREES)
SPARSE_TREES = ["DecisionTreeClassifier", "DecisionTreeRegressor",
"ExtraTreeClassifier", "ExtraTreeRegressor"]
X_small = np.array([
[0, 0, 4, 0, 0, 0, 1, -14, 0, -4, 0, 0, 0, 0, ],
[0, 0, 5, 3, 0, -4, 0, 0, 1, -5, 0.2, 0, 4, 1, ],
[-1, -1, 0, 0, -4.5, 0, 0, 2.1, 1, 0, 0, -4.5, 0, 1, ],
[-1, -1, 0, -1.2, 0, 0, 0, 0, 0, 0, 0.2, 0, 0, 1, ],
[-1, -1, 0, 0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 1, ],
[-1, -2, 0, 4, -3, 10, 4, 0, -3.2, 0, 4, 3, -4, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 0, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0, 0, -2, 1, ],
[2.11, 8, -6, -0.5, 0, 11, 0, 0, -3.2, 6, 0.5, 0, -1, 0, ],
[2, 8, 5, 1, 0.5, -4, 10, 0, 1, -5, 3, 0, 2, 0, ],
[2, 0, 1, 1, 1, -1, 1, 0, 0, -2, 3, 0, 1, 0, ],
[2, 0, 1, 2, 3, -1, 10, 2, 0, -1, 1, 2, 2, 0, ],
[1, 1, 0, 2, 2, -1, 1, 2, 0, -5, 1, 2, 3, 0, ],
[3, 1, 0, 3, 0, -4, 10, 0, 1, -5, 3, 0, 3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 0.5, 0, -3, 1, ],
[2.11, 8, -6, -0.5, 0, 1, 0, 0, -3.2, 6, 1.5, 1, -1, -1, ],
[2.11, 8, -6, -0.5, 0, 10, 0, 0, -3.2, 6, 0.5, 0, -1, -1, ],
[2, 0, 5, 1, 0.5, -2, 10, 0, 1, -5, 3, 1, 0, -1, ],
[2, 0, 1, 1, 1, -2, 1, 0, 0, -2, 0, 0, 0, 1, ],
[2, 1, 1, 1, 2, -1, 10, 2, 0, -1, 0, 2, 1, 1, ],
[1, 1, 0, 0, 1, -3, 1, 2, 0, -5, 1, 2, 1, 1, ],
[3, 1, 0, 1, 0, -4, 1, 0, 1, -2, 0, 0, 1, 0, ]])
y_small = [1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0,
0, 0]
y_small_reg = [1.0, 2.1, 1.2, 0.05, 10, 2.4, 3.1, 1.01, 0.01, 2.98, 3.1, 1.1,
0.0, 1.2, 2, 11, 0, 0, 4.5, 0.201, 1.06, 0.9, 0]
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
digits = datasets.load_digits()
perm = rng.permutation(digits.target.size)
digits.data = digits.data[perm]
digits.target = digits.target[perm]
random_state = check_random_state(0)
X_multilabel, y_multilabel = datasets.make_multilabel_classification(
random_state=0, n_samples=30, n_features=10)
X_sparse_pos = random_state.uniform(size=(20, 5))
X_sparse_pos[X_sparse_pos <= 0.8] = 0.
y_random = random_state.randint(0, 4, size=(20, ))
X_sparse_mix = sparse_random_matrix(20, 10, density=0.25, random_state=0)
DATASETS = {
"iris": {"X": iris.data, "y": iris.target},
"boston": {"X": boston.data, "y": boston.target},
"digits": {"X": digits.data, "y": digits.target},
"toy": {"X": X, "y": y},
"clf_small": {"X": X_small, "y": y_small},
"reg_small": {"X": X_small, "y": y_small_reg},
"multilabel": {"X": X_multilabel, "y": y_multilabel},
"sparse-pos": {"X": X_sparse_pos, "y": y_random},
"sparse-neg": {"X": - X_sparse_pos, "y": y_random},
"sparse-mix": {"X": X_sparse_mix, "y": y_random},
"zeros": {"X": np.zeros((20, 3)), "y": y_random}
}
for name in DATASETS:
DATASETS[name]["X_sparse"] = csc_matrix(DATASETS[name]["X"])
def assert_tree_equal(d, s, message):
    assert_equal(s.node_count, d.node_count,
                 "{0}: unequal number of nodes ({1} != {2})"
                 "".format(message, s.node_count, d.node_count))
    assert_array_equal(d.children_right, s.children_right,
                       message + ": unequal children_right")
    assert_array_equal(d.children_left, s.children_left,
                       message + ": unequal children_left")
    external = d.children_right == TREE_LEAF
    internal = np.logical_not(external)
    assert_array_equal(d.feature[internal], s.feature[internal],
                       message + ": unequal features")
    assert_array_equal(d.threshold[internal], s.threshold[internal],
                       message + ": unequal threshold")
    assert_array_equal(d.n_node_samples.sum(), s.n_node_samples.sum(),
                       message + ": unequal sum(n_node_samples)")
    assert_array_equal(d.n_node_samples, s.n_node_samples,
                       message + ": unequal n_node_samples")
    assert_almost_equal(d.impurity, s.impurity,
                        err_msg=message + ": unequal impurity")
    assert_array_almost_equal(d.value[external], s.value[external],
                              err_msg=message + ": unequal value")
def test_classification_toy():
# Check classification on a toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf = Tree(max_features=1, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_weighted_classification_toy():
# Check classification on a weighted toy dataset.
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y, sample_weight=np.ones(len(X)))
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
clf.fit(X, y, sample_weight=np.ones(len(X)) * 0.5)
assert_array_equal(clf.predict(T), true_result,
"Failed with {0}".format(name))
def test_regression_toy():
# Check regression on a toy dataset.
for name, Tree in REG_TREES.items():
reg = Tree(random_state=1)
reg.fit(X, y)
assert_almost_equal(reg.predict(T), true_result,
err_msg="Failed with {0}".format(name))
        reg = Tree(max_features=1, random_state=1)
        reg.fit(X, y)
        assert_almost_equal(reg.predict(T), true_result,
                            err_msg="Failed with {0}".format(name))
def test_xor():
    # Check on an XOR problem
y = np.zeros((10, 10))
y[:5, :5] = 1
y[5:, 5:] = 1
gridx, gridy = np.indices(y.shape)
X = np.vstack([gridx.ravel(), gridy.ravel()]).T
y = y.ravel()
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
clf = Tree(random_state=0, max_features=1)
clf.fit(X, y)
assert_equal(clf.score(X, y), 1.0,
"Failed with {0}".format(name))
def test_iris():
# Check consistency on dataset iris.
for (name, Tree), criterion in product(CLF_TREES.items(), CLF_CRITERIONS):
clf = Tree(criterion=criterion, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.9,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
clf = Tree(criterion=criterion, max_features=2, random_state=0)
clf.fit(iris.data, iris.target)
score = accuracy_score(clf.predict(iris.data), iris.target)
assert_greater(score, 0.5,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_boston():
# Check consistency on dataset boston house prices.
for (name, Tree), criterion in product(REG_TREES.items(), REG_CRITERIONS):
reg = Tree(criterion=criterion, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 1,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
# using fewer features reduces the learning ability of this tree,
# but reduces training time.
reg = Tree(criterion=criterion, max_features=6, random_state=0)
reg.fit(boston.data, boston.target)
score = mean_squared_error(boston.target, reg.predict(boston.data))
assert_less(score, 2,
"Failed with {0}, criterion = {1} and score = {2}"
"".format(name, criterion, score))
def test_probability():
# Predict probabilities using DecisionTreeClassifier.
for name, Tree in CLF_TREES.items():
clf = Tree(max_depth=1, max_features=1, random_state=42)
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(np.sum(prob_predict, 1),
np.ones(iris.data.shape[0]),
err_msg="Failed with {0}".format(name))
assert_array_equal(np.argmax(prob_predict, 1),
clf.predict(iris.data),
err_msg="Failed with {0}".format(name))
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8,
err_msg="Failed with {0}".format(name))
def test_arrayrepr():
# Check the array representation.
# Check resize
X = np.arange(10000)[:, np.newaxis]
y = np.arange(10000)
for name, Tree in REG_TREES.items():
reg = Tree(max_depth=None, random_state=0)
reg.fit(X, y)
def test_pure_set():
# Check when y is pure.
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [1, 1, 1, 1, 1, 1]
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_array_equal(clf.predict(X), y,
err_msg="Failed with {0}".format(name))
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
reg.fit(X, y)
        assert_almost_equal(reg.predict(X), y,
err_msg="Failed with {0}".format(name))
def test_numerical_stability():
# Check numerical stability.
X = np.array([
[152.08097839, 140.40744019, 129.75102234, 159.90493774],
[142.50700378, 135.81935120, 117.82884979, 162.75781250],
[127.28772736, 140.40744019, 129.75102234, 159.90493774],
[132.37025452, 143.71923828, 138.35694885, 157.84558105],
[103.10237122, 143.71928406, 138.35696411, 157.84559631],
[127.71276855, 143.71923828, 138.35694885, 157.84558105],
[120.91514587, 140.40744019, 129.75102234, 159.90493774]])
y = np.array(
[1., 0.70209277, 0.53896582, 0., 0.90914464, 0.48026916, 0.49622521])
with np.errstate(all="raise"):
for name, Tree in REG_TREES.items():
reg = Tree(random_state=0)
reg.fit(X, y)
reg.fit(X, -y)
reg.fit(-X, y)
reg.fit(-X, -y)
def test_importances():
# Check variable importances.
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
for name, Tree in CLF_TREES.items():
clf = Tree(random_state=0)
clf.fit(X, y)
importances = clf.feature_importances_
n_important = np.sum(importances > 0.1)
assert_equal(importances.shape[0], 10, "Failed with {0}".format(name))
assert_equal(n_important, 3, "Failed with {0}".format(name))
X_new = assert_warns(
DeprecationWarning, clf.transform, X, threshold="mean")
assert_less(0, X_new.shape[1], "Failed with {0}".format(name))
assert_less(X_new.shape[1], X.shape[1], "Failed with {0}".format(name))
# Check on iris that importances are the same for all builders
clf = DecisionTreeClassifier(random_state=0)
clf.fit(iris.data, iris.target)
clf2 = DecisionTreeClassifier(random_state=0,
max_leaf_nodes=len(iris.data))
clf2.fit(iris.data, iris.target)
assert_array_equal(clf.feature_importances_,
clf2.feature_importances_)
@raises(ValueError)
def test_importances_raises():
# Check if variable importance before fit raises ValueError.
clf = DecisionTreeClassifier()
clf.feature_importances_
def test_importances_gini_equal_mse():
# Check that gini is equivalent to mse for binary output variable
X, y = datasets.make_classification(n_samples=2000,
n_features=10,
n_informative=3,
n_redundant=0,
n_repeated=0,
shuffle=False,
random_state=0)
    # The gini index and the mean square error (variance) might differ due
    # to numerical instability. Since those instabilities mainly occur at
    # high tree depth, we restrict the maximal depth.
clf = DecisionTreeClassifier(criterion="gini", max_depth=5,
random_state=0).fit(X, y)
reg = DecisionTreeRegressor(criterion="mse", max_depth=5,
random_state=0).fit(X, y)
assert_almost_equal(clf.feature_importances_, reg.feature_importances_)
assert_array_equal(clf.tree_.feature, reg.tree_.feature)
assert_array_equal(clf.tree_.children_left, reg.tree_.children_left)
assert_array_equal(clf.tree_.children_right, reg.tree_.children_right)
assert_array_equal(clf.tree_.n_node_samples, reg.tree_.n_node_samples)
def test_max_features():
# Check max_features.
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(max_features="auto")
reg.fit(boston.data, boston.target)
assert_equal(reg.max_features_, boston.data.shape[1])
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(max_features="auto")
clf.fit(iris.data, iris.target)
assert_equal(clf.max_features_, 2)
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_features="sqrt")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.sqrt(iris.data.shape[1])))
est = TreeEstimator(max_features="log2")
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(np.log2(iris.data.shape[1])))
est = TreeEstimator(max_features=1)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=3)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 3)
est = TreeEstimator(max_features=0.01)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, 1)
est = TreeEstimator(max_features=0.5)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_,
int(0.5 * iris.data.shape[1]))
est = TreeEstimator(max_features=1.0)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
est = TreeEstimator(max_features=None)
est.fit(iris.data, iris.target)
assert_equal(est.max_features_, iris.data.shape[1])
# use values of max_features that are invalid
est = TreeEstimator(max_features=10)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=-1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=0.0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features=1.5)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_features="foobar")
assert_raises(ValueError, est.fit, X, y)
def test_error():
# Test that it gives proper exception on deficient input.
for name, TreeEstimator in CLF_TREES.items():
# predict before fit
est = TreeEstimator()
assert_raises(NotFittedError, est.predict_proba, X)
est.fit(X, y)
X2 = [[-2, -1, 1]] # wrong feature shape for sample
assert_raises(ValueError, est.predict_proba, X2)
for name, TreeEstimator in ALL_TREES.items():
# Invalid values for parameters
assert_raises(ValueError, TreeEstimator(min_samples_leaf=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=.6).fit, X, y)
assert_raises(ValueError, TreeEstimator(min_samples_leaf=0.).fit, X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=-1).fit,
X, y)
assert_raises(ValueError,
TreeEstimator(min_weight_fraction_leaf=0.51).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=-1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=0.0).fit,
X, y)
assert_raises(ValueError, TreeEstimator(min_samples_split=1.1).fit,
X, y)
assert_raises(ValueError, TreeEstimator(max_depth=-1).fit, X, y)
assert_raises(ValueError, TreeEstimator(max_features=42).fit, X, y)
# Wrong dimensions
est = TreeEstimator()
y2 = y[:-1]
assert_raises(ValueError, est.fit, X, y2)
# Test with arrays that are non-contiguous.
Xf = np.asfortranarray(X)
est = TreeEstimator()
est.fit(Xf, y)
assert_almost_equal(est.predict(T), true_result)
# predict before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.predict, T)
# predict on vector with different dims
est.fit(X, y)
t = np.asarray(T)
assert_raises(ValueError, est.predict, t[:, 1:])
# wrong sample shape
Xt = np.array(X).T
est = TreeEstimator()
est.fit(np.dot(X, Xt), y)
assert_raises(ValueError, est.predict, X)
assert_raises(ValueError, est.apply, X)
clf = TreeEstimator()
clf.fit(X, y)
assert_raises(ValueError, clf.predict, Xt)
assert_raises(ValueError, clf.apply, Xt)
# apply before fitting
est = TreeEstimator()
assert_raises(NotFittedError, est.apply, T)
def test_min_samples_split():
"""Test min_samples_split parameter"""
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test for integer parameter
est = TreeEstimator(min_samples_split=10,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
# test for float parameter
est = TreeEstimator(min_samples_split=0.2,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
# count samples on nodes, -1 means it is a leaf
node_samples = est.tree_.n_node_samples[est.tree_.children_left != -1]
assert_greater(np.min(node_samples), 9,
"Failed with {0}".format(name))
def test_min_samples_leaf():
# Test if leaves contain more than leaf_count training examples
X = np.asfortranarray(iris.data.astype(tree._tree.DTYPE))
y = iris.target
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, name in product((None, 1000), ALL_TREES.keys()):
TreeEstimator = ALL_TREES[name]
# test integer parameter
est = TreeEstimator(min_samples_leaf=5,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
# test float parameter
est = TreeEstimator(min_samples_leaf=0.1,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y)
out = est.tree_.apply(X)
node_counts = np.bincount(out)
# drop inner nodes
leaf_count = node_counts[node_counts != 0]
assert_greater(np.min(leaf_count), 4,
"Failed with {0}".format(name))
def check_min_weight_fraction_leaf(name, datasets, sparse=False):
"""Test if leaves contain at least min_weight_fraction_leaf of the
training set"""
if sparse:
X = DATASETS[datasets]["X_sparse"].astype(np.float32)
else:
X = DATASETS[datasets]["X"].astype(np.float32)
y = DATASETS[datasets]["y"]
weights = rng.rand(X.shape[0])
total_weight = np.sum(weights)
TreeEstimator = ALL_TREES[name]
# test both DepthFirstTreeBuilder and BestFirstTreeBuilder
# by setting max_leaf_nodes
for max_leaf_nodes, frac in product((None, 1000), np.linspace(0, 0.5, 6)):
est = TreeEstimator(min_weight_fraction_leaf=frac,
max_leaf_nodes=max_leaf_nodes,
random_state=0)
est.fit(X, y, sample_weight=weights)
if sparse:
out = est.tree_.apply(X.tocsr())
else:
out = est.tree_.apply(X)
node_weights = np.bincount(out, weights=weights)
# drop inner nodes
leaf_weights = node_weights[node_weights != 0]
assert_greater_equal(
np.min(leaf_weights),
total_weight * est.min_weight_fraction_leaf,
"Failed with {0} "
"min_weight_fraction_leaf={1}".format(
name, est.min_weight_fraction_leaf))
def test_min_weight_fraction_leaf():
# Check on dense input
for name in ALL_TREES:
yield check_min_weight_fraction_leaf, name, "iris"
# Check on sparse input
for name in SPARSE_TREES:
yield check_min_weight_fraction_leaf, name, "multilabel", True
def test_pickle():
for name, TreeEstimator in ALL_TREES.items():
if "Classifier" in name:
X, y = iris.data, iris.target
else:
X, y = boston.data, boston.target
est = TreeEstimator(random_state=0)
est.fit(X, y)
score = est.score(X, y)
fitted_attribute = dict()
for attribute in ["max_depth", "node_count", "capacity"]:
fitted_attribute[attribute] = getattr(est.tree_, attribute)
serialized_object = pickle.dumps(est)
est2 = pickle.loads(serialized_object)
assert_equal(type(est2), est.__class__)
score2 = est2.score(X, y)
assert_equal(score, score2,
"Failed to generate same score after pickling "
"with {0}".format(name))
for attribute in fitted_attribute:
assert_equal(getattr(est2.tree_, attribute),
fitted_attribute[attribute],
"Failed to generate same attribute {0} after "
"pickling with {1}".format(attribute, name))
def test_multioutput():
# Check estimators on multi-output problems.
X = [[-2, -1],
[-1, -1],
[-1, -2],
[1, 1],
[1, 2],
[2, 1],
[-2, 1],
[-1, 1],
[-1, 2],
[2, -1],
[1, -1],
[1, -2]]
y = [[-1, 0],
[-1, 0],
[-1, 0],
[1, 1],
[1, 1],
[1, 1],
[-1, 2],
[-1, 2],
[-1, 2],
[1, 3],
[1, 3],
[1, 3]]
T = [[-1, -1], [1, 1], [-1, 1], [1, -1]]
y_true = [[-1, 0], [1, 1], [-1, 2], [1, 3]]
# toy classification problem
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
y_hat = clf.fit(X, y).predict(T)
assert_array_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
proba = clf.predict_proba(T)
assert_equal(len(proba), 2)
assert_equal(proba[0].shape, (4, 2))
assert_equal(proba[1].shape, (4, 4))
log_proba = clf.predict_log_proba(T)
assert_equal(len(log_proba), 2)
assert_equal(log_proba[0].shape, (4, 2))
assert_equal(log_proba[1].shape, (4, 4))
# toy regression problem
for name, TreeRegressor in REG_TREES.items():
reg = TreeRegressor(random_state=0)
y_hat = reg.fit(X, y).predict(T)
assert_almost_equal(y_hat, y_true)
assert_equal(y_hat.shape, (4, 2))
def test_classes_shape():
# Test that n_classes_ and classes_ have proper shape.
for name, TreeClassifier in CLF_TREES.items():
# Classification, single output
clf = TreeClassifier(random_state=0)
clf.fit(X, y)
assert_equal(clf.n_classes_, 2)
assert_array_equal(clf.classes_, [-1, 1])
# Classification, multi-output
_y = np.vstack((y, np.array(y) * 2)).T
clf = TreeClassifier(random_state=0)
clf.fit(X, _y)
assert_equal(len(clf.n_classes_), 2)
assert_equal(len(clf.classes_), 2)
assert_array_equal(clf.n_classes_, [2, 2])
assert_array_equal(clf.classes_, [[-1, 1], [-2, 2]])
def test_unbalanced_iris():
# Check class rebalancing.
unbalanced_X = iris.data[:125]
unbalanced_y = iris.target[:125]
sample_weight = compute_sample_weight("balanced", unbalanced_y)
for name, TreeClassifier in CLF_TREES.items():
clf = TreeClassifier(random_state=0)
clf.fit(unbalanced_X, unbalanced_y, sample_weight=sample_weight)
assert_almost_equal(clf.predict(unbalanced_X), unbalanced_y)
def test_memory_layout():
# Check that it works no matter the memory layout
for (name, TreeEstimator), dtype in product(ALL_TREES.items(),
[np.float64, np.float32]):
est = TreeEstimator(random_state=0)
# Nothing
X = np.asarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# C-order
X = np.asarray(iris.data, order="C", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# F-order
X = np.asarray(iris.data, order="F", dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Contiguous
X = np.ascontiguousarray(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
if not est.presort:
# csr matrix
X = csr_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# csc_matrix
X = csc_matrix(iris.data, dtype=dtype)
y = iris.target
assert_array_equal(est.fit(X, y).predict(X), y)
# Strided
X = np.asarray(iris.data[::3], dtype=dtype)
y = iris.target[::3]
assert_array_equal(est.fit(X, y).predict(X), y)
def test_sample_weight():
# Check sample weighting.
# Test that zero-weighted samples are not taken into account
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
sample_weight = np.ones(100)
sample_weight[y == 0] = 0.0
clf = DecisionTreeClassifier(random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), np.ones(100))
# Test that low weighted samples are not taken into account at low depth
X = np.arange(200)[:, np.newaxis]
y = np.zeros(200)
y[50:100] = 1
y[100:200] = 2
X[100:200, 0] = 200
sample_weight = np.ones(200)
sample_weight[y == 2] = .51 # Samples of class '2' are still weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 149.5)
sample_weight[y == 2] = .5 # Samples of class '2' are no longer weightier
clf = DecisionTreeClassifier(max_depth=1, random_state=0)
clf.fit(X, y, sample_weight=sample_weight)
assert_equal(clf.tree_.threshold[0], 49.5) # Threshold should have moved
# Test that sample weighting is the same as having duplicates
X = iris.data
y = iris.target
duplicates = rng.randint(0, X.shape[0], 100)
clf = DecisionTreeClassifier(random_state=1)
clf.fit(X[duplicates], y[duplicates])
sample_weight = np.bincount(duplicates, minlength=X.shape[0])
clf2 = DecisionTreeClassifier(random_state=1)
clf2.fit(X, y, sample_weight=sample_weight)
internal = clf.tree_.children_left != tree._tree.TREE_LEAF
assert_array_almost_equal(clf.tree_.threshold[internal],
clf2.tree_.threshold[internal])
def test_sample_weight_invalid():
# Check sample weighting raises errors.
X = np.arange(100)[:, np.newaxis]
y = np.ones(100)
y[:50] = 0.0
clf = DecisionTreeClassifier(random_state=0)
sample_weight = np.random.rand(100, 1)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.array(0)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(101)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
sample_weight = np.ones(99)
assert_raises(ValueError, clf.fit, X, y, sample_weight=sample_weight)
def check_class_weights(name):
"""Check class_weights resemble sample_weights behavior."""
TreeClassifier = CLF_TREES[name]
# Iris is balanced, so no effect expected for using 'balanced' weights
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target)
clf2 = TreeClassifier(class_weight='balanced', random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Make a multi-output problem with three copies of Iris
iris_multi = np.vstack((iris.target, iris.target, iris.target)).T
# Create user-defined weights that should balance over the outputs
clf3 = TreeClassifier(class_weight=[{0: 2., 1: 2., 2: 1.},
{0: 2., 1: 1., 2: 2.},
{0: 1., 1: 2., 2: 2.}],
random_state=0)
clf3.fit(iris.data, iris_multi)
assert_almost_equal(clf2.feature_importances_, clf3.feature_importances_)
# Check against multi-output "auto" which should also have no effect
clf4 = TreeClassifier(class_weight='balanced', random_state=0)
clf4.fit(iris.data, iris_multi)
assert_almost_equal(clf3.feature_importances_, clf4.feature_importances_)
# Inflate importance of class 1, check against user-defined weights
sample_weight = np.ones(iris.target.shape)
sample_weight[iris.target == 1] *= 100
class_weight = {0: 1., 1: 100., 2: 1.}
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
# Check that sample_weight and class_weight are multiplicative
clf1 = TreeClassifier(random_state=0)
clf1.fit(iris.data, iris.target, sample_weight ** 2)
clf2 = TreeClassifier(class_weight=class_weight, random_state=0)
clf2.fit(iris.data, iris.target, sample_weight)
assert_almost_equal(clf1.feature_importances_, clf2.feature_importances_)
def test_class_weights():
for name in CLF_TREES:
yield check_class_weights, name
def check_class_weight_errors(name):
# Test if class_weight raises errors and warnings when expected.
TreeClassifier = CLF_TREES[name]
_y = np.vstack((y, np.array(y) * 2)).T
# Invalid preset string
clf = TreeClassifier(class_weight='the larch', random_state=0)
assert_raises(ValueError, clf.fit, X, y)
assert_raises(ValueError, clf.fit, X, _y)
# Not a list or preset for multi-output
clf = TreeClassifier(class_weight=1, random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
# Incorrect length list for multi-output
clf = TreeClassifier(class_weight=[{-1: 0.5, 1: 1.}], random_state=0)
assert_raises(ValueError, clf.fit, X, _y)
def test_class_weight_errors():
for name in CLF_TREES:
yield check_class_weight_errors, name
def test_max_leaf_nodes():
    # Test greedy trees with max_depth + 1 leaves.
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=None, max_leaf_nodes=k + 1).fit(X, y)
tree = est.tree_
assert_equal((tree.children_left == TREE_LEAF).sum(), k + 1)
# max_leaf_nodes in (0, 1) should raise ValueError
est = TreeEstimator(max_depth=None, max_leaf_nodes=0)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=1)
assert_raises(ValueError, est.fit, X, y)
est = TreeEstimator(max_depth=None, max_leaf_nodes=0.1)
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
# Test precedence of max_leaf_nodes over max_depth.
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.tree_
assert_greater(tree.max_depth, 1)
def test_arrays_persist():
# Ensure property arrays' memory stays alive when tree disappears
# non-regression for #2726
for attr in ['n_classes', 'value', 'children_left', 'children_right',
'threshold', 'impurity', 'feature', 'n_node_samples']:
value = getattr(DecisionTreeClassifier().fit([[0], [1]], [0, 1]).tree_, attr)
# if pointing to freed memory, contents may be arbitrary
assert_true(-3 <= value.flat[0] < 3,
'Array points to arbitrary memory')
def test_only_constant_features():
random_state = check_random_state(0)
X = np.zeros((10, 20))
y = random_state.randint(0, 2, (10, ))
for name, TreeEstimator in ALL_TREES.items():
est = TreeEstimator(random_state=0)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 0)
def test_with_only_one_non_constant_features():
X = np.hstack([np.array([[1.], [1.], [0.], [0.]]),
np.zeros((4, 1000))])
y = np.array([0., 1., 0., 1.0])
for name, TreeEstimator in CLF_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict_proba(X), 0.5 * np.ones((4, 2)))
for name, TreeEstimator in REG_TREES.items():
est = TreeEstimator(random_state=0, max_features=1)
est.fit(X, y)
assert_equal(est.tree_.max_depth, 1)
assert_array_equal(est.predict(X), 0.5 * np.ones((4, )))
def test_big_input():
# Test if the warning for too large inputs is appropriate.
X = np.repeat(10 ** 40., 4).astype(np.float64).reshape(-1, 1)
clf = DecisionTreeClassifier()
try:
clf.fit(X, [0, 1, 0, 1])
except ValueError as e:
assert_in("float32", str(e))
def test_realloc():
from sklearn.tree._utils import _realloc_test
assert_raises(MemoryError, _realloc_test)
def test_huge_allocations():
n_bits = int(platform.architecture()[0].rstrip('bit'))
X = np.random.randn(10, 2)
y = np.random.randint(0, 2, 10)
# Sanity check: we cannot request more memory than the size of the address
# space. Currently raises OverflowError.
huge = 2 ** (n_bits + 1)
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(Exception, clf.fit, X, y)
# Non-regression test: MemoryError used to be dropped by Cython
# because of missing "except *".
huge = 2 ** (n_bits - 1) - 1
clf = DecisionTreeClassifier(splitter='best', max_leaf_nodes=huge)
assert_raises(MemoryError, clf.fit, X, y)
def check_sparse_input(tree, dataset, max_depth=None):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
    # Save testing time
if dataset in ["digits", "boston"]:
n_samples = X.shape[0] // 5
X = X[:n_samples]
X_sparse = X_sparse[:n_samples]
y = y[:n_samples]
for sparse_format in (csr_matrix, csc_matrix, coo_matrix):
X_sparse = sparse_format(X_sparse)
# Check the default (depth first search)
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
y_pred = d.predict(X)
if tree in CLF_TREES:
y_proba = d.predict_proba(X)
y_log_proba = d.predict_log_proba(X)
for sparse_matrix in (csr_matrix, csc_matrix, coo_matrix):
X_sparse_test = sparse_matrix(X_sparse, dtype=np.float32)
assert_array_almost_equal(s.predict(X_sparse_test), y_pred)
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X_sparse_test),
y_proba)
assert_array_almost_equal(s.predict_log_proba(X_sparse_test),
y_log_proba)
def test_sparse_input():
for tree, dataset in product(SPARSE_TREES,
("clf_small", "toy", "digits", "multilabel",
"sparse-pos", "sparse-neg", "sparse-mix",
"zeros")):
max_depth = 3 if dataset == "digits" else None
yield (check_sparse_input, tree, dataset, max_depth)
    # Due to the numerical instability of MSE and an overly strict test, we
    # limit the maximal depth
for tree, dataset in product(REG_TREES, ["boston", "reg_small"]):
if tree in SPARSE_TREES:
yield (check_sparse_input, tree, dataset, 2)
def check_sparse_parameters(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check max_features
d = TreeEstimator(random_state=0, max_features=1, max_depth=2).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
max_depth=2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_split
d = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X, y)
s = TreeEstimator(random_state=0, max_features=1,
min_samples_split=10).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check min_samples_leaf
d = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X, y)
s = TreeEstimator(random_state=0,
min_samples_leaf=X_sparse.shape[0] // 2).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
# Check best-first search
d = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X, y)
s = TreeEstimator(random_state=0, max_leaf_nodes=3).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_parameters():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_parameters, tree, dataset)
def check_sparse_criterion(tree, dataset):
TreeEstimator = ALL_TREES[tree]
X = DATASETS[dataset]["X"]
X_sparse = DATASETS[dataset]["X_sparse"]
y = DATASETS[dataset]["y"]
# Check various criterion
CRITERIONS = REG_CRITERIONS if tree in REG_TREES else CLF_CRITERIONS
for criterion in CRITERIONS:
d = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=3,
criterion=criterion).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
assert_array_almost_equal(s.predict(X), d.predict(X))
def test_sparse_criterion():
for tree, dataset in product(SPARSE_TREES,
["sparse-pos", "sparse-neg", "sparse-mix",
"zeros"]):
yield (check_sparse_criterion, tree, dataset)
def check_explicit_sparse_zeros(tree, max_depth=3,
n_features=10):
TreeEstimator = ALL_TREES[tree]
    # Set n_samples equal to n_features to ease the simultaneous construction
    # of a csr and a csc matrix
n_samples = n_features
samples = np.arange(n_samples)
# Generate X, y
random_state = check_random_state(0)
indices = []
data = []
offset = 0
indptr = [offset]
for i in range(n_features):
n_nonzero_i = random_state.binomial(n_samples, 0.5)
indices_i = random_state.permutation(samples)[:n_nonzero_i]
indices.append(indices_i)
data_i = random_state.binomial(3, 0.5, size=(n_nonzero_i, )) - 1
data.append(data_i)
offset += n_nonzero_i
indptr.append(offset)
indices = np.concatenate(indices)
data = np.array(np.concatenate(data), dtype=np.float32)
X_sparse = csc_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X = X_sparse.toarray()
X_sparse_test = csr_matrix((data, indices, indptr),
shape=(n_samples, n_features))
X_test = X_sparse_test.toarray()
y = random_state.randint(0, 3, size=(n_samples, ))
# Ensure that X_sparse_test owns its data, indices and indptr array
X_sparse_test = X_sparse_test.copy()
# Ensure that we have explicit zeros
assert_greater((X_sparse.data == 0.).sum(), 0)
assert_greater((X_sparse_test.data == 0.).sum(), 0)
# Perform the comparison
d = TreeEstimator(random_state=0, max_depth=max_depth).fit(X, y)
s = TreeEstimator(random_state=0, max_depth=max_depth).fit(X_sparse, y)
assert_tree_equal(d.tree_, s.tree_,
"{0} with dense and sparse format gave different "
"trees".format(tree))
Xs = (X_test, X_sparse_test)
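    # Check that the dense-fitted and sparse-fitted trees give identical apply,
    # decision_path and predict results for every dense/sparse test input combination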
for X1, X2 in product(Xs, Xs):
assert_array_almost_equal(s.tree_.apply(X1), d.tree_.apply(X2))
assert_array_almost_equal(s.apply(X1), d.apply(X2))
assert_array_almost_equal(s.apply(X1), s.tree_.apply(X1))
assert_array_almost_equal(s.tree_.decision_path(X1).toarray(),
d.tree_.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
d.decision_path(X2).toarray())
assert_array_almost_equal(s.decision_path(X1).toarray(),
s.tree_.decision_path(X1).toarray())
assert_array_almost_equal(s.predict(X1), d.predict(X2))
if tree in CLF_TREES:
assert_array_almost_equal(s.predict_proba(X1),
d.predict_proba(X2))
def test_explicit_sparse_zeros():
for tree in SPARSE_TREES:
yield (check_explicit_sparse_zeros, tree)
@ignore_warnings
def check_raise_error_on_1d_input(name):
TreeEstimator = ALL_TREES[name]
X = iris.data[:, 0].ravel()
X_2d = iris.data[:, 0].reshape((-1, 1))
y = iris.target
assert_raises(ValueError, TreeEstimator(random_state=0).fit, X, y)
est = TreeEstimator(random_state=0)
est.fit(X_2d, y)
assert_raises(ValueError, est.predict, [X])
@ignore_warnings
def test_1d_input():
for name in ALL_TREES:
yield check_raise_error_on_1d_input, name
def _check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight):
# Private function to keep pretty printing in nose yielded tests
est = TreeEstimator(random_state=0)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 1)
est = TreeEstimator(random_state=0, min_weight_fraction_leaf=0.4)
est.fit(X, y, sample_weight=sample_weight)
assert_equal(est.tree_.max_depth, 0)
def check_min_weight_leaf_split_level(name):
TreeEstimator = ALL_TREES[name]
X = np.array([[0], [0], [0], [0], [1]])
y = [0, 0, 0, 0, 1]
sample_weight = [0.2, 0.2, 0.2, 0.2, 0.2]
_check_min_weight_leaf_split_level(TreeEstimator, X, y, sample_weight)
if not TreeEstimator().presort:
_check_min_weight_leaf_split_level(TreeEstimator, csc_matrix(X), y,
sample_weight)
def test_min_weight_leaf_split_level():
for name in ALL_TREES:
yield check_min_weight_leaf_split_level, name
def check_public_apply(name):
X_small32 = X_small.astype(tree._tree.DTYPE)
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def check_public_apply_sparse(name):
X_small32 = csr_matrix(X_small.astype(tree._tree.DTYPE))
est = ALL_TREES[name]()
est.fit(X_small, y_small)
assert_array_equal(est.apply(X_small),
est.tree_.apply(X_small32))
def test_public_apply():
for name in ALL_TREES:
yield (check_public_apply, name)
for name in SPARSE_TREES:
yield (check_public_apply_sparse, name)
def check_presort_sparse(est, X, y):
assert_raises(ValueError, est.fit, X, y)
def test_presort_sparse():
ests = (DecisionTreeClassifier(presort=True),
DecisionTreeRegressor(presort=True))
sparse_matrices = (csr_matrix, csc_matrix, coo_matrix)
y, X = datasets.make_multilabel_classification(random_state=0,
n_samples=50,
n_features=1,
n_classes=20)
y = y[:, 0]
for est, sparse_matrix in product(ests, sparse_matrices):
yield check_presort_sparse, est, sparse_matrix(X), y
def test_decision_path_hardcoded():
X = iris.data
y = iris.target
est = DecisionTreeClassifier(random_state=0, max_depth=1).fit(X, y)
node_indicator = est.decision_path(X[:2]).toarray()
assert_array_equal(node_indicator, [[1, 1, 0], [1, 0, 1]])
def check_decision_path(name):
X = iris.data
y = iris.target
n_samples = X.shape[0]
TreeEstimator = ALL_TREES[name]
est = TreeEstimator(random_state=0, max_depth=2)
est.fit(X, y)
node_indicator_csr = est.decision_path(X)
node_indicator = node_indicator_csr.toarray()
assert_equal(node_indicator.shape, (n_samples, est.tree_.node_count))
    # Assert that the leaf indices are correct
leaves = est.apply(X)
leave_indicator = [node_indicator[i, j] for i, j in enumerate(leaves)]
assert_array_almost_equal(leave_indicator, np.ones(shape=n_samples))
    # Ensure there is only one leaf node per sample
all_leaves = est.tree_.children_left == TREE_LEAF
assert_array_almost_equal(np.dot(node_indicator, all_leaves),
np.ones(shape=n_samples))
# Ensure max depth is consistent with sum of indicator
max_depth = node_indicator.sum(axis=1).max()
assert_less_equal(est.tree_.max_depth, max_depth)
def test_decision_path():
for name in ALL_TREES:
yield (check_decision_path, name)
def check_no_sparse_y_support(name):
X, y = X_multilabel, csr_matrix(y_multilabel)
TreeEstimator = ALL_TREES[name]
assert_raises(TypeError, TreeEstimator(random_state=0).fit, X, y)
def test_no_sparse_y_support():
# Currently we don't support sparse y
for name in ALL_TREES:
yield (check_no_sparse_y_support, name)
| bsd-3-clause |
kwikadi/orange3 | Orange/widgets/data/owdatasampler.py | 2 | 14322 | import sys
import math
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
import numpy as np
import sklearn.cross_validation as skl_cross_validation
from Orange.widgets import widget, gui
from Orange.widgets.settings import Setting
from Orange.data import Table
from Orange.data.sql.table import SqlTable
class OWDataSampler(widget.OWWidget):
name = "Data Sampler"
description = "Randomly draw a subset of data points " \
"from the input data set."
icon = "icons/DataSampler.svg"
priority = 100
category = "Data"
keywords = ["data", "sample"]
inputs = [("Data", Table, "set_data")]
outputs = [("Data Sample", Table, widget.Default),
("Remaining Data", Table)]
want_main_area = False
resizing_enabled = False
RandomSeed = 42
FixedProportion, FixedSize, CrossValidation, Bootstrap = range(4)
SqlTime, SqlProportion = range(2)
use_seed = Setting(False)
replacement = Setting(False)
stratify = Setting(False)
sql_dl = Setting(False)
sampling_type = Setting(FixedProportion)
sampleSizeNumber = Setting(1)
sampleSizePercentage = Setting(70)
sampleSizeSqlTime = Setting(1)
sampleSizeSqlPercentage = Setting(0.1)
number_of_folds = Setting(10)
selectedFold = Setting(1)
def __init__(self):
super().__init__()
self.data = None
self.indices = None
self.sampled_instances = self.remaining_instances = None
box = gui.widgetBox(self.controlArea, "Information")
self.dataInfoLabel = gui.widgetLabel(box, 'No data on input.')
self.outputInfoLabel = gui.widgetLabel(box, ' ')
self.sampling_box = gui.widgetBox(self.controlArea, "Sampling Type")
sampling = gui.radioButtons(self.sampling_box, self, "sampling_type",
callback=self.sampling_type_changed)
def set_sampling_type(i):
def f():
self.sampling_type = i
self.sampling_type_changed()
return f
gui.appendRadioButton(sampling, "Fixed proportion of data:")
self.sampleSizePercentageSlider = gui.hSlider(
gui.indentedBox(sampling), self,
"sampleSizePercentage",
minValue=0, maxValue=99, ticks=10, labelFormat="%d %%",
callback=set_sampling_type(self.FixedProportion),
addSpace=12)
gui.appendRadioButton(sampling, "Fixed sample size:")
ibox = gui.indentedBox(sampling)
self.sampleSizeSpin = gui.spin(
ibox, self, "sampleSizeNumber", label="Instances: ",
minv=1, maxv=2 ** 31 - 1,
callback=set_sampling_type(self.FixedSize))
gui.checkBox(
ibox, self, "replacement", "Sample with replacement",
callback=set_sampling_type(self.FixedSize),
addSpace=12)
gui.appendRadioButton(sampling, "Cross Validation:")
form = QtGui.QFormLayout(
formAlignment=Qt.AlignLeft | Qt.AlignTop,
labelAlignment=Qt.AlignLeft,
fieldGrowthPolicy=QtGui.QFormLayout.AllNonFixedFieldsGrow)
ibox = gui.indentedBox(sampling, addSpace=True, orientation=form)
form.addRow("Number of folds",
gui.spin(
ibox, self, "number_of_folds", 2, 100,
addToLayout=False,
callback=self.number_of_folds_changed))
self.selected_fold_spin = gui.spin(
ibox, self, "selectedFold", 1, self.number_of_folds,
addToLayout=False, callback=self.fold_changed)
form.addRow("Selected fold", self.selected_fold_spin)
        gui.appendRadioButton(sampling, "Bootstrap")
self.sql_box = gui.widgetBox(self.controlArea, "Sampling Type")
sampling = gui.radioButtons(self.sql_box, self, "sampling_type",
callback=self.sampling_type_changed)
gui.appendRadioButton(sampling, "Time:")
ibox = gui.indentedBox(sampling)
spin = gui.spin(ibox, self, "sampleSizeSqlTime", minv=1, maxv=3600,
callback=set_sampling_type(self.SqlTime))
spin.setSuffix(" sec")
gui.appendRadioButton(sampling, "Percentage")
ibox = gui.indentedBox(sampling)
spin = gui.spin(ibox, self, "sampleSizeSqlPercentage", spinType=float,
minv=0.0001, maxv=100, step=0.1, decimals=4,
callback=set_sampling_type(self.SqlProportion))
spin.setSuffix(" %")
self.sql_box.setVisible(False)
self.options_box = gui.widgetBox(self.controlArea, "Options")
self.cb_seed = gui.checkBox(
self.options_box, self, "use_seed",
"Replicable (deterministic) sampling",
callback=self.settings_changed)
self.cb_stratify = gui.checkBox(
self.options_box, self, "stratify",
"Stratify sample (when possible)", callback=self.settings_changed)
self.cb_sql_dl = gui.checkBox(
self.options_box, self, "sql_dl", "Download data to local memory",
callback=self.settings_changed)
self.cb_sql_dl.setVisible(False)
gui.button(self.controlArea, self, "Sample Data",
callback=self.commit, addSpace=8)
self.controlArea.layout().addWidget(self.report_button)
def sampling_type_changed(self):
self.settings_changed()
def number_of_folds_changed(self):
self.selected_fold_spin.setMaximum(self.number_of_folds)
self.sampling_type = self.CrossValidation
self.settings_changed()
def fold_changed(self):
# a separate callback - if we decide to cache indices
self.sampling_type = self.CrossValidation
def settings_changed(self):
self.indices = None
def set_data(self, dataset):
self.data = dataset
if dataset is not None:
sql = isinstance(dataset, SqlTable)
self.sampling_box.setVisible(not sql)
self.sql_box.setVisible(sql)
self.cb_seed.setVisible(not sql)
self.cb_stratify.setVisible(not sql)
self.cb_sql_dl.setVisible(sql)
self.dataInfoLabel.setText(
'{}{} instances in input data set.'.format(*(
('~', dataset.approx_len()) if sql else
('', len(dataset)))))
if not sql:
self.sampleSizeSpin.setMaximum(len(dataset))
self.updateindices()
else:
self.dataInfoLabel.setText('No data on input.')
self.outputInfoLabel.setText('')
self.indices = None
self.commit()
def commit(self):
if self.data is None:
sample = other = None
self.sampled_instances = self.remaining_instances = None
self.outputInfoLabel.setText("")
elif isinstance(self.data, SqlTable):
other = None
if self.sampling_type == self.SqlProportion:
sample = self.data.sample_percentage(
self.sampleSizeSqlPercentage, no_cache=True)
else:
sample = self.data.sample_time(
self.sampleSizeSqlTime, no_cache=True)
if self.sql_dl:
sample.download_data()
sample = Table(sample)
else:
if self.indices is None or not self.use_seed:
self.updateindices()
if self.indices is None:
return
if self.sampling_type in (
self.FixedProportion, self.FixedSize, self.Bootstrap):
remaining, sample = self.indices
self.outputInfoLabel.setText(
'Outputting %d instance%s.' %
(len(sample), "s" * (len(sample) != 1)))
elif self.sampling_type == self.CrossValidation:
remaining, sample = self.indices[self.selectedFold - 1]
self.outputInfoLabel.setText(
'Outputting fold %d, %d instance%s.' %
(self.selectedFold, len(sample), "s" * (len(sample) != 1))
)
sample = self.data[sample]
other = self.data[remaining]
self.sampled_instances = len(sample)
self.remaining_instances = len(other)
self.send("Data Sample", sample)
self.send("Remaining Data", other)
def updateindices(self):
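        # Recompute the sampling indices for the current settings; on invalid
        # settings, show an error and reset self.indices to None.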
err_msg = ""
repl = True
data_length = len(self.data)
num_classes = len(self.data.domain.class_var.values) \
if self.data.domain.has_discrete_class else 0
size = None
if self.sampling_type == self.FixedSize:
size = self.sampleSizeNumber
repl = self.replacement
elif self.sampling_type == self.FixedProportion:
size = np.ceil(self.sampleSizePercentage / 100 * data_length)
repl = False
elif self.sampling_type == self.CrossValidation:
if data_length < self.number_of_folds:
err_msg = "Number of folds exceeds the data size"
else:
assert self.sampling_type == self.Bootstrap
if not repl and size is not None and (data_length <= size):
err_msg = "Sample must be smaller than data"
if not repl and data_length <= num_classes and self.stratify:
err_msg = "Not enough data for stratified sampling"
self.error(0)
if err_msg:
self.error(err_msg)
self.indices = None
return
rnd = self.RandomSeed if self.use_seed else None
stratified = (self.stratify and
type(self.data) == Table and
self.data.domain.has_discrete_class)
if self.sampling_type == self.FixedSize:
self.indices = sample_random_n(
self.data, size,
stratified=stratified, replace=self.replacement,
random_state=rnd)
elif self.sampling_type == self.FixedProportion:
self.indices = sample_random_p(
self.data, self.sampleSizePercentage / 100,
stratified=stratified, random_state=rnd)
elif self.sampling_type == self.Bootstrap:
self.indices = sample_bootstrap(data_length, random_state=rnd)
else:
self.indices = sample_fold_indices(
self.data, self.number_of_folds, stratified=stratified,
random_state=rnd)
def send_report(self):
if self.sampling_type == self.FixedProportion:
tpe = "Random sample with {} % of data".format(
self.sampleSizePercentage)
elif self.sampling_type == self.FixedSize:
if self.sampleSizeNumber == 1:
tpe = "Random data instance"
else:
tpe = "Random sample with {} data instances".format(
self.sampleSizeNumber)
if self.replacement:
tpe += ", with replacement"
elif self.sampling_type == self.CrossValidation:
tpe = "Fold {} of {}-fold cross-validation".format(
self.selectedFold, self.number_of_folds)
else:
tpe = "Undefined" # should not come here at all
if self.stratify:
tpe += ", stratified (if possible)"
if self.use_seed:
tpe += ", deterministic"
items = [("Sampling type", tpe)]
if self.sampled_instances is not None:
items += [
("Input", "{} instances".format(len(self.data))),
("Sample", "{} instances".format(self.sampled_instances)),
("Remaining", "{} instances".format(self.remaining_instances)),
]
self.report_items(items)
def sample_fold_indices(table, folds=10, stratified=False, random_state=None):
"""
:param Orange.data.Table table:
:param int folds: Number of folds
:param bool stratified: Return stratified indices (if applicable).
:param Random random_state:
    :rval tuple-of-arrays: A tuple of index arrays, one for each fold.
"""
if stratified and table.domain.has_discrete_class:
# XXX: StratifiedKFold does not support random_state
ind = skl_cross_validation.StratifiedKFold(
table.Y.ravel(), folds, random_state=random_state)
else:
ind = skl_cross_validation.KFold(
len(table), folds, shuffle=True, random_state=random_state)
return tuple(ind)
def sample_random_n(table, n, stratified=False, replace=False,
random_state=None):
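    # Draw n rows: with replacement we sample row indices directly; otherwise we
    # delegate to scikit-learn's shuffle-split helpers (stratified when requested).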
if replace:
if random_state is None:
rgen = np.random
else:
rgen = np.random.mtrand.RandomState(random_state)
sample = rgen.random_integers(0, len(table) - 1, n)
o = np.ones(len(table))
o[sample] = 0
others = np.nonzero(o)[0]
return others, sample
if stratified and table.domain.has_discrete_class:
test_size = max(len(table.domain.class_var.values), n)
ind = skl_cross_validation.StratifiedShuffleSplit(
table.Y.ravel(), n_iter=1,
test_size=test_size, train_size=len(table) - test_size,
random_state=random_state)
else:
ind = skl_cross_validation.ShuffleSplit(
len(table), n_iter=1,
test_size=n, random_state=random_state)
return next(iter(ind))
def sample_random_p(table, p, stratified=False, random_state=None):
n = int(math.ceil(len(table) * p))
return sample_random_n(table, n, stratified, False, random_state)
def sample_bootstrap(size, random_state=None):
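    # Draw `size` row indices with replacement; rows that are never drawn form
    # the remaining (out-of-bag) set.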
rgen = np.random.RandomState(random_state)
sample = rgen.randint(0, size, size)
sample.sort() # not needed for the code below, just for the user
insample = np.ones((size,), dtype=np.bool)
insample[sample] = False
remaining = np.flatnonzero(insample)
return remaining, sample
def test_main():
app = QtGui.QApplication([])
data = Table("iris")
w = OWDataSampler()
w.set_data(data)
w.show()
return app.exec_()
if __name__ == "__main__":
sys.exit(test_main())
| bsd-2-clause |
idlead/scikit-learn | sklearn/preprocessing/label.py | 16 | 26702 | # Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Joel Nothman <joel.nothman@gmail.com>
# Hamzeh Alsalhi <ha258@cornell.edu>
# License: BSD 3 clause
from collections import defaultdict
import itertools
import array
import numpy as np
import scipy.sparse as sp
from ..base import BaseEstimator, TransformerMixin
from ..utils.fixes import np_version
from ..utils.fixes import sparse_min_max
from ..utils.fixes import astype
from ..utils.fixes import in1d
from ..utils import column_or_1d
from ..utils.validation import check_array
from ..utils.validation import check_is_fitted
from ..utils.validation import _num_samples
from ..utils.multiclass import unique_labels
from ..utils.multiclass import type_of_target
from ..externals import six
zip = six.moves.zip
map = six.moves.map
__all__ = [
'label_binarize',
'LabelBinarizer',
'LabelEncoder',
'MultiLabelBinarizer',
]
def _check_numpy_unicode_bug(labels):
"""Check that user is not subject to an old numpy bug
Fixed in master before 1.7.0:
https://github.com/numpy/numpy/pull/243
"""
if np_version[:3] < (1, 7, 0) and labels.dtype.kind == 'U':
raise RuntimeError("NumPy < 1.7.0 does not implement searchsorted"
" on unicode data correctly. Please upgrade"
" NumPy to use LabelEncoder with unicode inputs.")
class LabelEncoder(BaseEstimator, TransformerMixin):
"""Encode labels with value between 0 and n_classes-1.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Attributes
----------
classes_ : array of shape (n_class,)
Holds the label for each class.
Examples
--------
`LabelEncoder` can be used to normalize labels.
>>> from sklearn import preprocessing
>>> le = preprocessing.LabelEncoder()
>>> le.fit([1, 2, 2, 6])
LabelEncoder()
>>> le.classes_
array([1, 2, 6])
>>> le.transform([1, 1, 2, 6]) #doctest: +ELLIPSIS
array([0, 0, 1, 2]...)
>>> le.inverse_transform([0, 0, 1, 2])
array([1, 1, 2, 6])
It can also be used to transform non-numerical labels (as long as they are
hashable and comparable) to numerical labels.
>>> le = preprocessing.LabelEncoder()
>>> le.fit(["paris", "paris", "tokyo", "amsterdam"])
LabelEncoder()
>>> list(le.classes_)
['amsterdam', 'paris', 'tokyo']
>>> le.transform(["tokyo", "tokyo", "paris"]) #doctest: +ELLIPSIS
array([2, 2, 1]...)
>>> list(le.inverse_transform([2, 2, 1]))
['tokyo', 'tokyo', 'paris']
"""
def fit(self, y):
"""Fit label encoder
Parameters
----------
y : array-like of shape (n_samples,)
Target values.
Returns
-------
self : returns an instance of self.
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_ = np.unique(y)
return self
def fit_transform(self, y):
"""Fit label encoder and return encoded labels
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
y = column_or_1d(y, warn=True)
_check_numpy_unicode_bug(y)
self.classes_, y = np.unique(y, return_inverse=True)
return y
def transform(self, y):
"""Transform labels to normalized encoding.
Parameters
----------
y : array-like of shape [n_samples]
Target values.
Returns
-------
y : array-like of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
classes = np.unique(y)
_check_numpy_unicode_bug(classes)
if len(np.intersect1d(classes, self.classes_)) < len(classes):
diff = np.setdiff1d(classes, self.classes_)
raise ValueError("y contains new labels: %s" % str(diff))
return np.searchsorted(self.classes_, y)
def inverse_transform(self, y):
"""Transform labels back to original encoding.
Parameters
----------
y : numpy array of shape [n_samples]
Target values.
Returns
-------
y : numpy array of shape [n_samples]
"""
check_is_fitted(self, 'classes_')
diff = np.setdiff1d(y, np.arange(len(self.classes_)))
if diff:
raise ValueError("y contains new labels: %s" % str(diff))
y = np.asarray(y)
return self.classes_[y]
class LabelBinarizer(BaseEstimator, TransformerMixin):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
At learning time, this simply consists in learning one regressor
or binary classifier per class. In doing so, one needs to convert
multi-class labels to binary labels (belong or does not belong
to the class). LabelBinarizer makes this process easy with the
transform method.
At prediction time, one assigns the class for which the corresponding
model gave the greatest confidence. LabelBinarizer makes this easy
with the inverse_transform method.
Read more in the :ref:`User Guide <preprocessing_targets>`.
Parameters
----------
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False)
True if the returned array from transform is desired to be in sparse
CSR format.
Attributes
----------
classes_ : array of shape [n_class]
Holds the label for each class.
y_type_ : str,
Represents the type of the target data as evaluated by
        utils.multiclass.type_of_target. Possible types are 'continuous',
        'continuous-multioutput', 'binary', 'multiclass',
        'multiclass-multioutput', 'multilabel-indicator', and 'unknown'.
sparse_input_ : boolean,
True if the input data to transform is given as a sparse matrix, False
otherwise.
Examples
--------
>>> from sklearn import preprocessing
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit([1, 2, 6, 4, 2])
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([1, 2, 4, 6])
>>> lb.transform([1, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
Binary targets transform to a column vector
>>> lb = preprocessing.LabelBinarizer()
>>> lb.fit_transform(['yes', 'no', 'no', 'yes'])
array([[1],
[0],
[0],
[1]])
Passing a 2D matrix for multilabel classification
>>> import numpy as np
>>> lb.fit(np.array([[0, 1, 1], [1, 0, 0]]))
LabelBinarizer(neg_label=0, pos_label=1, sparse_output=False)
>>> lb.classes_
array([0, 1, 2])
>>> lb.transform([0, 1, 2, 1])
array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1],
[0, 1, 0]])
See also
--------
label_binarize : function to perform the transform operation of
LabelBinarizer with fixed classes.
"""
def __init__(self, neg_label=0, pos_label=1, sparse_output=False):
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if sparse_output and (pos_label == 0 or neg_label != 0):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
self.neg_label = neg_label
self.pos_label = pos_label
self.sparse_output = sparse_output
def fit(self, y):
"""Fit label binarizer
Parameters
----------
y : numpy array of shape (n_samples,) or (n_samples, n_classes)
Target values. The 2-d matrix should only contain 0 and 1,
represents multilabel classification.
Returns
-------
self : returns an instance of self.
"""
self.y_type_ = type_of_target(y)
if 'multioutput' in self.y_type_:
raise ValueError("Multioutput target data is not supported with "
"label binarization")
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
self.sparse_input_ = sp.issparse(y)
self.classes_ = unique_labels(y)
return self
def transform(self, y):
"""Transform multi-class labels to binary labels
The output of transform is sometimes referred to by some authors as the
1-of-K coding scheme.
Parameters
----------
y : numpy array or sparse matrix of shape (n_samples,) or
(n_samples, n_classes) Target values. The 2-d matrix should only
contain 0 and 1, represents multilabel classification. Sparse
matrix can be CSR, CSC, COO, DOK, or LIL.
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
"""
check_is_fitted(self, 'classes_')
y_is_multilabel = type_of_target(y).startswith('multilabel')
if y_is_multilabel and not self.y_type_.startswith('multilabel'):
raise ValueError("The object was not fitted with multilabel"
" input.")
return label_binarize(y, self.classes_,
pos_label=self.pos_label,
neg_label=self.neg_label,
sparse_output=self.sparse_output)
def inverse_transform(self, Y, threshold=None):
"""Transform binary labels back to multi-class labels
Parameters
----------
Y : numpy array or sparse matrix with shape [n_samples, n_classes]
Target values. All sparse matrices are converted to CSR before
inverse transformation.
threshold : float or None
Threshold used in the binary and multi-label cases.
Use 0 when:
- Y contains the output of decision_function (classifier)
Use 0.5 when:
- Y contains the output of predict_proba
If None, the threshold is assumed to be half way between
neg_label and pos_label.
Returns
-------
        y : numpy array or CSR matrix of shape [n_samples]
            Target values.
Notes
-----
In the case when the binary labels are fractional
(probabilistic), inverse_transform chooses the class with the
        greatest value. Typically, this allows using the output of a
linear model's decision_function method directly as the input
of inverse_transform.
"""
check_is_fitted(self, 'classes_')
if threshold is None:
threshold = (self.pos_label + self.neg_label) / 2.
if self.y_type_ == "multiclass":
y_inv = _inverse_binarize_multiclass(Y, self.classes_)
else:
y_inv = _inverse_binarize_thresholding(Y, self.y_type_,
self.classes_, threshold)
if self.sparse_input_:
y_inv = sp.csr_matrix(y_inv)
elif sp.issparse(y_inv):
y_inv = y_inv.toarray()
return y_inv
def label_binarize(y, classes, neg_label=0, pos_label=1, sparse_output=False):
"""Binarize labels in a one-vs-all fashion
Several regression and binary classification algorithms are
available in the scikit. A simple way to extend these algorithms
to the multi-class classification case is to use the so-called
one-vs-all scheme.
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
Parameters
----------
y : array-like
Sequence of integer labels or multilabel data to encode.
classes : array-like of shape [n_classes]
Uniquely holds the label for each class.
neg_label : int (default: 0)
Value with which negative labels must be encoded.
pos_label : int (default: 1)
Value with which positive labels must be encoded.
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Returns
-------
Y : numpy array or CSR matrix of shape [n_samples, n_classes]
Shape will be [n_samples, 1] for binary problems.
Examples
--------
>>> from sklearn.preprocessing import label_binarize
>>> label_binarize([1, 6], classes=[1, 2, 4, 6])
array([[1, 0, 0, 0],
[0, 0, 0, 1]])
The class ordering is preserved:
>>> label_binarize([1, 6], classes=[1, 6, 4, 2])
array([[1, 0, 0, 0],
[0, 1, 0, 0]])
Binary targets transform to a column vector
>>> label_binarize(['yes', 'no', 'no', 'yes'], classes=['no', 'yes'])
array([[1],
[0],
[0],
[1]])
See also
--------
LabelBinarizer : class used to wrap the functionality of label_binarize and
allow for fitting to classes independently of the transform operation
"""
if not isinstance(y, list):
# XXX Workaround that will be removed when list of list format is
# dropped
y = check_array(y, accept_sparse='csr', ensure_2d=False, dtype=None)
else:
if _num_samples(y) == 0:
raise ValueError('y has 0 samples: %r' % y)
if neg_label >= pos_label:
raise ValueError("neg_label={0} must be strictly less than "
"pos_label={1}.".format(neg_label, pos_label))
if (sparse_output and (pos_label == 0 or neg_label != 0)):
raise ValueError("Sparse binarization is only supported with non "
"zero pos_label and zero neg_label, got "
"pos_label={0} and neg_label={1}"
"".format(pos_label, neg_label))
# To account for pos_label == 0 in the dense case
pos_switch = pos_label == 0
if pos_switch:
pos_label = -neg_label
y_type = type_of_target(y)
if 'multioutput' in y_type:
raise ValueError("Multioutput target data is not supported with label "
"binarization")
if y_type == 'unknown':
raise ValueError("The type of target data is not known")
n_samples = y.shape[0] if sp.issparse(y) else len(y)
n_classes = len(classes)
classes = np.asarray(classes)
if y_type == "binary":
if len(classes) == 1:
Y = np.zeros((len(y), 1), dtype=np.int)
Y += neg_label
return Y
elif len(classes) >= 3:
y_type = "multiclass"
sorted_class = np.sort(classes)
if (y_type == "multilabel-indicator" and classes.size != y.shape[1]):
        raise ValueError("classes {0} mismatch with the labels {1} "
                         "found in the data".format(classes, unique_labels(y)))
if y_type in ("binary", "multiclass"):
y = column_or_1d(y)
# pick out the known labels from y
y_in_classes = in1d(y, classes)
y_seen = y[y_in_classes]
indices = np.searchsorted(sorted_class, y_seen)
indptr = np.hstack((0, np.cumsum(y_in_classes)))
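        # build one CSR row per sample, with a single pos_label entry in the
        # column of that sample's class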
data = np.empty_like(indices)
data.fill(pos_label)
Y = sp.csr_matrix((data, indices, indptr),
shape=(n_samples, n_classes))
elif y_type == "multilabel-indicator":
Y = sp.csr_matrix(y)
if pos_label != 1:
data = np.empty_like(Y.data)
data.fill(pos_label)
Y.data = data
else:
raise ValueError("%s target data is not supported with label "
"binarization" % y_type)
if not sparse_output:
Y = Y.toarray()
Y = astype(Y, int, copy=False)
if neg_label != 0:
Y[Y == 0] = neg_label
if pos_switch:
Y[Y == pos_label] = 0
else:
Y.data = astype(Y.data, int, copy=False)
# preserve label ordering
if np.any(classes != sorted_class):
indices = np.searchsorted(sorted_class, classes)
Y = Y[:, indices]
if y_type == "binary":
if sparse_output:
Y = Y.getcol(-1)
else:
Y = Y[:, -1].reshape((-1, 1))
return Y
def _inverse_binarize_multiclass(y, classes):
"""Inverse label binarization transformation for multiclass.
Multiclass uses the maximal score instead of a threshold.
"""
classes = np.asarray(classes)
if sp.issparse(y):
# Find the argmax for each row in y where y is a CSR matrix
y = y.tocsr()
n_samples, n_outputs = y.shape
outputs = np.arange(n_outputs)
row_max = sparse_min_max(y, 1)[1]
row_nnz = np.diff(y.indptr)
y_data_repeated_max = np.repeat(row_max, row_nnz)
# picks out all indices obtaining the maximum per row
y_i_all_argmax = np.flatnonzero(y_data_repeated_max == y.data)
# For corner case where last row has a max of 0
if row_max[-1] == 0:
y_i_all_argmax = np.append(y_i_all_argmax, [len(y.data)])
# Gets the index of the first argmax in each row from y_i_all_argmax
index_first_argmax = np.searchsorted(y_i_all_argmax, y.indptr[:-1])
# first argmax of each row
y_ind_ext = np.append(y.indices, [0])
y_i_argmax = y_ind_ext[y_i_all_argmax[index_first_argmax]]
# Handle rows of all 0
y_i_argmax[np.where(row_nnz == 0)[0]] = 0
# Handles rows with max of 0 that contain negative numbers
samples = np.arange(n_samples)[(row_nnz > 0) &
(row_max.ravel() == 0)]
for i in samples:
ind = y.indices[y.indptr[i]:y.indptr[i + 1]]
y_i_argmax[i] = classes[np.setdiff1d(outputs, ind)][0]
return classes[y_i_argmax]
else:
return classes.take(y.argmax(axis=1), mode="clip")
def _inverse_binarize_thresholding(y, output_type, classes, threshold):
"""Inverse label binarization transformation using thresholding."""
if output_type == "binary" and y.ndim == 2 and y.shape[1] > 2:
raise ValueError("output_type='binary', but y.shape = {0}".
format(y.shape))
if output_type != "binary" and y.shape[1] != len(classes):
raise ValueError("The number of class is not equal to the number of "
"dimension of y.")
classes = np.asarray(classes)
# Perform thresholding
if sp.issparse(y):
if threshold > 0:
if y.format not in ('csr', 'csc'):
y = y.tocsr()
y.data = np.array(y.data > threshold, dtype=np.int)
y.eliminate_zeros()
else:
y = np.array(y.toarray() > threshold, dtype=np.int)
else:
y = np.array(y > threshold, dtype=np.int)
# Inverse transform data
if output_type == "binary":
if sp.issparse(y):
y = y.toarray()
if y.ndim == 2 and y.shape[1] == 2:
return classes[y[:, 1]]
else:
if len(classes) == 1:
y = np.empty(len(y), dtype=classes.dtype)
y.fill(classes[0])
return y
else:
return classes[y.ravel()]
elif output_type == "multilabel-indicator":
return y
else:
raise ValueError("{0} format is not supported".format(output_type))
class MultiLabelBinarizer(BaseEstimator, TransformerMixin):
"""Transform between iterable of iterables and a multilabel format
Although a list of sets or tuples is a very intuitive format for multilabel
data, it is unwieldy to process. This transformer converts between this
intuitive format and the supported multilabel format: a (samples x classes)
binary matrix indicating the presence of a class label.
Parameters
----------
classes : array-like of shape [n_classes] (optional)
Indicates an ordering for the class labels
sparse_output : boolean (default: False),
Set to true if output binary array is desired in CSR sparse format
Attributes
----------
classes_ : array of labels
A copy of the `classes` parameter where provided,
or otherwise, the sorted set of classes found when fitting.
Examples
--------
>>> mlb = MultiLabelBinarizer()
>>> mlb.fit_transform([(1, 2), (3,)])
array([[1, 1, 0],
[0, 0, 1]])
>>> mlb.classes_
array([1, 2, 3])
>>> mlb.fit_transform([set(['sci-fi', 'thriller']), set(['comedy'])])
array([[0, 1, 1],
[1, 0, 0]])
>>> list(mlb.classes_)
['comedy', 'sci-fi', 'thriller']
"""
def __init__(self, classes=None, sparse_output=False):
self.classes = classes
self.sparse_output = sparse_output
def fit(self, y):
"""Fit the label sets binarizer, storing `classes_`
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
self : returns this MultiLabelBinarizer instance
"""
if self.classes is None:
classes = sorted(set(itertools.chain.from_iterable(y)))
else:
classes = self.classes
dtype = np.int if all(isinstance(c, int) for c in classes) else object
self.classes_ = np.empty(len(classes), dtype=dtype)
self.classes_[:] = classes
return self
def fit_transform(self, y):
"""Fit the label sets binarizer and transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
if self.classes is not None:
return self.fit(y).transform(y)
# Automatically increment on new class
class_mapping = defaultdict(int)
class_mapping.default_factory = class_mapping.__len__
yt = self._transform(y, class_mapping)
# sort classes and reorder columns
tmp = sorted(class_mapping, key=class_mapping.get)
# (make safe for tuples)
dtype = np.int if all(isinstance(c, int) for c in tmp) else object
class_mapping = np.empty(len(tmp), dtype=dtype)
class_mapping[:] = tmp
self.classes_, inverse = np.unique(class_mapping, return_inverse=True)
yt.indices = np.take(inverse, yt.indices)
if not self.sparse_output:
yt = yt.toarray()
return yt
def transform(self, y):
"""Transform the given label sets
Parameters
----------
y : iterable of iterables
A set of labels (any orderable and hashable object) for each
sample. If the `classes` parameter is set, `y` will not be
iterated.
Returns
-------
y_indicator : array or CSR matrix, shape (n_samples, n_classes)
A matrix such that `y_indicator[i, j] = 1` iff `classes_[j]` is in
`y[i]`, and 0 otherwise.
"""
class_to_index = dict(zip(self.classes_, range(len(self.classes_))))
yt = self._transform(y, class_to_index)
if not self.sparse_output:
yt = yt.toarray()
return yt
def _transform(self, y, class_mapping):
"""Transforms the label sets with a given mapping
Parameters
----------
y : iterable of iterables
class_mapping : Mapping
Maps from label to column index in label indicator matrix
Returns
-------
y_indicator : sparse CSR matrix, shape (n_samples, n_classes)
Label indicator matrix
"""
indices = array.array('i')
indptr = array.array('i', [0])
for labels in y:
indices.extend(set(class_mapping[label] for label in labels))
indptr.append(len(indices))
data = np.ones(len(indices), dtype=int)
return sp.csr_matrix((data, indices, indptr),
shape=(len(indptr) - 1, len(class_mapping)))
def inverse_transform(self, yt):
"""Transform the given indicator matrix into label sets
Parameters
----------
yt : array or sparse matrix of shape (n_samples, n_classes)
            A matrix containing only 1s and 0s.
Returns
-------
y : list of tuples
The set of labels for each sample such that `y[i]` consists of
`classes_[j]` for each `yt[i, j] == 1`.
"""
if yt.shape[1] != len(self.classes_):
raise ValueError('Expected indicator for {0} classes, but got {1}'
.format(len(self.classes_), yt.shape[1]))
if sp.issparse(yt):
yt = yt.tocsr()
if len(yt.data) != 0 and len(np.setdiff1d(yt.data, [0, 1])) > 0:
raise ValueError('Expected only 0s and 1s in label indicator.')
return [tuple(self.classes_.take(yt.indices[start:end]))
for start, end in zip(yt.indptr[:-1], yt.indptr[1:])]
else:
unexpected = np.setdiff1d(yt, [0, 1])
if len(unexpected) > 0:
raise ValueError('Expected only 0s and 1s in label indicator. '
'Also got {0}'.format(unexpected))
return [tuple(self.classes_.compress(indicators)) for indicators
in yt]
| bsd-3-clause |
jm-begon/scikit-learn | sklearn/metrics/cluster/__init__.py | 312 | 1322 | """
The :mod:`sklearn.metrics.cluster` submodule contains evaluation metrics for
cluster analysis results. There are two forms of evaluation:
- supervised, which uses a ground truth class values for each sample.
- unsupervised, which does not and measures the 'quality' of the model itself.
"""
from .supervised import adjusted_mutual_info_score
from .supervised import normalized_mutual_info_score
from .supervised import adjusted_rand_score
from .supervised import completeness_score
from .supervised import contingency_matrix
from .supervised import expected_mutual_information
from .supervised import homogeneity_completeness_v_measure
from .supervised import homogeneity_score
from .supervised import mutual_info_score
from .supervised import v_measure_score
from .supervised import entropy
from .unsupervised import silhouette_samples
from .unsupervised import silhouette_score
from .bicluster import consensus_score
__all__ = ["adjusted_mutual_info_score", "normalized_mutual_info_score",
"adjusted_rand_score", "completeness_score", "contingency_matrix",
"expected_mutual_information", "homogeneity_completeness_v_measure",
"homogeneity_score", "mutual_info_score", "v_measure_score",
"entropy", "silhouette_samples", "silhouette_score",
"consensus_score"]
| bsd-3-clause |
edhuckle/statsmodels | statsmodels/tools/parallel.py | 32 | 2180 | """Parallel utility function using joblib
copied from https://github.com/mne-tools/mne-python
Author: Alexandre Gramfort <gramfort@nmr.mgh.harvard.edu>
License: Simplified BSD
changes for statsmodels (Josef Perktold)
- try to import from joblib directly (doesn't import all of sklearn)
"""
from __future__ import print_function
from statsmodels.tools.sm_exceptions import (ModuleUnavailableWarning,
module_unavailable_doc)
def parallel_func(func, n_jobs, verbose=5):
"""Return parallel instance with delayed function
Util function to use joblib only if available
Parameters
----------
func: callable
A function
n_jobs: int
Number of jobs to run in parallel
verbose: int
Verbosity level
Returns
-------
parallel: instance of joblib.Parallel or list
The parallel object
my_func: callable
func if not parallel or delayed(func)
n_jobs: int
Number of jobs >= 0
Examples
--------
>>> from math import sqrt
>>> from statsmodels.tools.parallel import parallel_func
>>> parallel, p_func, n_jobs = parallel_func(sqrt, n_jobs=-1, verbose=0)
>>> print(n_jobs)
>>> parallel(p_func(i**2) for i in range(10))
"""
try:
try:
from joblib import Parallel, delayed
except ImportError:
from sklearn.externals.joblib import Parallel, delayed
parallel = Parallel(n_jobs, verbose=verbose)
my_func = delayed(func)
if n_jobs == -1:
try:
import multiprocessing
n_jobs = multiprocessing.cpu_count()
except (ImportError, NotImplementedError):
import warnings
warnings.warn(module_unavailable_doc.format('multiprocessing'),
ModuleUnavailableWarning)
n_jobs = 1
except ImportError:
import warnings
warnings.warn(module_unavailable_doc.format('joblib'),
ModuleUnavailableWarning)
n_jobs = 1
my_func = func
parallel = list
return parallel, my_func, n_jobs
| bsd-3-clause |
combust/mleap | python/tests/sklearn/tree/tree_test.py | 2 | 3507 | import unittest
import os
import shutil
import json
import tempfile
import uuid
from mleap.sklearn.tree.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import load_iris
class TransformerTests(unittest.TestCase):
def setUp(self):
self.tmp_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_decision_tree_classifier(self):
X = [[0, 1], [1, 1]]
Y = [0, 0]
dt_classifier = DecisionTreeClassifier()
dt_classifier = dt_classifier.fit(X, Y)
        dt_classifier.mlinit(input_features='feature', prediction_column='pred', feature_names=['a'])
dt_classifier.serialize_to_bundle(self.tmp_dir, dt_classifier.name)
expected_model = {
"attributes": {
"num_features": {
"long": 2
},
"num_classes": {
"long": 1
}
},
"op": "decision_tree_classifier"
}
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, dt_classifier.name)) as json_data:
model = json.load(json_data)
self.assertEqual(dt_classifier.op, expected_model['op'])
self.assertEqual(expected_model['attributes']['num_features']['long'], model['attributes']['num_features']['long'])
self.assertEqual(expected_model['attributes']['num_classes']['long'], model['attributes']['num_classes']['long'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, dt_classifier.name)) as json_data:
node = json.load(json_data)
self.assertEqual(dt_classifier.name, node['name'])
self.assertEqual(dt_classifier.input_features, node['shape']['inputs'][0]['name'])
self.assertEqual(dt_classifier.prediction_column, node['shape']['outputs'][0]['name'])
def test_decision_tree_classifier_with_iris_dataset(self):
iris = load_iris()
dt_classifier = DecisionTreeClassifier()
        dt_classifier.mlinit(input_features='features', prediction_column='species', feature_names=iris.feature_names)
dt_classifier = dt_classifier.fit(iris.data, iris.target)
dt_classifier.serialize_to_bundle(self.tmp_dir, dt_classifier.name)
expected_model = {
"attributes": {
"num_features": {
"long": 4
},
"num_classes": {
"long": 3
}
},
"op": "decision_tree_classifier"
}
# Test model.json
with open("{}/{}.node/model.json".format(self.tmp_dir, dt_classifier.name)) as json_data:
model = json.load(json_data)
self.assertEqual(dt_classifier.op, expected_model['op'])
self.assertEqual(expected_model['attributes']['num_features']['long'], model['attributes']['num_features']['long'])
self.assertEqual(expected_model['attributes']['num_classes']['long'], model['attributes']['num_classes']['long'])
# Test node.json
with open("{}/{}.node/node.json".format(self.tmp_dir, dt_classifier.name)) as json_data:
node = json.load(json_data)
self.assertEqual(dt_classifier.name, node['name'])
self.assertEqual(dt_classifier.input_features, node['shape']['inputs'][0]['name'])
self.assertEqual(dt_classifier.prediction_column, node['shape']['outputs'][0]['name'])
| apache-2.0 |
colin2328/asciiclass | lectures/lec6/match-loop.py | 3 | 2094 | import csv
from sklearn import tree
import editdist
import re
def string_match_score(p1,p2,field):
s1 = p1[field]
s2 = p2[field]
return editdist.distance(s1.lower(),s2.lower())/float(len(s1))
def jaccard_score(p1,p2,field):
name1 = p1[field]
name2 = p2[field]
set1 = set(name1.lower().split())
set2 = set(name2.lower().split())
c = set1.intersection(set2)
return float(len(c)) / (len(set1) + len(set2) - len(c))
def price_score(p1,p2,field):
price1 = p1[field]
if (len(price1) == 0): return 10000
price2 = p2[field]
if (len(price2) == 0): return 10000
price1 = re.sub('[\$,]', '', price1)
price2 = re.sub('[\$,]', '', price2)
price1 = float(price1)
price2 = float(price2)
return abs(price1 - price2)
print "Loading Data"
abtReader = csv.DictReader(open("Abt.csv","rU"))
buyReader = csv.DictReader(open("Buy.csv","rU"))
gtLines = csv.DictReader(open("abt_buy_perfectMapping.csv","rU"))
gtBuyMap = {}
gtAbtMap = {}
abtAr = []
buyAr = []
for r in abtReader:
abtAr.append(r)
for r in buyReader:
buyAr.append(r)
for r in gtLines:
gtAbtMap[r["idAbt"]] = r["idBuy"]
gtBuyMap[r["idBuy"]] = r["idAbt"]
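# Sweep the match-acceptance threshold for the Jaccard name score from 0.0 to 0.9
# and report precision, recall and F-measure for each setting.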
for loop in range(0,10,1):
falsePos = 0
truePos = 0
falseNeg = 0
trueNeg = 0
thresh = float(loop)/10.0
for r1 in buyAr:
bestMatch = 0
bestVal = []
j = 0
for r2 in abtAr:
s = jaccard_score(r1,r2,"name")
if (s > bestMatch):
bestMatch = s
bestVal = r2
if (bestMatch > thresh):
# print "Best match: ",r1["name"],bestVal["name"],"score=",bestMatch
if (gtBuyMap[r1["id"]] == bestVal["id"]):
truePos = truePos + 1
else:
falsePos = falsePos + 1
precision = truePos / float(truePos + falsePos)
recall = truePos / float(len(buyAr))
fmeas = (2.0 * precision * recall) / (precision + recall)
print "THRESH = ",thresh,"TP = ",truePos,"FP = ",falsePos,"PREC = ",precision,"RECALL = ",recall,"F = ",fmeas
| mit |
steven-cutting/latinpigsay | data/text/samples.py | 1 | 6123 | # -*- coding: utf-8 -*-
__title__ = 'latinpigsay'
__license__ = 'MIT'
__author__ = 'Steven Cutting'
__author_email__ = 'steven.c.projects@gmail.com'
__created_on__ = '12/3/2014'
"""
Created on Wed Dec 3 17:36:17 2014
@author: steven_c
"""
acidtest = """Can you talk piglatin to piglatin.
"""
quotes = """A Tale of Two Cities LITE(tm)
-- by Charles Dickens
A lawyer who looks like a French Nobleman is executed in his place.
The Metamorphosis LITE(tm)
-- by Franz Kafka
A man turns into a bug and his family gets annoyed.
Lord of the Rings LITE(tm)
-- by J. R. R. Tolkien
Some guys take a long vacation to throw a ring into a volcano.
Hamlet LITE(tm)
-- by Wm. Shakespeare
A college student on vacation with family problems, a screwy
girl-friend and a mother who won't act her age.
"""
paragraphs = """For many people (myself among them), the Python language is easy to fall in love with.
Since its first appearance in 1991, Python has become one of the most popular dynamic,
programming languages, along with Perl, Ruby, and others. Python and Ruby have
become especially popular in recent years for building websites using their numerous
web frameworks, like Rails (Ruby) and Django (Python). Such languages are often
called scripting languages as they can be used to write quick-and-dirty small programs,
or scripts. I don’t like the term “scripting language” as it carries a connotation that they
cannot be used for building mission-critical software. Among interpreted languages
Python is distinguished by its large and active scientific computing community.
Adoption of Python for scientific computing in both industry applications and academic
research has increased significantly since the early 2000s.
For data analysis and interactive, exploratory computing and data visualization, Python
will inevitably draw comparisons with the many other domain-specific open source
and commercial programming languages and tools in wide use, such as R, MATLAB,
SAS, Stata, and others. In recent years, Python’s improved library support (primarily
pandas) has made it a strong alternative for data manipulation tasks. Combined with
Python’s strength in general purpose programming, it is an excellent choice as a single
language for building data-centric applications.
"""
simplepgs = """Simple test.
Paragraphs. test.
Derp, derp a.
Simple test.
Let's sentence ma'am let's full of ain't contractions I'm i'm couldn't've I'd.
Fred's stapler.
Fred's going to the movie.
O'clock o'clock.
Paragraphs. test.
Derp, derp.
"""
contsentence = "Let's sentence ma'am let's full of ain't contractions I'm i'm couldn't've I'd."
sentence = 'If capture groups are used, then the matched text is also included in the result.'
listofwords = ['Pig Latin',
'hello',
'switch',
'glove',
'fruit smoothie',
'egg',
'ultimate',
'I',
'yellow',
'my',
'rhythm',
'436',
'5',
]
txt = """
The Gettysburg Address
Four score and seven years ago our fathers brought forth on this continent,
a new nation, conceived in Liberty, and dedicated to the proposition that all
men are created equal.
Now we are engaged in a great civil war, testing whether that nation, or any
nation so conceived and so dedicated, can long endure. We are met on a great
battlefield of that war. We have come to dedicate a portion of that field, as
a final resting place for those who here gave their lives that that nation
might live. It is altogether fitting and proper that we should do this.
But, in a larger sense, we cannot dedicate - we cannot consecrate - we cannot
hallow - this ground. The brave men, living and dead, who struggled here, have
consecrated it, far above our poor power to add or detract. The world will
little note, nor long remember what we say here, but it can never forget what
they did here. It is for us the living, rather, to be dedicated here to the
unfinished work which they who fought here have thus far so nobly advanced.
It is rather for us to be here dedicated to the great task remaining before
us - that from these honored dead we take increased devotion to that cause for
which they gave the last full measure of devotion - that we here highly resolve
that these dead shall not have died in vain - that this nation, under God,
shall have a new birth of freedom - and that government of the people, by
the people, for the people, shall not perish from the earth.
"""
paragraphs_og = """For many people (myself among them), the Python language is easy to fall in love with.
Since its first appearance in 1991, Python has become one of the most popular dynamic,
programming languages, along with Perl, Ruby, and others. Python and Ruby have
become especially popular in recent years for building websites using their numerous
web frameworks, like Rails (Ruby) and Django (Python). Such languages are often
called scripting languages as they can be used to write quick-and-dirty small programs,
or scripts. I don’t like the term “scripting language” as it carries a connotation that they
cannot be used for building mission-critical software. Among interpreted languages
Python is distinguished by its large and active scientific computing community. Adop-
tion of Python for scientific computing in both industry applications and academic
research has increased significantly since the early 2000s.
For data analysis and interactive, exploratory computing and data visualization, Python
will inevitably draw comparisons with the many other domain-specific open source
and commercial programming languages and tools in wide use, such as R, MATLAB,
SAS, Stata, and others. In recent years, Python’s improved library support (primarily
pandas) has made it a strong alternative for data manipulation tasks. Combined with
Python’s strength in general purpose programming, it is an excellent choice as a single
language for building data-centric applications.
"""
| mit |
Duncan93/dbm-project2 | Ttest.py | 1 | 5148 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 22 22:52:24 2015
@author: lorraine
"""
import json
from pprint import pprint
import numpy as np
from scipy.stats import mstats
from scipy import stats
import csv
import pandas as pd
#json_data=open("data/{0}_B.json".format("pizza")).read()
#data = json.loads(json_data)
#pprint(data)
def normaltest_data(category):
data,population = load_rating_data(category)
z,pval = mstats.normaltest(data)
print(category+" p value is "+str(pval))
if(pval < 0.01):
print "Not normal distribution"
else:
print "normal"
# normaltest_data
# The null hypothesis is that the pizza ratings across all states follow a normal distribution.
# A significance level of 0.01 was chosen.
# Since the calculated p value is greater than the significance level, we do not reject the null hypothesis.
# Therefore we can safely assume the ratings follow a normal distribution.
# Suppose the nationwide top-40 pizza rating is 4.0; the one-sample t-test returns a p value of 0.0019 < significance level = 0.05,
# so we can reject the null hypothesis: the data do not support a population mean of 4.0.
#one-sided t-test, H0: score = 4.0, H1: score < 4.0
# As t<0 & p/2<alpha, we reject null hypothesis. Enough evidence to conclude best pizza score < 4.0
#assume the best pizza and best chinese have the same score in the population
# p-val = 2.32e-07 < 0.01, reject the null hypothesis; we cannot conclude that the best scores are the same.
# One-tailed greater-than test. H0: pizza = chinese, H1: pizza > chinese.
# As t>0 and p/2<alpha, we reject the null hypothesis. Enough evidence to conclude that the best pizza score is significantly greater than the best chinese food score.
# Two-sided p-val=0.003<0.01, t>0, reject null.
# H0: best pizza score = best mexican, H1: best pizza > best mexican.
#As t>0 and p/2<alpha, we reject null hypothesis. Best pizza is significantly greater than best mexican
#H0: best chinese = best mexican
#H1: best chinese not equal
# p>0.01, do not reject null. Mexican rating is not significantly different than Chinese
#assume the best pizza and the best bar have the same score in the population
# p-val=0.64 > 0.05, do not reject the null hypothesis. The best bar score is not significantly different from the best pizza score.
def anova_test(cat1,cat2,cat3,cat4):
x1,pop1=load_rating_data(cat1)
x2,pop2=load_rating_data(cat2)
x3,pop3=load_rating_data(cat3)
x4,pop4=load_rating_data(cat4)
F_val, p_val_anova = stats.f_oneway(x1,x2,x3,x4)
print("anova f val"+str(F_val))
print("anova p val"+str(p_val_anova))
# ANOVA null hypothesis: the population means of the best pizza, bar, chinese and mexican restaurant ratings are the same
#p_val=1.13e-05<0.01, reject null hypothesis
#need to state the assumption of Anova Test
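# (one-way ANOVA assumes independent samples, approximately normal residuals and equal variances across groups)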
def pearson_rapop(category):
rating,population = load_rating_data(category)
pearson, p_val = stats.pearsonr(rating,population)
print("pearson rapop is "+str(pearson))
print("pearson rapop p_val is "+str(p_val))
# pearson coefficient = 0.23, 0.20 < pearson < 0.29, weak positive correlation
# p_val=0.09>0.05, H0: there is no statistically significant relationship between the two variables
# do not reject null hypothesis
def load_rating_data(category):
with open("data/{0}_B.json".format(category),"r") as f:
cat = f.read()
cat = json.loads(cat)
rating=[]
population=[]
for i in xrange(len(cat[category])):
score = cat[category][i].values()
rating.append(score[0]["rating"])
population.append(score[0]["population"])
return rating,population
def pearson_raAge(category):
rating,population = load_rating_data(category)
rating = np.array(rating)
population=np.array(population)
age = []
f = open('data/MedianAge.csv')
csv_f = csv.reader(f)
for row in csv_f:
age.append(float(row[2]))
#rating = np.array(rating)
age=np.array(age)
pearson, p_val = stats.pearsonr(rating,age)
print("pearson raAge is "+str(pearson))
print("pearson raAge p_val is "+str(p_val))
#negligible correlation between rating and median age
def one_sample_ttest(category,base):
rating,population=load_rating_data(category)
rating = np.array(rating)
population=np.array(population)
t4, prob4 = stats.ttest_1samp(rating,base)
print("t value of "+category+str(t4))
print("p value of "+category+str(prob4))
def two_sample_ttest(category1, category2):
data1,population1=load_rating_data(category1)
data1 = np.array(data1)
data2,population2=load_rating_data(category2)
data2 = np.array(data2)
t, prob = stats.ttest_rel(data1,data2)
print("t value of "+ category1+category2+str(t))
print("p value of "+ category1+category2+str(prob))
category_filter = ["pizza","chinese","mexican","bars"]
#for category in category_filter:
normaltest_data("pizza")
# pearson_raAge("pizza")
# pearson_rapop("pizza")
# one_sample_ttest("pizza",4)
# two_sample_ttest("pizza","chinese")
# anova_test("pizza","chinese","mexican","bars")
| mit |
Obus/scikit-learn | examples/feature_stacker.py | 246 | 1906 | """
=================================================
Concatenating multiple feature extraction methods
=================================================
In many real-world examples, there are many ways to extract features from a
dataset. Often it is beneficial to combine several methods to obtain good
performance. This example shows how to use ``FeatureUnion`` to combine
features obtained by PCA and univariate selection.
Combining features using this transformer has the benefit that it allows
cross validation and grid searches over the whole process.
The combination used in this example is not particularly helpful on this
dataset and is only used to illustrate the usage of FeatureUnion.
"""
# Author: Andreas Mueller <amueller@ais.uni-bonn.de>
#
# License: BSD 3 clause
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.grid_search import GridSearchCV
from sklearn.svm import SVC
from sklearn.datasets import load_iris
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
iris = load_iris()
X, y = iris.data, iris.target
# This dataset is way too high-dimensional. Better do PCA:
pca = PCA(n_components=2)
# Maybe some original features were good, too?
selection = SelectKBest(k=1)
# Build estimator from PCA and Univariate selection:
combined_features = FeatureUnion([("pca", pca), ("univ_select", selection)])
# Use combined features to transform dataset:
X_features = combined_features.fit(X, y).transform(X)
svm = SVC(kernel="linear")
# Do grid search over k, n_components and C:
pipeline = Pipeline([("features", combined_features), ("svm", svm)])
param_grid = dict(features__pca__n_components=[1, 2, 3],
features__univ_select__k=[1, 2],
svm__C=[0.1, 1, 10])
grid_search = GridSearchCV(pipeline, param_grid=param_grid, verbose=10)
grid_search.fit(X, y)
print(grid_search.best_estimator_)
| bsd-3-clause |
mindriot101/bokeh | sphinx/source/docs/user_guide/examples/categorical_heatmap_unemployment.py | 11 | 1638 | import pandas as pd
from bokeh.io import output_file, show
from bokeh.models import BasicTicker, ColorBar, ColumnDataSource, LinearColorMapper, PrintfTickFormatter
from bokeh.plotting import figure
from bokeh.sampledata.unemployment1948 import data
from bokeh.transform import transform
output_file("unemployment.html")
data.Year = data.Year.astype(str)
data = data.set_index('Year')
data.drop('Annual', axis=1, inplace=True)
data.columns.name = 'Month'
# reshape to 1D array of rates with a month and year for each row.
df = pd.DataFrame(data.stack(), columns=['rate']).reset_index()
source = ColumnDataSource(df)
# this is the colormap from the original NYTimes plot
colors = ["#75968f", "#a5bab7", "#c9d9d3", "#e2e2e2", "#dfccce", "#ddb7b1", "#cc7878", "#933b41", "#550b1d"]
mapper = LinearColorMapper(palette=colors, low=df.rate.min(), high=df.rate.max())
p = figure(plot_width=800, plot_height=300, title="US Unemployment 1948—2016",
x_range=list(data.index), y_range=list(reversed(data.columns)),
toolbar_location=None, tools="", x_axis_location="above")
p.rect(x="Year", y="Month", width=1, height=1, source=source,
line_color=None, fill_color=transform('rate', mapper))
color_bar = ColorBar(color_mapper=mapper, location=(0, 0),
ticker=BasicTicker(desired_num_ticks=len(colors)),
formatter=PrintfTickFormatter(format="%d%%"))
p.add_layout(color_bar, 'right')
p.axis.axis_line_color = None
p.axis.major_tick_line_color = None
p.axis.major_label_text_font_size = "5pt"
p.axis.major_label_standoff = 0
p.xaxis.major_label_orientation = 1.0
show(p)
| bsd-3-clause |
kyleabeauchamp/vcfnp | example.py | 2 | 1198 | from __future__ import print_function, division
import numpy as np
import matplotlib.pyplot as plt
import vcfnp
vcfnp.__version__
filename = 'fixture/sample.vcf'
# load data from fixed fields (including INFO)
v = vcfnp.variants(filename, cache=True).view(np.recarray)
# print some simple variant metrics
print('found %s variants (%s SNPs)' % (v.size, np.count_nonzero(v.is_snp)))
print('QUAL mean (std): %s (%s)' % (np.mean(v.QUAL), np.std(v.QUAL)))
# plot a histogram of variant depth
fig = plt.figure(1)
ax = fig.add_subplot(111)
ax.hist(v.DP)
ax.set_title('DP histogram')
ax.set_xlabel('DP')
plt.show()
# load data from sample columns
c = vcfnp.calldata_2d(filename, cache=True).view(np.recarray)
# print some simple genotype metrics
count_phased = np.count_nonzero(c.is_phased)
count_variant = np.count_nonzero(np.any(c.genotype > 0, axis=2))
count_missing = np.count_nonzero(~c.is_called)
print('calls (phased, variant, missing): %s (%s, %s, %s)'
% (c.flatten().size, count_phased, count_variant, count_missing))
# plot a histogram of genotype quality
fig = plt.figure(2)
ax = fig.add_subplot(111)
ax.hist(c.GQ.flatten())
ax.set_title('GQ histogram')
ax.set_xlabel('GQ')
plt.show()
| mit |
sumitsourabh/opencog | opencog/python/utility/functions.py | 34 | 11056 | from math import fabs, isnan
from datetime import datetime
from spatiotemporal.unix_time import UnixTime
from utility.generic import convert_dict_to_sorted_lists
from utility.numeric.globals import EPSILON
from numpy import NINF as NEGATIVE_INFINITY, PINF as POSITIVE_INFINITY
from scipy.integrate import quad
__author__ = 'keyvan'
def integral(function, start, end):
if hasattr(function, 'integral'):
return function.integral(start, end)
area, error = quad(function, start, end)
return area
def almost_equals(a, b, epsilon=EPSILON):
if fabs(a - b) < epsilon:
return True
return False
def invoke_method_on(method, sequence_or_point):
if method is None:
return None
if not callable(method):
raise TypeError("'method' is not callable")
result = []
try:
for point in sequence_or_point:
if type(point) is datetime:
point = UnixTime(point)
result.append(method(point))
except TypeError:
if type(sequence_or_point) is datetime:
sequence_or_point = UnixTime(sequence_or_point)
return method(sequence_or_point)
return result
def index_of_first_local_maximum(sequence):
first_time = True
index = 0
for element in sequence:
if first_time:
previous = element
first_time = False
continue
if element <= previous:
return index
previous = element
index += 1
return None
class Function(object):
_domain = None
_range = None
_function_undefined = None
def __init__(self, function_undefined=None, domain=None):
if function_undefined is not None:
self.function_undefined = function_undefined
if domain is not None:
if not hasattr(domain, '__iter__') or not hasattr(domain, '__getitem__'):
raise TypeError("'domain' should be iterable and support indexing")
self._domain = domain
def call_on_single_point(self, x):
"""
to override, __call__ invokes this to handle both points and sequences
"""
return 0
def derivative(self, point):
return None
def _check_domain_for(self, feature_name):
if self.domain is None:
raise TypeError("'{0}' object does not support {1}, 'domain' should be specified".format(
self.__class__.__name__, feature_name))
def plot(self, plt=None):
self._check_domain_for('plotting')
if plt is None:
import matplotlib.pyplot as plt
plt.plot(self.domain, self.range)
return plt
@property
def function_undefined(self):
return self._function_undefined
@function_undefined.setter
def function_undefined(self, value):
if value is not None and not isinstance(value, Function):
raise TypeError("'function_undefined' should be of type 'Function'")
self._function_undefined = value
@property
def domain(self):
return self._domain
@property
def range(self):
return self()
def __call__(self, x=None):
if x is None:
self._check_domain_for("call with 'None'")
x = self.domain
return invoke_method_on(self.call_on_single_point, x)
def __getitem__(self, index):
self._check_domain_for('indexing')
return self.range[index]
def __len__(self):
self._check_domain_for('len()')
return len(self.range)
def __iter__(self):
self._check_domain_for('iter()')
return iter(self.range)
def __reversed__(self):
self._check_domain_for('reversed()')
return reversed(self.range)
class FunctionLinear(Function):
def __init__(self, a=None, b=None, x_0=None, y_0=None, x_1=None, y_1=None):
#(x_0, y_0), (x_1, y_1) = sorted([(x_0, y_0), (x_1, y_1)])
if (a, b) == (None, None):
a = (float(y_1) - y_0) / (x_1 - x_0)
b = y_0 - a * x_0
if isnan(a) or isnan(b):
pass
self.a = a
self.b = b
def call_on_single_point(self, x):
return float(self.a * x + self.b)
def intersect(self, other):
if almost_equals(self.a, other.a):
return None
x = (float(other.b) - self.b) / (self.a - other.a)
return x, self(x)
def integral(self, start, end):
if start >= end:
return 0
if self.a == 0:
return self.b * (end - start)
x_intercept = self.x_intercept
if start > x_intercept or end < x_intercept or almost_equals(end, x_intercept) or almost_equals(start, x_intercept):
return (self(start) + self(end)) * (end - start) / 2.0
minus_triangle = (x_intercept - start) * self(start)
plus_triangle = (end - x_intercept) * self(end)
return minus_triangle + plus_triangle
def derivative(self, point):
return self.a
@property
def x_intercept(self):
return - float(self.b) / self.a
@property
def y_intercept(self):
return self(0)
class FunctionHorizontalLinear(FunctionLinear):
def __init__(self, y_intercept):
FunctionLinear.__init__(self, a=0, b=y_intercept)
def call_on_single_point(self, x):
return self.b
def integral(self, start, end):
if start >= end:
return 0
if almost_equals(self.b, 0):
return 0
return float(self.b) * (end - start)
def derivative(self, point):
return 0
FUNCTION_ZERO = FunctionHorizontalLinear(0)
FUNCTION_ONE = FunctionHorizontalLinear(1)
class FunctionComposite(Function):
is_normalised = False
def __init__(self, dictionary_bounds_function, function_undefined=None, domain=None, is_normalised=False):
if is_normalised is not False:
self.is_normalised = True
Function.__init__(self, function_undefined=function_undefined, domain=domain)
if not isinstance(dictionary_bounds_function, dict):
raise TypeError("'dictionary_bounds_function' should be a dictionary with (lower_bound, higher_bound) "
"tuple keys and values of type 'Function'")
self._dictionary_bounds_function = dictionary_bounds_function
def call_on_single_point(self, x):
for function_bounds in self.dictionary_bounds_function:
(a, b) = function_bounds
if a <= x:
if b >= x:
if self.dictionary_bounds_function[function_bounds] is None:
return None
return self.dictionary_bounds_function[function_bounds](x)
return self.function_undefined(x)
def integral(self, start, end):
if self.is_normalised and self.domain is not None:
if (start < self.domain[0] or almost_equals(start, self.domain[0])) and (
end > self.domain[-1] or almost_equals(end, self.domain[-1])):
return 1.0
if start >= end:
return 0
result = 0
for function_bounds in self.dictionary_bounds_function:
(a, b) = function_bounds
if a <= start:
if b >= end:
return self.dictionary_bounds_function[function_bounds].integral(start, end)
not_ordered = {
(start, 0): 's', (end, 0): 'e',
(a, 1): 'a', (b, 1): 'b'
}
order = ''.join([not_ordered[i] for i in sorted(not_ordered)])
if (a == start or a == end) and order == 'saeb' or (b == start or b == end) and order == 'asbe':
continue
if order in 'seab abse':
continue
if order == 'saeb':
b = end
elif order == 'asbe':
a = start
result += self.dictionary_bounds_function[function_bounds].integral(a, b)
return result
def find_bounds_for(self, point):
for bounds in self.dictionary_bounds_function:
(a, b) = bounds
if a <= point and b >= point:
return bounds
def derivative(self, point):
return self.dictionary_bounds_function[self.find_bounds_for(point)].derivative(point)
def function_in_point(self, point):
for bounds in self.dictionary_bounds_function:
a, b = bounds
if a <= point <= b:
return self.dictionary_bounds_function[bounds]
return None
# def functions_in_interval(self, interval_start, interval_end):
# dictionary_bounds_function = {}
# for bounds in self.dictionary_bounds_function:
# a, b = bounds
# if (interval_start < a or almost_equals(interval_start, a)) and (
#
# ):
@property
def dictionary_bounds_function(self):
return self._dictionary_bounds_function
class FunctionPiecewiseLinear(FunctionComposite):
def __init__(self, dictionary_input_output, function_undefined=None, is_normalised=False):
self.input_list, self.output_list = convert_dict_to_sorted_lists(dictionary_input_output)
dictionary_bounds_function = {}
for i in xrange(1, len(self.input_list)):
x_0, x_1 = self.input_list[i - 1], self.input_list[i]
y_0, y_1 = self.output_list[i - 1], self.output_list[i]
dictionary_bounds_function[(x_0, x_1)] = FunctionLinear(x_0=x_0, x_1=x_1, y_0=y_0, y_1=y_1)
if NEGATIVE_INFINITY not in self.input_list:
dictionary_bounds_function[(NEGATIVE_INFINITY, self.input_list[0])] = function_undefined
if POSITIVE_INFINITY not in self.input_list:
dictionary_bounds_function[(self.input_list[-1], POSITIVE_INFINITY)] = function_undefined
FunctionComposite.__init__(self, dictionary_bounds_function,
function_undefined=function_undefined,
domain=self.input_list,
is_normalised=is_normalised)
def normalised(self):
area = self.integral(NEGATIVE_INFINITY, POSITIVE_INFINITY)
if almost_equals(area, 0):
area = self.integral(NEGATIVE_INFINITY, POSITIVE_INFINITY)
dictionary_input_output = {}
output_list = [y / area for y in self.output_list]
for i in xrange(len(self.input_list)):
dictionary_input_output[self.input_list[i]] = output_list[i]
result = FunctionPiecewiseLinear(dictionary_input_output, function_undefined=self.function_undefined)
result.is_normalised = True
return result
def __and__(self, other):
for bounds in self.dictionary_bounds_function:
a, b = bounds
linear_function = self.dictionary_bounds_function[bounds]
if __name__ == '__main__':
a = FunctionLinear(1, 0)
b = FunctionLinear(-1, 1)
print a.intersect(b)
| agpl-3.0 |
theandygross/Figures | src/Figures/Boxplots.py | 1 | 11851 | """
Created on Apr 24, 2013
@author: agross
"""
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import Stats.Scipy as Stats
from Figures.FigureHelpers import latex_float, init_ax
from Figures.FigureHelpers import prettify_ax
from Helpers.Pandas import match_series, true_index
colors = plt.rcParams['axes.color_cycle'] * 10
def _violin_plot(ax, data, pos=[], bp=False):
"""
http://pyinsci.blogspot.com/2009/09/violin-plot-with-matplotlib.html
Create violin plots on an axis. Internal to module as it does not
use Pandas data-structures. This is split off due to its being a
reuse of the code from the blog-post linked above, and I wanted to keep
the original code untouched.
"""
from scipy.stats import gaussian_kde
from numpy import arange
# dist = max(pos)-min(pos)
dist = len(pos)
w = min(0.25 * max(dist, 1.0), 0.5)
for p, d in enumerate(data):
try:
k = gaussian_kde(d) # calculates the kernel density
m = k.dataset.min() # lower bound of violin
M = k.dataset.max() # upper bound of violin
x = arange(m, M, (M - m) / 100.) # support for violin
v = k.evaluate(x) # violin profile (density curve)
v = v / v.max() * w # scaling the violin to the available space
ax.fill_betweenx(x, p, v + p, facecolor='y', alpha=0.1)
ax.fill_betweenx(x, p, -v + p, facecolor='y', alpha=0.1)
except:
pass
if bp:
box_plot = ax.boxplot(data, notch=1, positions=range(len(pos)), vert=1,
widths=.25)
return box_plot
def box_plot_pandas(bin_vec, real_vec, ax=None, order=None):
"""
Wrapper around matplotlib's boxplot function.
Inputs
bin_vec: Series of labels
real_vec: Series of measurements to be grouped according to bin_vec
"""
_, ax = init_ax(ax)
bin_vec, real_vec = match_series(bin_vec, real_vec)
if order is not None:
categories = order
else:
categories = bin_vec.value_counts().index
data = [real_vec[bin_vec == num] for num in categories]
bp = ax.boxplot(data, positions=range(len(categories)), widths=.3,
patch_artist=True)
if real_vec.name:
ax.set_ylabel(real_vec.name)
if bin_vec.name:
ax.set_xlabel(bin_vec.name)
ax.set_xticklabels(categories)
[p.set_visible(False) for p in bp['fliers']]
[p.set_visible(False) for p in bp['caps']]
[p.set_visible(False) for p in bp['whiskers']]
for p in bp['medians']:
p.set_color(colors[0])
p.set_lw(3)
p.set_alpha(.8)
for i, p in enumerate(bp['boxes']):
p.set_color('grey')
p.set_lw(3)
p.set_alpha(.7)
if len(data[i]) < 3:
p.set_alpha(0)
def violin_plot_pandas(bin_vec, real_vec, ann='p', order=None, ax=None,
filename=None):
"""
http://pyinsci.blogspot.com/2009/09/violin-plot-with-matplotlib.html
Wrapper around matplotlib's boxplot function to add violin profile.
Inputs
bin_vec: Series of labels
real_vec: Series of measurements to be grouped according to bin_vec
"""
fig, ax = init_ax(ax)
ax.set_ylabel(real_vec.name)
ax.set_xlabel(bin_vec.name)
bin_vec, real_vec = match_series(bin_vec, real_vec)
try:
if order is None:
categories = bin_vec.value_counts().index
else:
categories = order
_violin_plot(ax, [real_vec[bin_vec == num] for num in categories],
pos=categories, bp=True)
ax.set_xticklabels([str(c) + '\n(n=%i)' % sum(bin_vec == c)
for c in categories])
except:
box_plot_pandas(bin_vec, real_vec, ax=ax)
#if type(bin_vec.name) == str:
# ax.set_title(str(bin_vec.name) + ' x ' + str(real_vec.name))
p_value = Stats.kruskal_pandas(bin_vec, real_vec)['p']
if ann == 'p_fancy':
ax.annotate('$p = {}$'.format(latex_float(p_value)), (.95, -.02),
xycoords='axes fraction', ha='right', va='bottom', size=14)
if ann == 'p':
ax.annotate('p = {0:.1e}'.format(p_value), (.95, .02),
xycoords='axes fraction', ha='right', va='bottom', size=12)
elif ann is not None:
ax.annotate(ann, (.95, .02), xycoords='axes fraction', ha='right',
va='bottom', size=12)
if filename is not None:
fig.savefig(filename)
return
def violin_plot_series(s, **kw_args):
"""
Wrapper for drawing a violin plot on a series with a multi-index.
The second level of the index is used as the binning variable.
"""
assert s.index.levshape[1] > 1
violin_plot_pandas(pd.Series(s.index.get_level_values(1), s.index), s,
**kw_args)
def paired_boxplot_o(boxes):
"""
Wrapper around plt.boxplot to draw paired boxplots
for a set of boxes.
Input is the same as plt.boxplot:
Array or a sequence of vectors.
"""
fig = plt.figure(figsize=(len(boxes) / 2.5, 4))
ax1 = fig.add_subplot(111)
plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
bp = ax1.boxplot(boxes, notch=0, positions=np.arange(len(boxes)) +
1.5 * (np.arange(len(boxes)) / 2), patch_artist=True)
[p.set_color(colors[0]) for p in bp['boxes'][::2]]
[p.set_color('black') for p in bp['whiskers']]
[p.set_color('black') for p in bp['fliers']]
[p.set_alpha(.4) for p in bp['fliers']]
[p.set_alpha(.6) for p in bp['boxes']]
[p.set_edgecolor('black') for p in bp['boxes']]
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# Hide the grid lines behind the plot objects
ax1.set_axisbelow(True)
ax1.set_ylabel('$Log_{2}$ RNA Expression')
ax1.set_xticks(3.5 * np.arange(len(boxes) / 2) + .5)
return ax1, bp
def paired_boxplot(boxes, ax1=None):
if not ax1:
fig = plt.figure(figsize=(len(boxes) / 2.5, 4))
ax1 = fig.add_subplot(111)
plt.subplots_adjust(left=0.075, right=0.95, top=0.9, bottom=0.25)
bp = ax1.boxplot(boxes, notch=0, positions=np.arange(len(boxes)) +
1.5 * (np.arange(len(boxes)) / 2), patch_artist=True)
[p.set_color(colors[0]) for p in bp['boxes'][::2]]
[p.set_color(colors[1]) for p in bp['boxes'][1::2]]
[p.set_color('black') for p in bp['whiskers']]
[p.set_color('black') for p in bp['fliers']]
[p.set_alpha(.4) for p in bp['fliers']]
[p.set_alpha(.8) for p in bp['boxes']]
[p.set_edgecolor('black') for p in bp['boxes']]
ax1.yaxis.grid(True, linestyle='-', which='major', color='lightgrey',
alpha=0.5)
# Hide the grid lines behind the plot objects
ax1.set_axisbelow(True)
ax1.set_ylabel('$Log_{2}$ RNA Expression')
ax1.set_xticks(3.5 * np.arange(len(boxes) / 2) + .5)
return ax1, bp
def paired_boxplot_tumor_normal(df, sig=True, cutoffs=[.01, .00001],
order=None, ax=None):
"""
Draws a paired boxplot given a DataFrame with both tumor and normal
samples on the index. '01' and '11' are hard-coded as the ids for
tumor/normal.
"""
n = df.groupby(level=0).size() == 2
df = df.ix[n[n].index]
if order is None:
o = df.xs('11', level=1).median().order().index
df = df[o[::-1]]
else:
df = df[order]
l1 = list(df.xs('01', level=1).as_matrix().T)
l2 = list(df.xs('11', level=1).as_matrix().T)
boxes = [x for t in zip(l1, l2) for x in t]
ax1, bp = paired_boxplot(boxes, ax)
test = lambda v: Stats.ttest_rel(v.unstack()['01'], v.unstack()['11'])
res = df.apply(test).T
p = res.p
if sig:
pts = [(i * 3.5 + .5, 18) for i, n in enumerate(p) if n < cutoffs[1]]
if len(pts) > 0:
s1 = ax1.scatter(*zip(*pts), marker='$**$', label='$p<10^{-5}$', s=200)
else:
s1 = None
pts = [(i * 3.5 + .5, 18) for i, n in enumerate(p)
if (n < cutoffs[0]) and (n > cutoffs[1])]
if len(pts) > 0:
s2 = ax1.scatter(*zip(*pts), marker='$*$', label='$p<10^{-2}$', s=30)
else:
s2 = None
ax1.legend(bp['boxes'][:2] + [s2, s1],
('Tumor', 'Normal', '$p<10^{-2}$', '$p<10^{-5}$'),
loc='best', scatterpoints=1)
else:
ax1.legend(bp['boxes'][:2], ('Tumor', 'Normal'), loc='best')
ax1.set_xticklabels(df.columns)
def boxplot_panel(hit_vec, response_df):
"""
Draws a series of paired boxplots with the rows of the response_df
split according to hit_vec.
"""
b = response_df.copy()
b.columns = pd.MultiIndex.from_arrays([b.columns, hit_vec.ix[b.columns]])
b = b.T
v1, v2 = hit_vec.unique()
test = lambda v: Stats.anova(v.reset_index(level=1)[v.index.names[1]],
v.reset_index(level=1)[v.name])
res = b.apply(test).T
p = res.p.order()
b = b.ix[:, p.index]
l1 = list(b.xs(v1, level=1).as_matrix().T)
l2 = list(b.xs(v2, level=1).as_matrix().T)
boxes = [x for t in zip(l1, l2) for x in t]
ax1, bp = paired_boxplot(boxes)
y_lim = (response_df.T.quantile(.9).max()) * 1.2
pts = [(i * 3.5 + .5, y_lim) for i, n in enumerate(p) if n < .00001]
if len(pts) > 0:
s1 = ax1.scatter(*zip(*pts), marker='$**$', label='$p<10^{-5}$', s=200)
else:
s1 = None
pts = [(i * 3.5 + .5, y_lim) for i, n in enumerate(p) if (n < .01)
and (n > .00001)]
if len(pts) > 0:
s2 = ax1.scatter(*zip(*pts), marker='$*$', label='$p<10^{-2}$', s=30)
else:
s2 = None
ax1.set_xticklabels(b.columns)
ax1.legend(bp['boxes'][:2] + [s2, s1],
(v1, v2, '$p<10^{-2}$', '$p<10^{-5}$'),
loc='best', scatterpoints=1)
def paired_bp_tn_split(vec, assignment, ax=None, split_vals=('01', '11'),
data_type='gene expression'):
"""
Paired boxplot for a single Series, with splitting on the index,
grouped by assignment. I.E. Tumor-Normal gene expression split by
cancer.
vec:
vector of values to plot.
assignment:
vector mapping keys to group assignment
ax (None):
matplotlib axis to plot on or None
split_vals ('01','11'):
Values to split the boxplot pairing on. The default of
('01','11') indicates tumor vs. normal in the standard
TCGA barcode nomenclature. This should coorespond to values
on the second level of the index for vec and assignment.
**both vec and assignment should have an overlapping index with
multiple levels**
"""
_, ax = init_ax(ax, figsize=(8, 3))
if vec.name != None:
label = vec.name # lose label in manipulation
else:
label = ''
g1 = split_vals[0]
g2 = split_vals[1]
vec = pd.concat([vec[:, g1], vec[:, g2]], keys=[g1, g2],
axis=1)
vec = vec.dropna().stack()
counts = vec.unstack().groupby(assignment).size()
groups = list(true_index(counts > 5))
groups = vec.unstack().groupby(assignment).median()[g1].ix[groups]
groups = groups.order().index[::-1]
l1 = [np.array(vec[:, g1].ix[true_index(assignment == c)].dropna())
for c in groups]
l2 = [np.array(vec[:, g2].ix[true_index(assignment == c)].dropna())
for c in groups]
boxes = [x for t in zip(l1, l2) for x in t if len(t[1]) > 5]
ax, bp = paired_boxplot(boxes, ax)
labels = ['{}\n({})'.format(c, counts[c]) for c in groups]
ax.set_xticklabels(labels)
prettify_ax(ax)
ax.set_ylabel('{} {}'.format(label, data_type))
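# A minimal usage sketch (illustrative, not part of the original module): the group
# labels and measurements below are synthetic and exist only to show the expected inputs.
if __name__ == '__main__':
    toy_labels = pd.Series(np.random.randint(0, 2, 200), name='group')
    toy_values = pd.Series(np.random.randn(200), name='measurement')
    violin_plot_pandas(toy_labels, toy_values)
    plt.show()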
| mit |
cg31/tensorflow | tensorflow/examples/learn/iris_with_pipeline.py | 17 | 1848 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset, with pipeline."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn.datasets import load_iris
from sklearn.metrics import accuracy_score
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.contrib import learn
def main(unused_argv):
iris = load_iris()
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# It's useful to scale to ensure Stochastic Gradient Descent
# will do the right thing.
scaler = StandardScaler()
# DNN classifier.
classifier = learn.DNNClassifier(
feature_columns=learn.infer_real_valued_columns_from_input(x_train),
hidden_units=[10, 20, 10], n_classes=3)
pipeline = Pipeline([('scaler', scaler),
('DNNclassifier', classifier)])
pipeline.fit(x_train, y_train, DNNclassifier__steps=200)
score = accuracy_score(y_test, pipeline.predict(x_test))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
Machyne/econ_comps | full_2011.py | 1 | 6827 | import os
import numpy as np
import pandas as pd
from pandas.tools.plotting import scatter_matrix
import pylab
import statsmodels.formula.api as smf
import statsmodels.stats.api as sms
from industry_to_days import get_census_mapper
"""
USAGE:
python full_2011.py
CREATES:
results/2011/clean.csv
results/2011/corr.txt
results/2011/het_breushpagan.txt
results/2011/ols1.txt
results/2011/ols2.txt
results/2011/scatter_matrix.png
results/2011/summary.txt
"""
COL_ORDER = ['vacation', 'paid_vacation', 'age', 'fam_size', 'is_female',
'income10', 'salary', 'is_employed']
PSID_CSV = os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'psid', '2011.csv'))
def get_f_path(fname):
return os.path.abspath(
os.path.join(
os.path.dirname(__file__),
'results', '2011', fname))
CLEAN_CSV = get_f_path('clean.csv')
CORR_TXT = get_f_path('corr.txt')
HET_BP_TXT = get_f_path('het_breushpagan.txt')
OLS1_TXT = get_f_path('ols1.txt')
OLS2_TXT = get_f_path('ols2.txt')
SCAT_MATRIX_PNG = get_f_path('scatter_matrix.png')
SUMMARY_TXT = get_f_path('summary.txt')
f_exists = (lambda file_: os.path.isfile(file_))
def _calc_vacation(row):
took, days, weeks, months = (row['took_vac'], row['days_vac'],
row['weeks_vac'], row['months_vac'])
if took in [8, 9] or (days in [998, 999]) or (months in [98, 99]) or (
weeks in [98, 99]):
return np.nan
elif took == 5:
return 0
else:
return days + (5 * weeks) + (22 * months)
def _calc_salary(row):
amt, unit = row['salary_amt'], row['salary_unit']
if amt in [0.0, 9999998.0] or unit in [0, 7, 8, 9]:
return np.nan
if unit == 3: # one week
scalar = 52.0
elif unit == 4: # two weeks
scalar = 26.0
elif unit == 5: # one month
scalar = 12.0
elif unit == 6: # one year
scalar = 1.0
return scalar * amt
def clean(df):
# make sex into dummy for is_female
df['is_female'] = df['sex'] - 1
# remove unknown age values
df.age = df.age.replace(999, np.nan)
# figure out total vacation taken
df['vacation'] = df.apply(_calc_vacation, axis=1)
# fix salary to be annual amount
df['salary'] = df.apply(_calc_salary, axis=1)
# remove outliers
df.ix[df.salary < 1e3] = np.nan
df.ix[df.salary >= 400e3] = np.nan
df.ix[df.income10 < 1e3] = np.nan
df.ix[df.income10 >= 400e3] = np.nan
# make employment into dummy for is_employed
df['is_employed'] = df.employment
# remove all those not working
for i in range(2,10) + [99]:
df.is_employed.replace(i, 0, inplace=True)
# merge industry data
df['paid_vacation'] = df.industry.map(get_census_mapper())
# drop old values
for col in ['took_vac', 'days_vac', 'weeks_vac', 'months_vac', 'industry',
'salary_amt', 'salary_unit', 'sex', 'employment']:
df.drop(col, axis=1, inplace=True)
df = df.reindex_axis(sorted(df.columns, key=COL_ORDER.index), axis=1)
return df
def do_stats(df):
# Only view those that received vacation and are employed
df.is_employed.replace(0.0, np.nan, inplace=True)
df.paid_vacation.replace(0.0, np.nan, inplace=True)
df.dropna(inplace=True)
# No longer need this dummy
df.drop('is_employed', axis=1, inplace=True)
# Summary stats
if not f_exists(SUMMARY_TXT):
summary = df.describe().T
summary = np.round(summary, decimals=3)
with open(SUMMARY_TXT, 'w') as f:
f.write(summary.to_string())
# Check for multicollinearity: scatter matrix, correlation, run OLS
if not f_exists(SCAT_MATRIX_PNG):
scatter_matrix(df, alpha=0.2, figsize=(64, 64), diagonal='hist')
pylab.savefig(SCAT_MATRIX_PNG, bbox_inches='tight')
if not f_exists(CORR_TXT):
corr = df.corr()
corr = corr.reindex_axis(
sorted(corr.columns, key=COL_ORDER.index), axis=0)
corr = corr.reindex_axis(
sorted(corr.columns, key=COL_ORDER.index), axis=1)
for i, k in enumerate(corr):
row = corr[k]
for j in range(len(row)):
if j > i:
row[j] = np.nan
with open(CORR_TXT, 'w') as f:
f.write(np.round(corr, decimals=3).to_string(na_rep=''))
if not f_exists(OLS1_TXT):
ols_results = smf.ols(
formula='vacation ~ paid_vacation + np.square(paid_vacation) + '
'age + fam_size + is_female + income10 + salary + '
'np.square(salary)',
data=df).fit()
with open(OLS1_TXT, 'w') as f:
f.write(str(ols_results.summary()))
f.write('\n\nCondition Number: {}'.format(
np.linalg.cond(ols_results.model.exog)))
# Need to drop salary, too much collinearity with income
df.drop('salary', axis=1, inplace=True)
# Test for heteroskedasticity: Breusch-Pagan on the OLS residuals
if not f_exists(HET_BP_TXT):
ols_results = smf.ols(
formula='vacation ~ paid_vacation + np.square(paid_vacation) + '
'age + fam_size + is_female + income10',
data=df).fit()
names = ['LM', 'LM P val.', 'F Stat.', 'F Stat. P val.']
test = sms.het_breushpagan(ols_results.resid, ols_results.model.exog)
f_p = test[3]
with open(HET_BP_TXT, 'w') as f:
str_ = '\n'.join('{}: {}'.format(n, v)
for n, v in zip(names, test))
f.write(str_ + '\n\n')
# a small Breusch-Pagan p value is evidence of heteroskedasticity
if f_p < .01:
f.write('Warning: Heteroskedasticity found!\n')
else:
f.write('No Heteroskedasticity found.\n')
# no Heteroskedasticity found
# final OLS results with robust standard errors
if not f_exists(OLS2_TXT):
ols_results = smf.ols(
formula='vacation ~ paid_vacation + np.square(paid_vacation) + '
'age + fam_size + is_female + income10',
data=df).fit().get_robustcov_results(cov_type='HAC', maxlags=1)
with open(OLS2_TXT, 'w') as f:
f.write(str(ols_results.summary()))
f.write('\n\nCondition Number: {}'.format(
np.linalg.cond(ols_results.model.exog)))
return df
def main():
df = None
if f_exists(CLEAN_CSV):
df = pd.io.parsers.read_csv(CLEAN_CSV)
df.drop('Unnamed: 0', axis=1, inplace=True)
else:
with open(PSID_CSV) as csv:
df = pd.io.parsers.read_csv(csv)
df = clean(df)
# write output to a file
with open(CLEAN_CSV, 'w+') as csv:
df.to_csv(path_or_buf=csv)
return do_stats(df)
if __name__ == '__main__':
main()
print '2011 succeeds! :)'
| bsd-3-clause |
toastedcornflakes/scikit-learn | benchmarks/bench_plot_ward.py | 117 | 1283 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import matplotlib.pyplot as plt
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
plt.figure("scikit-learn Ward's method benchmark results")
plt.imshow(np.log(ratio), aspect='auto', origin="lower")
plt.colorbar()
plt.contour(ratio, levels=[1, ], colors='k')
plt.yticks(range(len(n_features)), n_features.astype(np.int))
plt.ylabel('N features')
plt.xticks(range(len(n_samples)), n_samples.astype(np.int))
plt.xlabel('N samples')
plt.title("Scikit's time, in units of scipy time (log)")
plt.show()
| bsd-3-clause |
gavruskin/microinteractions | data_preprocess_6_reps_with_negatives.py | 1 | 9983 | import pandas as pd
data = pd.read_csv("development_times_exp1and2.csv", sep="\t")
# Add all parameters (Taylor coefficients) as new columns initialised to 0:
for i in range(data.shape[0]):
for j in range(10, 42):
data.set_value(i, j, 0)
data.rename(columns={10: "a", 11: "a1", 12: "a2", 13: "a3", 14: "a4", 15: "a5",
16: "b12", 17: "b13", 18: "b14", 19: "b15", 20: "b23", 21: "b24",
22: "b25", 23: "b34", 24: "b35", 25: "b45", 26: "c123", 27: "c124",
28: "c125", 29: "c134", 30: "c135", 31: "c145", 32: "c234", 33: "c235",
34: "c245", 35: "c345", 36: "d1234", 37: "d1235", 38: "d1245",
39: "d1345", 40: "d2345", 41: "e12345"}, inplace=True)
# Change coefficients corresponding to present effects to 1:
for index, row in data.iterrows():
species = row["LP"] + row["LB"] + row["AP"] + row["AT"] + row["AO"]
if species == "YNNNN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
if species == "NYNNN":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
if species == "NNYNN":
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
if species == "NNNYN":
data.set_value(index, "a", 1)
data.set_value(index, "a4", 1)
if species == "NNNNY":
data.set_value(index, "a", 1)
data.set_value(index, "a5", 1)
if species == "YYNNN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "b12", -1)
if species == "YNYNN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b13", -1)
if species == "YNNYN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b14", -1)
if species == "YNNNY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b15", -1)
if species == "NYYNN":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b23", -1)
if species == "NYNYN":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b24", -1)
if species == "NYNNY":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b25", -1)
if species == "NNYYN":
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b34", -1)
if species == "NNYNY":
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b35", -1)
if species == "NNNYY":
data.set_value(index, "a", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b45", -1)
if species == "YYYNN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b13", -1)
data.set_value(index, "b23", -1)
data.set_value(index, "c123", 1)
if species == "YYNYN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "c124", 1)
if species == "YYNNY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "c125", 1)
if species == "NYYYN":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b23", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "c234", 1)
if species == "NNYYY":
data.set_value(index, "a", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b34", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c345", 1)
if species == "YNYYN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b13", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "c134", 1)
if species == "YNYNY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b13", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "c135", 1)
if species == "YNNYY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b14", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c145", 1)
if species == "NYNYY":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b24", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c245", 1)
if species == "NYYNY":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b23", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "c235", 1)
if species == "YYYYN":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b13", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b23", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "c123", 1)
data.set_value(index, "c124", 1)
data.set_value(index, "c134", 1)
data.set_value(index, "c234", 1)
data.set_value(index, "d1234", -1)
if species == "YYYNY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b13", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b23", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "c123", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "d1235", -1)
if species == "YYNYY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c124", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "d1245", -1)
if species == "YNYYY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b13", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c134", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d1345", -1)
if species == "NYYYY":
data.set_value(index, "a", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b23", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c234", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d2345", -1)
if species == "YYYYY":
data.set_value(index, "a", 1)
data.set_value(index, "a1", 1)
data.set_value(index, "a2", 1)
data.set_value(index, "a3", 1)
data.set_value(index, "a4", 1)
data.set_value(index, "a5", 1)
data.set_value(index, "b12", -1)
data.set_value(index, "b13", -1)
data.set_value(index, "b14", -1)
data.set_value(index, "b15", -1)
data.set_value(index, "b23", -1)
data.set_value(index, "b24", -1)
data.set_value(index, "b25", -1)
data.set_value(index, "b34", -1)
data.set_value(index, "b35", -1)
data.set_value(index, "b45", -1)
data.set_value(index, "c123", 1)
data.set_value(index, "c124", 1)
data.set_value(index, "c125", 1)
data.set_value(index, "c134", 1)
data.set_value(index, "c135", 1)
data.set_value(index, "c145", 1)
data.set_value(index, "c234", 1)
data.set_value(index, "c235", 1)
data.set_value(index, "c245", 1)
data.set_value(index, "c345", 1)
data.set_value(index, "d1234", -1)
data.set_value(index, "d1235", -1)
data.set_value(index, "d1245", -1)
data.set_value(index, "d1345", -1)
data.set_value(index, "d2345", -1)
data.set_value(index, "e12345", 1)
if species == "NNNNN":
data.set_value(index, "a", 1)
data.to_csv("fitness_summary_6_replicates_parameters.csv", sep="\t")
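# A compact alternative sketch (not used above): the same inclusion-exclusion pattern can
# be generated from the five-letter presence string, which may be easier to audit than the
# explicit if-chain. The helper name is illustrative.
from itertools import combinations
PREFIX = {1: "a", 2: "b", 3: "c", 4: "d", 5: "e"}
def taylor_coefficients(presence):
    # presence: 5-character string of "Y"/"N" for LP, LB, AP, AT, AO
    present = [i + 1 for i, flag in enumerate(presence) if flag == "Y"]
    coeffs = {"a": 1}
    for k in range(1, len(present) + 1):
        sign = 1 if k % 2 == 1 else -1  # singles +1, pairs -1, triples +1, ...
        for combo in combinations(present, k):
            coeffs[PREFIX[k] + "".join(str(i) for i in combo)] = sign
    return coeffs
# e.g. taylor_coefficients("YYNNN") -> {"a": 1, "a1": 1, "a2": 1, "b12": -1}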
| mit |
redarmy30/Eurobot-2017 | old year/RESET-master/Machine_vision/get_position.py | 2 | 3426 | #!/usr/bin/env python2
import numpy as np
import cv2
from matplotlib import pyplot as plt
from math import sin, cos, tan, sqrt, pi, atan
from operator import itemgetter
import timeit
#start = timeit.timeit()
h = 0.37 #the vertical distance from the ground to camera [in meters]
alpha = pi*(28.3)/180.0 #the inclination angle in degrees
F = 0.25 #the focal distance [in meters]0.00367
Nx = 640.0 #number of pixels along x axis on the focal plane
Ny = 480.0 #number of pixels along the y axis on the focal plane
psi = 78.0*pi/180.0 # maximum angular resolution in diagonal
Tetha = 2.0*atan((tan(psi/2.0))*3.0/5.0) # maximum resolution angle for vertical view
Fi = 2.0*atan((tan(psi/2.0))*4.0/5.0) # maximum resolution angle for horizontal view
#Initial calculations
gamma = pi/2.0 - alpha #calculate the inclination of focal plane
YM = F/cos(alpha) - h*tan(alpha)
YA = F*cos(alpha)
ZA = h - F*sin(alpha)
ksim = 2.0*F*tan(Tetha/2.0)
etham = 2.0*F*tan(Tetha/2.0)
# camera initialisation
#DEFINE CLASSIFICATION OF OBJECTS
params = cv2.SimpleBlobDetector_Params()
# Change thresholds
params.minThreshold = 1
params.maxThreshold = 2000
# Filter by Area.
params.filterByArea = 1
params.minArea = 1000
params.maxArea = 100000
"""# Filter by Circularity
params.filterByCircularity = True
params.minCircularity = 0.1"""
"""# Filter by Convexity
params.filterByConvexity = True
params.minConvexity = 0.1
params.maxConvexity = 1"""
"""# Filter by Inertia
params.filterByInertia = True
params.minInertiaRatio = 0"""
"""#Filter by color
params.filterByColor = 1
params.blobColor = 0;0;0"""
#detector = cv2.SimpleBlobDetector_create(params)
detector = cv2.SimpleBlobDetector(params) #- use this if line 57 returns error!!!
class GetObjectPosition(object):
def get_position(self):
cap = cv2.VideoCapture(0)
#cap.set(7, 15)
_, frame = cap.read()
im = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
keypoints = detector.detect(im)
im_with_keypoints = cv2.drawKeypoints(frame, keypoints, np.array([]), (0,0,255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
a = len (keypoints)
points = []
screenpoints = []
for keypoint in keypoints:
x0 = keypoint.pt[0]
y0 = keypoint.pt[1]
cx = x0
cy = y0
ksiE = cx*ksim/Nx
ethaE = cy*etham/Ny
Nx1 = ksim
Ny1 = etham
ksiA = ksiE - Nx1/2.0
ethaA = -(ethaE - Ny1/2.0)
YA1 = YA + ethaA*cos(gamma)
ZA1 = (YA1 - YM)*tan(gamma)
XA1 = ksiA
t = h/(h-ZA1)
X = XA1*t
Y = YA1*t
R0 = sqrt(X**2.0+Y**2.0)
X = int(X*1000.0)
Y = int(Y*1000.0)
R0 = int(R0*1000.0)
points.append((X, Y, R0))
screenpoints.append((x0,y0))
points1 = str(points)
cv2.imwrite('result.png',im_with_keypoints)
if not points:
return
z = sorted(points, key=itemgetter(2))
z1 = str(z)
b = z[0]
points = str(points)
file = open("result.txt", "w")
file.write("unsorted list")
file.write(points)#unsorted
file.write("\n")
file.write("sorted list")
file.write(z1)#sorted
file.write("\n")
file.write("The nearest object is:")
file.write(str(b))
file.close()
#img1 = cv2.imread('result.png')
#img2 = cv2.putText(img = img1,text = points,org = (0,Ny),fontFace = cv2.FONT_HERSHEY_DUPLEX,fontScale = 0.5,
#color = (1,1,255))
#cv2.imwrite('result1.png',img2) # disabled: img2 is only created by the commented-out putText call above
return(b)
del(cap)
a = GetObjectPosition()
coordinates = a.get_position()
print coordinates
#end = timeit.timeit()
#print end - start | mit |
einarhuseby/arctic | tests/integration/test_arctic.py | 4 | 6898 | from datetime import datetime as dt, timedelta as dtd
from mock import patch
from pandas import DataFrame
from pandas.util.testing import assert_frame_equal
import pytest
import time
import numpy as np
from arctic.arctic import Arctic, VERSION_STORE
from arctic.exceptions import LibraryNotFoundException, QuotaExceededException
from ..util import get_large_ts
def test_connect_to_Arctic_string(mongo_host):
arctic = Arctic(mongo_host=mongo_host)
assert arctic.list_libraries() == []
assert arctic.mongo_host == mongo_host
def test_connect_to_Arctic_connection(mongodb, mongo_host):
arctic = Arctic(mongodb)
assert arctic.list_libraries() == []
assert arctic.mongo_host == mongo_host
def test_simple(library):
sym = 'symbol'
data = get_large_ts(100)
library.write(sym, data)
orig = dt.now()
time.sleep(1) # Move the timestamp on by 1s
data2 = get_large_ts(100)
library.write(sym, data2, prune_previous_version=False)
# Get the timeseries, it should be the same
read2 = library.read(sym).data
assert_frame_equal(read2, data2)
# Ensure we can get the previous version
read = library.read(sym, as_of=orig).data
assert_frame_equal(read, data)
def test_indexes(arctic):
c = arctic._conn
arctic.initialize_library("library", VERSION_STORE, segment='month')
chunk = c.arctic.library.index_information()
assert chunk == {u'_id_': {u'key': [(u'_id', 1)], u'ns': u'arctic.library', u'v': 1},
u'symbol_1_parent_1_segment_1': {u'background': True,
u'key': [(u'symbol', 1),
(u'parent', 1),
(u'segment', 1)],
u'ns': u'arctic.library',
u'unique': True,
u'v': 1},
u'symbol_1_sha_1': {u'background': True,
u'key': [(u'symbol', 1), (u'sha', 1)],
u'ns': u'arctic.library',
u'unique': True,
u'v': 1},
u'symbol_hashed': {u'background': True,
u'key': [(u'symbol', u'hashed')],
u'ns': u'arctic.library',
u'v': 1}}
snapshots = c.arctic.library.snapshots.index_information()
assert snapshots == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.snapshots',
u'v': 1},
u'name_1': {u'background': True,
u'key': [(u'name', 1)],
u'ns': u'arctic.library.snapshots',
u'unique': True,
u'v': 1}}
versions = c.arctic.library.versions.index_information()
assert versions == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.versions',
u'v': 1},
u'symbol_1__id_-1': {u'background': True,
u'key': [(u'symbol', 1), (u'_id', -1)],
u'ns': u'arctic.library.versions',
u'v': 1},
u'symbol_1_version_-1': {u'background': True,
u'key': [(u'symbol', 1), (u'version', -1)],
u'ns': u'arctic.library.versions',
u'unique': True,
u'v': 1}}
version_nums = c.arctic.library.version_nums.index_information()
assert version_nums == {u'_id_': {u'key': [(u'_id', 1)],
u'ns': u'arctic.library.version_nums',
u'v': 1},
u'symbol_1': {u'background': True,
u'key': [(u'symbol', 1)],
u'ns': u'arctic.library.version_nums',
u'unique': True,
u'v': 1}}
def test_delete_library(arctic, library, library_name):
mongo = arctic._conn
# create a library2 library too - ensure that this isn't deleted
arctic.initialize_library('user.library2', VERSION_STORE, segment='month')
library.write('asdf', get_large_ts(1))
assert 'TEST' in mongo.arctic_test.collection_names()
assert 'TEST.versions' in mongo.arctic_test.collection_names()
assert 'library2' in mongo.arctic_user.collection_names()
assert 'library2.versions' in mongo.arctic_user.collection_names()
arctic.delete_library(library_name)
assert 'TEST' not in mongo.arctic_user.collection_names()
assert 'TEST.versions' not in mongo.arctic_user.collection_names()
with pytest.raises(LibraryNotFoundException):
arctic[library_name]
with pytest.raises(LibraryNotFoundException):
arctic['arctic_{}'.format(library_name)]
assert 'library2' in mongo.arctic_user.collection_names()
assert 'library2.versions' in mongo.arctic_user.collection_names()
def test_quota(arctic, library, library_name):
thing = list(range(100))
library._arctic_lib.set_quota(10)
assert arctic.get_quota(library_name) == 10
assert library._arctic_lib.get_quota() == 10
library.write('thing', thing)
with pytest.raises(QuotaExceededException):
library.write('ts', thing)
library.write('ts', thing)
library.write('ts', thing)
library.write('ts', thing)
with pytest.raises(QuotaExceededException):
arctic.check_quota(library_name)
def test_check_quota(arctic, library, library_name):
with patch('arctic.arctic.logger.info') as info:
arctic.check_quota(library_name)
assert info.call_count == 1
def test_default_mongo_retry_timout():
now = time.time()
with pytest.raises(LibraryNotFoundException):
Arctic('unresolved-host', serverSelectionTimeoutMS=0)['some.lib']
assert time.time() - now < 1.
| lgpl-2.1 |
chrsrds/scikit-learn | sklearn/neighbors/lof.py | 3 | 20358 | # Authors: Nicolas Goix <nicolas.goix@telecom-paristech.fr>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
import warnings
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import UnsupervisedMixin
from ..base import OutlierMixin
from ..utils.validation import check_is_fitted
from ..utils import check_array
__all__ = ["LocalOutlierFactor"]
class LocalOutlierFactor(NeighborsBase, KNeighborsMixin, UnsupervisedMixin,
OutlierMixin):
"""Unsupervised Outlier Detection using Local Outlier Factor (LOF)
The anomaly score of each sample is called Local Outlier Factor.
It measures the local deviation of density of a given sample with
respect to its neighbors.
It is local in that the anomaly score depends on how isolated the object
is with respect to the surrounding neighborhood.
More precisely, locality is given by k-nearest neighbors, whose distance
is used to estimate the local density.
By comparing the local density of a sample to the local densities of
its neighbors, one can identify samples that have a substantially lower
density than their neighbors. These are considered outliers.
Parameters
----------
n_neighbors : int, optional (default=20)
Number of neighbors to use by default for :meth:`kneighbors` queries.
If n_neighbors is larger than the number of samples provided,
all samples will be used.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default=30)
Leaf size passed to :class:`BallTree` or :class:`KDTree`. This can
affect the speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
metric : string or callable, default 'minkowski'
metric used for the distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If 'precomputed', the training input X is expected to be a distance
matrix.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean',
'yule']
See the documentation for scipy.spatial.distance for details on these
metrics:
https://docs.scipy.org/doc/scipy/reference/spatial.distance.html
p : integer, optional (default=2)
Parameter for the Minkowski metric from
:func:`sklearn.metrics.pairwise.pairwise_distances`. When p = 1, this
is equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric_params : dict, optional (default=None)
Additional keyword arguments for the metric function.
contamination : 'auto' or float, optional (default='auto')
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. When fitting this is used to define the
threshold on the scores of the samples.
- if 'auto', the threshold is determined as in the
original paper,
- if a float, the contamination should be in the range [0, 0.5].
.. versionchanged:: 0.22
The default value of ``contamination`` changed from 0.1
to ``'auto'``.
novelty : boolean, default False
By default, LocalOutlierFactor is only meant to be used for outlier
detection (novelty=False). Set novelty to True if you want to use
LocalOutlierFactor for novelty detection. In this case be aware that
that you should only use predict, decision_function and score_samples
on new unseen data and not on the training set.
n_jobs : int or None, optional (default=None)
The number of parallel jobs to run for neighbors search.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
Affects only :meth:`kneighbors` and :meth:`kneighbors_graph` methods.
Attributes
----------
negative_outlier_factor_ : numpy array, shape (n_samples,)
The opposite LOF of the training samples. The higher, the more normal.
Inliers tend to have a LOF score close to 1 (``negative_outlier_factor_``
close to -1), while outliers tend to have a larger LOF score.
The local outlier factor (LOF) of a sample captures its
supposed 'degree of abnormality'.
It is the average of the ratio of the local reachability density of
a sample and those of its k-nearest neighbors.
n_neighbors_ : integer
The actual number of neighbors used for :meth:`kneighbors` queries.
offset_ : float
Offset used to obtain binary labels from the raw scores.
Observations having a negative_outlier_factor smaller than `offset_`
are detected as abnormal.
The offset is set to -1.5 (inliers score around -1), except when a
contamination parameter different than "auto" is provided. In that
case, the offset is defined in such a way we obtain the expected
number of outliers in training.
References
----------
.. [1] Breunig, M. M., Kriegel, H. P., Ng, R. T., & Sander, J. (2000, May).
LOF: identifying density-based local outliers. In ACM sigmod record.
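    Examples
    --------
    A minimal usage sketch (illustrative only; the exact
    ``negative_outlier_factor_`` values depend on the data and the
    number of neighbors):
    >>> import numpy as np
    >>> from sklearn.neighbors import LocalOutlierFactor
    >>> X = np.array([[-1.1], [0.2], [101.1], [0.3]])
    >>> clf = LocalOutlierFactor(n_neighbors=2)
    >>> clf.fit_predict(X)
    array([ 1,  1, -1,  1])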
"""
def __init__(self, n_neighbors=20, algorithm='auto', leaf_size=30,
metric='minkowski', p=2, metric_params=None,
contamination="auto", novelty=False, n_jobs=None):
super().__init__(
n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, n_jobs=n_jobs)
self.contamination = contamination
self.novelty = novelty
@property
def fit_predict(self):
""""Fits the model to the training set X and returns the labels.
Label is 1 for an inlier and -1 for an outlier according to the LOF
score and the contamination parameter.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples.
y : Ignored
not used, present for API consistency by convention.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and 1 for inliers.
"""
# As fit_predict would be different from fit.predict, fit_predict is
# only available for outlier detection (novelty=False)
if self.novelty:
msg = ('fit_predict is not available when novelty=True. Use '
'novelty=False if you want to predict on the training set.')
raise AttributeError(msg)
return self._fit_predict
def _fit_predict(self, X, y=None):
""""Fits the model to the training set X and returns the labels.
Label is 1 for an inlier and -1 for an outlier according to the LOF
score and the contamination parameter.
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and 1 for inliers.
"""
# As fit_predict would be different from fit.predict, fit_predict is
# only available for outlier detection (novelty=False)
return self.fit(X)._predict()
def fit(self, X, y=None):
"""Fit the model using X as training data.
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree}
Training data. If array or matrix, shape [n_samples, n_features],
or [n_samples, n_samples] if metric='precomputed'.
y : Ignored
not used, present for API consistency by convention.
Returns
-------
self : object
"""
if self.contamination != 'auto':
if not(0. < self.contamination <= .5):
raise ValueError("contamination must be in (0, 0.5], "
"got: %f" % self.contamination)
super().fit(X)
n_samples = self._fit_X.shape[0]
if self.n_neighbors > n_samples:
warnings.warn("n_neighbors (%s) is greater than the "
"total number of samples (%s). n_neighbors "
"will be set to (n_samples - 1) for estimation."
% (self.n_neighbors, n_samples))
self.n_neighbors_ = max(1, min(self.n_neighbors, n_samples - 1))
self._distances_fit_X_, _neighbors_indices_fit_X_ = (
self.kneighbors(None, n_neighbors=self.n_neighbors_))
self._lrd = self._local_reachability_density(
self._distances_fit_X_, _neighbors_indices_fit_X_)
# Compute lof score over training samples to define offset_:
lrd_ratios_array = (self._lrd[_neighbors_indices_fit_X_] /
self._lrd[:, np.newaxis])
self.negative_outlier_factor_ = -np.mean(lrd_ratios_array, axis=1)
if self.contamination == "auto":
# inliers score around -1 (the higher, the less abnormal).
self.offset_ = -1.5
else:
self.offset_ = np.percentile(self.negative_outlier_factor_,
100. * self.contamination)
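            # e.g. contamination=0.1 places offset_ at the 10th percentile of
            # negative_outlier_factor_, so roughly 10% of the training points
            # are flagged as outliers by fit_predict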
return self
@property
def predict(self):
"""Predict the labels (1 inlier, -1 outlier) of X according to LOF.
        This method makes it possible to generalize prediction to
        *new observations* (not in the training set). Only available for
        novelty detection (when novelty is set to True).
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
if not self.novelty:
msg = ('predict is not available when novelty=False, use '
'fit_predict if you want to predict on training data. Use '
'novelty=True if you want to use LOF for novelty detection '
'and predict on new unseen data.')
raise AttributeError(msg)
return self._predict
def _predict(self, X=None):
"""Predict the labels (1 inlier, -1 outlier) of X according to LOF.
If X is None, returns the same as fit_predict(X_train).
Parameters
----------
X : array-like, shape (n_samples, n_features), default=None
The query sample or samples to compute the Local Outlier Factor
w.r.t. to the training samples. If None, makes prediction on the
training data without considering them as their own neighbors.
Returns
-------
is_inlier : array, shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
check_is_fitted(self, ["offset_", "negative_outlier_factor_",
"n_neighbors_", "_distances_fit_X_"])
if X is not None:
X = check_array(X, accept_sparse='csr')
is_inlier = np.ones(X.shape[0], dtype=int)
is_inlier[self.decision_function(X) < 0] = -1
else:
is_inlier = np.ones(self._fit_X.shape[0], dtype=int)
is_inlier[self.negative_outlier_factor_ < self.offset_] = -1
return is_inlier
@property
def decision_function(self):
"""Shifted opposite of the Local Outlier Factor of X.
Bigger is better, i.e. large values correspond to inliers.
The shift offset allows a zero threshold for being an outlier.
Only available for novelty detection (when novelty is set to True).
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
shifted_opposite_lof_scores : array, shape (n_samples,)
The shifted opposite of the Local Outlier Factor of each input
samples. The lower, the more abnormal. Negative scores represent
outliers, positive scores represent inliers.
"""
if not self.novelty:
msg = ('decision_function is not available when novelty=False. '
'Use novelty=True if you want to use LOF for novelty '
'detection and compute decision_function for new unseen '
'data. Note that the opposite LOF of the training samples '
'is always available by considering the '
'negative_outlier_factor_ attribute.')
raise AttributeError(msg)
return self._decision_function
def _decision_function(self, X):
"""Shifted opposite of the Local Outlier Factor of X.
Bigger is better, i.e. large values correspond to inliers.
The shift offset allows a zero threshold for being an outlier.
Only available for novelty detection (when novelty is set to True).
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
shifted_opposite_lof_scores : array, shape (n_samples,)
The shifted opposite of the Local Outlier Factor of each input
samples. The lower, the more abnormal. Negative scores represent
outliers, positive scores represent inliers.
"""
return self._score_samples(X) - self.offset_
@property
def score_samples(self):
"""Opposite of the Local Outlier Factor of X.
It is the opposite as bigger is better, i.e. large values correspond
to inliers.
Only available for novelty detection (when novelty is set to True).
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
        The score_samples on training data is available by considering the
        ``negative_outlier_factor_`` attribute.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : array, shape (n_samples,)
The opposite of the Local Outlier Factor of each input samples.
The lower, the more abnormal.
"""
if not self.novelty:
msg = ('score_samples is not available when novelty=False. The '
'scores of the training samples are always available '
'through the negative_outlier_factor_ attribute. Use '
'novelty=True if you want to use LOF for novelty detection '
'and compute score_samples for new unseen data.')
raise AttributeError(msg)
return self._score_samples
def _score_samples(self, X):
"""Opposite of the Local Outlier Factor of X.
It is the opposite as bigger is better, i.e. large values correspond
to inliers.
Only available for novelty detection (when novelty is set to True).
The argument X is supposed to contain *new data*: if X contains a
        point from training, it considers the latter in its own neighborhood.
Also, the samples in X are not considered in the neighborhood of any
point.
        The score_samples on training data is available by considering the
        ``negative_outlier_factor_`` attribute.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The query sample or samples to compute the Local Outlier Factor
w.r.t. the training samples.
Returns
-------
opposite_lof_scores : array, shape (n_samples,)
The opposite of the Local Outlier Factor of each input samples.
The lower, the more abnormal.
"""
check_is_fitted(self, ["offset_", "negative_outlier_factor_",
"_distances_fit_X_"])
X = check_array(X, accept_sparse='csr')
distances_X, neighbors_indices_X = (
self.kneighbors(X, n_neighbors=self.n_neighbors_))
X_lrd = self._local_reachability_density(distances_X,
neighbors_indices_X)
lrd_ratios_array = (self._lrd[neighbors_indices_X] /
X_lrd[:, np.newaxis])
# as bigger is better:
return -np.mean(lrd_ratios_array, axis=1)
def _local_reachability_density(self, distances_X, neighbors_indices):
"""The local reachability density (LRD)
The LRD of a sample is the inverse of the average reachability
distance of its k-nearest neighbors.
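        In symbols, writing reach_dist_k(A, B) = max(dist_k(B), d(A, B)) for
        the reachability distance, the quantity computed below is
        lrd_k(A) = 1 / (mean over B in kNN(A) of reach_dist_k(A, B) + 1e-10),
        where the small constant only guards against division by zero.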
Parameters
----------
distances_X : array, shape (n_query, self.n_neighbors)
Distances to the neighbors (in the training samples `self._fit_X`)
of each query point to compute the LRD.
neighbors_indices : array, shape (n_query, self.n_neighbors)
Neighbors indices (of each query point) among training samples
self._fit_X.
Returns
-------
local_reachability_density : array, shape (n_samples,)
The local reachability density of each sample.
"""
dist_k = self._distances_fit_X_[neighbors_indices,
self.n_neighbors_ - 1]
reach_dist_array = np.maximum(distances_X, dist_k)
        # 1e-10 to avoid `nan` when the number of duplicates > n_neighbors_:
return 1. / (np.mean(reach_dist_array, axis=1) + 1e-10)
| bsd-3-clause |
thientu/scikit-learn | sklearn/gaussian_process/gaussian_process.py | 78 | 34552 | # -*- coding: utf-8 -*-
# Author: Vincent Dubourg <vincent.dubourg@gmail.com>
# (mostly translation, see implementation details)
# Licence: BSD 3 clause
from __future__ import print_function
import numpy as np
from scipy import linalg, optimize
from ..base import BaseEstimator, RegressorMixin
from ..metrics.pairwise import manhattan_distances
from ..utils import check_random_state, check_array, check_X_y
from ..utils.validation import check_is_fitted
from . import regression_models as regression
from . import correlation_models as correlation
MACHINE_EPSILON = np.finfo(np.double).eps
def l1_cross_distances(X):
"""
Computes the nonzero componentwise L1 cross-distances between the vectors
in X.
Parameters
----------
X: array_like
An array with shape (n_samples, n_features)
Returns
-------
D: array with shape (n_samples * (n_samples - 1) / 2, n_features)
The array of componentwise L1 cross-distances.
ij: arrays with shape (n_samples * (n_samples - 1) / 2, 2)
The indices i and j of the vectors in X associated to the cross-
distances in D: D[k] = np.abs(X[ij[k, 0]] - Y[ij[k, 1]]).
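    Examples
    --------
    A tiny illustration (the values follow directly from the definition
    above):
    >>> import numpy as np
    >>> D, ij = l1_cross_distances(np.array([[0.], [1.], [3.]]))
    >>> D.ravel().tolist()
    [1.0, 3.0, 2.0]
    >>> ij.tolist()
    [[0, 1], [0, 2], [1, 2]]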
"""
X = check_array(X)
n_samples, n_features = X.shape
n_nonzero_cross_dist = n_samples * (n_samples - 1) // 2
    ij = np.zeros((n_nonzero_cross_dist, 2), dtype=int)  # np.int alias is deprecated
D = np.zeros((n_nonzero_cross_dist, n_features))
ll_1 = 0
for k in range(n_samples - 1):
ll_0 = ll_1
ll_1 = ll_0 + n_samples - k - 1
ij[ll_0:ll_1, 0] = k
ij[ll_0:ll_1, 1] = np.arange(k + 1, n_samples)
D[ll_0:ll_1] = np.abs(X[k] - X[(k + 1):n_samples])
return D, ij
class GaussianProcess(BaseEstimator, RegressorMixin):
"""The Gaussian Process model class.
Read more in the :ref:`User Guide <gaussian_process>`.
Parameters
----------
regr : string or callable, optional
A regression function returning an array of outputs of the linear
regression functional basis. The number of observations n_samples
should be greater than the size p of this basis.
Default assumes a simple constant regression trend.
Available built-in regression models are::
'constant', 'linear', 'quadratic'
corr : string or callable, optional
A stationary autocorrelation function returning the autocorrelation
between two points x and x'.
Default assumes a squared-exponential autocorrelation model.
Built-in correlation models are::
'absolute_exponential', 'squared_exponential',
'generalized_exponential', 'cubic', 'linear'
beta0 : double array_like, optional
The regression weight vector to perform Ordinary Kriging (OK).
Default assumes Universal Kriging (UK) so that the vector beta of
regression weights is estimated using the maximum likelihood
principle.
storage_mode : string, optional
A string specifying whether the Cholesky decomposition of the
correlation matrix should be stored in the class (storage_mode =
'full') or not (storage_mode = 'light').
Default assumes storage_mode = 'full', so that the
Cholesky decomposition of the correlation matrix is stored.
This might be a useful parameter when one is not interested in the
        MSE and only plans to estimate the BLUP, for which the correlation
matrix is not required.
verbose : boolean, optional
A boolean specifying the verbose level.
Default is verbose = False.
theta0 : double array_like, optional
An array with shape (n_features, ) or (1, ).
The parameters in the autocorrelation model.
If thetaL and thetaU are also specified, theta0 is considered as
the starting point for the maximum likelihood estimation of the
best set of parameters.
Default assumes isotropic autocorrelation model with theta0 = 1e-1.
thetaL : double array_like, optional
An array with shape matching theta0's.
Lower bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
thetaU : double array_like, optional
An array with shape matching theta0's.
Upper bound on the autocorrelation parameters for maximum
likelihood estimation.
Default is None, so that it skips maximum likelihood estimation and
it uses theta0.
normalize : boolean, optional
        Input X and observations y are centered and scaled with respect to
        the means and standard deviations estimated from the n_samples
observations provided.
Default is normalize = True so that data is normalized to ease
maximum likelihood estimation.
nugget : double or ndarray, optional
Introduce a nugget effect to allow smooth predictions from noisy
data. If nugget is an ndarray, it must be the same length as the
number of data points used for the fit.
The nugget is added to the diagonal of the assumed training covariance;
in this way it acts as a Tikhonov regularization in the problem. In
the special case of the squared exponential correlation function, the
nugget mathematically represents the variance of the input values.
Default assumes a nugget close to machine precision for the sake of
robustness (nugget = 10. * MACHINE_EPSILON).
optimizer : string, optional
A string specifying the optimization algorithm to be used.
Default uses 'fmin_cobyla' algorithm from scipy.optimize.
Available optimizers are::
'fmin_cobyla', 'Welch'
        'Welch' optimizer is due to Welch et al., see reference [WBSWM1992]_.
It consists in iterating over several one-dimensional optimizations
instead of running one single multi-dimensional optimization.
random_start : int, optional
The number of times the Maximum Likelihood Estimation should be
performed from a random starting point.
The first MLE always uses the specified starting point (theta0),
the next starting points are picked at random according to an
exponential distribution (log-uniform on [thetaL, thetaU]).
Default does not use random starting point (random_start = 1).
random_state: integer or numpy.RandomState, optional
The generator used to shuffle the sequence of coordinates of theta in
the Welch optimizer. If an integer is given, it fixes the seed.
Defaults to the global numpy random number generator.
Attributes
----------
theta_ : array
Specified theta OR the best set of autocorrelation parameters (the \
sought maximizer of the reduced likelihood function).
reduced_likelihood_function_value_ : array
The optimal reduced likelihood function value.
Examples
--------
>>> import numpy as np
>>> from sklearn.gaussian_process import GaussianProcess
>>> X = np.array([[1., 3., 5., 6., 7., 8.]]).T
>>> y = (X * np.sin(X)).ravel()
>>> gp = GaussianProcess(theta0=0.1, thetaL=.001, thetaU=1.)
>>> gp.fit(X, y) # doctest: +ELLIPSIS
GaussianProcess(beta0=None...
...
Notes
-----
    The present implementation is based on a translation of the DACE
Matlab toolbox, see reference [NLNS2002]_.
References
----------
.. [NLNS2002] `H.B. Nielsen, S.N. Lophaven, H. B. Nielsen and J.
Sondergaard. DACE - A MATLAB Kriging Toolbox.` (2002)
http://www2.imm.dtu.dk/~hbn/dace/dace.pdf
.. [WBSWM1992] `W.J. Welch, R.J. Buck, J. Sacks, H.P. Wynn, T.J. Mitchell,
and M.D. Morris (1992). Screening, predicting, and computer
experiments. Technometrics, 34(1) 15--25.`
http://www.jstor.org/pss/1269548
"""
_regression_types = {
'constant': regression.constant,
'linear': regression.linear,
'quadratic': regression.quadratic}
_correlation_types = {
'absolute_exponential': correlation.absolute_exponential,
'squared_exponential': correlation.squared_exponential,
'generalized_exponential': correlation.generalized_exponential,
'cubic': correlation.cubic,
'linear': correlation.linear}
_optimizer_types = [
'fmin_cobyla',
'Welch']
def __init__(self, regr='constant', corr='squared_exponential', beta0=None,
storage_mode='full', verbose=False, theta0=1e-1,
thetaL=None, thetaU=None, optimizer='fmin_cobyla',
random_start=1, normalize=True,
nugget=10. * MACHINE_EPSILON, random_state=None):
self.regr = regr
self.corr = corr
self.beta0 = beta0
self.storage_mode = storage_mode
self.verbose = verbose
self.theta0 = theta0
self.thetaL = thetaL
self.thetaU = thetaU
self.normalize = normalize
self.nugget = nugget
self.optimizer = optimizer
self.random_start = random_start
self.random_state = random_state
def fit(self, X, y):
"""
The Gaussian Process model fitting method.
Parameters
----------
X : double array_like
An array with shape (n_samples, n_features) with the input at which
observations were made.
y : double array_like
An array with shape (n_samples, ) or shape (n_samples, n_targets)
with the observations of the output to be predicted.
Returns
-------
gp : self
A fitted Gaussian Process model object awaiting data to perform
predictions.
"""
# Run input checks
self._check_params()
self.random_state = check_random_state(self.random_state)
# Force data to 2D numpy.array
X, y = check_X_y(X, y, multi_output=True, y_numeric=True)
self.y_ndim_ = y.ndim
if y.ndim == 1:
y = y[:, np.newaxis]
# Check shapes of DOE & observations
n_samples, n_features = X.shape
_, n_targets = y.shape
# Run input checks
self._check_params(n_samples)
# Normalize data or don't
if self.normalize:
X_mean = np.mean(X, axis=0)
X_std = np.std(X, axis=0)
y_mean = np.mean(y, axis=0)
y_std = np.std(y, axis=0)
X_std[X_std == 0.] = 1.
y_std[y_std == 0.] = 1.
# center and scale X if necessary
X = (X - X_mean) / X_std
y = (y - y_mean) / y_std
else:
X_mean = np.zeros(1)
X_std = np.ones(1)
y_mean = np.zeros(1)
y_std = np.ones(1)
# Calculate matrix of distances D between samples
D, ij = l1_cross_distances(X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple input features cannot have the same"
" target value.")
# Regression matrix and parameters
F = self.regr(X)
n_samples_F = F.shape[0]
if F.ndim > 1:
p = F.shape[1]
else:
p = 1
if n_samples_F != n_samples:
raise Exception("Number of rows in F and X do not match. Most "
"likely something is going wrong with the "
"regression model.")
if p > n_samples_F:
raise Exception(("Ordinary least squares problem is undetermined "
"n_samples=%d must be greater than the "
"regression model size p=%d.") % (n_samples, p))
if self.beta0 is not None:
if self.beta0.shape[0] != p:
raise Exception("Shapes of beta0 and F do not match.")
# Set attributes
self.X = X
self.y = y
self.D = D
self.ij = ij
self.F = F
self.X_mean, self.X_std = X_mean, X_std
self.y_mean, self.y_std = y_mean, y_std
# Determine Gaussian Process model parameters
if self.thetaL is not None and self.thetaU is not None:
# Maximum Likelihood Estimation of the parameters
if self.verbose:
print("Performing Maximum Likelihood Estimation of the "
"autocorrelation parameters...")
self.theta_, self.reduced_likelihood_function_value_, par = \
self._arg_max_reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad parameter region. "
"Try increasing upper bound")
else:
# Given parameters
if self.verbose:
print("Given autocorrelation parameters. "
"Computing Gaussian Process model parameters...")
self.theta_ = self.theta0
self.reduced_likelihood_function_value_, par = \
self.reduced_likelihood_function()
if np.isinf(self.reduced_likelihood_function_value_):
raise Exception("Bad point. Try increasing theta0.")
self.beta = par['beta']
self.gamma = par['gamma']
self.sigma2 = par['sigma2']
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
if self.storage_mode == 'light':
# Delete heavy data (it will be computed again if required)
# (it is required only when MSE is wanted in self.predict)
if self.verbose:
print("Light storage mode specified. "
"Flushing autocorrelation matrix...")
self.D = None
self.ij = None
self.F = None
self.C = None
self.Ft = None
self.G = None
return self
def predict(self, X, eval_MSE=False, batch_size=None):
"""
This function evaluates the Gaussian Process model at x.
Parameters
----------
X : array_like
An array with shape (n_eval, n_features) giving the point(s) at
which the prediction(s) should be made.
eval_MSE : boolean, optional
A boolean specifying whether the Mean Squared Error should be
evaluated or not.
        Default assumes eval_MSE = False and evaluates only the BLUP (mean
prediction).
batch_size : integer, optional
An integer giving the maximum number of points that can be
evaluated simultaneously (depending on the available memory).
Default is None so that all given points are evaluated at the same
time.
Returns
-------
y : array_like, shape (n_samples, ) or (n_samples, n_targets)
An array with shape (n_eval, ) if the Gaussian Process was trained
on an array of shape (n_samples, ) or an array with shape
(n_eval, n_targets) if the Gaussian Process was trained on an array
of shape (n_samples, n_targets) with the Best Linear Unbiased
Prediction at x.
MSE : array_like, optional (if eval_MSE == True)
An array with shape (n_eval, ) or (n_eval, n_targets) as with y,
with the Mean Squared Error at x.
"""
check_is_fitted(self, "X")
# Check input shapes
X = check_array(X)
n_eval, _ = X.shape
n_samples, n_features = self.X.shape
n_samples_y, n_targets = self.y.shape
# Run input checks
self._check_params(n_samples)
if X.shape[1] != n_features:
raise ValueError(("The number of features in X (X.shape[1] = %d) "
"should match the number of features used "
"for fit() "
"which is %d.") % (X.shape[1], n_features))
if batch_size is None:
# No memory management
# (evaluates all given points in a single batch run)
# Normalize input
X = (X - self.X_mean) / self.X_std
# Initialize output
y = np.zeros(n_eval)
if eval_MSE:
MSE = np.zeros(n_eval)
# Get pairwise componentwise L1-distances to the input training set
dx = manhattan_distances(X, Y=self.X, sum_over_features=False)
# Get regression function and correlation
f = self.regr(X)
r = self.corr(self.theta_, dx).reshape(n_eval, n_samples)
# Scaled predictor
y_ = np.dot(f, self.beta) + np.dot(r, self.gamma)
# Predictor
y = (self.y_mean + self.y_std * y_).reshape(n_eval, n_targets)
if self.y_ndim_ == 1:
y = y.ravel()
# Mean Squared Error
if eval_MSE:
C = self.C
if C is None:
# Light storage mode (need to recompute C, F, Ft and G)
if self.verbose:
print("This GaussianProcess used 'light' storage mode "
"at instantiation. Need to recompute "
"autocorrelation matrix...")
reduced_likelihood_function_value, par = \
self.reduced_likelihood_function()
self.C = par['C']
self.Ft = par['Ft']
self.G = par['G']
rt = linalg.solve_triangular(self.C, r.T, lower=True)
if self.beta0 is None:
# Universal Kriging
u = linalg.solve_triangular(self.G.T,
np.dot(self.Ft.T, rt) - f.T,
lower=True)
else:
# Ordinary Kriging
u = np.zeros((n_targets, n_eval))
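            # Kriging MSE, assembled from the triangular solves above:
            #   sigma2 * (1 - r' R^-1 r + u' (F' R^-1 F)^-1 u)
            # with u forced to zero in the Ordinary Kriging branch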
MSE = np.dot(self.sigma2.reshape(n_targets, 1),
(1. - (rt ** 2.).sum(axis=0)
+ (u ** 2.).sum(axis=0))[np.newaxis, :])
MSE = np.sqrt((MSE ** 2.).sum(axis=0) / n_targets)
# Mean Squared Error might be slightly negative depending on
# machine precision: force to zero!
MSE[MSE < 0.] = 0.
if self.y_ndim_ == 1:
MSE = MSE.ravel()
return y, MSE
else:
return y
else:
# Memory management
if type(batch_size) is not int or batch_size <= 0:
raise Exception("batch_size must be a positive integer")
if eval_MSE:
y, MSE = np.zeros(n_eval), np.zeros(n_eval)
                for k in range(max(1, n_eval // batch_size)):  # floor division
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to], MSE[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y, MSE
else:
y = np.zeros(n_eval)
                for k in range(max(1, n_eval // batch_size)):  # floor division
batch_from = k * batch_size
batch_to = min([(k + 1) * batch_size + 1, n_eval + 1])
y[batch_from:batch_to] = \
self.predict(X[batch_from:batch_to],
eval_MSE=eval_MSE, batch_size=None)
return y
def reduced_likelihood_function(self, theta=None):
"""
This function determines the BLUP parameters and evaluates the reduced
likelihood function for the given autocorrelation parameters theta.
Maximizing this function wrt the autocorrelation parameters theta is
equivalent to maximizing the likelihood of the assumed joint Gaussian
distribution of the observations y evaluated onto the design of
experiments X.
Parameters
----------
theta : array_like, optional
An array containing the autocorrelation parameters at which the
Gaussian Process model parameters should be determined.
Default uses the built-in autocorrelation parameters
(ie ``theta = self.theta_``).
Returns
-------
reduced_likelihood_function_value : double
The value of the reduced likelihood function associated to the
given autocorrelation parameters theta.
par : dict
A dictionary containing the requested Gaussian Process model
parameters:
sigma2
Gaussian Process variance.
beta
Generalized least-squares regression weights for
Universal Kriging or given beta0 for Ordinary
Kriging.
gamma
Gaussian Process weights.
C
Cholesky decomposition of the correlation matrix [R].
Ft
Solution of the linear equation system : [R] x Ft = F
G
QR decomposition of the matrix Ft.
"""
check_is_fitted(self, "X")
if theta is None:
# Use built-in autocorrelation parameters
theta = self.theta_
# Initialize output
reduced_likelihood_function_value = - np.inf
par = {}
# Retrieve data
n_samples = self.X.shape[0]
D = self.D
ij = self.ij
F = self.F
if D is None:
# Light storage mode (need to recompute D, ij and F)
D, ij = l1_cross_distances(self.X)
if (np.min(np.sum(D, axis=1)) == 0.
and self.corr != correlation.pure_nugget):
raise Exception("Multiple X are not allowed")
F = self.regr(self.X)
# Set up R
r = self.corr(theta, D)
R = np.eye(n_samples) * (1. + self.nugget)
R[ij[:, 0], ij[:, 1]] = r
R[ij[:, 1], ij[:, 0]] = r
# Cholesky decomposition of R
try:
C = linalg.cholesky(R, lower=True)
except linalg.LinAlgError:
return reduced_likelihood_function_value, par
# Get generalized least squares solution
Ft = linalg.solve_triangular(C, F, lower=True)
try:
Q, G = linalg.qr(Ft, econ=True)
except:
#/usr/lib/python2.6/dist-packages/scipy/linalg/decomp.py:1177:
# DeprecationWarning: qr econ argument will be removed after scipy
# 0.7. The economy transform will then be available through the
# mode='economic' argument.
Q, G = linalg.qr(Ft, mode='economic')
pass
sv = linalg.svd(G, compute_uv=False)
rcondG = sv[-1] / sv[0]
if rcondG < 1e-10:
# Check F
sv = linalg.svd(F, compute_uv=False)
condF = sv[0] / sv[-1]
if condF > 1e15:
raise Exception("F is too ill conditioned. Poor combination "
"of regression model and observations.")
else:
# Ft is too ill conditioned, get out (try different theta)
return reduced_likelihood_function_value, par
Yt = linalg.solve_triangular(C, self.y, lower=True)
if self.beta0 is None:
# Universal Kriging
beta = linalg.solve_triangular(G, np.dot(Q.T, Yt))
else:
# Ordinary Kriging
beta = np.array(self.beta0)
rho = Yt - np.dot(Ft, beta)
sigma2 = (rho ** 2.).sum(axis=0) / n_samples
# The determinant of R is equal to the squared product of the diagonal
# elements of its Cholesky decomposition C
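        # (strictly, the quantity used below is det(R) ** (1 / n_samples),
        # the n_samples-th root of the determinant, which keeps the reduced
        # likelihood value on a comparable scale across sample sizes)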
detR = (np.diag(C) ** (2. / n_samples)).prod()
# Compute/Organize output
reduced_likelihood_function_value = - sigma2.sum() * detR
par['sigma2'] = sigma2 * self.y_std ** 2.
par['beta'] = beta
par['gamma'] = linalg.solve_triangular(C.T, rho)
par['C'] = C
par['Ft'] = Ft
par['G'] = G
return reduced_likelihood_function_value, par
def _arg_max_reduced_likelihood_function(self):
"""
This function estimates the autocorrelation parameters theta as the
maximizer of the reduced likelihood function.
(Minimization of the opposite reduced likelihood function is used for
convenience)
Parameters
----------
self : All parameters are stored in the Gaussian Process model object.
Returns
-------
optimal_theta : array_like
The best set of autocorrelation parameters (the sought maximizer of
the reduced likelihood function).
optimal_reduced_likelihood_function_value : double
The optimal reduced likelihood function value.
optimal_par : dict
The BLUP parameters associated to thetaOpt.
"""
# Initialize output
best_optimal_theta = []
best_optimal_rlf_value = []
best_optimal_par = []
if self.verbose:
print("The chosen optimizer is: " + str(self.optimizer))
if self.random_start > 1:
print(str(self.random_start) + " random starts are required.")
percent_completed = 0.
# Force optimizer to fmin_cobyla if the model is meant to be isotropic
if self.optimizer == 'Welch' and self.theta0.size == 1:
self.optimizer = 'fmin_cobyla'
if self.optimizer == 'fmin_cobyla':
def minus_reduced_likelihood_function(log10t):
return - self.reduced_likelihood_function(
theta=10. ** log10t)[0]
constraints = []
for i in range(self.theta0.size):
constraints.append(lambda log10t, i=i:
log10t[i] - np.log10(self.thetaL[0, i]))
constraints.append(lambda log10t, i=i:
np.log10(self.thetaU[0, i]) - log10t[i])
for k in range(self.random_start):
if k == 0:
# Use specified starting point as first guess
theta0 = self.theta0
else:
# Generate a random starting point log10-uniformly
# distributed between bounds
log10theta0 = (np.log10(self.thetaL)
+ self.random_state.rand(*self.theta0.shape)
* np.log10(self.thetaU / self.thetaL))
theta0 = 10. ** log10theta0
# Run Cobyla
try:
log10_optimal_theta = \
optimize.fmin_cobyla(minus_reduced_likelihood_function,
np.log10(theta0).ravel(), constraints,
iprint=0)
except ValueError as ve:
print("Optimization failed. Try increasing the ``nugget``")
raise ve
optimal_theta = 10. ** log10_optimal_theta
optimal_rlf_value, optimal_par = \
self.reduced_likelihood_function(theta=optimal_theta)
# Compare the new optimizer to the best previous one
if k > 0:
if optimal_rlf_value > best_optimal_rlf_value:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
else:
best_optimal_rlf_value = optimal_rlf_value
best_optimal_par = optimal_par
best_optimal_theta = optimal_theta
if self.verbose and self.random_start > 1:
if (20 * k) / self.random_start > percent_completed:
percent_completed = (20 * k) / self.random_start
print("%s completed" % (5 * percent_completed))
optimal_rlf_value = best_optimal_rlf_value
optimal_par = best_optimal_par
optimal_theta = best_optimal_theta
elif self.optimizer == 'Welch':
            # Backup of the given attributes
theta0, thetaL, thetaU = self.theta0, self.thetaL, self.thetaU
corr = self.corr
verbose = self.verbose
# This will iterate over fmin_cobyla optimizer
self.optimizer = 'fmin_cobyla'
self.verbose = False
# Initialize under isotropy assumption
if verbose:
print("Initialize under isotropy assumption...")
self.theta0 = check_array(self.theta0.min())
self.thetaL = check_array(self.thetaL.min())
self.thetaU = check_array(self.thetaU.max())
theta_iso, optimal_rlf_value_iso, par_iso = \
self._arg_max_reduced_likelihood_function()
optimal_theta = theta_iso + np.zeros(theta0.shape)
# Iterate over all dimensions of theta allowing for anisotropy
if verbose:
print("Now improving allowing for anisotropy...")
for i in self.random_state.permutation(theta0.size):
if verbose:
print("Proceeding along dimension %d..." % (i + 1))
self.theta0 = check_array(theta_iso)
self.thetaL = check_array(thetaL[0, i])
self.thetaU = check_array(thetaU[0, i])
def corr_cut(t, d):
return corr(check_array(np.hstack([optimal_theta[0][0:i],
t[0],
optimal_theta[0][(i +
1)::]])),
d)
self.corr = corr_cut
optimal_theta[0, i], optimal_rlf_value, optimal_par = \
self._arg_max_reduced_likelihood_function()
            # Restore the given attributes
self.theta0, self.thetaL, self.thetaU = theta0, thetaL, thetaU
self.corr = corr
self.optimizer = 'Welch'
self.verbose = verbose
else:
raise NotImplementedError("This optimizer ('%s') is not "
"implemented yet. Please contribute!"
% self.optimizer)
return optimal_theta, optimal_rlf_value, optimal_par
def _check_params(self, n_samples=None):
# Check regression model
if not callable(self.regr):
if self.regr in self._regression_types:
self.regr = self._regression_types[self.regr]
else:
raise ValueError("regr should be one of %s or callable, "
"%s was given."
% (self._regression_types.keys(), self.regr))
# Check regression weights if given (Ordinary Kriging)
if self.beta0 is not None:
self.beta0 = np.atleast_2d(self.beta0)
if self.beta0.shape[1] != 1:
# Force to column vector
self.beta0 = self.beta0.T
# Check correlation model
if not callable(self.corr):
if self.corr in self._correlation_types:
self.corr = self._correlation_types[self.corr]
else:
raise ValueError("corr should be one of %s or callable, "
"%s was given."
% (self._correlation_types.keys(), self.corr))
# Check storage mode
if self.storage_mode != 'full' and self.storage_mode != 'light':
raise ValueError("Storage mode should either be 'full' or "
"'light', %s was given." % self.storage_mode)
# Check correlation parameters
self.theta0 = np.atleast_2d(self.theta0)
lth = self.theta0.size
if self.thetaL is not None and self.thetaU is not None:
self.thetaL = np.atleast_2d(self.thetaL)
self.thetaU = np.atleast_2d(self.thetaU)
if self.thetaL.size != lth or self.thetaU.size != lth:
raise ValueError("theta0, thetaL and thetaU must have the "
"same length.")
if np.any(self.thetaL <= 0) or np.any(self.thetaU < self.thetaL):
raise ValueError("The bounds must satisfy O < thetaL <= "
"thetaU.")
elif self.thetaL is None and self.thetaU is None:
if np.any(self.theta0 <= 0):
raise ValueError("theta0 must be strictly positive.")
elif self.thetaL is None or self.thetaU is None:
raise ValueError("thetaL and thetaU should either be both or "
"neither specified.")
# Force verbose type to bool
self.verbose = bool(self.verbose)
# Force normalize type to bool
self.normalize = bool(self.normalize)
# Check nugget value
self.nugget = np.asarray(self.nugget)
        if np.any(self.nugget < 0.):
raise ValueError("nugget must be positive or zero.")
if (n_samples is not None
and self.nugget.shape not in [(), (n_samples,)]):
raise ValueError("nugget must be either a scalar "
"or array of length n_samples.")
# Check optimizer
if self.optimizer not in self._optimizer_types:
raise ValueError("optimizer should be one of %s"
% self._optimizer_types)
# Force random_start type to int
self.random_start = int(self.random_start)
| bsd-3-clause |
nmartensen/pandas | pandas/io/sql.py | 3 | 58612 | # -*- coding: utf-8 -*-
"""
Collection of query wrappers / abstractions to both facilitate data
retrieval and to reduce dependency on DB-specific API.
"""
from __future__ import print_function, division
from datetime import datetime, date, time
import warnings
import re
import numpy as np
import pandas._libs.lib as lib
from pandas.core.dtypes.missing import isna
from pandas.core.dtypes.dtypes import DatetimeTZDtype
from pandas.core.dtypes.common import (
is_list_like, is_dict_like,
is_datetime64tz_dtype)
from pandas.compat import (map, zip, raise_with_traceback,
string_types, text_type)
from pandas.core.api import DataFrame, Series
from pandas.core.base import PandasObject
from pandas.core.tools.datetimes import to_datetime
from contextlib import contextmanager
class SQLAlchemyRequired(ImportError):
pass
class DatabaseError(IOError):
pass
# -----------------------------------------------------------------------------
# -- Helper functions
_SQLALCHEMY_INSTALLED = None
def _validate_flavor_parameter(flavor):
"""
Checks whether a database 'flavor' was specified.
If not None, produces FutureWarning if 'sqlite' and
raises a ValueError if anything else.
"""
if flavor is not None:
if flavor == 'sqlite':
warnings.warn("the 'flavor' parameter is deprecated "
"and will be removed in a future version, "
"as 'sqlite' is the only supported option "
"when SQLAlchemy is not installed.",
FutureWarning, stacklevel=2)
else:
raise ValueError("database flavor {flavor} is not "
"supported".format(flavor=flavor))
def _is_sqlalchemy_connectable(con):
global _SQLALCHEMY_INSTALLED
if _SQLALCHEMY_INSTALLED is None:
try:
import sqlalchemy
_SQLALCHEMY_INSTALLED = True
from distutils.version import LooseVersion
ver = LooseVersion(sqlalchemy.__version__)
# For sqlalchemy versions < 0.8.2, the BIGINT type is recognized
# for a sqlite engine, which results in a warning when trying to
# read/write a DataFrame with int64 values. (GH7433)
if ver < '0.8.2':
from sqlalchemy import BigInteger
from sqlalchemy.ext.compiler import compiles
@compiles(BigInteger, 'sqlite')
def compile_big_int_sqlite(type_, compiler, **kw):
return 'INTEGER'
except ImportError:
_SQLALCHEMY_INSTALLED = False
if _SQLALCHEMY_INSTALLED:
import sqlalchemy
return isinstance(con, sqlalchemy.engine.Connectable)
else:
return False
def _convert_params(sql, params):
"""convert sql and params args to DBAPI2.0 compliant format"""
args = [sql]
if params is not None:
if hasattr(params, 'keys'): # test if params is a mapping
args += [params]
else:
args += [list(params)]
return args
def _handle_date_column(col, utc=None, format=None):
if isinstance(format, dict):
return to_datetime(col, errors='ignore', **format)
else:
if format in ['D', 's', 'ms', 'us', 'ns']:
return to_datetime(col, errors='coerce', unit=format, utc=utc)
elif (issubclass(col.dtype.type, np.floating) or
issubclass(col.dtype.type, np.integer)):
# parse dates as timestamp
format = 's' if format is None else format
return to_datetime(col, errors='coerce', unit=format, utc=utc)
elif is_datetime64tz_dtype(col):
# coerce to UTC timezone
# GH11216
return (to_datetime(col, errors='coerce')
.astype('datetime64[ns, UTC]'))
else:
return to_datetime(col, errors='coerce', format=format, utc=utc)
def _parse_date_columns(data_frame, parse_dates):
"""
    Force the columns listed in parse_dates to be parsed as datetimes.
Supports both string formatted and integer timestamp columns
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for col_name in parse_dates:
df_col = data_frame[col_name]
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
data_frame[col_name] = _handle_date_column(df_col, format=fmt)
# we want to coerce datetime64_tz dtypes for now
# we could in theory do a 'nice' conversion from a FixedOffset tz
# GH11216
for col_name, df_col in data_frame.iteritems():
if is_datetime64tz_dtype(df_col):
data_frame[col_name] = _handle_date_column(df_col)
return data_frame
def _wrap_result(data, columns, index_col=None, coerce_float=True,
parse_dates=None):
"""Wrap result set of query in a DataFrame """
frame = DataFrame.from_records(data, columns=columns,
coerce_float=coerce_float)
_parse_date_columns(frame, parse_dates)
if index_col is not None:
frame.set_index(index_col, inplace=True)
return frame
def execute(sql, con, cur=None, params=None):
"""
Execute the given SQL query using the provided connection object.
Parameters
----------
sql : string
Query to be executed
con : SQLAlchemy connectable(engine/connection) or sqlite3 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
cur : deprecated, cursor is obtained from connection, default: None
params : list or tuple, optional, default: None
List of parameters to pass to execute method.
Returns
-------
Results Iterable
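    Examples
    --------
    A minimal sqlite3 sketch (hypothetical in-memory table; with the sqlite3
    fallback the DBAPI cursor is returned):
    >>> import sqlite3
    >>> con = sqlite3.connect(':memory:')
    >>> _ = con.execute("CREATE TABLE demo (a INTEGER)")
    >>> _ = execute("INSERT INTO demo VALUES (?)", con, params=[1])
    >>> execute("SELECT a FROM demo", con).fetchall()
    [(1,)]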
"""
if cur is None:
pandas_sql = pandasSQL_builder(con)
else:
pandas_sql = pandasSQL_builder(cur, is_cursor=True)
args = _convert_params(sql, params)
return pandas_sql.execute(*args)
# -----------------------------------------------------------------------------
# -- Read and write to DataFrames
def read_sql_table(table_name, con, schema=None, index_col=None,
coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Given a table name and an SQLAlchemy connectable, returns a DataFrame.
This function does not support DBAPI connections.
Parameters
----------
table_name : string
Name of SQL table in database
con : SQLAlchemy connectable (or database string URI)
Sqlite DBAPI connection mode not supported
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If None, use default schema (default).
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
        decimal.Decimal) to floating point. Can result in loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information will be converted to UTC
See also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql
"""
con = _engine_builder(con)
if not _is_sqlalchemy_connectable(con):
raise NotImplementedError("read_sql_table only supported for "
"SQLAlchemy connectable.")
import sqlalchemy
from sqlalchemy.schema import MetaData
meta = MetaData(con, schema=schema)
try:
meta.reflect(only=[table_name], views=True)
except sqlalchemy.exc.InvalidRequestError:
raise ValueError("Table %s not found" % table_name)
pandas_sql = SQLDatabase(con, meta=meta)
table = pandas_sql.read_table(
table_name, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
if table is not None:
return table
else:
raise ValueError("Table %s not found" % table_name, con)
def read_sql_query(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default integer index will be used.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed.
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number of
rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
Any datetime values with time zone information parsed via the `parse_dates`
parameter will be converted to UTC
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
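    Examples
    --------
    A small sqlite3-based sketch (hypothetical table and data):
    >>> import sqlite3
    >>> con = sqlite3.connect(':memory:')
    >>> _ = con.execute("CREATE TABLE demo (a INTEGER, b TEXT)")
    >>> _ = con.execute("INSERT INTO demo VALUES (1, 'x')")
    >>> read_sql_query("SELECT * FROM demo", con)
       a  b
    0  1  x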
"""
pandas_sql = pandasSQL_builder(con)
return pandas_sql.read_query(
sql, index_col=index_col, params=params, coerce_float=coerce_float,
parse_dates=parse_dates, chunksize=chunksize)
def read_sql(sql, con, index_col=None, coerce_float=True, params=None,
parse_dates=None, columns=None, chunksize=None):
"""
Read SQL query or database table into a DataFrame.
Parameters
----------
sql : string SQL query or SQLAlchemy Selectable (select or text object)
to be executed, or database table name.
con : SQLAlchemy connectable(engine/connection) or database string URI
or DBAPI2 connection (fallback mode)
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex)
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
Eg. for psycopg2, uses %(name)s so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict corresponds
to the keyword arguments of :func:`pandas.to_datetime`
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table (only used when reading
a table).
chunksize : int, default None
If specified, return an iterator where `chunksize` is the
number of rows to include in each chunk.
Returns
-------
DataFrame
Notes
-----
This function is a convenience wrapper around ``read_sql_table`` and
    ``read_sql_query`` (kept for backward compatibility) and will delegate
to the specific function depending on the provided input (database
table name or sql query). The delegated function might have more specific
notes about their functionality not listed here.
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql_query : Read SQL query into a DataFrame
"""
pandas_sql = pandasSQL_builder(con)
if isinstance(pandas_sql, SQLiteDatabase):
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
try:
_is_table_name = pandas_sql.has_table(sql)
except:
_is_table_name = False
if _is_table_name:
pandas_sql.meta.reflect(only=[sql])
return pandas_sql.read_table(
sql, index_col=index_col, coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns, chunksize=chunksize)
else:
return pandas_sql.read_query(
sql, index_col=index_col, params=params,
coerce_float=coerce_float, parse_dates=parse_dates,
chunksize=chunksize)
def to_sql(frame, name, con, flavor=None, schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
con : SQLAlchemy connectable(engine/connection) or database string URI
or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
.. deprecated:: 0.19.0
'sqlite' is the only supported option if SQLAlchemy is not
used.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
if_exists : {'fail', 'replace', 'append'}, default 'fail'
- fail: If table exists, do nothing.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single SQLtype or dict of column name to SQL type, default None
Optional specifying the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
If all columns are of the same type, one single value can be used.
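    Examples
    --------
    A minimal round-trip sketch using the sqlite3 fallback (hypothetical
    table name ``demo``):
    >>> import sqlite3
    >>> from pandas import DataFrame
    >>> con = sqlite3.connect(':memory:')
    >>> to_sql(DataFrame({'a': [1, 2]}), 'demo', con, index=False)
    >>> read_sql_query("SELECT * FROM demo", con)
       a
    0  1
    1  2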
"""
if if_exists not in ('fail', 'replace', 'append'):
raise ValueError("'{0}' is not valid for if_exists".format(if_exists))
pandas_sql = pandasSQL_builder(con, schema=schema, flavor=flavor)
if isinstance(frame, Series):
frame = frame.to_frame()
elif not isinstance(frame, DataFrame):
raise NotImplementedError("'frame' argument should be either a "
"Series or a DataFrame")
pandas_sql.to_sql(frame, name, if_exists=if_exists, index=index,
index_label=index_label, schema=schema,
chunksize=chunksize, dtype=dtype)
def has_table(table_name, con, flavor=None, schema=None):
"""
Check if DataBase has named table.
Parameters
----------
table_name: string
Name of SQL table
con: SQLAlchemy connectable(engine/connection) or sqlite3 DBAPI2 connection
Using SQLAlchemy makes it possible to use any DB supported by that
library.
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
.. deprecated:: 0.19.0
'sqlite' is the only supported option if SQLAlchemy is not
installed.
schema : string, default None
Name of SQL schema in database to write to (if database flavor supports
this). If None, use default schema (default).
Returns
-------
boolean
"""
pandas_sql = pandasSQL_builder(con, flavor=flavor, schema=schema)
return pandas_sql.has_table(table_name)
table_exists = has_table
def _engine_builder(con):
"""
Returns a SQLAlchemy engine from a URI (if con is a string)
else it just return con without modifying it
"""
global _SQLALCHEMY_INSTALLED
if isinstance(con, string_types):
try:
import sqlalchemy
except ImportError:
_SQLALCHEMY_INSTALLED = False
else:
con = sqlalchemy.create_engine(con)
return con
return con
def pandasSQL_builder(con, flavor=None, schema=None, meta=None,
is_cursor=False):
"""
Convenience function to return the correct PandasSQL subclass based on the
provided parameters
"""
_validate_flavor_parameter(flavor)
# When support for DBAPI connections is removed,
# is_cursor should not be necessary.
con = _engine_builder(con)
if _is_sqlalchemy_connectable(con):
return SQLDatabase(con, schema=schema, meta=meta)
elif isinstance(con, string_types):
raise ImportError("Using URI string without sqlalchemy installed.")
else:
return SQLiteDatabase(con, is_cursor=is_cursor)
class SQLTable(PandasObject):
"""
For mapping Pandas tables to SQL tables.
Uses fact that table is reflected by SQLAlchemy to
do better type convertions.
Also holds various flags needed to avoid having to
pass them between functions all the time.
"""
# TODO: support for multiIndex
def __init__(self, name, pandas_sql_engine, frame=None, index=True,
if_exists='fail', prefix='pandas', index_label=None,
schema=None, keys=None, dtype=None):
self.name = name
self.pd_sql = pandas_sql_engine
self.prefix = prefix
self.frame = frame
self.index = self._index_name(index, index_label)
self.schema = schema
self.if_exists = if_exists
self.keys = keys
self.dtype = dtype
if frame is not None:
# We want to initialize based on a dataframe
self.table = self._create_table_setup()
else:
# no data provided, read-only mode
self.table = self.pd_sql.get_table(self.name, self.schema)
if self.table is None:
raise ValueError("Could not init table '%s'" % name)
def exists(self):
return self.pd_sql.has_table(self.name, self.schema)
def sql_schema(self):
from sqlalchemy.schema import CreateTable
return str(CreateTable(self.table).compile(self.pd_sql.connectable))
def _execute_create(self):
# Inserting table into database, add to MetaData object
self.table = self.table.tometadata(self.pd_sql.meta)
self.table.create()
def create(self):
if self.exists():
if self.if_exists == 'fail':
raise ValueError("Table '%s' already exists." % self.name)
elif self.if_exists == 'replace':
self.pd_sql.drop_table(self.name, self.schema)
self._execute_create()
elif self.if_exists == 'append':
pass
else:
raise ValueError(
"'{0}' is not valid for if_exists".format(self.if_exists))
else:
self._execute_create()
def insert_statement(self):
return self.table.insert()
def insert_data(self):
if self.index is not None:
temp = self.frame.copy()
temp.index.names = self.index
try:
temp.reset_index(inplace=True)
except ValueError as err:
raise ValueError(
"duplicate name in index/columns: {0}".format(err))
else:
temp = self.frame
column_names = list(map(text_type, temp.columns))
ncols = len(column_names)
data_list = [None] * ncols
blocks = temp._data.blocks
for i in range(len(blocks)):
b = blocks[i]
if b.is_datetime:
# convert to microsecond resolution so this yields
# datetime.datetime
d = b.values.astype('M8[us]').astype(object)
else:
d = np.array(b.get_values(), dtype=object)
# replace NaN with None
if b._can_hold_na:
mask = isna(d)
d[mask] = None
for col_loc, col in zip(b.mgr_locs, d):
data_list[col_loc] = col
return column_names, data_list
def _execute_insert(self, conn, keys, data_iter):
data = [dict((k, v) for k, v in zip(keys, row)) for row in data_iter]
conn.execute(self.insert_statement(), data)
def insert(self, chunksize=None):
keys, data_list = self.insert_data()
nrows = len(self.frame)
if nrows == 0:
return
if chunksize is None:
chunksize = nrows
elif chunksize == 0:
raise ValueError('chunksize argument should be non-zero')
chunks = int(nrows / chunksize) + 1
with self.pd_sql.run_transaction() as conn:
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
chunk_iter = zip(*[arr[start_i:end_i] for arr in data_list])
self._execute_insert(conn, keys, chunk_iter)
def _query_iterator(self, result, chunksize, columns, coerce_float=True,
parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
self.frame = DataFrame.from_records(
data, columns=columns, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
yield self.frame
def read(self, coerce_float=True, parse_dates=None, columns=None,
chunksize=None):
if columns is not None and len(columns) > 0:
from sqlalchemy import select
cols = [self.table.c[n] for n in columns]
if self.index is not None:
[cols.insert(0, self.table.c[idx]) for idx in self.index[::-1]]
sql_select = select(cols)
else:
sql_select = self.table.select()
result = self.pd_sql.execute(sql_select)
column_names = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, column_names,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
self.frame = DataFrame.from_records(
data, columns=column_names, coerce_float=coerce_float)
self._harmonize_columns(parse_dates=parse_dates)
if self.index is not None:
self.frame.set_index(self.index, inplace=True)
return self.frame
def _index_name(self, index, index_label):
# for writing: index=True to include index in sql table
if index is True:
nlevels = self.frame.index.nlevels
# if index_label is specified, set this as index name(s)
if index_label is not None:
if not isinstance(index_label, list):
index_label = [index_label]
if len(index_label) != nlevels:
raise ValueError(
"Length of 'index_label' should match number of "
"levels, which is {0}".format(nlevels))
else:
return index_label
# return the used column labels for the index columns
if (nlevels == 1 and 'index' not in self.frame.columns and
self.frame.index.name is None):
return ['index']
else:
return [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(self.frame.index.names)]
# for reading: index=(list of) string to specify column to set as index
elif isinstance(index, string_types):
return [index]
elif isinstance(index, list):
return index
else:
return None
def _get_column_names_and_types(self, dtype_mapper):
column_names_and_types = []
if self.index is not None:
for i, idx_label in enumerate(self.index):
idx_type = dtype_mapper(
self.frame.index._get_level_values(i))
column_names_and_types.append((text_type(idx_label),
idx_type, True))
column_names_and_types += [
(text_type(self.frame.columns[i]),
dtype_mapper(self.frame.iloc[:, i]),
False)
for i in range(len(self.frame.columns))
]
return column_names_and_types
def _create_table_setup(self):
from sqlalchemy import Table, Column, PrimaryKeyConstraint
column_names_and_types = \
self._get_column_names_and_types(self._sqlalchemy_type)
columns = [Column(name, typ, index=is_index)
for name, typ, is_index in column_names_and_types]
if self.keys is not None:
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
pkc = PrimaryKeyConstraint(*keys, name=self.name + '_pk')
columns.append(pkc)
schema = self.schema or self.pd_sql.meta.schema
# At this point, attach to new metadata, only attach to self.meta
# once table is created.
from sqlalchemy.schema import MetaData
meta = MetaData(self.pd_sql, schema=schema)
return Table(self.name, meta, *columns, schema=schema)
def _harmonize_columns(self, parse_dates=None):
"""
Make the DataFrame's column types align with the SQL table
column types.
Need to work around limited NA value support. Floats are always
fine, ints must always be floats if there are Null values.
        Booleans are hard because converting a bool column with None replaces
        all Nones with False. Therefore only convert bool if there are no
NA values.
Datetimes should already be converted to np.datetime64 if supported,
but here we also force conversion if required
"""
# handle non-list entries for parse_dates gracefully
if parse_dates is True or parse_dates is None or parse_dates is False:
parse_dates = []
if not hasattr(parse_dates, '__iter__'):
parse_dates = [parse_dates]
for sql_col in self.table.columns:
col_name = sql_col.name
try:
df_col = self.frame[col_name]
# the type the dataframe column should have
col_type = self._get_dtype(sql_col.type)
if (col_type is datetime or col_type is date or
col_type is DatetimeTZDtype):
# Convert tz-aware Datetime SQL columns to UTC
utc = col_type is DatetimeTZDtype
self.frame[col_name] = _handle_date_column(df_col, utc=utc)
elif col_type is float:
# floats support NA, can always convert!
self.frame[col_name] = df_col.astype(col_type, copy=False)
elif len(df_col) == df_col.count():
# No NA values, can convert ints and bools
if col_type is np.dtype('int64') or col_type is bool:
self.frame[col_name] = df_col.astype(
col_type, copy=False)
# Handle date parsing
if col_name in parse_dates:
try:
fmt = parse_dates[col_name]
except TypeError:
fmt = None
self.frame[col_name] = _handle_date_column(
df_col, format=fmt)
except KeyError:
pass # this column not in results
def _get_notna_col_dtype(self, col):
"""
Infer datatype of the Series col. In case the dtype of col is 'object'
and it contains NA values, this infers the datatype of the not-NA
values. Needed for inserting typed data containing NULLs, GH8778.
"""
col_for_inference = col
if col.dtype == 'object':
notnadata = col[~isna(col)]
if len(notnadata):
col_for_inference = notnadata
return lib.infer_dtype(col_for_inference)
def _sqlalchemy_type(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return self.dtype[col.name]
col_type = self._get_notna_col_dtype(col)
from sqlalchemy.types import (BigInteger, Integer, Float,
Text, Boolean,
DateTime, Date, Time)
if col_type == 'datetime64' or col_type == 'datetime':
try:
tz = col.tzinfo # noqa
return DateTime(timezone=True)
except:
return DateTime
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
return BigInteger
elif col_type == 'floating':
if col.dtype == 'float32':
return Float(precision=23)
else:
return Float(precision=53)
elif col_type == 'integer':
if col.dtype == 'int32':
return Integer
else:
return BigInteger
elif col_type == 'boolean':
return Boolean
elif col_type == 'date':
return Date
elif col_type == 'time':
return Time
elif col_type == 'complex':
raise ValueError('Complex datatypes not supported')
return Text
def _get_dtype(self, sqltype):
from sqlalchemy.types import (Integer, Float, Boolean, DateTime,
Date, TIMESTAMP)
if isinstance(sqltype, Float):
return float
elif isinstance(sqltype, Integer):
# TODO: Refine integer size.
return np.dtype('int64')
elif isinstance(sqltype, TIMESTAMP):
# we have a timezone capable type
if not sqltype.timezone:
return datetime
return DatetimeTZDtype
elif isinstance(sqltype, DateTime):
# Caution: np.datetime64 is also a subclass of np.number.
return datetime
elif isinstance(sqltype, Date):
return date
elif isinstance(sqltype, Boolean):
return bool
return object
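# Illustrative sketch (added, not original code) of the NA limitations that
# _harmonize_columns and _get_notna_col_dtype work around: an integer column
# read back with NULLs can only be held as float64, and dtype inference on an
# object column is done on the not-NA values only (GH8778). The public
# pandas.api.types.infer_dtype is used here as a stand-in for lib.infer_dtype.
def _example_na_handling():
    import numpy as np
    from pandas import Series, isna
    from pandas.api.types import infer_dtype
    with_null = Series([1, 2, None])
    assert with_null.dtype == np.dtype('float64')  # NULL forces float
    obj_col = Series([1.5, 2.5, None], dtype=object)
    return infer_dtype(obj_col[~isna(obj_col)])  # 'floating'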
class PandasSQL(PandasObject):
"""
    Subclasses should define read_sql and to_sql.
"""
def read_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
def to_sql(self, *args, **kwargs):
raise ValueError("PandasSQL must be created with an SQLAlchemy "
"connectable or sqlite connection")
class SQLDatabase(PandasSQL):
"""
    This class enables conversion between DataFrame and SQL databases
    using SQLAlchemy to handle database abstraction.
Parameters
----------
engine : SQLAlchemy connectable
Connectable to connect with the database. Using SQLAlchemy makes it
possible to use any DB supported by that library.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If None, use default schema (default).
meta : SQLAlchemy MetaData object, default None
        If provided, this MetaData object is used instead of a newly
        created one. This allows specifying database-flavor-specific
        arguments in the MetaData object.
"""
def __init__(self, engine, schema=None, meta=None):
self.connectable = engine
if not meta:
from sqlalchemy.schema import MetaData
meta = MetaData(self.connectable, schema=schema)
self.meta = meta
@contextmanager
def run_transaction(self):
with self.connectable.begin() as tx:
if hasattr(tx, 'execute'):
yield tx
else:
yield self.connectable
def execute(self, *args, **kwargs):
"""Simple passthrough to SQLAlchemy connectable"""
return self.connectable.execute(*args, **kwargs)
def read_table(self, table_name, index_col=None, coerce_float=True,
parse_dates=None, columns=None, schema=None,
chunksize=None):
"""Read SQL database table into a DataFrame.
Parameters
----------
table_name : string
Name of SQL table in database
index_col : string, optional, default: None
Column to set as index
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects
(like decimal.Decimal) to floating point. This can result in
loss of precision.
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg}``, where the arg corresponds
to the keyword arguments of :func:`pandas.to_datetime`.
Especially useful with databases without native Datetime support,
such as SQLite
columns : list, default: None
List of column names to select from sql table
schema : string, default None
Name of SQL schema in database to query (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
pandas.read_sql_table
SQLDatabase.read_query
"""
table = SQLTable(table_name, self, index=index_col, schema=schema)
return table.read(coerce_float=coerce_float,
parse_dates=parse_dates, columns=columns,
chunksize=chunksize)
@staticmethod
def _query_iterator(result, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = result.fetchmany(chunksize)
if not data:
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True,
parse_dates=None, params=None, chunksize=None):
"""Read SQL query into a DataFrame.
Parameters
----------
sql : string
SQL query to be executed
index_col : string, optional, default: None
Column name to use as index for the returned DataFrame object.
coerce_float : boolean, default True
Attempt to convert values of non-string, non-numeric objects (like
decimal.Decimal) to floating point, useful for SQL result sets
params : list, tuple or dict, optional, default: None
List of parameters to pass to execute method. The syntax used
to pass parameters is database driver dependent. Check your
database driver documentation for which of the five syntax styles,
described in PEP 249's paramstyle, is supported.
            E.g. for psycopg2, it uses %(name)s, so use params={'name' : 'value'}
parse_dates : list or dict, default: None
- List of column names to parse as dates
- Dict of ``{column_name: format string}`` where format string is
strftime compatible in case of parsing string times or is one of
(D, s, ns, ms, us) in case of parsing integer timestamps
- Dict of ``{column_name: arg dict}``, where the arg dict
corresponds to the keyword arguments of
:func:`pandas.to_datetime` Especially useful with databases
without native Datetime support, such as SQLite
chunksize : int, default None
If specified, return an iterator where `chunksize` is the number
of rows to include in each chunk.
Returns
-------
DataFrame
See also
--------
read_sql_table : Read SQL database table into a DataFrame
read_sql
"""
args = _convert_params(sql, params)
result = self.execute(*args)
columns = result.keys()
if chunksize is not None:
return self._query_iterator(result, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = result.fetchall()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
read_sql = read_query
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame : DataFrame
name : string
Name of SQL table
if_exists : {'fail', 'replace', 'append'}, default 'fail'
            - fail: If table exists, raise a ValueError.
- replace: If table exists, drop it, recreate it, and insert data.
- append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
Name of SQL schema in database to write to (if database flavor
supports this). If specified, this overwrites the default
schema of the SQLDatabase object.
chunksize : int, default None
If not None, then rows will be written in batches of this size at a
time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type. If all columns are of the same type, one
single value can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
from sqlalchemy.types import to_instance, TypeEngine
for col, my_type in dtype.items():
if not isinstance(to_instance(my_type), TypeEngine):
raise ValueError('The type of %s is not a SQLAlchemy '
'type ' % col)
table = SQLTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
schema=schema, dtype=dtype)
table.create()
table.insert(chunksize)
if (not name.isdigit() and not name.islower()):
            # check for potential case sensitivity issues (GH7815)
# Only check when name is not a number and name is not lower case
engine = self.connectable.engine
with self.connectable.connect() as conn:
table_names = engine.table_names(
schema=schema or self.meta.schema,
connection=conn,
)
if name not in table_names:
msg = (
"The provided table name '{0}' is not found exactly as "
"such in the database after writing the table, possibly "
"due to case sensitivity issues. Consider using lower "
"case table names."
).format(name)
warnings.warn(msg, UserWarning)
@property
def tables(self):
return self.meta.tables
def has_table(self, name, schema=None):
return self.connectable.run_callable(
self.connectable.dialect.has_table,
name,
schema or self.meta.schema,
)
def get_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if schema:
tbl = self.meta.tables.get('.'.join([schema, table_name]))
else:
tbl = self.meta.tables.get(table_name)
# Avoid casting double-precision floats into decimals
from sqlalchemy import Numeric
for column in tbl.columns:
if isinstance(column.type, Numeric):
column.type.asdecimal = False
return tbl
def drop_table(self, table_name, schema=None):
schema = schema or self.meta.schema
if self.has_table(table_name, schema):
self.meta.reflect(only=[table_name], schema=schema)
self.get_table(table_name, schema).drop()
self.meta.clear()
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLTable(table_name, self, frame=frame, index=False, keys=keys,
dtype=dtype)
return str(table.sql_schema())
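# Illustrative round-trip sketch for SQLDatabase (added; not part of the
# original module). It assumes a SQLAlchemy 1.x engine, which is what this
# module's connectable handling expects; the table name and frame are made up.
def _example_sqldatabase_roundtrip():
    from pandas import DataFrame
    from sqlalchemy import create_engine
    engine = create_engine('sqlite:///:memory:')
    db = SQLDatabase(engine)
    frame = DataFrame({'a': [1, 2, 3],
                       'ts': ['2016-01-01', '2016-01-02', '2016-01-03']})
    # write, then read back both by table name and by ad-hoc query
    db.to_sql(frame, 'demo_table', if_exists='replace', index=False)
    by_table = db.read_table('demo_table', parse_dates=['ts'])
    by_query = db.read_query('SELECT a, ts FROM demo_table WHERE a > 1')
    return by_table, by_query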
# ---- SQL without SQLAlchemy ---
# sqlite-specific sql strings and handler class
# dictionary used for readability purposes
_SQL_TYPES = {
'string': 'TEXT',
'floating': 'REAL',
'integer': 'INTEGER',
'datetime': 'TIMESTAMP',
'date': 'DATE',
'time': 'TIME',
'boolean': 'INTEGER',
}
def _get_unicode_name(name):
try:
uname = text_type(name).encode("utf-8", "strict").decode("utf-8")
except UnicodeError:
raise ValueError("Cannot convert identifier to UTF-8: '%s'" % name)
return uname
def _get_valid_sqlite_name(name):
# See http://stackoverflow.com/questions/6514274/how-do-you-escape-strings\
# -for-sqlite-table-column-names-in-python
# Ensure the string can be encoded as UTF-8.
# Ensure the string does not include any NUL characters.
# Replace all " with "".
# Wrap the entire thing in double quotes.
uname = _get_unicode_name(name)
if not len(uname):
raise ValueError("Empty table or column name specified")
nul_index = uname.find("\x00")
if nul_index >= 0:
raise ValueError('SQLite identifier cannot contain NULs')
return '"' + uname.replace('"', '""') + '"'
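# Illustrative check (added) of the escaping rules described above: names are
# wrapped in double quotes and embedded double quotes are doubled, so even SQL
# keywords or spaced names become safe SQLite identifiers.
def _example_sqlite_name_escaping():
    assert _get_valid_sqlite_name('plain') == '"plain"'
    assert _get_valid_sqlite_name('select') == '"select"'
    assert _get_valid_sqlite_name('with "quote"') == '"with ""quote"""'
    return _get_valid_sqlite_name('column name with spaces')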
_SAFE_NAMES_WARNING = ("The spaces in these column names will not be changed. "
"In pandas versions < 0.14, spaces were converted to "
"underscores.")
class SQLiteTable(SQLTable):
"""
Patch the SQLTable for fallback support.
Instead of a table variable just use the Create Table statement.
"""
def __init__(self, *args, **kwargs):
# GH 8341
# register an adapter callable for datetime.time object
import sqlite3
# this will transform time(12,34,56,789) into '12:34:56.000789'
# (this is what sqlalchemy does)
sqlite3.register_adapter(time, lambda _: _.strftime("%H:%M:%S.%f"))
super(SQLiteTable, self).__init__(*args, **kwargs)
def sql_schema(self):
return str(";\n".join(self.table))
def _execute_create(self):
with self.pd_sql.run_transaction() as conn:
for stmt in self.table:
conn.execute(stmt)
def insert_statement(self):
names = list(map(text_type, self.frame.columns))
wld = '?' # wildcard char
escape = _get_valid_sqlite_name
if self.index is not None:
[names.insert(0, idx) for idx in self.index[::-1]]
bracketed_names = [escape(column) for column in names]
col_names = ','.join(bracketed_names)
wildcards = ','.join([wld] * len(names))
insert_statement = 'INSERT INTO %s (%s) VALUES (%s)' % (
escape(self.name), col_names, wildcards)
return insert_statement
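    # Added explanatory note (not original code): for a frame with columns
    # ['a', 'b'] written without the index, the statement built above looks
    # like
    #     INSERT INTO "table_name" ("a","b") VALUES (?,?)
    # i.e. identifiers are escaped with _get_valid_sqlite_name and values are
    # bound through sqlite's '?' placeholders by _execute_insert below.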
def _execute_insert(self, conn, keys, data_iter):
data_list = list(data_iter)
conn.executemany(self.insert_statement(), data_list)
def _create_table_setup(self):
"""
        Return a list of SQL statements that create a table reflecting the
        structure of a DataFrame. The first entry will be a CREATE TABLE
        statement while the rest will be CREATE INDEX statements.
"""
column_names_and_types = \
self._get_column_names_and_types(self._sql_type_name)
        pat = re.compile(r'\s+')
column_names = [col_name for col_name, _, _ in column_names_and_types]
if any(map(pat.search, column_names)):
warnings.warn(_SAFE_NAMES_WARNING, stacklevel=6)
escape = _get_valid_sqlite_name
create_tbl_stmts = [escape(cname) + ' ' + ctype
for cname, ctype, _ in column_names_and_types]
if self.keys is not None and len(self.keys):
if not is_list_like(self.keys):
keys = [self.keys]
else:
keys = self.keys
cnames_br = ", ".join([escape(c) for c in keys])
create_tbl_stmts.append(
"CONSTRAINT {tbl}_pk PRIMARY KEY ({cnames_br})".format(
tbl=self.name, cnames_br=cnames_br))
create_stmts = ["CREATE TABLE " + escape(self.name) + " (\n" +
',\n '.join(create_tbl_stmts) + "\n)"]
ix_cols = [cname for cname, _, is_index in column_names_and_types
if is_index]
if len(ix_cols):
cnames = "_".join(ix_cols)
cnames_br = ",".join([escape(c) for c in ix_cols])
create_stmts.append(
"CREATE INDEX " + escape("ix_" + self.name + "_" + cnames) +
"ON " + escape(self.name) + " (" + cnames_br + ")")
return create_stmts
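    # Added explanatory note (not original code): for a single integer column
    # 'a' written with index=False, the list built above is roughly
    #     ['CREATE TABLE "tbl" (\n"a" INTEGER\n)']
    # with an extra CREATE INDEX statement appended when index columns are
    # written as well.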
def _sql_type_name(self, col):
dtype = self.dtype or {}
if col.name in dtype:
return dtype[col.name]
col_type = self._get_notna_col_dtype(col)
if col_type == 'timedelta64':
warnings.warn("the 'timedelta' type is not supported, and will be "
"written as integer values (ns frequency) to the "
"database.", UserWarning, stacklevel=8)
col_type = "integer"
elif col_type == "datetime64":
col_type = "datetime"
elif col_type == "empty":
col_type = "string"
elif col_type == "complex":
raise ValueError('Complex datatypes not supported')
if col_type not in _SQL_TYPES:
col_type = "string"
return _SQL_TYPES[col_type]
class SQLiteDatabase(PandasSQL):
"""
Version of SQLDatabase to support sqlite connections (fallback without
sqlalchemy). This should only be used internally.
Parameters
----------
con : sqlite connection object
"""
def __init__(self, con, flavor=None, is_cursor=False):
_validate_flavor_parameter(flavor)
self.is_cursor = is_cursor
self.con = con
@contextmanager
def run_transaction(self):
cur = self.con.cursor()
try:
yield cur
self.con.commit()
except:
self.con.rollback()
raise
finally:
cur.close()
def execute(self, *args, **kwargs):
if self.is_cursor:
cur = self.con
else:
cur = self.con.cursor()
try:
if kwargs:
cur.execute(*args, **kwargs)
else:
cur.execute(*args)
return cur
except Exception as exc:
try:
self.con.rollback()
except Exception: # pragma: no cover
ex = DatabaseError("Execution failed on sql: %s\n%s\nunable"
" to rollback" % (args[0], exc))
raise_with_traceback(ex)
ex = DatabaseError(
"Execution failed on sql '%s': %s" % (args[0], exc))
raise_with_traceback(ex)
@staticmethod
def _query_iterator(cursor, chunksize, columns, index_col=None,
coerce_float=True, parse_dates=None):
"""Return generator through chunked result set"""
while True:
data = cursor.fetchmany(chunksize)
if type(data) == tuple:
data = list(data)
if not data:
cursor.close()
break
else:
yield _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
def read_query(self, sql, index_col=None, coerce_float=True, params=None,
parse_dates=None, chunksize=None):
args = _convert_params(sql, params)
cursor = self.execute(*args)
columns = [col_desc[0] for col_desc in cursor.description]
if chunksize is not None:
return self._query_iterator(cursor, chunksize, columns,
index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
else:
data = self._fetchall_as_list(cursor)
cursor.close()
frame = _wrap_result(data, columns, index_col=index_col,
coerce_float=coerce_float,
parse_dates=parse_dates)
return frame
def _fetchall_as_list(self, cur):
result = cur.fetchall()
if not isinstance(result, list):
result = list(result)
return result
def to_sql(self, frame, name, if_exists='fail', index=True,
index_label=None, schema=None, chunksize=None, dtype=None):
"""
Write records stored in a DataFrame to a SQL database.
Parameters
----------
frame: DataFrame
name: name of SQL table
if_exists: {'fail', 'replace', 'append'}, default 'fail'
            fail: If table exists, raise a ValueError.
replace: If table exists, drop it, recreate it, and insert data.
append: If table exists, insert data. Create if does not exist.
index : boolean, default True
Write DataFrame index as a column
index_label : string or sequence, default None
Column label for index column(s). If None is given (default) and
`index` is True, then the index names are used.
A sequence should be given if the DataFrame uses MultiIndex.
schema : string, default None
            Ignored parameter included for compatibility with SQLAlchemy
version of ``to_sql``.
chunksize : int, default None
If not None, then rows will be written in batches of this
size at a time. If None, all rows will be written at once.
dtype : single type or dict of column name to SQL type, default None
            Optionally specify the datatype for columns. The SQL type should
be a string. If all columns are of the same type, one single value
can be used.
"""
if dtype and not is_dict_like(dtype):
dtype = {col_name: dtype for col_name in frame}
if dtype is not None:
for col, my_type in dtype.items():
if not isinstance(my_type, str):
raise ValueError('%s (%s) not a string' % (
col, str(my_type)))
table = SQLiteTable(name, self, frame=frame, index=index,
if_exists=if_exists, index_label=index_label,
dtype=dtype)
table.create()
table.insert(chunksize)
def has_table(self, name, schema=None):
# TODO(wesm): unused?
# escape = _get_valid_sqlite_name
# esc_name = escape(name)
wld = '?'
query = ("SELECT name FROM sqlite_master "
"WHERE type='table' AND name=%s;") % wld
return len(self.execute(query, [name, ]).fetchall()) > 0
def get_table(self, table_name, schema=None):
return None # not supported in fallback mode
def drop_table(self, name, schema=None):
drop_sql = "DROP TABLE %s" % _get_valid_sqlite_name(name)
self.execute(drop_sql)
def _create_sql_schema(self, frame, table_name, keys=None, dtype=None):
table = SQLiteTable(table_name, self, frame=frame, index=False,
keys=keys, dtype=dtype)
return str(table.sql_schema())
def get_schema(frame, name, flavor=None, keys=None, con=None, dtype=None):
"""
Get the SQL db table schema for the given frame.
Parameters
----------
frame : DataFrame
name : string
name of SQL table
keys : string or sequence, default: None
        columns to use as primary key
con: an open SQL database connection object or a SQLAlchemy connectable
Using SQLAlchemy makes it possible to use any DB supported by that
library, default: None
If a DBAPI2 object, only sqlite3 is supported.
flavor : 'sqlite', default None
.. deprecated:: 0.19.0
'sqlite' is the only supported option if SQLAlchemy is not
installed.
dtype : dict of column name to SQL type, default None
        Optionally specify the datatype for columns. The SQL type should
be a SQLAlchemy type, or a string for sqlite3 fallback connection.
"""
pandas_sql = pandasSQL_builder(con=con, flavor=flavor)
return pandas_sql._create_sql_schema(frame, name, keys=keys, dtype=dtype)
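# Illustrative usage sketch for get_schema (added; not part of the original
# module). The frame and table name are made up; with a plain sqlite3
# connection the sqlite fallback emits the CREATE TABLE string.
def _example_get_schema():
    import sqlite3
    from pandas import DataFrame
    frame = DataFrame({'a': [1, 2], 'b': ['x', 'y']})
    con = sqlite3.connect(':memory:')
    return get_schema(frame, 'demo_table', keys='a', con=con)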
| bsd-3-clause |
anntzer/scikit-learn | sklearn/decomposition/tests/test_online_lda.py | 11 | 14166 | import sys
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from scipy.special import psi
import pytest
from sklearn.decomposition import LatentDirichletAllocation
from sklearn.decomposition._lda import (_dirichlet_expectation_1d,
_dirichlet_expectation_2d)
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import if_safe_multiprocessing_with_blas
from sklearn.exceptions import NotFittedError
from io import StringIO
def _build_sparse_mtx():
# Create 3 topics and each topic has 3 distinct words.
# (Each word only belongs to a single topic.)
n_components = 3
block = np.full((3, 3), n_components, dtype=int)
blocks = [block] * n_components
X = block_diag(*blocks)
X = csr_matrix(X)
return (n_components, X)
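# Added explanatory note: the matrix built above is 9 x 9 and block diagonal,
# i.e. documents 0-2 only use words 0-2, documents 3-5 only words 3-5, and so
# on, so a well-fitted 3-topic LDA should recover one block of 3 words per
# topic (this is what the correct_idx_grps assertions below check):
#
#     [[3 3 3 0 0 0 0 0 0]
#      [3 3 3 0 0 0 0 0 0]
#      [3 3 3 0 0 0 0 0 0]
#      [0 0 0 3 3 3 0 0 0]
#      ...
#      [0 0 0 0 0 0 3 3 3]]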
def test_lda_default_prior_params():
# default prior parameter should be `1 / topics`
# and verbose params should not affect result
n_components, X = _build_sparse_mtx()
prior = 1. / n_components
lda_1 = LatentDirichletAllocation(n_components=n_components,
doc_topic_prior=prior,
topic_word_prior=prior, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
random_state=0)
topic_distr_1 = lda_1.fit_transform(X)
topic_distr_2 = lda_2.fit_transform(X)
assert_almost_equal(topic_distr_1, topic_distr_2)
def test_lda_fit_batch():
    # Test LDA batch learning (`fit` method with 'batch' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
evaluate_every=1, learning_method='batch',
random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_fit_online():
# Test LDA online learning (`fit` method with 'online' learning)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=10., evaluate_every=1,
learning_method='online', random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_partial_fit():
# Test LDA online learning (`partial_fit` method)
    # (same as test_lda_fit_batch)
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=10., total_samples=100,
random_state=rng)
for i in range(3):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_dense_input():
# Test LDA with dense input.
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components,
learning_method='batch', random_state=rng)
lda.fit(X.toarray())
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for component in lda.components_:
# Find top 3 words in each LDA component
top_idx = set(component.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_transform():
# Test LDA transform.
# Transform result cannot be negative and should be normalized
rng = np.random.RandomState(0)
X = rng.randint(5, size=(20, 10))
n_components = 3
lda = LatentDirichletAllocation(n_components=n_components,
random_state=rng)
X_trans = lda.fit_transform(X)
assert (X_trans > 0.0).any()
assert_array_almost_equal(np.sum(X_trans, axis=1),
np.ones(X_trans.shape[0]))
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_fit_transform(method):
# Test LDA fit_transform & transform
# fit_transform and transform result should be the same
rng = np.random.RandomState(0)
X = rng.randint(10, size=(50, 20))
lda = LatentDirichletAllocation(n_components=5, learning_method=method,
random_state=rng)
X_fit = lda.fit_transform(X)
X_trans = lda.transform(X)
assert_array_almost_equal(X_fit, X_trans, 4)
def test_invalid_params():
# test `_check_params` method
X = np.ones((5, 10))
invalid_models = (
('n_components', LatentDirichletAllocation(n_components=0)),
('learning_method',
LatentDirichletAllocation(learning_method='unknown')),
('total_samples', LatentDirichletAllocation(total_samples=0)),
('learning_offset', LatentDirichletAllocation(learning_offset=-1)),
)
for param, model in invalid_models:
regex = r"^Invalid %r parameter" % param
with pytest.raises(ValueError, match=regex):
model.fit(X)
def test_lda_negative_input():
    # test passing a dense matrix with negative input.
X = np.full((5, 10), -1.)
lda = LatentDirichletAllocation()
regex = r"^Negative values in data passed"
with pytest.raises(ValueError, match=regex):
lda.fit(X)
def test_lda_no_component_error():
# test `perplexity` before `fit`
rng = np.random.RandomState(0)
X = rng.randint(4, size=(20, 10))
lda = LatentDirichletAllocation()
regex = ("This LatentDirichletAllocation instance is not fitted yet. "
"Call 'fit' with appropriate arguments before using this "
"estimator.")
with pytest.raises(NotFittedError, match=regex):
lda.perplexity(X)
@if_safe_multiprocessing_with_blas
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_multi_jobs(method):
n_components, X = _build_sparse_mtx()
# Test LDA batch training with multi CPU
rng = np.random.RandomState(0)
lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2,
learning_method=method,
evaluate_every=1, random_state=rng)
lda.fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
@if_safe_multiprocessing_with_blas
def test_lda_partial_fit_multi_jobs():
# Test LDA online training with multi CPU
rng = np.random.RandomState(0)
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, n_jobs=2,
learning_offset=5., total_samples=30,
random_state=rng)
for i in range(2):
lda.partial_fit(X)
correct_idx_grps = [(0, 1, 2), (3, 4, 5), (6, 7, 8)]
for c in lda.components_:
top_idx = set(c.argsort()[-3:][::-1])
assert tuple(sorted(top_idx)) in correct_idx_grps
def test_lda_perplexity_mismatch():
# test dimension mismatch in `perplexity` method
rng = np.random.RandomState(0)
n_components = rng.randint(3, 6)
n_samples = rng.randint(6, 10)
X = np.random.randint(4, size=(n_samples, 10))
lda = LatentDirichletAllocation(n_components=n_components,
learning_offset=5., total_samples=20,
random_state=rng)
lda.fit(X)
# invalid samples
invalid_n_samples = rng.randint(4, size=(n_samples + 1, n_components))
with pytest.raises(ValueError, match=r'Number of samples'):
lda._perplexity_precomp_distr(X, invalid_n_samples)
# invalid topic number
invalid_n_components = rng.randint(4, size=(n_samples, n_components + 1))
with pytest.raises(ValueError, match=r'Number of topics'):
lda._perplexity_precomp_distr(X, invalid_n_components)
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_perplexity(method):
    # Test LDA perplexity for batch and online training
# perplexity should be lower after each iteration
n_components, X = _build_sparse_mtx()
lda_1 = LatentDirichletAllocation(n_components=n_components,
max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit(X)
perp_1 = lda_1.perplexity(X, sub_sampling=False)
lda_2.fit(X)
perp_2 = lda_2.perplexity(X, sub_sampling=False)
assert perp_1 >= perp_2
perp_1_subsampling = lda_1.perplexity(X, sub_sampling=True)
perp_2_subsampling = lda_2.perplexity(X, sub_sampling=True)
assert perp_1_subsampling >= perp_2_subsampling
@pytest.mark.parametrize('method', ('online', 'batch'))
def test_lda_score(method):
    # Test LDA score for batch and online training
# score should be higher after each iteration
n_components, X = _build_sparse_mtx()
lda_1 = LatentDirichletAllocation(n_components=n_components,
max_iter=1, learning_method=method,
total_samples=100, random_state=0)
lda_2 = LatentDirichletAllocation(n_components=n_components,
max_iter=10, learning_method=method,
total_samples=100, random_state=0)
lda_1.fit_transform(X)
score_1 = lda_1.score(X)
lda_2.fit_transform(X)
score_2 = lda_2.score(X)
assert score_2 >= score_1
def test_perplexity_input_format():
# Test LDA perplexity for sparse and dense input
# score should be the same for both dense and sparse input
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch',
total_samples=100, random_state=0)
lda.fit(X)
perp_1 = lda.perplexity(X)
perp_2 = lda.perplexity(X.toarray())
assert_almost_equal(perp_1, perp_2)
def test_lda_score_perplexity():
# Test the relationship between LDA score and perplexity
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=10,
random_state=0)
lda.fit(X)
perplexity_1 = lda.perplexity(X, sub_sampling=False)
score = lda.score(X)
perplexity_2 = np.exp(-1. * (score / np.sum(X.data)))
assert_almost_equal(perplexity_1, perplexity_2)
def test_lda_fit_perplexity():
# Test that the perplexity computed during fit is consistent with what is
# returned by the perplexity method
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=1,
learning_method='batch', random_state=0,
evaluate_every=1)
lda.fit(X)
# Perplexity computed at end of fit method
perplexity1 = lda.bound_
# Result of perplexity method on the train set
perplexity2 = lda.perplexity(X)
assert_almost_equal(perplexity1, perplexity2)
def test_lda_empty_docs():
"""Test LDA on empty document (all-zero rows)."""
Z = np.zeros((5, 4))
for X in [Z, csr_matrix(Z)]:
lda = LatentDirichletAllocation(max_iter=750).fit(X)
assert_almost_equal(lda.components_.sum(axis=0),
np.ones(lda.components_.shape[1]))
def test_dirichlet_expectation():
"""Test Cython version of Dirichlet expectation calculation."""
x = np.logspace(-100, 10, 10000)
expectation = np.empty_like(x)
_dirichlet_expectation_1d(x, 0, expectation)
assert_allclose(expectation, np.exp(psi(x) - psi(np.sum(x))),
atol=1e-19)
x = x.reshape(100, 100)
assert_allclose(_dirichlet_expectation_2d(x),
psi(x) - psi(np.sum(x, axis=1)[:, np.newaxis]),
rtol=1e-11, atol=3e-9)
def check_verbosity(verbose, evaluate_every, expected_lines,
expected_perplexities):
n_components, X = _build_sparse_mtx()
lda = LatentDirichletAllocation(n_components=n_components, max_iter=3,
learning_method='batch',
verbose=verbose,
evaluate_every=evaluate_every,
random_state=0)
out = StringIO()
old_out, sys.stdout = sys.stdout, out
try:
lda.fit(X)
finally:
sys.stdout = old_out
n_lines = out.getvalue().count('\n')
n_perplexity = out.getvalue().count('perplexity')
assert expected_lines == n_lines
assert expected_perplexities == n_perplexity
@pytest.mark.parametrize(
'verbose,evaluate_every,expected_lines,expected_perplexities',
[(False, 1, 0, 0),
(False, 0, 0, 0),
(True, 0, 3, 0),
(True, 1, 3, 3),
(True, 2, 3, 1)])
def test_verbosity(verbose, evaluate_every, expected_lines,
expected_perplexities):
check_verbosity(verbose, evaluate_every, expected_lines,
expected_perplexities)
| bsd-3-clause |
albertbup/DeepBeliefNetwork | example_classification.py | 3 | 1158 | import numpy as np
np.random.seed(1337) # for reproducibility
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from dbn.tensorflow import SupervisedDBNClassification
# Loading dataset
digits = load_digits()
X, Y = digits.data, digits.target
# Data scaling
X = (X / 16).astype(np.float32)
# Splitting data
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
# Training
classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
learning_rate_rbm=0.05,
learning_rate=0.1,
n_epochs_rbm=10,
n_iter_backprop=100,
batch_size=32,
activation_function='relu',
dropout_p=0.2)
classifier.fit(X_train, Y_train)
# Test
Y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: %f' % accuracy_score(Y_test, Y_pred))
| mit |
joshzarrabi/e-mission-server | emission/analysis/classification/inference/mode.py | 2 | 17308 | # Standard imports
from pymongo import MongoClient
import logging
from datetime import datetime
import sys
import os
import numpy as np
import scipy as sp
import time
from datetime import datetime
# Our imports
import emission.analysis.section_features as easf
import emission.core.get_database as edb
# We are not going to use the feature matrix for analysis unless we have at
# least minTrainingSetSize points in the training set (the exact threshold is
# arbitrary). We could also consider
# combining the old and new training data, but this is really a bootstrapping
# problem, so we don't need to solve it right now.
minTrainingSetSize = 1000
class ModeInferencePipeline:
def __init__(self):
self.featureLabels = ["distance", "duration", "first filter mode", "sectionId", "avg speed",
"speed EV", "speed variance", "max speed", "max accel", "isCommute",
"heading change rate", "stop rate", "velocity change rate",
"start lat", "start lng", "stop lat", "stop lng",
"start hour", "end hour", "close to bus stop", "close to train stop",
"close to airport"]
self.Sections = edb.get_section_db()
def runPipeline(self):
allConfirmedTripsQuery = ModeInferencePipeline.getSectionQueryWithGroundTruth({'$ne': ''})
(self.modeList, self.confirmedSections) = self.loadTrainingDataStep(allConfirmedTripsQuery)
logging.debug("confirmedSections.count() = %s" % (self.confirmedSections.count()))
if (self.confirmedSections.count() < minTrainingSetSize):
logging.info("initial loadTrainingDataStep DONE")
logging.debug("current training set too small, reloading from backup!")
backupSections = MongoClient('localhost').Backup_database.Stage_Sections
(self.modeList, self.confirmedSections) = self.loadTrainingDataStep(allConfirmedTripsQuery, backupSections)
logging.info("loadTrainingDataStep DONE")
(self.bus_cluster, self.train_cluster) = self.generateBusAndTrainStopStep()
logging.info("generateBusAndTrainStopStep DONE")
(self.featureMatrix, self.resultVector) = self.generateFeatureMatrixAndResultVectorStep()
logging.info("generateFeatureMatrixAndResultVectorStep DONE")
(self.cleanedFeatureMatrix, self.cleanedResultVector) = self.cleanDataStep()
logging.info("cleanDataStep DONE")
self.selFeatureIndices = self.selectFeatureIndicesStep()
logging.info("selectFeatureIndicesStep DONE")
self.selFeatureMatrix = self.cleanedFeatureMatrix[:,self.selFeatureIndices]
self.model = self.buildModelStep()
logging.info("buildModelStep DONE")
toPredictTripsQuery = {"$and": [{'type': 'move'},
ModeInferencePipeline.getModeQuery(''),
{'predicted_mode': None}]}
(self.toPredictFeatureMatrix, self.sectionIds, self.sectionUserIds) = self.generateFeatureMatrixAndIDsStep(toPredictTripsQuery)
logging.info("generateFeatureMatrixAndIDsStep DONE")
self.predictedProb = self.predictModesStep()
logging.info("predictModesStep DONE")
self.savePredictionsStep()
logging.info("savePredictionsStep DONE")
# Most of the time, this will be an int, but it can also be a subquery, like
# {'$ne': ''}. This will be used to find the set of entries for the training
# set, for example
@staticmethod
def getModeQuery(groundTruthMode):
# We need the existence check because the corrected mode is not guaranteed to exist,
    # and if it doesn't exist, it would still end up matching the != '' query
    # (a missing field is not '', so $ne matches it)
correctedModeQuery = lambda mode: {'$and': [{'corrected_mode': {'$exists': True}},
{'corrected_mode': groundTruthMode}]}
return {'$or': [correctedModeQuery(groundTruthMode),
{'confirmed_mode': groundTruthMode}]}
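    # Added explanatory note (not original code): for groundTruthMode=1 the
    # query built above is
    #     {'$or': [{'$and': [{'corrected_mode': {'$exists': True}},
    #                        {'corrected_mode': 1}]},
    #              {'confirmed_mode': 1}]}
    # i.e. it matches sections whose corrected mode equals the ground truth
    # mode, or whose confirmed mode does.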
@staticmethod
def getSectionQueryWithGroundTruth(groundTruthMode):
return {"$and": [{'type': 'move'},
ModeInferencePipeline.getModeQuery(groundTruthMode)]}
# TODO: Refactor into generic steps and results
def loadTrainingDataStep(self, sectionQuery, sectionDb = None):
logging.debug("START TRAINING DATA STEP")
if (sectionDb == None):
sectionDb = self.Sections
begin = time.time()
logging.debug("Section data set size = %s" % sectionDb.find({'type': 'move'}).count())
duration = time.time() - begin
logging.debug("Getting dataset size took %s" % (duration))
logging.debug("Querying confirmedSections %s" % (datetime.now()))
begin = time.time()
confirmedSections = sectionDb.find(sectionQuery)
duration = time.time() - begin
logging.debug("Querying confirmedSection took %s" % (duration))
logging.debug("Querying stage modes %s" % (datetime.now()))
begin = time.time()
modeList = []
for mode in edb.get_mode_db().find():
modeList.append(mode)
logging.debug(mode)
duration = time.time() - begin
logging.debug("Querying stage modes took %s" % (duration))
logging.debug("Section query with ground truth %s" % (datetime.now()))
begin = time.time()
logging.debug("Training set total size = %s" %
sectionDb.find(ModeInferencePipeline.getSectionQueryWithGroundTruth({'$ne': ''})).count())
for mode in modeList:
logging.debug("%s: %s" % (mode['mode_name'],
sectionDb.find(ModeInferencePipeline.getSectionQueryWithGroundTruth(mode['mode_id']))))
duration = time.time() - begin
logging.debug("Getting section query with ground truth took %s" % (duration))
duration = time.time() - begin
return (modeList, confirmedSections)
# TODO: Should mode_cluster be in featurecalc or here?
def generateBusAndTrainStopStep(self):
bus_cluster=easf.mode_cluster(5,105,1)
train_cluster=easf.mode_cluster(6,600,1)
air_cluster=easf.mode_cluster(9,600,1)
return (bus_cluster, train_cluster)
# Feature matrix construction
def generateFeatureMatrixAndResultVectorStep(self):
featureMatrix = np.zeros([self.confirmedSections.count(), len(self.featureLabels)])
resultVector = np.zeros(self.confirmedSections.count())
logging.debug("created data structures of size %s" % self.confirmedSections.count())
# There are a couple of additions to the standard confirmedSections cursor here.
# First, we read it in batches of 300 in order to avoid the 10 minute timeout
# Our logging shows that we can process roughly 500 entries in 10 minutes
# Second, it looks like the cursor requeries while iterating. So when we
# first check, we get count of x, but if new entries were read (or in
# this case, classified) while we are iterating over the cursor, we may
# end up processing > x entries.
# This will crash the script because we will try to access a record that
# doesn't exist.
# So we limit the records to the size of the matrix that we have created
for (i, section) in enumerate(self.confirmedSections.limit(featureMatrix.shape[0]).batch_size(300)):
try:
self.updateFeatureMatrixRowWithSection(featureMatrix, i, section)
resultVector[i] = self.getGroundTruthMode(section)
if i % 100 == 0:
logging.debug("Processing record %s " % i)
except Exception, e:
logging.debug("skipping section %s due to error %s " % (section, e))
return (featureMatrix, resultVector)
def getGroundTruthMode(self, section):
# logging.debug("getting ground truth for section %s" % section)
if 'corrected_mode' in section:
# logging.debug("Returning corrected mode %s" % section['corrected_mode'])
return section['corrected_mode']
else:
# logging.debug("Returning confirmed mode %s" % section['confirmed_mode'])
return section['confirmed_mode']
# Features are:
# 0. distance
# 1. duration
# 2. first filter mode
# 3. sectionId
# 4. avg speed
# 5. speed EV
# 6. speed variance
# 7. max speed
# 8. max accel
# 9. isCommute
# 10. heading change rate (currently unfilled)
# 11. stop rate (currently unfilled)
# 12. velocity change rate (currently unfilled)
# 13. start lat
# 14. start lng
# 15. stop lat
# 16. stop lng
# 17. start hour
# 18. end hour
# 19. both start and end close to bus stop
# 20. both start and end close to train station
# 21. both start and end close to airport
def updateFeatureMatrixRowWithSection(self, featureMatrix, i, section):
featureMatrix[i, 0] = section['distance']
featureMatrix[i, 1] = (section['section_end_datetime'] - section['section_start_datetime']).total_seconds()
# Deal with unknown modes like "airplane"
try:
featureMatrix[i, 2] = section['mode']
except ValueError:
featureMatrix[i, 2] = 0
featureMatrix[i, 3] = section['section_id']
featureMatrix[i, 4] = easf.calAvgSpeed(section)
speeds = easf.calSpeeds(section)
if speeds != None and len(speeds) > 0:
featureMatrix[i, 5] = np.mean(speeds)
featureMatrix[i, 6] = np.std(speeds)
featureMatrix[i, 7] = np.max(speeds)
else:
# They will remain zero
pass
accels = easf.calAccels(section)
if accels != None and len(accels) > 0:
featureMatrix[i, 8] = np.max(accels)
else:
# They will remain zero
pass
featureMatrix[i, 9] = ('commute' in section) and (section['commute'] == 'to' or section['commute'] == 'from')
featureMatrix[i, 10] = easf.calHCR(section)
featureMatrix[i, 11] = easf.calSR(section)
featureMatrix[i, 12] = easf.calVCR(section)
if 'section_start_point' in section and section['section_start_point'] != None:
startCoords = section['section_start_point']['coordinates']
featureMatrix[i, 13] = startCoords[0]
featureMatrix[i, 14] = startCoords[1]
if 'section_end_point' in section and section['section_end_point'] != None:
endCoords = section['section_end_point']['coordinates']
featureMatrix[i, 15] = endCoords[0]
featureMatrix[i, 16] = endCoords[1]
featureMatrix[i, 17] = section['section_start_datetime'].time().hour
featureMatrix[i, 18] = section['section_end_datetime'].time().hour
if (hasattr(self, "bus_cluster")):
featureMatrix[i, 19] = easf.mode_start_end_coverage(section, self.bus_cluster,105)
if (hasattr(self, "train_cluster")):
featureMatrix[i, 20] = easf.mode_start_end_coverage(section, self.train_cluster,600)
if (hasattr(self, "air_cluster")):
featureMatrix[i, 21] = easf.mode_start_end_coverage(section, self.air_cluster,600)
# Replace NaN and inf by zeros so that it doesn't crash later
featureMatrix[i] = np.nan_to_num(featureMatrix[i])
def cleanDataStep(self):
runIndices = self.resultVector == 2
transportIndices = self.resultVector == 4
mixedIndices = self.resultVector == 8
airIndices = self.resultVector == 9
unknownIndices = self.resultVector == 0
strippedIndices = np.logical_not(runIndices | transportIndices | mixedIndices | unknownIndices)
logging.debug("Stripped trips with mode: run %s, transport %s, mixed %s, unknown %s unstripped %s" %
(np.count_nonzero(runIndices), np.count_nonzero(transportIndices),
np.count_nonzero(mixedIndices), np.count_nonzero(unknownIndices),
np.count_nonzero(strippedIndices)))
strippedFeatureMatrix = self.featureMatrix[strippedIndices]
strippedResultVector = self.resultVector[strippedIndices]
# In spite of stripping out the values, we see that there are clear
# outliers. This is almost certainly a mis-classified trip, because the
# distance and speed are both really large, but the mode is walking. Let's
# manually filter out this outlier.
distanceOutliers = strippedFeatureMatrix[:,0] > 500000
speedOutliers = strippedFeatureMatrix[:,4] > 100
speedMeanOutliers = strippedFeatureMatrix[:,5] > 80
speedVarianceOutliers = strippedFeatureMatrix[:,6] > 70
maxSpeedOutliers = strippedFeatureMatrix[:,7] > 160
logging.debug("Stripping out distanceOutliers %s, speedOutliers %s, speedMeanOutliers %s, speedVarianceOutliers %s, maxSpeedOutliers %s" %
(np.nonzero(distanceOutliers), np.nonzero(speedOutliers),
np.nonzero(speedMeanOutliers), np.nonzero(speedVarianceOutliers),
np.nonzero(maxSpeedOutliers)))
nonOutlierIndices = np.logical_not(distanceOutliers | speedOutliers | speedMeanOutliers | speedVarianceOutliers | maxSpeedOutliers)
logging.debug("nonOutlierIndices.shape = %s" % nonOutlierIndices.shape)
return (strippedFeatureMatrix[nonOutlierIndices],
strippedResultVector[nonOutlierIndices])
# Feature Indices
def selectFeatureIndicesStep(self):
genericFeatureIndices = list(xrange(0,10))
AdvancedFeatureIndices = list(xrange(10,13))
LocationFeatureIndices = list(xrange(13,17))
TimeFeatureIndices = list(xrange(17,19))
BusTrainFeatureIndices = list(xrange(19,22))
logging.debug("generic features = %s" % genericFeatureIndices)
logging.debug("advanced features = %s" % AdvancedFeatureIndices)
logging.debug("location features = %s" % LocationFeatureIndices)
logging.debug("time features = %s" % TimeFeatureIndices)
logging.debug("bus train features = %s" % BusTrainFeatureIndices)
return genericFeatureIndices + BusTrainFeatureIndices
def buildModelStep(self):
from sklearn import ensemble
forestClf = ensemble.RandomForestClassifier()
model = forestClf.fit(self.selFeatureMatrix, self.cleanedResultVector)
return model
def generateFeatureMatrixAndIDsStep(self, sectionQuery):
toPredictSections = self.Sections.find(sectionQuery)
logging.debug("Predicting values for %d sections" % toPredictSections.count())
featureMatrix = np.zeros([toPredictSections.count(), len(self.featureLabels)])
sectionIds = []
sectionUserIds = []
for (i, section) in enumerate(toPredictSections.limit(featureMatrix.shape[0]).batch_size(300)):
if i % 50 == 0:
logging.debug("Processing test record %s " % i)
self.updateFeatureMatrixRowWithSection(featureMatrix, i, section)
sectionIds.append(section['_id'])
sectionUserIds.append(section['user_id'])
return (featureMatrix[:,self.selFeatureIndices], sectionIds, sectionUserIds)
def predictModesStep(self):
return self.model.predict_proba(self.toPredictFeatureMatrix)
# The current probability will only have results for values from the set of
# unique values in the resultVector. This means that the location of the
# highest probability is not a 1:1 mapping to the mode, which will probably
# have issues down the road. We are going to fix this here by storing the
# non-zero probabilities in a map instead of in a list. We used to have an
# list here, but we move to a map instead because we plan to support lots of
# different modes, and having an giant array consisting primarily of zeros
# doesn't sound like a great option.
# In other words, uniqueModes = [1, 5]
# predictedProb = [[1,0], [0,1]]
# allModes has length 8
# returns [{'walking': 1}, {'bus': 1}]
def convertPredictedProbToMap(self, allModeList, uniqueModes, predictedProbArr):
currProbMap = {}
uniqueModesInt = [int(um) for um in uniqueModes]
logging.debug("predictedProbArr has %s non-zero elements" % np.count_nonzero(predictedProbArr))
logging.debug("uniqueModes are %s " % uniqueModesInt)
for (j, uniqueMode) in enumerate(uniqueModesInt):
if predictedProbArr[j] != 0:
# Modes start from 1, but allModeList indices start from 0
# so walking (mode id 1) -> modeList[0]
modeName = allModeList[uniqueMode-1]['mode_name']
logging.debug("Setting probability of mode %s (%s) to %s" %
(uniqueMode, modeName, predictedProbArr[j]))
currProbMap[modeName] = predictedProbArr[j]
return currProbMap
def savePredictionsStep(self):
from emission.core.wrapper.user import User
from emission.core.wrapper.client import Client
uniqueModes = sorted(set(self.cleanedResultVector))
for i in range(self.predictedProb.shape[0]):
currSectionId = self.sectionIds[i]
currProb = self.convertPredictedProbToMap(self.modeList, uniqueModes, self.predictedProb[i])
logging.debug("Updating probability for section with id = %s" % currSectionId)
self.Sections.update({'_id': currSectionId}, {"$set": {"predicted_mode": currProb}})
currUser = User.fromUUID(self.sectionUserIds[i])
clientSpecificUpdate = Client(currUser.getFirstStudy()).clientSpecificSetters(currUser.uuid, currSectionId, currProb)
if clientSpecificUpdate != None:
self.Sections.update({'_id': currSectionId}, clientSpecificUpdate)
if __name__ == "__main__":
import json
config_data = json.load(open('config.json'))
log_base_dir = config_data['paths']['log_base_dir']
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s',
filename="%s/pipeline.log" % log_base_dir, level=logging.DEBUG)
modeInferPipeline = ModeInferencePipeline()
modeInferPipeline.runPipeline()
| bsd-3-clause |
calispac/digicampipe | digicampipe/scripts/spe.py | 1 | 14553 | #!/usr/bin/env python
"""
Do the Single Photoelectron anaylsis
Usage:
digicam-spe [options] [--] <INPUT>...
Options:
-h --help Show this screen.
--max_events=N Maximum number of events to analyse.
--max_histo_filename=FILE File path of the max histogram.
[Default: ./max_histo.pk]
--charge_histo_filename=FILE File path of the charge histogram
[Default: ./charge_histo.pk]
--raw_histo_filename=FILE File path of the raw histogram
[Default: ./raw_histo.pk]
-o OUTPUT --output=OUTPUT Output file path to store the results.
[Default: ./results.npz]
-c --compute Compute the data.
-f --fit Fit.
-d --display Display.
-v --debug Enter the debug mode.
-p --pixel=<PIXEL> Give a list of pixel IDs.
--shift=N Number of bins to shift before integrating
[default: 0].
--integral_width=N Number of bins to integrate over
[default: 7].
--pulse_finder_threshold=F Threshold of pulse finder in arbitrary units
[default: 2.0].
--save_figures=PATH Save the plots to the indicated folder.
                              Figures are not saved if set to none
[default: none]
--ncall=N Number of calls for the fit [default: 10000]
--n_samples=N Number of samples per waveform
"""
import os
import matplotlib.pyplot as plt
import numpy as np
from docopt import docopt
from histogram.histogram import Histogram1D
from tqdm import tqdm
from digicampipe.calib.baseline import fill_baseline, subtract_baseline
from digicampipe.calib.charge import compute_charge
from digicampipe.calib.peak import find_pulse_with_max, \
find_pulse_fast
from digicampipe.io.event_stream import calibration_event_stream
from digicampipe.scripts import raw
from digicampipe.scripts.fmpe import FMPEFitter
from digicampipe.utils.docopt import convert_pixel_args, \
convert_int, convert_text
from digicampipe.utils.pdf import fmpe_pdf_10
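# Added usage note (illustrative): an example invocation of the command-line
# interface described in the module docstring above. The input file name and
# pixel list are made up.
#
#     digicam-spe --compute --fit --pixel=0,1,2 --integral_width=7 \
#         --output=./results.npz ./SST1M_01_20180101_001.fits.fz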
class MaxHistoFitter(FMPEFitter):
def __init__(self, histogram, estimated_gain, **kwargs):
n_peaks = 2
super(MaxHistoFitter, self).__init__(histogram, estimated_gain,
n_peaks, **kwargs)
self.parameters_plot_name = {'baseline': '$B$', 'gain': 'G',
'sigma_e': '$\sigma_e$',
'sigma_s': '$\sigma_s$',
'a_0': None, 'a_1': None}
def pdf(self, x, baseline, gain, sigma_e, sigma_s, a_0, a_1):
params = {'baseline': baseline, 'gain': gain, 'sigma_e': sigma_e,
'sigma_s': sigma_s, 'a_0': a_0, 'a_1': a_1, 'bin_width': 0}
return fmpe_pdf_10(x, **params)
class SPEFitter(FMPEFitter):
def __init__(self, histogram, estimated_gain, **kwargs):
n_peaks = 4
super(SPEFitter, self).__init__(histogram, estimated_gain, n_peaks,
**kwargs)
self.parameters_plot_name = {'baseline': '$B$', 'gain': 'G',
'sigma_e': '$\sigma_e$',
'sigma_s': '$\sigma_s$',
'a_1': None, 'a_2': None, 'a_3': None,
'a_4': None}
def pdf(self, x, baseline, gain, sigma_e, sigma_s, a_1, a_2, a_3, a_4):
params = {'baseline': baseline, 'gain': gain, 'sigma_e': sigma_e,
'sigma_s': sigma_s, 'a_0': 0, 'a_1': a_1, 'a_2': a_2,
'a_3': a_3, 'a_4': a_4, 'bin_width': 0}
return fmpe_pdf_10(x, **params)
def initialize_fit(self):
init_params = super(SPEFitter, self).initialize_fit()
init_params['a_4'] = init_params['a_3']
init_params['a_3'] = init_params['a_2']
init_params['a_2'] = init_params['a_1']
init_params['a_1'] = init_params['a_0']
init_params['baseline'] = init_params['baseline'] - init_params['gain']
del init_params['a_0']
self.initial_parameters = init_params
return init_params
def compute_dark_rate(number_of_zeros, total_number_of_events, time):
p_0 = number_of_zeros / total_number_of_events
rate = - np.log(p_0)
rate /= time
return rate
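# Worked example (illustrative numbers, not from this module): if 40000 out of
# 100000 triggered waveforms contain no pulse and the search window is 400 ns,
# then p_0 = 0.4 and compute_dark_rate(40000, 100000, 400e-9) returns
# -ln(0.4) / 400e-9 ~ 2.3e6 Hz.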
def compute_max_histo(files, histo_filename, pixel_id, max_events,
integral_width, shift, baseline):
n_pixels = len(pixel_id)
if not os.path.exists(histo_filename):
events = calibration_event_stream(files, pixel_id=pixel_id,
max_events=max_events)
# events = compute_baseline_with_min(events)
events = fill_baseline(events, baseline)
events = subtract_baseline(events)
events = find_pulse_with_max(events)
events = compute_charge(events, integral_width, shift)
max_histo = Histogram1D(
data_shape=(n_pixels,),
bin_edges=np.arange(-4095 * integral_width,
4095 * integral_width),
)
for event in events:
max_histo.fill(event.data.reconstructed_charge)
max_histo.save(histo_filename)
return max_histo
else:
max_histo = Histogram1D.load(histo_filename)
return max_histo
def compute_spe(files, histo_filename, pixel_id, baseline, max_events,
integral_width, shift, pulse_finder_threshold, debug=False):
if not os.path.exists(histo_filename):
n_pixels = len(pixel_id)
events = calibration_event_stream(files,
max_events=max_events,
pixel_id=pixel_id)
events = fill_baseline(events, baseline)
events = subtract_baseline(events)
# events = find_pulse_1(events, 0.5, 20)
# events = find_pulse_2(events, widths=[5, 6], threshold_sigma=2)
events = find_pulse_fast(events, threshold=pulse_finder_threshold)
# events = find_pulse_fast_2(events, threshold=pulse_finder_threshold,
# min_dist=3)
# events = find_pulse_correlate(events,
# threshold=pulse_finder_threshold)
# events = find_pulse_gaussian_filter(events,
# threshold=pulse_finder_threshold)
# events = find_pulse_wavelets(events, widths=[4, 5, 6],
# threshold_sigma=2)
events = compute_charge(events, integral_width=integral_width,
shift=shift)
# events = compute_amplitude(events)
# events = fit_template(events)
# events = compute_full_waveform_charge(events)
spe_histo = Histogram1D(
data_shape=(n_pixels,),
bin_edges=np.arange(-4095 * 50, 4095 * 50)
)
for event in events:
spe_histo.fill(event.data.reconstructed_charge)
spe_histo.save(histo_filename)
return spe_histo
else:
spe_histo = Histogram1D.load(histo_filename)
return spe_histo
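# Usage sketch (illustrative; the file name and baseline values are
# assumptions, not taken from this module):
#   pixels = np.array([0, 1, 2])
#   baseline = np.full(len(pixels), 500.)  # per-pixel baseline in LSB
#   spe_histo = compute_spe(['raw_data.fits.fz'], './charge_histo.pk', pixels,
#                           baseline, max_events=None, integral_width=7,
#                           shift=0, pulse_finder_threshold=2.0)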
def entry():
args = docopt(__doc__)
files = args['<INPUT>']
debug = args['--debug']
max_events = convert_int(args['--max_events'])
raw_histo_filename = args['--raw_histo_filename']
charge_histo_filename = args['--charge_histo_filename']
max_histo_filename = args['--max_histo_filename']
results_filename = args['--output']
pixel_id = convert_pixel_args(args['--pixel'])
n_pixels = len(pixel_id)
integral_width = int(args['--integral_width'])
shift = int(args['--shift'])
pulse_finder_threshold = float(args['--pulse_finder_threshold'])
n_samples = int(args['--n_samples']) # TODO access this in a better way !
estimated_gain = 20
ncall = int(args['--ncall'])
if args['--compute']:
raw_histo = raw.compute(files, max_events=max_events,
pixel_id=pixel_id, filename=raw_histo_filename)
baseline = raw_histo.mode()
compute_max_histo(files, max_histo_filename, pixel_id, max_events,
integral_width, shift, baseline)
compute_spe(files, charge_histo_filename, pixel_id, baseline,
max_events, integral_width, shift, pulse_finder_threshold,
debug=debug)
if args['--fit']:
spe_histo = Histogram1D.load(charge_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
dark_count_rate = np.zeros(n_pixels) * np.nan
electronic_noise = np.zeros(n_pixels) * np.nan
crosstalk = np.zeros(n_pixels) * np.nan
gain = np.zeros(n_pixels) * np.nan
for i, pixel in tqdm(enumerate(pixel_id), total=n_pixels,
desc='Pixel'):
histo = max_histo[i]
fitter = MaxHistoFitter(histo, estimated_gain, throw_nan=True)
try:
fitter.fit(ncall=100)
fitter.fit(ncall=ncall)
n_entries = histo.data.sum()
number_of_zeros = fitter.parameters['a_0']
window_length = 4 * n_samples
rate = compute_dark_rate(number_of_zeros,
n_entries,
window_length)
electronic_noise[i] = fitter.parameters['sigma_e']
dark_count_rate[i] = rate
if debug:
fitter.draw()
fitter.draw_init(x_label='[LSB]')
fitter.draw_fit(x_label='[LSB]')
plt.show()
except Exception as e:
print('Could not compute dark count rate'
' in pixel {}'.format(pixel))
print(e)
np.savez(results_filename, dcr=dark_count_rate,
sigma_e=electronic_noise, pixel_id=pixel_id)
for i, pixel in tqdm(enumerate(pixel_id), total=n_pixels,
desc='Pixel'):
histo = spe_histo[i]
fitter = SPEFitter(histo, estimated_gain, throw_nan=True)
try:
fitter.fit(ncall=100)
fitter.fit(ncall=ncall)
params = fitter.parameters
n_entries = params['a_1']
n_entries += params['a_2']
n_entries += params['a_3']
n_entries += params['a_4']
crosstalk[i] = (n_entries - params['a_1']) / n_entries
gain[i] = params['gain']
if debug:
fitter.draw()
fitter.draw_init(x_label='[LSB]')
fitter.draw_fit(x_label='[LSB]')
plt.show()
except Exception as e:
print('Could not compute gain and crosstalk'
' in pixel {}'.format(pixel))
print(e)
data = dict(np.load(results_filename))
data['crosstalk'] = crosstalk
data['gain'] = gain
np.savez(results_filename, **data)
save_figure = convert_text(args['--save_figures'])
if save_figure is not None:
output_path = save_figure
spe_histo = Histogram1D.load(charge_histo_filename)
spe_amplitude = Histogram1D.load(charge_histo_filename)
raw_histo = Histogram1D.load(raw_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
figure_directory = output_path + 'figures/'
if not os.path.exists(figure_directory):
os.makedirs(figure_directory)
histograms = [spe_histo, spe_amplitude, raw_histo, max_histo]
names = ['histogram_charge/', 'histogram_amplitude/', 'histogram_raw/',
'histo_max/']
for i, histo in enumerate(histograms):
figure = plt.figure()
histogram_figure_directory = figure_directory + names[i]
if not os.path.exists(histogram_figure_directory):
os.makedirs(histogram_figure_directory)
for j, pixel in enumerate(pixel_id):
axis = figure.add_subplot(111)
figure_path = histogram_figure_directory + 'pixel_{}'. \
format(pixel)
try:
histo.draw(index=(j,), axis=axis, log=True, legend=False)
figure.savefig(figure_path)
except Exception as e:
print('Could not save pixel {} to : {} \n'.
format(pixel, figure_path))
print(e)
axis.remove()
if args['--display']:
spe_histo = Histogram1D.load(charge_histo_filename)
        raw_histo = Histogram1D.load(raw_histo_filename)
max_histo = Histogram1D.load(max_histo_filename)
spe_histo.draw(index=(0,), log=True, legend=False)
raw_histo.draw(index=(0,), log=True, legend=False)
max_histo.draw(index=(0,), log=True, legend=False)
try:
data = np.load(results_filename)
dark_count_rate = data['dcr']
electronic_noise = data['sigma_e']
crosstalk = data['crosstalk']
gain = data['gain']
except IOError as e:
print(e)
print('Could not find the analysis files !')
plt.figure()
plt.hist(dark_count_rate[np.isfinite(dark_count_rate)],
bins='auto')
plt.xlabel('dark count rate [GHz]')
plt.legend(loc='best')
plt.figure()
plt.hist(crosstalk[np.isfinite(crosstalk)],
bins='auto')
plt.xlabel('Crosstalk []')
plt.legend(loc='best')
plt.figure()
plt.hist(gain[np.isfinite(gain)],
bins='auto')
plt.xlabel('Gain [LSB/p.e.]')
plt.legend(loc='best')
plt.figure()
plt.hist(electronic_noise[np.isfinite(electronic_noise)],
bins='auto')
plt.xlabel('$\sigma_e$ [LSB]')
plt.legend(loc='best')
plt.show()
return
if __name__ == '__main__':
entry()
| gpl-3.0 |
stoneflyop1/py_machine_learning | ch08/main.py | 1 | 1080 | import pandas as pd
df = pd.read_csv('../data/movie_data.csv')
import cleandata
df['review'] = df['review'].apply(cleandata.preprocessor)
# grid search, very time-consuming
#import gridlearn
#gridlearn.learn(df)
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier
import tokendata
vect = HashingVectorizer(
decode_error='ignore', n_features=(2 ** 21),
preprocessor=None, tokenizer=tokendata.tokenizer
)
clf = SGDClassifier(loss='log', random_state=1, n_iter=1)
import ooclearn
doc_stream = ooclearn.stream_docs(path='../data/movie_data.csv')
import pyprind # progress bar
pbar = pyprind.ProgBar(45)
import numpy as np
classes = np.array([0, 1])
for _ in range(45):
X_train, y_train = ooclearn.get_minibatch(doc_stream, size=1000)
if not X_train: break
X_train = vect.transform(X_train)
clf.partial_fit(X_train, y_train, classes=classes)
pbar.update()
X_test, y_test = ooclearn.get_minibatch(doc_stream, size=5000)
X_test = vect.transform(X_test)
print('Accuracy: %.3f' % clf.score(X_test, y_test)) | mit |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/io/pytables.py | 3 | 161411 | """
High level interface to PyTables for reading and writing pandas data structures
to disk
"""
# pylint: disable-msg=E1101,W0613,W0603
from datetime import datetime, date
import time
import re
import copy
import itertools
import warnings
import os
from pandas.core.dtypes.common import (
is_list_like,
is_categorical_dtype,
is_timedelta64_dtype,
is_datetime64tz_dtype,
is_datetime64_dtype,
_ensure_object,
_ensure_int64,
_ensure_platform_int)
from pandas.core.dtypes.missing import array_equivalent
import numpy as np
from pandas import (Series, DataFrame, Panel, Panel4D, Index,
MultiIndex, Int64Index, isnull, concat,
SparseSeries, SparseDataFrame, PeriodIndex,
DatetimeIndex, TimedeltaIndex)
from pandas.core import config
from pandas.io.common import _stringify_path
from pandas.core.sparse.array import BlockIndex, IntIndex
from pandas.core.base import StringMixin
from pandas.io.formats.printing import adjoin, pprint_thing
from pandas.errors import PerformanceWarning
from pandas.core.common import _asarray_tuplesafe
from pandas.core.algorithms import match, unique
from pandas.core.categorical import Categorical, _factorize_from_iterables
from pandas.core.internals import (BlockManager, make_block,
_block2d_to_blocknd,
_factor_indexer, _block_shape)
from pandas.core.index import _ensure_index
from pandas import compat
from pandas.compat import u_safe as u, PY3, range, lrange, string_types, filter
from pandas.core.config import get_option
from pandas.core.computation.pytables import Expr, maybe_expression
from pandas._libs import tslib, algos, lib
from distutils.version import LooseVersion
# versioning attribute
_version = '0.15.2'
# encoding
# PY3 encoding if we don't specify
_default_encoding = 'UTF-8'
def _ensure_decoded(s):
""" if we have bytes, decode them to unicode """
if isinstance(s, np.bytes_):
s = s.decode('UTF-8')
return s
def _ensure_encoding(encoding):
# set the encoding if we need
if encoding is None:
if PY3:
encoding = _default_encoding
return encoding
def _ensure_str(name):
"""Ensure that an index / column name is a str (python 3) or
unicode (python 2); otherwise they may be np.string dtype.
Non-string dtypes are passed through unchanged.
https://github.com/pandas-dev/pandas/issues/13492
"""
if isinstance(name, compat.string_types):
name = compat.text_type(name)
return name
Term = Expr
def _ensure_term(where, scope_level):
"""
ensure that the where is a Term or a list of Term
this makes sure that we are capturing the scope of variables
that are passed
create the terms here with a frame_level=2 (we are 2 levels down)
"""
    # only consider list/tuple here as an ndarray is automatically a coordinate
# list
level = scope_level + 1
if isinstance(where, (list, tuple)):
wlist = []
for w in filter(lambda x: x is not None, where):
if not maybe_expression(w):
wlist.append(w)
else:
wlist.append(Term(w, scope_level=level))
where = wlist
elif maybe_expression(where):
where = Term(where, scope_level=level)
return where
class PossibleDataLossError(Exception):
pass
class ClosedFileError(Exception):
pass
class IncompatibilityWarning(Warning):
pass
incompatibility_doc = """
where criteria is being ignored as this version [%s] is too old (or
not-defined), read the file in and write it out to a new file to upgrade (with
the copy_to method)
"""
class AttributeConflictWarning(Warning):
pass
attribute_conflict_doc = """
the [%s] attribute of the existing index is [%s] which conflicts with the new
[%s], resetting the attribute to None
"""
class DuplicateWarning(Warning):
pass
duplicate_doc = """
duplicate entries in table, taking most recently appended
"""
performance_doc = """
your performance may suffer as PyTables will pickle object types that it cannot
map directly to c-types [inferred_type->%s,key->%s] [items->%s]
"""
# formats
_FORMAT_MAP = {
u('f'): 'fixed',
u('fixed'): 'fixed',
u('t'): 'table',
u('table'): 'table',
}
format_deprecate_doc = """
the table keyword has been deprecated
use the format='fixed(f)|table(t)' keyword instead
fixed(f) : specifies the Fixed format
and is the default for put operations
table(t) : specifies the Table format
and is the default for append operations
"""
# map object types
_TYPE_MAP = {
Series: u('series'),
SparseSeries: u('sparse_series'),
DataFrame: u('frame'),
SparseDataFrame: u('sparse_frame'),
Panel: u('wide'),
Panel4D: u('ndim'),
}
# storer class map
_STORER_MAP = {
u('Series'): 'LegacySeriesFixed',
u('DataFrame'): 'LegacyFrameFixed',
u('DataMatrix'): 'LegacyFrameFixed',
u('series'): 'SeriesFixed',
u('sparse_series'): 'SparseSeriesFixed',
u('frame'): 'FrameFixed',
u('sparse_frame'): 'SparseFrameFixed',
u('wide'): 'PanelFixed',
}
# table class map
_TABLE_MAP = {
u('generic_table'): 'GenericTable',
u('appendable_series'): 'AppendableSeriesTable',
u('appendable_multiseries'): 'AppendableMultiSeriesTable',
u('appendable_frame'): 'AppendableFrameTable',
u('appendable_multiframe'): 'AppendableMultiFrameTable',
u('appendable_panel'): 'AppendablePanelTable',
u('appendable_ndim'): 'AppendableNDimTable',
u('worm'): 'WORMTable',
u('legacy_frame'): 'LegacyFrameTable',
u('legacy_panel'): 'LegacyPanelTable',
}
# axes map
_AXES_MAP = {
DataFrame: [0],
Panel: [1, 2],
Panel4D: [1, 2, 3],
}
# register our configuration options
dropna_doc = """
: boolean
drop ALL nan rows when appending to a table
"""
format_doc = """
: format
default format writing format, if None, then
put will default to 'fixed' and append will default to 'table'
"""
with config.config_prefix('io.hdf'):
config.register_option('dropna_table', False, dropna_doc,
validator=config.is_bool)
config.register_option(
'default_format', None, format_doc,
validator=config.is_one_of_factory(['fixed', 'table', None])
)
# oh the troubles to reduce import time
_table_mod = None
_table_file_open_policy_is_strict = False
def _tables():
global _table_mod
global _table_file_open_policy_is_strict
if _table_mod is None:
import tables
_table_mod = tables
# version requirements
if LooseVersion(tables.__version__) < '3.0.0':
raise ImportError("PyTables version >= 3.0.0 is required")
# set the file open policy
# return the file open policy; this changes as of pytables 3.1
# depending on the HDF5 version
try:
_table_file_open_policy_is_strict = (
tables.file._FILE_OPEN_POLICY == 'strict')
except:
pass
return _table_mod
# interface to/from ###
def to_hdf(path_or_buf, key, value, mode=None, complevel=None, complib=None,
append=None, **kwargs):
""" store this object, close it if we opened it """
if append:
f = lambda store: store.append(key, value, **kwargs)
else:
f = lambda store: store.put(key, value, **kwargs)
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, string_types):
with HDFStore(path_or_buf, mode=mode, complevel=complevel,
complib=complib) as store:
f(store)
else:
f(path_or_buf)
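# Example (illustrative sketch; 'store.h5' is a placeholder path):
#   >>> df = DataFrame({'A': range(5)})
#   >>> to_hdf('store.h5', 'df', df, mode='w', format='table')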
def read_hdf(path_or_buf, key=None, mode='r', **kwargs):
""" read from the store, close it if we opened it
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
path_or_buf : path (string), buffer or path object (pathlib.Path or
py._path.local.LocalPath) designating the file to open, or an
already opened pd.HDFStore object
.. versionadded:: 0.19.0 support for pathlib, py.path.
key : group identifier in the store. Can be omitted if the HDF file
contains a single pandas object.
mode : string, {'r', 'r+', 'a'}, default 'r'. Mode to use when opening
the file. Ignored if path_or_buf is a pd.HDFStore.
    where : list of Term (or convertible) objects, optional
start : optional, integer (defaults to None), row number to start
selection
stop : optional, integer (defaults to None), row number to stop
selection
columns : optional, a list of columns that if not None, will limit the
return columns
iterator : optional, boolean, return an iterator, default False
chunksize : optional, nrows to include in iteration, return an iterator
Returns
-------
The selected object
"""
if mode not in ['r', 'r+', 'a']:
raise ValueError('mode {0} is not allowed while performing a read. '
'Allowed modes are r, r+ and a.'.format(mode))
# grab the scope
if 'where' in kwargs:
kwargs['where'] = _ensure_term(kwargs['where'], scope_level=1)
path_or_buf = _stringify_path(path_or_buf)
if isinstance(path_or_buf, string_types):
try:
exists = os.path.exists(path_or_buf)
# if filepath is too long
except (TypeError, ValueError):
exists = False
if not exists:
raise compat.FileNotFoundError(
'File %s does not exist' % path_or_buf)
store = HDFStore(path_or_buf, mode=mode, **kwargs)
# can't auto open/close if we are using an iterator
# so delegate to the iterator
auto_close = True
elif isinstance(path_or_buf, HDFStore):
if not path_or_buf.is_open:
raise IOError('The HDFStore must be open for reading.')
store = path_or_buf
auto_close = False
else:
raise NotImplementedError('Support for generic buffers has not been '
'implemented.')
try:
if key is None:
groups = store.groups()
if len(groups) == 0:
raise ValueError('No dataset in HDF5 file.')
candidate_only_group = groups[0]
# For the HDF file to have only one dataset, all other groups
# should then be metadata groups for that candidate group. (This
# assumes that the groups() method enumerates parent groups
# before their children.)
for group_to_check in groups[1:]:
if not _is_metadata_of(group_to_check, candidate_only_group):
raise ValueError('key must be provided when HDF5 file '
'contains multiple datasets.')
key = candidate_only_group._v_pathname
return store.select(key, auto_close=auto_close, **kwargs)
except:
# if there is an error, close the store
try:
store.close()
except:
pass
raise
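# Example (illustrative sketch; assumes 'store.h5' holds a table-format
# object under the key 'df', e.g. written as in the to_hdf example above):
#   >>> df = read_hdf('store.h5', 'df', where='index < 3')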
def _is_metadata_of(group, parent_group):
"""Check if a given group is a metadata group for a given parent_group."""
if group._v_depth <= parent_group._v_depth:
return False
current = group
while current._v_depth > 1:
parent = current._v_parent
if parent == parent_group and current._v_name == 'meta':
return True
current = current._v_parent
return False
class HDFStore(StringMixin):
"""
dict-like IO interface for storing pandas objects in PyTables
either Fixed or Table format.
Parameters
----------
path : string
File path to HDF5 file
mode : {'a', 'w', 'r', 'r+'}, default 'a'
``'r'``
Read-only; no data can be modified.
``'w'``
Write; a new file is created (an existing file with the same
name would be deleted).
``'a'``
Append; an existing file is opened for reading and writing,
and if the file does not exist it is created.
``'r+'``
It is similar to ``'a'``, but the file must already exist.
complevel : int, 0-9, default 0
Specifies a compression level for data.
A value of 0 disables compression.
complib : {'zlib', 'lzo', 'bzip2', 'blosc', None}, default None
Specifies the compression library to be used.
As of v0.20.2 these additional compressors for Blosc are supported
(default if no compressor specified: 'blosc:blosclz'):
{'blosc:blosclz', 'blosc:lz4', 'blosc:lz4hc', 'blosc:snappy',
'blosc:zlib', 'blosc:zstd'}.
Specifying a compression library which is not available issues
a ValueError.
fletcher32 : bool, default False
If applying compression use the fletcher32 checksum
Examples
--------
>>> from pandas import DataFrame
>>> from numpy.random import randn
>>> bar = DataFrame(randn(10, 4))
>>> store = HDFStore('test.h5')
>>> store['foo'] = bar # write to HDF5
>>> bar = store['foo'] # retrieve
>>> store.close()
"""
def __init__(self, path, mode=None, complevel=None, complib=None,
fletcher32=False, **kwargs):
try:
import tables # noqa
except ImportError as ex: # pragma: no cover
raise ImportError('HDFStore requires PyTables, "{ex}" problem '
'importing'.format(ex=str(ex)))
if complib is not None and complib not in tables.filters.all_complibs:
raise ValueError(
"complib only supports {libs} compression.".format(
libs=tables.filters.all_complibs))
self._path = path
if mode is None:
mode = 'a'
self._mode = mode
self._handle = None
self._complevel = complevel
self._complib = complib
self._fletcher32 = fletcher32
self._filters = None
self.open(mode=mode, **kwargs)
@property
def root(self):
""" return the root node """
self._check_if_open()
return self._handle.root
@property
def filename(self):
return self._path
def __getitem__(self, key):
return self.get(key)
def __setitem__(self, key, value):
self.put(key, value)
def __delitem__(self, key):
return self.remove(key)
def __getattr__(self, name):
""" allow attribute access to get stores """
self._check_if_open()
try:
return self.get(name)
except:
pass
raise AttributeError("'%s' object has no attribute '%s'" %
(type(self).__name__, name))
def __contains__(self, key):
""" check for existance of this key
can match the exact pathname or the pathnm w/o the leading '/'
"""
node = self.get_node(key)
if node is not None:
name = node._v_pathname
if name == key or name[1:] == key:
return True
return False
def __len__(self):
return len(self.groups())
def __unicode__(self):
output = '%s\nFile path: %s\n' % (type(self), pprint_thing(self._path))
if self.is_open:
lkeys = sorted(list(self.keys()))
if len(lkeys):
keys = []
values = []
for k in lkeys:
try:
s = self.get_storer(k)
if s is not None:
keys.append(pprint_thing(s.pathname or k))
values.append(
pprint_thing(s or 'invalid_HDFStore node'))
except Exception as detail:
keys.append(k)
values.append("[invalid_HDFStore node: %s]"
% pprint_thing(detail))
output += adjoin(12, keys, values)
else:
output += 'Empty'
else:
output += "File is CLOSED"
return output
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def keys(self):
"""
Return a (potentially unordered) list of the keys corresponding to the
objects stored in the HDFStore. These are ABSOLUTE path-names (e.g.
        have the leading '/')
"""
return [n._v_pathname for n in self.groups()]
def __iter__(self):
return iter(self.keys())
def items(self):
"""
iterate on key->group
"""
for g in self.groups():
yield g._v_pathname, g
iteritems = items
def open(self, mode='a', **kwargs):
"""
Open the file in the specified mode
Parameters
----------
mode : {'a', 'w', 'r', 'r+'}, default 'a'
See HDFStore docstring or tables.open_file for info about modes
"""
tables = _tables()
if self._mode != mode:
# if we are changing a write mode to read, ok
if self._mode in ['a', 'w'] and mode in ['r', 'r+']:
pass
elif mode in ['w']:
# this would truncate, raise here
if self.is_open:
raise PossibleDataLossError(
"Re-opening the file [{0}] with mode [{1}] "
"will delete the current file!"
.format(self._path, self._mode)
)
self._mode = mode
# close and reopen the handle
if self.is_open:
self.close()
if self._complib is not None:
if self._complevel is None:
self._complevel = 9
self._filters = _tables().Filters(self._complevel,
self._complib,
fletcher32=self._fletcher32)
try:
self._handle = tables.open_file(self._path, self._mode, **kwargs)
except (IOError) as e: # pragma: no cover
if 'can not be written' in str(e):
print('Opening %s in read-only mode' % self._path)
self._handle = tables.open_file(self._path, 'r', **kwargs)
else:
raise
except (ValueError) as e:
# trap PyTables >= 3.1 FILE_OPEN_POLICY exception
# to provide an updated message
if 'FILE_OPEN_POLICY' in str(e):
e = ValueError(
"PyTables [{version}] no longer supports opening multiple "
"files\n"
"even in read-only mode on this HDF5 version "
"[{hdf_version}]. You can accept this\n"
"and not open the same file multiple times at once,\n"
"upgrade the HDF5 version, or downgrade to PyTables 3.0.0 "
"which allows\n"
"files to be opened multiple times at once\n"
.format(version=tables.__version__,
hdf_version=tables.get_hdf5_version()))
raise e
except (Exception) as e:
            # trying to read from a non-existent file causes an error which
# is not part of IOError, make it one
if self._mode == 'r' and 'Unable to open/create file' in str(e):
raise IOError(str(e))
raise
def close(self):
"""
Close the PyTables file handle
"""
if self._handle is not None:
self._handle.close()
self._handle = None
@property
def is_open(self):
"""
return a boolean indicating whether the file is open
"""
if self._handle is None:
return False
return bool(self._handle.isopen)
def flush(self, fsync=False):
"""
Force all buffered modifications to be written to disk.
Parameters
----------
fsync : bool (default False)
call ``os.fsync()`` on the file handle to force writing to disk.
Notes
-----
Without ``fsync=True``, flushing may not guarantee that the OS writes
to disk. With fsync, the operation will block until the OS claims the
file has been written; however, other caching layers may still
interfere.
"""
if self._handle is not None:
self._handle.flush()
if fsync:
try:
os.fsync(self._handle.fileno())
except:
pass
def get(self, key):
"""
Retrieve pandas object stored in file
Parameters
----------
key : object
Returns
-------
obj : type of object stored in file
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
return self._read_group(group)
def select(self, key, where=None, start=None, stop=None, columns=None,
iterator=False, chunksize=None, auto_close=False, **kwargs):
"""
Retrieve pandas object stored in file, optionally based on where
criteria
Parameters
----------
key : object
        where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
columns : a list of columns that if not None, will limit the return
columns
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
auto_close : boolean, should automatically close the store when
finished, default is False
Returns
-------
The selected object
"""
group = self.get_node(key)
if group is None:
raise KeyError('No object named %s in the file' % key)
# create the storer and axes
where = _ensure_term(where, scope_level=1)
s = self._create_storer(group)
s.infer_axes()
# function to call on iteration
def func(_start, _stop, _where):
return s.read(start=_start, stop=_stop,
where=_where,
columns=columns, **kwargs)
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=s.nrows,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, auto_close=auto_close)
return it.get_result()
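    # Example (illustrative sketch; assumes this store holds a table 'df'
    # with a DatetimeIndex and a data column 'A'):
    #   >>> store.select('df', where="index >= '2016-01-01' & A > 0",
    #   ...              columns=['A', 'B'])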
def select_as_coordinates(
self, key, where=None, start=None, stop=None, **kwargs):
"""
return the selection as an Index
Parameters
----------
key : object
        where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
"""
where = _ensure_term(where, scope_level=1)
return self.get_storer(key).read_coordinates(where=where, start=start,
stop=stop, **kwargs)
def select_column(self, key, column, **kwargs):
"""
return a single column from the table. This is generally only useful to
select an indexable
Parameters
----------
key : object
column: the column of interest
Exceptions
----------
raises KeyError if the column is not found (or key is not a valid
store)
raises ValueError if the column can not be extracted individually (it
is part of a data block)
"""
return self.get_storer(key).read_column(column=column, **kwargs)
def select_as_multiple(self, keys, where=None, selector=None, columns=None,
start=None, stop=None, iterator=False,
chunksize=None, auto_close=False, **kwargs):
""" Retrieve pandas objects from multiple tables
Parameters
----------
keys : a list of the tables
selector : the table to apply the where criteria (defaults to keys[0]
if not supplied)
columns : the columns I want back
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
iterator : boolean, return an iterator, default False
chunksize : nrows to include in iteration, return an iterator
Exceptions
----------
raises KeyError if keys or selector is not found or keys is empty
raises TypeError if keys is not a list or tuple
raises ValueError if the tables are not ALL THE SAME DIMENSIONS
"""
# default to single select
where = _ensure_term(where, scope_level=1)
if isinstance(keys, (list, tuple)) and len(keys) == 1:
keys = keys[0]
if isinstance(keys, string_types):
return self.select(key=keys, where=where, columns=columns,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, **kwargs)
if not isinstance(keys, (list, tuple)):
raise TypeError("keys must be a list/tuple")
if not len(keys):
raise ValueError("keys must have a non-zero length")
if selector is None:
selector = keys[0]
# collect the tables
tbls = [self.get_storer(k) for k in keys]
s = self.get_storer(selector)
# validate rows
nrows = None
for t, k in itertools.chain([(s, selector)], zip(tbls, keys)):
if t is None:
raise KeyError("Invalid table [%s]" % k)
if not t.is_table:
raise TypeError(
"object [%s] is not a table, and cannot be used in all "
"select as multiple" % t.pathname
)
if nrows is None:
nrows = t.nrows
elif t.nrows != nrows:
raise ValueError(
"all tables must have exactly the same nrows!")
        # axis is the concatenation axis
axis = list(set([t.non_index_axes[0][0] for t in tbls]))[0]
def func(_start, _stop, _where):
# retrieve the objs, _where is always passed as a set of
# coordinates here
objs = [t.read(where=_where, columns=columns, start=_start,
stop=_stop, **kwargs) for t in tbls]
# concat and return
return concat(objs, axis=axis,
verify_integrity=False)._consolidate()
# create the iterator
it = TableIterator(self, s, func, where=where, nrows=nrows,
start=start, stop=stop, iterator=iterator,
chunksize=chunksize, auto_close=auto_close)
return it.get_result(coordinates=True)
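    # Example (illustrative sketch; assumes tables 'df1' and 'df2' were
    # written via append_to_multiple and share the same integer index):
    #   >>> store.select_as_multiple(['df1', 'df2'], where='index > 5',
    #   ...                          selector='df1')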
def put(self, key, value, format=None, append=False, **kwargs):
"""
Store object in HDFStore
Parameters
----------
key : object
value : {Series, DataFrame, Panel}
format : 'fixed(f)|table(t)', default is 'fixed'
fixed(f) : Fixed format
Fast writing/reading. Not-appendable, nor searchable
table(t) : Table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default False
This will force Table format, append the input data to the
existing.
data_columns : list of columns to create as data columns, or True to
use all columns. See
`here <http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__ # noqa
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
the store settable by the option 'io.hdf.dropna_table'
"""
if format is None:
format = get_option("io.hdf.default_format") or 'fixed'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, **kwargs)
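    # Example (illustrative sketch; 'df' is a placeholder DataFrame): store in
    # the queryable Table format with 'A' as an on-disk data column:
    #   >>> store.put('df', df, format='table', data_columns=['A'])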
def remove(self, key, where=None, start=None, stop=None):
"""
Remove pandas object partially by specifying the where condition
Parameters
----------
key : string
Node to remove or delete rows from
        where : list of Term (or convertible) objects, optional
start : integer (defaults to None), row number to start selection
stop : integer (defaults to None), row number to stop selection
Returns
-------
number of rows removed (or None if not a Table)
Exceptions
----------
raises KeyError if key is not a valid store
"""
where = _ensure_term(where, scope_level=1)
try:
s = self.get_storer(key)
except:
if where is not None:
raise ValueError(
"trying to remove a node with a non-None where clause!")
# we are actually trying to remove a node (with children)
s = self.get_node(key)
if s is not None:
s._f_remove(recursive=True)
return None
if s is None:
raise KeyError('No object named %s in the file' % key)
# remove the node
if where is None and start is None and stop is None:
s.group._f_remove(recursive=True)
# delete from the table
else:
if not s.is_table:
raise ValueError(
'can only remove with where on objects written as tables')
return s.delete(where=where, start=start, stop=stop)
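    # Example (illustrative sketch; assumes a table 'df' with a DatetimeIndex):
    #   >>> store.remove('df', where="index < '2015-01-01'")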
def append(self, key, value, format=None, append=True, columns=None,
dropna=None, **kwargs):
"""
Append to Table in file. Node must already exist and be Table
format.
Parameters
----------
key : object
value : {Series, DataFrame, Panel, Panel4D}
format: 'table' is the default
table(t) : table format
Write as a PyTables Table structure which may perform
worse but allow more flexible operations like searching
/ selecting subsets of the data
append : boolean, default True, append the input data to the
existing
data_columns : list of columns, or True, default None
List of columns to create as indexed data columns for on-disk
queries, or True to use all columns. By default only the axes
of the object are indexed. See `here
<http://pandas.pydata.org/pandas-docs/stable/io.html#query-via-data-columns>`__.
min_itemsize : dict of columns that specify minimum string sizes
        nan_rep : string to use as string nan representation
chunksize : size to chunk the writing
expectedrows : expected TOTAL row size of this table
encoding : default None, provide an encoding for strings
dropna : boolean, default False, do not write an ALL nan row to
the store settable by the option 'io.hdf.dropna_table'
Notes
-----
Does *not* check if data being appended overlaps with existing
data in the table, so be careful
"""
if columns is not None:
raise TypeError("columns is not a supported keyword in append, "
"try data_columns")
if dropna is None:
dropna = get_option("io.hdf.dropna_table")
if format is None:
format = get_option("io.hdf.default_format") or 'table'
kwargs = self._validate_format(format, kwargs)
self._write_to_group(key, value, append=append, dropna=dropna,
**kwargs)
def append_to_multiple(self, d, value, selector, data_columns=None,
axes=None, dropna=False, **kwargs):
"""
Append to multiple tables
Parameters
----------
d : a dict of table_name to table_columns, None is acceptable as the
values of one node (this will get all the remaining columns)
value : a pandas object
selector : a string that designates the indexable table; all of its
columns will be designed as data_columns, unless data_columns is
passed, in which case these are used
data_columns : list of columns to create as data columns, or True to
use all columns
dropna : if evaluates to True, drop rows from all tables if any single
row in each table has all NaN. Default False.
Notes
-----
axes parameter is currently not accepted
"""
if axes is not None:
raise TypeError("axes is currently not accepted as a parameter to"
" append_to_multiple; you can create the "
"tables independently instead")
if not isinstance(d, dict):
raise ValueError(
"append_to_multiple must have a dictionary specified as the "
"way to split the value"
)
if selector not in d:
raise ValueError(
"append_to_multiple requires a selector that is in passed dict"
)
# figure out the splitting axis (the non_index_axis)
axis = list(set(range(value.ndim)) - set(_AXES_MAP[type(value)]))[0]
# figure out how to split the value
remain_key = None
remain_values = []
for k, v in d.items():
if v is None:
if remain_key is not None:
raise ValueError(
"append_to_multiple can only have one value in d that "
"is None"
)
remain_key = k
else:
remain_values.extend(v)
if remain_key is not None:
ordered = value.axes[axis]
ordd = ordered.difference(Index(remain_values))
ordd = sorted(ordered.get_indexer(ordd))
d[remain_key] = ordered.take(ordd)
# data_columns
if data_columns is None:
data_columns = d[selector]
# ensure rows are synchronized across the tables
if dropna:
idxs = (value[cols].dropna(how='all').index for cols in d.values())
valid_index = next(idxs)
for index in idxs:
valid_index = valid_index.intersection(index)
value = value.loc[valid_index]
# append
for k, v in d.items():
dc = data_columns if k == selector else None
# compute the val
val = value.reindex_axis(v, axis=axis)
self.append(k, val, data_columns=dc, **kwargs)
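    # Example (illustrative sketch; the frame and column names are
    # placeholders): send columns 'a' and 'b' to 'df1', everything else to
    # 'df2', and use 'df1' as the selector for later select_as_multiple calls:
    #   >>> store.append_to_multiple({'df1': ['a', 'b'], 'df2': None},
    #   ...                          df, selector='df1')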
def create_table_index(self, key, **kwargs):
""" Create a pytables index on the table
Parameters
----------
key : object (the node to index)
Exceptions
----------
raises if the node is not a table
"""
# version requirements
_tables()
s = self.get_storer(key)
if s is None:
return
if not s.is_table:
raise TypeError(
"cannot create table index on a Fixed format store")
s.create_index(**kwargs)
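    # Example (illustrative sketch; assumes 'df' was appended with
    # data_columns=['A'], and that the underlying table accepts the usual
    # PyTables index options):
    #   >>> store.create_table_index('df', columns=['A'], optlevel=9,
    #   ...                          kind='full')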
def groups(self):
"""return a list of all the top-level nodes (that are not themselves a
pandas storage object)
"""
_tables()
self._check_if_open()
return [
g for g in self._handle.walk_nodes()
if (getattr(g._v_attrs, 'pandas_type', None) or
getattr(g, 'table', None) or
(isinstance(g, _table_mod.table.Table) and
g._v_name != u('table')))
]
def get_node(self, key):
""" return the node with the key or None if it does not exist """
self._check_if_open()
try:
if not key.startswith('/'):
key = '/' + key
return self._handle.get_node(self.root, key)
except:
return None
def get_storer(self, key):
""" return the storer object for a key, raise if not in the file """
group = self.get_node(key)
if group is None:
return None
s = self._create_storer(group)
s.infer_axes()
return s
def copy(self, file, mode='w', propindexes=True, keys=None, complib=None,
complevel=None, fletcher32=False, overwrite=True):
""" copy the existing store to a new file, upgrading in place
Parameters
----------
propindexes: restore indexes in copied file (defaults to True)
keys : list of keys to include in the copy (defaults to all)
overwrite : overwrite (remove and replace) existing nodes in the
new store (default is True)
mode, complib, complevel, fletcher32 same as in HDFStore.__init__
Returns
-------
open file handle of the new store
"""
new_store = HDFStore(
file,
mode=mode,
complib=complib,
complevel=complevel,
fletcher32=fletcher32)
if keys is None:
keys = list(self.keys())
if not isinstance(keys, (tuple, list)):
keys = [keys]
for k in keys:
s = self.get_storer(k)
if s is not None:
if k in new_store:
if overwrite:
new_store.remove(k)
data = self.select(k)
if s.is_table:
index = False
if propindexes:
index = [a.name for a in s.axes if a.is_indexed]
new_store.append(
k, data, index=index,
data_columns=getattr(s, 'data_columns', None),
encoding=s.encoding
)
else:
new_store.put(k, data, encoding=s.encoding)
return new_store
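    # Example (illustrative sketch; file name is a placeholder): rewrite the
    # current store into a blosc-compressed copy, keeping PyTables indexes:
    #   >>> new = store.copy('store_compressed.h5', complib='blosc',
    #   ...                  complevel=9, propindexes=True)
    #   >>> new.close()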
# private methods ######
def _check_if_open(self):
if not self.is_open:
raise ClosedFileError("{0} file is not open!".format(self._path))
def _validate_format(self, format, kwargs):
""" validate / deprecate formats; return the new kwargs """
kwargs = kwargs.copy()
# validate
try:
kwargs['format'] = _FORMAT_MAP[format.lower()]
except:
raise TypeError("invalid HDFStore format specified [{0}]"
.format(format))
return kwargs
def _create_storer(self, group, format=None, value=None, append=False,
**kwargs):
""" return a suitable class to operate """
def error(t):
raise TypeError(
"cannot properly create the storer for: [%s] [group->%s,"
"value->%s,format->%s,append->%s,kwargs->%s]"
% (t, group, type(value), format, append, kwargs)
)
pt = _ensure_decoded(getattr(group._v_attrs, 'pandas_type', None))
tt = _ensure_decoded(getattr(group._v_attrs, 'table_type', None))
# infer the pt from the passed value
if pt is None:
if value is None:
_tables()
if (getattr(group, 'table', None) or
isinstance(group, _table_mod.table.Table)):
pt = u('frame_table')
tt = u('generic_table')
else:
raise TypeError(
"cannot create a storer if the object is not existing "
"nor a value are passed")
else:
try:
pt = _TYPE_MAP[type(value)]
except:
error('_TYPE_MAP')
# we are actually a table
if format == 'table':
pt += u('_table')
# a storer node
if u('table') not in pt:
try:
return globals()[_STORER_MAP[pt]](self, group, **kwargs)
except:
error('_STORER_MAP')
# existing node (and must be a table)
if tt is None:
            # if we are a writer, determine the tt
if value is not None:
if pt == u('series_table'):
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_series')
elif index.nlevels > 1:
tt = u('appendable_multiseries')
elif pt == u('frame_table'):
index = getattr(value, 'index', None)
if index is not None:
if index.nlevels == 1:
tt = u('appendable_frame')
elif index.nlevels > 1:
tt = u('appendable_multiframe')
elif pt == u('wide_table'):
tt = u('appendable_panel')
elif pt == u('ndim_table'):
tt = u('appendable_ndim')
else:
                # distinguish between a frame/table
tt = u('legacy_panel')
try:
fields = group.table._v_attrs.fields
if len(fields) == 1 and fields[0] == u('value'):
tt = u('legacy_frame')
except:
pass
try:
return globals()[_TABLE_MAP[tt]](self, group, **kwargs)
except:
error('_TABLE_MAP')
def _write_to_group(self, key, value, format, index=True, append=False,
complib=None, encoding=None, **kwargs):
group = self.get_node(key)
# remove the node if we are not appending
if group is not None and not append:
self._handle.remove_node(group, recursive=True)
group = None
        # we don't want to store a table node at all if our object is 0-len
        # as there are no dtypes
if getattr(value, 'empty', None) and (format == 'table' or append):
return
if group is None:
paths = key.split('/')
# recursively create the groups
path = '/'
for p in paths:
if not len(p):
continue
new_path = path
if not path.endswith('/'):
new_path += '/'
new_path += p
group = self.get_node(new_path)
if group is None:
group = self._handle.create_group(path, p)
path = new_path
s = self._create_storer(group, format, value, append=append,
encoding=encoding, **kwargs)
if append:
# raise if we are trying to append to a Fixed format,
# or a table that exists (and we are putting)
if (not s.is_table or
(s.is_table and format == 'fixed' and s.is_exists)):
raise ValueError('Can only append to Tables')
if not s.is_exists:
s.set_object_info()
else:
s.set_object_info()
if not s.is_table and complib:
raise ValueError(
'Compression not supported on Fixed format stores'
)
# write the object
s.write(obj=value, append=append, complib=complib, **kwargs)
if s.is_table and index:
s.create_index(columns=index)
def _read_group(self, group, **kwargs):
s = self._create_storer(group)
s.infer_axes()
return s.read(**kwargs)
def get_store(path, **kwargs):
""" Backwards compatible alias for ``HDFStore``
"""
warnings.warn(
"get_store is deprecated and be "
"removed in a future version\n"
"HDFStore(path, **kwargs) is the replacement",
FutureWarning,
stacklevel=6)
return HDFStore(path, **kwargs)
class TableIterator(object):
""" define the iteration interface on a table
Parameters
----------
store : the reference store
s : the refered storer
func : the function to execute the query
where : the where of the query
nrows : the rows to iterate on
start : the passed start value (default is None)
stop : the passed stop value (default is None)
iterator : boolean, whether to use the default iterator
chunksize : the passed chunking value (default is 50000)
auto_close : boolean, automatically close the store at the end of
iteration, default is False
kwargs : the passed kwargs
"""
def __init__(self, store, s, func, where, nrows, start=None, stop=None,
iterator=False, chunksize=None, auto_close=False):
self.store = store
self.s = s
self.func = func
self.where = where
# set start/stop if they are not set if we are a table
if self.s.is_table:
if nrows is None:
nrows = 0
if start is None:
start = 0
if stop is None:
stop = nrows
stop = min(nrows, stop)
self.nrows = nrows
self.start = start
self.stop = stop
self.coordinates = None
if iterator or chunksize is not None:
if chunksize is None:
chunksize = 100000
self.chunksize = int(chunksize)
else:
self.chunksize = None
self.auto_close = auto_close
def __iter__(self):
# iterate
current = self.start
while current < self.stop:
stop = min(current + self.chunksize, self.stop)
value = self.func(None, None, self.coordinates[current:stop])
current = stop
if value is None or not len(value):
continue
yield value
self.close()
def close(self):
if self.auto_close:
self.store.close()
def get_result(self, coordinates=False):
# return the actual iterator
if self.chunksize is not None:
if not self.s.is_table:
raise TypeError(
"can only use an iterator or chunksize on a table")
self.coordinates = self.s.read_coordinates(where=self.where)
return self
        # if specified, read via coordinates (necessary for multiple selections)
if coordinates:
where = self.s.read_coordinates(where=self.where, start=self.start,
stop=self.stop)
else:
where = self.where
# directly return the result
results = self.func(self.start, self.stop, where)
self.close()
return results
class IndexCol(StringMixin):
""" an index column description class
Parameters
----------
axis : axis which I reference
values : the ndarray like converted values
kind : a string description of this type
typ : the pytables type
pos : the position in the pytables
"""
is_an_indexable = True
is_data_indexable = True
_info_fields = ['freq', 'tz', 'index_name']
def __init__(self, values=None, kind=None, typ=None, cname=None,
itemsize=None, name=None, axis=None, kind_attr=None,
pos=None, freq=None, tz=None, index_name=None, **kwargs):
self.values = values
self.kind = kind
self.typ = typ
self.itemsize = itemsize
self.name = name
self.cname = cname
self.kind_attr = kind_attr
self.axis = axis
self.pos = pos
self.freq = freq
self.tz = tz
self.index_name = index_name
self.table = None
self.meta = None
self.metadata = None
if name is not None:
self.set_name(name, kind_attr)
if pos is not None:
self.set_pos(pos)
def set_name(self, name, kind_attr=None):
""" set the name of this indexer """
self.name = name
self.kind_attr = kind_attr or "%s_kind" % name
if self.cname is None:
self.cname = name
return self
def set_axis(self, axis):
""" set the axis over which I index """
self.axis = axis
return self
def set_pos(self, pos):
""" set the position of this column in the Table """
self.pos = pos
if pos is not None and self.typ is not None:
self.typ._v_pos = pos
return self
def set_table(self, table):
self.table = table
return self
def __unicode__(self):
temp = tuple(
map(pprint_thing,
(self.name,
self.cname,
self.axis,
self.pos,
self.kind)))
return "name->%s,cname->%s,axis->%s,pos->%s,kind->%s" % temp
def __eq__(self, other):
""" compare 2 col items """
return all([getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'axis', 'pos']])
def __ne__(self, other):
return not self.__eq__(other)
@property
def is_indexed(self):
""" return whether I am an indexed column """
try:
return getattr(self.table.cols, self.cname).is_indexed
except:
            return False
def copy(self):
new_self = copy.copy(self)
return new_self
def infer(self, handler):
"""infer this column from the table: create and return a new object"""
table = handler.table
new_self = self.copy()
new_self.set_table(table)
new_self.get_attr()
new_self.read_metadata(handler)
return new_self
def convert(self, values, nan_rep, encoding):
""" set the values from this selection: take = take ownership """
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
values = _maybe_convert(values, self.kind, encoding)
kwargs = dict()
if self.freq is not None:
kwargs['freq'] = _ensure_decoded(self.freq)
if self.index_name is not None:
kwargs['name'] = _ensure_decoded(self.index_name)
try:
self.values = Index(values, **kwargs)
except:
# if the output freq is different that what we recorded,
# it should be None (see also 'doc example part 2')
if 'freq' in kwargs:
kwargs['freq'] = None
self.values = Index(values, **kwargs)
self.values = _set_tz(self.values, self.tz)
return self
def take_data(self):
""" return the values & release the memory """
self.values, values = None, self.values
return values
@property
def attrs(self):
return self.table._v_attrs
@property
def description(self):
return self.table.description
@property
def col(self):
""" return my current col description """
return getattr(self.description, self.cname, None)
@property
def cvalues(self):
""" return my cython values """
return self.values
def __iter__(self):
return iter(self.values)
def maybe_set_size(self, min_itemsize=None, **kwargs):
""" maybe set a string col itemsize:
            min_itemsize can be an integer or a dict with this column's name
with an integer size """
if _ensure_decoded(self.kind) == u('string'):
if isinstance(min_itemsize, dict):
min_itemsize = min_itemsize.get(self.name)
if min_itemsize is not None and self.typ.itemsize < min_itemsize:
self.typ = _tables(
).StringCol(itemsize=min_itemsize, pos=self.pos)
def validate(self, handler, append, **kwargs):
self.validate_names()
def validate_names(self):
pass
def validate_and_set(self, handler, append, **kwargs):
self.set_table(handler.table)
self.validate_col()
self.validate_attr(append)
self.validate_metadata(handler)
self.write_metadata(handler)
self.set_attr()
def validate_col(self, itemsize=None):
""" validate this column: return the compared against itemsize """
# validate this column for string truncation (or reset to the max size)
if _ensure_decoded(self.kind) == u('string'):
c = self.col
if c is not None:
if itemsize is None:
itemsize = self.itemsize
if c.itemsize < itemsize:
raise ValueError(
"Trying to store a string with len [%s] in [%s] "
"column but\nthis column has a limit of [%s]!\n"
"Consider using min_itemsize to preset the sizes on "
"these columns" % (itemsize, self.cname, c.itemsize))
return c.itemsize
return None
def validate_attr(self, append):
# check for backwards incompatibility
if append:
existing_kind = getattr(self.attrs, self.kind_attr, None)
if existing_kind is not None and existing_kind != self.kind:
raise TypeError("incompatible kind in col [%s - %s]" %
(existing_kind, self.kind))
def update_info(self, info):
""" set/update the info for this indexable with the key/value
if there is a conflict raise/warn as needed """
for key in self._info_fields:
value = getattr(self, key, None)
idx = _get_info(info, self.name)
existing_value = idx.get(key)
if key in idx and value is not None and existing_value != value:
# frequency/name just warn
if key in ['freq', 'index_name']:
ws = attribute_conflict_doc % (key, existing_value, value)
warnings.warn(ws, AttributeConflictWarning, stacklevel=6)
# reset
idx[key] = None
setattr(self, key, None)
else:
raise ValueError(
"invalid info for [%s] for [%s], existing_value [%s] "
"conflicts with new value [%s]"
% (self.name, key, existing_value, value))
else:
if value is not None or existing_value is not None:
idx[key] = value
return self
def set_info(self, info):
""" set my state from the passed info """
idx = info.get(self.name)
if idx is not None:
self.__dict__.update(idx)
def get_attr(self):
""" set the kind for this colummn """
self.kind = getattr(self.attrs, self.kind_attr, None)
def set_attr(self):
""" set the kind for this colummn """
setattr(self.attrs, self.kind_attr, self.kind)
def read_metadata(self, handler):
""" retrieve the metadata for this columns """
self.metadata = handler.read_metadata(self.cname)
def validate_metadata(self, handler):
""" validate that kind=category does not change the categories """
if self.meta == 'category':
new_metadata = self.metadata
cur_metadata = handler.read_metadata(self.cname)
if new_metadata is not None and cur_metadata is not None \
and not array_equivalent(new_metadata, cur_metadata):
raise ValueError("cannot append a categorical with "
"different categories to the existing")
def write_metadata(self, handler):
""" set the meta data """
if self.metadata is not None:
handler.write_metadata(self.cname, self.metadata)
class GenericIndexCol(IndexCol):
""" an index which is not represented in the data of the table """
@property
def is_indexed(self):
return False
def convert(self, values, nan_rep, encoding):
""" set the values from this selection: take = take ownership """
self.values = Int64Index(np.arange(self.table.nrows))
return self
def get_attr(self):
pass
def set_attr(self):
pass
class DataCol(IndexCol):
""" a data holding column, by definition this is not indexable
Parameters
----------
data : the actual data
cname : the column name in the table to hold the data (typically
values)
meta : a string description of the metadata
metadata : the actual metadata
"""
is_an_indexable = False
is_data_indexable = False
_info_fields = ['tz', 'ordered']
@classmethod
def create_for_block(
cls, i=None, name=None, cname=None, version=None, **kwargs):
""" return a new datacol with the block i """
if cname is None:
cname = name or 'values_block_%d' % i
if name is None:
name = cname
        # prior to 0.10.1, we named values blocks like: values_block_0 and the
# name values_0
try:
if version[0] == 0 and version[1] <= 10 and version[2] == 0:
m = re.search("values_block_(\d+)", name)
if m:
name = "values_%s" % m.groups()[0]
except:
pass
return cls(name=name, cname=cname, **kwargs)
def __init__(self, values=None, kind=None, typ=None,
cname=None, data=None, meta=None, metadata=None,
block=None, **kwargs):
super(DataCol, self).__init__(values=values, kind=kind, typ=typ,
cname=cname, **kwargs)
self.dtype = None
self.dtype_attr = u("%s_dtype" % self.name)
self.meta = meta
self.meta_attr = u("%s_meta" % self.name)
self.set_data(data)
self.set_metadata(metadata)
def __unicode__(self):
temp = tuple(
map(pprint_thing,
(self.name,
self.cname,
self.dtype,
self.kind,
self.shape)))
return "name->%s,cname->%s,dtype->%s,kind->%s,shape->%s" % temp
def __eq__(self, other):
""" compare 2 col items """
return all([getattr(self, a, None) == getattr(other, a, None)
for a in ['name', 'cname', 'dtype', 'pos']])
def set_data(self, data, dtype=None):
self.data = data
if data is not None:
if dtype is not None:
self.dtype = dtype
self.set_kind()
elif self.dtype is None:
self.dtype = data.dtype.name
self.set_kind()
def take_data(self):
""" return the data & release the memory """
self.data, data = None, self.data
return data
def set_metadata(self, metadata):
""" record the metadata """
if metadata is not None:
metadata = np.array(metadata, copy=False).ravel()
self.metadata = metadata
def set_kind(self):
# set my kind if we can
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
if dtype.startswith(u('string')) or dtype.startswith(u('bytes')):
self.kind = 'string'
elif dtype.startswith(u('float')):
self.kind = 'float'
elif dtype.startswith(u('complex')):
self.kind = 'complex'
elif dtype.startswith(u('int')) or dtype.startswith(u('uint')):
self.kind = 'integer'
elif dtype.startswith(u('date')):
self.kind = 'datetime'
elif dtype.startswith(u('timedelta')):
self.kind = 'timedelta'
elif dtype.startswith(u('bool')):
self.kind = 'bool'
else:
raise AssertionError(
"cannot interpret dtype of [%s] in [%s]" % (dtype, self))
# set my typ if we need
if self.typ is None:
self.typ = getattr(self.description, self.cname, None)
def set_atom(self, block, block_items, existing_col, min_itemsize,
nan_rep, info, encoding=None, **kwargs):
""" create and setup my atom from the block b """
self.values = list(block_items)
# short-cut certain block types
if block.is_categorical:
return self.set_atom_categorical(block, items=block_items,
info=info)
elif block.is_datetimetz:
return self.set_atom_datetime64tz(block, info=info)
elif block.is_datetime:
return self.set_atom_datetime64(block)
elif block.is_timedelta:
return self.set_atom_timedelta64(block)
elif block.is_complex:
return self.set_atom_complex(block)
dtype = block.dtype.name
inferred_type = lib.infer_dtype(block.values)
if inferred_type == 'date':
raise TypeError(
"[date] is not implemented as a table column")
elif inferred_type == 'datetime':
# after 8260
            # this only would be hit for a multi-timezone dtype
# which is an error
raise TypeError(
"too many timezones in this block, create separate "
"data columns"
)
elif inferred_type == 'unicode':
raise TypeError(
"[unicode] is not implemented as a table column")
        # this is basically a catchall; if, say, a datetime64 has nans then
        # it will end up here
elif inferred_type == 'string' or dtype == 'object':
self.set_atom_string(
block, block_items,
existing_col,
min_itemsize,
nan_rep,
encoding)
# set as a data block
else:
self.set_atom_data(block)
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize, shape=block.shape[0])
def set_atom_string(self, block, block_items, existing_col, min_itemsize,
nan_rep, encoding):
# fill nan items with myself, don't disturb the blocks by
# trying to downcast
block = block.fillna(nan_rep, downcast=False)
if isinstance(block, list):
block = block[0]
data = block.values
# see if we have a valid string type
inferred_type = lib.infer_dtype(data.ravel())
if inferred_type != 'string':
# we cannot serialize this data, so report an exception on a column
# by column basis
for i, item in enumerate(block_items):
col = block.iget(i)
inferred_type = lib.infer_dtype(col.ravel())
if inferred_type != 'string':
raise TypeError(
"Cannot serialize the column [%s] because\n"
"its data contents are [%s] object dtype"
% (item, inferred_type)
)
# itemsize is the maximum length of a string (along any dimension)
data_converted = _convert_string_array(data, encoding)
itemsize = data_converted.itemsize
# specified min_itemsize?
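        # e.g. (illustrative): min_itemsize={'A': 30} (for the column named
        # 'A') or {'values': 30} both enforce a minimum string width of 30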
if isinstance(min_itemsize, dict):
min_itemsize = int(min_itemsize.get(
self.name) or min_itemsize.get('values') or 0)
itemsize = max(min_itemsize or 0, itemsize)
        # check for conflicts with an existing column in the values
if existing_col is not None:
eci = existing_col.validate_col(itemsize)
if eci > itemsize:
itemsize = eci
self.itemsize = itemsize
self.kind = 'string'
self.typ = self.get_atom_string(block, itemsize)
self.set_data(data_converted.astype('|S%d' % itemsize, copy=False))
def get_atom_coltype(self, kind=None):
""" return the PyTables column class for this column """
if kind is None:
kind = self.kind
        if kind.startswith('uint'):
col_name = "UInt%sCol" % kind[4:]
else:
col_name = "%sCol" % kind.capitalize()
return getattr(_tables(), col_name)
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)(shape=block.shape[0])
def set_atom_complex(self, block):
self.kind = block.dtype.name
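        # e.g. (illustrative): kind 'complex128' -> itemsize 16 bytes per value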
itemsize = int(self.kind.split('complex')[-1]) // 8
self.typ = _tables().ComplexCol(
itemsize=itemsize, shape=block.shape[0])
self.set_data(block.values.astype(self.typ.type, copy=False))
def set_atom_data(self, block):
self.kind = block.dtype.name
self.typ = self.get_atom_data(block)
self.set_data(block.values.astype(self.typ.type, copy=False))
def set_atom_categorical(self, block, items, info=None, values=None):
# currently only supports a 1-D categorical
# in a 1-D block
values = block.values
codes = values.codes
self.kind = 'integer'
self.dtype = codes.dtype.name
if values.ndim > 1:
raise NotImplementedError("only support 1-d categoricals")
if len(items) > 1:
raise NotImplementedError("only support single block categoricals")
# write the codes; must be in a block shape
self.ordered = values.ordered
self.typ = self.get_atom_data(block, kind=codes.dtype.name)
self.set_data(_block_shape(codes))
# write the categories
self.meta = 'category'
self.set_metadata(block.values.categories)
# update the info
self.update_info(info)
def get_atom_datetime64(self, block):
return _tables().Int64Col(shape=block.shape[0])
def set_atom_datetime64(self, block, values=None):
self.kind = 'datetime64'
self.typ = self.get_atom_datetime64(block)
if values is None:
values = block.values.view('i8')
self.set_data(values, 'datetime64')
def set_atom_datetime64tz(self, block, info, values=None):
if values is None:
values = block.values
# convert this column to i8 in UTC, and save the tz
values = values.asi8.reshape(block.shape)
# store a converted timezone
self.tz = _get_tz(block.values.tz)
self.update_info(info)
self.kind = 'datetime64'
self.typ = self.get_atom_datetime64(block)
self.set_data(values, 'datetime64')
def get_atom_timedelta64(self, block):
return _tables().Int64Col(shape=block.shape[0])
def set_atom_timedelta64(self, block, values=None):
self.kind = 'timedelta64'
self.typ = self.get_atom_timedelta64(block)
if values is None:
values = block.values.view('i8')
self.set_data(values, 'timedelta64')
@property
def shape(self):
return getattr(self.data, 'shape', None)
@property
def cvalues(self):
""" return my cython values """
return self.data
def validate_attr(self, append):
"""validate that we have the same order as the existing & same dtype"""
if append:
existing_fields = getattr(self.attrs, self.kind_attr, None)
if (existing_fields is not None and
existing_fields != list(self.values)):
raise ValueError("appended items do not match existing items"
" in table!")
existing_dtype = getattr(self.attrs, self.dtype_attr, None)
if (existing_dtype is not None and
existing_dtype != self.dtype):
raise ValueError("appended items dtype do not match existing "
"items dtype in table!")
def convert(self, values, nan_rep, encoding):
"""set the data from this selection (and convert to the correct dtype
if we can)
"""
# values is a recarray
if values.dtype.fields is not None:
values = values[self.cname]
self.set_data(values)
# use the meta if needed
meta = _ensure_decoded(self.meta)
# convert to the correct dtype
if self.dtype is not None:
dtype = _ensure_decoded(self.dtype)
# reverse converts
if dtype == u('datetime64'):
# recreate with tz if indicated
self.data = _set_tz(self.data, self.tz, coerce=True)
elif dtype == u('timedelta64'):
self.data = np.asarray(self.data, dtype='m8[ns]')
elif dtype == u('date'):
try:
self.data = np.asarray(
[date.fromordinal(v) for v in self.data], dtype=object)
except ValueError:
self.data = np.asarray(
[date.fromtimestamp(v) for v in self.data],
dtype=object)
elif dtype == u('datetime'):
self.data = np.asarray(
[datetime.fromtimestamp(v) for v in self.data],
dtype=object)
elif meta == u('category'):
# we have a categorical
categories = self.metadata
codes = self.data.ravel()
# if we have stored a NaN in the categories
# then strip it; in theory we could have BOTH
# -1s in the codes and nulls :<
mask = isnull(categories)
if mask.any():
categories = categories[~mask]
codes[codes != -1] -= mask.astype(int).cumsum().values
self.data = Categorical.from_codes(codes,
categories=categories,
ordered=self.ordered)
else:
try:
self.data = self.data.astype(dtype, copy=False)
except:
self.data = self.data.astype('O', copy=False)
# convert nans / decode
if _ensure_decoded(self.kind) == u('string'):
self.data = _unconvert_string_array(
self.data, nan_rep=nan_rep, encoding=encoding)
return self
def get_attr(self):
""" get the data for this colummn """
self.values = getattr(self.attrs, self.kind_attr, None)
self.dtype = getattr(self.attrs, self.dtype_attr, None)
self.meta = getattr(self.attrs, self.meta_attr, None)
self.set_kind()
def set_attr(self):
""" set the data for this colummn """
setattr(self.attrs, self.kind_attr, self.values)
setattr(self.attrs, self.meta_attr, self.meta)
if self.dtype is not None:
setattr(self.attrs, self.dtype_attr, self.dtype)
class DataIndexableCol(DataCol):
""" represent a data column that can be indexed """
is_data_indexable = True
def validate_names(self):
if not Index(self.values).is_object():
raise ValueError("cannot have non-object label DataIndexableCol")
def get_atom_string(self, block, itemsize):
return _tables().StringCol(itemsize=itemsize)
def get_atom_data(self, block, kind=None):
return self.get_atom_coltype(kind=kind)()
def get_atom_datetime64(self, block):
return _tables().Int64Col()
def get_atom_timedelta64(self, block):
return _tables().Int64Col()
class GenericDataIndexableCol(DataIndexableCol):
""" represent a generic pytables data column """
def get_attr(self):
pass
class Fixed(StringMixin):
""" represent an object in my store
facilitate read/write of various types of objects
this is an abstract base class
Parameters
----------
parent : my parent HDFStore
group : the group node where the table resides
"""
pandas_kind = None
obj_type = None
ndim = None
is_table = False
def __init__(self, parent, group, encoding=None, **kwargs):
self.parent = parent
self.group = group
self.encoding = _ensure_encoding(encoding)
self.set_version()
@property
def is_old_version(self):
return (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1)
def set_version(self):
""" compute and set our version """
version = _ensure_decoded(
getattr(self.group._v_attrs, 'pandas_version', None))
try:
self.version = tuple([int(x) for x in version.split('.')])
if len(self.version) == 2:
self.version = self.version + (0,)
except:
self.version = (0, 0, 0)
@property
def pandas_type(self):
return _ensure_decoded(getattr(self.group._v_attrs,
'pandas_type', None))
@property
def format_type(self):
return 'fixed'
def __unicode__(self):
""" return a pretty representation of myself """
self.infer_axes()
s = self.shape
if s is not None:
if isinstance(s, (list, tuple)):
s = "[%s]" % ','.join([pprint_thing(x) for x in s])
return "%-12.12s (shape->%s)" % (self.pandas_type, s)
return self.pandas_type
def set_object_info(self):
""" set my pandas type & version """
self.attrs.pandas_type = str(self.pandas_kind)
self.attrs.pandas_version = str(_version)
self.set_version()
def copy(self):
new_self = copy.copy(self)
return new_self
@property
def storage_obj_type(self):
return self.obj_type
@property
def shape(self):
return self.nrows
@property
def pathname(self):
return self.group._v_pathname
@property
def _handle(self):
return self.parent._handle
@property
def _filters(self):
return self.parent._filters
@property
def _complevel(self):
return self.parent._complevel
@property
def _fletcher32(self):
return self.parent._fletcher32
@property
def _complib(self):
return self.parent._complib
@property
def attrs(self):
return self.group._v_attrs
def set_attrs(self):
""" set our object attributes """
pass
def get_attrs(self):
""" get our object attributes """
pass
@property
def storable(self):
""" return my storable """
return self.group
@property
def is_exists(self):
return False
@property
def nrows(self):
return getattr(self.storable, 'nrows', None)
def validate(self, other):
""" validate against an existing storable """
if other is None:
return
return True
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
return True
def infer_axes(self):
""" infer the axes of my storer
return a boolean indicating if we have a valid storer or not """
s = self.storable
if s is None:
return False
self.get_attrs()
return True
def read(self, **kwargs):
raise NotImplementedError(
"cannot read on an abstract storer: subclasses should implement")
def write(self, **kwargs):
raise NotImplementedError(
"cannot write on an abstract storer: sublcasses should implement")
def delete(self, where=None, start=None, stop=None, **kwargs):
"""
support fully deleting the node in its entirety (only) - where
specification must be None
"""
if where is None and start is None and stop is None:
self._handle.remove_node(self.group, recursive=True)
return None
raise TypeError("cannot delete on an abstract storer")
class GenericFixed(Fixed):
""" a generified fixed version """
_index_type_map = {DatetimeIndex: 'datetime', PeriodIndex: 'period'}
_reverse_index_map = dict([(v, k)
for k, v in compat.iteritems(_index_type_map)])
attributes = []
    # indexer helpers
def _class_to_alias(self, cls):
return self._index_type_map.get(cls, '')
def _alias_to_class(self, alias):
if isinstance(alias, type): # pragma: no cover
# compat: for a short period of time master stored types
return alias
return self._reverse_index_map.get(alias, Index)
def _get_index_factory(self, klass):
if klass == DatetimeIndex:
def f(values, freq=None, tz=None):
return DatetimeIndex._simple_new(values, None, freq=freq,
tz=tz)
return f
elif klass == PeriodIndex:
def f(values, freq=None, tz=None):
return PeriodIndex._simple_new(values, None, freq=freq)
return f
return klass
def validate_read(self, kwargs):
"""
remove table keywords from kwargs and return
raise if any keywords are passed which are not-None
"""
kwargs = copy.copy(kwargs)
columns = kwargs.pop('columns', None)
if columns is not None:
raise TypeError("cannot pass a column specification when reading "
"a Fixed format store. this store must be "
"selected in its entirety")
where = kwargs.pop('where', None)
if where is not None:
raise TypeError("cannot pass a where specification when reading "
"from a Fixed format store. this store must be "
"selected in its entirety")
return kwargs
@property
def is_exists(self):
return True
def set_attrs(self):
""" set our object attributes """
self.attrs.encoding = self.encoding
def get_attrs(self):
""" retrieve our attributes """
self.encoding = _ensure_encoding(getattr(self.attrs, 'encoding', None))
for n in self.attributes:
setattr(self, n, _ensure_decoded(getattr(self.attrs, n, None)))
def write(self, obj, **kwargs):
self.set_attrs()
def read_array(self, key, start=None, stop=None):
""" read an array for the specified node (off of group """
import tables
node = getattr(self.group, key)
data = node[start:stop]
attrs = node._v_attrs
transposed = getattr(attrs, 'transposed', False)
if isinstance(node, tables.VLArray):
ret = data[0]
else:
dtype = getattr(attrs, 'value_type', None)
shape = getattr(attrs, 'shape', None)
if shape is not None:
# length 0 axis
ret = np.empty(shape, dtype=dtype)
else:
ret = data
if dtype == u('datetime64'):
# reconstruct a timezone if indicated
ret = _set_tz(ret, getattr(attrs, 'tz', None), coerce=True)
elif dtype == u('timedelta64'):
ret = np.asarray(ret, dtype='m8[ns]')
if transposed:
return ret.T
else:
return ret
def read_index(self, key, **kwargs):
variety = _ensure_decoded(getattr(self.attrs, '%s_variety' % key))
if variety == u('multi'):
return self.read_multi_index(key, **kwargs)
elif variety == u('block'):
return self.read_block_index(key, **kwargs)
elif variety == u('sparseint'):
return self.read_sparse_intindex(key, **kwargs)
elif variety == u('regular'):
_, index = self.read_index_node(getattr(self.group, key), **kwargs)
return index
else: # pragma: no cover
raise TypeError('unrecognized index variety: %s' % variety)
def write_index(self, key, index):
if isinstance(index, MultiIndex):
setattr(self.attrs, '%s_variety' % key, 'multi')
self.write_multi_index(key, index)
elif isinstance(index, BlockIndex):
setattr(self.attrs, '%s_variety' % key, 'block')
self.write_block_index(key, index)
elif isinstance(index, IntIndex):
setattr(self.attrs, '%s_variety' % key, 'sparseint')
self.write_sparse_intindex(key, index)
else:
setattr(self.attrs, '%s_variety' % key, 'regular')
converted = _convert_index(index, self.encoding,
self.format_type).set_name('index')
self.write_array(key, converted.values)
node = getattr(self.group, key)
node._v_attrs.kind = converted.kind
node._v_attrs.name = index.name
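            # e.g. (illustrative): a DatetimeIndex additionally records its
            # class alias, freq and tz below so it can be rebuilt on read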
if isinstance(index, (DatetimeIndex, PeriodIndex)):
node._v_attrs.index_class = self._class_to_alias(type(index))
if hasattr(index, 'freq'):
node._v_attrs.freq = index.freq
if hasattr(index, 'tz') and index.tz is not None:
node._v_attrs.tz = _get_tz(index.tz)
def write_block_index(self, key, index):
self.write_array('%s_blocs' % key, index.blocs)
self.write_array('%s_blengths' % key, index.blengths)
setattr(self.attrs, '%s_length' % key, index.length)
def read_block_index(self, key, **kwargs):
length = getattr(self.attrs, '%s_length' % key)
blocs = self.read_array('%s_blocs' % key, **kwargs)
blengths = self.read_array('%s_blengths' % key, **kwargs)
return BlockIndex(length, blocs, blengths)
def write_sparse_intindex(self, key, index):
self.write_array('%s_indices' % key, index.indices)
setattr(self.attrs, '%s_length' % key, index.length)
def read_sparse_intindex(self, key, **kwargs):
length = getattr(self.attrs, '%s_length' % key)
indices = self.read_array('%s_indices' % key, **kwargs)
return IntIndex(length, indices)
def write_multi_index(self, key, index):
setattr(self.attrs, '%s_nlevels' % key, index.nlevels)
for i, (lev, lab, name) in enumerate(zip(index.levels,
index.labels,
index.names)):
# write the level
level_key = '%s_level%d' % (key, i)
conv_level = _convert_index(lev, self.encoding,
self.format_type).set_name(level_key)
self.write_array(level_key, conv_level.values)
node = getattr(self.group, level_key)
node._v_attrs.kind = conv_level.kind
node._v_attrs.name = name
# write the name
setattr(node._v_attrs, '%s_name%d' % (key, i), name)
# write the labels
label_key = '%s_label%d' % (key, i)
self.write_array(label_key, lab)
def read_multi_index(self, key, **kwargs):
nlevels = getattr(self.attrs, '%s_nlevels' % key)
levels = []
labels = []
names = []
for i in range(nlevels):
level_key = '%s_level%d' % (key, i)
name, lev = self.read_index_node(getattr(self.group, level_key),
**kwargs)
levels.append(lev)
names.append(name)
label_key = '%s_label%d' % (key, i)
lab = self.read_array(label_key, **kwargs)
labels.append(lab)
return MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=True)
def read_index_node(self, node, start=None, stop=None):
data = node[start:stop]
# If the index was an empty array write_array_empty() will
        # have written a sentinel. Here we replace it with the original.
if ('shape' in node._v_attrs and
self._is_empty_array(getattr(node._v_attrs, 'shape'))):
data = np.empty(getattr(node._v_attrs, 'shape'),
dtype=getattr(node._v_attrs, 'value_type'))
kind = _ensure_decoded(node._v_attrs.kind)
name = None
if 'name' in node._v_attrs:
name = _ensure_str(node._v_attrs.name)
index_class = self._alias_to_class(_ensure_decoded(
getattr(node._v_attrs, 'index_class', '')))
factory = self._get_index_factory(index_class)
kwargs = {}
if u('freq') in node._v_attrs:
kwargs['freq'] = node._v_attrs['freq']
if u('tz') in node._v_attrs:
kwargs['tz'] = node._v_attrs['tz']
if kind in (u('date'), u('datetime')):
index = factory(_unconvert_index(data, kind,
encoding=self.encoding),
dtype=object, **kwargs)
else:
index = factory(_unconvert_index(data, kind,
encoding=self.encoding), **kwargs)
index.name = name
return name, index
def write_array_empty(self, key, value):
""" write a 0-len array """
# ugly hack for length 0 axes
arr = np.empty((1,) * value.ndim)
self._handle.create_array(self.group, key, arr)
getattr(self.group, key)._v_attrs.value_type = str(value.dtype)
getattr(self.group, key)._v_attrs.shape = value.shape
def _is_empty_array(self, shape):
"""Returns true if any axis is zero length."""
return any(x == 0 for x in shape)
def write_array(self, key, value, items=None):
if key in self.group:
self._handle.remove_node(self.group, key)
# Transform needed to interface with pytables row/col notation
empty_array = self._is_empty_array(value.shape)
transposed = False
if is_categorical_dtype(value):
raise NotImplementedError('Cannot store a category dtype in '
'a HDF5 dataset that uses format='
'"fixed". Use format="table".')
if not empty_array:
value = value.T
transposed = True
if self._filters is not None:
atom = None
try:
# get the atom for this datatype
atom = _tables().Atom.from_dtype(value.dtype)
except ValueError:
pass
if atom is not None:
# create an empty chunked array and fill it from value
if not empty_array:
ca = self._handle.create_carray(self.group, key, atom,
value.shape,
filters=self._filters)
ca[:] = value
getattr(self.group, key)._v_attrs.transposed = transposed
else:
self.write_array_empty(key, value)
return
if value.dtype.type == np.object_:
# infer the type, warn if we have a non-string type here (for
# performance)
inferred_type = lib.infer_dtype(value.ravel())
if empty_array:
pass
elif inferred_type == 'string':
pass
else:
try:
items = list(items)
except:
pass
ws = performance_doc % (inferred_type, key, items)
warnings.warn(ws, PerformanceWarning, stacklevel=7)
vlarr = self._handle.create_vlarray(self.group, key,
_tables().ObjectAtom())
vlarr.append(value)
else:
if empty_array:
self.write_array_empty(key, value)
else:
if is_datetime64_dtype(value.dtype):
self._handle.create_array(
self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'datetime64'
elif is_datetime64tz_dtype(value.dtype):
# store as UTC
# with a zone
self._handle.create_array(self.group, key,
value.asi8)
node = getattr(self.group, key)
node._v_attrs.tz = _get_tz(value.tz)
node._v_attrs.value_type = 'datetime64'
elif is_timedelta64_dtype(value.dtype):
self._handle.create_array(
self.group, key, value.view('i8'))
getattr(
self.group, key)._v_attrs.value_type = 'timedelta64'
else:
self._handle.create_array(self.group, key, value)
getattr(self.group, key)._v_attrs.transposed = transposed
class LegacyFixed(GenericFixed):
def read_index_legacy(self, key, start=None, stop=None):
node = getattr(self.group, key)
data = node[start:stop]
kind = node._v_attrs.kind
return _unconvert_index_legacy(data, kind, encoding=self.encoding)
class LegacySeriesFixed(LegacyFixed):
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index_legacy('index')
values = self.read_array('values')
return Series(values, index=index)
class LegacyFrameFixed(LegacyFixed):
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index_legacy('index')
columns = self.read_index_legacy('columns')
values = self.read_array('values')
return DataFrame(values, index=index, columns=columns)
class SeriesFixed(GenericFixed):
pandas_kind = u('series')
attributes = ['name']
@property
def shape(self):
try:
return len(getattr(self.group, 'values')),
except:
return None
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index('index', **kwargs)
values = self.read_array('values', **kwargs)
return Series(values, index=index, name=self.name)
def write(self, obj, **kwargs):
super(SeriesFixed, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_array('values', obj.values)
self.attrs.name = obj.name
class SparseFixed(GenericFixed):
def validate_read(self, kwargs):
"""
we don't support start, stop kwds in Sparse
"""
kwargs = super(SparseFixed, self).validate_read(kwargs)
if 'start' in kwargs or 'stop' in kwargs:
raise NotImplementedError("start and/or stop are not supported "
"in fixed Sparse reading")
return kwargs
class SparseSeriesFixed(SparseFixed):
pandas_kind = u('sparse_series')
attributes = ['name', 'fill_value', 'kind']
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
index = self.read_index('index')
sp_values = self.read_array('sp_values')
sp_index = self.read_index('sp_index')
return SparseSeries(sp_values, index=index, sparse_index=sp_index,
kind=self.kind or u('block'),
fill_value=self.fill_value,
name=self.name)
def write(self, obj, **kwargs):
super(SparseSeriesFixed, self).write(obj, **kwargs)
self.write_index('index', obj.index)
self.write_index('sp_index', obj.sp_index)
self.write_array('sp_values', obj.sp_values)
self.attrs.name = obj.name
self.attrs.fill_value = obj.fill_value
self.attrs.kind = obj.kind
class SparseFrameFixed(SparseFixed):
pandas_kind = u('sparse_frame')
attributes = ['default_kind', 'default_fill_value']
def read(self, **kwargs):
kwargs = self.validate_read(kwargs)
columns = self.read_index('columns')
sdict = {}
for c in columns:
key = 'sparse_series_%s' % c
s = SparseSeriesFixed(self.parent, getattr(self.group, key))
s.infer_axes()
sdict[c] = s.read()
return SparseDataFrame(sdict, columns=columns,
default_kind=self.default_kind,
default_fill_value=self.default_fill_value)
def write(self, obj, **kwargs):
""" write it as a collection of individual sparse series """
super(SparseFrameFixed, self).write(obj, **kwargs)
for name, ss in compat.iteritems(obj):
key = 'sparse_series_%s' % name
if key not in self.group._v_children:
node = self._handle.create_group(self.group, key)
else:
node = getattr(self.group, key)
s = SparseSeriesFixed(self.parent, node)
s.write(ss)
self.attrs.default_fill_value = obj.default_fill_value
self.attrs.default_kind = obj.default_kind
self.write_index('columns', obj.columns)
class BlockManagerFixed(GenericFixed):
attributes = ['ndim', 'nblocks']
is_shape_reversed = False
@property
def shape(self):
try:
ndim = self.ndim
# items
items = 0
for i in range(self.nblocks):
node = getattr(self.group, 'block%d_items' % i)
shape = getattr(node, 'shape', None)
if shape is not None:
items += shape[0]
# data shape
node = getattr(self.group, 'block0_values')
shape = getattr(node, 'shape', None)
if shape is not None:
shape = list(shape[0:(ndim - 1)])
else:
shape = []
shape.append(items)
# hacky - this works for frames, but is reversed for panels
if self.is_shape_reversed:
shape = shape[::-1]
return shape
except:
return None
def read(self, start=None, stop=None, **kwargs):
# start, stop applied to rows, so 0th axis only
kwargs = self.validate_read(kwargs)
select_axis = self.obj_type()._get_block_manager_axis(0)
axes = []
for i in range(self.ndim):
_start, _stop = (start, stop) if i == select_axis else (None, None)
ax = self.read_index('axis%d' % i, start=_start, stop=_stop)
axes.append(ax)
items = axes[0]
blocks = []
for i in range(self.nblocks):
blk_items = self.read_index('block%d_items' % i)
values = self.read_array('block%d_values' % i,
start=_start, stop=_stop)
blk = make_block(values,
placement=items.get_indexer(blk_items))
blocks.append(blk)
return self.obj_type(BlockManager(blocks, axes))
def write(self, obj, **kwargs):
super(BlockManagerFixed, self).write(obj, **kwargs)
data = obj._data
if not data.is_consolidated():
data = data.consolidate()
self.attrs.ndim = data.ndim
for i, ax in enumerate(data.axes):
if i == 0:
if not ax.is_unique:
raise ValueError(
"Columns index has to be unique for fixed format")
self.write_index('axis%d' % i, ax)
# Supporting mixed-type DataFrame objects...nontrivial
self.attrs.nblocks = len(data.blocks)
for i, blk in enumerate(data.blocks):
# I have no idea why, but writing values before items fixed #2299
blk_items = data.items.take(blk.mgr_locs)
self.write_array('block%d_values' % i, blk.values, items=blk_items)
self.write_index('block%d_items' % i, blk_items)
class FrameFixed(BlockManagerFixed):
pandas_kind = u('frame')
obj_type = DataFrame
class PanelFixed(BlockManagerFixed):
pandas_kind = u('wide')
obj_type = Panel
is_shape_reversed = True
def write(self, obj, **kwargs):
obj._consolidate_inplace()
return super(PanelFixed, self).write(obj, **kwargs)
class Table(Fixed):
""" represent a table:
facilitate read/write of various types of tables
Attrs in Table Node
-------------------
    These are attributes that are stored in the main table node; they are
necessary to recreate these tables when read back in.
index_axes : a list of tuples of the (original indexing axis and
index column)
non_index_axes: a list of tuples of the (original index axis and
columns on a non-indexing axis)
values_axes : a list of the columns which comprise the data of this
table
data_columns : a list of the columns that we are allowing indexing
(these become single columns in values_axes), or True to force all
columns
nan_rep : the string to use for nan representations for string
objects
levels : the names of levels
metadata : the names of the metadata columns
"""
pandas_kind = u('wide_table')
table_type = None
levels = 1
is_table = True
is_shape_reversed = False
def __init__(self, *args, **kwargs):
super(Table, self).__init__(*args, **kwargs)
self.index_axes = []
self.non_index_axes = []
self.values_axes = []
self.data_columns = []
self.metadata = []
self.info = dict()
self.nan_rep = None
self.selection = None
@property
def table_type_short(self):
return self.table_type.split('_')[0]
@property
def format_type(self):
return 'table'
def __unicode__(self):
""" return a pretty representatgion of myself """
self.infer_axes()
dc = ",dc->[%s]" % ','.join(
self.data_columns) if len(self.data_columns) else ''
ver = ''
if self.is_old_version:
ver = "[%s]" % '.'.join([str(x) for x in self.version])
return "%-12.12s%s (typ->%s,nrows->%s,ncols->%s,indexers->[%s]%s)" % (
self.pandas_type, ver, self.table_type_short, self.nrows,
self.ncols, ','.join([a.name for a in self.index_axes]), dc
)
def __getitem__(self, c):
""" return the axis for c """
for a in self.axes:
if c == a.name:
return a
return None
def validate(self, other):
""" validate against an existing table """
if other is None:
return
if other.table_type != self.table_type:
raise TypeError("incompatible table_type with existing [%s - %s]" %
(other.table_type, self.table_type))
for c in ['index_axes', 'non_index_axes', 'values_axes']:
sv = getattr(self, c, None)
ov = getattr(other, c, None)
if sv != ov:
# show the error for the specific axes
for i, sax in enumerate(sv):
oax = ov[i]
if sax != oax:
raise ValueError(
"invalid combinate of [%s] on appending data [%s] "
"vs current table [%s]" % (c, sax, oax))
# should never get here
raise Exception(
"invalid combinate of [%s] on appending data [%s] vs "
"current table [%s]" % (c, sv, ov))
@property
def is_multi_index(self):
"""the levels attribute is 1 or a list in the case of a multi-index"""
return isinstance(self.levels, list)
def validate_metadata(self, existing):
""" create / validate metadata """
self.metadata = [
c.name for c in self.values_axes if c.metadata is not None]
def validate_multiindex(self, obj):
"""validate that we can store the multi-index; reset and return the
new object
"""
levels = [l if l is not None else "level_{0}".format(i)
for i, l in enumerate(obj.index.names)]
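        # e.g. (illustrative): a MultiIndex with names ['foo', None] is reset
        # into columns ['foo', 'level_1'] before being stored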
try:
return obj.reset_index(), levels
except ValueError:
raise ValueError("duplicate names/columns in the multi-index when "
"storing as a table")
@property
def nrows_expected(self):
""" based on our axes, compute the expected nrows """
return np.prod([i.cvalues.shape[0] for i in self.index_axes])
@property
def is_exists(self):
""" has this table been created """
return u('table') in self.group
@property
def storable(self):
return getattr(self.group, 'table', None)
@property
def table(self):
""" return the table group (this is my storable) """
return self.storable
@property
def dtype(self):
return self.table.dtype
@property
def description(self):
return self.table.description
@property
def axes(self):
return itertools.chain(self.index_axes, self.values_axes)
@property
def ncols(self):
""" the number of total columns in the values axes """
return sum([len(a.values) for a in self.values_axes])
@property
def is_transposed(self):
return False
@property
def data_orientation(self):
"""return a tuple of my permutated axes, non_indexable at the front"""
return tuple(itertools.chain([int(a[0]) for a in self.non_index_axes],
[int(a.axis) for a in self.index_axes]))
def queryables(self):
""" return a dict of the kinds allowable columns for this object """
# compute the values_axes queryables
return dict(
[(a.cname, a) for a in self.index_axes] +
[(self.storage_obj_type._AXIS_NAMES[axis], None)
for axis, values in self.non_index_axes] +
[(v.cname, v) for v in self.values_axes
if v.name in set(self.data_columns)]
)
def index_cols(self):
""" return a list of my index cols """
return [(i.axis, i.cname) for i in self.index_axes]
def values_cols(self):
""" return a list of my values cols """
return [i.cname for i in self.values_axes]
def _get_metadata_path(self, key):
""" return the metadata pathname for this key """
return "{group}/meta/{key}/meta".format(group=self.group._v_pathname,
key=key)
def write_metadata(self, key, values):
"""
write out a meta data array to the key as a fixed-format Series
Parameters
----------
key : string
values : ndarray
"""
values = Series(values)
self.parent.put(self._get_metadata_path(key), values, format='table',
encoding=self.encoding, nan_rep=self.nan_rep)
def read_metadata(self, key):
""" return the meta data array for this key """
if getattr(getattr(self.group, 'meta', None), key, None) is not None:
return self.parent.select(self._get_metadata_path(key))
return None
def set_info(self):
""" update our table index info """
self.attrs.info = self.info
def set_attrs(self):
""" set our table type & indexables """
self.attrs.table_type = str(self.table_type)
self.attrs.index_cols = self.index_cols()
self.attrs.values_cols = self.values_cols()
self.attrs.non_index_axes = self.non_index_axes
self.attrs.data_columns = self.data_columns
self.attrs.nan_rep = self.nan_rep
self.attrs.encoding = self.encoding
self.attrs.levels = self.levels
self.attrs.metadata = self.metadata
self.set_info()
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = getattr(
self.attrs, 'non_index_axes', None) or []
self.data_columns = getattr(
self.attrs, 'data_columns', None) or []
self.info = getattr(
self.attrs, 'info', None) or dict()
self.nan_rep = getattr(self.attrs, 'nan_rep', None)
self.encoding = _ensure_encoding(
getattr(self.attrs, 'encoding', None))
self.levels = getattr(
self.attrs, 'levels', None) or []
self.index_axes = [
a.infer(self) for a in self.indexables if a.is_an_indexable
]
self.values_axes = [
a.infer(self) for a in self.indexables if not a.is_an_indexable
]
self.metadata = getattr(
self.attrs, 'metadata', None) or []
def validate_version(self, where=None):
""" are we trying to operate on an old version? """
if where is not None:
if (self.version[0] <= 0 and self.version[1] <= 10 and
self.version[2] < 1):
ws = incompatibility_doc % '.'.join(
[str(x) for x in self.version])
warnings.warn(ws, IncompatibilityWarning)
def validate_min_itemsize(self, min_itemsize):
"""validate the min_itemisze doesn't contain items that are not in the
axes this needs data_columns to be defined
"""
if min_itemsize is None:
return
if not isinstance(min_itemsize, dict):
return
q = self.queryables()
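        # e.g. (illustrative): min_itemsize={'values': 50, 'B': 30} passes if
        # 'B' is an axis or data_column; {'not_a_column': 10} raises below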
for k, v in min_itemsize.items():
# ok, apply generally
if k == 'values':
continue
if k not in q:
raise ValueError(
"min_itemsize has the key [%s] which is not an axis or "
"data_column" % k)
@property
def indexables(self):
""" create/cache the indexables if they don't exist """
if self._indexables is None:
self._indexables = []
# index columns
self._indexables.extend([
IndexCol(name=name, axis=axis, pos=i)
for i, (axis, name) in enumerate(self.attrs.index_cols)
])
# values columns
dc = set(self.data_columns)
base_pos = len(self._indexables)
def f(i, c):
klass = DataCol
if c in dc:
klass = DataIndexableCol
return klass.create_for_block(i=i, name=c, pos=base_pos + i,
version=self.version)
self._indexables.extend(
[f(i, c) for i, c in enumerate(self.attrs.values_cols)])
return self._indexables
def create_index(self, columns=None, optlevel=None, kind=None):
"""
Create a pytables index on the specified columns
note: cannot index Time64Col() or ComplexCol currently;
PyTables must be >= 3.0
Parameters
----------
columns : False (don't create an index), True (create all columns
index), None or list_like (the indexers to index)
optlevel: optimization level (defaults to 6)
kind : kind of index (defaults to 'medium')
Exceptions
----------
raises if the node is not a table
"""
if not self.infer_axes():
return
if columns is False:
return
# index all indexables and data_columns
if columns is None or columns is True:
columns = [a.cname for a in self.axes if a.is_data_indexable]
if not isinstance(columns, (tuple, list)):
columns = [columns]
kw = dict()
if optlevel is not None:
kw['optlevel'] = optlevel
if kind is not None:
kw['kind'] = kind
table = self.table
for c in columns:
v = getattr(table.cols, c, None)
if v is not None:
# remove the index if the kind/optlevel have changed
if v.is_indexed:
index = v.index
cur_optlevel = index.optlevel
cur_kind = index.kind
if kind is not None and cur_kind != kind:
v.remove_index()
else:
kw['kind'] = cur_kind
if optlevel is not None and cur_optlevel != optlevel:
v.remove_index()
else:
kw['optlevel'] = cur_optlevel
# create the index
if not v.is_indexed:
if v.type.startswith('complex'):
raise TypeError(
'Columns containing complex values can be stored '
'but cannot'
' be indexed when using table format. Either use '
'fixed format, set index=False, or do not include '
'the columns containing complex values to '
'data_columns when initializing the table.')
v.create_index(**kw)
def read_axes(self, where, **kwargs):
"""create and return the axes sniffed from the table: return boolean
for success
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(self, where=where, **kwargs)
values = self.selection.select()
# convert the data
for a in self.axes:
a.set_info(self.info)
a.convert(values, nan_rep=self.nan_rep, encoding=self.encoding)
return True
def get_object(self, obj):
""" return the data for this obj """
return obj
def validate_data_columns(self, data_columns, min_itemsize):
"""take the input data_columns and min_itemize and create a data
columns spec
"""
if not len(self.non_index_axes):
return []
axis, axis_labels = self.non_index_axes[0]
info = self.info.get(axis, dict())
if info.get('type') == 'MultiIndex' and data_columns:
raise ValueError("cannot use a multi-index on axis [{0}] with "
"data_columns {1}".format(axis, data_columns))
# evaluate the passed data_columns, True == use all columns
        # take only valid axis labels
if data_columns is True:
data_columns = list(axis_labels)
elif data_columns is None:
data_columns = []
# if min_itemsize is a dict, add the keys (exclude 'values')
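        # e.g. (illustrative): data_columns=['A'] with
        # min_itemsize={'B': 30, 'values': 50} yields data columns ['A', 'B'],
        # assuming both 'A' and 'B' are valid axis labels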
if isinstance(min_itemsize, dict):
existing_data_columns = set(data_columns)
data_columns.extend([
k for k in min_itemsize.keys()
if k != 'values' and k not in existing_data_columns
])
# return valid columns in the order of our axis
return [c for c in data_columns if c in axis_labels]
def create_axes(self, axes, obj, validate=True, nan_rep=None,
data_columns=None, min_itemsize=None, **kwargs):
""" create and return the axes
        legacy tables create an indexable column, indexable index,
non-indexable fields
        Parameters
        ----------
axes: a list of the axes in order to create (names or numbers of
the axes)
obj : the object to create axes on
validate: validate the obj against an existing object already
written
min_itemsize: a dict of the min size for a column in bytes
        nan_rep : a value to use for string column nan_rep
encoding : the encoding for string values
data_columns : a list of columns that we want to create separate to
allow indexing (or True will force all columns)
"""
# set the default axes if needed
if axes is None:
try:
axes = _AXES_MAP[type(obj)]
except:
raise TypeError("cannot properly create the storer for: "
"[group->%s,value->%s]"
% (self.group._v_name, type(obj)))
# map axes to numbers
axes = [obj._get_axis_number(a) for a in axes]
# do we have an existing table (if so, use its axes & data_columns)
if self.infer_axes():
existing_table = self.copy()
existing_table.infer_axes()
axes = [a.axis for a in existing_table.index_axes]
data_columns = existing_table.data_columns
nan_rep = existing_table.nan_rep
self.encoding = existing_table.encoding
self.info = copy.copy(existing_table.info)
else:
existing_table = None
        # currently only support ndim-1 axes
if len(axes) != self.ndim - 1:
raise ValueError(
"currently only support ndim-1 indexers in an AppendableTable")
# create according to the new data
self.non_index_axes = []
self.data_columns = []
# nan_representation
if nan_rep is None:
nan_rep = 'nan'
self.nan_rep = nan_rep
# create axes to index and non_index
index_axes_map = dict()
for i, a in enumerate(obj.axes):
if i in axes:
name = obj._AXIS_NAMES[i]
index_axes_map[i] = _convert_index(
a, self.encoding, self.format_type
).set_name(name).set_axis(i)
else:
# we might be able to change the axes on the appending data if
# necessary
append_axis = list(a)
if existing_table is not None:
indexer = len(self.non_index_axes)
exist_axis = existing_table.non_index_axes[indexer][1]
if not array_equivalent(np.array(append_axis),
np.array(exist_axis)):
# ahah! -> reindex
if array_equivalent(np.array(sorted(append_axis)),
np.array(sorted(exist_axis))):
append_axis = exist_axis
# the non_index_axes info
info = _get_info(self.info, i)
info['names'] = list(a.names)
info['type'] = a.__class__.__name__
self.non_index_axes.append((i, append_axis))
# set axis positions (based on the axes)
self.index_axes = [
index_axes_map[a].set_pos(j).update_info(self.info)
for j, a in enumerate(axes)
]
j = len(self.index_axes)
# check for column conflicts
for a in self.axes:
a.maybe_set_size(min_itemsize=min_itemsize)
# reindex by our non_index_axes & compute data_columns
for a in self.non_index_axes:
obj = _reindex_axis(obj, a[0], a[1])
def get_blk_items(mgr, blocks):
return [mgr.items.take(blk.mgr_locs) for blk in blocks]
# figure out data_columns and get out blocks
block_obj = self.get_object(obj)._consolidate()
blocks = block_obj._data.blocks
blk_items = get_blk_items(block_obj._data, blocks)
if len(self.non_index_axes):
axis, axis_labels = self.non_index_axes[0]
data_columns = self.validate_data_columns(
data_columns, min_itemsize)
if len(data_columns):
mgr = block_obj.reindex_axis(
Index(axis_labels).difference(Index(data_columns)),
axis=axis
)._data
blocks = list(mgr.blocks)
blk_items = get_blk_items(mgr, blocks)
for c in data_columns:
mgr = block_obj.reindex_axis([c], axis=axis)._data
blocks.extend(mgr.blocks)
blk_items.extend(get_blk_items(mgr, mgr.blocks))
# reorder the blocks in the same order as the existing_table if we can
if existing_table is not None:
by_items = dict([(tuple(b_items.tolist()), (b, b_items))
for b, b_items in zip(blocks, blk_items)])
new_blocks = []
new_blk_items = []
for ea in existing_table.values_axes:
items = tuple(ea.values)
try:
b, b_items = by_items.pop(items)
new_blocks.append(b)
new_blk_items.append(b_items)
except:
raise ValueError(
"cannot match existing table structure for [%s] on "
"appending data" % ','.join(pprint_thing(item) for
item in items))
blocks = new_blocks
blk_items = new_blk_items
# add my values
self.values_axes = []
for i, (b, b_items) in enumerate(zip(blocks, blk_items)):
# shape of the data column are the indexable axes
klass = DataCol
name = None
# we have a data_column
if (data_columns and len(b_items) == 1 and
b_items[0] in data_columns):
klass = DataIndexableCol
name = b_items[0]
self.data_columns.append(name)
# make sure that we match up the existing columns
# if we have an existing table
if existing_table is not None and validate:
try:
existing_col = existing_table.values_axes[i]
except:
raise ValueError("Incompatible appended table [%s] with "
"existing table [%s]"
% (blocks, existing_table.values_axes))
else:
existing_col = None
try:
col = klass.create_for_block(
i=i, name=name, version=self.version)
col.set_atom(block=b, block_items=b_items,
existing_col=existing_col,
min_itemsize=min_itemsize,
nan_rep=nan_rep,
encoding=self.encoding,
info=self.info,
**kwargs)
col.set_pos(j)
self.values_axes.append(col)
except (NotImplementedError, ValueError, TypeError) as e:
raise e
except Exception as detail:
raise Exception(
"cannot find the correct atom type -> "
"[dtype->%s,items->%s] %s"
% (b.dtype.name, b_items, str(detail))
)
j += 1
# validate our min_itemsize
self.validate_min_itemsize(min_itemsize)
# validate our metadata
self.validate_metadata(existing_table)
# validate the axes if we have an existing table
if validate:
self.validate(existing_table)
def process_axes(self, obj, columns=None):
""" process axes filters """
# make a copy to avoid side effects
if columns is not None:
columns = list(columns)
# make sure to include levels if we have them
if columns is not None and self.is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
# reorder by any non_index_axes & limit to the select columns
for axis, labels in self.non_index_axes:
obj = _reindex_axis(obj, axis, labels, columns)
# apply the selection filters (but keep in the same order)
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
def process_filter(field, filt):
for axis_name in obj._AXIS_NAMES.values():
axis_number = obj._get_axis_number(axis_name)
axis_values = obj._get_axis(axis_name)
# see if the field is the name of an axis
if field == axis_name:
# if we have a multi-index, then need to include
# the levels
if self.is_multi_index:
filt = filt.union(Index(self.levels))
takers = op(axis_values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
                        # this might be the name of a field IN an axis
elif field in axis_values:
# we need to filter on this dimension
values = _ensure_index(getattr(obj, field).values)
filt = _ensure_index(filt)
# hack until we support reversed dim flags
if isinstance(obj, DataFrame):
axis_number = 1 - axis_number
takers = op(values, filt)
return obj.loc._getitem_axis(takers,
axis=axis_number)
raise ValueError(
"cannot find the field [%s] for filtering!" % field)
obj = process_filter(field, filt)
return obj
def create_description(self, complib=None, complevel=None,
fletcher32=False, expectedrows=None):
""" create the description of the table from the axes & values """
        # provide expectedrows if it's passed
if expectedrows is None:
expectedrows = max(self.nrows_expected, 10000)
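            # e.g. (illustrative): a table expecting 2500 rows still asks
            # PyTables for expectedrows=10000; larger tables use their own size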
d = dict(name='table', expectedrows=expectedrows)
# description from the axes & values
d['description'] = dict([(a.cname, a.typ) for a in self.axes])
if complib:
if complevel is None:
complevel = self._complevel or 9
filters = _tables().Filters(
complevel=complevel, complib=complib,
fletcher32=fletcher32 or self._fletcher32)
d['filters'] = filters
elif self._filters is not None:
d['filters'] = self._filters
return d
def read_coordinates(self, where=None, start=None, stop=None, **kwargs):
"""select coordinates (row numbers) from a table; return the
coordinates object
"""
# validate the version
self.validate_version(where)
# infer the data kind
if not self.infer_axes():
return False
# create the selection
self.selection = Selection(
self, where=where, start=start, stop=stop, **kwargs)
coords = self.selection.select_coords()
if self.selection.filter is not None:
for field, op, filt in self.selection.filter.format():
data = self.read_column(
field, start=coords.min(), stop=coords.max() + 1)
coords = coords[
op(data.iloc[coords - coords.min()], filt).values]
return Index(coords)
def read_column(self, column, where=None, start=None, stop=None, **kwargs):
"""return a single column from the table, generally only indexables
are interesting
"""
# validate the version
self.validate_version()
# infer the data kind
if not self.infer_axes():
return False
if where is not None:
raise TypeError("read_column does not currently accept a where "
"clause")
# find the axes
for a in self.axes:
if column == a.name:
if not a.is_data_indexable:
raise ValueError(
"column [%s] can not be extracted individually; it is "
"not data indexable" % column)
# column must be an indexable or a data column
c = getattr(self.table.cols, column)
a.set_info(self.info)
return Series(_set_tz(a.convert(c[start:stop],
nan_rep=self.nan_rep,
encoding=self.encoding
).take_data(),
a.tz, True), name=column)
raise KeyError("column [%s] not found in the table" % column)
class WORMTable(Table):
""" a write-once read-many table: this format DOES NOT ALLOW appending to a
    table. Writing is a one-time operation; the data are stored in a format
that allows for searching the data on disk
"""
table_type = u('worm')
def read(self, **kwargs):
""" read the indicies and the indexing array, calculate offset rows and
return """
raise NotImplementedError("WORMTable needs to implement read")
def write(self, **kwargs):
""" write in a format that we can search later on (but cannot append
            to): write out the indices and the values using _write_array
(e.g. a CArray) create an indexing table so that we can search
"""
raise NotImplementedError("WORKTable needs to implement write")
class LegacyTable(Table):
""" an appendable table: allow append/query/delete operations to a
        (possibly) already existing appendable table. This table ALLOWS
append (but doesn't require them), and stores the data in a format
that can be easily searched
"""
_indexables = [
IndexCol(name='index', axis=1, pos=0),
IndexCol(name='column', axis=2, pos=1, index_kind='columns_kind'),
DataCol(name='fields', cname='values', kind_attr='fields', pos=2)
]
table_type = u('legacy')
ndim = 3
def write(self, **kwargs):
raise TypeError("write operations are not allowed on legacy tables!")
def read(self, where=None, columns=None, **kwargs):
"""we have n indexable columns, with an arbitrary number of data
axes
"""
if not self.read_axes(where=where, **kwargs):
return None
lst_vals = [a.values for a in self.index_axes]
labels, levels = _factorize_from_iterables(lst_vals)
# labels and levels are tuples but lists are expected
labels = list(labels)
levels = list(levels)
N = [len(lvl) for lvl in levels]
# compute the key
key = _factor_indexer(N[1:], labels)
objs = []
if len(unique(key)) == len(key):
sorter, _ = algos.groupsort_indexer(
_ensure_int64(key), np.prod(N))
sorter = _ensure_platform_int(sorter)
# create the objs
for c in self.values_axes:
# the data need to be sorted
sorted_values = c.take_data().take(sorter, axis=0)
if sorted_values.ndim == 1:
sorted_values = sorted_values.reshape(
(sorted_values.shape[0], 1))
take_labels = [l.take(sorter) for l in labels]
items = Index(c.values)
block = _block2d_to_blocknd(
values=sorted_values, placement=np.arange(len(items)),
shape=tuple(N), labels=take_labels, ref_items=items)
# create the object
mgr = BlockManager([block], [items] + levels)
obj = self.obj_type(mgr)
# permute if needed
if self.is_transposed:
obj = obj.transpose(
*tuple(Series(self.data_orientation).argsort()))
objs.append(obj)
else:
warnings.warn(duplicate_doc, DuplicateWarning, stacklevel=5)
# reconstruct
long_index = MultiIndex.from_arrays(
[i.values for i in self.index_axes])
for c in self.values_axes:
lp = DataFrame(c.data, index=long_index, columns=c.values)
# need a better algorithm
tuple_index = long_index.values
unique_tuples = lib.fast_unique(tuple_index)
unique_tuples = _asarray_tuplesafe(unique_tuples)
indexer = match(unique_tuples, tuple_index)
indexer = _ensure_platform_int(indexer)
new_index = long_index.take(indexer)
new_values = lp.values.take(indexer, axis=0)
lp = DataFrame(new_values, index=new_index, columns=lp.columns)
objs.append(lp.to_panel())
# create the composite object
if len(objs) == 1:
wp = objs[0]
else:
wp = concat(objs, axis=0, verify_integrity=False)._consolidate()
# apply the selection filters & axis orderings
wp = self.process_axes(wp, columns=columns)
return wp
class LegacyFrameTable(LegacyTable):
""" support the legacy frame table """
pandas_kind = u('frame_table')
table_type = u('legacy_frame')
obj_type = Panel
def read(self, *args, **kwargs):
return super(LegacyFrameTable, self).read(*args, **kwargs)['value']
class LegacyPanelTable(LegacyTable):
""" support the legacy panel table """
table_type = u('legacy_panel')
obj_type = Panel
class AppendableTable(LegacyTable):
""" suppor the new appendable table formats """
_indexables = None
table_type = u('appendable')
def write(self, obj, axes=None, append=False, complib=None,
complevel=None, fletcher32=None, min_itemsize=None,
chunksize=None, expectedrows=None, dropna=False, **kwargs):
if not append and self.is_exists:
self._handle.remove_node(self.group, 'table')
# create the axes
self.create_axes(axes=axes, obj=obj, validate=append,
min_itemsize=min_itemsize,
**kwargs)
for a in self.axes:
a.validate(self, append)
if not self.is_exists:
# create the table
options = self.create_description(complib=complib,
complevel=complevel,
fletcher32=fletcher32,
expectedrows=expectedrows)
# set the table attributes
self.set_attrs()
# create the table
self._handle.create_table(self.group, **options)
else:
pass
# table = self.table
# update my info
self.set_info()
# validate the axes and set the kinds
for a in self.axes:
a.validate_and_set(self, append)
# add the rows
self.write_data(chunksize, dropna=dropna)
def write_data(self, chunksize, dropna=False):
""" we form the data into a 2-d including indexes,values,mask
write chunk-by-chunk """
names = self.dtype.names
nrows = self.nrows_expected
# if dropna==True, then drop ALL nan rows
masks = []
if dropna:
for a in self.values_axes:
# figure the mask: only do if we can successfully process this
# column, otherwise ignore the mask
mask = isnull(a.data).all(axis=0)
if isinstance(mask, np.ndarray):
masks.append(mask.astype('u1', copy=False))
# consolidate masks
if len(masks):
mask = masks[0]
for m in masks[1:]:
mask = mask & m
mask = mask.ravel()
else:
mask = None
# broadcast the indexes if needed
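        # e.g. (illustrative): with two index axes of lengths 3 and 4, the
        # first index is repeated elementwise 4 times and the second tiled
        # 3 times, so both line up with the 12 expected rows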
indexes = [a.cvalues for a in self.index_axes]
nindexes = len(indexes)
bindexes = []
for i, idx in enumerate(indexes):
# broadcast to all other indexes except myself
if i > 0 and i < nindexes:
repeater = np.prod(
[indexes[bi].shape[0] for bi in range(0, i)])
idx = np.tile(idx, repeater)
if i < nindexes - 1:
repeater = np.prod([indexes[bi].shape[0]
for bi in range(i + 1, nindexes)])
idx = np.repeat(idx, repeater)
bindexes.append(idx)
# transpose the values so first dimension is last
# reshape the values if needed
values = [a.take_data() for a in self.values_axes]
values = [v.transpose(np.roll(np.arange(v.ndim), v.ndim - 1))
for v in values]
bvalues = []
for i, v in enumerate(values):
new_shape = (nrows,) + self.dtype[names[nindexes + i]].shape
bvalues.append(values[i].reshape(new_shape))
# write the chunks
if chunksize is None:
chunksize = 100000
rows = np.empty(min(chunksize, nrows), dtype=self.dtype)
chunks = int(nrows / chunksize) + 1
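        # e.g. (illustrative): nrows=250000 with the default chunksize of
        # 100000 writes three chunks of 100000, 100000 and 50000 rows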
for i in range(chunks):
start_i = i * chunksize
end_i = min((i + 1) * chunksize, nrows)
if start_i >= end_i:
break
self.write_data_chunk(
rows,
indexes=[a[start_i:end_i] for a in bindexes],
mask=mask[start_i:end_i] if mask is not None else None,
values=[v[start_i:end_i] for v in bvalues])
def write_data_chunk(self, rows, indexes, mask, values):
"""
Parameters
----------
rows : an empty memory space where we are putting the chunk
indexes : an array of the indexes
mask : an array of the masks
values : an array of the values
"""
# 0 len
for v in values:
if not np.prod(v.shape):
return
try:
nrows = indexes[0].shape[0]
if nrows != len(rows):
rows = np.empty(nrows, dtype=self.dtype)
names = self.dtype.names
nindexes = len(indexes)
# indexes
for i, idx in enumerate(indexes):
rows[names[i]] = idx
# values
for i, v in enumerate(values):
rows[names[i + nindexes]] = v
# mask
if mask is not None:
m = ~mask.ravel().astype(bool, copy=False)
if not m.all():
rows = rows[m]
except Exception as detail:
raise Exception("cannot create row-data -> %s" % detail)
try:
if len(rows):
self.table.append(rows)
self.table.flush()
except Exception as detail:
raise TypeError("tables cannot write this data -> %s" % detail)
def delete(self, where=None, start=None, stop=None, **kwargs):
# delete all rows (and return the nrows)
if where is None or not len(where):
if start is None and stop is None:
nrows = self.nrows
self._handle.remove_node(self.group, recursive=True)
else:
# pytables<3.0 would remove a single row with stop=None
if stop is None:
stop = self.nrows
nrows = self.table.remove_rows(start=start, stop=stop)
self.table.flush()
return nrows
# infer the data kind
if not self.infer_axes():
return None
# create the selection
table = self.table
self.selection = Selection(
self, where, start=start, stop=stop, **kwargs)
values = self.selection.select_coords()
# delete the rows in reverse order
l = Series(values).sort_values()
ln = len(l)
if ln:
# construct groups of consecutive rows
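            # e.g. (illustrative): selected rows [2, 3, 4, 10, 11] form two
            # consecutive runs and are removed back-to-front: rows 10-11
            # first, then rows 2-4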
diff = l.diff()
groups = list(diff[diff > 1].index)
# 1 group
if not len(groups):
groups = [0]
# final element
if groups[-1] != ln:
groups.append(ln)
# initial element
if groups[0] != 0:
groups.insert(0, 0)
# we must remove in reverse order!
pg = groups.pop()
for g in reversed(groups):
rows = l.take(lrange(g, pg))
table.remove_rows(start=rows[rows.index[0]
], stop=rows[rows.index[-1]] + 1)
pg = g
self.table.flush()
# return the number of rows removed
return ln
class AppendableFrameTable(AppendableTable):
""" suppor the new appendable table formats """
pandas_kind = u('frame_table')
table_type = u('appendable_frame')
ndim = 2
obj_type = DataFrame
@property
def is_transposed(self):
return self.index_axes[0].axis == 1
def get_object(self, obj):
""" these are written transposed """
if self.is_transposed:
obj = obj.T
return obj
def read(self, where=None, columns=None, **kwargs):
if not self.read_axes(where=where, **kwargs):
return None
info = (self.info.get(self.non_index_axes[0][0], dict())
if len(self.non_index_axes) else dict())
index = self.index_axes[0].values
frames = []
for a in self.values_axes:
# we could have a multi-index constructor here
            # _ensure_index doesn't recognize our list-of-tuples here
if info.get('type') == 'MultiIndex':
cols = MultiIndex.from_tuples(a.values)
else:
cols = Index(a.values)
names = info.get('names')
if names is not None:
cols.set_names(names, inplace=True)
if self.is_transposed:
values = a.cvalues
index_ = cols
cols_ = Index(index, name=getattr(index, 'name', None))
else:
values = a.cvalues.T
index_ = Index(index, name=getattr(index, 'name', None))
cols_ = cols
# if we have a DataIndexableCol, its shape will only be 1 dim
if values.ndim == 1 and isinstance(values, np.ndarray):
values = values.reshape((1, values.shape[0]))
block = make_block(values, placement=np.arange(len(cols_)))
mgr = BlockManager([block], [cols_, index_])
frames.append(DataFrame(mgr))
if len(frames) == 1:
df = frames[0]
else:
df = concat(frames, axis=1)
# apply the selection filters & axis orderings
df = self.process_axes(df, columns=columns)
return df
class AppendableSeriesTable(AppendableFrameTable):
""" support the new appendable table formats """
pandas_kind = u('series_table')
table_type = u('appendable_series')
ndim = 2
obj_type = Series
storage_obj_type = DataFrame
@property
def is_transposed(self):
return False
def get_object(self, obj):
return obj
def write(self, obj, data_columns=None, **kwargs):
""" we are going to write this as a frame table """
if not isinstance(obj, DataFrame):
name = obj.name or 'values'
obj = DataFrame({name: obj}, index=obj.index)
obj.columns = [name]
return super(AppendableSeriesTable, self).write(
obj=obj, data_columns=obj.columns.tolist(), **kwargs)
def read(self, columns=None, **kwargs):
is_multi_index = self.is_multi_index
if columns is not None and is_multi_index:
for n in self.levels:
if n not in columns:
columns.insert(0, n)
s = super(AppendableSeriesTable, self).read(columns=columns, **kwargs)
if is_multi_index:
s.set_index(self.levels, inplace=True)
s = s.iloc[:, 0]
# remove the default name
if s.name == 'values':
s.name = None
return s
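# Usage sketch for the class above ('store.h5' and the key 's' are example
# names, not fixtures shipped with this module): a Series written with
# ``format='table'`` goes through AppendableSeriesTable, which stores it as a
# one-column frame named 'values' and strips that default name again on read.
def _series_table_roundtrip_sketch():
    s = Series([1.0, 2.0, 3.0])
    store = HDFStore('store.h5', mode='w')
    try:
        store.put('s', s, format='table')
        out = store.select('s')   # comes back as a Series with name=None
    finally:
        store.close()
    return out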
class AppendableMultiSeriesTable(AppendableSeriesTable):
""" support the new appendable table formats """
pandas_kind = u('series_table')
table_type = u('appendable_multiseries')
def write(self, obj, **kwargs):
""" we are going to write this as a frame table """
name = obj.name or 'values'
obj, self.levels = self.validate_multiindex(obj)
cols = list(self.levels)
cols.append(name)
obj.columns = cols
return super(AppendableMultiSeriesTable, self).write(obj=obj, **kwargs)
class GenericTable(AppendableFrameTable):
""" a table that read/writes the generic pytables table format """
pandas_kind = u('frame_table')
table_type = u('generic_table')
ndim = 2
obj_type = DataFrame
@property
def pandas_type(self):
return self.pandas_kind
@property
def storable(self):
return getattr(self.group, 'table', None) or self.group
def get_attrs(self):
""" retrieve our attributes """
self.non_index_axes = []
self.nan_rep = None
self.levels = []
self.index_axes = [a.infer(self)
for a in self.indexables if a.is_an_indexable]
self.values_axes = [a.infer(self)
for a in self.indexables if not a.is_an_indexable]
self.data_columns = [a.name for a in self.values_axes]
@property
def indexables(self):
""" create the indexables from the table description """
if self._indexables is None:
d = self.description
            # the index column is just a simple index
self._indexables = [GenericIndexCol(name='index', axis=0)]
for i, n in enumerate(d._v_names):
dc = GenericDataIndexableCol(
name=n, pos=i, values=[n], version=self.version)
self._indexables.append(dc)
return self._indexables
def write(self, **kwargs):
raise NotImplementedError("cannot write on an generic table")
class AppendableMultiFrameTable(AppendableFrameTable):
""" a frame with a multi-index """
table_type = u('appendable_multiframe')
obj_type = DataFrame
ndim = 2
    _re_levels = re.compile(r"^level_\d+$")
@property
def table_type_short(self):
return u('appendable_multi')
def write(self, obj, data_columns=None, **kwargs):
if data_columns is None:
data_columns = []
elif data_columns is True:
data_columns = obj.columns.tolist()
obj, self.levels = self.validate_multiindex(obj)
for n in self.levels:
if n not in data_columns:
data_columns.insert(0, n)
return super(AppendableMultiFrameTable, self).write(
obj=obj, data_columns=data_columns, **kwargs)
def read(self, **kwargs):
df = super(AppendableMultiFrameTable, self).read(**kwargs)
df = df.set_index(self.levels)
# remove names for 'level_%d'
df.index = df.index.set_names([
            None if self._re_levels.search(name) else name
            for name in df.index.names
])
return df
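# Illustrative sketch ('store.h5' and the key 'df' are example names): a
# MultiIndex frame stored in 'table' format has its index levels written as
# data columns; on read they are moved back into the index and auto-generated
# 'level_<n>' names are dropped by the regex above.
def _multiindex_frame_table_sketch():
    idx = MultiIndex.from_product([['a', 'b'], [1, 2]])
    df = DataFrame({'x': np.arange(4.0)}, index=idx)
    store = HDFStore('store.h5', mode='w')
    try:
        store.put('df', df, format='table')
        out = store.select('df')   # MultiIndex restored; unnamed levels stay unnamed
    finally:
        store.close()
    return out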
class AppendablePanelTable(AppendableTable):
""" suppor the new appendable table formats """
table_type = u('appendable_panel')
ndim = 3
obj_type = Panel
def get_object(self, obj):
""" these are written transposed """
if self.is_transposed:
obj = obj.transpose(*self.data_orientation)
return obj
@property
def is_transposed(self):
return self.data_orientation != tuple(range(self.ndim))
class AppendableNDimTable(AppendablePanelTable):
""" suppor the new appendable table formats """
table_type = u('appendable_ndim')
ndim = 4
obj_type = Panel4D
def _reindex_axis(obj, axis, labels, other=None):
ax = obj._get_axis(axis)
labels = _ensure_index(labels)
# try not to reindex even if other is provided
# if it equals our current index
if other is not None:
other = _ensure_index(other)
if (other is None or labels.equals(other)) and labels.equals(ax):
return obj
labels = _ensure_index(labels.unique())
if other is not None:
labels = _ensure_index(other.unique()) & labels
if not labels.equals(ax):
slicer = [slice(None, None)] * obj.ndim
slicer[axis] = labels
obj = obj.loc[tuple(slicer)]
return obj
def _get_info(info, name):
""" get/create the info for this name """
try:
idx = info[name]
    except KeyError:
idx = info[name] = dict()
return idx
# tz to/from coercion
def _get_tz(tz):
""" for a tz-aware type, return an encoded zone """
zone = tslib.get_timezone(tz)
if zone is None:
zone = tslib.tot_seconds(tz.utcoffset())
return zone
def _set_tz(values, tz, preserve_UTC=False, coerce=False):
"""
coerce the values to a DatetimeIndex if tz is set
preserve the input shape if possible
Parameters
----------
values : ndarray
tz : string/pickled tz object
preserve_UTC : boolean,
preserve the UTC of the result
coerce : if we do not have a passed timezone, coerce to M8[ns] ndarray
"""
if tz is not None:
name = getattr(values, 'name', None)
values = values.ravel()
tz = tslib.get_timezone(_ensure_decoded(tz))
values = DatetimeIndex(values, name=name)
if values.tz is None:
values = values.tz_localize('UTC').tz_convert(tz)
if preserve_UTC:
if tz == 'UTC':
values = list(values)
elif coerce:
values = np.asarray(values, dtype='M8[ns]')
return values
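# A minimal sketch of the tz round-trip performed above (the zone name and
# timestamp are illustrative): naive datetime64 values read back from storage
# are interpreted as UTC and then converted to the stored zone, so wall-clock
# times match what was written.
def _set_tz_roundtrip_sketch():
    raw = np.array(['2015-01-01T00:00:00'], dtype='M8[ns]')
    idx = DatetimeIndex(raw)                      # tz-naive, as stored on disk
    return idx.tz_localize('UTC').tz_convert('US/Eastern')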
def _convert_index(index, encoding=None, format_type=None):
index_name = getattr(index, 'name', None)
if isinstance(index, DatetimeIndex):
converted = index.asi8
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
tz=getattr(index, 'tz', None),
index_name=index_name)
elif isinstance(index, TimedeltaIndex):
converted = index.asi8
return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
index_name=index_name)
elif isinstance(index, (Int64Index, PeriodIndex)):
atom = _tables().Int64Col()
        # avoid storing an ndarray of Period objects
return IndexCol(index._values, 'integer', atom,
freq=getattr(index, 'freq', None),
index_name=index_name)
if isinstance(index, MultiIndex):
raise TypeError('MultiIndex not supported here!')
inferred_type = lib.infer_dtype(index)
values = np.asarray(index)
if inferred_type == 'datetime64':
converted = values.view('i8')
return IndexCol(converted, 'datetime64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
tz=getattr(index, 'tz', None),
index_name=index_name)
elif inferred_type == 'timedelta64':
converted = values.view('i8')
return IndexCol(converted, 'timedelta64', _tables().Int64Col(),
freq=getattr(index, 'freq', None),
index_name=index_name)
elif inferred_type == 'datetime':
converted = np.asarray([(time.mktime(v.timetuple()) +
v.microsecond / 1E6) for v in values],
dtype=np.float64)
return IndexCol(converted, 'datetime', _tables().Time64Col(),
index_name=index_name)
elif inferred_type == 'date':
converted = np.asarray([v.toordinal() for v in values],
dtype=np.int32)
return IndexCol(converted, 'date', _tables().Time32Col(),
index_name=index_name)
elif inferred_type == 'string':
# atom = _tables().ObjectAtom()
# return np.asarray(values, dtype='O'), 'object', atom
converted = _convert_string_array(values, encoding)
itemsize = converted.dtype.itemsize
return IndexCol(
converted, 'string', _tables().StringCol(itemsize),
itemsize=itemsize, index_name=index_name
)
elif inferred_type == 'unicode':
if format_type == 'fixed':
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
raise TypeError(
"[unicode] is not supported as a in index type for [{0}] formats"
.format(format_type)
)
elif inferred_type == 'integer':
# take a guess for now, hope the values fit
atom = _tables().Int64Col()
return IndexCol(np.asarray(values, dtype=np.int64), 'integer', atom,
index_name=index_name)
elif inferred_type == 'floating':
atom = _tables().Float64Col()
return IndexCol(np.asarray(values, dtype=np.float64), 'float', atom,
index_name=index_name)
else: # pragma: no cover
atom = _tables().ObjectAtom()
return IndexCol(np.asarray(values, dtype='O'), 'object', atom,
index_name=index_name)
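# Sketch of the datetime branch above (dates are illustrative): a DatetimeIndex
# is persisted as its int64 nanosecond view, which is exactly what the Int64Col
# atom receives; freq and tz travel separately as IndexCol metadata.
def _datetime_index_storage_sketch():
    dti = DatetimeIndex(['2015-01-01', '2015-01-02'])
    return dti.asi8                      # int64 ns values written to the table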
def _unconvert_index(data, kind, encoding=None):
kind = _ensure_decoded(kind)
if kind == u('datetime64'):
index = DatetimeIndex(data)
elif kind == u('timedelta64'):
index = TimedeltaIndex(data)
elif kind == u('datetime'):
index = np.asarray([datetime.fromtimestamp(v) for v in data],
dtype=object)
elif kind == u('date'):
try:
index = np.asarray(
[date.fromordinal(v) for v in data], dtype=object)
        except ValueError:
index = np.asarray(
[date.fromtimestamp(v) for v in data], dtype=object)
elif kind in (u('integer'), u('float')):
index = np.asarray(data)
    elif kind == u('string'):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
elif kind == u('object'):
index = np.asarray(data[0])
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
def _unconvert_index_legacy(data, kind, legacy=False, encoding=None):
kind = _ensure_decoded(kind)
if kind == u('datetime'):
index = lib.time64_to_datetime(data)
    elif kind == u('integer'):
        index = np.asarray(data, dtype=object)
    elif kind == u('string'):
index = _unconvert_string_array(data, nan_rep=None, encoding=encoding)
else: # pragma: no cover
raise ValueError('unrecognized index type %s' % kind)
return index
def _convert_string_array(data, encoding, itemsize=None):
"""
we take a string-like that is object dtype and coerce to a fixed size
string type
Parameters
----------
data : a numpy array of object dtype
encoding : None or string-encoding
itemsize : integer, optional, defaults to the max length of the strings
Returns
-------
data in a fixed-length string dtype, encoded to bytes if needed
"""
# encode if needed
if encoding is not None and len(data):
data = Series(data.ravel()).str.encode(
encoding).values.reshape(data.shape)
# create the sized dtype
if itemsize is None:
itemsize = lib.max_len_string_array(_ensure_object(data.ravel()))
data = np.asarray(data, dtype="S%d" % itemsize)
return data
def _unconvert_string_array(data, nan_rep=None, encoding=None):
"""
inverse of _convert_string_array
Parameters
----------
data : fixed length string dtyped array
nan_rep : the storage repr of NaN, optional
encoding : the encoding of the data, optional
Returns
-------
an object array of the decoded data
"""
shape = data.shape
data = np.asarray(data.ravel(), dtype=object)
# guard against a None encoding in PY3 (because of a legacy
# where the passed encoding is actually None)
encoding = _ensure_encoding(encoding)
if encoding is not None and len(data):
itemsize = lib.max_len_string_array(_ensure_object(data))
if compat.PY3:
dtype = "U{0}".format(itemsize)
else:
dtype = "S{0}".format(itemsize)
if isinstance(data[0], compat.binary_type):
data = Series(data).str.decode(encoding).values
else:
data = data.astype(dtype, copy=False).astype(object, copy=False)
if nan_rep is None:
nan_rep = 'nan'
data = lib.string_array_replace_from_nan_rep(data, nan_rep)
return data.reshape(shape)
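# Round-trip sketch for the two helpers above (values are illustrative): object
# strings are packed into a fixed-width byte array for PyTables, then decoded
# back to an object array, with the stored nan_rep mapped back to np.nan.
def _string_array_roundtrip_sketch():
    arr = np.asarray(['foo', 'ba', 'nan'], dtype=object)
    packed = _convert_string_array(arr, encoding='utf-8')      # e.g. dtype 'S3'
    return _unconvert_string_array(packed, nan_rep='nan', encoding='utf-8')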
def _maybe_convert(values, val_kind, encoding):
if _need_convert(val_kind):
conv = _get_converter(val_kind, encoding)
# conv = np.frompyfunc(conv, 1, 1)
values = conv(values)
return values
def _get_converter(kind, encoding):
kind = _ensure_decoded(kind)
if kind == 'datetime64':
return lambda x: np.asarray(x, dtype='M8[ns]')
elif kind == 'datetime':
return lib.convert_timestamps
elif kind == 'string':
return lambda x: _unconvert_string_array(x, encoding=encoding)
else: # pragma: no cover
raise ValueError('invalid kind %s' % kind)
def _need_convert(kind):
kind = _ensure_decoded(kind)
if kind in (u('datetime'), u('datetime64'), u('string')):
return True
return False
class Selection(object):
"""
Carries out a selection operation on a tables.Table object.
Parameters
----------
table : a Table object
    where : list of Terms (or convertible to)
    start, stop : indices to start and/or stop selection
"""
def __init__(self, table, where=None, start=None, stop=None, **kwargs):
self.table = table
self.where = where
self.start = start
self.stop = stop
self.condition = None
self.filter = None
self.terms = None
self.coordinates = None
if is_list_like(where):
# see if we have a passed coordinate like
try:
inferred = lib.infer_dtype(where)
if inferred == 'integer' or inferred == 'boolean':
where = np.asarray(where)
if where.dtype == np.bool_:
start, stop = self.start, self.stop
if start is None:
start = 0
if stop is None:
stop = self.table.nrows
self.coordinates = np.arange(start, stop)[where]
elif issubclass(where.dtype.type, np.integer):
if ((self.start is not None and
(where < self.start).any()) or
(self.stop is not None and
(where >= self.stop).any())):
raise ValueError(
"where must have index locations >= start and "
"< stop"
)
self.coordinates = where
            except Exception:
pass
if self.coordinates is None:
self.terms = self.generate(where)
# create the numexpr & the filter
if self.terms is not None:
self.condition, self.filter = self.terms.evaluate()
def generate(self, where):
""" where can be a : dict,list,tuple,string """
if where is None:
return None
q = self.table.queryables()
try:
return Expr(where, queryables=q, encoding=self.table.encoding)
except NameError:
# raise a nice message, suggesting that the user should use
# data_columns
raise ValueError(
"The passed where expression: {0}\n"
" contains an invalid variable reference\n"
" all of the variable refrences must be a "
"reference to\n"
" an axis (e.g. 'index' or 'columns'), or a "
"data_column\n"
" The currently defined references are: {1}\n"
.format(where, ','.join(q.keys()))
)
def select(self):
"""
generate the selection
"""
if self.condition is not None:
return self.table.table.read_where(self.condition.format(),
start=self.start,
stop=self.stop)
elif self.coordinates is not None:
return self.table.table.read_coordinates(self.coordinates)
return self.table.table.read(start=self.start, stop=self.stop)
def select_coords(self):
"""
generate the selection
"""
start, stop = self.start, self.stop
nrows = self.table.nrows
if start is None:
start = 0
elif start < 0:
start += nrows
if self.stop is None:
stop = nrows
elif stop < 0:
stop += nrows
if self.condition is not None:
return self.table.table.get_where_list(self.condition.format(),
start=start, stop=stop,
sort=True)
elif self.coordinates is not None:
return self.coordinates
return np.arange(start, stop)
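# Usage sketch for Selection via the public store API (file and key names are
# examples only): a where clause may only reference the index, the columns
# axis, or declared data_columns; anything else raises the ValueError built in
# ``Selection.generate``.
def _selection_usage_sketch():
    df = DataFrame({'A': np.arange(10.0), 'B': np.arange(10.0)})
    store = HDFStore('store.h5', mode='w')
    try:
        store.append('df', df, data_columns=['A'])
        hits = store.select('df', where='A > 5')              # filtered rows
        coords = store.select_as_coordinates('df', 'A > 5')   # row numbers only
    finally:
        store.close()
    return hits, coords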
# utilities ###
def timeit(key, df, fn=None, remove=True, **kwargs):
if fn is None:
fn = 'timeit.h5'
store = HDFStore(fn, mode='w')
store.append(key, df, **kwargs)
store.close()
if remove:
os.remove(fn)
| mit |
yavalvas/yav_com | build/matplotlib/doc/mpl_examples/user_interfaces/gtk_spreadsheet.py | 13 | 2463 | #!/usr/bin/env python
"""
Example of embedding matplotlib in an application and interacting with
a treeview to store data. Double click on an entry to update plot
data
"""
import pygtk
pygtk.require('2.0')
import gtk
from gtk import gdk
import matplotlib
matplotlib.use('GTKAgg') # or 'GTK'
from matplotlib.backends.backend_gtk import FigureCanvasGTK as FigureCanvas
from numpy.random import random
from matplotlib.figure import Figure
class DataManager(gtk.Window):
numRows, numCols = 20,10
data = random((numRows, numCols))
def __init__(self):
gtk.Window.__init__(self)
self.set_default_size(600, 600)
self.connect('destroy', lambda win: gtk.main_quit())
self.set_title('GtkListStore demo')
self.set_border_width(8)
vbox = gtk.VBox(False, 8)
self.add(vbox)
label = gtk.Label('Double click a row to plot the data')
vbox.pack_start(label, False, False)
sw = gtk.ScrolledWindow()
sw.set_shadow_type(gtk.SHADOW_ETCHED_IN)
sw.set_policy(gtk.POLICY_NEVER,
gtk.POLICY_AUTOMATIC)
vbox.pack_start(sw, True, True)
model = self.create_model()
self.treeview = gtk.TreeView(model)
self.treeview.set_rules_hint(True)
# matplotlib stuff
fig = Figure(figsize=(6,4))
self.canvas = FigureCanvas(fig) # a gtk.DrawingArea
vbox.pack_start(self.canvas, True, True)
ax = fig.add_subplot(111)
self.line, = ax.plot(self.data[0,:], 'go') # plot the first row
self.treeview.connect('row-activated', self.plot_row)
sw.add(self.treeview)
self.add_columns()
self.add_events(gdk.BUTTON_PRESS_MASK |
gdk.KEY_PRESS_MASK|
gdk.KEY_RELEASE_MASK)
def plot_row(self, treeview, path, view_column):
ind, = path # get the index into data
points = self.data[ind,:]
self.line.set_ydata(points)
self.canvas.draw()
def add_columns(self):
for i in range(self.numCols):
column = gtk.TreeViewColumn('%d'%i, gtk.CellRendererText(), text=i)
self.treeview.append_column(column)
def create_model(self):
types = [float]*self.numCols
store = gtk.ListStore(*types)
for row in self.data:
store.append(row)
return store
manager = DataManager()
manager.show_all()
gtk.main()
| mit |
liyu1990/sklearn | sklearn/cluster/dbscan_.py | 7 | 11611 | # -*- coding: utf-8 -*-
"""
DBSCAN: Density-Based Spatial Clustering of Applications with Noise
"""
# Author: Robert Layton <robertlayton@gmail.com>
# Joel Nothman <joel.nothman@gmail.com>
# Lars Buitinck
#
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from ..base import BaseEstimator, ClusterMixin
from ..metrics import pairwise_distances
from ..utils import check_array, check_consistent_length
from ..utils.fixes import astype
from ..neighbors import NearestNeighbors
from ._dbscan_inner import dbscan_inner
def dbscan(X, eps=0.5, min_samples=5, metric='minkowski',
algorithm='auto', leaf_size=30, p=2, sample_weight=None):
"""Perform DBSCAN clustering from vector array or distance matrix.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
p : float, optional
The power of the Minkowski metric to be used to calculate distance
between points.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
core_samples : array [n_core_samples]
Indices of core samples.
labels : array [n_samples]
Cluster labels for each point. Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
if not eps > 0.0:
raise ValueError("eps must be positive.")
X = check_array(X, accept_sparse='csr')
if sample_weight is not None:
sample_weight = np.asarray(sample_weight)
check_consistent_length(X, sample_weight)
# Calculate neighborhood for all samples. This leaves the original point
# in, which needs to be considered later (i.e. point i is in the
    # neighborhood of point i. While true, it's useless information)
if metric == 'precomputed' and sparse.issparse(X):
neighborhoods = np.empty(X.shape[0], dtype=object)
X.sum_duplicates() # XXX: modifies X's internals in-place
X_mask = X.data <= eps
masked_indices = astype(X.indices, np.intp, copy=False)[X_mask]
masked_indptr = np.cumsum(X_mask)[X.indptr[1:] - 1]
# insert the diagonal: a point is its own neighbor, but 0 distance
# means absence from sparse matrix data
masked_indices = np.insert(masked_indices, masked_indptr,
np.arange(X.shape[0]))
masked_indptr = masked_indptr[:-1] + np.arange(1, X.shape[0])
# split into rows
neighborhoods[:] = np.split(masked_indices, masked_indptr)
else:
neighbors_model = NearestNeighbors(radius=eps, algorithm=algorithm,
leaf_size=leaf_size,
metric=metric, p=p)
neighbors_model.fit(X)
# This has worst case O(n^2) memory complexity
neighborhoods = neighbors_model.radius_neighbors(X, eps,
return_distance=False)
if sample_weight is None:
n_neighbors = np.array([len(neighbors)
for neighbors in neighborhoods])
else:
n_neighbors = np.array([np.sum(sample_weight[neighbors])
for neighbors in neighborhoods])
# Initially, all samples are noise.
labels = -np.ones(X.shape[0], dtype=np.intp)
# A list of all core samples found.
core_samples = np.asarray(n_neighbors >= min_samples, dtype=np.uint8)
dbscan_inner(core_samples, neighborhoods, labels)
return np.where(core_samples)[0], labels
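# Usage sketch for the functional interface (synthetic data; the eps and
# min_samples values are illustrative, not recommendations): the function
# returns the core sample indices and per-point labels, with noise labelled -1.
def _dbscan_function_sketch():
    rng = np.random.RandomState(0)
    X_demo = np.concatenate([rng.normal(0, .2, (20, 2)),
                             rng.normal(3, .2, (20, 2))])
    core_idx, labels = dbscan(X_demo, eps=0.5, min_samples=5)
    return core_idx, labels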
class DBSCAN(BaseEstimator, ClusterMixin):
"""Perform DBSCAN clustering from vector array or distance matrix.
DBSCAN - Density-Based Spatial Clustering of Applications with Noise.
Finds core samples of high density and expands clusters from them.
Good for data which contains clusters of similar density.
Read more in the :ref:`User Guide <dbscan>`.
Parameters
----------
eps : float, optional
The maximum distance between two samples for them to be considered
as in the same neighborhood.
min_samples : int, optional
The number of samples (or total weight) in a neighborhood for a point
to be considered as a core point. This includes the point itself.
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string or callable, it must be one of
        the options allowed by metrics.pairwise.pairwise_distances for its
metric parameter.
If metric is "precomputed", X is assumed to be a distance matrix and
must be square. X may be a sparse matrix, in which case only "nonzero"
elements may be considered neighbors for DBSCAN.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
The algorithm to be used by the NearestNeighbors module
to compute pointwise distances and find nearest neighbors.
See NearestNeighbors module documentation for details.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the speed
of the construction and query, as well as the memory required
to store the tree. The optimal value depends
on the nature of the problem.
Attributes
----------
core_sample_indices_ : array, shape = [n_core_samples]
Indices of core samples.
components_ : array, shape = [n_core_samples, n_features]
Copy of each core sample found by training.
labels_ : array, shape = [n_samples]
Cluster labels for each point in the dataset given to fit().
Noisy samples are given the label -1.
Notes
-----
See examples/cluster/plot_dbscan.py for an example.
This implementation bulk-computes all neighborhood queries, which increases
the memory complexity to O(n.d) where d is the average number of neighbors,
while original DBSCAN had memory complexity O(n).
Sparse neighborhoods can be precomputed using
:func:`NearestNeighbors.radius_neighbors_graph
<sklearn.neighbors.NearestNeighbors.radius_neighbors_graph>`
with ``mode='distance'``.
References
----------
Ester, M., H. P. Kriegel, J. Sander, and X. Xu, "A Density-Based
Algorithm for Discovering Clusters in Large Spatial Databases with Noise".
In: Proceedings of the 2nd International Conference on Knowledge Discovery
and Data Mining, Portland, OR, AAAI Press, pp. 226-231. 1996
"""
def __init__(self, eps=0.5, min_samples=5, metric='euclidean',
algorithm='auto', leaf_size=30, p=None):
self.eps = eps
self.min_samples = min_samples
self.metric = metric
self.algorithm = algorithm
self.leaf_size = leaf_size
self.p = p
def fit(self, X, y=None, sample_weight=None):
"""Perform DBSCAN clustering from features or distance matrix.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
"""
X = check_array(X, accept_sparse='csr')
clust = dbscan(X, sample_weight=sample_weight, **self.get_params())
self.core_sample_indices_, self.labels_ = clust
if len(self.core_sample_indices_):
# fix for scipy sparse indexing issue
self.components_ = X[self.core_sample_indices_].copy()
else:
# no core samples
self.components_ = np.empty((0, X.shape[1]))
return self
def fit_predict(self, X, y=None, sample_weight=None):
"""Performs clustering on X and returns cluster labels.
Parameters
----------
X : array or sparse (CSR) matrix of shape (n_samples, n_features), or \
array of shape (n_samples, n_samples)
A feature array, or array of distances between samples if
``metric='precomputed'``.
sample_weight : array, shape (n_samples,), optional
Weight of each sample, such that a sample with a weight of at least
``min_samples`` is by itself a core sample; a sample with negative
weight may inhibit its eps-neighbor from being core.
Note that weights are absolute, and default to 1.
Returns
-------
y : ndarray, shape (n_samples,)
cluster labels
"""
self.fit(X, sample_weight=sample_weight)
return self.labels_
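# Usage sketch for the estimator interface (same synthetic layout as the
# functional sketch above; parameter values are illustrative): fit_predict
# returns the labels also stored on ``labels_``, and ``core_sample_indices_``
# marks the core points.
def _dbscan_estimator_sketch():
    rng = np.random.RandomState(0)
    X_demo = np.concatenate([rng.normal(0, .2, (20, 2)),
                             rng.normal(3, .2, (20, 2))])
    model = DBSCAN(eps=0.5, min_samples=5)
    labels = model.fit_predict(X_demo)            # same values as model.labels_
    return labels, model.core_sample_indices_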
| bsd-3-clause |
kaushik94/tardis | tardis/montecarlo/tests/test_base.py | 1 | 1152 | import os
import pandas as pd
import numpy as np
import pytest
from astropy import units as u
from numpy.testing import assert_almost_equal
###
# Save and Load
###
@pytest.fixture(scope="module", autouse=True)
def to_hdf_buffer(hdf_file_path, simulation_verysimple):
simulation_verysimple.runner.to_hdf(hdf_file_path, name='runner')
runner_properties = ['output_nu', 'output_energy', 'nu_bar_estimator',
'j_estimator', 'montecarlo_virtual_luminosity',
'last_interaction_in_nu',
'last_interaction_type',
'last_line_interaction_in_id',
'last_line_interaction_out_id',
'last_line_interaction_shell_id',
'packet_luminosity']
@pytest.mark.parametrize("attr", runner_properties)
def test_hdf_runner(hdf_file_path, simulation_verysimple, attr):
actual = getattr(simulation_verysimple.runner, attr)
if hasattr(actual, 'cgs'):
actual = actual.cgs.value
path = os.path.join('runner', attr)
expected = pd.read_hdf(hdf_file_path, path)
assert_almost_equal(actual, expected.values)
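# Minimal sketch of the round-trip pattern exercised above (the series and key
# below are illustrative; ``tmp_path`` is pytest's built-in temporary directory
# fixture rather than this suite's ``hdf_file_path``): values written with
# ``to_hdf`` are read back with ``pd.read_hdf`` and compared numerically.
def _hdf_roundtrip_sketch(tmp_path):
    path = str(tmp_path / 'sketch.h5')
    expected = np.arange(5.0)
    pd.Series(expected).to_hdf(path, key='values')
    actual = pd.read_hdf(path, 'values')
    assert_almost_equal(actual.values, expected)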
| bsd-3-clause |