repo_name (string, 6-112 chars) | path (string, 4-204 chars) | copies (string, 1-3 chars) | size (string, 4-6 chars) | content (string, 714-810k chars) | license (15 classes) |
---|---|---|---|---|---|
nmartensen/pandas | pandas/tests/indexes/timedeltas/test_setops.py | 15 | 2556 | import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas import TimedeltaIndex, timedelta_range, Int64Index
class TestTimedeltaIndex(object):
_multiprocess_can_split_ = True
def test_union(self):
i1 = timedelta_range('1day', periods=5)
i2 = timedelta_range('3day', periods=5)
result = i1.union(i2)
expected = timedelta_range('1day', periods=7)
tm.assert_index_equal(result, expected)
i1 = Int64Index(np.arange(0, 20, 2))
i2 = TimedeltaIndex(start='1 day', periods=10, freq='D')
i1.union(i2) # Works
i2.union(i1) # Fails with "AttributeError: can't set attribute"
def test_union_coverage(self):
idx = TimedeltaIndex(['3d', '1d', '2d'])
ordered = TimedeltaIndex(idx.sort_values(), freq='infer')
result = ordered.union(idx)
tm.assert_index_equal(result, ordered)
result = ordered[:0].union(ordered)
tm.assert_index_equal(result, ordered)
assert result.freq == ordered.freq
def test_union_bug_1730(self):
rng_a = timedelta_range('1 day', periods=4, freq='3H')
rng_b = timedelta_range('1 day', periods=4, freq='4H')
result = rng_a.union(rng_b)
exp = TimedeltaIndex(sorted(set(list(rng_a)) | set(list(rng_b))))
tm.assert_index_equal(result, exp)
def test_union_bug_1745(self):
left = TimedeltaIndex(['1 day 15:19:49.695000'])
right = TimedeltaIndex(['2 day 13:04:21.322000',
'1 day 15:27:24.873000',
'1 day 15:31:05.350000'])
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
tm.assert_index_equal(result, exp)
def test_union_bug_4564(self):
left = timedelta_range("1 day", "30d")
right = left + pd.offsets.Minute(15)
result = left.union(right)
exp = TimedeltaIndex(sorted(set(list(left)) | set(list(right))))
tm.assert_index_equal(result, exp)
def test_intersection_bug_1708(self):
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(5)
result = index_1 & index_2
assert len(result) == 0
index_1 = timedelta_range('1 day', periods=4, freq='h')
index_2 = index_1 + pd.offsets.Hour(1)
result = index_1 & index_2
expected = timedelta_range('1 day 01:00:00', periods=3, freq='h')
tm.assert_index_equal(result, expected)
| bsd-3-clause |
yyjiang/scikit-learn | examples/applications/topics_extraction_with_nmf.py | 106 | 2313 | """
========================================================
Topics extraction with Non-Negative Matrix Factorization
========================================================
This is a proof of concept application of Non Negative Matrix
Factorization of the term frequency matrix of a corpus of documents so
as to extract an additive model of the topic structure of the corpus.
The output is a list of topics, each represented as a list of terms
(weights are not shown).
The default parameters (n_samples / n_features / n_topics) should make
the example runnable in a couple of tens of seconds. You can try to
increase the dimensions of the problem, but be aware that the time complexity
is polynomial.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from time import time
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import NMF
from sklearn.datasets import fetch_20newsgroups
n_samples = 2000
n_features = 1000
n_topics = 10
n_top_words = 20
# Load the 20 newsgroups dataset and vectorize it. We use a few heuristics
# to filter out useless terms early on: the posts are stripped of headers,
# footers and quoted replies; common English words, words occurring in only
# one document, and words occurring in at least 95% of the documents are removed.
t0 = time()
print("Loading dataset and extracting TF-IDF features...")
dataset = fetch_20newsgroups(shuffle=True, random_state=1,
remove=('headers', 'footers', 'quotes'))
vectorizer = TfidfVectorizer(max_df=0.95, min_df=2, max_features=n_features,
stop_words='english')
tfidf = vectorizer.fit_transform(dataset.data[:n_samples])
print("done in %0.3fs." % (time() - t0))
# Fit the NMF model
print("Fitting the NMF model with n_samples=%d and n_features=%d..."
% (n_samples, n_features))
nmf = NMF(n_components=n_topics, random_state=1).fit(tfidf)
print("done in %0.3fs." % (time() - t0))
feature_names = vectorizer.get_feature_names()
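# Display the top terms per topic. argsort() orders term indices by increasing
# weight, and the slice [:-n_top_words - 1:-1] walks that ordering backwards,
# so each topic prints its n_top_words highest-weighted terms in descending order.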
for topic_idx, topic in enumerate(nmf.components_):
print("Topic #%d:" % topic_idx)
print(" ".join([feature_names[i]
for i in topic.argsort()[:-n_top_words - 1:-1]]))
print()
| bsd-3-clause |
wkentaro/fcn | tests/utils_tests/test_visualize_segmentation.py | 1 | 1303 | #!/usr/bin/env python
import os.path as osp
import numpy as np
import PIL.Image
import skimage.io
import skimage.transform
from fcn import utils
here = osp.dirname(osp.abspath(__file__))
def test_label_accuracy_score():
img_file = osp.join(here, '../data/2007_000063.jpg')
lbl_file = osp.join(here, '../data/2007_000063.png')
img = skimage.io.imread(img_file)
lbl_gt = np.array(PIL.Image.open(lbl_file), dtype=np.int32, copy=False)
lbl_gt[lbl_gt == 255] = -1
lbl_pred = lbl_gt.copy()
lbl_pred[lbl_pred == -1] = 0
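# Simulate a coarse prediction: shrink the label image to 1/16 scale and resize
# it back to the original shape with nearest-neighbour interpolation (order=0,
# preserve_range=True) so the integer class ids survive the round trip.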
lbl_pred = skimage.transform.rescale(lbl_pred, 1 / 16., order=0,
preserve_range=True)
lbl_pred = skimage.transform.resize(lbl_pred, lbl_gt.shape, order=0,
preserve_range=True)
lbl_pred = lbl_pred.astype(lbl_gt.dtype)
viz = utils.visualize_segmentation(
lbl_pred=lbl_pred, img=img, n_class=21, lbl_true=lbl_gt)
img_h, img_w = img.shape[:2]
assert isinstance(viz, np.ndarray)
assert viz.shape == (img_h * 2, img_w * 3, 3)
assert viz.dtype == np.uint8
return viz
if __name__ == '__main__':
import matplotlib.pyplot as plt
import skimage.color
viz = test_label_accuracy_score()
plt.imshow(viz)
plt.show()
| mit |
mrjacobagilbert/gnuradio | gr-digital/examples/example_fll.py | 6 | 4947 | #!/usr/bin/env python
#
# Copyright 2011-2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
#
from gnuradio import gr, digital, filter
from gnuradio import blocks
from gnuradio import channels
from gnuradio import eng_notation
from gnuradio.eng_arg import eng_float, intx
from argparse import ArgumentParser
import sys
import numpy
try:
from matplotlib import pyplot
except ImportError:
print("Error: could not from matplotlib import pyplot (http://matplotlib.sourceforge.net/)")
sys.exit(1)
class example_fll(gr.top_block):
def __init__(self, N, sps, rolloff, ntaps, bw, noise, foffset, toffset, poffset):
gr.top_block.__init__(self)
rrc_taps = filter.firdes.root_raised_cosine(
sps, sps, 1.0, rolloff, ntaps)
data = 2.0*numpy.random.randint(0, 2, N) - 1.0
data = numpy.exp(1j*poffset) * data
self.src = blocks.vector_source_c(data.tolist(), False)
self.rrc = filter.interp_fir_filter_ccf(sps, rrc_taps)
self.chn = channels.channel_model(noise, foffset, toffset)
self.fll = digital.fll_band_edge_cc(sps, rolloff, ntaps, bw)
self.vsnk_src = blocks.vector_sink_c()
self.vsnk_fll = blocks.vector_sink_c()
self.vsnk_frq = blocks.vector_sink_f()
self.vsnk_phs = blocks.vector_sink_f()
self.vsnk_err = blocks.vector_sink_f()
self.connect(self.src, self.rrc, self.chn, self.fll, self.vsnk_fll)
self.connect(self.rrc, self.vsnk_src)
self.connect((self.fll,1), self.vsnk_frq)
self.connect((self.fll,2), self.vsnk_phs)
self.connect((self.fll,3), self.vsnk_err)
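# As wired above, the band-edge FLL block exposes extra float streams besides
# the corrected samples: output port 1 is tapped for the loop's frequency
# estimate, port 2 for its phase, and port 3 for the error signal.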
def main():
parser = ArgumentParser(conflict_handler="resolve")
parser.add_argument("-N", "--nsamples", type=int, default=2000,
help="Set the number of samples to process [default=%(default)r]")
parser.add_argument("-S", "--sps", type=int, default=4,
help="Set the samples per symbol [default=%(default)r]")
parser.add_argument("-r", "--rolloff", type=eng_float, default=0.35,
help="Set the rolloff factor [default=%(default)r]")
parser.add_argument("-W", "--bandwidth", type=eng_float, default=2*numpy.pi/100.0,
help="Set the loop bandwidth [default=%(default)r]")
parser.add_argument("-n", "--ntaps", type=int, default=45,
help="Set the number of taps in the filters [default=%(default)r]")
parser.add_argument("--noise", type=eng_float, default=0.0,
help="Set the simulation noise voltage [default=%(default)r]")
parser.add_argument("-f", "--foffset", type=eng_float, default=0.2,
help="Set the simulation's normalized frequency offset (in Hz) [default=%(default)r]")
parser.add_argument("-t", "--toffset", type=eng_float, default=1.0,
help="Set the simulation's timing offset [default=%(default)r]")
parser.add_argument("-p", "--poffset", type=eng_float, default=0.0,
help="Set the simulation's phase offset [default=%(default)r]")
args = parser.parse_args()
# Adjust N for the interpolation by sps
args.nsamples = args.nsamples // args.sps
# Set up the program-under-test
put = example_fll(args.nsamples, args.sps, args.rolloff,
args.ntaps, args.bandwidth, args.noise,
args.foffset, args.toffset, args.poffset)
put.run()
data_src = numpy.array(put.vsnk_src.data())
data_err = numpy.array(put.vsnk_err.data())
# Convert the FLL's LO frequency from rads/sec to Hz
data_frq = numpy.array(put.vsnk_frq.data()) / (2.0*numpy.pi)
# Adjust this to align with the data. There are two filters, each
# ntaps long, and the channel introduces another 4-sample delay.
data_fll = numpy.array(put.vsnk_fll.data()[2*args.ntaps-4:])
# Plot the FLL's LO frequency
f1 = pyplot.figure(1, figsize=(12,10))
s1 = f1.add_subplot(2,2,1)
s1.plot(data_frq)
s1.set_title("FLL LO")
s1.set_xlabel("Samples")
s1.set_ylabel("Frequency (normalized Hz)")
# Plot the FLL's error
s2 = f1.add_subplot(2,2,2)
s2.plot(data_err)
s2.set_title("FLL Error")
s2.set_xlabel("Samples")
s2.set_ylabel("FLL Loop error")
# Plot the IQ symbols
s3 = f1.add_subplot(2,2,3)
s3.plot(data_src.real, data_src.imag, "o")
s3.plot(data_fll.real, data_fll.imag, "rx")
s3.set_title("IQ")
s3.set_xlabel("Real part")
s3.set_ylabel("Imag part")
# Plot the symbols in time
s4 = f1.add_subplot(2,2,4)
s4.plot(data_src.real, "o-")
s4.plot(data_fll.real, "rx-")
s4.set_title("Symbols")
s4.set_xlabel("Samples")
s4.set_ylabel("Real Part of Signals")
pyplot.show()
if __name__ == "__main__":
try:
main()
except KeyboardInterrupt:
pass
| gpl-3.0 |
detrout/debian-statsmodels | statsmodels/examples/ex_kernel_regression.py | 34 | 1785 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 09:17:40 2013
Author: Josef Perktold based on test file by George Panterov
"""
from __future__ import print_function
import numpy as np
import numpy.testing as npt
import statsmodels.nonparametric.api as nparam
#import statsmodels.api as sm
#nparam = sm.nonparametric
italy_gdp = \
[8.556, 12.262, 9.587, 8.119, 5.537, 6.796, 8.638,
6.483, 6.212, 5.111, 6.001, 7.027, 4.616, 3.922,
4.688, 3.957, 3.159, 3.763, 3.829, 5.242, 6.275,
8.518, 11.542, 9.348, 8.02, 5.527, 6.865, 8.666,
6.672, 6.289, 5.286, 6.271, 7.94, 4.72, 4.357,
4.672, 3.883, 3.065, 3.489, 3.635, 5.443, 6.302,
9.054, 12.485, 9.896, 8.33, 6.161, 7.055, 8.717,
6.95]
italy_year = \
[1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951,
1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1951, 1952,
1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952,
1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1952, 1953, 1953,
1953, 1953, 1953, 1953, 1953, 1953]
italy_year = np.asarray(italy_year, float)
model = nparam.KernelReg(endog=[italy_gdp],
exog=[italy_year], reg_type='lc',
var_type='o', bw='cv_ls')
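# reg_type='lc' requests a local-constant (Nadaraya-Watson) kernel regression,
# var_type='o' treats the single regressor (year) as ordered, and bw='cv_ls'
# selects the bandwidth by least-squares cross-validation; the fit is compared
# against the R reference values hard-coded below.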
sm_bw = model.bw
R_bw = 0.1390096
sm_mean, sm_mfx = model.fit()
sm_mean2 = sm_mean[0:5]
sm_mfx = sm_mfx[0:5]
R_mean = 6.190486
sm_R2 = model.r_squared()
R_R2 = 0.1435323
npt.assert_allclose(sm_bw, R_bw, atol=1e-2)
npt.assert_allclose(sm_mean2, R_mean, atol=1e-2)
npt.assert_allclose(sm_R2, R_R2, atol=1e-2)
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(italy_year, italy_gdp, 'o')
ax.plot(italy_year, sm_mean, '-')
plt.show()
| bsd-3-clause |
benjello/openfisca-france-data | openfisca_france_data/erfs/old/datatable.py | 1 | 19235 | # -*- coding: utf-8 -*-
import os
import pkg_resources
import sys
import gc
from pandas import HDFStore
from openfisca_france.utils import check_consistency
# Uses rpy2.
# On MS Windows, the environment variables R_HOME and R_USER should be set
try:
import pandas.rpy.common as com
import rpy2.rpy_classic as rpy
rpy.set_default_mode(rpy.NO_CONVERSION)
except:
pass
#from openfisca_france.data.sources.config import DATA_DIR
openfisca_france_location = pkg_resources.get_distribution('openfisca-france-data').location
CONFIG_DIR = os.path.join(openfisca_france_location)
#ERF_HDF5_DATA_DIR = os.path.join(SRC_PATH,'countries','france','data', 'erf')
class Survey(object):
"""
An object to describe survey data
"""
def __init__(self, name = None, label = None, **kwargs):
assert name is not None, "A survey should have a name"
self.name = name
self.tables = dict()
if label is None:
self.label = self.name
else:
self.label = label
self.informations = kwargs
def insert_table(self, name=None, **kwargs):
"""
Insert a table in the Survey
"""
if name not in self.tables.keys():
self.tables[name] = dict()
for key, val in kwargs.iteritems():
if key in ["RData_dir", "RData_filename", "variables"]:
self.tables[name][key] = val
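# A minimal usage sketch of the Survey API above (illustrative only; the file
# and variable names are hypothetical):
#
#     survey = Survey(name="erf", label="ERF 2006")
#     survey.insert_table(name="erf_menage",
#                         RData_dir="/path/to/R/erf",
#                         RData_filename="menage06",
#                         variables=None)
#     # survey.tables["erf_menage"] now records where to find the RData file
#     # and which variables to keep.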
class SurveyCollection(object):
"""
A collection of Surveys
"""
def __init__(self, year=None):
super(SurveyCollection, self).__init__()
self.year = year
self.surveys = dict()
self.hdf5_filename = None
def build_erfs_survey_collection():
# self.hdf5_filename = os.path.join(os.path.dirname(ERF_HDF5_DATA_DIR),'erf','erf.h5')
erfs_survey_collection = SurveyCollection()
for year in range(2006, 2010):
surveys = erfs_survey_collection.surveys
yr = str(year)[2:]
yr1 = str(year+1)[2:]
eec_variables = ['noi','noicon','noindiv','noiper','noimer','ident','naia','naim','lien',
'acteu','stc','contra','titc','mrec','forter','rstg','retrai','lpr','cohab','sexe',
'agepr','rga','statut', 'txtppb', 'encadr', 'prosa', 'nbsala', 'chpub', 'dip11']
eec_rsa_variables = [ "sp0" + str(i) for i in range(0,10)] + ["sp10", "sp11"] + ['sitant', 'adeben',
'datant', 'raistp', 'amois', 'adfdap' , 'ancentr', 'ancchom', 'dimtyp', 'rabsp', 'raistp',
'rdem', 'ancinatm']
eec_aah_variables = ["rc1rev", "maahe"]
eec_variables += eec_rsa_variables + eec_aah_variables
erf_tables = {
"erf_menage" : {
"RData_filename" : "menage" + yr,
"year" : year,
"variables" : None,
},
"eec_menage" : {
"RData_filename" : "mrf" + yr + "e" + yr + "t4",
"year" : year,
"variables" : None,
},
"foyer" : {
"RData_filename" : "foyer" + yr,
"year" : year,
"variables" : None,
},
"erf_indivi" : {
"RData_filename" : "indivi" + yr,
"year" : year,
"variables" : ['noi','noindiv','ident','declar1','quelfic','persfip','declar2','persfipd','wprm',
"zsali","zchoi","ztsai","zreti","zperi","zrsti","zalri","zrtoi","zragi","zrici","zrnci",
"zsalo","zchoo","ztsao","zreto","zpero","zrsto","zalro","zrtoo","zrago","zrico","zrnco",
],
},
"eec_indivi" : {
"RData_filename" : "irf" + yr + "e" + yr + "t4",
"year" : year,
"variables" : eec_variables,
},
"eec_cmp_1" : {
"RData_filename" : "icomprf" + yr + "e" + yr1 + "t1",
"year" : year,
"variables" : eec_variables,
},
"eec_cmp_2" : {
"RData_filename" : "icomprf" + yr + "e" + yr1 + "t2",
"year" : year,
"variables" : eec_variables,
},
"eec_cmp_3" : {
"RData_filename" : "icomprf" + yr + "e" + yr1 + "t3",
"year" : year,
"variables" : eec_variables,
},
}
# Build absolute path for RData_filename
from ConfigParser import SafeConfigParser
parser = SafeConfigParser()
config_local_ini = os.path.join(CONFIG_DIR, 'config_local.ini')
config_ini = os.path.join(CONFIG_DIR, 'config.ini')
found = parser.read(config_local_ini, config_ini)
print found
data_directory = parser.get('data', 'input_directory')
for table in erf_tables:
table["RData_filename"] = os.path.join(os.path.dirname(data_directory),'R','erf')
def initialize(self, tables=None):
"""
Initialize survey data
"""
self.initialize_erf(tables=tables)
# self.initialize_logement()
def initialize_erf(self, tables=None):
"""
"""
year = self.year
erf = Survey(name="erf")
yr = str(year)[2:]
yr1 = str(year+1)[2:]
erf_tables_to_process = {
"erf_menage" : "menage" + yr,
"eec_menage" : "mrf" + yr + "e" + yr + "t4",
"foyer" : "foyer" + yr,
"erf_indivi" : "indivi" + yr,
"eec_indivi" : "irf" + yr + "e" + yr + "t4",
"eec_cmp_1" : "icomprf" + yr + "e" + yr1 + "t1",
"eec_cmp_2" : "icomprf" + yr + "e" + yr1 + "t2",
"eec_cmp_3" : "icomprf" + yr + "e" + yr1 + "t3"
}
RData_dir = os.path.join(os.path.dirname(DATA_DIR),'R','erf')
variables = ['noi','noindiv','ident','declar1','quelfic','persfip','declar2','persfipd','wprm',
"zsali","zchoi","ztsai","zreti","zperi","zrsti","zalri","zrtoi","zragi","zrici","zrnci",
"zsalo","zchoo","ztsao","zreto","zpero","zrsto","zalro","zrtoo","zrago","zrico","zrnco"]
variables_eec = ['noi','noicon','noindiv','noiper','noimer','ident','naia','naim','lien',
'acteu','stc','contra','titc','mrec','forter','rstg','retrai','lpr','cohab','sexe',
'agepr','rga','statut', 'txtppb', 'encadr', 'prosa', 'nbsala', 'chpub', 'dip11']
variables_eec_rsa = [ "sp0" + str(i) for i in range(0,10)] + ["sp10", "sp11"] + ['sitant', 'adeben',
'datant', 'raistp', 'amois', 'adfdap' , 'ancentr', 'ancchom', 'dimtyp', 'rabsp', 'raistp',
'rdem', 'ancinatm']
variables_eec_aah = ["rc1rev", "maahe"]
variables_eec += variables_eec_rsa + variables_eec_aah
erf_tables = {
"erf_menage" : {"RData_filename" : "menage" + yr,
"variables" : None},
"eec_menage" : {"RData_filename" :"mrf" + yr + "e" + yr + "t4",
"variables" : None},
"foyer" : {"RData_filename" :"foyer" + yr,
"variables" : None},
"erf_indivi" : {"RData_filename" :"indivi" + yr,
"variables" : variables},
"eec_indivi" : {"RData_filename" :"irf" + yr + "e" + yr + "t4",
"variables" : variables_eec},
"eec_cmp_1" : {"RData_filename" :"icomprf" + yr + "e" + yr1 + "t1",
"variables" : variables_eec},
"eec_cmp_2" : {"RData_filename" :"icomprf" + yr + "e" + yr1 + "t2",
"variables" : variables_eec},
"eec_cmp_3" : {"RData_filename" :"icomprf" + yr + "e" + yr1 + "t3",
"variables" : variables_eec}}
RData_dir = os.path.join(os.path.dirname(DATA_DIR),'R','erf')
if tables is None:
erf_tables_to_process = erf_tables
else:
erf_tables_to_process = tables
for name, table_spec in erf_tables_to_process.iteritems():
erf.insert_table(name=name,
RData_filename=table_spec["RData_filename"],
RData_dir=RData_dir,
variables=table_spec["variables"])
self.surveys["erf"] = erf
def initialize_logement(self):
"""
"""
year = self.year
lgt = Survey(name="lgt")
yr = str(year)[2:]
yr1 = str(year+1)[2:]
if yr=="03":
lgt_men = "menage"
lgt_lgt = None
renameidlgt = dict(ident='ident')
elif yr in ["06","07","08","09"]:
lgt_men = "menage1"
lgt_lgt = "logement"
renameidlgt = dict(idlog='ident')
lgt_tables_to_process = {"adresse" : "adresse",
"lgt_menage" : lgt_men,
"lgt_logt" : lgt_lgt}
RData_dir = os.path.join(os.path.dirname(DATA_DIR),'R','logement')
for name, RData_filename in lgt_tables_to_process.iteritems():
lgt.insert_table(name=name,
RData_filename=RData_filename,
RData_dir=RData_dir)
self.surveys["lgt"] = lgt
def initialize_patrimoine(self, year):
"""
TODO:
"""
pat_tables_to_process = {"pat_individu" : "individu",
"pat_menage" : "meange",
"pat_produit" : "produit",
"pat_transmission" : "transm"}
pat_data_dir = os.path.join(os.path.dirname(DATA_DIR),'R','patrimoine')
pat = {"name" : "patrimoine",
"data_dir" : os.path.join(os.path.dirname(DATA_DIR),'R','patrimoine'),
"tables_to_process" : pat_tables_to_process}
def set_config(self, **kwargs):
"""
Set configuration parameters
Parameters
----------
year : int, default None
year of the survey
"""
if self.year is not None:
year = self.year
else:
raise Exception("year should be defined")
store = HDFStore(self.hdf5_filename)
for survey_name, description in self.surveys.iteritems():
for destination_table_name, tables in description.tables.iteritems():
data_dir = tables["RData_dir"]
R_table_name = tables["RData_filename"]
try:
variables = tables["variables"]
except:
variables = None
print variables
self.store_survey(survey_name, R_table_name, destination_table_name, data_dir, variables)
def store_survey(self, survey_name, R_table_name, destination_table_name, data_dir, variables=None, force_recreation=True):
"""
Store a R data table in an HDF5 file
Parameters
----------
survey_name : string
the name of the survey
R_table_name : string
the name of the R data table
destination_table_name : string
the name of the table in the HDFStore
data_dir : path
the directory where to find the RData file
variables : list of string, default None
When not None, list of the variables to keep
"""
gc.collect()
year = self.year
def get_survey_year(survey_name, year):
if survey_name == "logement":
if year == 2003:
return 2003
elif year in range(2006,2010):
return 2006
if survey_name == "patrimoine":
return 2004
else:
return year
print "creating %s" %(destination_table_name)
table_Rdata = R_table_name + ".Rdata"
filename = os.path.join(data_dir, str(get_survey_year(survey_name, year)), table_Rdata)
print filename
if not os.path.isfile(filename):
raise Exception("filename do not exists")
rpy.r.load(filename)
stored_table = com.load_data(R_table_name)
store = HDFStore(self.hdf5_filename)
store_path = str(self.year)+"/"+destination_table_name
if store_path in store:
if force_recreation is not True:
print store_path + "already exists, do not re-create and exit"
store.close()
return
if variables is not None:
print store
print store_path
print variables
variables_stored = list(set(variables).intersection(set(stored_table.columns)))
print list(set(variables).difference((set(stored_table.columns))))
store[store_path] = stored_table[variables_stored]
else:
store[store_path] = stored_table
store.close()
del stored_table
gc.collect()
def get_value(self, variable, table=None):
"""
Get value
Parameters
----------
variable : string
name of the variable
table : string, default None
name of the table where to get variable
Returns
-------
df : DataFrame, default None
A DataFrame containing the variable
"""
df = self.get_values([variable], table)
return df
def get_values(self, variables=None, table=None):
"""
Get values
Parameters
----------
variables : list of strings, default None
list of variables names, if None return the whole table
table : string, default None
name of the table where to get the variables
Returns
-------
df : DataFrame, default None
A DataFrame containing the variables
"""
store = HDFStore(self.hdf5_filename)
df = store[str(self.year)+"/"+table]
# If no variables read the whole table
if variables is None:
return df
diff = set(variables) - set(df.columns)
if diff:
raise Exception("The following variable(s) %s are missing" %diff)
variables = list( set(variables).intersection(df.columns))
df = df[variables]
return df
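# Illustrative sketch (not part of the original module): assuming the instance
# has its year and hdf5_filename configured and the store has been filled via
# set_config()/store_survey(), values can be read back with, e.g.
#
#     df = survey_collection.get_values(variables=["noindiv", "wprm"],
#                                       table="erf_indivi")
#
# where the variable and table names are only examples.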
def get_of_value(self, variable, table=None):
"""
Get value
Parameters
----------
variable : string
name of the variable
table : string, default None
name of the table where to get variable
Returns
-------
df : DataFrame, default None
A DataFrame containing the variable
"""
df = self.get_of_values([variable], table)
return df
def get_of_values(self, variables=None, table=None):
"""
Get values
Parameters
----------
variables : list of strings, default None
list of variables names, if None return the whole table
table : string, default None
name of the table where to get the variables
Returns
-------
df : DataFrame, default None
A DataFrame containing the variables
"""
store = HDFStore(self.hdf5_filename)
df = store[str(self.year)+"/"+table]
# If no variables read the whole table
if variables is None:
return df
from openfisca_france.data.erf import get_erf2of, get_of2erf
of2erf = get_of2erf()
to_be_renamed_variables = set(of2erf.keys()).intersection(variables)
renamed_variables = []
for variable in to_be_renamed_variables:
renamed_variables.append(of2erf[variable])
if renamed_variables:
variables = list( set(variables).difference(to_be_renamed_variables)) + renamed_variables
# if table is None:
# for test_table in self.tables.keys:
# if set(variables) < set(self.tables[test_table].columns):
# table = test_table
# print "using guessed table :", table
# break
#
# if table is None:
# print "varname not found in any tables"
# df = None
# else:
variables = list( set(variables).intersection(df.columns))
df = df[variables]
# rename variables according to their name in openfisca
erf2of = get_erf2of()
to_be_renamed_variables = set(erf2of.values()).intersection(variables)
if to_be_renamed_variables:
for var in to_be_renamed_variables:
df.rename(columns = {var: erf2of[var]}, inplace=True)
return df
## def test():
## '''
## Validate check_consistency
## '''
## #===========================================================================
## # from pandas import DataFrame
## #res = DataFrame({af_col.name: simulation.output_table.get_value(af_col.name, af_col.entity)})
## # print res
## #===========================================================================
## store = HDFStore(os.path.join(os.path.dirname(os.path.join(SRC_PATH,'countries','france','data','erf')),'fichiertest.h5'))
## datatable = store.get('test12')
## test_simu = store.get('test_simu')
## print check_consistency(test_simu, datatable)
## def test3():
## year=2006
## erf = SurveyCollection(year=year)
## df = erf.get_of_values(table = "eec_menage")
## from openfisca_core.simulations import SurveySimulation
## simulation = SurveySimulation()
## simulation.set_config(year=year)
## simulation.set_param()
## simulation.compute() # TODO: this should not be mandatory
## check_consistency(simulation.input_table, df)
## def test_init():
## for year in range(2009,2010):
## data = SurveyCollection(year=year)
## data.initialize(tables=["eec_indivi"])
## data.set_config()
## #def test_reading_stata_tables():
## # from pandas.io.stata import StataReader, read_stata # TODO: wait for the next release ...
## #
## # filename = os.path.join(DATA_DIR,"erf","2006","Tables complémentaires","icomprf06e07t1.dta")
## # reader = StataReader(filename)
## # print reader.data()
if __name__ == '__main__':
# test3()
test_init()
## hdf5_filename = os.path.join(os.path.dirname(ERF_HDF5_DATA_DIR),'erf','erf.h5')
## print hdf5_filename
## store = HDFStore(hdf5_filename)
## print store
build_erfs_survey_collection()
#
# hdf5_filename = os.path.join(os.path.dirname(ERF_HDF5_DATA_DIR),'erf','erf_old.h5')
# print hdf5_filename
# store = HDFStore(hdf5_filename)
# print store
| agpl-3.0 |
bgris/ODL_bgris | lib/python3.5/site-packages/matplotlib/tight_layout.py | 14 | 13073 | """
This module provides routines to adjust subplot params so that subplots are
nicely fit in the figure. In doing so, only axis labels, tick labels, axes
titles and offsetboxes that are anchored to axes are currently considered.
Internally, it assumes that the margins (left_margin, etc.) which are
differences between ax.get_tightbbox and ax.bbox are independent of axes
position. This may fail if Axes.adjustable is datalim. Also, this will fail
for some cases (for example, left or right margin is affected by xlabel).
"""
import warnings
import matplotlib
from matplotlib.transforms import TransformedBbox, Bbox
from matplotlib.font_manager import FontProperties
rcParams = matplotlib.rcParams
def _get_left(tight_bbox, axes_bbox):
return axes_bbox.xmin - tight_bbox.xmin
def _get_right(tight_bbox, axes_bbox):
return tight_bbox.xmax - axes_bbox.xmax
def _get_bottom(tight_bbox, axes_bbox):
return axes_bbox.ymin - tight_bbox.ymin
def _get_top(tight_bbox, axes_bbox):
return tight_bbox.ymax - axes_bbox.ymax
def auto_adjust_subplotpars(fig, renderer,
nrows_ncols,
num1num2_list,
subplot_list,
ax_bbox_list=None,
pad=1.08, h_pad=None, w_pad=None,
rect=None):
"""
Return a dictionary of subplot parameters so that spacing between
subplots are adjusted. Note that this function ignores the geometry
information of the subplots themselves, but uses what is given by the
*nrows_ncols* and *num1num2_list* parameters. Also, the results could be
incorrect if some subplots have ``adjustable=datalim``.
Parameters:
nrows_ncols
number of rows and number of columns of the grid.
num1num2_list
list of numbers specifying the area occupied by the subplot
subplot_list
list of subplots that will be used to calculate optimal subplot_params.
pad : float
padding between the figure edge and the edges of subplots, as a fraction
of the font-size.
h_pad, w_pad : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
rect
[left, bottom, right, top] in normalized (0, 1) figure coordinates.
"""
rows, cols = nrows_ncols
pad_inches = pad * FontProperties(
size=rcParams["font.size"]).get_size_in_points() / 72.
if h_pad is not None:
vpad_inches = h_pad * FontProperties(
size=rcParams["font.size"]).get_size_in_points() / 72.
else:
vpad_inches = pad_inches
if w_pad is not None:
hpad_inches = w_pad * FontProperties(
size=rcParams["font.size"]).get_size_in_points() / 72.
else:
hpad_inches = pad_inches
if len(subplot_list) == 0:
raise RuntimeError("")
if len(num1num2_list) != len(subplot_list):
raise RuntimeError("")
if rect is None:
margin_left = None
margin_bottom = None
margin_right = None
margin_top = None
else:
margin_left, margin_bottom, _right, _top = rect
if _right:
margin_right = 1. - _right
else:
margin_right = None
if _top:
margin_top = 1. - _top
else:
margin_top = None
vspaces = [[] for i in range((rows + 1) * cols)]
hspaces = [[] for i in range(rows * (cols + 1))]
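# hspaces/vspaces accumulate the margins measured around each grid cell:
# hspaces has one slot per (row, vertical cell boundary), i.e. rows * (cols + 1)
# entries indexed by row * (cols + 1) + col, while vspaces has one slot per
# (horizontal cell boundary, column), i.e. (rows + 1) * cols entries indexed by
# row * cols + col.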
union = Bbox.union
if ax_bbox_list is None:
ax_bbox_list = []
for subplots in subplot_list:
ax_bbox = union([ax.get_position(original=True)
for ax in subplots])
ax_bbox_list.append(ax_bbox)
for subplots, ax_bbox, (num1, num2) in zip(subplot_list,
ax_bbox_list,
num1num2_list):
tight_bbox_raw = union([ax.get_tightbbox(renderer) for ax in subplots])
tight_bbox = TransformedBbox(tight_bbox_raw,
fig.transFigure.inverted())
row1, col1 = divmod(num1, cols)
if num2 is None:
# left
hspaces[row1 * (cols + 1) + col1].append(
_get_left(tight_bbox, ax_bbox))
# right
hspaces[row1 * (cols + 1) + (col1 + 1)].append(
_get_right(tight_bbox, ax_bbox))
# top
vspaces[row1 * cols + col1].append(
_get_top(tight_bbox, ax_bbox))
# bottom
vspaces[(row1 + 1) * cols + col1].append(
_get_bottom(tight_bbox, ax_bbox))
else:
row2, col2 = divmod(num2, cols)
for row_i in range(row1, row2 + 1):
# left
hspaces[row_i * (cols + 1) + col1].append(
_get_left(tight_bbox, ax_bbox))
# right
hspaces[row_i * (cols + 1) + (col2 + 1)].append(
_get_right(tight_bbox, ax_bbox))
for col_i in range(col1, col2 + 1):
# top
vspaces[row1 * cols + col_i].append(
_get_top(tight_bbox, ax_bbox))
# bottom
vspaces[(row2 + 1) * cols + col_i].append(
_get_bottom(tight_bbox, ax_bbox))
fig_width_inch, fig_height_inch = fig.get_size_inches()
# margins can be negative for axes with aspect applied. And we
# append + [0] to make minimum margins 0
if not margin_left:
margin_left = max([sum(s) for s in hspaces[::cols + 1]] + [0])
margin_left += pad_inches / fig_width_inch
if not margin_right:
margin_right = max([sum(s) for s in hspaces[cols::cols + 1]] + [0])
margin_right += pad_inches / fig_width_inch
if not margin_top:
margin_top = max([sum(s) for s in vspaces[:cols]] + [0])
margin_top += pad_inches / fig_height_inch
if not margin_bottom:
margin_bottom = max([sum(s) for s in vspaces[-cols:]] + [0])
margin_bottom += pad_inches / fig_height_inch
kwargs = dict(left=margin_left,
right=1 - margin_right,
bottom=margin_bottom,
top=1 - margin_top)
if cols > 1:
hspace = max([sum(s)
for i in range(rows)
for s
in hspaces[i * (cols + 1) + 1:(i + 1) * (cols + 1) - 1]])
hspace += hpad_inches / fig_width_inch
h_axes = ((1 - margin_right - margin_left) -
hspace * (cols - 1)) / cols
kwargs["wspace"] = hspace / h_axes
if rows > 1:
vspace = max([sum(s) for s in vspaces[cols:-cols]])
vspace += vpad_inches / fig_height_inch
v_axes = ((1 - margin_top - margin_bottom) -
vspace * (rows - 1)) / rows
kwargs["hspace"] = vspace / v_axes
return kwargs
def get_renderer(fig):
if fig._cachedRenderer:
renderer = fig._cachedRenderer
else:
canvas = fig.canvas
if canvas and hasattr(canvas, "get_renderer"):
renderer = canvas.get_renderer()
else:
# not sure if this can happen
warnings.warn("tight_layout : falling back to Agg renderer")
from matplotlib.backends.backend_agg import FigureCanvasAgg
canvas = FigureCanvasAgg(fig)
renderer = canvas.get_renderer()
return renderer
def get_subplotspec_list(axes_list, grid_spec=None):
"""
Return a list of subplotspec from the given list of axes. For an
instance of axes that does not support subplotspec, None is
inserted in the list.
If grid_spec is given, None is inserted for those not from
the given grid_spec.
"""
subplotspec_list = []
for ax in axes_list:
axes_or_locator = ax.get_axes_locator()
if axes_or_locator is None:
axes_or_locator = ax
if hasattr(axes_or_locator, "get_subplotspec"):
subplotspec = axes_or_locator.get_subplotspec()
subplotspec = subplotspec.get_topmost_subplotspec()
gs = subplotspec.get_gridspec()
if grid_spec is not None:
if gs != grid_spec:
subplotspec = None
elif gs.locally_modified_subplot_params():
subplotspec = None
else:
subplotspec = None
subplotspec_list.append(subplotspec)
return subplotspec_list
def get_tight_layout_figure(fig, axes_list, subplotspec_list, renderer,
pad=1.08, h_pad=None, w_pad=None, rect=None):
"""
Return subplot parameters for tight-layouted-figure with specified
padding.
Parameters:
*fig* : figure instance
*axes_list* : a list of axes
*subplotspec_list* : a list of subplotspec associated with each
axes in axes_list
*renderer* : renderer instance
*pad* : float
padding between the figure edge and the edges of subplots,
as a fraction of the font-size.
*h_pad*, *w_pad* : float
padding (height/width) between edges of adjacent subplots.
Defaults to `pad_inches`.
*rect* : if rect is given, it is interpreted as a rectangle
(left, bottom, right, top) in the normalized figure
coordinate that the whole subplots area (including
labels) will fit into. Default is (0, 0, 1, 1).
"""
subplot_list = []
nrows_list = []
ncols_list = []
ax_bbox_list = []
subplot_dict = {} # multiple axes can share
# same subplot_interface (e.g., axes_grid1). Thus
# we need to join them together.
subplotspec_list2 = []
for ax, subplotspec in zip(axes_list,
subplotspec_list):
if subplotspec is None:
continue
subplots = subplot_dict.setdefault(subplotspec, [])
if not subplots:
myrows, mycols, _, _ = subplotspec.get_geometry()
nrows_list.append(myrows)
ncols_list.append(mycols)
subplotspec_list2.append(subplotspec)
subplot_list.append(subplots)
ax_bbox_list.append(subplotspec.get_position(fig))
subplots.append(ax)
max_nrows = max(nrows_list)
max_ncols = max(ncols_list)
num1num2_list = []
for subplotspec in subplotspec_list2:
rows, cols, num1, num2 = subplotspec.get_geometry()
div_row, mod_row = divmod(max_nrows, rows)
div_col, mod_col = divmod(max_ncols, cols)
if (mod_row != 0) or (mod_col != 0):
raise RuntimeError("")
rowNum1, colNum1 = divmod(num1, cols)
if num2 is None:
rowNum2, colNum2 = rowNum1, colNum1
else:
rowNum2, colNum2 = divmod(num2, cols)
num1num2_list.append((rowNum1 * div_row * max_ncols +
colNum1 * div_col,
((rowNum2 + 1) * div_row - 1) * max_ncols +
(colNum2 + 1) * div_col - 1))
kwargs = auto_adjust_subplotpars(fig, renderer,
nrows_ncols=(max_nrows, max_ncols),
num1num2_list=num1num2_list,
subplot_list=subplot_list,
ax_bbox_list=ax_bbox_list,
pad=pad, h_pad=h_pad, w_pad=w_pad)
if rect is not None:
# if rect is given, the whole subplots area (including
# labels) will fit into the rect instead of the
# figure. Note that the rect argument of
# *auto_adjust_subplotpars* specify the area that will be
# covered by the total area of axes.bbox. Thus we call
# auto_adjust_subplotpars twice, where the second run
# with adjusted rect parameters.
left, bottom, right, top = rect
if left is not None:
left += kwargs["left"]
if bottom is not None:
bottom += kwargs["bottom"]
if right is not None:
right -= (1 - kwargs["right"])
if top is not None:
top -= (1 - kwargs["top"])
#if h_pad is None: h_pad = pad
#if w_pad is None: w_pad = pad
kwargs = auto_adjust_subplotpars(fig, renderer,
nrows_ncols=(max_nrows, max_ncols),
num1num2_list=num1num2_list,
subplot_list=subplot_list,
ax_bbox_list=ax_bbox_list,
pad=pad, h_pad=h_pad, w_pad=w_pad,
rect=(left, bottom, right, top))
return kwargs
| gpl-3.0 |
nudomarinero/EU2014_ES_gender | parse_list.py | 1 | 2361 | #-*- coding: utf-8 -*-
__author__ = 'jsm'
import xml.etree.ElementTree as ElementTree
import os
import re
from unidecode import unidecode
import pandas as pd
# XML with the list of candidates, downloaded from the BOE
# http://www.boe.es/diario_boe/xml.php?id=BOE-A-2014-4577
tree = ElementTree.parse(os.path.join('data', 'candidatos.xml'))
ps = tree.findall("texto/p")
numbers = [str(i) for i in range(10)]
candidatos = []
candidatura = 0
partido = ""
siglas = ""
orden = 0
suplente = False
nombre1 = ""
nombre_completo = ""
b_nombre = False
b_candidatos = False
b_suplentes = False
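# State flags for the line-by-line scan: b_nombre marks that the next paragraph
# holds the party name, b_candidatos that candidate entries follow, and
# b_suplentes that the entries currently being read are substitutes (suplentes).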
for p in ps:
t = p.text.strip()
if b_candidatos or b_suplentes:
if (not t.startswith("Suplentes")) and (t[0] in numbers):
c = t.split(".")
orden = int(c[0])
nombre_completo = ".".join(c[1:])
nombre1 = unidecode(nombre_completo.strip().split(" ")[0])
#print candidatura, partido, orden, nombre_completo, suplente, nombre1
candidatos.append({"candidatura": candidatura,
"partido": partido,
"siglas": siglas,
"suplente": suplente,
"numero": orden,
"nombre": nombre1,
"nombre_completo": nombre_completo})
elif t.startswith("Suplentes"):
pass
else:
b_candidatos = False
b_suplentes = False
if b_nombre:
partido = t
# Party acronym (siglas)
alt = re.findall("(?<= )\((.*)\).*", t)
# If there is no acronym in parentheses, take the last two words
if alt == []:
alt = re.findall("(\w+\s\w+)$", t)
siglas = unidecode(alt[0])
b_candidatos = True
b_nombre = False
if t.startswith(u"CANDIDATURA NÚMERO"):
candidatura = int(t[18:])
suplente = False
b_nombre = True
b_suplentes = False
if t.startswith("Suplentes") and b_candidatos:
suplente = True
b_candidatos = False
b_suplentes = True
data = pd.DataFrame(candidatos)
save = True
if save:
data.to_csv(os.path.join('data', 'candidatos.csv'), encoding="utf-8")
store = pd.HDFStore(os.path.join('data', 'candidatos.h5'))
store["data_init"] = data
store.close()
| mit |
Dannnno/odo | odo/backends/pandas.py | 3 | 1540 | from __future__ import absolute_import, division, print_function
from datashape import discover
from datashape import (float32, float64, string, Option, Record, object_,
datetime_)
import datashape
import pandas as pd
possibly_missing = set((string, datetime_, float32, float64))
@discover.register(pd.DataFrame)
def discover_dataframe(df):
obj = object_
names = list(df.columns)
dtypes = list(map(datashape.CType.from_numpy_dtype, df.dtypes))
dtypes = [string if dt == obj else dt for dt in dtypes]
odtypes = [Option(dt) if dt in possibly_missing else dt
for dt in dtypes]
schema = datashape.Record(list(zip(names, odtypes)))
return len(df) * schema
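# Illustrative only (exact output depends on the pandas/datashape versions):
# discovering a small frame such as
#
#     discover(pd.DataFrame({'x': [1.0, 2.0], 's': ['a', 'b']}))
#
# yields a fixed-length record datashape along the lines of
# 2 * {s: ?string, x: ?float64}, with the possibly-missing dtypes listed above
# wrapped in Option.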
@discover.register(pd.Series)
def discover_series(s):
return len(s) * datashape.CType.from_numpy_dtype(s.dtype)
def coerce_datetimes(df):
""" Make object columns into datetimes if possible
Warning: this operates inplace.
Example
-------
>>> df = pd.DataFrame({'dt': ['2014-01-01'], 'name': ['Alice']})
>>> df.dtypes # note that these are strings/object
dt object
name object
dtype: object
>>> df2 = coerce_datetimes(df)
>>> df2
dt name
0 2014-01-01 Alice
>>> df2.dtypes # note that only the datetime-looking-one was transformed
dt datetime64[ns]
name object
dtype: object
"""
df2 = df.select_dtypes(include=['object']).apply(pd.to_datetime)
for c in df2.columns:
df[c] = df2[c]
return df
| bsd-3-clause |
GeraldLoeffler/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/backends/backend_gtk.py | 69 | 43991 | from __future__ import division
import os, sys
def fn_name(): return sys._getframe(1).f_code.co_name
try:
import gobject
import gtk; gdk = gtk.gdk
import pango
except ImportError:
raise ImportError("Gtk* backend requires pygtk to be installed.")
pygtk_version_required = (2,2,0)
if gtk.pygtk_version < pygtk_version_required:
raise ImportError ("PyGTK %d.%d.%d is installed\n"
"PyGTK %d.%d.%d or later is required"
% (gtk.pygtk_version + pygtk_version_required))
del pygtk_version_required
import matplotlib
from matplotlib import verbose
from matplotlib._pylab_helpers import Gcf
from matplotlib.backend_bases import RendererBase, GraphicsContextBase, \
FigureManagerBase, FigureCanvasBase, NavigationToolbar2, cursors
from matplotlib.backends.backend_gdk import RendererGDK, FigureCanvasGDK
from matplotlib.cbook import is_string_like, is_writable_file_like
from matplotlib.colors import colorConverter
from matplotlib.figure import Figure
from matplotlib.widgets import SubplotTool
from matplotlib import lines
from matplotlib import cbook
backend_version = "%d.%d.%d" % gtk.pygtk_version
_debug = False
#_debug = True
# the true dots per inch on the screen; should be display dependent
# see http://groups.google.com/groups?q=screen+dpi+x11&hl=en&lr=&ie=UTF-8&oe=UTF-8&safe=off&selm=7077.26e81ad5%40swift.cs.tcd.ie&rnum=5 for some info about screen dpi
PIXELS_PER_INCH = 96
cursord = {
cursors.MOVE : gdk.Cursor(gdk.FLEUR),
cursors.HAND : gdk.Cursor(gdk.HAND2),
cursors.POINTER : gdk.Cursor(gdk.LEFT_PTR),
cursors.SELECT_REGION : gdk.Cursor(gdk.TCROSS),
}
# ref gtk+/gtk/gtkwidget.h
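# Re-implementation of the GTK_WIDGET_DRAWABLE C macro: a widget counts as
# drawable only when both its VISIBLE and MAPPED flags are set.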
def GTK_WIDGET_DRAWABLE(w):
flags = w.flags();
return flags & gtk.VISIBLE != 0 and flags & gtk.MAPPED != 0
def draw_if_interactive():
"""
Is called after every pylab drawing command
"""
if matplotlib.is_interactive():
figManager = Gcf.get_active()
if figManager is not None:
figManager.canvas.draw()
def show(mainloop=True):
"""
Show all the figures and enter the gtk main loop
This should be the last line of your script
"""
for manager in Gcf.get_all_fig_managers():
manager.window.show()
if mainloop and gtk.main_level() == 0 and \
len(Gcf.get_all_fig_managers())>0:
gtk.main()
def new_figure_manager(num, *args, **kwargs):
"""
Create a new figure manager instance
"""
FigureClass = kwargs.pop('FigureClass', Figure)
thisFig = FigureClass(*args, **kwargs)
canvas = FigureCanvasGTK(thisFig)
manager = FigureManagerGTK(canvas, num)
# equals:
#manager = FigureManagerGTK(FigureCanvasGTK(Figure(*args, **kwargs), num)
return manager
class FigureCanvasGTK (gtk.DrawingArea, FigureCanvasBase):
keyvald = {65507 : 'control',
65505 : 'shift',
65513 : 'alt',
65508 : 'control',
65506 : 'shift',
65514 : 'alt',
65361 : 'left',
65362 : 'up',
65363 : 'right',
65364 : 'down',
65307 : 'escape',
65470 : 'f1',
65471 : 'f2',
65472 : 'f3',
65473 : 'f4',
65474 : 'f5',
65475 : 'f6',
65476 : 'f7',
65477 : 'f8',
65478 : 'f9',
65479 : 'f10',
65480 : 'f11',
65481 : 'f12',
65300 : 'scroll_lock',
65299 : 'break',
65288 : 'backspace',
65293 : 'enter',
65379 : 'insert',
65535 : 'delete',
65360 : 'home',
65367 : 'end',
65365 : 'pageup',
65366 : 'pagedown',
65438 : '0',
65436 : '1',
65433 : '2',
65435 : '3',
65430 : '4',
65437 : '5',
65432 : '6',
65429 : '7',
65431 : '8',
65434 : '9',
65451 : '+',
65453 : '-',
65450 : '*',
65455 : '/',
65439 : 'dec',
65421 : 'enter',
}
# Setting this as a static constant prevents
# this resulting expression from leaking
event_mask = (gdk.BUTTON_PRESS_MASK |
gdk.BUTTON_RELEASE_MASK |
gdk.EXPOSURE_MASK |
gdk.KEY_PRESS_MASK |
gdk.KEY_RELEASE_MASK |
gdk.ENTER_NOTIFY_MASK |
gdk.LEAVE_NOTIFY_MASK |
gdk.POINTER_MOTION_MASK |
gdk.POINTER_MOTION_HINT_MASK)
def __init__(self, figure):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
FigureCanvasBase.__init__(self, figure)
gtk.DrawingArea.__init__(self)
self._idle_draw_id = 0
self._need_redraw = True
self._pixmap_width = -1
self._pixmap_height = -1
self._lastCursor = None
self.connect('scroll_event', self.scroll_event)
self.connect('button_press_event', self.button_press_event)
self.connect('button_release_event', self.button_release_event)
self.connect('configure_event', self.configure_event)
self.connect('expose_event', self.expose_event)
self.connect('key_press_event', self.key_press_event)
self.connect('key_release_event', self.key_release_event)
self.connect('motion_notify_event', self.motion_notify_event)
self.connect('leave_notify_event', self.leave_notify_event)
self.connect('enter_notify_event', self.enter_notify_event)
self.set_events(self.__class__.event_mask)
self.set_double_buffered(False)
self.set_flags(gtk.CAN_FOCUS)
self._renderer_init()
self._idle_event_id = gobject.idle_add(self.idle_event)
def destroy(self):
#gtk.DrawingArea.destroy(self)
gobject.source_remove(self._idle_event_id)
if self._idle_draw_id != 0:
gobject.source_remove(self._idle_draw_id)
def scroll_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
if event.direction==gdk.SCROLL_UP:
step = 1
else:
step = -1
FigureCanvasBase.scroll_event(self, x, y, step)
return False # finish event propagation?
def button_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_press_event(self, x, y, event.button)
return False # finish event propagation?
def button_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
x = event.x
# flipy so y=0 is bottom of canvas
y = self.allocation.height - event.y
FigureCanvasBase.button_release_event(self, x, y, event.button)
return False # finish event propagation?
def key_press_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
key = self._get_key(event)
if _debug: print "hit", key
FigureCanvasBase.key_press_event(self, key)
return False # finish event propagation?
def key_release_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
key = self._get_key(event)
if _debug: print "release", key
FigureCanvasBase.key_release_event(self, key)
return False # finish event propagation?
def motion_notify_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if event.is_hint:
x, y, state = event.window.get_pointer()
else:
x, y, state = event.x, event.y, event.state
# flipy so y=0 is bottom of canvas
y = self.allocation.height - y
FigureCanvasBase.motion_notify_event(self, x, y)
return False # finish event propagation?
def leave_notify_event(self, widget, event):
FigureCanvasBase.leave_notify_event(self, event)
def enter_notify_event(self, widget, event):
FigureCanvasBase.enter_notify_event(self, event)
def _get_key(self, event):
if event.keyval in self.keyvald:
key = self.keyvald[event.keyval]
elif event.keyval <256:
key = chr(event.keyval)
else:
key = None
ctrl = event.state & gdk.CONTROL_MASK
shift = event.state & gdk.SHIFT_MASK
return key
def configure_event(self, widget, event):
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if widget.window is None:
return
w, h = event.width, event.height
if w < 3 or h < 3:
return # empty fig
# resize the figure (in inches)
dpi = self.figure.dpi
self.figure.set_size_inches (w/dpi, h/dpi)
self._need_redraw = True
return False # finish event propagation?
def draw(self):
# Note: FigureCanvasBase.draw() is inconveniently named as it clashes
# with the deprecated gtk.Widget.draw()
self._need_redraw = True
if GTK_WIDGET_DRAWABLE(self):
self.queue_draw()
# do a synchronous draw (its less efficient than an async draw,
# but is required if/when animation is used)
self.window.process_updates (False)
def draw_idle(self):
def idle_draw(*args):
self.draw()
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _renderer_init(self):
"""Override by GTK backends to select a different renderer
Renderer should provide the methods:
set_pixmap ()
set_width_height ()
that are used by
_render_figure() / _pixmap_prepare()
"""
self._renderer = RendererGDK (self, self.figure.dpi)
def _pixmap_prepare(self, width, height):
"""
Make sure _._pixmap is at least width, height,
create new pixmap if necessary
"""
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
create_pixmap = False
if width > self._pixmap_width:
# increase the pixmap in 10%+ (rather than 1 pixel) steps
self._pixmap_width = max (int (self._pixmap_width * 1.1),
width)
create_pixmap = True
if height > self._pixmap_height:
self._pixmap_height = max (int (self._pixmap_height * 1.1),
height)
create_pixmap = True
if create_pixmap:
self._pixmap = gdk.Pixmap (self.window, self._pixmap_width,
self._pixmap_height)
self._renderer.set_pixmap (self._pixmap)
def _render_figure(self, pixmap, width, height):
"""used by GTK and GTKcairo. GTKAgg overrides
"""
self._renderer.set_width_height (width, height)
self.figure.draw (self._renderer)
def expose_event(self, widget, event):
"""Expose_event for all GTK backends. Should not be overridden.
"""
if _debug: print 'FigureCanvasGTK.%s' % fn_name()
if GTK_WIDGET_DRAWABLE(self):
if self._need_redraw:
x, y, w, h = self.allocation
self._pixmap_prepare (w, h)
self._render_figure(self._pixmap, w, h)
self._need_redraw = False
x, y, w, h = event.area
self.window.draw_drawable (self.style.fg_gc[self.state],
self._pixmap, x, y, x, y, w, h)
return False # finish event propagation?
filetypes = FigureCanvasBase.filetypes.copy()
filetypes['jpg'] = 'JPEG'
filetypes['jpeg'] = 'JPEG'
filetypes['png'] = 'Portable Network Graphics'
def print_jpeg(self, filename, *args, **kwargs):
return self._print_image(filename, 'jpeg')
print_jpg = print_jpeg
def print_png(self, filename, *args, **kwargs):
return self._print_image(filename, 'png')
def _print_image(self, filename, format):
if self.flags() & gtk.REALIZED == 0:
# realize the widget so that self.window exists (needed for the pixmap);
# this has a side effect of altering figure width,height (via configure-event?)
gtk.DrawingArea.realize(self)
width, height = self.get_width_height()
pixmap = gdk.Pixmap (self.window, width, height)
self._renderer.set_pixmap (pixmap)
self._render_figure(pixmap, width, height)
# jpg colors don't match the display very well, png colors match
# better
pixbuf = gdk.Pixbuf(gdk.COLORSPACE_RGB, 0, 8, width, height)
pixbuf.get_from_drawable(pixmap, pixmap.get_colormap(),
0, 0, 0, 0, width, height)
if is_string_like(filename):
try:
pixbuf.save(filename, format)
except gobject.GError, exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
elif is_writable_file_like(filename):
if hasattr(pixbuf, 'save_to_callback'):
def save_callback(buf, data=None):
data.write(buf)
try:
pixbuf.save_to_callback(save_callback, format, user_data=filename)
except gobject.GError, exc:
error_msg_gtk('Save figure failure:\n%s' % (exc,), parent=self)
else:
raise ValueError("Saving to a Python file-like object is only supported by PyGTK >= 2.8")
else:
raise ValueError("filename must be a path or a file-like object")
def get_default_filetype(self):
return 'png'
def flush_events(self):
gtk.gdk.threads_enter()
while gtk.events_pending():
gtk.main_iteration(True)
gtk.gdk.flush()
gtk.gdk.threads_leave()
def start_event_loop(self,timeout):
FigureCanvasBase.start_event_loop_default(self,timeout)
start_event_loop.__doc__=FigureCanvasBase.start_event_loop_default.__doc__
def stop_event_loop(self):
FigureCanvasBase.stop_event_loop_default(self)
stop_event_loop.__doc__=FigureCanvasBase.stop_event_loop_default.__doc__
class FigureManagerGTK(FigureManagerBase):
"""
Public attributes
canvas : The FigureCanvas instance
num : The Figure number
toolbar : The gtk.Toolbar (gtk only)
vbox : The gtk.VBox containing the canvas and toolbar (gtk only)
window : The gtk.Window (gtk only)
"""
def __init__(self, canvas, num):
if _debug: print 'FigureManagerGTK.%s' % fn_name()
FigureManagerBase.__init__(self, canvas, num)
self.window = gtk.Window()
self.window.set_title("Figure %d" % num)
self.vbox = gtk.VBox()
self.window.add(self.vbox)
self.vbox.show()
self.canvas.show()
# attach a show method to the figure for pylab ease of use
self.canvas.figure.show = lambda *args: self.window.show()
self.vbox.pack_start(self.canvas, True, True)
self.toolbar = self._get_toolbar(canvas)
# calculate size for window
w = int (self.canvas.figure.bbox.width)
h = int (self.canvas.figure.bbox.height)
if self.toolbar is not None:
self.toolbar.show()
self.vbox.pack_end(self.toolbar, False, False)
tb_w, tb_h = self.toolbar.size_request()
h += tb_h
self.window.set_default_size (w, h)
def destroy(*args):
Gcf.destroy(num)
self.window.connect("destroy", destroy)
self.window.connect("delete_event", destroy)
if matplotlib.is_interactive():
self.window.show()
def notify_axes_change(fig):
'this will be called whenever the current axes is changed'
if self.toolbar is not None: self.toolbar.update()
self.canvas.figure.add_axobserver(notify_axes_change)
self.canvas.grab_focus()
def destroy(self, *args):
if _debug: print 'FigureManagerGTK.%s' % fn_name()
self.vbox.destroy()
self.window.destroy()
self.canvas.destroy()
self.toolbar.destroy()
self.__dict__.clear()
if Gcf.get_num_fig_managers()==0 and \
not matplotlib.is_interactive() and \
gtk.main_level() >= 1:
gtk.main_quit()
def show(self):
# show the figure window
self.window.show()
def full_screen_toggle (self):
self._full_screen_flag = not self._full_screen_flag
if self._full_screen_flag:
self.window.fullscreen()
else:
self.window.unfullscreen()
_full_screen_flag = False
def _get_toolbar(self, canvas):
# must be inited after the window, drawingArea and figure
# attrs are set
if matplotlib.rcParams['toolbar'] == 'classic':
toolbar = NavigationToolbar (canvas, self.window)
elif matplotlib.rcParams['toolbar'] == 'toolbar2':
toolbar = NavigationToolbar2GTK (canvas, self.window)
else:
toolbar = None
return toolbar
def set_window_title(self, title):
self.window.set_title(title)
def resize(self, width, height):
'set the canvas size in pixels'
#_, _, cw, ch = self.canvas.allocation
#_, _, ww, wh = self.window.allocation
#self.window.resize (width-cw+ww, height-ch+wh)
self.window.resize(width, height)
class NavigationToolbar2GTK(NavigationToolbar2, gtk.Toolbar):
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image_file, callback(str)
toolitems = (
('Home', 'Reset original view', 'home.png', 'home'),
('Back', 'Back to previous view','back.png', 'back'),
('Forward', 'Forward to next view','forward.png', 'forward'),
('Pan', 'Pan axes with left mouse, zoom with right', 'move.png','pan'),
('Zoom', 'Zoom to rectangle','zoom_to_rect.png', 'zoom'),
(None, None, None, None),
('Subplots', 'Configure subplots','subplots.png', 'configure_subplots'),
('Save', 'Save the figure','filesave.png', 'save_figure'),
)
def __init__(self, canvas, window):
self.win = window
gtk.Toolbar.__init__(self)
NavigationToolbar2.__init__(self, canvas)
self._idle_draw_id = 0
def set_message(self, s):
if self._idle_draw_id == 0:
self.message.set_label(s)
def set_cursor(self, cursor):
self.canvas.window.set_cursor(cursord[cursor])
def release(self, event):
try: del self._imageBack
except AttributeError: pass
def dynamic_update(self):
# legacy method; new method is canvas.draw_idle
self.canvas.draw_idle()
def draw_rubberband(self, event, x0, y0, x1, y1):
'adapted from http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/189744'
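# Strategy: the first call snapshots the pixels of the current axes region
# (self._imageBack); later calls restore that snapshot and then draw the new
# rubber-band rectangle from an idle callback, so the band can be dragged
# without forcing a full figure redraw.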
drawable = self.canvas.window
if drawable is None:
return
gc = drawable.new_gc()
height = self.canvas.figure.bbox.height
y1 = height - y1
y0 = height - y0
w = abs(x1 - x0)
h = abs(y1 - y0)
rect = [int(val) for val in (min(x0, x1), min(y0, y1), w, h)]
try: lastrect, imageBack = self._imageBack
except AttributeError:
#snap image back
if event.inaxes is None:
return
ax = event.inaxes
l,b,w,h = [int(val) for val in ax.bbox.bounds]
b = int(height)-(b+h)
axrect = l,b,w,h
self._imageBack = axrect, drawable.get_image(*axrect)
drawable.draw_rectangle(gc, False, *rect)
self._idle_draw_id = 0
else:
def idle_draw(*args):
drawable.draw_image(gc, imageBack, 0, 0, *lastrect)
drawable.draw_rectangle(gc, False, *rect)
self._idle_draw_id = 0
return False
if self._idle_draw_id == 0:
self._idle_draw_id = gobject.idle_add(idle_draw)
def _init_toolbar(self):
self.set_style(gtk.TOOLBAR_ICONS)
if gtk.pygtk_version >= (2,4,0):
self._init_toolbar2_4()
else:
self._init_toolbar2_2()
def _init_toolbar2_2(self):
basedir = os.path.join(matplotlib.rcParams['datapath'],'images')
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.append_space()
continue
fname = os.path.join(basedir, image_file)
image = gtk.Image()
image.set_from_file(fname)
w = self.append_item(text,
tooltip_text,
'Private',
image,
getattr(self, callback)
)
self.append_space()
self.message = gtk.Label()
self.append_widget(self.message, None, None)
self.message.show()
def _init_toolbar2_4(self):
basedir = os.path.join(matplotlib.rcParams['datapath'],'images')
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_file, callback in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
fname = os.path.join(basedir, image_file)
image = gtk.Image()
image.set_from_file(fname)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
tbutton.connect('clicked', getattr(self, callback))
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
toolitem = gtk.SeparatorToolItem()
self.insert(toolitem, -1)
# set_draw() not making separator invisible,
# bug #143692 fixed Jun 06 2004, will be in GTK+ 2.6
toolitem.set_draw(False)
toolitem.set_expand(True)
toolitem = gtk.ToolItem()
self.insert(toolitem, -1)
self.message = gtk.Label()
toolitem.add(self.message)
self.show_all()
def get_filechooser(self):
if gtk.pygtk_version >= (2,4,0):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
return FileSelection(title='Save the figure',
parent=self.win,)
def save_figure(self, button):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
def configure_subplots(self, button):
toolfig = Figure(figsize=(6,3))
canvas = self._get_canvas(toolfig)
toolfig.subplots_adjust(top=0.9)
tool = SubplotTool(self.canvas.figure, toolfig)
w = int (toolfig.bbox.width)
h = int (toolfig.bbox.height)
window = gtk.Window()
window.set_title("Subplot Configuration Tool")
window.set_default_size(w, h)
vbox = gtk.VBox()
window.add(vbox)
vbox.show()
canvas.show()
vbox.pack_start(canvas, True, True)
window.show()
def _get_canvas(self, fig):
return FigureCanvasGTK(fig)
class NavigationToolbar(gtk.Toolbar):
"""
Public attributes
canvas - the FigureCanvas (gtk.DrawingArea)
win - the gtk.Window
"""
# list of toolitems to add to the toolbar, format is:
# text, tooltip_text, image, callback(str), callback_arg, scroll(bool)
toolitems = (
('Left', 'Pan left with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_BACK, 'panx', -1, True),
('Right', 'Pan right with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_FORWARD, 'panx', 1, True),
('Zoom In X',
'Zoom In X (shrink the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomx', 1, True),
('Zoom Out X',
'Zoom Out X (expand the x axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomx', -1, True),
(None, None, None, None, None, None,),
('Up', 'Pan up with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_UP, 'pany', 1, True),
('Down', 'Pan down with click or wheel mouse (bidirectional)',
gtk.STOCK_GO_DOWN, 'pany', -1, True),
('Zoom In Y',
'Zoom in Y (shrink the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_IN, 'zoomy', 1, True),
('Zoom Out Y',
'Zoom Out Y (expand the y axis limits) with click or wheel'
' mouse (bidirectional)',
gtk.STOCK_ZOOM_OUT, 'zoomy', -1, True),
(None, None, None, None, None, None,),
('Save', 'Save the figure',
gtk.STOCK_SAVE, 'save_figure', None, False),
)
def __init__(self, canvas, window):
"""
        *canvas* is the FigureCanvasGTK to operate on and *window* is the
        gtk.Window that contains it (both are supplied by FigureManagerGTK)
"""
gtk.Toolbar.__init__(self)
self.canvas = canvas
# Note: gtk.Toolbar already has a 'window' attribute
self.win = window
self.set_style(gtk.TOOLBAR_ICONS)
if gtk.pygtk_version >= (2,4,0):
self._create_toolitems_2_4()
self.update = self._update_2_4
self.fileselect = FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
self._create_toolitems_2_2()
self.update = self._update_2_2
self.fileselect = FileSelection(title='Save the figure',
parent=self.win)
self.show_all()
self.update()
def _create_toolitems_2_4(self):
# use the GTK+ 2.4 GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
self.tooltips = gtk.Tooltips()
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.insert( gtk.SeparatorToolItem(), -1 )
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
tbutton = gtk.ToolButton(image, text)
self.insert(tbutton, -1)
if callback_arg:
tbutton.connect('clicked', getattr(self, callback),
callback_arg)
else:
tbutton.connect('clicked', getattr(self, callback))
if scroll:
tbutton.connect('scroll_event', getattr(self, callback))
tbutton.set_tooltip(self.tooltips, tooltip_text, 'Private')
        # Axes toolitem: empty at start; update() adds a menu if there are >= 2 axes
self.axes_toolitem = gtk.ToolItem()
self.insert(self.axes_toolitem, 0)
self.axes_toolitem.set_tooltip (
self.tooltips,
tip_text='Select axes that controls affect',
tip_private = 'Private')
align = gtk.Alignment (xalign=0.5, yalign=0.5, xscale=0.0, yscale=0.0)
self.axes_toolitem.add(align)
self.menubutton = gtk.Button ("Axes")
align.add (self.menubutton)
def position_menu (menu):
"""Function for positioning a popup menu.
Place menu below the menu button, but ensure it does not go off
the bottom of the screen.
            Without a position function the menu would pop up at the current mouse position.
"""
x0, y0 = self.window.get_origin()
x1, y1, m = self.window.get_pointer()
x2, y2 = self.menubutton.get_pointer()
sc_h = self.get_screen().get_height() # requires GTK+ 2.2 +
w, h = menu.size_request()
x = x0 + x1 - x2
y = y0 + y1 - y2 + self.menubutton.allocation.height
y = min(y, sc_h - h)
return x, y, True
def button_clicked (button, data=None):
self.axismenu.popup (None, None, position_menu, 0,
gtk.get_current_event_time())
self.menubutton.connect ("clicked", button_clicked)
def _update_2_4(self):
# for GTK+ 2.4+
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
self.axismenu = self._make_axis_menu()
self.menubutton.show_all()
else:
self.menubutton.hide()
self.set_active(range(len(self._axes)))
def _create_toolitems_2_2(self):
# use the GTK+ 2.2 (and lower) GtkToolbar API
iconSize = gtk.ICON_SIZE_SMALL_TOOLBAR
for text, tooltip_text, image_num, callback, callback_arg, scroll \
in self.toolitems:
if text is None:
self.append_space()
continue
image = gtk.Image()
image.set_from_stock(image_num, iconSize)
item = self.append_item(text, tooltip_text, 'Private', image,
getattr(self, callback), callback_arg)
if scroll:
item.connect("scroll_event", getattr(self, callback))
self.omenu = gtk.OptionMenu()
self.omenu.set_border_width(3)
self.insert_widget(
self.omenu,
'Select axes that controls affect',
'Private', 0)
def _update_2_2(self):
# for GTK+ 2.2 and lower
# called by __init__() and FigureManagerGTK
self._axes = self.canvas.figure.axes
if len(self._axes) >= 2:
# set up the axis menu
self.omenu.set_menu( self._make_axis_menu() )
self.omenu.show_all()
else:
self.omenu.hide()
self.set_active(range(len(self._axes)))
def _make_axis_menu(self):
# called by self._update*()
def toggled(item, data=None):
if item == self.itemAll:
for item in items: item.set_active(True)
elif item == self.itemInvert:
for item in items:
item.set_active(not item.get_active())
ind = [i for i,item in enumerate(items) if item.get_active()]
self.set_active(ind)
menu = gtk.Menu()
self.itemAll = gtk.MenuItem("All")
menu.append(self.itemAll)
self.itemAll.connect("activate", toggled)
self.itemInvert = gtk.MenuItem("Invert")
menu.append(self.itemInvert)
self.itemInvert.connect("activate", toggled)
items = []
for i in range(len(self._axes)):
item = gtk.CheckMenuItem("Axis %d" % (i+1))
menu.append(item)
item.connect("toggled", toggled)
item.set_active(True)
items.append(item)
menu.show_all()
return menu
def set_active(self, ind):
self._ind = ind
self._active = [ self._axes[i] for i in self._ind ]
def panx(self, button, direction):
'panx in direction'
for a in self._active:
a.xaxis.pan(direction)
self.canvas.draw()
return True
def pany(self, button, direction):
'pany in direction'
for a in self._active:
a.yaxis.pan(direction)
self.canvas.draw()
return True
def zoomx(self, button, direction):
'zoomx in direction'
for a in self._active:
a.xaxis.zoom(direction)
self.canvas.draw()
return True
def zoomy(self, button, direction):
'zoomy in direction'
for a in self._active:
a.yaxis.zoom(direction)
self.canvas.draw()
return True
def get_filechooser(self):
if gtk.pygtk_version >= (2,4,0):
return FileChooserDialog(
title='Save the figure',
parent=self.win,
filetypes=self.canvas.get_supported_filetypes(),
default_filetype=self.canvas.get_default_filetype())
else:
return FileSelection(title='Save the figure',
parent=self.win)
def save_figure(self, button):
fname, format = self.get_filechooser().get_filename_from_user()
if fname:
try:
self.canvas.print_figure(fname, format=format)
except Exception, e:
error_msg_gtk(str(e), parent=self)
if gtk.pygtk_version >= (2,4,0):
class FileChooserDialog(gtk.FileChooserDialog):
"""GTK+ 2.4 file selector which remembers the last file/directory
selected and presents the user with a menu of supported image formats
"""
def __init__ (self,
title = 'Save file',
parent = None,
action = gtk.FILE_CHOOSER_ACTION_SAVE,
buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
gtk.STOCK_SAVE, gtk.RESPONSE_OK),
path = None,
filetypes = [],
default_filetype = None
):
super (FileChooserDialog, self).__init__ (title, parent, action,
buttons)
self.set_default_response (gtk.RESPONSE_OK)
if not path: path = os.getcwd() + os.sep
# create an extra widget to list supported image formats
self.set_current_folder (path)
self.set_current_name ('image.' + default_filetype)
hbox = gtk.HBox (spacing=10)
hbox.pack_start (gtk.Label ("File Format:"), expand=False)
liststore = gtk.ListStore(gobject.TYPE_STRING)
cbox = gtk.ComboBox(liststore)
cell = gtk.CellRendererText()
cbox.pack_start(cell, True)
cbox.add_attribute(cell, 'text', 0)
hbox.pack_start (cbox)
self.filetypes = filetypes
self.sorted_filetypes = filetypes.items()
self.sorted_filetypes.sort()
default = 0
for i, (ext, name) in enumerate(self.sorted_filetypes):
cbox.append_text ("%s (*.%s)" % (name, ext))
if ext == default_filetype:
default = i
cbox.set_active(default)
self.ext = default_filetype
def cb_cbox_changed (cbox, data=None):
"""File extension changed"""
head, filename = os.path.split(self.get_filename())
root, ext = os.path.splitext(filename)
ext = ext[1:]
new_ext = self.sorted_filetypes[cbox.get_active()][0]
self.ext = new_ext
if ext in self.filetypes:
filename = root + '.' + new_ext
elif ext == '':
filename = filename.rstrip('.') + '.' + new_ext
self.set_current_name (filename)
cbox.connect ("changed", cb_cbox_changed)
hbox.show_all()
self.set_extra_widget(hbox)
def get_filename_from_user (self):
while True:
filename = None
if self.run() != int(gtk.RESPONSE_OK):
break
filename = self.get_filename()
break
self.hide()
return filename, self.ext
else:
class FileSelection(gtk.FileSelection):
"""GTK+ 2.2 and lower file selector which remembers the last
file/directory selected
"""
def __init__(self, path=None, title='Select a file', parent=None):
super(FileSelection, self).__init__(title)
if path: self.path = path
else: self.path = os.getcwd() + os.sep
if parent: self.set_transient_for(parent)
def get_filename_from_user(self, path=None, title=None):
if path: self.path = path
if title: self.set_title(title)
self.set_filename(self.path)
filename = None
if self.run() == int(gtk.RESPONSE_OK):
self.path = filename = self.get_filename()
self.hide()
ext = None
if filename is not None:
ext = os.path.splitext(filename)[1]
if ext.startswith('.'):
ext = ext[1:]
return filename, ext
class DialogLineprops:
"""
A GUI dialog for controlling lineprops
"""
signals = (
'on_combobox_lineprops_changed',
'on_combobox_linestyle_changed',
'on_combobox_marker_changed',
'on_colorbutton_linestyle_color_set',
'on_colorbutton_markerface_color_set',
'on_dialog_lineprops_okbutton_clicked',
'on_dialog_lineprops_cancelbutton_clicked',
)
linestyles = [ls for ls in lines.Line2D.lineStyles if ls.strip()]
linestyled = dict([ (s,i) for i,s in enumerate(linestyles)])
markers = [m for m in lines.Line2D.markers if cbook.is_string_like(m)]
markerd = dict([(s,i) for i,s in enumerate(markers)])
def __init__(self, lines):
import gtk.glade
datadir = matplotlib.get_data_path()
gladefile = os.path.join(datadir, 'lineprops.glade')
if not os.path.exists(gladefile):
raise IOError('Could not find gladefile lineprops.glade in %s'%datadir)
self._inited = False
        self._updateson = True  # when False, widget callbacks do not update the line (used while setting widgets programmatically)
self.wtree = gtk.glade.XML(gladefile, 'dialog_lineprops')
self.wtree.signal_autoconnect(dict([(s, getattr(self, s)) for s in self.signals]))
self.dlg = self.wtree.get_widget('dialog_lineprops')
self.lines = lines
cbox = self.wtree.get_widget('combobox_lineprops')
cbox.set_active(0)
self.cbox_lineprops = cbox
cbox = self.wtree.get_widget('combobox_linestyles')
for ls in self.linestyles:
cbox.append_text(ls)
cbox.set_active(0)
self.cbox_linestyles = cbox
cbox = self.wtree.get_widget('combobox_markers')
for m in self.markers:
cbox.append_text(m)
cbox.set_active(0)
self.cbox_markers = cbox
self._lastcnt = 0
self._inited = True
def show(self):
'populate the combo box'
self._updateson = False
# flush the old
cbox = self.cbox_lineprops
for i in range(self._lastcnt-1,-1,-1):
cbox.remove_text(i)
# add the new
for line in self.lines:
cbox.append_text(line.get_label())
cbox.set_active(0)
self._updateson = True
self._lastcnt = len(self.lines)
self.dlg.show()
def get_active_line(self):
'get the active line'
ind = self.cbox_lineprops.get_active()
line = self.lines[ind]
return line
def get_active_linestyle(self):
        'get the active linestyle'
ind = self.cbox_linestyles.get_active()
ls = self.linestyles[ind]
return ls
def get_active_marker(self):
        'get the active marker'
ind = self.cbox_markers.get_active()
m = self.markers[ind]
return m
def _update(self):
'update the active line props from the widgets'
if not self._inited or not self._updateson: return
line = self.get_active_line()
ls = self.get_active_linestyle()
marker = self.get_active_marker()
line.set_linestyle(ls)
line.set_marker(marker)
button = self.wtree.get_widget('colorbutton_linestyle')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_color((r,g,b))
button = self.wtree.get_widget('colorbutton_markerface')
color = button.get_color()
r, g, b = [val/65535. for val in color.red, color.green, color.blue]
line.set_markerfacecolor((r,g,b))
line.figure.canvas.draw()
def on_combobox_lineprops_changed(self, item):
'update the widgets from the active line'
if not self._inited: return
self._updateson = False
line = self.get_active_line()
ls = line.get_linestyle()
if ls is None: ls = 'None'
self.cbox_linestyles.set_active(self.linestyled[ls])
marker = line.get_marker()
if marker is None: marker = 'None'
self.cbox_markers.set_active(self.markerd[marker])
r,g,b = colorConverter.to_rgb(line.get_color())
color = gtk.gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_linestyle')
button.set_color(color)
r,g,b = colorConverter.to_rgb(line.get_markerfacecolor())
color = gtk.gdk.Color(*[int(val*65535) for val in r,g,b])
button = self.wtree.get_widget('colorbutton_markerface')
button.set_color(color)
self._updateson = True
def on_combobox_linestyle_changed(self, item):
self._update()
def on_combobox_marker_changed(self, item):
self._update()
def on_colorbutton_linestyle_color_set(self, button):
self._update()
def on_colorbutton_markerface_color_set(self, button):
        'called when the marker face color button is clicked'
self._update()
def on_dialog_lineprops_okbutton_clicked(self, button):
self._update()
self.dlg.hide()
def on_dialog_lineprops_cancelbutton_clicked(self, button):
self.dlg.hide()
# set icon used when windows are minimized
# Unfortunately, the SVG renderer (rsvg) leaks memory under earlier
# versions of pygtk, so we have to use a PNG file instead.
try:
if gtk.pygtk_version < (2, 8, 0):
icon_filename = 'matplotlib.png'
else:
icon_filename = 'matplotlib.svg'
gtk.window_set_default_icon_from_file (
os.path.join (matplotlib.rcParams['datapath'], 'images', icon_filename))
except:
verbose.report('Could not load matplotlib icon: %s' % sys.exc_info()[1])
def error_msg_gtk(msg, parent=None):
if parent is not None: # find the toplevel gtk.Window
parent = parent.get_toplevel()
if parent.flags() & gtk.TOPLEVEL == 0:
parent = None
if not is_string_like(msg):
msg = ','.join(map(str,msg))
dialog = gtk.MessageDialog(
parent = parent,
type = gtk.MESSAGE_ERROR,
buttons = gtk.BUTTONS_OK,
message_format = msg)
dialog.run()
dialog.destroy()
FigureManager = FigureManagerGTK
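# --- Added illustrative example; not part of the original backend module. ---
# A minimal sketch of embedding a FigureCanvasGTK in a plain gtk.Window,
# loosely modelled on matplotlib's embedding_in_gtk demo.  It assumes a
# working pygtk installation; the _demo_* helper name and sizes are arbitrary.
def _demo_embed_canvas():
    fig = Figure(figsize=(5, 4), dpi=100)
    ax = fig.add_subplot(111)
    ax.plot([0, 1, 2, 3], [10, 20, 25, 30])
    win = gtk.Window()
    win.connect("destroy", lambda widget: gtk.main_quit())
    win.set_default_size(500, 400)
    canvas = FigureCanvasGTK(fig)    # the canvas doubles as a gtk.DrawingArea
    win.add(canvas)
    win.show_all()
    gtk.main()                       # blocks until the window is closed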
| agpl-3.0 |
nan86150/ImageFusion | lib/python2.7/site-packages/matplotlib/mlab.py | 10 | 128046 | """
Numerical python functions written for compatibility with MATLAB
commands with the same names.
MATLAB compatible functions
-------------------------------
:func:`cohere`
Coherence (normalized cross spectral density)
:func:`csd`
    Cross spectral density using Welch's average periodogram
:func:`detrend`
Remove the mean or best fit line from an array
:func:`find`
Return the indices where some condition is true;
numpy.nonzero is similar but more general.
:func:`griddata`
Interpolate irregularly distributed data to a
regular grid.
:func:`prctile`
Find the percentiles of a sequence
:func:`prepca`
Principal Component Analysis
:func:`psd`
    Power spectral density using Welch's average periodogram
:func:`rk4`
A 4th order runge kutta integrator for 1D or ND systems
:func:`specgram`
Spectrogram (spectrum over segments of time)
Miscellaneous functions
-------------------------
Functions that don't exist in MATLAB, but are useful anyway:
:func:`cohere_pairs`
Coherence over all pairs. This is not a MATLAB function, but we
compute coherence a lot in my lab, and we compute it for a lot of
pairs. This function is optimized to do this efficiently by
caching the direct FFTs.
:func:`rk4`
A 4th order Runge-Kutta ODE integrator in case you ever find
yourself stranded without scipy (and the far superior
scipy.integrate tools)
:func:`contiguous_regions`
Return the indices of the regions spanned by some logical mask
:func:`cross_from_below`
Return the indices where a 1D array crosses a threshold from below
:func:`cross_from_above`
Return the indices where a 1D array crosses a threshold from above
:func:`complex_spectrum`
Return the complex-valued frequency spectrum of a signal
:func:`magnitude_spectrum`
Return the magnitude of the frequency spectrum of a signal
:func:`angle_spectrum`
Return the angle (wrapped phase) of the frequency spectrum of a signal
:func:`phase_spectrum`
Return the phase (unwrapped angle) of the frequency spectrum of a signal
:func:`detrend_mean`
Remove the mean from a line.
:func:`demean`
    Remove the mean from a line. This function is the same as
:func:`detrend_mean` except for the default *axis*.
:func:`detrend_linear`
Remove the best fit line from a line.
:func:`detrend_none`
Return the original line.
:func:`stride_windows`
Get all windows in an array in a memory-efficient manner
:func:`stride_repeat`
Repeat an array in a memory-efficient manner
:func:`apply_window`
Apply a window along a given axis
record array helper functions
-------------------------------
A collection of helper methods for numpy record arrays
.. _htmlonly:
See :ref:`misc-examples-index`
:func:`rec2txt`
Pretty print a record array
:func:`rec2csv`
Store record array in CSV file
:func:`csv2rec`
Import record array from CSV file with type inspection
:func:`rec_append_fields`
Adds field(s)/array(s) to record array
:func:`rec_drop_fields`
Drop fields from record array
:func:`rec_join`
Join two record arrays on sequence of fields
:func:`recs_join`
A simple join of multiple recarrays using a single column as a key
:func:`rec_groupby`
Summarize data by groups (similar to SQL GROUP BY)
:func:`rec_summarize`
Helper code to filter rec array fields into new fields
For the rec viewer functions (e.g., rec2csv), there are a bunch of Format
objects you can pass into the functions that will do things like color
negative values red, set percent formatting and scaling, etc.
Example usage::
r = csv2rec('somefile.csv', checkrows=0)
formatd = dict(
weight = FormatFloat(2),
change = FormatPercent(2),
cost = FormatThousands(2),
)
rec2excel(r, 'test.xls', formatd=formatd)
rec2csv(r, 'test.csv', formatd=formatd)
scroll = rec2gtk(r, formatd=formatd)
win = gtk.Window()
win.set_size_request(600,800)
win.add(scroll)
win.show_all()
gtk.main()
Deprecated functions
---------------------
The following are deprecated; please import directly from numpy (with
care--function signatures may differ):
:func:`load`
Load ASCII file - use numpy.loadtxt
:func:`save`
Save ASCII file - use numpy.savetxt
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
from six.moves import map, xrange, zip
if six.PY3:
long = int
import copy
import csv
import operator
import os
import warnings
import numpy as np
ma = np.ma
from matplotlib import verbose
import matplotlib.cbook as cbook
from matplotlib import docstring
from matplotlib.path import Path
def logspace(xmin, xmax, N):
'''
Return N values logarithmically spaced between xmin and xmax.
Call signature::
logspace(xmin, xmax, N)
'''
return np.exp(np.linspace(np.log(xmin), np.log(xmax), N))
def _norm(x):
'''
Return sqrt(x dot x).
Call signature::
_norm(x)
'''
return np.sqrt(np.dot(x, x))
def window_hanning(x):
'''
Return x times the hanning window of len(x).
Call signature::
window_hanning(x)
.. seealso::
:func:`window_none`
:func:`window_none` is another window algorithm.
'''
return np.hanning(len(x))*x
def window_none(x):
'''
No window function; simply return x.
Call signature::
window_none(x)
.. seealso::
:func:`window_hanning`
:func:`window_hanning` is another window algorithm.
'''
return x
def apply_window(x, window, axis=0, return_window=None):
'''
Apply the given window to the given 1D or 2D array along the given axis.
Call signature::
apply_window(x, window, axis=0, return_window=False)
*x*: 1D or 2D array or sequence
Array or sequence containing the data.
    *window*: function or array.
Either a function to generate a window or an array with length
*x*.shape[*axis*]
*axis*: integer
The axis over which to do the repetition.
Must be 0 or 1. The default is 0
*return_window*: bool
If true, also return the 1D values of the window that was applied
'''
x = np.asarray(x)
if x.ndim < 1 or x.ndim > 2:
raise ValueError('only 1D or 2D arrays can be used')
if axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
xshape = list(x.shape)
xshapetarg = xshape.pop(axis)
if cbook.iterable(window):
if len(window) != xshapetarg:
raise ValueError('The len(window) must be the same as the shape '
'of x for the chosen axis')
windowVals = window
else:
windowVals = window(np.ones(xshapetarg, dtype=x.dtype))
if x.ndim == 1:
if return_window:
return windowVals * x, windowVals
else:
return windowVals * x
xshapeother = xshape.pop()
otheraxis = (axis+1) % 2
windowValsRep = stride_repeat(windowVals, xshapeother, axis=otheraxis)
if return_window:
return windowValsRep * x, windowVals
else:
return windowValsRep * x
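# --- Added illustrative example; not part of the original mlab API. ---
# A minimal sketch of apply_window(): taper a stack of three 8-sample
# segments (stored as columns) with a Hanning window and also recover the
# window values.  The _demo_* helper name and the sizes are arbitrary.
def _demo_apply_window():
    segments = np.random.rand(8, 3)      # 8 samples per segment, 3 segments
    tapered, win = apply_window(segments, window_hanning, axis=0,
                                return_window=True)
    # the tapered segments keep their shape; the window has one value per sample
    return tapered.shape, win.shape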
def detrend(x, key=None, axis=None):
'''
Return x with its trend removed.
Call signature::
detrend(x, key='mean')
*x*: array or sequence
Array or sequence containing the data.
*key*: [ 'default' | 'constant' | 'mean' | 'linear' | 'none'] or function
Specifies the detrend algorithm to use. 'default' is 'mean',
        which is the same as :func:`detrend_mean`. 'constant' is the same as 'mean'.
'linear' is the same as :func:`detrend_linear`. 'none' is the same
as :func:`detrend_none`. The default is 'mean'. See the
corresponding functions for more details regarding the algorithms.
Can also be a function that carries out the detrend operation.
*axis*: integer
The axis along which to do the detrending.
.. seealso::
:func:`detrend_mean`
:func:`detrend_mean` implements the 'mean' algorithm.
:func:`detrend_linear`
:func:`detrend_linear` implements the 'linear' algorithm.
:func:`detrend_none`
:func:`detrend_none` implements the 'none' algorithm.
'''
if key is None or key in ['constant', 'mean', 'default']:
return detrend(x, key=detrend_mean, axis=axis)
elif key == 'linear':
return detrend(x, key=detrend_linear, axis=axis)
elif key == 'none':
return detrend(x, key=detrend_none, axis=axis)
elif cbook.is_string_like(key):
raise ValueError("Unknown value for key %s, must be one of: "
"'default', 'constant', 'mean', "
"'linear', or a function" % key)
if not callable(key):
raise ValueError("Unknown value for key %s, must be one of: "
"'default', 'constant', 'mean', "
"'linear', or a function" % key)
x = np.asarray(x)
if axis is not None and axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
if (axis is None and x.ndim == 0) or (not axis and x.ndim == 1):
return key(x)
# try to use the 'axis' argument if the function supports it,
# otherwise use apply_along_axis to do it
try:
return key(x, axis=axis)
except TypeError:
return np.apply_along_axis(key, axis=axis, arr=x)
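# --- Added illustrative example; not part of the original mlab API. ---
# A small sketch of the string-key interface of detrend(): remove a linear
# trend from a noisy ramp.  The numbers are arbitrary.
def _demo_detrend():
    t = np.arange(100, dtype=float)
    signal = 3.0 * t + 2.0 + np.random.randn(100)    # line plus noise
    residual = detrend(signal, key='linear')
    return residual.mean()                           # approximately zero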
def demean(x, axis=0):
'''
Return x minus its mean along the specified axis.
Call signature::
demean(x, axis=0)
*x*: array or sequence
Array or sequence containing the data
Can have any dimensionality
*axis*: integer
The axis along which to take the mean. See numpy.mean for a
description of this argument.
.. seealso::
:func:`delinear`
:func:`denone`
:func:`delinear` and :func:`denone` are other detrend algorithms.
:func:`detrend_mean`
            This function is the same as :func:`detrend_mean` except
for the default *axis*.
'''
return detrend_mean(x, axis=axis)
def detrend_mean(x, axis=None):
'''
Return x minus the mean(x).
Call signature::
detrend_mean(x, axis=None)
*x*: array or sequence
Array or sequence containing the data
Can have any dimensionality
*axis*: integer
The axis along which to take the mean. See numpy.mean for a
description of this argument.
.. seealso::
:func:`demean`
            This function is the same as :func:`demean` except
for the default *axis*.
:func:`detrend_linear`
:func:`detrend_none`
:func:`detrend_linear` and :func:`detrend_none` are other
detrend algorithms.
:func:`detrend`
:func:`detrend` is a wrapper around all the detrend algorithms.
'''
x = np.asarray(x)
if axis is not None and axis+1 > x.ndim:
raise ValueError('axis(=%s) out of bounds' % axis)
# short-circuit 0-D array.
if not x.ndim:
return np.array(0., dtype=x.dtype)
# short-circuit simple operations
if axis == 0 or axis is None or x.ndim <= 1:
return x - x.mean(axis)
ind = [slice(None)] * x.ndim
ind[axis] = np.newaxis
return x - x.mean(axis)[ind]
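# --- Added illustrative example; not part of the original mlab API. ---
# A sketch of detrend_mean() with an explicit axis: subtract the per-column
# mean of a 2-D array.  The offsets are arbitrary.
def _demo_detrend_mean():
    data = np.random.rand(100, 3) + np.array([1.0, 2.0, 3.0])
    centered = detrend_mean(data, axis=0)
    return centered.mean(axis=0)                     # approximately [0, 0, 0]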
def detrend_none(x, axis=None):
'''
Return x: no detrending.
Call signature::
detrend_none(x, axis=None)
*x*: any object
An object containing the data
*axis*: integer
This parameter is ignored.
It is included for compatibility with detrend_mean
.. seealso::
:func:`denone`
            This function is the same as :func:`denone` except
for the default *axis*, which has no effect.
:func:`detrend_mean`
:func:`detrend_linear`
:func:`detrend_mean` and :func:`detrend_linear` are other
detrend algorithms.
:func:`detrend`
:func:`detrend` is a wrapper around all the detrend algorithms.
'''
return x
def detrend_linear(y):
'''
Return x minus best fit line; 'linear' detrending.
Call signature::
detrend_linear(y)
*y*: 0-D or 1-D array or sequence
Array or sequence containing the data
*axis*: integer
The axis along which to take the mean. See numpy.mean for a
description of this argument.
.. seealso::
:func:`delinear`
            This function is the same as :func:`delinear` except
for the default *axis*.
:func:`detrend_mean`
:func:`detrend_none`
:func:`detrend_mean` and :func:`detrend_none` are other
detrend algorithms.
:func:`detrend`
:func:`detrend` is a wrapper around all the detrend algorithms.
'''
# This is faster than an algorithm based on linalg.lstsq.
y = np.asarray(y)
if y.ndim > 1:
raise ValueError('y cannot have ndim > 1')
# short-circuit 0-D array.
if not y.ndim:
return np.array(0., dtype=y.dtype)
x = np.arange(y.size, dtype=np.float_)
C = np.cov(x, y, bias=1)
b = C[0, 1]/C[0, 0]
a = y.mean() - b*x.mean()
return y - (b*x + a)
def stride_windows(x, n, noverlap=None, axis=0):
'''
Get all windows of x with length n as a single array,
using strides to avoid data duplication.
.. warning::
It is not safe to write to the output array. Multiple
elements may point to the same piece of memory,
so modifying one value may change others.
Call signature::
stride_windows(x, n, noverlap=0)
*x*: 1D array or sequence
Array or sequence containing the data.
*n*: integer
The number of data points in each window.
*noverlap*: integer
The overlap between adjacent windows.
Default is 0 (no overlap)
*axis*: integer
The axis along which the windows will run.
Refs:
        `stackoverflow: Rolling window for 1D arrays in Numpy?
<http://stackoverflow.com/a/6811241>`_
        `stackoverflow: Using strides for an efficient moving average filter
<http://stackoverflow.com/a/4947453>`_
'''
if noverlap is None:
noverlap = 0
if noverlap >= n:
raise ValueError('noverlap must be less than n')
if n < 1:
raise ValueError('n cannot be less than 1')
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('only 1-dimensional arrays can be used')
if n == 1 and noverlap == 0:
if axis == 0:
return x[np.newaxis]
else:
return x[np.newaxis].transpose()
if n > x.size:
raise ValueError('n cannot be greater than the length of x')
# np.lib.stride_tricks.as_strided easily leads to memory corruption for
# non integer shape and strides, i.e. noverlap or n. See #3845.
noverlap = int(noverlap)
n = int(n)
step = n - noverlap
if axis == 0:
shape = (n, (x.shape[-1]-noverlap)//step)
strides = (x.strides[0], step*x.strides[0])
else:
shape = ((x.shape[-1]-noverlap)//step, n)
strides = (step*x.strides[0], x.strides[0])
return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
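# --- Added illustrative example; not part of the original mlab API. ---
# A sketch of stride_windows(): slice a 10-sample signal into 4-sample
# windows overlapping by 2 samples.  The result is a strided view of the
# input, so it must be treated as read-only.
def _demo_stride_windows():
    x = np.arange(10)
    w = stride_windows(x, n=4, noverlap=2, axis=0)
    # step = n - noverlap = 2, so there are (10 - 2) // 2 = 4 windows,
    # laid out as columns: w.shape == (4, 4) and w[:, 0] is [0, 1, 2, 3]
    return w.shape, w[:, 0]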
def stride_repeat(x, n, axis=0):
'''
Repeat the values in an array in a memory-efficient manner. Array x is
stacked vertically n times.
.. warning::
It is not safe to write to the output array. Multiple
elements may point to the same piece of memory, so
modifying one value may change others.
Call signature::
stride_repeat(x, n, axis=0)
*x*: 1D array or sequence
Array or sequence containing the data.
*n*: integer
The number of time to repeat the array.
*axis*: integer
The axis along which the data will run.
Refs:
        `stackoverflow: Repeat NumPy array without replicating data?
<http://stackoverflow.com/a/5568169>`_
'''
if axis not in [0, 1]:
raise ValueError('axis must be 0 or 1')
x = np.asarray(x)
if x.ndim != 1:
raise ValueError('only 1-dimensional arrays can be used')
if n == 1:
if axis == 0:
return np.atleast_2d(x)
else:
return np.atleast_2d(x).T
if n < 1:
raise ValueError('n cannot be less than 1')
# np.lib.stride_tricks.as_strided easily leads to memory corruption for
# non integer shape and strides, i.e. n. See #3845.
n = int(n)
if axis == 0:
shape = (n, x.size)
strides = (0, x.strides[0])
else:
shape = (x.size, n)
strides = (x.strides[0], 0)
return np.lib.stride_tricks.as_strided(x, shape=shape, strides=strides)
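# --- Added illustrative example; not part of the original mlab API. ---
# A sketch of stride_repeat(): stack a length-4 window three times without
# copying; all rows are views of the same memory, so treat it as read-only.
def _demo_stride_repeat():
    w = np.hanning(4)
    rep = stride_repeat(w, 3, axis=0)
    return rep.shape                                 # (3, 4)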
def _spectral_helper(x, y=None, NFFT=None, Fs=None, detrend_func=None,
window=None, noverlap=None, pad_to=None,
sides=None, scale_by_freq=None, mode=None):
'''
This is a helper function that implements the commonality between the
psd, csd, spectrogram and complex, magnitude, angle, and phase spectrums.
It is *NOT* meant to be used outside of mlab and may change at any time.
'''
if y is None:
# if y is None use x for y
same_data = True
else:
#The checks for if y is x are so that we can use the same function to
#implement the core of psd(), csd(), and spectrogram() without doing
#extra calculations. We return the unaveraged Pxy, freqs, and t.
same_data = y is x
if Fs is None:
Fs = 2
if noverlap is None:
noverlap = 0
if detrend_func is None:
detrend_func = detrend_none
if window is None:
window = window_hanning
# if NFFT is set to None use the whole signal
if NFFT is None:
NFFT = 256
if mode is None or mode == 'default':
mode = 'psd'
elif mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
raise ValueError("Unknown value for mode %s, must be one of: "
"'default', 'psd', 'complex', "
"'magnitude', 'angle', 'phase'" % mode)
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
#Make sure we're dealing with a numpy array. If y and x were the same
#object to start with, keep them that way
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
if sides is None or sides == 'default':
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
elif sides not in ['onesided', 'twosided']:
raise ValueError("Unknown value for sides %s, must be one of: "
"'default', 'onesided', or 'twosided'" % sides)
# zero pad x and y up to NFFT if they are shorter than NFFT
if len(x) < NFFT:
n = len(x)
x = np.resize(x, (NFFT,))
x[n:] = 0
if not same_data and len(y) < NFFT:
n = len(y)
y = np.resize(y, (NFFT,))
y[n:] = 0
if pad_to is None:
pad_to = NFFT
if mode != 'psd':
scale_by_freq = False
elif scale_by_freq is None:
scale_by_freq = True
# For real x, ignore the negative frequencies unless told otherwise
if sides == 'twosided':
numFreqs = pad_to
if pad_to % 2:
freqcenter = (pad_to - 1)//2 + 1
else:
freqcenter = pad_to//2
scaling_factor = 1.
elif sides == 'onesided':
if pad_to % 2:
numFreqs = (pad_to + 1)//2
else:
numFreqs = pad_to//2 + 1
scaling_factor = 2.
result = stride_windows(x, NFFT, noverlap, axis=0)
result = detrend(result, detrend_func, axis=0)
result, windowVals = apply_window(result, window, axis=0,
return_window=True)
result = np.fft.fft(result, n=pad_to, axis=0)[:numFreqs, :]
freqs = np.fft.fftfreq(pad_to, 1/Fs)[:numFreqs]
if not same_data:
# if same_data is False, mode must be 'psd'
        resultY = stride_windows(y, NFFT, noverlap)
        resultY = detrend(resultY, detrend_func, axis=0)
        resultY = apply_window(resultY, window, axis=0)
resultY = np.fft.fft(resultY, n=pad_to, axis=0)[:numFreqs, :]
result = np.conjugate(result) * resultY
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
# we unwrap the phase later to handle the onesided vs. twosided case
result = np.angle(result)
elif mode == 'complex':
pass
if mode == 'psd':
# Scale the spectrum by the norm of the window to compensate for
# windowing loss; see Bendat & Piersol Sec 11.5.2.
result /= (np.abs(windowVals)**2).sum()
# Also include scaling factors for one-sided densities and dividing by
# the sampling frequency, if desired. Scale everything, except the DC
# component and the NFFT/2 component:
result[1:-1] *= scaling_factor
# MATLAB divides by the sampling frequency so that density function
# has units of dB/Hz and can be integrated by the plotted frequency
# values. Perform the same scaling here.
if scale_by_freq:
result /= Fs
t = np.arange(NFFT/2, len(x) - NFFT/2 + 1, NFFT - noverlap)/Fs
if sides == 'twosided':
# center the frequency range at zero
freqs = np.concatenate((freqs[freqcenter:], freqs[:freqcenter]))
result = np.concatenate((result[freqcenter:, :],
result[:freqcenter, :]), 0)
elif not pad_to % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=0)
return result, freqs, t
def _single_spectrum_helper(x, mode, Fs=None, window=None, pad_to=None,
sides=None):
'''
This is a helper function that implements the commonality between the
complex, magnitude, angle, and phase spectrums.
It is *NOT* meant to be used outside of mlab and may change at any time.
'''
if mode is None or mode == 'psd' or mode == 'default':
raise ValueError('_single_spectrum_helper does not work with %s mode'
% mode)
if pad_to is None:
pad_to = len(x)
spec, freqs, _ = _spectral_helper(x=x, y=None, NFFT=len(x), Fs=Fs,
detrend_func=detrend_none, window=window,
noverlap=0, pad_to=pad_to,
sides=sides,
scale_by_freq=False,
mode=mode)
if mode != 'complex':
spec = spec.real
if len(spec.shape) == 2 and spec.shape[1] == 1:
spec = spec[:, 0]
return spec, freqs
#Split out these keyword docs so that they can be used elsewhere
docstring.interpd.update(Spectral=cbook.dedent("""
Keyword arguments:
*Fs*: scalar
The sampling frequency (samples per time unit). It is used
to calculate the Fourier frequencies, freqs, in cycles per time
unit. The default value is 2.
*window*: callable or ndarray
A function or a vector of length *NFFT*. To create window
vectors see :func:`window_hanning`, :func:`window_none`,
:func:`numpy.blackman`, :func:`numpy.hamming`,
:func:`numpy.bartlett`, :func:`scipy.signal`,
:func:`scipy.signal.get_window`, etc. The default is
:func:`window_hanning`. If a function is passed as the
argument, it must take a data segment as an argument and
return the windowed version of the segment.
*sides*: [ 'default' | 'onesided' | 'twosided' ]
Specifies which sides of the spectrum to return. Default gives the
default behavior, which returns one-sided for real data and both
for complex data. 'onesided' forces the return of a one-sided
spectrum, while 'twosided' forces two-sided.
"""))
docstring.interpd.update(Single_Spectrum=cbook.dedent("""
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. While not increasing the actual resolution of
the spectrum (the minimum distance between resolvable peaks),
this can give more points in the plot, allowing for more
detail. This corresponds to the *n* parameter in the call to fft().
The default is None, which sets *pad_to* equal to the length of the
input signal (i.e. no padding).
"""))
docstring.interpd.update(PSD=cbook.dedent("""
*pad_to*: integer
The number of points to which the data segment is padded when
performing the FFT. This can be different from *NFFT*, which
specifies the number of data points used. While not increasing
the actual resolution of the spectrum (the minimum distance between
resolvable peaks), this can give more points in the plot,
allowing for more detail. This corresponds to the *n* parameter
in the call to fft(). The default is None, which sets *pad_to*
equal to *NFFT*
*NFFT*: integer
The number of data points used in each block for the FFT.
        A power of 2 is most efficient. The default value is 256.
This should *NOT* be used to get zero padding, or the scaling of the
result will be incorrect. Use *pad_to* for this instead.
*detrend*: [ 'default' | 'constant' | 'mean' | 'linear' | 'none'] or
callable
The function applied to each segment before fft-ing,
designed to remove the mean or linear trend. Unlike in
MATLAB, where the *detrend* parameter is a vector, in
        matplotlib it is a function. The :mod:`~matplotlib.pylab`
module defines :func:`~matplotlib.pylab.detrend_none`,
:func:`~matplotlib.pylab.detrend_mean`, and
:func:`~matplotlib.pylab.detrend_linear`, but you can use
a custom function as well. You can also use a string to choose
one of the functions. 'default', 'constant', and 'mean' call
:func:`~matplotlib.pylab.detrend_mean`. 'linear' calls
:func:`~matplotlib.pylab.detrend_linear`. 'none' calls
:func:`~matplotlib.pylab.detrend_none`.
*scale_by_freq*: boolean
Specifies whether the resulting density values should be scaled
by the scaling frequency, which gives density in units of Hz^-1.
This allows for integration over the returned frequency values.
The default is True for MATLAB compatibility.
"""))
@docstring.dedent_interpd
def psd(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
"""
Compute the power spectral density.
Call signature::
psd(x, NFFT=256, Fs=2, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None)
The power spectral density :math:`P_{xx}` by Welch's average
periodogram method. The vector *x* is divided into *NFFT* length
segments. Each segment is detrended by function *detrend* and
windowed by function *window*. *noverlap* gives the length of
the overlap between segments. The :math:`|\mathrm{fft}(i)|^2`
of each segment :math:`i` are averaged to compute :math:`P_{xx}`.
If len(*x*) < *NFFT*, it will be zero padded to *NFFT*.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between segments.
The default value is 0 (no overlap).
Returns the tuple (*Pxx*, *freqs*).
*Pxx*: 1-D array
The values for the power spectrum `P_{xx}` (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *Pxx*
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
.. seealso::
:func:`specgram`
:func:`specgram` differs in the default overlap; in not returning
the mean of the segment periodograms; and in returning the
times of the segments.
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` returns the magnitude spectrum.
:func:`csd`
:func:`csd` returns the spectral density between two signals.
"""
Pxx, freqs = csd(x=x, y=None, NFFT=NFFT, Fs=Fs, detrend=detrend,
window=window, noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq)
return Pxx.real, freqs
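# --- Added illustrative example; not part of the original mlab API. ---
# A sketch of psd(): estimate the power spectral density of a 100 Hz sine
# in white noise sampled at 1 kHz and locate the spectral peak.  The signal
# parameters are arbitrary.
def _demo_psd():
    Fs = 1000.0
    t = np.arange(0, 2.0, 1.0 / Fs)
    x = np.sin(2 * np.pi * 100.0 * t) + 0.1 * np.random.randn(len(t))
    Pxx, freqs = psd(x, NFFT=256, Fs=Fs, noverlap=128)
    return freqs[np.argmax(Pxx)]                     # expected near 100 Hz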
@docstring.dedent_interpd
def csd(x, y, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None):
"""
Compute the cross-spectral density.
Call signature::
csd(x, y, NFFT=256, Fs=2, detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=0, pad_to=None,
sides='default', scale_by_freq=None)
The cross spectral density :math:`P_{xy}` by Welch's average
periodogram method. The vectors *x* and *y* are divided into
*NFFT* length segments. Each segment is detrended by function
*detrend* and windowed by function *window*. *noverlap* gives
the length of the overlap between segments. The product of
the direct FFTs of *x* and *y* are averaged over each segment
to compute :math:`P_{xy}`, with a scaling to correct for power
loss due to windowing.
If len(*x*) < *NFFT* or len(*y*) < *NFFT*, they will be zero
padded to *NFFT*.
*x*, *y*: 1-D arrays or sequences
Arrays or sequences containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between segments.
The default value is 0 (no overlap).
Returns the tuple (*Pxy*, *freqs*):
*Pxy*: 1-D array
The values for the cross spectrum `P_{xy}` before scaling
(real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *Pxy*
Refs:
Bendat & Piersol -- Random Data: Analysis and Measurement
Procedures, John Wiley & Sons (1986)
.. seealso::
:func:`psd`
:func:`psd` is the equivalent to setting y=x.
"""
if NFFT is None:
NFFT = 256
Pxy, freqs, _ = _spectral_helper(x=x, y=y, NFFT=NFFT, Fs=Fs,
detrend_func=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides, scale_by_freq=scale_by_freq,
mode='psd')
if len(Pxy.shape) == 2:
if Pxy.shape[1] > 1:
Pxy = Pxy.mean(axis=1)
else:
Pxy = Pxy[:, 0]
return Pxy, freqs
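# --- Added illustrative example; not part of the original mlab API. ---
# A sketch of csd(): the cross spectral density between a 50 Hz sine and a
# delayed copy of itself peaks (in magnitude) at the signal frequency.
def _demo_csd():
    Fs = 1000.0
    t = np.arange(0, 2.0, 1.0 / Fs)
    x = np.sin(2 * np.pi * 50.0 * t) + 0.1 * np.random.randn(len(t))
    y = np.roll(x, 10)                               # delay by 10 samples
    Pxy, freqs = csd(x, y, NFFT=256, Fs=Fs, noverlap=128)
    return freqs[np.argmax(np.abs(Pxy))]             # expected near 50 Hz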
@docstring.dedent_interpd
def complex_spectrum(x, Fs=None, window=None, pad_to=None,
sides=None):
"""
Compute the complex-valued frequency spectrum of *x*. Data is padded to a
length of *pad_to* and the windowing function *window* is applied to the
signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Returns the tuple (*spectrum*, *freqs*):
*spectrum*: 1-D array
The values for the complex spectrum (complex valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
.. seealso::
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` returns the absolute value of this
function.
:func:`angle_spectrum`
:func:`angle_spectrum` returns the angle of this
function.
:func:`phase_spectrum`
:func:`phase_spectrum` returns the phase (unwrapped angle) of this
function.
:func:`specgram`
:func:`specgram` can return the complex spectrum of segments
within the signal.
"""
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
sides=sides, mode='complex')
@docstring.dedent_interpd
def magnitude_spectrum(x, Fs=None, window=None, pad_to=None,
sides=None):
"""
Compute the magnitude (absolute value) of the frequency spectrum of
*x*. Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Returns the tuple (*spectrum*, *freqs*):
*spectrum*: 1-D array
The values for the magnitude spectrum (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
.. seealso::
:func:`psd`
:func:`psd` returns the power spectral density.
:func:`complex_spectrum`
This function returns the absolute value of
:func:`complex_spectrum`.
:func:`angle_spectrum`
:func:`angle_spectrum` returns the angles of the corresponding
frequencies.
:func:`phase_spectrum`
:func:`phase_spectrum` returns the phase (unwrapped angle) of the
corresponding frequencies.
:func:`specgram`
:func:`specgram` can return the magnitude spectrum of segments
within the signal.
"""
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
sides=sides, mode='magnitude')
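# --- Added illustrative example; not part of the original mlab API. ---
# A sketch of magnitude_spectrum(): a pure 200 Hz tone sampled at 1 kHz has
# a single dominant bin near 200 Hz.
def _demo_magnitude_spectrum():
    Fs = 1000.0
    t = np.arange(0, 1.0, 1.0 / Fs)
    x = np.cos(2 * np.pi * 200.0 * t)
    spec, freqs = magnitude_spectrum(x, Fs=Fs)
    return freqs[np.argmax(spec)]                    # expected near 200 Hz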
@docstring.dedent_interpd
def angle_spectrum(x, Fs=None, window=None, pad_to=None,
sides=None):
"""
Compute the angle of the frequency spectrum (wrapped phase spectrum) of
*x*. Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Returns the tuple (*spectrum*, *freqs*):
*spectrum*: 1-D array
The values for the angle spectrum in radians (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
.. seealso::
:func:`complex_spectrum`
This function returns the angle value of
:func:`complex_spectrum`.
:func:`magnitude_spectrum`
            :func:`magnitude_spectrum` returns the magnitudes of the
corresponding frequencies.
:func:`phase_spectrum`
:func:`phase_spectrum` returns the unwrapped version of this
function.
:func:`specgram`
:func:`specgram` can return the angle spectrum of segments
within the signal.
"""
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
sides=sides, mode='angle')
@docstring.dedent_interpd
def phase_spectrum(x, Fs=None, window=None, pad_to=None,
sides=None):
"""
Compute the phase of the frequency spectrum (unwrapped angle spectrum) of
*x*. Data is padded to a length of *pad_to* and the windowing function
*window* is applied to the signal.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(Single_Spectrum)s
Returns the tuple (*spectrum*, *freqs*):
*spectrum*: 1-D array
The values for the phase spectrum in radians (real valued)
*freqs*: 1-D array
The frequencies corresponding to the elements in *spectrum*
.. seealso::
:func:`complex_spectrum`
            This function returns the unwrapped angle (phase) of
:func:`complex_spectrum`.
:func:`magnitude_spectrum`
:func:`magnitude_spectrum` returns the magnitudes of the
corresponding frequencies.
:func:`angle_spectrum`
:func:`angle_spectrum` returns the wrapped version of this
function.
:func:`specgram`
:func:`specgram` can return the phase spectrum of segments
within the signal.
"""
return _single_spectrum_helper(x=x, Fs=Fs, window=window, pad_to=pad_to,
sides=sides, mode='phase')
@docstring.dedent_interpd
def specgram(x, NFFT=None, Fs=None, detrend=None, window=None,
noverlap=None, pad_to=None, sides=None, scale_by_freq=None,
mode=None):
"""
Compute a spectrogram.
Call signature::
specgram(x, NFFT=256, Fs=2,detrend=mlab.detrend_none,
window=mlab.window_hanning, noverlap=128,
cmap=None, xextent=None, pad_to=None, sides='default',
scale_by_freq=None, mode='default')
Compute and plot a spectrogram of data in *x*. Data are split into
*NFFT* length segments and the spectrum of each section is
computed. The windowing function *window* is applied to each
segment, and the amount of overlap of each segment is
specified with *noverlap*.
*x*: 1-D array or sequence
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*mode*: [ 'default' | 'psd' | 'complex' | 'magnitude'
'angle' | 'phase' ]
        What sort of spectrum to use. Default is 'psd', which takes the
power spectral density. 'complex' returns the complex-valued
frequency spectrum. 'magnitude' returns the magnitude spectrum.
'angle' returns the phase spectrum without unwrapping. 'phase'
returns the phase spectrum with unwrapping.
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 128.
Returns the tuple (*spectrum*, *freqs*, *t*):
*spectrum*: 2-D array
columns are the periodograms of successive segments
*freqs*: 1-D array
The frequencies corresponding to the rows in *spectrum*
*t*: 1-D array
The times corresponding to midpoints of segments (i.e the columns
in *spectrum*).
.. note::
*detrend* and *scale_by_freq* only apply when *mode* is set to
'psd'
.. seealso::
:func:`psd`
:func:`psd` differs in the default overlap; in returning
the mean of the segment periodograms; and in not returning
times.
:func:`complex_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'complex'.
:func:`magnitude_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'magnitude'.
:func:`angle_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'angle'.
:func:`phase_spectrum`
A single spectrum, similar to having a single segment when
*mode* is 'phase'.
"""
if noverlap is None:
noverlap = 128
spec, freqs, t = _spectral_helper(x=x, y=None, NFFT=NFFT, Fs=Fs,
detrend_func=detrend, window=window,
noverlap=noverlap, pad_to=pad_to,
sides=sides,
scale_by_freq=scale_by_freq,
mode=mode)
if mode != 'complex':
spec = spec.real # Needed since helper implements generically
return spec, freqs, t
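# --- Added illustrative example; not part of the original mlab API. ---
# A sketch of specgram(): a signal that switches from 100 Hz to 300 Hz half
# way through; the returned array is laid out as (frequencies x segments).
def _demo_specgram():
    Fs = 1000.0
    t = np.arange(0, 2.0, 1.0 / Fs)
    x = np.where(t < 1.0,
                 np.sin(2 * np.pi * 100.0 * t),
                 np.sin(2 * np.pi * 300.0 * t))
    spec, freqs, times = specgram(x, NFFT=256, Fs=Fs, noverlap=128)
    return spec.shape == (len(freqs), len(times))    # True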
_coh_error = """Coherence is calculated by averaging over *NFFT*
length segments. Your signal is too short for your choice of *NFFT*.
"""
@docstring.dedent_interpd
def cohere(x, y, NFFT=256, Fs=2, detrend=detrend_none, window=window_hanning,
noverlap=0, pad_to=None, sides='default', scale_by_freq=None):
"""
The coherence between *x* and *y*. Coherence is the normalized
cross spectral density:
.. math::
C_{xy} = \\frac{|P_{xy}|^2}{P_{xx}P_{yy}}
*x*, *y*
Array or sequence containing the data
%(Spectral)s
%(PSD)s
*noverlap*: integer
The number of points of overlap between blocks. The default value
is 0 (no overlap).
The return value is the tuple (*Cxy*, *f*), where *f* are the
frequencies of the coherence vector. For cohere, scaling the
individual densities by the sampling frequency has no effect,
since the factors cancel out.
.. seealso::
:func:`psd` and :func:`csd`
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
if len(x) < 2 * NFFT:
raise ValueError(_coh_error)
Pxx, f = psd(x, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pyy, f = psd(y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Pxy, f = csd(x, y, NFFT, Fs, detrend, window, noverlap, pad_to, sides,
scale_by_freq)
Cxy = np.divide(np.absolute(Pxy)**2, Pxx*Pyy)
Cxy.shape = (len(f),)
return Cxy, f
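# --- Added illustrative example; not part of the original mlab API. ---
# A sketch of cohere(): two noisy copies of the same 10 Hz sine are highly
# coherent near 10 Hz and weakly coherent elsewhere.
def _demo_cohere():
    Fs = 500.0
    t = np.arange(0, 10.0, 1.0 / Fs)
    s = np.sin(2 * np.pi * 10.0 * t)
    x = s + 0.5 * np.random.randn(len(t))
    y = s + 0.5 * np.random.randn(len(t))
    Cxy, f = cohere(x, y, NFFT=256, Fs=Fs)
    return f[np.argmax(Cxy)]                         # expected near 10 Hz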
def donothing_callback(*args):
pass
def cohere_pairs( X, ij, NFFT=256, Fs=2, detrend=detrend_none,
window=window_hanning, noverlap=0,
preferSpeedOverMemory=True,
progressCallback=donothing_callback,
returnPxx=False):
"""
Call signature::
Cxy, Phase, freqs = cohere_pairs( X, ij, ...)
Compute the coherence and phase for all pairs *ij*, in *X*.
*X* is a *numSamples* * *numCols* array
*ij* is a list of tuples. Each tuple is a pair of indexes into
the columns of X for which you want to compute coherence. For
example, if *X* has 64 columns, and you want to compute all
nonredundant pairs, define *ij* as::
ij = []
for i in range(64):
for j in range(i+1,64):
ij.append( (i,j) )
*preferSpeedOverMemory* is an optional bool. Defaults to true. If
False, limits the caching by only making one, rather than two,
complex cache arrays. This is useful if memory becomes critical.
Even when *preferSpeedOverMemory* is False, :func:`cohere_pairs`
    will still give significant performance gains over calling
    :func:`cohere` for each pair, and will use substantially less
memory than if *preferSpeedOverMemory* is True. In my tests with
a 43000,64 array over all nonredundant pairs,
*preferSpeedOverMemory* = True delivered a 33% performance boost
    on a 1.7GHz Athlon with 512MB RAM compared with
*preferSpeedOverMemory* = False. But both solutions were more
than 10x faster than naively crunching all possible pairs through
:func:`cohere`.
Returns::
(Cxy, Phase, freqs)
where:
- *Cxy*: dictionary of (*i*, *j*) tuples -> coherence vector for
        that pair. i.e., ``Cxy[(i,j)] = cohere(X[:,i], X[:,j])``.
Number of dictionary keys is ``len(ij)``.
- *Phase*: dictionary of phases of the cross spectral density at
each frequency for each pair. Keys are (*i*, *j*).
- *freqs*: vector of frequencies, equal in length to either the
coherence or phase vectors for any (*i*, *j*) key.
e.g., to make a coherence Bode plot::
subplot(211)
plot( freqs, Cxy[(12,19)])
subplot(212)
plot( freqs, Phase[(12,19)])
For a large number of pairs, :func:`cohere_pairs` can be much more
efficient than just calling :func:`cohere` for each pair, because
it caches most of the intensive computations. If :math:`N` is the
number of pairs, this function is :math:`O(N)` for most of the
heavy lifting, whereas calling cohere for each pair is
:math:`O(N^2)`. However, because of the caching, it is also more
memory intensive, making 2 additional complex arrays with
approximately the same number of elements as *X*.
See :file:`test/cohere_pairs_test.py` in the src tree for an
example script that shows that this :func:`cohere_pairs` and
:func:`cohere` give the same results for a given pair.
.. seealso::
:func:`psd`
For information about the methods used to compute
:math:`P_{xy}`, :math:`P_{xx}` and :math:`P_{yy}`.
"""
numRows, numCols = X.shape
# zero pad if X is too short
if numRows < NFFT:
tmp = X
X = np.zeros( (NFFT, numCols), X.dtype)
X[:numRows,:] = tmp
del tmp
numRows, numCols = X.shape
# get all the columns of X that we are interested in by checking
# the ij tuples
allColumns = set()
for i,j in ij:
allColumns.add(i); allColumns.add(j)
Ncols = len(allColumns)
# for real X, ignore the negative frequencies
if np.iscomplexobj(X): numFreqs = NFFT
else: numFreqs = NFFT//2+1
    # cache the FFT of every windowed, detrended NFFT length segment
# of every channel. If preferSpeedOverMemory, cache the conjugate
# as well
if cbook.iterable(window):
assert(len(window) == NFFT)
windowVals = window
else:
windowVals = window(np.ones(NFFT, X.dtype))
ind = list(xrange(0, numRows-NFFT+1, NFFT-noverlap))
numSlices = len(ind)
FFTSlices = {}
FFTConjSlices = {}
Pxx = {}
slices = range(numSlices)
normVal = np.linalg.norm(windowVals)**2
    for i, iCol in enumerate(allColumns):
        progressCallback(i / float(Ncols), 'Caching FFTs')
Slices = np.zeros( (numSlices,numFreqs), dtype=np.complex_)
for iSlice in slices:
thisSlice = X[ind[iSlice]:ind[iSlice]+NFFT, iCol]
thisSlice = windowVals*detrend(thisSlice)
Slices[iSlice,:] = np.fft.fft(thisSlice)[:numFreqs]
FFTSlices[iCol] = Slices
if preferSpeedOverMemory:
FFTConjSlices[iCol] = np.conjugate(Slices)
Pxx[iCol] = np.divide(np.mean(abs(Slices)**2, axis=0), normVal)
del Slices, ind, windowVals
# compute the coherences and phases for all pairs using the
# cached FFTs
Cxy = {}
Phase = {}
count = 0
N = len(ij)
for i,j in ij:
count +=1
if count%10==0:
progressCallback(count/N, 'Computing coherences')
if preferSpeedOverMemory:
Pxy = FFTSlices[i] * FFTConjSlices[j]
else:
Pxy = FFTSlices[i] * np.conjugate(FFTSlices[j])
if numSlices>1: Pxy = np.mean(Pxy, axis=0)
#Pxy = np.divide(Pxy, normVal)
Pxy /= normVal
#Cxy[(i,j)] = np.divide(np.absolute(Pxy)**2, Pxx[i]*Pxx[j])
Cxy[i,j] = abs(Pxy)**2 / (Pxx[i]*Pxx[j])
Phase[i,j] = np.arctan2(Pxy.imag, Pxy.real)
freqs = Fs/NFFT*np.arange(numFreqs)
if returnPxx:
return Cxy, Phase, freqs, Pxx
else:
return Cxy, Phase, freqs
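# --- Added illustrative example; not part of the original mlab API. ---
# A sketch of cohere_pairs(): coherence and phase for every channel pair of
# a small 4-channel random recording, using the cached-FFT fast path.
def _demo_cohere_pairs():
    numSamples, numChannels = 4096, 4
    X = np.random.randn(numSamples, numChannels)
    ij = [(i, j) for i in range(numChannels) for j in range(i + 1, numChannels)]
    Cxy, Phase, freqs = cohere_pairs(X, ij, NFFT=256, Fs=1000.0)
    return sorted(Cxy.keys()), len(freqs)            # 6 pairs, 129 frequencies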
def entropy(y, bins):
r"""
Return the entropy of the data in *y* in units of nat.
.. math::
-\sum p_i \ln(p_i)
where :math:`p_i` is the probability of observing *y* in the
:math:`i^{th}` bin of *bins*. *bins* can be a number of bins or a
range of bins; see :func:`numpy.histogram`.
Compare *S* with analytic calculation for a Gaussian::
x = mu + sigma * randn(200000)
Sanalytic = 0.5 * ( 1.0 + log(2*pi*sigma**2.0) )
"""
n, bins = np.histogram(y, bins)
n = n.astype(np.float_)
n = np.take(n, np.nonzero(n)[0]) # get the positive
p = np.divide(n, len(y))
delta = bins[1] - bins[0]
S = -1.0 * np.sum(p * np.log(p)) + np.log(delta)
return S
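# --- Added illustrative example; not part of the original mlab API. ---
# A sketch of entropy(): compare the histogram-based estimate for Gaussian
# samples with the closed-form value 0.5 * (1 + log(2*pi*sigma**2)).
def _demo_entropy():
    sigma = 2.0
    y = sigma * np.random.randn(200000)
    S_est = entropy(y, bins=200)
    S_analytic = 0.5 * (1.0 + np.log(2 * np.pi * sigma ** 2))
    return S_est, S_analytic                         # the two should be close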
def normpdf(x, *args):
"Return the normal pdf evaluated at *x*; args provides *mu*, *sigma*"
mu, sigma = args
return 1./(np.sqrt(2*np.pi)*sigma)*np.exp(-0.5 * (1./sigma*(x - mu))**2)
def find(condition):
"Return the indices where ravel(condition) is true"
res, = np.nonzero(np.ravel(condition))
return res
def longest_contiguous_ones(x):
"""
Return the indices of the longest stretch of contiguous ones in *x*,
assuming *x* is a vector of zeros and ones. If there are two
equally long stretches, pick the first.
"""
x = np.ravel(x)
if len(x)==0:
return np.array([])
ind = (x==0).nonzero()[0]
if len(ind)==0:
return np.arange(len(x))
if len(ind)==len(x):
return np.array([])
y = np.zeros( (len(x)+2,), x.dtype)
y[1:-1] = x
dif = np.diff(y)
    up = (dif == 1).nonzero()[0]
    dn = (dif == -1).nonzero()[0]
i = (dn-up == max(dn - up)).nonzero()[0][0]
ind = np.arange(up[i], dn[i])
return ind
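# Illustrative sketch: longest_contiguous_ones() on a small 0/1 vector.  The
# helper name and data are made up for demonstration.
def _longest_ones_demo():
    x = np.array([0, 1, 1, 0, 1, 1, 1, 0, 1])
    # the longest run of ones is x[4:7], so the indices 4, 5, 6 are returned
    return longest_contiguous_ones(x)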
def longest_ones(x):
'''alias for longest_contiguous_ones'''
return longest_contiguous_ones(x)
def prepca(P, frac=0):
"""
.. warning::
This function is deprecated -- please see class PCA instead
Compute the principal components of *P*. *P* is a (*numVars*,
*numObs*) array. *frac* is the minimum fraction of variance that a
component must contain to be included.
Return value is a tuple of the form (*Pcomponents*, *Trans*,
*fracVar*) where:
- *Pcomponents* : a (numVars, numObs) array
- *Trans* : the weights matrix, i.e., *Pcomponents* = *Trans* *
*P*
- *fracVar* : the fraction of the variance accounted for by each
component returned
A similar function of the same name was in the MATLAB
R13 Neural Network Toolbox but is not found in later versions;
its successor seems to be called "processpcs".
"""
warnings.warn('This function is deprecated -- see class PCA instead')
U,s,v = np.linalg.svd(P)
varEach = s**2/P.shape[1]
totVar = varEach.sum()
fracVar = varEach/totVar
ind = slice((fracVar>=frac).sum())
# select the components that are greater
Trans = U[:,ind].transpose()
# The transformed data
Pcomponents = np.dot(Trans,P)
return Pcomponents, Trans, fracVar[ind]
class PCA:
def __init__(self, a, standardize=True):
"""
compute the SVD of a and store data for PCA. Use project to
project the data onto a reduced set of dimensions
Inputs:
*a*: a numobservations x numdims array
*standardize*: True if input data are to be standardized. If False, only centering will be
carried out.
Attrs:
*a* a centered unit sigma version of input a
*numrows*, *numcols*: the dimensions of a
*mu* : a numdims array of means of a. This is the vector that points to the
origin of PCA space.
*sigma* : a numdims array of standard deviation of a
*fracs* : the proportion of variance of each of the principal components
*s* : the actual eigenvalues of the decomposition
*Wt* : the weight vector for projecting a numdims point or array into PCA space
*Y* : a projected into PCA space
The factor loadings are in the Wt factor, i.e., the factor
loadings for the 1st principal component are given by Wt[0].
This row is also the 1st eigenvector.
"""
n, m = a.shape
if n<m:
raise RuntimeError('we assume data in a is organized with numrows>numcols')
self.numrows, self.numcols = n, m
self.mu = a.mean(axis=0)
self.sigma = a.std(axis=0)
self.standardize = standardize
a = self.center(a)
self.a = a
U, s, Vh = np.linalg.svd(a, full_matrices=False)
# Note: .H indicates the conjugate transposed / Hermitian.
# The SVD is commonly written as a = U s V.H.
# If U is a unitary matrix, it means that it satisfies U.H = inv(U).
# The rows of Vh are the eigenvectors of a.H a.
# The columns of U are the eigenvectors of a a.H.
# For row i in Vh and column i in U, the corresponding eigenvalue is s[i]**2.
self.Wt = Vh
# save the transposed coordinates
Y = np.dot(Vh, a.T).T
self.Y = Y
# save the eigenvalues
self.s = s**2
# and now the contribution of the individual components
vars = self.s/float(len(s))
self.fracs = vars/vars.sum()
def project(self, x, minfrac=0.):
        'project x onto the principal axes, dropping any axes where fraction of variance<minfrac'
x = np.asarray(x)
ndims = len(x.shape)
if (x.shape[-1]!=self.numcols):
raise ValueError('Expected an array with dims[-1]==%d'%self.numcols)
Y = np.dot(self.Wt, self.center(x).T).T
mask = self.fracs>=minfrac
if ndims==2:
Yreduced = Y[:,mask]
else:
Yreduced = Y[mask]
return Yreduced
def center(self, x):
'center and optionally standardize the data using the mean and sigma from training set a'
if self.standardize:
return (x - self.mu)/self.sigma
else:
return (x - self.mu)
@staticmethod
def _get_colinear():
c0 = np.array([
0.19294738, 0.6202667 , 0.45962655, 0.07608613, 0.135818 ,
0.83580842, 0.07218851, 0.48318321, 0.84472463, 0.18348462,
0.81585306, 0.96923926, 0.12835919, 0.35075355, 0.15807861,
0.837437 , 0.10824303, 0.1723387 , 0.43926494, 0.83705486])
c1 = np.array([
-1.17705601, -0.513883 , -0.26614584, 0.88067144, 1.00474954,
-1.1616545 , 0.0266109 , 0.38227157, 1.80489433, 0.21472396,
-1.41920399, -2.08158544, -0.10559009, 1.68999268, 0.34847107,
-0.4685737 , 1.23980423, -0.14638744, -0.35907697, 0.22442616])
c2 = c0 + 2*c1
c3 = -3*c0 + 4*c1
a = np.array([c3, c0, c1, c2]).T
return a
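# Hedged usage sketch for the PCA class above; the helper name is made up and
# the colinear test data come from PCA._get_colinear().
def _pca_demo():
    a = PCA._get_colinear()      # (20, 4) array with only 2 independent columns
    p = PCA(a)                   # center/standardize the data and run the SVD
    # p.fracs gives the fraction of variance per component; with colinear data
    # nearly all of the variance sits in the first two components
    reduced = p.project(a, minfrac=0.01)   # drop the near-degenerate axes
    return p.fracs, reduced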
def prctile(x, p = (0.0, 25.0, 50.0, 75.0, 100.0)):
"""
Return the percentiles of *x*. *p* can either be a sequence of
percentile values or a scalar. If *p* is a sequence, the ith
element of the return sequence is the *p*(i)-th percentile of *x*.
    If *p* is a scalar, the single (interpolated) percentile of *x* at
    that percentage point is returned.
"""
# This implementation derived from scipy.stats.scoreatpercentile
def _interpolate(a, b, fraction):
"""Returns the point at the given fraction between a and b, where
'fraction' must be between 0 and 1.
"""
return a + (b - a)*fraction
scalar = True
if cbook.iterable(p):
scalar = False
per = np.array(p)
values = np.array(x).ravel() # copy
values.sort()
idxs = per /100. * (values.shape[0] - 1)
ai = idxs.astype(np.int)
bi = ai + 1
frac = idxs % 1
# handle cases where attempting to interpolate past last index
cond = bi >= len(values)
if scalar:
if cond:
ai -= 1
bi -= 1
frac += 1
else:
ai[cond] -= 1
bi[cond] -= 1
frac[cond] += 1
return _interpolate(values[ai],values[bi],frac)
def prctile_rank(x, p):
"""
    Return the rank for each element in *x*; ranks run from 0 to
    len(*p*). e.g., if *p* = (25, 50, 75), the return value will be a
    len(*x*) array with values in [0,1,2,3] where 0 indicates the
value is less than the 25th percentile, 1 indicates the value is
>= the 25th and < 50th percentile, ... and 3 indicates the value
is above the 75th percentile cutoff.
*p* is either an array of percentiles in [0..100] or a scalar which
indicates how many quantiles of data you want ranked.
"""
if not cbook.iterable(p):
p = np.arange(100.0/p, 100.0, 100.0/p)
else:
p = np.asarray(p)
if p.max()<=1 or p.min()<0 or p.max()>100:
raise ValueError('percentiles should be in range 0..100, not 0..1')
ptiles = prctile(x, p)
return np.searchsorted(ptiles, x)
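# Illustrative sketch: prctile() and prctile_rank() on a simple ramp; the
# helper name and data are assumptions for demonstration only.
def _prctile_demo():
    x = np.arange(21, dtype=float)                  # 0, 1, ..., 20
    quartiles = prctile(x, p=(25.0, 50.0, 75.0))    # -> array([ 5., 10., 15.])
    # rank each value against the same cutoffs, from 0 (below the 25th
    # percentile) up to 3 (above the 75th percentile)
    ranks = prctile_rank(x, p=(25, 50, 75))
    return quartiles, ranks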
def center_matrix(M, dim=0):
"""
Return the matrix *M* with each row having zero mean and unit std.
If *dim* = 1 operate on columns instead of rows. (*dim* is
opposite to the numpy axis kwarg.)
"""
M = np.asarray(M, np.float_)
if dim:
M = (M - M.mean(axis=0)) / M.std(axis=0)
else:
M = (M - M.mean(axis=1)[:,np.newaxis])
M = M / M.std(axis=1)[:,np.newaxis]
return M
def rk4(derivs, y0, t):
"""
Integrate 1D or ND system of ODEs using 4-th order Runge-Kutta.
This is a toy implementation which may be useful if you find
yourself stranded on a system w/o scipy. Otherwise use
:func:`scipy.integrate`.
*y0*
initial state vector
*t*
sample times
*derivs*
returns the derivative of the system and has the
signature ``dy = derivs(yi, ti)``
Example 1 ::
## 2D system
def derivs6(x,t):
d1 = x[0] + 2*x[1]
d2 = -3*x[0] + 4*x[1]
return (d1, d2)
dt = 0.0005
t = arange(0.0, 2.0, dt)
y0 = (1,2)
yout = rk4(derivs6, y0, t)
Example 2::
## 1D system
alpha = 2
def derivs(x,t):
return -alpha*x + exp(-t)
y0 = 1
yout = rk4(derivs, y0, t)
If you have access to scipy, you should probably be using the
scipy.integrate tools rather than this function.
"""
try: Ny = len(y0)
except TypeError:
yout = np.zeros( (len(t),), np.float_)
else:
yout = np.zeros( (len(t), Ny), np.float_)
yout[0] = y0
i = 0
for i in np.arange(len(t)-1):
thist = t[i]
dt = t[i+1] - thist
dt2 = dt/2.0
y0 = yout[i]
k1 = np.asarray(derivs(y0, thist))
k2 = np.asarray(derivs(y0 + dt2*k1, thist+dt2))
k3 = np.asarray(derivs(y0 + dt2*k2, thist+dt2))
k4 = np.asarray(derivs(y0 + dt*k3, thist+dt))
yout[i+1] = y0 + dt/6.0*(k1 + 2*k2 + 2*k3 + k4)
return yout
def bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0,
mux=0.0, muy=0.0, sigmaxy=0.0):
"""
Bivariate Gaussian distribution for equal shape *X*, *Y*.
See `bivariate normal
<http://mathworld.wolfram.com/BivariateNormalDistribution.html>`_
at mathworld.
"""
Xmu = X-mux
Ymu = Y-muy
rho = sigmaxy/(sigmax*sigmay)
z = Xmu**2/sigmax**2 + Ymu**2/sigmay**2 - 2*rho*Xmu*Ymu/(sigmax*sigmay)
denom = 2*np.pi*sigmax*sigmay*np.sqrt(1-rho**2)
return np.exp( -z/(2*(1-rho**2))) / denom
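# Minimal sketch: evaluate bivariate_normal() on a regular grid, e.g. as input
# for a contour plot.  Grid extent and parameters are arbitrary choices.
def _bivariate_normal_demo():
    x = np.linspace(-3.0, 3.0, 101)
    y = np.linspace(-3.0, 3.0, 101)
    X, Y = np.meshgrid(x, y)
    # unit-variance Gaussian with a mild positive correlation
    Z = bivariate_normal(X, Y, sigmax=1.0, sigmay=1.0, sigmaxy=0.3)
    return X, Y, Z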
def get_xyz_where(Z, Cond):
"""
*Z* and *Cond* are *M* x *N* matrices. *Z* are data and *Cond* is
a boolean matrix where some condition is satisfied. Return value
is (*x*, *y*, *z*) where *x* and *y* are the indices into *Z* and
*z* are the values of *Z* at those indices. *x*, *y*, and *z* are
1D arrays.
"""
X,Y = np.indices(Z.shape)
return X[Cond], Y[Cond], Z[Cond]
def get_sparse_matrix(M,N,frac=0.1):
"""
Return a *M* x *N* sparse matrix with *frac* elements randomly
filled.
"""
    data = np.zeros((M, N))
for i in range(int(M*N*frac)):
        x = np.random.randint(0, M)
        y = np.random.randint(0, N)
data[x,y] = np.random.rand()
return data
def dist(x,y):
"""
Return the distance between two points.
"""
d = x-y
return np.sqrt(np.dot(d,d))
def dist_point_to_segment(p, s0, s1):
"""
Get the distance of a point to a segment.
*p*, *s0*, *s1* are *xy* sequences
This algorithm from
http://softsurfer.com/Archive/algorithm_0102/algorithm_0102.htm#Distance%20to%20Ray%20or%20Segment
"""
p = np.asarray(p, np.float_)
s0 = np.asarray(s0, np.float_)
s1 = np.asarray(s1, np.float_)
v = s1 - s0
w = p - s0
    c1 = np.dot(w, v)
    if c1 <= 0:
        return dist(p, s0)
    c2 = np.dot(v, v)
    if c2 <= c1:
        return dist(p, s1)
    b = c1 / c2
    pb = s0 + b * v
return dist(p, pb)
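# Illustrative sketch: point-to-segment distances for the unit segment on the
# x-axis, chosen so the answers are easy to verify by hand.
def _dist_point_to_segment_demo():
    s0, s1 = (0.0, 0.0), (1.0, 0.0)
    d_mid = dist_point_to_segment((0.5, 2.0), s0, s1)   # perpendicular case: 2.0
    d_end = dist_point_to_segment((3.0, 0.0), s0, s1)   # beyond s1: 2.0
    return d_mid, d_end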
def segments_intersect(s1, s2):
"""
Return *True* if *s1* and *s2* intersect.
*s1* and *s2* are defined as::
s1: (x1, y1), (x2, y2)
s2: (x3, y3), (x4, y4)
"""
(x1, y1), (x2, y2) = s1
(x3, y3), (x4, y4) = s2
den = ((y4-y3) * (x2-x1)) - ((x4-x3)*(y2-y1))
n1 = ((x4-x3) * (y1-y3)) - ((y4-y3)*(x1-x3))
n2 = ((x2-x1) * (y1-y3)) - ((y2-y1)*(x1-x3))
if den == 0:
# lines parallel
return False
u1 = n1/den
u2 = n2/den
return 0.0 <= u1 <= 1.0 and 0.0 <= u2 <= 1.0
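# Illustrative sketch: segments_intersect() on a crossing pair and a parallel
# pair of segments.
def _segments_intersect_demo():
    crossing = segments_intersect(((0, 0), (1, 1)), ((0, 1), (1, 0)))   # True
    parallel = segments_intersect(((0, 0), (1, 0)), ((0, 1), (1, 1)))   # False
    return crossing, parallel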
def fftsurr(x, detrend=detrend_none, window=window_none):
"""
Compute an FFT phase randomized surrogate of *x*.
"""
if cbook.iterable(window):
x=window*detrend(x)
else:
x = window(detrend(x))
z = np.fft.fft(x)
a = 2.*np.pi*1j
phase = a * np.random.rand(len(x))
z = z*np.exp(phase)
return np.fft.ifft(z).real
class FIFOBuffer:
"""
A FIFO queue to hold incoming *x*, *y* data in a rotating buffer
using numpy arrays under the hood. It is assumed that you will
call asarrays much less frequently than you add data to the queue
-- otherwise another data structure will be faster.
This can be used to support plots where data is added from a real
time feed and the plot object wants to grab data from the buffer
    and plot it to screen less frequently than the incoming.
If you set the *dataLim* attr to
:class:`~matplotlib.transforms.BBox` (e.g.,
:attr:`matplotlib.Axes.dataLim`), the *dataLim* will be updated as
new data come in.
TODO: add a grow method that will extend nmax
.. note::
mlab seems like the wrong place for this class.
"""
@cbook.deprecated('1.3', name='FIFOBuffer', obj_type='class')
def __init__(self, nmax):
"""
Buffer up to *nmax* points.
"""
self._xa = np.zeros((nmax,), np.float_)
self._ya = np.zeros((nmax,), np.float_)
self._xs = np.zeros((nmax,), np.float_)
self._ys = np.zeros((nmax,), np.float_)
self._ind = 0
self._nmax = nmax
self.dataLim = None
self.callbackd = {}
def register(self, func, N):
"""
Call *func* every time *N* events are passed; *func* signature
is ``func(fifo)``.
"""
self.callbackd.setdefault(N, []).append(func)
def add(self, x, y):
"""
Add scalar *x* and *y* to the queue.
"""
if self.dataLim is not None:
xy = np.asarray([(x,y),])
self.dataLim.update_from_data_xy(xy, None)
ind = self._ind % self._nmax
#print 'adding to fifo:', ind, x, y
self._xs[ind] = x
self._ys[ind] = y
for N,funcs in six.iteritems(self.callbackd):
if (self._ind%N)==0:
for func in funcs:
func(self)
self._ind += 1
def last(self):
"""
Get the last *x*, *y* or *None*. *None* if no data set.
"""
if self._ind==0: return None, None
ind = (self._ind-1) % self._nmax
return self._xs[ind], self._ys[ind]
def asarrays(self):
"""
Return *x* and *y* as arrays; their length will be the len of
data added or *nmax*.
"""
if self._ind<self._nmax:
return self._xs[:self._ind], self._ys[:self._ind]
ind = self._ind % self._nmax
self._xa[:self._nmax-ind] = self._xs[ind:]
self._xa[self._nmax-ind:] = self._xs[:ind]
self._ya[:self._nmax-ind] = self._ys[ind:]
self._ya[self._nmax-ind:] = self._ys[:ind]
return self._xa, self._ya
def update_datalim_to_current(self):
"""
Update the *datalim* in the current data in the fifo.
"""
if self.dataLim is None:
raise ValueError('You must first set the dataLim attr')
x, y = self.asarrays()
self.dataLim.update_from_data(x, y, True)
def movavg(x,n):
"""
Compute the len(*n*) moving average of *x*.
"""
w = np.empty((n,), dtype=np.float_)
w[:] = 1.0/n
return np.convolve(x, w, mode='valid')
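# Minimal sketch: a 3-point moving average of a short ramp; note that 'valid'
# convolution shortens the output by n-1 samples.
def _movavg_demo():
    x = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
    return movavg(x, 3)   # -> array([ 2.,  3.,  4.])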
### the following code was written and submitted by Fernando Perez
### from the ipython numutils package under a BSD license
# begin fperez functions
"""
A set of convenient utilities for numerical work.
Most of this module requires numpy or is meant to be used with it.
Copyright (c) 2001-2004, Fernando Perez. <Fernando.Perez@colorado.edu>
All rights reserved.
This license was generated from the BSD license template as found in:
http://www.opensource.org/licenses/bsd-license.php
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of the IPython project nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import math
#*****************************************************************************
# Globals
#****************************************************************************
# function definitions
exp_safe_MIN = math.log(2.2250738585072014e-308)
exp_safe_MAX = 1.7976931348623157e+308
def exp_safe(x):
"""
Compute exponentials which safely underflow to zero.
Slow, but convenient to use. Note that numpy provides proper
floating point exception handling with access to the underlying
hardware.
"""
if type(x) is np.ndarray:
return np.exp(np.clip(x,exp_safe_MIN,exp_safe_MAX))
else:
return math.exp(x)
def amap(fn,*args):
"""
amap(function, sequence[, sequence, ...]) -> array.
Works like :func:`map`, but it returns an array. This is just a
convenient shorthand for ``numpy.array(map(...))``.
"""
return np.array(list(map(fn,*args)))
def rms_flat(a):
"""
Return the root mean square of all the elements of *a*, flattened out.
"""
return np.sqrt(np.mean(np.absolute(a)**2))
def l1norm(a):
"""
Return the *l1* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sum(np.absolute(a))
def l2norm(a):
"""
Return the *l2* norm of *a*, flattened out.
Implemented as a separate function (not a call to :func:`norm` for speed).
"""
return np.sqrt(np.sum(np.absolute(a)**2))
def norm_flat(a,p=2):
"""
norm(a,p=2) -> l-p norm of a.flat
Return the l-p norm of *a*, considered as a flat array. This is NOT a true
matrix norm, since arrays of arbitrary rank are always flattened.
*p* can be a number or the string 'Infinity' to get the L-infinity norm.
"""
# This function was being masked by a more general norm later in
# the file. We may want to simply delete it.
if p=='Infinity':
return np.amax(np.absolute(a))
else:
return (np.sum(np.absolute(a)**p))**(1.0/p)
def frange(xini,xfin=None,delta=None,**kw):
"""
frange([start,] stop[, step, keywords]) -> array of floats
Return a numpy ndarray containing a progression of floats. Similar to
:func:`numpy.arange`, but defaults to a closed interval.
``frange(x0, x1)`` returns ``[x0, x0+1, x0+2, ..., x1]``; *start*
defaults to 0, and the endpoint *is included*. This behavior is
different from that of :func:`range` and
:func:`numpy.arange`. This is deliberate, since :func:`frange`
will probably be more useful for generating lists of points for
function evaluation, and endpoints are often desired in this
use. The usual behavior of :func:`range` can be obtained by
    setting the keyword *closed* = 0; in this case, :func:`frange`
    basically becomes :func:`numpy.arange`.
When *step* is given, it specifies the increment (or
decrement). All arguments can be floating point numbers.
``frange(x0,x1,d)`` returns ``[x0,x0+d,x0+2d,...,xfin]`` where
*xfin* <= *x1*.
:func:`frange` can also be called with the keyword *npts*. This
sets the number of points the list should contain (and overrides
the value *step* might have been given). :func:`numpy.arange`
doesn't offer this option.
Examples::
>>> frange(3)
array([ 0., 1., 2., 3.])
>>> frange(3,closed=0)
array([ 0., 1., 2.])
>>> frange(1,6,2)
    array([1, 3, 5]) or 1,3,5,7, depending on floating point vagaries
>>> frange(1,6.5,npts=5)
array([ 1. , 2.375, 3.75 , 5.125, 6.5 ])
"""
#defaults
kw.setdefault('closed',1)
endpoint = kw['closed'] != 0
# funny logic to allow the *first* argument to be optional (like range())
# This was modified with a simpler version from a similar frange() found
# at http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66472
    if xfin is None:
xfin = xini + 0.0
xini = 0.0
    if delta is None:
delta = 1.0
# compute # of points, spacing and return final list
try:
npts=kw['npts']
delta=(xfin-xini)/float(npts-endpoint)
except KeyError:
npts = int(round((xfin-xini)/delta)) + endpoint
#npts = int(floor((xfin-xini)/delta)*(1.0+1e-10)) + endpoint
# round finds the nearest, so the endpoint can be up to
# delta/2 larger than xfin.
return np.arange(npts)*delta+xini
# end frange()
def identity(n, rank=2, dtype='l', typecode=None):
"""
Returns the identity matrix of shape (*n*, *n*, ..., *n*) (rank *r*).
For ranks higher than 2, this object is simply a multi-index Kronecker
delta::
/ 1 if i0=i1=...=iR,
id[i0,i1,...,iR] = -|
\ 0 otherwise.
Optionally a *dtype* (or typecode) may be given (it defaults to 'l').
Since rank defaults to 2, this function behaves in the default case (when
only *n* is given) like ``numpy.identity(n)`` -- but surprisingly, it is
much faster.
"""
if typecode is not None:
dtype = typecode
iden = np.zeros((n,)*rank, dtype)
for i in range(n):
idx = (i,)*rank
iden[idx] = 1
return iden
def base_repr (number, base = 2, padding = 0):
"""
Return the representation of a *number* in any given *base*.
"""
chars = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    if number < base:
        return (padding - 1) * chars[0] + chars[int(number)]
    max_exponent = int(math.log(number) / math.log(base))
    max_power = base ** max_exponent
    lead_digit = int(number / max_power)
    return (chars[lead_digit] +
            base_repr(number - max_power * lead_digit, base,
                      max(padding - 1, max_exponent)))
def binary_repr(number, max_length = 1025):
"""
Return the binary representation of the input *number* as a
string.
This is more efficient than using :func:`base_repr` with base 2.
Increase the value of max_length for very large numbers. Note that
on 32-bit machines, 2**1023 is the largest integer power of 2
which can be converted to a Python float.
"""
#assert number < 2L << max_length
    shifts = list(map(operator.rshift, max_length * [number],
                      range(max_length - 1, -1, -1)))
    digits = list(map(operator.mod, shifts, max_length * [2]))
    if not digits.count(1):
        return '0'
    digits = digits[digits.index(1):]
    return ''.join(map(repr, digits)).replace('L', '')
def log2(x,ln2 = math.log(2.0)):
"""
Return the log(*x*) in base 2.
This is a _slow_ function but which is guaranteed to return the correct
integer value if the input is an integer exact power of 2.
"""
try:
bin_n = binary_repr(x)[1:]
except (AssertionError,TypeError):
return math.log(x)/ln2
else:
if '1' in bin_n:
return math.log(x)/ln2
else:
return len(bin_n)
def ispower2(n):
"""
Returns the log base 2 of *n* if *n* is a power of 2, zero otherwise.
Note the potential ambiguity if *n* == 1: 2**0 == 1, interpret accordingly.
"""
bin_n = binary_repr(n)[1:]
if '1' in bin_n:
return 0
else:
return len(bin_n)
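# Illustrative sketch of the helpers above: binary_repr() returns a string of
# bits, log2() is exact for integer powers of two, and ispower2() returns the
# exponent (or 0 when the argument is not a power of two).
def _binary_helpers_demo():
    bits = binary_repr(10)     # -> '1010'
    exact = log2(1024)         # -> 10, exact since 1024 is a power of 2
    not_pow = ispower2(10)     # -> 0
    pow_exp = ispower2(1024)   # -> 10
    return bits, exact, not_pow, pow_exp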
def isvector(X):
"""
Like the MATLAB function with the same name, returns *True*
if the supplied numpy array or matrix *X* looks like a vector,
meaning it has a one non-singleton axis (i.e., it can have
multiple axes, but all must have length 1, except for one of
them).
If you just want to see if the array has 1 axis, use X.ndim == 1.
"""
return np.prod(X.shape)==np.max(X.shape)
### end fperez numutils code
#helpers for loading, saving, manipulating and viewing numpy record arrays
def safe_isnan(x):
':func:`numpy.isnan` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isnan(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def safe_isinf(x):
':func:`numpy.isinf` for arbitrary types'
if cbook.is_string_like(x):
return False
try: b = np.isinf(x)
except NotImplementedError: return False
except TypeError: return False
else: return b
def rec_append_fields(rec, names, arrs, dtypes=None):
"""
Return a new record array with field names populated with data
from arrays in *arrs*. If appending a single field, then *names*,
*arrs* and *dtypes* do not have to be lists. They can just be the
values themselves.
"""
if (not cbook.is_string_like(names) and cbook.iterable(names) \
and len(names) and cbook.is_string_like(names[0])):
if len(names) != len(arrs):
raise ValueError("number of arrays do not match number of names")
else: # we have only 1 name and 1 array
names = [names]
arrs = [arrs]
arrs = list(map(np.asarray, arrs))
if dtypes is None:
dtypes = [a.dtype for a in arrs]
elif not cbook.iterable(dtypes):
dtypes = [dtypes]
if len(arrs) != len(dtypes):
if len(dtypes) == 1:
dtypes = dtypes * len(arrs)
else:
raise ValueError("dtypes must be None, a single dtype or a list")
newdtype = np.dtype(rec.dtype.descr + list(zip(names, dtypes)))
newrec = np.recarray(rec.shape, dtype=newdtype)
for field in rec.dtype.fields:
newrec[field] = rec[field]
for name, arr in zip(names, arrs):
newrec[name] = arr
return newrec
def rec_drop_fields(rec, names):
"""
Return a new numpy record array with fields in *names* dropped.
"""
names = set(names)
newdtype = np.dtype([(name, rec.dtype[name]) for name in rec.dtype.names
if name not in names])
newrec = np.recarray(rec.shape, dtype=newdtype)
for field in newdtype.names:
newrec[field] = rec[field]
return newrec
def rec_keep_fields(rec, names):
"""
Return a new numpy record array with only fields listed in names
"""
if cbook.is_string_like(names):
names = names.split(',')
arrays = []
for name in names:
arrays.append(rec[name])
return np.rec.fromarrays(arrays, names=names)
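# Hedged sketch of the record-array field helpers above; the field names and
# data are made up for illustration.
def _rec_fields_demo():
    r = np.rec.fromrecords([(1, 2.0), (3, 4.0)], names='a,b')
    r2 = rec_append_fields(r, 'c', np.array([5.0, 6.0]))   # add a column
    r3 = rec_drop_fields(r2, ['a'])                        # drop a column
    r4 = rec_keep_fields(r2, 'a,c')                        # keep a subset
    return r2, r3, r4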
def rec_groupby(r, groupby, stats):
"""
*r* is a numpy record array
*groupby* is a sequence of record array attribute names that
together form the grouping key. e.g., ('date', 'productcode')
*stats* is a sequence of (*attr*, *func*, *outname*) tuples which
will call ``x = func(attr)`` and assign *x* to the record array
output with attribute *outname*. For example::
stats = ( ('sales', len, 'numsales'), ('sales', np.mean, 'avgsale') )
    The returned record array has *dtype* names for each attribute name
    in the *groupby* argument, with the associated group values, and
for each outname name in the *stats* argument, with the associated
stat summary output.
"""
# build a dictionary from groupby keys-> list of indices into r with
# those keys
rowd = dict()
for i, row in enumerate(r):
key = tuple([row[attr] for attr in groupby])
rowd.setdefault(key, []).append(i)
# sort the output by groupby keys
keys = list(six.iterkeys(rowd))
keys.sort()
rows = []
for key in keys:
row = list(key)
# get the indices for this groupby key
ind = rowd[key]
thisr = r[ind]
# call each stat function for this groupby slice
row.extend([func(thisr[attr]) for attr, func, outname in stats])
rows.append(row)
# build the output record array with groupby and outname attributes
attrs, funcs, outnames = list(zip(*stats))
names = list(groupby)
names.extend(outnames)
return np.rec.fromrecords(rows, names=names)
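# Hedged sketch of rec_groupby(): group a small record array by 'product' and
# summarize its 'sales' column.  Field names and data are illustrative only.
def _rec_groupby_demo():
    r = np.rec.fromrecords(
        [('widget', 1.0), ('widget', 3.0), ('gadget', 2.0)],
        names='product,sales')
    stats = (('sales', len, 'numsales'), ('sales', np.mean, 'avgsale'))
    # -> one output row per product, with the count and mean of its sales
    return rec_groupby(r, ('product',), stats)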
def rec_summarize(r, summaryfuncs):
"""
*r* is a numpy record array
*summaryfuncs* is a list of (*attr*, *func*, *outname*) tuples
    which will apply *func* to the array *r*[attr] and assign the
output to a new attribute name *outname*. The returned record
array is identical to *r*, with extra arrays for each element in
*summaryfuncs*.
"""
names = list(r.dtype.names)
arrays = [r[name] for name in names]
for attr, func, outname in summaryfuncs:
names.append(outname)
arrays.append(np.asarray(func(r[attr])))
return np.rec.fromarrays(arrays, names=names)
def rec_join(key, r1, r2, jointype='inner', defaults=None, r1postfix='1', r2postfix='2'):
"""
Join record arrays *r1* and *r2* on *key*; *key* is a tuple of
field names -- if *key* is a string it is assumed to be a single
attribute name. If *r1* and *r2* have equal values on all the keys
in the *key* tuple, then their fields will be merged into a new
record array containing the intersection of the fields of *r1* and
*r2*.
*r1* (also *r2*) must not have any duplicate keys.
The *jointype* keyword can be 'inner', 'outer', 'leftouter'. To
do a rightouter join just reverse *r1* and *r2*.
The *defaults* keyword is a dictionary filled with
``{column_name:default_value}`` pairs.
The keywords *r1postfix* and *r2postfix* are postfixed to column names
(other than keys) that are both in *r1* and *r2*.
"""
if cbook.is_string_like(key):
key = (key, )
for name in key:
if name not in r1.dtype.names:
raise ValueError('r1 does not have key field %s'%name)
if name not in r2.dtype.names:
raise ValueError('r2 does not have key field %s'%name)
def makekey(row):
return tuple([row[name] for name in key])
r1d = dict([(makekey(row),i) for i,row in enumerate(r1)])
r2d = dict([(makekey(row),i) for i,row in enumerate(r2)])
r1keys = set(r1d.keys())
r2keys = set(r2d.keys())
common_keys = r1keys & r2keys
r1ind = np.array([r1d[k] for k in common_keys])
r2ind = np.array([r2d[k] for k in common_keys])
common_len = len(common_keys)
left_len = right_len = 0
if jointype == "outer" or jointype == "leftouter":
left_keys = r1keys.difference(r2keys)
left_ind = np.array([r1d[k] for k in left_keys])
left_len = len(left_ind)
if jointype == "outer":
right_keys = r2keys.difference(r1keys)
right_ind = np.array([r2d[k] for k in right_keys])
right_len = len(right_ind)
def key_desc(name):
'if name is a string key, use the larger size of r1 or r2 before merging'
dt1 = r1.dtype[name]
if dt1.type != np.string_:
return (name, dt1.descr[0][1])
dt2 = r2.dtype[name]
if dt1 != dt2:
msg = "The '{}' fields in arrays 'r1' and 'r2' must have the same"
msg += " dtype."
raise ValueError(msg.format(name))
if dt1.num>dt2.num:
return (name, dt1.descr[0][1])
else:
return (name, dt2.descr[0][1])
keydesc = [key_desc(name) for name in key]
def mapped_r1field(name):
"""
The column name in *newrec* that corresponds to the column in *r1*.
"""
if name in key or name not in r2.dtype.names: return name
else: return name + r1postfix
def mapped_r2field(name):
"""
The column name in *newrec* that corresponds to the column in *r2*.
"""
if name in key or name not in r1.dtype.names: return name
else: return name + r2postfix
r1desc = [(mapped_r1field(desc[0]), desc[1]) for desc in r1.dtype.descr if desc[0] not in key]
r2desc = [(mapped_r2field(desc[0]), desc[1]) for desc in r2.dtype.descr if desc[0] not in key]
newdtype = np.dtype(keydesc + r1desc + r2desc)
newrec = np.recarray((common_len + left_len + right_len,), dtype=newdtype)
if defaults is not None:
for thiskey in defaults:
if thiskey not in newdtype.names:
warnings.warn('rec_join defaults key="%s" not in new dtype names "%s"'%(
thiskey, newdtype.names))
for name in newdtype.names:
dt = newdtype[name]
if dt.kind in ('f', 'i'):
newrec[name] = 0
if jointype != 'inner' and defaults is not None: # fill in the defaults enmasse
        newrec_fields = list(six.iterkeys(newrec.dtype.fields))
for k, v in six.iteritems(defaults):
if k in newrec_fields:
newrec[k] = v
for field in r1.dtype.names:
newfield = mapped_r1field(field)
if common_len:
newrec[newfield][:common_len] = r1[field][r1ind]
if (jointype == "outer" or jointype == "leftouter") and left_len:
newrec[newfield][common_len:(common_len+left_len)] = r1[field][left_ind]
for field in r2.dtype.names:
newfield = mapped_r2field(field)
if field not in key and common_len:
newrec[newfield][:common_len] = r2[field][r2ind]
if jointype == "outer" and right_len:
newrec[newfield][-right_len:] = r2[field][right_ind]
newrec.sort(order=key)
return newrec
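# Hedged sketch of rec_join(): inner-join two record arrays on a shared 'id'
# key.  The field names and values are made up for illustration.
def _rec_join_demo():
    r1 = np.rec.fromrecords([(1, 10.0), (2, 20.0)], names='id,price')
    r2 = np.rec.fromrecords([(2, 0.5), (3, 0.7)], names='id,weight')
    # only id == 2 occurs in both arrays, so the inner join has a single row
    # carrying the 'price' column from r1 and the 'weight' column from r2
    return rec_join(('id',), r1, r2, jointype='inner')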
def recs_join(key, name, recs, jointype='outer', missing=0., postfixes=None):
"""
Join a sequence of record arrays on single column key.
This function only joins a single column of the multiple record arrays
*key*
is the column name that acts as a key
*name*
is the name of the column that we want to join
*recs*
is a list of record arrays to join
*jointype*
is a string 'inner' or 'outer'
*missing*
is what any missing field is replaced by
*postfixes*
if not None, a len recs sequence of postfixes
returns a record array with columns [rowkey, name0, name1, ... namen-1].
or if postfixes [PF0, PF1, ..., PFN-1] are supplied,
[rowkey, namePF0, namePF1, ... namePFN-1].
Example::
r = recs_join("date", "close", recs=[r0, r1], missing=0.)
"""
results = []
aligned_iters = cbook.align_iterators(operator.attrgetter(key), *[iter(r) for r in recs])
def extract(r):
if r is None: return missing
else: return r[name]
if jointype == "outer":
for rowkey, row in aligned_iters:
results.append([rowkey] + list(map(extract, row)))
elif jointype == "inner":
for rowkey, row in aligned_iters:
if None not in row: # throw out any Nones
results.append([rowkey] + list(map(extract, row)))
if postfixes is None:
postfixes = ['%d'%i for i in range(len(recs))]
names = ",".join([key] + ["%s%s" % (name, postfix) for postfix in postfixes])
return np.rec.fromrecords(results, names=names)
def csv2rec(fname, comments='#', skiprows=0, checkrows=0, delimiter=',',
converterd=None, names=None, missing='', missingd=None,
use_mrecords=False, dayfirst=False, yearfirst=False):
"""
Load data from comma/space/tab delimited file in *fname* into a
numpy record array and return the record array.
If *names* is *None*, a header row is required to automatically
assign the recarray names. The headers will be lower cased,
spaces will be converted to underscores, and illegal attribute
name characters removed. If *names* is not *None*, it is a
sequence of names to use for the column names. In this case, it
is assumed there is no header row.
- *fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
- *comments*: the character used to indicate the start of a comment
in the file, or *None* to switch off the removal of comments
- *skiprows*: is the number of rows from the top to skip
- *checkrows*: is the number of rows to check to validate the column
data type. When set to zero all rows are validated.
- *converterd*: if not *None*, is a dictionary mapping column number or
munged column name to a converter function.
- *names*: if not None, is a list of header names. In this case, no
header will be read from the file
- *missingd* is a dictionary mapping munged column names to field values
which signify that the field does not contain actual data and should
be masked, e.g., '0000-00-00' or 'unused'
- *missing*: a string whose value signals a missing field regardless of
the column it appears in
- *use_mrecords*: if True, return an mrecords.fromrecords record array if any of the data are missing
- *dayfirst*: default is False so that MM-DD-YY has precedence over
DD-MM-YY. See http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
for further information.
- *yearfirst*: default is False so that MM-DD-YY has precedence over
YY-MM-DD. See http://labix.org/python-dateutil#head-b95ce2094d189a89f80f5ae52a05b4ab7b41af47
for further information.
If no rows are found, *None* is returned -- see :file:`examples/loadrec.py`
"""
if converterd is None:
converterd = dict()
if missingd is None:
missingd = {}
import dateutil.parser
import datetime
fh = cbook.to_filehandle(fname)
delimiter = str(delimiter)
class FH:
"""
For space-delimited files, we want different behavior than
comma or tab. Generally, we want multiple spaces to be
treated as a single separator, whereas with comma and tab we
want multiple commas to return multiple (empty) fields. The
join/strip trick below effects this.
"""
def __init__(self, fh):
self.fh = fh
def close(self):
self.fh.close()
def seek(self, arg):
self.fh.seek(arg)
def fix(self, s):
return ' '.join(s.split())
def __next__(self):
return self.fix(next(self.fh))
def __iter__(self):
for line in self.fh:
yield self.fix(line)
if delimiter==' ':
fh = FH(fh)
reader = csv.reader(fh, delimiter=delimiter)
def process_skiprows(reader):
if skiprows:
for i, row in enumerate(reader):
if i>=(skiprows-1): break
return fh, reader
process_skiprows(reader)
def ismissing(name, val):
"Should the value val in column name be masked?"
if val == missing or val == missingd.get(name) or val == '':
return True
else:
return False
def with_default_value(func, default):
def newfunc(name, val):
if ismissing(name, val):
return default
else:
return func(val)
return newfunc
def mybool(x):
if x=='True': return True
elif x=='False': return False
else: raise ValueError('invalid bool')
dateparser = dateutil.parser.parse
mydateparser = with_default_value(dateparser, datetime.date(1,1,1))
myfloat = with_default_value(float, np.nan)
myint = with_default_value(int, -1)
mystr = with_default_value(str, '')
mybool = with_default_value(mybool, None)
def mydate(x):
# try and return a date object
d = dateparser(x, dayfirst=dayfirst, yearfirst=yearfirst)
if d.hour>0 or d.minute>0 or d.second>0:
raise ValueError('not a date')
return d.date()
mydate = with_default_value(mydate, datetime.date(1,1,1))
def get_func(name, item, func):
# promote functions in this order
funcmap = {mybool:myint,myint:myfloat, myfloat:mydate, mydate:mydateparser, mydateparser:mystr}
try: func(name, item)
except:
if func==mystr:
raise ValueError('Could not find a working conversion function')
else: return get_func(name, item, funcmap[func]) # recurse
else: return func
# map column names that clash with builtins -- TODO - extend this list
itemd = {
'return' : 'return_',
'file' : 'file_',
'print' : 'print_',
}
def get_converters(reader):
converters = None
for i, row in enumerate(reader):
if i==0:
converters = [mybool]*len(row)
if checkrows and i>checkrows:
break
#print i, len(names), len(row)
#print 'converters', zip(converters, row)
for j, (name, item) in enumerate(zip(names, row)):
func = converterd.get(j)
if func is None:
func = converterd.get(name)
if func is None:
#if not item.strip(): continue
func = converters[j]
if len(item.strip()):
func = get_func(name, item, func)
else:
# how should we handle custom converters and defaults?
func = with_default_value(func, None)
converters[j] = func
return converters
# Get header and remove invalid characters
needheader = names is None
if needheader:
for row in reader:
#print 'csv2rec', row
if len(row) and comments is not None and row[0].startswith(comments):
continue
headers = row
break
# remove these chars
delete = set("""~!@#$%^&*()-=+~\|]}[{';: /?.>,<""")
delete.add('"')
names = []
seen = dict()
for i, item in enumerate(headers):
item = item.strip().lower().replace(' ', '_')
item = ''.join([c for c in item if c not in delete])
if not len(item):
item = 'column%d'%i
item = itemd.get(item, item)
cnt = seen.get(item, 0)
if cnt>0:
names.append(item + '_%d'%cnt)
else:
names.append(item)
seen[item] = cnt+1
else:
if cbook.is_string_like(names):
names = [n.strip() for n in names.split(',')]
# get the converter functions by inspecting checkrows
converters = get_converters(reader)
if converters is None:
raise ValueError('Could not find any valid data in CSV file')
# reset the reader and start over
fh.seek(0)
reader = csv.reader(fh, delimiter=delimiter)
process_skiprows(reader)
if needheader:
while 1:
# skip past any comments and consume one line of column header
row = next(reader)
if len(row) and comments is not None and row[0].startswith(comments):
continue
break
# iterate over the remaining rows and convert the data to date
    # objects, ints, or floats as appropriate
rows = []
rowmasks = []
for i, row in enumerate(reader):
if not len(row):
continue
if comments is not None and row[0].startswith(comments):
continue
# Ensure that the row returned always has the same nr of elements
row.extend([''] * (len(converters) - len(row)))
rows.append([func(name, val) for func, name, val in zip(converters, names, row)])
rowmasks.append([ismissing(name, val) for name, val in zip(names, row)])
fh.close()
if not len(rows):
return None
if use_mrecords and np.any(rowmasks):
try: from numpy.ma import mrecords
except ImportError:
raise RuntimeError('numpy 1.05 or later is required for masked array support')
else:
r = mrecords.fromrecords(rows, names=names, mask=rowmasks)
else:
r = np.rec.fromrecords(rows, names=names)
return r
# a series of classes for describing the format intentions of various rec views
class FormatObj:
def tostr(self, x):
return self.toval(x)
def toval(self, x):
return str(x)
def fromstr(self, s):
return s
def __hash__(self):
"""
override the hash function of any of the formatters, so that we don't create duplicate excel format styles
"""
return hash(self.__class__)
class FormatString(FormatObj):
def tostr(self, x):
val = repr(x)
return val[1:-1]
#class FormatString(FormatObj):
# def tostr(self, x):
# return '"%r"'%self.toval(x)
class FormatFormatStr(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def tostr(self, x):
if x is None: return 'None'
return self.fmt%self.toval(x)
class FormatFloat(FormatFormatStr):
def __init__(self, precision=4, scale=1.):
FormatFormatStr.__init__(self, '%%1.%df'%precision)
self.precision = precision
self.scale = scale
def __hash__(self):
return hash((self.__class__, self.precision, self.scale))
def toval(self, x):
if x is not None:
x = x * self.scale
return x
def fromstr(self, s):
return float(s)/self.scale
class FormatInt(FormatObj):
def tostr(self, x):
return '%d'%int(x)
def toval(self, x):
return int(x)
def fromstr(self, s):
return int(s)
class FormatBool(FormatObj):
def toval(self, x):
return str(x)
def fromstr(self, s):
return bool(s)
class FormatPercent(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=100.)
class FormatThousands(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-3)
class FormatMillions(FormatFloat):
def __init__(self, precision=4):
FormatFloat.__init__(self, precision, scale=1e-6)
class FormatDate(FormatObj):
def __init__(self, fmt):
self.fmt = fmt
def __hash__(self):
return hash((self.__class__, self.fmt))
def toval(self, x):
if x is None: return 'None'
return x.strftime(self.fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x).date()
class FormatDatetime(FormatDate):
def __init__(self, fmt='%Y-%m-%d %H:%M:%S'):
FormatDate.__init__(self, fmt)
def fromstr(self, x):
import dateutil.parser
return dateutil.parser.parse(x)
defaultformatd = {
np.bool_ : FormatBool(),
np.int16 : FormatInt(),
np.int32 : FormatInt(),
np.int64 : FormatInt(),
np.float32 : FormatFloat(),
np.float64 : FormatFloat(),
np.object_ : FormatObj(),
np.string_ : FormatString(),
}
def get_formatd(r, formatd=None):
'build a formatd guaranteed to have a key for every dtype name'
if formatd is None:
formatd = dict()
for i, name in enumerate(r.dtype.names):
dt = r.dtype[name]
format = formatd.get(name)
if format is None:
format = defaultformatd.get(dt.type, FormatObj())
formatd[name] = format
return formatd
def csvformat_factory(format):
format = copy.deepcopy(format)
if isinstance(format, FormatFloat):
format.scale = 1. # override scaling for storage
format.fmt = '%r'
return format
def rec2txt(r, header=None, padding=3, precision=3, fields=None):
"""
Returns a textual representation of a record array.
*r*: numpy recarray
*header*: list of column headers
*padding*: space between each column
*precision*: number of decimal places to use for floats.
Set to an integer to apply to all floats. Set to a
list of integers to apply precision individually.
Precision for non-floats is simply ignored.
*fields* : if not None, a list of field names to print. fields
can be a list of strings like ['field1', 'field2'] or a single
comma separated string like 'field1,field2'
Example::
precision=[0,2,3]
Output::
ID Price Return
ABC 12.54 0.234
XYZ 6.32 -0.076
"""
if fields is not None:
r = rec_keep_fields(r, fields)
if cbook.is_numlike(precision):
precision = [precision]*len(r.dtype)
def get_type(item,atype=int):
tdict = {None:int, int:float, float:str}
try: atype(str(item))
except: return get_type(item,tdict[atype])
return atype
def get_justify(colname, column, precision):
ntype = type(column[0])
if ntype==np.str or ntype==np.str_ or ntype==np.string0 or ntype==np.string_:
length = max(len(colname),column.itemsize)
return 0, length+padding, "%s" # left justify
if ntype==np.int or ntype==np.int16 or ntype==np.int32 or ntype==np.int64 or ntype==np.int8 or ntype==np.int_:
length = max(len(colname),np.max(list(map(len, list(map(str,column))))))
return 1, length+padding, "%d" # right justify
# JDH: my powerbook does not have np.float96 using np 1.3.0
"""
In [2]: np.__version__
Out[2]: '1.3.0.dev5948'
In [3]: !uname -a
Darwin Macintosh-5.local 9.4.0 Darwin Kernel Version 9.4.0: Mon Jun 9 19:30:53 PDT 2008; root:xnu-1228.5.20~1/RELEASE_I386 i386 i386
In [4]: np.float96
---------------------------------------------------------------------------
AttributeError Traceback (most recent call la
"""
if ntype==np.float or ntype==np.float32 or ntype==np.float64 or (hasattr(np, 'float96') and (ntype==np.float96)) or ntype==np.float_:
fmt = "%." + str(precision) + "f"
length = max(len(colname),np.max(list(map(len, list(map(lambda x:fmt%x,column))))))
return 1, length+padding, fmt # right justify
return 0, max(len(colname),np.max(list(map(len, list(map(str,column))))))+padding, "%s"
if header is None:
header = r.dtype.names
justify_pad_prec = [get_justify(header[i],r.__getitem__(colname),precision[i]) for i, colname in enumerate(r.dtype.names)]
justify_pad_prec_spacer = []
for i in range(len(justify_pad_prec)):
just,pad,prec = justify_pad_prec[i]
if i == 0:
justify_pad_prec_spacer.append((just,pad,prec,0))
else:
pjust,ppad,pprec = justify_pad_prec[i-1]
if pjust == 0 and just == 1:
justify_pad_prec_spacer.append((just,pad-padding,prec,0))
elif pjust == 1 and just == 0:
justify_pad_prec_spacer.append((just,pad,prec,padding))
else:
justify_pad_prec_spacer.append((just,pad,prec,0))
def format(item, just_pad_prec_spacer):
just, pad, prec, spacer = just_pad_prec_spacer
if just == 0:
return spacer*' ' + str(item).ljust(pad)
else:
if get_type(item) == float:
item = (prec%float(item))
elif get_type(item) == int:
item = (prec%int(item))
return item.rjust(pad)
textl = []
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(header)]))
for i, row in enumerate(r):
textl.append(''.join([format(colitem,justify_pad_prec_spacer[j]) for j, colitem in enumerate(row)]))
if i==0:
textl[0] = textl[0].rstrip()
text = os.linesep.join(textl)
return text
def rec2csv(r, fname, delimiter=',', formatd=None, missing='',
missingd=None, withheader=True):
"""
Save the data from numpy recarray *r* into a
comma-/space-/tab-delimited file. The record array dtype names
will be used for column headers.
*fname*: can be a filename or a file handle. Support for gzipped
files is automatic, if the filename ends in '.gz'
*withheader*: if withheader is False, do not write the attribute
names in the first row
for formatd type FormatFloat, we override the precision to store
full precision floats in the CSV file
.. seealso::
:func:`csv2rec`
For information about *missing* and *missingd*, which can
be used to fill in masked values into your CSV file.
"""
delimiter = str(delimiter)
if missingd is None:
missingd = dict()
def with_mask(func):
def newfunc(val, mask, mval):
if mask:
return mval
else:
return func(val)
return newfunc
if r.ndim != 1:
raise ValueError('rec2csv only operates on 1 dimensional recarrays')
formatd = get_formatd(r, formatd)
funcs = []
for i, name in enumerate(r.dtype.names):
funcs.append(with_mask(csvformat_factory(formatd[name]).tostr))
fh, opened = cbook.to_filehandle(fname, 'wb', return_opened=True)
writer = csv.writer(fh, delimiter=delimiter)
header = r.dtype.names
if withheader:
writer.writerow(header)
# Our list of specials for missing values
mvals = []
for name in header:
mvals.append(missingd.get(name, missing))
ismasked = False
if len(r):
row = r[0]
ismasked = hasattr(row, '_fieldmask')
for row in r:
if ismasked:
row, rowmask = row.item(), row._fieldmask.item()
else:
rowmask = [False] * len(row)
writer.writerow([func(val, mask, mval) for func, val, mask, mval
in zip(funcs, row, rowmask, mvals)])
if opened:
fh.close()
def griddata(x, y, z, xi, yi, interp='nn'):
"""Interpolates from a nonuniformly spaced grid to some other
grid.
Fits a surface of the form z = f(`x`, `y`) to the data in the
(usually) nonuniformly spaced vectors (`x`, `y`, `z`), then
interpolates this surface at the points specified by
(`xi`, `yi`) to produce `zi`.
Parameters
----------
x, y, z : 1d array_like
Coordinates of grid points to interpolate from.
xi, yi : 1d or 2d array_like
Coordinates of grid points to interpolate to.
interp : string key from {'nn', 'linear'}
Interpolation algorithm, either 'nn' for natural neighbor, or
'linear' for linear interpolation.
Returns
-------
2d float array
Array of values interpolated at (`xi`, `yi`) points. Array
        will be masked if any of (`xi`, `yi`) are outside the convex
hull of (`x`, `y`).
Notes
-----
If `interp` is 'nn' (the default), uses natural neighbor
interpolation based on Delaunay triangulation. This option is
only available if the mpl_toolkits.natgrid module is installed.
This can be downloaded from https://github.com/matplotlib/natgrid.
The (`xi`, `yi`) grid must be regular and monotonically increasing
in this case.
If `interp` is 'linear', linear interpolation is used via
matplotlib.tri.LinearTriInterpolator.
Instead of using `griddata`, more flexible functionality and other
interpolation options are available using a
matplotlib.tri.Triangulation and a matplotlib.tri.TriInterpolator.
"""
# Check input arguments.
x = np.asanyarray(x, dtype=np.float64)
y = np.asanyarray(y, dtype=np.float64)
z = np.asanyarray(z, dtype=np.float64)
if x.shape != y.shape or x.shape != z.shape or x.ndim != 1:
raise ValueError("x, y and z must be equal-length 1-D arrays")
xi = np.asanyarray(xi, dtype=np.float64)
yi = np.asanyarray(yi, dtype=np.float64)
if xi.ndim != yi.ndim:
raise ValueError("xi and yi must be arrays with the same number of "
"dimensions (1 or 2)")
if xi.ndim == 2 and xi.shape != yi.shape:
raise ValueError("if xi and yi are 2D arrays, they must have the same "
"shape")
if xi.ndim == 1:
xi, yi = np.meshgrid(xi, yi)
if interp == 'nn':
use_nn_interpolation = True
elif interp == 'linear':
use_nn_interpolation = False
else:
raise ValueError("interp keyword must be one of 'linear' (for linear "
"interpolation) or 'nn' (for natural neighbor "
"interpolation). Default is 'nn'.")
# Remove masked points.
mask = np.ma.getmask(z)
if not (mask is np.ma.nomask):
x = x.compress(~mask)
y = y.compress(~mask)
z = z.compressed()
if use_nn_interpolation:
try:
from mpl_toolkits.natgrid import _natgrid
except ImportError:
raise RuntimeError("To use interp='nn' (Natural Neighbor "
"interpolation) in griddata, natgrid must be installed. "
"Either install it from http://sourceforge.net/projects/"
"matplotlib/files/matplotlib-toolkits, or use interp='linear' "
"instead.")
if xi.ndim == 2:
# natgrid expects 1D xi and yi arrays.
xi = xi[0, :]
yi = yi[:, 0]
# Override default natgrid internal parameters.
_natgrid.seti(b'ext', 0)
_natgrid.setr(b'nul', np.nan)
if np.min(np.diff(xi)) < 0 or np.min(np.diff(yi)) < 0:
raise ValueError("Output grid defined by xi,yi must be monotone "
"increasing")
# Allocate array for output (buffer will be overwritten by natgridd)
zi = np.empty((yi.shape[0], xi.shape[0]), np.float64)
# Natgrid requires each array to be contiguous rather than e.g. a view
# that is a non-contiguous slice of another array. Use numpy.require
# to deal with this, which will copy if necessary.
x = np.require(x, requirements=['C'])
y = np.require(y, requirements=['C'])
z = np.require(z, requirements=['C'])
xi = np.require(xi, requirements=['C'])
yi = np.require(yi, requirements=['C'])
_natgrid.natgridd(x, y, z, xi, yi, zi)
# Mask points on grid outside convex hull of input data.
if np.any(np.isnan(zi)):
zi = np.ma.masked_where(np.isnan(zi), zi)
return zi
else:
# Linear interpolation performed using a matplotlib.tri.Triangulation
# and a matplotlib.tri.LinearTriInterpolator.
from .tri import Triangulation, LinearTriInterpolator
triang = Triangulation(x, y)
interpolator = LinearTriInterpolator(triang, z)
return interpolator(xi, yi)
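# Hedged sketch of griddata() with interp='linear' (which only needs
# matplotlib.tri, not natgrid): interpolate scattered samples of a smooth
# function onto a regular grid.  The data are random and purely illustrative.
def _griddata_demo(npts=200):
    x = np.random.rand(npts)
    y = np.random.rand(npts)
    z = np.sin(np.pi * x) * np.cos(np.pi * y)
    xi = np.linspace(0.0, 1.0, 25)
    yi = np.linspace(0.0, 1.0, 25)
    # zi is masked wherever (xi, yi) falls outside the convex hull of (x, y)
    zi = griddata(x, y, z, xi, yi, interp='linear')
    return xi, yi, zi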
##################################################
# Linear interpolation algorithms
##################################################
def less_simple_linear_interpolation( x, y, xi, extrap=False ):
"""
This function provides simple (but somewhat less so than
:func:`cbook.simple_linear_interpolation`) linear interpolation.
    :func:`simple_linear_interpolation` will give a list of points
between a start and an end, while this does true linear
interpolation at an arbitrary set of points.
This is very inefficient linear interpolation meant to be used
only for a small number of points in relatively non-intensive use
cases. For real linear interpolation, use scipy.
"""
if cbook.is_scalar(xi): xi = [xi]
x = np.asarray(x)
y = np.asarray(y)
xi = np.asarray(xi)
s = list(y.shape)
s[0] = len(xi)
yi = np.tile( np.nan, s )
for ii,xx in enumerate(xi):
bb = x == xx
if np.any(bb):
jj, = np.nonzero(bb)
yi[ii] = y[jj[0]]
elif xx<x[0]:
if extrap:
yi[ii] = y[0]
elif xx>x[-1]:
if extrap:
yi[ii] = y[-1]
else:
jj, = np.nonzero(x<xx)
jj = max(jj)
yi[ii] = y[jj] + (xx-x[jj])/(x[jj+1]-x[jj]) * (y[jj+1]-y[jj])
return yi
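# Illustrative sketch: less_simple_linear_interpolation() at arbitrary points
# on a straight line, where the exact answers are obvious.
def _less_simple_linear_interpolation_demo():
    x = np.array([0.0, 1.0, 2.0])
    y = np.array([0.0, 10.0, 20.0])
    return less_simple_linear_interpolation(x, y, [0.5, 1.5])   # -> [ 5., 15.]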
def slopes(x,y):
"""
:func:`slopes` calculates the slope *y*'(*x*)
The slope is estimated using the slope obtained from that of a
parabola through any three consecutive points.
This method should be superior to that described in the appendix
    of A CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman (Creative Computing July 1980) in at least one aspect:
Circles for interpolation demand a known aspect ratio between
*x*- and *y*-values. For many functions, however, the abscissa
are given in different dimensions, so an aspect ratio is
completely arbitrary.
The parabola method gives very similar results to the circle
method for most regular cases but behaves much better in special
cases.
    Norbert Nemec, Institute of Theoretical Physics, University of
Regensburg, April 2006 Norbert.Nemec at physik.uni-regensburg.de
(inspired by a original implementation by Halldor Bjornsson,
Icelandic Meteorological Office, March 2006 halldor at vedur.is)
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
yp=np.zeros(y.shape, np.float_)
dx=x[1:] - x[:-1]
dy=y[1:] - y[:-1]
dydx = dy/dx
yp[1:-1] = (dydx[:-1] * dx[1:] + dydx[1:] * dx[:-1])/(dx[1:] + dx[:-1])
yp[0] = 2.0 * dy[0]/dx[0] - yp[1]
yp[-1] = 2.0 * dy[-1]/dx[-1] - yp[-2]
return yp
def stineman_interp(xi,x,y,yp=None):
"""
Given data vectors *x* and *y*, the slope vector *yp* and a new
abscissa vector *xi*, the function :func:`stineman_interp` uses
Stineman interpolation to calculate a vector *yi* corresponding to
*xi*.
Here's an example that generates a coarse sine curve, then
interpolates over a finer abscissa::
x = linspace(0,2*pi,20); y = sin(x); yp = cos(x)
xi = linspace(0,2*pi,40);
yi = stineman_interp(xi,x,y,yp);
plot(x,y,'o',xi,yi)
The interpolation method is described in the article A
CONSISTENTLY WELL BEHAVED METHOD OF INTERPOLATION by Russell
W. Stineman. The article appeared in the July 1980 issue of
Creative Computing with a note from the editor stating that while
they were:
not an academic journal but once in a while something serious
    and original comes in, adding that this was
"apparently a real solution" to a well known problem.
For *yp* = *None*, the routine automatically determines the slopes
using the :func:`slopes` routine.
*x* is assumed to be sorted in increasing order.
For values ``xi[j] < x[0]`` or ``xi[j] > x[-1]``, the routine
tries an extrapolation. The relevance of the data obtained from
this, of course, is questionable...
Original implementation by Halldor Bjornsson, Icelandic
    Meteorological Office, March 2006 halldor at vedur.is
Completely reworked and optimized for Python by Norbert Nemec,
    Institute of Theoretical Physics, University of Regensburg, April
2006 Norbert.Nemec at physik.uni-regensburg.de
"""
# Cast key variables as float.
x=np.asarray(x, np.float_)
y=np.asarray(y, np.float_)
assert x.shape == y.shape
if yp is None:
yp = slopes(x,y)
else:
yp=np.asarray(yp, np.float_)
xi=np.asarray(xi, np.float_)
yi=np.zeros(xi.shape, np.float_)
# calculate linear slopes
dx = x[1:] - x[:-1]
dy = y[1:] - y[:-1]
s = dy/dx #note length of s is N-1 so last element is #N-2
# find the segment each xi is in
# this line actually is the key to the efficiency of this implementation
idx = np.searchsorted(x[1:-1], xi)
# now we have generally: x[idx[j]] <= xi[j] <= x[idx[j]+1]
# except at the boundaries, where it may be that xi[j] < x[0] or xi[j] > x[-1]
# the y-values that would come out from a linear interpolation:
sidx = s.take(idx)
xidx = x.take(idx)
yidx = y.take(idx)
xidxp1 = x.take(idx+1)
yo = yidx + sidx * (xi - xidx)
# the difference that comes when using the slopes given in yp
dy1 = (yp.take(idx)- sidx) * (xi - xidx) # using the yp slope of the left point
dy2 = (yp.take(idx+1)-sidx) * (xi - xidxp1) # using the yp slope of the right point
dy1dy2 = dy1*dy2
# The following is optimized for Python. The solution actually
# does more calculations than necessary but exploiting the power
# of numpy, this is far more efficient than coding a loop by hand
# in Python
yi = yo + dy1dy2 * np.choose(np.array(np.sign(dy1dy2), np.int32)+1,
((2*xi-xidx-xidxp1)/((dy1-dy2)*(xidxp1-xidx)),
0.0,
1/(dy1+dy2),))
return yi
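# Minimal sketch following the docstring above: interpolate a coarse sine
# curve onto a finer abscissa using slopes() and stineman_interp().
def _stineman_interp_demo():
    x = np.linspace(0, 2 * np.pi, 20)
    y = np.sin(x)
    yp = slopes(x, y)                    # estimated slopes at the knots
    xi = np.linspace(0, 2 * np.pi, 40)
    yi = stineman_interp(xi, x, y, yp)
    return xi, yi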
class GaussianKDE(object):
"""
Representation of a kernel-density estimate using Gaussian kernels.
Call signature::
kde = GaussianKDE(dataset, bw_method='silverman')
Parameters
----------
dataset : array_like
Datapoints to estimate from. In case of univariate data this is a 1-D
array, otherwise a 2-D array with shape (# of dims, # of data).
bw_method : str, scalar or callable, optional
The method used to calculate the estimator bandwidth. This can be
'scott', 'silverman', a scalar constant or a callable. If a
scalar, this will be used directly as `kde.factor`. If a
callable, it should take a `GaussianKDE` instance as only
parameter and return a scalar. If None (default), 'scott' is used.
Attributes
----------
dataset : ndarray
The dataset with which `gaussian_kde` was initialized.
dim : int
Number of dimensions.
num_dp : int
Number of datapoints.
factor : float
The bandwidth factor, obtained from `kde.covariance_factor`, with which
the covariance matrix is multiplied.
covariance : ndarray
The covariance matrix of `dataset`, scaled by the calculated bandwidth
(`kde.factor`).
inv_cov : ndarray
The inverse of `covariance`.
Methods
-------
kde.evaluate(points) : ndarray
Evaluate the estimated pdf on a provided set of points.
kde(points) : ndarray
Same as kde.evaluate(points)
"""
# This implementation with minor modification was too good to pass up.
# from scipy: https://github.com/scipy/scipy/blob/master/scipy/stats/kde.py
def __init__(self, dataset, bw_method=None):
self.dataset = np.atleast_2d(dataset)
if not np.array(self.dataset).size > 1:
raise ValueError("`dataset` input should have multiple elements.")
self.dim, self.num_dp = np.array(self.dataset).shape
if bw_method is None:
pass
elif bw_method == 'scott':
self.covariance_factor = self.scotts_factor
elif bw_method == 'silverman':
self.covariance_factor = self.silverman_factor
elif (np.isscalar(bw_method) and not
isinstance(bw_method, six.string_types)):
self._bw_method = 'use constant'
self.covariance_factor = lambda: bw_method
elif callable(bw_method):
self._bw_method = bw_method
self.covariance_factor = lambda: self._bw_method(self)
else:
msg = "`bw_method` should be 'scott', 'silverman', a scalar " \
"or a callable."
raise ValueError(msg)
# Computes the covariance matrix for each Gaussian kernel using
# covariance_factor().
self.factor = self.covariance_factor()
# Cache covariance and inverse covariance of the data
if not hasattr(self, '_data_inv_cov'):
self.data_covariance = np.atleast_2d(
np.cov(
self.dataset,
rowvar=1,
bias=False))
self.data_inv_cov = np.linalg.inv(self.data_covariance)
self.covariance = self.data_covariance * self.factor ** 2
self.inv_cov = self.data_inv_cov / self.factor ** 2
self.norm_factor = np.sqrt(
np.linalg.det(
2 * np.pi * self.covariance)) * self.num_dp
def scotts_factor(self):
return np.power(self.num_dp, -1. / (self.dim + 4))
def silverman_factor(self):
return np.power(
self.num_dp * (self.dim + 2.0) / 4.0, -1. / (self.dim + 4))
# Default method to calculate bandwidth, can be overwritten by subclass
covariance_factor = scotts_factor
def evaluate(self, points):
"""Evaluate the estimated pdf on a set of points.
Parameters
----------
points : (# of dimensions, # of points)-array
Alternatively, a (# of dimensions,) vector can be passed in and
treated as a single point.
Returns
-------
values : (# of points,)-array
The values at each point.
Raises
------
ValueError : if the dimensionality of the input points is different
than the dimensionality of the KDE.
"""
points = np.atleast_2d(points)
dim, num_m = np.array(points).shape
if dim != self.dim:
msg = "points have dimension %s, dataset has dimension %s" % (
dim, self.dim)
raise ValueError(msg)
result = np.zeros((num_m,), dtype=float)
if num_m >= self.num_dp:
# there are more points than data, so loop over data
for i in range(self.num_dp):
diff = self.dataset[:, i, np.newaxis] - points
tdiff = np.dot(self.inv_cov, diff)
energy = np.sum(diff * tdiff, axis=0) / 2.0
result = result + np.exp(-energy)
else:
# loop over points
for i in range(num_m):
diff = self.dataset - points[:, i, np.newaxis]
tdiff = np.dot(self.inv_cov, diff)
energy = np.sum(diff * tdiff, axis=0) / 2.0
result[i] = np.sum(np.exp(-energy), axis=0)
result = result / self.norm_factor
return result
__call__ = evaluate
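# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hypothetical demo of GaussianKDE on univariate data, assuming the
# module-level `np` import. The bandwidth rule, seed and grid are arbitrary choices.
def _demo_gaussian_kde():
    rng = np.random.RandomState(0)             # fixed seed, for reproducibility only
    data = rng.normal(size=200)                # 1-D sample -> univariate KDE
    kde = GaussianKDE(data, bw_method='silverman')
    grid = np.linspace(-3.0, 3.0, 101)
    density = kde(grid)                        # same as kde.evaluate(grid)
    return grid, density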
##################################################
# Code related to things in and around polygons
##################################################
def inside_poly(points, verts):
"""
*points* is a sequence of *x*, *y* points.
*verts* is a sequence of *x*, *y* vertices of a polygon.
Return value is a sequence of indices into points for the points
that are inside the polygon.
"""
# Make a closed polygon path
poly = Path( verts )
# Check to see which points are contained within the Path
return [ idx for idx, p in enumerate(points) if poly.contains_point(p) ]
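# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hypothetical demo of inside_poly on a unit square; it relies on the
# same `Path` class the function above already uses.
def _demo_inside_poly():
    verts = [(0, 0), (1, 0), (1, 1), (0, 1)]           # unit square
    points = [(0.5, 0.5), (2.0, 2.0), (0.1, 0.9)]
    return inside_poly(points, verts)                  # expected: [0, 2]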
def poly_below(xmin, xs, ys):
"""
Given a sequence of *xs* and *ys*, return the vertices of a
polygon that has a horizontal base at *xmin* and an upper bound at
the *ys*. *xmin* is a scalar.
Intended for use with :meth:`matplotlib.axes.Axes.fill`, e.g.,::
xv, yv = poly_below(0, x, y)
ax.fill(xv, yv)
"""
if ma.isMaskedArray(xs) or ma.isMaskedArray(ys):
numpy = ma
else:
numpy = np
xs = numpy.asarray(xs)
ys = numpy.asarray(ys)
Nx = len(xs)
Ny = len(ys)
assert(Nx==Ny)
x = xmin*numpy.ones(2*Nx)
y = numpy.ones(2*Nx)
x[:Nx] = xs
y[:Nx] = ys
y[Nx:] = ys[::-1]
return x, y
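# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hypothetical demo showing the vertex layout poly_below returns: the
# first half of each array holds the input points, the second half closes the
# polygon back at x = xmin with the y values reversed.
def _demo_poly_below():
    x = np.linspace(1.0, 3.0, 5)
    y = np.sqrt(x)
    xv, yv = poly_below(0.0, x, y)
    # xv -> [1. 1.5 2. 2.5 3. 0. 0. 0. 0. 0.], yv -> y followed by y[::-1]
    return xv, yv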
def poly_between(x, ylower, yupper):
"""
Given a sequence of *x*, *ylower* and *yupper*, return the polygon
that fills the regions between them. *ylower* or *yupper* can be
scalar or iterable. If they are iterable, they must be equal in
length to *x*.
Return value is *x*, *y* arrays for use with
:meth:`matplotlib.axes.Axes.fill`.
"""
if ma.isMaskedArray(ylower) or ma.isMaskedArray(yupper) or ma.isMaskedArray(x):
numpy = ma
else:
numpy = np
Nx = len(x)
if not cbook.iterable(ylower):
ylower = ylower*numpy.ones(Nx)
if not cbook.iterable(yupper):
yupper = yupper*numpy.ones(Nx)
x = numpy.concatenate( (x, x[::-1]) )
y = numpy.concatenate( (yupper, ylower[::-1]) )
return x,y
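# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hypothetical demo of poly_between: it builds the outline of a band
# of half-width 0.2 around sin(x), suitable for Axes.fill.
def _demo_poly_between():
    x = np.linspace(0.0, 2.0 * np.pi, 100)
    xs, ys = poly_between(x, np.sin(x) - 0.2, np.sin(x) + 0.2)
    # ax.fill(xs, ys) would shade the region between the two curves
    return xs, ys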
def is_closed_polygon(X):
"""
Tests whether first and last object in a sequence are the same. These are
presumably coordinates on a polygonal curve, in which case this function
tests if that curve is closed.
"""
return np.all(X[0] == X[-1])
def contiguous_regions(mask):
"""
return a list of (ind0, ind1) such that mask[ind0:ind1].all() is
True and we cover all such regions
TODO: this is a pure python implementation which probably has a much faster numpy impl
"""
in_region = None
boundaries = []
for i, val in enumerate(mask):
if in_region is None and val:
in_region = i
elif in_region is not None and not val:
boundaries.append((in_region, i))
in_region = None
if in_region is not None:
boundaries.append((in_region, i+1))
return boundaries
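# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hypothetical demo of contiguous_regions: each (ind0, ind1) pair is a
# half-open slice over which the mask is True.
def _demo_contiguous_regions():
    mask = [False, True, True, False, True]
    return contiguous_regions(mask)            # expected: [(1, 3), (4, 5)]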
def cross_from_below(x, threshold):
"""
return the indices into *x* where *x* crosses some threshold from
below, e.g., the i's where::
x[i-1]<threshold and x[i]>=threshold
Example code::
import matplotlib.pyplot as plt
t = np.arange(0.0, 2.0, 0.1)
s = np.sin(2*np.pi*t)
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t, s, '-o')
ax.axhline(0.5)
ax.axhline(-0.5)
ind = cross_from_below(s, 0.5)
ax.vlines(t[ind], -1, 1)
ind = cross_from_above(s, -0.5)
ax.vlines(t[ind], -1, 1)
plt.show()
.. seealso::
:func:`cross_from_above` and :func:`contiguous_regions`
"""
x = np.asarray(x)
ind = np.nonzero( (x[:-1]<threshold) & (x[1:]>=threshold))[0]
if len(ind): return ind+1
else: return ind
def cross_from_above(x, threshold):
"""
return the indices into *x* where *x* crosses some threshold from
above, e.g., the i's where::
x[i-1]>=threshold and x[i]<threshold
.. seealso::
:func:`cross_from_below` and :func:`contiguous_regions`
"""
x = np.asarray(x)
ind = np.nonzero( (x[:-1]>=threshold) & (x[1:]<threshold))[0]
if len(ind): return ind+1
else: return ind
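# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hypothetical demo of cross_from_above: the returned indices are the
# positions where the series has just dropped below the threshold.
def _demo_cross_from_above():
    s = np.array([0.0, 1.0, 0.4, 1.2, 0.1])
    return cross_from_above(s, 0.5)            # expected: array([2, 4])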
##################################################
# Vector and path length geometry calculations
##################################################
def vector_lengths( X, P=2., axis=None ):
"""
Finds the length of a set of vectors in *n* dimensions. This is
like the :func:`numpy.linalg.norm` function for vectors, but has the ability to
work over a particular axis of the supplied array or matrix.
Computes ``(sum((x_i)^P))^(1/P)`` for each ``{x_i}`` being the
elements of *X* along the given axis. If *axis* is *None*,
compute over all elements of *X*.
"""
X = np.asarray(X)
return (np.sum(X**(P),axis=axis))**(1./P)
def distances_along_curve( X ):
"""
Computes the distances between successive points in *N* dimensions,
where *X* is an *M* x *N* array or matrix. The distance between
successive rows is the standard Euclidean distance.
"""
X = np.diff( X, axis=0 )
return vector_lengths(X,axis=1)
def path_length(X):
"""
Computes the cumulative distance travelled along a polygonal curve
in *N* dimensions, where *X* is an *M* x *N* array or matrix.
Returns an array of length *M* giving the distance along the curve
at each point (i.e., at each row of *X*).
"""
X = distances_along_curve(X)
return np.concatenate( (np.zeros(1), np.cumsum(X)) )
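# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hypothetical demo of path_length along three unit-length edges of a
# square; the result is the cumulative arc length at each vertex.
def _demo_path_length():
    X = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0]])
    return path_length(X)                      # expected: array([0., 1., 2., 3.])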
def quad2cubic(q0x, q0y, q1x, q1y, q2x, q2y):
"""
Converts a quadratic Bezier curve to a cubic approximation.
The inputs are the *x* and *y* coordinates of the three control
points of a quadratic curve, and the output is a tuple of *x* and
*y* coordinates of the four control points of the cubic curve.
"""
# c0x, c0y = q0x, q0y
c1x, c1y = q0x + 2./3. * (q1x - q0x), q0y + 2./3. * (q1y - q0y)
c2x, c2y = c1x + 1./3. * (q2x - q0x), c1y + 1./3. * (q2y - q0y)
# c3x, c3y = q2x, q2y
return q0x, q0y, c1x, c1y, c2x, c2y, q2x, q2y
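# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hypothetical check of quad2cubic: the cubic keeps the quadratic's
# endpoints and places its inner control points at q0 + 2/3*(q1-q0) and
# q2 + 2/3*(q1-q2).
def _demo_quad2cubic():
    pts = quad2cubic(0.0, 0.0, 1.0, 2.0, 2.0, 0.0)
    # expected: (0.0, 0.0, 2/3, 4/3, 4/3, 4/3, 2.0, 0.0)
    return pts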
def offset_line(y, yerr):
"""
Offsets an array *y* by +/- an error and returns a tuple (y - err, y + err).
The error term can be:
* A scalar. In this case, the returned tuple is obvious.
* A vector of the same length as *y*. The quantities y +/- err are computed
component-wise.
* A tuple of length 2. In this case, yerr[0] is the error below *y* and
yerr[1] is error above *y*. For example::
from pylab import *
x = linspace(0, 2*pi, num=100, endpoint=True)
y = sin(x)
y_minus, y_plus = mlab.offset_line(y, 0.1)
plot(x, y)
fill_between(x, y_minus, y2=y_plus)
show()
"""
if cbook.is_numlike(yerr) or (cbook.iterable(yerr) and len(yerr) == len(y)):
ymin = y - yerr
ymax = y + yerr
elif len(yerr) == 2:
ymin, ymax = y - yerr[0], y + yerr[1]
else:
raise ValueError("yerr must be scalar, 1xN or 2xN")
return ymin, ymax
| mit |
LiaoPan/scikit-learn | sklearn/cluster/__init__.py | 364 | 1228 | """
The :mod:`sklearn.cluster` module gathers popular unsupervised clustering
algorithms.
"""
from .spectral import spectral_clustering, SpectralClustering
from .mean_shift_ import (mean_shift, MeanShift,
estimate_bandwidth, get_bin_seeds)
from .affinity_propagation_ import affinity_propagation, AffinityPropagation
from .hierarchical import (ward_tree, AgglomerativeClustering, linkage_tree,
FeatureAgglomeration)
from .k_means_ import k_means, KMeans, MiniBatchKMeans
from .dbscan_ import dbscan, DBSCAN
from .bicluster import SpectralBiclustering, SpectralCoclustering
from .birch import Birch
__all__ = ['AffinityPropagation',
'AgglomerativeClustering',
'Birch',
'DBSCAN',
'KMeans',
'FeatureAgglomeration',
'MeanShift',
'MiniBatchKMeans',
'SpectralClustering',
'affinity_propagation',
'dbscan',
'estimate_bandwidth',
'get_bin_seeds',
'k_means',
'linkage_tree',
'mean_shift',
'spectral_clustering',
'ward_tree',
'SpectralBiclustering',
'SpectralCoclustering']
| bsd-3-clause |
bigdataelephants/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by pointing and clicking and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
mrry/tensorflow | tensorflow/examples/skflow/iris.py | 25 | 1649 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of DNNClassifier for Iris plant dataset."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from sklearn import cross_validation
from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn
def main(unused_argv):
# Load dataset.
iris = learn.datasets.load_dataset('iris')
x_train, x_test, y_train, y_test = cross_validation.train_test_split(
iris.data, iris.target, test_size=0.2, random_state=42)
# Build 3 layer DNN with 10, 20, 10 units respectively.
feature_columns = learn.infer_real_valued_columns_from_input(x_train)
classifier = learn.DNNClassifier(
feature_columns=feature_columns, hidden_units=[10, 20, 10], n_classes=3)
# Fit and predict.
classifier.fit(x_train, y_train, steps=200)
predictions = list(classifier.predict(x_test, as_iterable=True))
score = metrics.accuracy_score(y_test, predictions)
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
maheshakya/scikit-learn | sklearn/ensemble/tests/test_gradient_boosting.py | 5 | 36214 | """
Testing for the gradient boosting module (sklearn.ensemble.gradient_boosting).
"""
import numpy as np
import warnings
from sklearn import datasets
from sklearn.base import clone
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.ensemble.gradient_boosting import ZeroEstimator
from sklearn.metrics import mean_squared_error
from sklearn.utils import check_random_state, tosequence
from sklearn.utils.testing import assert_almost_equal, clean_warning_registry
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.validation import DataConversionWarning
from sklearn.utils.validation import NotFittedError
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [-1, 1, 1]
rng = np.random.RandomState(0)
# also load the boston dataset
# and randomly permute it
boston = datasets.load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
"""Check classification on a toy dataset."""
for loss in ('deviance', 'exponential'):
clf = GradientBoostingClassifier(loss=loss, n_estimators=10,
random_state=1)
assert_raises(ValueError, clf.predict, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(10, len(clf.estimators_))
deviance_decrease = (clf.train_score_[:-1] - clf.train_score_[1:])
assert np.any(deviance_decrease >= 0.0), \
"Train deviance does not monotonically decrease."
def test_parameter_checks():
"""Check input parameter validation."""
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(n_estimators=-1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(learning_rate=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='foobar').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_split=-1.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_samples_leaf=-1.).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=-1.).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(min_weight_fraction_leaf=0.6).fit,
X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=0.0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=1.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(subsample=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=-0.1).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(max_depth=0).fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(init={}).fit, X, y)
# test fit before feature importance
assert_raises(ValueError,
lambda: GradientBoostingClassifier().feature_importances_)
# deviance requires ``n_classes >= 2``.
assert_raises(ValueError,
lambda X, y: GradientBoostingClassifier(
loss='deviance').fit(X, y),
X, [0, 0, 0, 0])
def test_loss_function():
assert_raises(ValueError,
GradientBoostingClassifier(loss='ls').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='lad').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='quantile').fit, X, y)
assert_raises(ValueError,
GradientBoostingClassifier(loss='huber').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='deviance').fit, X, y)
assert_raises(ValueError,
GradientBoostingRegressor(loss='exponential').fit, X, y)
def test_classification_synthetic():
"""Test GradientBoostingClassifier on synthetic dataset used by
Hastie et al. in ESLII Example 12.7. """
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
for loss in ('deviance', 'exponential'):
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=1,
max_depth=1, loss=loss,
learning_rate=1.0, random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.09, \
"GB(loss={}) failed with error {}".format(loss, error_rate)
gbrt = GradientBoostingClassifier(n_estimators=200, min_samples_split=1,
max_depth=1,
learning_rate=1.0, subsample=0.5,
random_state=0)
gbrt.fit(X_train, y_train)
error_rate = (1.0 - gbrt.score(X_test, y_test))
assert error_rate < 0.08, ("Stochastic GradientBoostingClassifier(loss={}) "
"failed with error {}".format(loss, error_rate))
def test_boston():
"""Check consistency on dataset boston house prices with least squares
and least absolute deviation. """
for loss in ("ls", "lad", "huber"):
for subsample in (1.0, 0.5):
last_y_pred = None
for i, sample_weight in enumerate((None, np.ones(len(boston.target)),
2 * np.ones(len(boston.target)))):
clf = GradientBoostingRegressor(n_estimators=100, loss=loss,
max_depth=4, subsample=subsample,
min_samples_split=1,
random_state=1)
assert_raises(ValueError, clf.predict, boston.data)
clf.fit(boston.data, boston.target,
sample_weight=sample_weight)
y_pred = clf.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert mse < 6.0, "Failed with loss %s and " \
"mse = %.4f" % (loss, mse)
if last_y_pred is not None:
np.testing.assert_array_almost_equal(
last_y_pred, y_pred,
err_msg='pred_%d does not match last pred_%d for loss %r and subsample %r. '
% (i, i - 1, loss, subsample))
last_y_pred = y_pred
def test_iris():
"""Check consistency on dataset iris."""
for subsample in (1.0, 0.5):
for sample_weight in (None, np.ones(len(iris.target))):
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=subsample)
clf.fit(iris.data, iris.target, sample_weight=sample_weight)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (subsample, score)
def test_regression_synthetic():
"""Test on synthetic regression datasets used in Leo Breiman,
`Bagging Predictors`. Machine Learning 24(2): 123-140 (1996). """
random_state = check_random_state(1)
regression_params = {'n_estimators': 100, 'max_depth': 4,
'min_samples_split': 1, 'learning_rate': 0.1,
'loss': 'ls'}
# Friedman1
X, y = datasets.make_friedman1(n_samples=1200,
random_state=random_state, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor()
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 5.0, "Failed on Friedman1 with mse = %.4f" % mse
# Friedman2
X, y = datasets.make_friedman2(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 1700.0, "Failed on Friedman2 with mse = %.4f" % mse
# Friedman3
X, y = datasets.make_friedman3(n_samples=1200, random_state=random_state)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingRegressor(**regression_params)
clf.fit(X_train, y_train)
mse = mean_squared_error(y_test, clf.predict(X_test))
assert mse < 0.015, "Failed on Friedman3 with mse = %.4f" % mse
def test_feature_importances():
X = np.array(boston.data, dtype=np.float32)
y = np.array(boston.target, dtype=np.float32)
clf = GradientBoostingRegressor(n_estimators=100, max_depth=5,
min_samples_split=1, random_state=1)
clf.fit(X, y)
#feature_importances = clf.feature_importances_
assert_true(hasattr(clf, 'feature_importances_'))
X_new = clf.transform(X, threshold="mean")
assert_less(X_new.shape[1], X.shape[1])
feature_mask = clf.feature_importances_ > clf.feature_importances_.mean()
assert_array_almost_equal(X_new, X[:, feature_mask])
# true feature importance ranking
# true_ranking = np.array([3, 1, 8, 2, 10, 9, 4, 11, 0, 6, 7, 5, 12])
# assert_array_equal(true_ranking, feature_importances.argsort())
def test_probability_log():
"""Predict probabilities."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_check_inputs():
"""Test input checks (shape and type of X and y)."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y + [0, 1])
from scipy import sparse
X_sparse = sparse.csr_matrix(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(TypeError, clf.fit, X_sparse, y)
clf = GradientBoostingClassifier().fit(X, y)
assert_raises(TypeError, clf.predict, X_sparse)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
assert_raises(ValueError, clf.fit, X, y,
sample_weight=([1] * len(y)) + [0, 1])
def test_check_inputs_predict():
"""X has wrong shape """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, rng.rand(len(X)))
x = np.array([1.0, 2.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
x = np.array([])
assert_raises(ValueError, clf.predict, x)
x = np.array([1.0, 2.0, 3.0])[:, np.newaxis]
assert_raises(ValueError, clf.predict, x)
def test_check_max_features():
"""test if max_features is valid. """
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=0)
assert_raises(ValueError, clf.fit, X, y)
clf = GradientBoostingRegressor(n_estimators=100, random_state=1,
max_features=(len(X[0]) + 1))
assert_raises(ValueError, clf.fit, X, y)
def test_max_feature_regression():
"""Test to make sure random state is set properly. """
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
gbrt = GradientBoostingClassifier(n_estimators=100, min_samples_split=5,
max_depth=2, learning_rate=.1,
max_features=2, random_state=1)
gbrt.fit(X_train, y_train)
deviance = gbrt.loss_(y_test, gbrt.decision_function(X_test))
assert_true(deviance < 0.5, "GB failed with deviance %.4f" % deviance)
def test_max_feature_auto():
"""Test if max features is set properly for floats and str. """
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
_, n_features = X.shape
X_train = X[:2000]
y_train = y[:2000]
gbrt = GradientBoostingClassifier(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='auto')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, n_features)
gbrt = GradientBoostingRegressor(n_estimators=1, max_features=0.3)
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(n_features * 0.3))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='sqrt')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.sqrt(n_features)))
gbrt = GradientBoostingRegressor(n_estimators=1, max_features='log2')
gbrt.fit(X_train, y_train)
assert_equal(gbrt.max_features_, int(np.log2(n_features)))
def test_staged_predict():
"""Test whether staged decision function eventually gives
the same prediction.
"""
X, y = datasets.make_friedman1(n_samples=1200,
random_state=1, noise=1.0)
X_train, y_train = X[:200], y[:200]
X_test = X[200:]
clf = GradientBoostingRegressor()
# test raise ValueError if not fitted
assert_raises(ValueError, lambda X: np.fromiter(
clf.staged_predict(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# test if prediction for last stage equals ``predict``
for y in clf.staged_predict(X_test):
assert_equal(y.shape, y_pred.shape)
assert_array_equal(y_pred, y)
def test_staged_predict_proba():
"""Test whether staged predict proba eventually gives
the same prediction.
"""
X, y = datasets.make_hastie_10_2(n_samples=1200,
random_state=1)
X_train, y_train = X[:200], y[:200]
X_test, y_test = X[200:], y[200:]
clf = GradientBoostingClassifier(n_estimators=20)
# test raise NotFittedError if not fitted
assert_raises(NotFittedError, lambda X: np.fromiter(
clf.staged_predict_proba(X), dtype=np.float64), X_test)
clf.fit(X_train, y_train)
# test if prediction for last stage equals ``predict``
for y_pred in clf.staged_predict(X_test):
assert_equal(y_test.shape, y_pred.shape)
assert_array_equal(clf.predict(X_test), y_pred)
# test if prediction for last stage equals ``predict_proba``
for staged_proba in clf.staged_predict_proba(X_test):
assert_equal(y_test.shape[0], staged_proba.shape[0])
assert_equal(2, staged_proba.shape[1])
assert_array_equal(clf.predict_proba(X_test), staged_proba)
def test_serialization():
"""Check model serialization."""
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
try:
import cPickle as pickle
except ImportError:
import pickle
serialized_clf = pickle.dumps(clf, protocol=pickle.HIGHEST_PROTOCOL)
clf = None
clf = pickle.loads(serialized_clf)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_degenerate_targets():
"""Check if we can fit even though all targets are equal. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
# classifier should raise exception
assert_raises(ValueError, clf.fit, X, np.ones(len(X)))
clf = GradientBoostingRegressor(n_estimators=100, random_state=1)
clf.fit(X, np.ones(len(X)))
clf.predict(rng.rand(2))
assert_array_equal(np.ones((1,), dtype=np.float64),
clf.predict(rng.rand(2)))
def test_quantile_loss():
"""Check if quantile loss with alpha=0.5 equals lad. """
clf_quantile = GradientBoostingRegressor(n_estimators=100, loss='quantile',
max_depth=4, alpha=0.5,
random_state=7)
clf_quantile.fit(boston.data, boston.target)
y_quantile = clf_quantile.predict(boston.data)
clf_lad = GradientBoostingRegressor(n_estimators=100, loss='lad',
max_depth=4, random_state=7)
clf_lad.fit(boston.data, boston.target)
y_lad = clf_lad.predict(boston.data)
assert_array_almost_equal(y_quantile, y_lad, decimal=4)
def test_symbol_labels():
"""Test with non-integer class labels. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
symbol_y = tosequence(map(str, y))
clf.fit(X, symbol_y)
assert_array_equal(clf.predict(T), tosequence(map(str, true_result)))
assert_equal(100, len(clf.estimators_))
def test_float_class_labels():
"""Test with float class labels. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
float_y = np.asarray(y, dtype=np.float32)
clf.fit(X, float_y)
assert_array_equal(clf.predict(T),
np.asarray(true_result, dtype=np.float32))
assert_equal(100, len(clf.estimators_))
def test_shape_y():
"""Test with float class labels. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
y_ = np.asarray(y, dtype=np.int32)
y_ = y_[:, np.newaxis]
# This will raise a DataConversionWarning that we want to
# "always" raise, elsewhere the warnings gets ignored in the
# later tests, and the tests that check for this warning fail
assert_warns(DataConversionWarning, clf.fit, X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_mem_layout():
"""Test with different memory layouts of X and y"""
X_ = np.asfortranarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
X_ = np.ascontiguousarray(X)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X_, y)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.ascontiguousarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
y_ = np.asarray(y, dtype=np.int32)
y_ = np.asfortranarray(y_)
clf = GradientBoostingClassifier(n_estimators=100, random_state=1)
clf.fit(X, y_)
assert_array_equal(clf.predict(T), true_result)
assert_equal(100, len(clf.estimators_))
def test_oob_improvement():
"""Test if oob improvement has correct shape and regression test. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=0.5)
clf.fit(X, y)
assert clf.oob_improvement_.shape[0] == 100
# hard-coded regression test - change if modification in OOB computation
assert_array_almost_equal(clf.oob_improvement_[:5],
np.array([0.19, 0.15, 0.12, -0.12, -0.11]),
decimal=2)
def test_oob_improvement_raise():
"""Test if oob improvement has correct shape. """
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
subsample=1.0)
clf.fit(X, y)
assert_raises(AttributeError, lambda: clf.oob_improvement_)
def test_oob_multiclass_iris():
"""Check OOB improvement on multi-class dataset."""
clf = GradientBoostingClassifier(n_estimators=100, loss='deviance',
random_state=1, subsample=0.5)
clf.fit(iris.data, iris.target)
score = clf.score(iris.data, iris.target)
assert score > 0.9, "Failed with subsample %.1f " \
"and score = %f" % (0.5, score)
assert clf.oob_improvement_.shape[0] == clf.n_estimators
# hard-coded regression test - change if modification in OOB computation
# FIXME: the following snippet does not yield the same results on 32 bits
# assert_array_almost_equal(clf.oob_improvement_[:5],
# np.array([12.68, 10.45, 8.18, 6.43, 5.13]),
# decimal=2)
def test_verbose_output():
"""Check verbose=1 does not cause error. """
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=1, subsample=0.8)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# with OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 3) % (
'Iter', 'Train Loss', 'OOB Improve', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# one for 1-10 and then 9 for 20-100
assert_equal(10 + 9, n_lines)
def test_more_verbose_output():
"""Check verbose=2 does not cause error. """
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
sys.stdout = StringIO()
clf = GradientBoostingClassifier(n_estimators=100, random_state=1,
verbose=2)
clf.fit(X, y)
verbose_output = sys.stdout
sys.stdout = old_stdout
# check output
verbose_output.seek(0)
header = verbose_output.readline().rstrip()
# no OOB
true_header = ' '.join(['%10s'] + ['%16s'] * 2) % (
'Iter', 'Train Loss', 'Remaining Time')
assert_equal(true_header, header)
n_lines = sum(1 for l in verbose_output.readlines())
# 100 lines for n_estimators==100
assert_equal(100, n_lines)
def test_warm_start():
"""Test if warm start equals fit. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_n_estimators():
"""Test if warm start equals fit - set n_estimators. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=300, max_depth=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=300)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.predict(X), est.predict(X))
def test_warm_start_max_depth():
"""Test if possible to fit trees of different depth in ensemble. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, max_depth=2)
est.fit(X, y)
# last 10 trees have different depth
assert est.estimators_[0, 0].max_depth == 1
for i in range(1, 11):
assert est.estimators_[-i, 0].max_depth == 2
def test_warm_start_clear():
"""Test if fit clears state. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est_2 = Cls(n_estimators=100, max_depth=1, warm_start=True)
est_2.fit(X, y) # inits state
est_2.set_params(warm_start=False)
est_2.fit(X, y) # clears old state and equals est
assert_array_almost_equal(est_2.predict(X), est.predict(X))
def test_warm_start_zero_n_estimators():
"""Test if warm start with zero n_estimators raises error """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=0)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_smaller_n_estimators():
"""Test if warm start with smaller n_estimators raises error """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=99)
assert_raises(ValueError, est.fit, X, y)
def test_warm_start_equal_n_estimators():
"""Test if warm start with equal n_estimators does nothing """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1)
est.fit(X, y)
est2 = clone(est)
est2.set_params(n_estimators=est.n_estimators, warm_start=True)
est2.fit(X, y)
assert_array_almost_equal(est2.predict(X), est.predict(X))
def test_warm_start_oob_switch():
"""Test if oob can be turned on during warm start. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=100, max_depth=1, warm_start=True)
est.fit(X, y)
est.set_params(n_estimators=110, subsample=0.5)
est.fit(X, y)
assert_array_equal(est.oob_improvement_[:100], np.zeros(100))
# the last 10 are not zeros
assert_array_equal(est.oob_improvement_[-10:] == 0.0,
                               np.zeros(10, dtype=bool))
def test_warm_start_oob():
"""Test if warm start OOB equals fit. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=200, max_depth=1, subsample=0.5,
random_state=1)
est.fit(X, y)
est_ws = Cls(n_estimators=100, max_depth=1, subsample=0.5,
random_state=1, warm_start=True)
est_ws.fit(X, y)
est_ws.set_params(n_estimators=200)
est_ws.fit(X, y)
assert_array_almost_equal(est_ws.oob_improvement_[:100],
est.oob_improvement_[:100])
def early_stopping_monitor(i, est, locals):
"""Returns True on the 10th iteration. """
if i == 9:
return True
else:
return False
def test_monitor_early_stopping():
"""Test if monitor return value works. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
for Cls in [GradientBoostingRegressor, GradientBoostingClassifier]:
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20) # this is not altered
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.train_score_.shape[0], 30)
est = Cls(n_estimators=20, max_depth=1, random_state=1, subsample=0.5,
warm_start=True)
est.fit(X, y, monitor=early_stopping_monitor)
assert_equal(est.n_estimators, 20)
assert_equal(est.estimators_.shape[0], 10)
assert_equal(est.train_score_.shape[0], 10)
assert_equal(est.oob_improvement_.shape[0], 10)
# try refit
est.set_params(n_estimators=30, warm_start=False)
est.fit(X, y)
assert_equal(est.n_estimators, 30)
assert_equal(est.train_score_.shape[0], 30)
assert_equal(est.estimators_.shape[0], 30)
assert_equal(est.oob_improvement_.shape[0], 30)
def test_complete_classification():
"""Test greedy trees with max_depth + 1 leafs. """
from sklearn.tree._tree import TREE_LEAF
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
k = 4
est = GradientBoostingClassifier(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k+1)
est.fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, k)
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_complete_regression():
"""Test greedy trees with max_depth + 1 leafs. """
from sklearn.tree._tree import TREE_LEAF
k = 4
est = GradientBoostingRegressor(n_estimators=20, max_depth=None,
random_state=1, max_leaf_nodes=k+1)
est.fit(boston.data, boston.target)
tree = est.estimators_[-1, 0].tree_
assert_equal(tree.children_left[tree.children_left == TREE_LEAF].shape[0],
k + 1)
def test_zero_estimator_reg():
"""Test if ZeroEstimator works for regression. """
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(boston.data, boston.target)
y_pred = est.predict(boston.data)
mse = mean_squared_error(boston.target, y_pred)
assert_almost_equal(mse, 33.0, decimal=0)
est = GradientBoostingRegressor(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, boston.data, boston.target)
def test_zero_estimator_clf():
"""Test if ZeroEstimator works for classification. """
X = iris.data
y = np.array(iris.target)
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init=ZeroEstimator())
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
# binary clf
mask = y != 0
y[mask] = 1
y[~mask] = 0
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='zero')
est.fit(X, y)
assert est.score(X, y) > 0.96
est = GradientBoostingClassifier(n_estimators=20, max_depth=1,
random_state=1, init='foobar')
assert_raises(ValueError, est.fit, X, y)
def test_max_leaf_nodes_max_depth():
"""Test preceedence of max_leaf_nodes over max_depth. """
X, y = datasets.make_hastie_10_2(n_samples=100, random_state=1)
all_estimators = [GradientBoostingRegressor,
GradientBoostingClassifier]
k = 4
for GBEstimator in all_estimators:
est = GBEstimator(max_depth=1, max_leaf_nodes=k).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_greater(tree.max_depth, 1)
est = GBEstimator(max_depth=1).fit(X, y)
tree = est.estimators_[0, 0].tree_
assert_equal(tree.max_depth, 1)
def test_warm_start_wo_nestimators_change():
"""Test if warm_start does nothing if n_estimators is not changed.
Regression test for #3513.
"""
clf = GradientBoostingClassifier(n_estimators=10, warm_start=True)
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
clf.fit([[0, 1], [2, 3]], [0, 1])
assert clf.estimators_.shape[0] == 10
def test_probability_exponential():
"""Predict probabilities."""
clf = GradientBoostingClassifier(loss='exponential',
n_estimators=100, random_state=1)
assert_raises(ValueError, clf.predict_proba, T)
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# check if probabilities are in [0, 1].
y_proba = clf.predict_proba(T)
assert np.all(y_proba >= 0.0)
assert np.all(y_proba <= 1.0)
score = clf.decision_function(T).ravel()
assert_array_equal(y_proba[:, 1],
1.0 / (1.0 + np.exp(-2 * score)))
# derive predictions from probabilities
y_pred = clf.classes_.take(y_proba.argmax(axis=1), axis=0)
assert_array_equal(y_pred, true_result)
def test_non_uniform_weights_toy_edge_case_reg():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('ls', 'huber', 'lad', 'quantile'):
gb = GradientBoostingRegressor(n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_true(gb.predict([[1, 0]])[0] > 0.5)
def test_non_uniform_weights_toy_edge_case_clf():
X = [[1, 0],
[1, 0],
[1, 0],
[0, 1],
]
y = [0, 0, 1, 0]
# ignore the first 2 training samples by setting their weight to 0
sample_weight = [0, 0, 1, 1]
for loss in ('deviance', 'exponential'):
gb = GradientBoostingClassifier(n_estimators=5)
gb.fit(X, y, sample_weight=sample_weight)
assert_array_equal(gb.predict([[1, 0]]), [1])
if __name__ == "__main__":
import nose
nose.runmodule()
| bsd-3-clause |
xray/xray | xarray/tests/test_formatting.py | 1 | 15393 | import sys
from textwrap import dedent
import numpy as np
import pandas as pd
import pytest
import xarray as xr
from xarray.core import formatting
from xarray.core.npcompat import IS_NEP18_ACTIVE
from . import raises_regex
class TestFormatting:
def test_get_indexer_at_least_n_items(self):
cases = [
((20,), (slice(10),), (slice(-10, None),)),
((3, 20), (0, slice(10)), (-1, slice(-10, None))),
((2, 10), (0, slice(10)), (-1, slice(-10, None))),
((2, 5), (slice(2), slice(None)), (slice(-2, None), slice(None))),
((1, 2, 5), (0, slice(2), slice(None)), (-1, slice(-2, None), slice(None))),
((2, 3, 5), (0, slice(2), slice(None)), (-1, slice(-2, None), slice(None))),
(
(1, 10, 1),
(0, slice(10), slice(None)),
(-1, slice(-10, None), slice(None)),
),
(
(2, 5, 1),
(slice(2), slice(None), slice(None)),
(slice(-2, None), slice(None), slice(None)),
),
((2, 5, 3), (0, slice(4), slice(None)), (-1, slice(-4, None), slice(None))),
(
(2, 3, 3),
(slice(2), slice(None), slice(None)),
(slice(-2, None), slice(None), slice(None)),
),
]
for shape, start_expected, end_expected in cases:
actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=False)
assert start_expected == actual
actual = formatting._get_indexer_at_least_n_items(shape, 10, from_end=True)
assert end_expected == actual
def test_first_n_items(self):
array = np.arange(100).reshape(10, 5, 2)
for n in [3, 10, 13, 100, 200]:
actual = formatting.first_n_items(array, n)
expected = array.flat[:n]
assert (expected == actual).all()
with raises_regex(ValueError, "at least one item"):
formatting.first_n_items(array, 0)
def test_last_n_items(self):
array = np.arange(100).reshape(10, 5, 2)
for n in [3, 10, 13, 100, 200]:
actual = formatting.last_n_items(array, n)
expected = array.flat[-n:]
assert (expected == actual).all()
with raises_regex(ValueError, "at least one item"):
formatting.first_n_items(array, 0)
def test_last_item(self):
array = np.arange(100)
reshape = ((10, 10), (1, 100), (2, 2, 5, 5))
expected = np.array([99])
for r in reshape:
result = formatting.last_item(array.reshape(r))
assert result == expected
def test_format_item(self):
cases = [
(pd.Timestamp("2000-01-01T12"), "2000-01-01T12:00:00"),
(pd.Timestamp("2000-01-01"), "2000-01-01"),
(pd.Timestamp("NaT"), "NaT"),
(pd.Timedelta("10 days 1 hour"), "10 days 01:00:00"),
(pd.Timedelta("-3 days"), "-3 days +00:00:00"),
(pd.Timedelta("3 hours"), "0 days 03:00:00"),
(pd.Timedelta("NaT"), "NaT"),
("foo", "'foo'"),
(b"foo", "b'foo'"),
(1, "1"),
(1.0, "1.0"),
]
for item, expected in cases:
actual = formatting.format_item(item)
assert expected == actual
def test_format_items(self):
cases = [
(np.arange(4) * np.timedelta64(1, "D"), "0 days 1 days 2 days 3 days"),
(
np.arange(4) * np.timedelta64(3, "h"),
"00:00:00 03:00:00 06:00:00 09:00:00",
),
(
np.arange(4) * np.timedelta64(500, "ms"),
"00:00:00 00:00:00.500000 00:00:01 00:00:01.500000",
),
(pd.to_timedelta(["NaT", "0s", "1s", "NaT"]), "NaT 00:00:00 00:00:01 NaT"),
(
pd.to_timedelta(["1 day 1 hour", "1 day", "0 hours"]),
"1 days 01:00:00 1 days 00:00:00 0 days 00:00:00",
),
([1, 2, 3], "1 2 3"),
]
for item, expected in cases:
actual = " ".join(formatting.format_items(item))
assert expected == actual
def test_format_array_flat(self):
actual = formatting.format_array_flat(np.arange(100), 2)
expected = "..."
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 9)
expected = "0 ... 99"
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 10)
expected = "0 1 ... 99"
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 13)
expected = "0 1 ... 98 99"
assert expected == actual
actual = formatting.format_array_flat(np.arange(100), 15)
expected = "0 1 2 ... 98 99"
assert expected == actual
# NB: Probably not ideal; an alternative would be cutting after the
# first ellipsis
actual = formatting.format_array_flat(np.arange(100.0), 11)
expected = "0.0 ... ..."
assert expected == actual
actual = formatting.format_array_flat(np.arange(100.0), 12)
expected = "0.0 ... 99.0"
assert expected == actual
actual = formatting.format_array_flat(np.arange(3), 5)
expected = "0 1 2"
assert expected == actual
actual = formatting.format_array_flat(np.arange(4.0), 11)
expected = "0.0 ... 3.0"
assert expected == actual
actual = formatting.format_array_flat(np.arange(0), 0)
expected = ""
assert expected == actual
actual = formatting.format_array_flat(np.arange(1), 1)
expected = "0"
assert expected == actual
actual = formatting.format_array_flat(np.arange(2), 3)
expected = "0 1"
assert expected == actual
actual = formatting.format_array_flat(np.arange(4), 7)
expected = "0 1 2 3"
assert expected == actual
actual = formatting.format_array_flat(np.arange(5), 7)
expected = "0 ... 4"
assert expected == actual
long_str = [" ".join(["hello world" for _ in range(100)])]
actual = formatting.format_array_flat(np.asarray([long_str]), 21)
expected = "'hello world hello..."
assert expected == actual
def test_pretty_print(self):
assert formatting.pretty_print("abcdefghij", 8) == "abcde..."
assert formatting.pretty_print("ß", 1) == "ß"
def test_maybe_truncate(self):
assert formatting.maybe_truncate("ß", 10) == "ß"
def test_format_timestamp_out_of_bounds(self):
from datetime import datetime
date = datetime(1300, 12, 1)
expected = "1300-12-01"
result = formatting.format_timestamp(date)
assert result == expected
date = datetime(2300, 12, 1)
expected = "2300-12-01"
result = formatting.format_timestamp(date)
assert result == expected
def test_attribute_repr(self):
short = formatting.summarize_attr("key", "Short string")
long = formatting.summarize_attr("key", 100 * "Very long string ")
newlines = formatting.summarize_attr("key", "\n\n\n")
tabs = formatting.summarize_attr("key", "\t\t\t")
assert short == " key: Short string"
assert len(long) <= 80
assert long.endswith("...")
assert "\n" not in newlines
assert "\t" not in tabs
def test_diff_array_repr(self):
da_a = xr.DataArray(
np.array([[1, 2, 3], [4, 5, 6]], dtype="int64"),
dims=("x", "y"),
coords={
"x": np.array(["a", "b"], dtype="U1"),
"y": np.array([1, 2, 3], dtype="int64"),
},
attrs={"units": "m", "description": "desc"},
)
da_b = xr.DataArray(
np.array([1, 2], dtype="int64"),
dims="x",
coords={
"x": np.array(["a", "c"], dtype="U1"),
"label": ("x", np.array([1, 2], dtype="int64")),
},
attrs={"units": "kg"},
)
byteorder = "<" if sys.byteorder == "little" else ">"
expected = dedent(
"""\
Left and right DataArray objects are not identical
Differing dimensions:
(x: 2, y: 3) != (x: 2)
Differing values:
L
array([[1, 2, 3],
[4, 5, 6]], dtype=int64)
R
array([1, 2], dtype=int64)
Differing coordinates:
L * x (x) %cU1 'a' 'b'
R * x (x) %cU1 'a' 'c'
Coordinates only on the left object:
* y (y) int64 1 2 3
Coordinates only on the right object:
label (x) int64 1 2
Differing attributes:
L units: m
R units: kg
Attributes only on the left object:
description: desc"""
% (byteorder, byteorder)
)
actual = formatting.diff_array_repr(da_a, da_b, "identical")
try:
assert actual == expected
except AssertionError:
# depending on platform, dtype may not be shown in numpy array repr
assert actual == expected.replace(", dtype=int64", "")
va = xr.Variable(
"x", np.array([1, 2, 3], dtype="int64"), {"title": "test Variable"}
)
vb = xr.Variable(("x", "y"), np.array([[1, 2, 3], [4, 5, 6]], dtype="int64"))
expected = dedent(
"""\
Left and right Variable objects are not equal
Differing dimensions:
(x: 3) != (x: 2, y: 3)
Differing values:
L
array([1, 2, 3], dtype=int64)
R
array([[1, 2, 3],
[4, 5, 6]], dtype=int64)"""
)
actual = formatting.diff_array_repr(va, vb, "equals")
try:
assert actual == expected
except AssertionError:
assert actual == expected.replace(", dtype=int64", "")
@pytest.mark.filterwarnings("error")
def test_diff_attrs_repr_with_array(self):
attrs_a = {"attr": np.array([0, 1])}
attrs_b = {"attr": 1}
expected = dedent(
"""\
Differing attributes:
L attr: [0 1]
R attr: 1
"""
).strip()
actual = formatting.diff_attrs_repr(attrs_a, attrs_b, "equals")
assert expected == actual
attrs_b = {"attr": np.array([-3, 5])}
expected = dedent(
"""\
Differing attributes:
L attr: [0 1]
R attr: [-3 5]
"""
).strip()
actual = formatting.diff_attrs_repr(attrs_a, attrs_b, "equals")
assert expected == actual
# should not raise a warning
attrs_b = {"attr": np.array([0, 1, 2])}
expected = dedent(
"""\
Differing attributes:
L attr: [0 1]
R attr: [0 1 2]
"""
).strip()
actual = formatting.diff_attrs_repr(attrs_a, attrs_b, "equals")
assert expected == actual
def test_diff_dataset_repr(self):
ds_a = xr.Dataset(
data_vars={
"var1": (("x", "y"), np.array([[1, 2, 3], [4, 5, 6]], dtype="int64")),
"var2": ("x", np.array([3, 4], dtype="int64")),
},
coords={
"x": np.array(["a", "b"], dtype="U1"),
"y": np.array([1, 2, 3], dtype="int64"),
},
attrs={"units": "m", "description": "desc"},
)
ds_b = xr.Dataset(
data_vars={"var1": ("x", np.array([1, 2], dtype="int64"))},
coords={
"x": ("x", np.array(["a", "c"], dtype="U1"), {"source": 0}),
"label": ("x", np.array([1, 2], dtype="int64")),
},
attrs={"units": "kg"},
)
byteorder = "<" if sys.byteorder == "little" else ">"
expected = dedent(
"""\
Left and right Dataset objects are not identical
Differing dimensions:
(x: 2, y: 3) != (x: 2)
Differing coordinates:
L * x (x) %cU1 'a' 'b'
R * x (x) %cU1 'a' 'c'
source: 0
Coordinates only on the left object:
* y (y) int64 1 2 3
Coordinates only on the right object:
label (x) int64 1 2
Differing data variables:
L var1 (x, y) int64 1 2 3 4 5 6
R var1 (x) int64 1 2
Data variables only on the left object:
var2 (x) int64 3 4
Differing attributes:
L units: m
R units: kg
Attributes only on the left object:
description: desc"""
% (byteorder, byteorder)
)
actual = formatting.diff_dataset_repr(ds_a, ds_b, "identical")
assert actual == expected
def test_array_repr(self):
ds = xr.Dataset(coords={"foo": [1, 2, 3], "bar": [1, 2, 3]})
ds[(1, 2)] = xr.DataArray([0], dims="test")
actual = formatting.array_repr(ds[(1, 2)])
expected = dedent(
"""\
<xarray.DataArray (1, 2) (test: 1)>
array([0])
Dimensions without coordinates: test"""
)
assert actual == expected
@pytest.mark.skipif(not IS_NEP18_ACTIVE, reason="requires __array_function__")
def test_inline_variable_array_repr_custom_repr():
class CustomArray:
def __init__(self, value, attr):
self.value = value
self.attr = attr
def _repr_inline_(self, width):
formatted = f"({self.attr}) {self.value}"
if len(formatted) > width:
formatted = f"({self.attr}) ..."
return formatted
def __array_function__(self, *args, **kwargs):
return NotImplemented
@property
def shape(self):
return self.value.shape
@property
def dtype(self):
return self.value.dtype
@property
def ndim(self):
return self.value.ndim
value = CustomArray(np.array([20, 40]), "m")
variable = xr.Variable("x", value)
max_width = 10
actual = formatting.inline_variable_array_repr(variable, max_width=10)
assert actual == value._repr_inline_(max_width)
def test_set_numpy_options():
original_options = np.get_printoptions()
with formatting.set_numpy_options(threshold=10):
assert len(repr(np.arange(500))) < 200
# original options are restored
assert np.get_printoptions() == original_options
def test_short_numpy_repr():
cases = [
np.random.randn(500),
np.random.randn(20, 20),
np.random.randn(5, 10, 15),
np.random.randn(5, 10, 15, 3),
np.random.randn(100, 5, 1),
]
# number of lines:
# for default numpy repr: 167, 140, 254, 248, 599
# for short_numpy_repr: 1, 7, 24, 19, 25
for array in cases:
num_lines = formatting.short_numpy_repr(array).count("\n") + 1
assert num_lines < 30
def test_large_array_repr_length():
da = xr.DataArray(np.random.randn(100, 5, 1))
result = repr(da).splitlines()
assert len(result) < 50
| apache-2.0 |
Eric89GXL/scikit-learn | examples/decomposition/plot_pca_3d.py | 8 | 2410 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import pylab as pl
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = pl.figure(fig_num, figsize=(4, 3))
pl.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density, marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
pl.show()
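###############################################################################
# Illustrative sketch (not part of the original example; the helper name is
# hypothetical): the comment inside plot_figs notes that the PCA step could be
# replaced by SciPy's SVD. The function below checks that equivalence on the
# data generated above. PCA centres the data first, so the SVD is taken on the
# centred matrix, and absolute values are compared because singular vectors are
# only defined up to sign.
from scipy import linalg
def _check_pca_svd_equivalence():
    Y = np.c_[a, b, c]
    Y_centered = Y - Y.mean(axis=0)
    _, singular_values, V_svd = linalg.svd(Y_centered, full_matrices=False)
    pca = PCA(n_components=3).fit(Y)
    # Rows of V_svd and pca.components_ span the same directions (up to sign).
    return np.allclose(np.abs(V_svd), np.abs(pca.components_))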
| bsd-3-clause |
lhilt/scipy | scipy/ndimage/interpolation.py | 4 | 27090 | # Copyright (C) 2003-2005 Peter J. Verveer
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# 3. The name of the author may not be used to endorse or promote
# products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
# OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
# GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import division, print_function, absolute_import
import itertools
import numpy
import warnings
from . import _ni_support
from . import _nd_image
from ._ni_docstrings import docdict
from scipy._lib import doccer
# Change the default 'reflect' to 'constant' via modifying a copy of docdict
docdict_copy = docdict.copy()
del docdict
docdict_copy['mode'] = docdict_copy['mode'].replace("Default is 'reflect'",
"Default is 'constant'")
docfiller = doccer.filldoc(docdict_copy)
__all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform',
'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate']
@docfiller
def spline_filter1d(input, order=3, axis=-1, output=numpy.float64,
mode='mirror'):
"""
Calculate a one-dimensional spline filter along the given axis.
The lines of the array along the given axis are filtered by a
spline filter. The order of the spline must be >= 2 and <= 5.
Parameters
----------
%(input)s
order : int, optional
The order of the spline, default is 3.
axis : int, optional
The axis along which the spline filter is applied. Default is the last
axis.
output : ndarray or dtype, optional
The array in which to place the output, or the dtype of the returned
array. Default is ``numpy.float64``.
%(mode)s
Returns
-------
spline_filter1d : ndarray
The filtered input.
Notes
-----
All functions in `ndimage.interpolation` do spline interpolation of
the input image. If using b-splines of `order > 1`, the input image
values have to be converted to b-spline coefficients first, which is
done by applying this one-dimensional filter sequentially along all
axes of the input. All functions that require b-spline coefficients
will automatically filter their inputs, a behavior controllable with
the `prefilter` keyword argument. For functions that accept a `mode`
parameter, the result will only be correct if it matches the `mode`
used when filtering.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if order in [0, 1]:
output[...] = numpy.array(input)
else:
mode = _ni_support._extend_mode_to_code(mode)
axis = _ni_support._check_axis(axis, input.ndim)
_nd_image.spline_filter1d(input, order, axis, output, mode)
return output
def spline_filter(input, order=3, output=numpy.float64, mode='mirror'):
"""
Multi-dimensional spline filter.
For more details, see `spline_filter1d`.
See Also
--------
spline_filter1d
Notes
-----
The multi-dimensional filter is implemented as a sequence of
one-dimensional spline filters. The intermediate arrays are stored
in the same data type as the output. Therefore, for output types
with a limited precision, the results may be imprecise because
intermediate results may be stored with insufficient precision.
"""
if order < 2 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
output = _ni_support._get_output(output, input)
if order not in [0, 1] and input.ndim > 0:
for axis in range(input.ndim):
spline_filter1d(input, order, axis, output=output, mode=mode)
input = output
else:
output[...] = input[...]
return output
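# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): as described in the `spline_filter1d` notes, prefiltering an
# array manually and passing ``prefilter=False`` to an interpolation routine
# gives the same result as letting that routine prefilter internally. It uses
# `shift`, which is defined later in this module.
def _prefilter_equivalence_example():
    data = numpy.arange(25, dtype=numpy.float64).reshape(5, 5)
    coefficients = spline_filter(data, order=3)
    shifted_internal = shift(data, 0.5, order=3, prefilter=True)
    shifted_manual = shift(coefficients, 0.5, order=3, prefilter=False)
    return numpy.allclose(shifted_internal, shifted_manual)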
@docfiller
def geometric_transform(input, mapping, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True,
extra_arguments=(), extra_keywords={}):
"""
Apply an arbitrary geometric transform.
The given mapping function is used to find, for each point in the
output, the corresponding coordinates in the input. The value of the
input at those coordinates is determined by spline interpolation of
the requested order.
Parameters
----------
%(input)s
mapping : {callable, scipy.LowLevelCallable}
A callable object that accepts a tuple of length equal to the output
array rank, and returns the corresponding input coordinates as a tuple
of length equal to the input array rank.
output_shape : tuple of ints, optional
Shape tuple.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
extra_arguments : tuple, optional
Extra arguments passed to `mapping`.
extra_keywords : dict, optional
Extra keywords passed to `mapping`.
Returns
-------
output : ndarray
The filtered input.
See Also
--------
map_coordinates, affine_transform, spline_filter1d
Notes
-----
This function also accepts low-level callback functions with one
the following signatures and wrapped in `scipy.LowLevelCallable`:
.. code:: c
int mapping(npy_intp *output_coordinates, double *input_coordinates,
int output_rank, int input_rank, void *user_data)
int mapping(intptr_t *output_coordinates, double *input_coordinates,
int output_rank, int input_rank, void *user_data)
The calling function iterates over the elements of the output array,
calling the callback function at each element. The coordinates of the
current output element are passed through ``output_coordinates``. The
callback function must return the coordinates at which the input must
be interpolated in ``input_coordinates``. The rank of the input and
output arrays are given by ``input_rank`` and ``output_rank``
respectively. ``user_data`` is the data pointer provided
to `scipy.LowLevelCallable` as-is.
The callback function must return an integer error status that is zero
if something went wrong and one otherwise. If an error occurs, you should
normally set the python error status with an informative message
before returning, otherwise a default error message is set by the
calling function.
In addition, some other low-level function pointer specifications
are accepted, but these are for backward compatibility only and should
not be used in new code.
Examples
--------
>>> import numpy as np
>>> from scipy.ndimage import geometric_transform
>>> a = np.arange(12.).reshape((4, 3))
>>> def shift_func(output_coords):
... return (output_coords[0] - 0.5, output_coords[1] - 0.5)
...
>>> geometric_transform(a, shift_func)
array([[ 0. , 0. , 0. ],
[ 0. , 1.362, 2.738],
[ 0. , 4.812, 6.187],
[ 0. , 8.263, 9.637]])
>>> b = [1, 2, 3, 4, 5]
>>> def shift_func(output_coords):
... return (output_coords[0] - 3,)
...
>>> geometric_transform(b, shift_func, mode='constant')
array([0, 0, 0, 1, 2])
>>> geometric_transform(b, shift_func, mode='nearest')
array([1, 1, 1, 1, 2])
>>> geometric_transform(b, shift_func, mode='reflect')
array([3, 2, 1, 1, 2])
>>> geometric_transform(b, shift_func, mode='wrap')
array([2, 3, 4, 1, 2])
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input, shape=output_shape)
_nd_image.geometric_transform(filtered, mapping, None, None, None, output,
order, mode, cval, extra_arguments,
extra_keywords)
return output
@docfiller
def map_coordinates(input, coordinates, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Map the input array to new coordinates by interpolation.
The array of coordinates is used to find, for each point in the output,
the corresponding coordinates in the input. The value of the input at
those coordinates is determined by spline interpolation of the
requested order.
The shape of the output is derived from that of the coordinate
array by dropping the first axis. The values of the array along
the first axis are the coordinates in the input array at which the
output value is found.
Parameters
----------
%(input)s
coordinates : array_like
The coordinates at which `input` is evaluated.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
map_coordinates : ndarray
The result of transforming the input. The shape of the output is
derived from that of `coordinates` by dropping the first axis.
See Also
--------
spline_filter, geometric_transform, scipy.interpolate
Examples
--------
>>> from scipy import ndimage
>>> a = np.arange(12.).reshape((4, 3))
>>> a
array([[ 0., 1., 2.],
[ 3., 4., 5.],
[ 6., 7., 8.],
[ 9., 10., 11.]])
>>> ndimage.map_coordinates(a, [[0.5, 2], [0.5, 1]], order=1)
array([ 2., 7.])
Above, the interpolated value of a[0.5, 0.5] gives output[0], while
a[2, 1] is output[1].
>>> inds = np.array([[0.5, 2], [0.5, 4]])
>>> ndimage.map_coordinates(a, inds, order=1, cval=-33.3)
array([ 2. , -33.3])
>>> ndimage.map_coordinates(a, inds, order=1, mode='nearest')
array([ 2., 8.])
>>> ndimage.map_coordinates(a, inds, order=1, cval=0, output=bool)
array([ True, False], dtype=bool)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
coordinates = numpy.asarray(coordinates)
if numpy.iscomplexobj(coordinates):
raise TypeError('Complex type not supported')
output_shape = coordinates.shape[1:]
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
if coordinates.shape[0] != input.ndim:
raise RuntimeError('invalid shape for coordinate array')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input,
shape=output_shape)
_nd_image.geometric_transform(filtered, None, coordinates, None, None,
output, order, mode, cval, None, None)
return output
@docfiller
def affine_transform(input, matrix, offset=0.0, output_shape=None,
output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Apply an affine transformation.
Given an output image pixel index vector ``o``, the pixel value
is determined from the input image at position
``np.dot(matrix, o) + offset``.
This does 'pull' (or 'backward') resampling, transforming the output space
to the input to locate data. Affine transformations are often described in
the 'push' (or 'forward') direction, transforming input to output. If you
have a matrix for the 'push' transformation, use its inverse
(:func:`numpy.linalg.inv`) in this function.
Parameters
----------
%(input)s
matrix : ndarray
The inverse coordinate transformation matrix, mapping output
coordinates to input coordinates. If ``ndim`` is the number of
dimensions of ``input``, the given matrix must have one of the
following shapes:
- ``(ndim, ndim)``: the linear transformation matrix for each
output coordinate.
- ``(ndim,)``: assume that the 2D transformation matrix is
diagonal, with the diagonal specified by the given value. A more
efficient algorithm is then used that exploits the separability
of the problem.
- ``(ndim + 1, ndim + 1)``: assume that the transformation is
specified using homogeneous coordinates [1]_. In this case, any
value passed to ``offset`` is ignored.
- ``(ndim, ndim + 1)``: as above, but the bottom row of a
homogeneous transformation matrix is always ``[0, 0, ..., 1]``,
and may be omitted.
offset : float or sequence, optional
The offset into the array where the transform is applied. If a float,
`offset` is the same for each axis. If a sequence, `offset` should
contain one value for each axis.
output_shape : tuple of ints, optional
Shape tuple.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
affine_transform : ndarray
The transformed input.
Notes
-----
The given matrix and offset are used to find for each point in the
output the corresponding coordinates in the input by an affine
transformation. The value of the input at those coordinates is
determined by spline interpolation of the requested order. Points
outside the boundaries of the input are filled according to the given
mode.
.. versionchanged:: 0.18.0
Previously, the exact interpretation of the affine transformation
depended on whether the matrix was supplied as a one-dimensional or
two-dimensional array. If a one-dimensional array was supplied
to the matrix parameter, the output pixel value at index ``o``
was determined from the input image at position
``matrix * (o + offset)``.
References
----------
.. [1] https://en.wikipedia.org/wiki/Homogeneous_coordinates
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if output_shape is None:
output_shape = input.shape
if input.ndim < 1 or len(output_shape) < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input,
shape=output_shape)
matrix = numpy.asarray(matrix, dtype=numpy.float64)
if matrix.ndim not in [1, 2] or matrix.shape[0] < 1:
raise RuntimeError('no proper affine matrix provided')
if (matrix.ndim == 2 and matrix.shape[1] == input.ndim + 1 and
(matrix.shape[0] in [input.ndim, input.ndim + 1])):
if matrix.shape[0] == input.ndim + 1:
exptd = [0] * input.ndim + [1]
if not numpy.all(matrix[input.ndim] == exptd):
msg = ('Expected homogeneous transformation matrix with '
'shape %s for image shape %s, but bottom row was '
'not equal to %s' % (matrix.shape, input.shape, exptd))
raise ValueError(msg)
# assume input is homogeneous coordinate transformation matrix
offset = matrix[:input.ndim, input.ndim]
matrix = matrix[:input.ndim, :input.ndim]
if matrix.shape[0] != input.ndim:
raise RuntimeError('affine matrix has wrong number of rows')
if matrix.ndim == 2 and matrix.shape[1] != output.ndim:
raise RuntimeError('affine matrix has wrong number of columns')
if not matrix.flags.contiguous:
matrix = matrix.copy()
offset = _ni_support._normalize_sequence(offset, input.ndim)
offset = numpy.asarray(offset, dtype=numpy.float64)
if offset.ndim != 1 or offset.shape[0] < 1:
raise RuntimeError('no proper offset provided')
if not offset.flags.contiguous:
offset = offset.copy()
if matrix.ndim == 1:
warnings.warn(
"The behaviour of affine_transform with a one-dimensional "
"array supplied for the matrix parameter has changed in "
"scipy 0.18.0."
)
_nd_image.zoom_shift(filtered, matrix, offset/matrix, output, order,
mode, cval)
else:
_nd_image.geometric_transform(filtered, None, None, matrix, offset,
output, order, mode, cval, None, None)
return output
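# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): the (ndim, ndim) matrix plus ``offset`` form and the
# homogeneous (ndim + 1, ndim + 1) form described in the docstring express the
# same pull transform, so the two calls below produce identical output.
def _affine_matrix_forms_example():
    image = numpy.arange(16, dtype=numpy.float64).reshape(4, 4)
    swap = numpy.array([[0.0, 1.0],
                        [1.0, 0.0]])
    out_plain = affine_transform(image, swap, offset=[1.0, 0.0], order=1)
    homogeneous = numpy.array([[0.0, 1.0, 1.0],
                               [1.0, 0.0, 0.0],
                               [0.0, 0.0, 1.0]])
    out_homogeneous = affine_transform(image, homogeneous, order=1)
    return numpy.allclose(out_plain, out_homogeneous)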
@docfiller
def shift(input, shift, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Shift an array.
The array is shifted using spline interpolation of the requested order.
Points outside the boundaries of the input are filled according to the
given mode.
Parameters
----------
%(input)s
shift : float or sequence
The shift along the axes. If a float, `shift` is the same for each
axis. If a sequence, `shift` should contain one value for each axis.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
shift : ndarray
The shifted input.
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
output = _ni_support._get_output(output, input)
shift = _ni_support._normalize_sequence(shift, input.ndim)
shift = [-ii for ii in shift]
shift = numpy.asarray(shift, dtype=numpy.float64)
if not shift.flags.contiguous:
shift = shift.copy()
_nd_image.zoom_shift(filtered, None, shift, output, order, mode, cval)
return output
@docfiller
def zoom(input, zoom, output=None, order=3, mode='constant', cval=0.0,
prefilter=True):
"""
Zoom an array.
The array is zoomed using spline interpolation of the requested order.
Parameters
----------
%(input)s
zoom : float or sequence
The zoom factor along the axes. If a float, `zoom` is the same for each
axis. If a sequence, `zoom` should contain one value for each axis.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
zoom : ndarray
The zoomed input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure()
>>> ax1 = fig.add_subplot(121) # left side
>>> ax2 = fig.add_subplot(122) # right side
>>> ascent = misc.ascent()
>>> result = ndimage.zoom(ascent, 3.0)
>>> ax1.imshow(ascent)
>>> ax2.imshow(result)
>>> plt.show()
>>> print(ascent.shape)
(512, 512)
>>> print(result.shape)
(1536, 1536)
"""
if order < 0 or order > 5:
raise RuntimeError('spline order not supported')
input = numpy.asarray(input)
if numpy.iscomplexobj(input):
raise TypeError('Complex type not supported')
if input.ndim < 1:
raise RuntimeError('input and output rank must be > 0')
mode = _ni_support._extend_mode_to_code(mode)
if prefilter and order > 1:
filtered = spline_filter(input, order, output=numpy.float64)
else:
filtered = input
zoom = _ni_support._normalize_sequence(zoom, input.ndim)
output_shape = tuple(
[int(round(ii * jj)) for ii, jj in zip(input.shape, zoom)])
zoom_div = numpy.array(output_shape, float) - 1
# Zooming to infinite values is unpredictable, so just choose
# zoom factor 1 instead
zoom = numpy.divide(numpy.array(input.shape) - 1, zoom_div,
out=numpy.ones_like(input.shape, dtype=numpy.float64),
where=zoom_div != 0)
output = _ni_support._get_output(output, input,
shape=output_shape)
zoom = numpy.ascontiguousarray(zoom)
_nd_image.zoom_shift(filtered, zoom, None, output, order, mode, cval)
return output
@docfiller
def rotate(input, angle, axes=(1, 0), reshape=True, output=None, order=3,
mode='constant', cval=0.0, prefilter=True):
"""
Rotate an array.
The array is rotated in the plane defined by the two axes given by the
`axes` parameter using spline interpolation of the requested order.
Parameters
----------
%(input)s
angle : float
The rotation angle in degrees.
axes : tuple of 2 ints, optional
The two axes that define the plane of rotation. Default is the first
two axes.
reshape : bool, optional
If `reshape` is true, the output shape is adapted so that the input
array is contained completely in the output. Default is True.
%(output)s
order : int, optional
The order of the spline interpolation, default is 3.
The order has to be in the range 0-5.
%(mode)s
%(cval)s
%(prefilter)s
Returns
-------
rotate : ndarray
The rotated input.
Examples
--------
>>> from scipy import ndimage, misc
>>> import matplotlib.pyplot as plt
>>> fig = plt.figure(figsize=(10, 3))
>>> ax1, ax2, ax3 = fig.subplots(1, 3)
>>> img = misc.ascent()
>>> img_45 = ndimage.rotate(img, 45, reshape=False)
>>> full_img_45 = ndimage.rotate(img, 45, reshape=True)
>>> ax1.imshow(img, cmap='gray')
>>> ax1.set_axis_off()
>>> ax2.imshow(img_45, cmap='gray')
>>> ax2.set_axis_off()
>>> ax3.imshow(full_img_45, cmap='gray')
>>> ax3.set_axis_off()
>>> fig.set_tight_layout(True)
>>> plt.show()
>>> print(img.shape)
(512, 512)
>>> print(img_45.shape)
(512, 512)
>>> print(full_img_45.shape)
(724, 724)
"""
input_arr = numpy.asarray(input)
ndim = input_arr.ndim
if ndim < 2:
raise ValueError('input array should be at least two-dimensional')
axes = list(axes)
if len(axes) != 2:
raise ValueError('axes should contain exactly two values')
if not all([float(ax).is_integer() for ax in axes]):
raise ValueError('axes should contain only integer values')
if axes[0] < 0:
axes[0] += ndim
if axes[1] < 0:
axes[1] += ndim
if axes[0] < 0 or axes[1] < 0 or axes[0] >= ndim or axes[1] >= ndim:
raise ValueError('invalid rotation plane specified')
axes.sort()
angle_rad = numpy.deg2rad(angle)
c, s = numpy.cos(angle_rad), numpy.sin(angle_rad)
rot_matrix = numpy.array([[c, s],
[-s, c]])
img_shape = numpy.asarray(input_arr.shape)
in_plane_shape = img_shape[axes]
if reshape:
# Compute transformed input bounds
iy, ix = in_plane_shape
out_bounds = rot_matrix @ [[0, 0, iy, iy],
[0, ix, 0, ix]]
# Compute the shape of the transformed input plane
out_plane_shape = (out_bounds.ptp(axis=1) + 0.5).astype(int)
else:
out_plane_shape = img_shape[axes]
out_center = rot_matrix @ ((out_plane_shape - 1) / 2)
in_center = (in_plane_shape - 1) / 2
offset = in_center - out_center
output_shape = img_shape
output_shape[axes] = out_plane_shape
output_shape = tuple(output_shape)
output = _ni_support._get_output(output, input_arr, shape=output_shape)
if ndim <= 2:
affine_transform(input_arr, rot_matrix, offset, output_shape, output,
order, mode, cval, prefilter)
else:
# If ndim > 2, the rotation is applied over all the planes
# parallel to axes
planes_coord = itertools.product(
*[[slice(None)] if ax in axes else range(img_shape[ax])
for ax in range(ndim)])
out_plane_shape = tuple(out_plane_shape)
for coordinates in planes_coord:
ia = input_arr[coordinates]
oa = output[coordinates]
affine_transform(ia, rot_matrix, offset, out_plane_shape,
oa, order, mode, cval, prefilter)
return output
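# Illustrative sketch (not part of the original module; the helper name is
# hypothetical): for a 2-D input with ``reshape=False``, `rotate` reduces to
# `affine_transform` with the rotation matrix and offset computed exactly as in
# the function body above.
def _rotate_via_affine_transform_example():
    image = numpy.arange(144, dtype=numpy.float64).reshape(12, 12)
    angle_rad = numpy.deg2rad(30.0)
    c, s = numpy.cos(angle_rad), numpy.sin(angle_rad)
    rot_matrix = numpy.array([[c, s],
                              [-s, c]])
    in_center = (numpy.array(image.shape) - 1) / 2
    offset = in_center - rot_matrix @ in_center
    direct = affine_transform(image, rot_matrix, offset, order=1)
    via_rotate = rotate(image, 30.0, reshape=False, order=1)
    return numpy.allclose(direct, via_rotate)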
| bsd-3-clause |
evgchz/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=3)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features+1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
| bsd-3-clause |
jgao54/airflow | airflow/contrib/hooks/bigquery_hook.py | 1 | 81084 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
import time
import six
from builtins import range
from copy import deepcopy
from six import iteritems
from past.builtins import basestring
from airflow import AirflowException
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from airflow.utils.log.logging_mixin import LoggingMixin
from apiclient.discovery import HttpError, build
from googleapiclient import errors
from pandas_gbq.gbq import \
_check_google_client_version as gbq_check_google_client_version
from pandas_gbq import read_gbq
from pandas_gbq.gbq import \
_test_google_api_imports as gbq_test_google_api_imports
from pandas_gbq.gbq import GbqConnector
class BigQueryHook(GoogleCloudBaseHook, DbApiHook, LoggingMixin):
"""
Interact with BigQuery. This hook uses the Google Cloud Platform
connection.
"""
conn_name_attr = 'bigquery_conn_id'
def __init__(self,
bigquery_conn_id='bigquery_default',
delegate_to=None,
use_legacy_sql=True,
location=None):
super(BigQueryHook, self).__init__(
gcp_conn_id=bigquery_conn_id, delegate_to=delegate_to)
self.use_legacy_sql = use_legacy_sql
self.location = location
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(
service=service,
project_id=project,
use_legacy_sql=self.use_legacy_sql,
location=self.location,
)
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build(
'bigquery', 'v2', http=http_authorized, cache_discovery=False)
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(self, sql, parameters=None, dialect=None):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param sql: The BigQuery SQL to execute.
:type sql: str
:param parameters: The parameters to render the SQL query with (not
used, leave to override superclass method)
:type parameters: mapping or iterable
:param dialect: Dialect of BigQuery SQL – legacy SQL or standard SQL
            defaults to `self.use_legacy_sql` if not specified
:type dialect: str in {'legacy', 'standard'}
"""
if dialect is None:
dialect = 'legacy' if self.use_legacy_sql else 'standard'
return read_gbq(sql,
project_id=self._get_field('project'),
dialect=dialect,
verbose=False)
def table_exists(self, project_id, dataset_id, table_id):
"""
Checks for the existence of a table in Google BigQuery.
:param project_id: The Google cloud project in which to look for the
table. The connection supplied to the hook must provide access to
the specified project.
:type project_id: str
:param dataset_id: The name of the dataset in which to look for the
table.
:type dataset_id: str
:param table_id: The name of the table to check the existence of.
:type table_id: str
"""
service = self.get_service()
try:
service.tables().get(
projectId=project_id, datasetId=dataset_id,
tableId=table_id).execute()
return True
except errors.HttpError as e:
if e.resp['status'] == '404':
return False
raise
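def _bigquery_hook_usage_sketch():
    """Illustrative sketch only (hypothetical helper, not part of the original
    module): shows how the hook documented above is typically used. The
    connection id, SQL, project, dataset and table names are placeholders for
    whatever exists in your Airflow environment."""
    hook = BigQueryHook(bigquery_conn_id='bigquery_default', use_legacy_sql=False)
    df = hook.get_pandas_df('SELECT 17 AS answer', dialect='standard')
    exists = hook.table_exists(project_id='my-project',
                               dataset_id='my_dataset',
                               table_id='my_table')
    return df, exists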
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
    without forcing a three-legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(self,
project_id,
service,
reauth=False,
verbose=False,
dialect='legacy'):
super(BigQueryPandasConnector, self).__init__(project_id)
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
self.dialect = dialect
class BigQueryConnection(object):
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def close(self):
""" BigQueryConnection does not have anything to close. """
pass
def commit(self):
""" BigQueryConnection does not support transactions. """
pass
def cursor(self):
""" Return a new :py:class:`Cursor` object using the connection. """
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self):
raise NotImplementedError(
"BigQueryConnection does not have transactions")
class BigQueryBaseCursor(LoggingMixin):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(self,
service,
project_id,
use_legacy_sql=True,
api_resource_configs=None,
location=None):
self.service = service
self.project_id = project_id
self.use_legacy_sql = use_legacy_sql
if api_resource_configs:
_validate_value("api_resource_configs", api_resource_configs, dict)
self.api_resource_configs = api_resource_configs \
if api_resource_configs else {}
self.running_job_id = None
self.location = location
def create_empty_table(self,
project_id,
dataset_id,
table_id,
schema_fields=None,
time_partitioning=None,
labels=None,
view=None):
"""
Creates a new, empty table in the dataset.
To create a view, which is defined by a SQL query, parse a dictionary to 'view' kwarg
:param project_id: The project to create the table into.
:type project_id: str
:param dataset_id: The dataset to create the table into.
:type dataset_id: str
:param table_id: The Name of the table to be created.
:type table_id: str
:param schema_fields: If set, the schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schema
:type schema_fields: list
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
**Example**: ::
schema_fields=[{"name": "emp_name", "type": "STRING", "mode": "REQUIRED"},
{"name": "salary", "type": "INTEGER", "mode": "NULLABLE"}]
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
.. seealso::
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#timePartitioning
:type time_partitioning: dict
:param view: [Optional] A dictionary containing definition for the view.
If set, it will create a view instead of a table:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#view
:type view: dict
**Example**: ::
view = {
"query": "SELECT * FROM `test-project-id.test_dataset_id.test_table_prefix*` LIMIT 1000",
"useLegacySql": False
}
:return:
"""
project_id = project_id if project_id is not None else self.project_id
table_resource = {
'tableReference': {
'tableId': table_id
}
}
if schema_fields:
table_resource['schema'] = {'fields': schema_fields}
if time_partitioning:
table_resource['timePartitioning'] = time_partitioning
if labels:
table_resource['labels'] = labels
if view:
table_resource['view'] = view
self.log.info('Creating Table %s:%s.%s',
project_id, dataset_id, table_id)
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
self.log.info('Table created successfully: %s:%s.%s',
project_id, dataset_id, table_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
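    # Illustrative sketch only: ``_create_empty_table_usage_sketch`` is a
    # hypothetical helper added for documentation purposes, not part of the
    # original cursor API; the project, dataset, table and field names below
    # are placeholders.
    def _create_empty_table_usage_sketch(self):
        self.create_empty_table(
            project_id='my-project',
            dataset_id='my_dataset',
            table_id='events',
            schema_fields=[
                {'name': 'event_ts', 'type': 'TIMESTAMP', 'mode': 'REQUIRED'},
                {'name': 'payload', 'type': 'STRING', 'mode': 'NULLABLE'},
            ],
            time_partitioning={'type': 'DAY', 'field': 'event_ts'},
            labels={'env': 'dev'},
        )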
def create_external_table(self,
external_project_dataset_table,
schema_fields,
source_uris,
source_format='CSV',
autodetect=False,
compression='NONE',
ignore_unknown_values=False,
max_bad_records=0,
skip_leading_rows=0,
field_delimiter=',',
quote_character=None,
allow_quoted_newlines=False,
allow_jagged_rows=False,
src_fmt_configs=None,
labels=None
):
"""
Creates a new external table in the dataset with the data in Google
Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
for more details about these parameters.
:param external_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table>($<partition>) BigQuery
table name to create external table.
If <project> is not included, project will be the
project defined in the connection json.
:type external_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#resource
:type schema_fields: list
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
            per object name can be used.
:type source_uris: list
        :param source_format: File format of the source data.
:type source_format: str
:param autodetect: Try to detect schema and format options automatically.
Any option specified explicitly will be honored.
:type autodetect: bool
:param compression: [Optional] The compression type of the data source.
Possible values include GZIP and NONE.
The default value is NONE.
This setting is ignored for Google Cloud Bigtable,
Google Cloud Datastore backups and Avro formats.
:type compression: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
            source_format is CSV.
:type allow_jagged_rows: bool
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param labels: a dictionary containing labels for the table, passed to BigQuery
:type labels: dict
"""
if src_fmt_configs is None:
src_fmt_configs = {}
project_id, dataset_id, external_table_id = \
_split_tablename(table_input=external_project_dataset_table,
default_project_id=self.project_id,
var_name='external_project_dataset_table')
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/tables#externalDataConfiguration.sourceFormat
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
compression = compression.upper()
allowed_compressions = ['NONE', 'GZIP']
if compression not in allowed_compressions:
raise ValueError("{0} is not a valid compression format. "
"Please use one of the following types: {1}"
.format(compression, allowed_compressions))
table_resource = {
'externalDataConfiguration': {
'autodetect': autodetect,
'sourceFormat': source_format,
'sourceUris': source_uris,
'compression': compression,
'ignoreUnknownValues': ignore_unknown_values
},
'tableReference': {
'projectId': project_id,
'datasetId': dataset_id,
'tableId': external_table_id,
}
}
if schema_fields:
table_resource['externalDataConfiguration'].update({
'schema': {
'fields': schema_fields
}
})
self.log.info('Creating external table: %s', external_project_dataset_table)
if max_bad_records:
table_resource['externalDataConfiguration']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if 'quote_character' not in src_fmt_configs:
src_fmt_configs['quote'] = quote_character
if 'allowQuotedNewlines' not in src_fmt_configs:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
if 'allowJaggedRows' not in src_fmt_configs:
src_fmt_configs['allowJaggedRows'] = allow_jagged_rows
src_fmt_to_param_mapping = {
'CSV': 'csvOptions',
'GOOGLE_SHEETS': 'googleSheetsOptions'
}
src_fmt_to_configs_mapping = {
'csvOptions': [
'allowJaggedRows', 'allowQuotedNewlines',
'fieldDelimiter', 'skipLeadingRows',
'quote'
],
'googleSheetsOptions': ['skipLeadingRows']
}
if source_format in src_fmt_to_param_mapping.keys():
valid_configs = src_fmt_to_configs_mapping[
src_fmt_to_param_mapping[source_format]
]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
table_resource['externalDataConfiguration'][src_fmt_to_param_mapping[
source_format]] = src_fmt_configs
if labels:
table_resource['labels'] = labels
try:
self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource
).execute()
self.log.info('External table created successfully: %s',
external_project_dataset_table)
except HttpError as err:
raise Exception(
'BigQuery job failed. Error was: {}'.format(err.content)
)
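    # Illustrative sketch only: ``_create_external_table_usage_sketch`` is a
    # hypothetical helper added for documentation purposes, not part of the
    # original cursor API; the bucket, dataset and table names below are
    # placeholders.
    def _create_external_table_usage_sketch(self):
        self.create_external_table(
            external_project_dataset_table='my_dataset.external_events',
            schema_fields=[
                {'name': 'raw_line', 'type': 'STRING', 'mode': 'NULLABLE'},
            ],
            source_uris=['gs://my-bucket/events/*.csv'],
            source_format='CSV',
            skip_leading_rows=1,
            field_delimiter=',',
        )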
def run_query(self,
sql,
destination_dataset_table=None,
write_disposition='WRITE_EMPTY',
allow_large_results=False,
flatten_results=None,
udf_config=None,
use_legacy_sql=None,
maximum_billing_tier=None,
maximum_bytes_billed=None,
create_disposition='CREATE_IF_NEEDED',
query_params=None,
labels=None,
schema_update_options=(),
priority='INTERACTIVE',
time_partitioning=None,
api_resource_configs=None,
cluster_fields=None,
location=None):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param sql: The BigQuery SQL to execute.
:type sql: str
:param destination_dataset_table: The dotted <dataset>.<table>
BigQuery table to save the query results.
:type destination_dataset_table: str
:param write_disposition: What to do if the table already exists in
BigQuery.
:type write_disposition: str
:param allow_large_results: Whether to allow large results.
:type allow_large_results: bool
:param flatten_results: If true and query uses legacy SQL dialect, flattens
all nested and repeated fields in the query results. ``allowLargeResults``
must be true if this is set to false. For standard SQL queries, this
flag is ignored and results are never flattened.
:type flatten_results: bool
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
If `None`, defaults to `self.use_legacy_sql`.
:type use_legacy_sql: bool
        :param api_resource_configs: a dictionary containing 'configuration'
            params to be applied to the Google BigQuery Jobs API:
            https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs
            for example, {'query': {'useQueryCache': False}}. Use it when you
            need to provide parameters that are not exposed directly as
            arguments of the BigQueryHook.
:type api_resource_configs: dict
:param maximum_billing_tier: Positive integer that serves as a
multiplier of the basic price.
:type maximum_billing_tier: int
:param maximum_bytes_billed: Limits the bytes billed for this job.
Queries that will have bytes billed beyond this limit will fail
(without incurring a charge). If unspecified, this will be
set to your project default.
:type maximum_bytes_billed: float
:param create_disposition: Specifies whether the job is allowed to
create new tables.
:type create_disposition: str
        :param query_params: a dictionary containing query parameter types and
values, passed to BigQuery
:type query_params: dict
        :param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
:param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the query job.
:type schema_update_options: tuple
:param priority: Specifies a priority for the query.
Possible values include INTERACTIVE and BATCH.
The default value is INTERACTIVE.
:type priority: str
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this query be stored sorted
by one or more columns. This is only available in combination with
time_partitioning. The order of columns given determines the sort order.
:type cluster_fields: list of str
:param location: The geographic location of the job. Required except for
US and EU. See details at
https://cloud.google.com/bigquery/docs/locations#specifying_your_location
:type location: str
"""
if time_partitioning is None:
time_partitioning = {}
if location:
self.location = location
if not api_resource_configs:
api_resource_configs = self.api_resource_configs
else:
_validate_value('api_resource_configs',
api_resource_configs, dict)
configuration = deepcopy(api_resource_configs)
if 'query' not in configuration:
configuration['query'] = {}
else:
_validate_value("api_resource_configs['query']",
configuration['query'], dict)
if sql is None and not configuration['query'].get('query', None):
raise TypeError('`BigQueryBaseCursor.run_query` '
'missing 1 required positional argument: `sql`')
# BigQuery also allows you to define how you want a table's schema to change
# as a side effect of a query job
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options
).issuperset(set(schema_update_options)):
raise ValueError("{0} contains invalid schema update options. "
"Please only use one or more of the following "
"options: {1}"
.format(schema_update_options,
allowed_schema_update_options))
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
if destination_dataset_table:
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
destination_dataset_table = {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
if cluster_fields:
cluster_fields = {'fields': cluster_fields}
query_param_list = [
(sql, 'query', None, six.string_types),
(priority, 'priority', 'INTERACTIVE', six.string_types),
(use_legacy_sql, 'useLegacySql', self.use_legacy_sql, bool),
(query_params, 'queryParameters', None, dict),
(udf_config, 'userDefinedFunctionResources', None, list),
(maximum_billing_tier, 'maximumBillingTier', None, int),
(maximum_bytes_billed, 'maximumBytesBilled', None, float),
(time_partitioning, 'timePartitioning', {}, dict),
(schema_update_options, 'schemaUpdateOptions', None, tuple),
(destination_dataset_table, 'destinationTable', None, dict),
(cluster_fields, 'clustering', None, dict),
]
for param_tuple in query_param_list:
param, param_name, param_default, param_type = param_tuple
if param_name not in configuration['query'] and param in [None, {}, ()]:
if param_name == 'timePartitioning':
param_default = _cleanse_time_partitioning(
destination_dataset_table, time_partitioning)
param = param_default
if param not in [None, {}, ()]:
_api_resource_configs_duplication_check(
param_name, param, configuration['query'])
configuration['query'][param_name] = param
                # check the type of the provided param as the last step, because
                # the param can come from two sources (the method arguments or
                # api_resource_configs) and must be resolved first
_validate_value(param_name, configuration['query'][param_name],
param_type)
if param_name == 'schemaUpdateOptions' and param:
self.log.info("Adding experimental 'schemaUpdateOptions': "
"{0}".format(schema_update_options))
if param_name == 'destinationTable':
for key in ['projectId', 'datasetId', 'tableId']:
if key not in configuration['query']['destinationTable']:
raise ValueError(
"Not correct 'destinationTable' in "
"api_resource_configs. 'destinationTable' "
"must be a dict with {'projectId':'', "
"'datasetId':'', 'tableId':''}")
configuration['query'].update({
'allowLargeResults': allow_large_results,
'flattenResults': flatten_results,
'writeDisposition': write_disposition,
'createDisposition': create_disposition,
})
if 'useLegacySql' in configuration['query'] and \
'queryParameters' in configuration['query']:
raise ValueError("Query parameters are not allowed "
"when using legacy SQL")
if labels:
_api_resource_configs_duplication_check(
'labels', labels, configuration)
configuration['labels'] = labels
return self.run_with_configuration(configuration)
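    # A minimal usage sketch of run_query, assuming an existing BigQuery
    # connection id; the project, dataset and table names are placeholders:
    #
    #   bq_hook = BigQueryHook(bigquery_conn_id='my_gcp_conn', use_legacy_sql=False)
    #   cursor = bq_hook.get_conn().cursor()
    #   job_id = cursor.run_query(
    #       sql='SELECT COUNT(*) FROM `my-project.my_dataset.my_table`',
    #       destination_dataset_table='my_dataset.my_table_copy',
    #       write_disposition='WRITE_TRUNCATE',
    #       labels={'team': 'analytics'})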
def run_extract( # noqa
self,
source_project_dataset_table,
destination_cloud_storage_uris,
compression='NONE',
export_format='CSV',
field_delimiter=',',
print_header=True,
labels=None):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted <dataset>.<table>
BigQuery table to use as the source data.
:type source_project_dataset_table: str
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: str
:param export_format: File format to export.
:type export_format: str
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: str
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: bool
:param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if labels:
configuration['labels'] = labels
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
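    # A minimal usage sketch of run_extract; `cursor` is assumed to be an
    # existing BigQueryBaseCursor and the bucket/table names are placeholders:
    #
    #   cursor.run_extract(
    #       source_project_dataset_table='my_dataset.my_table',
    #       destination_cloud_storage_uris=['gs://my-bucket/export-*.csv'],
    #       export_format='CSV',
    #       field_delimiter=',',
    #       print_header=True)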
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED',
labels=None):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
(project:|project.)<dataset>.<table>
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If <project> is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: (project:|project.)<dataset>.<table>
:type destination_project_dataset_table: str
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
        :param labels: a dictionary containing labels for the job/query,
passed to BigQuery
:type labels: dict
"""
source_project_dataset_tables = ([
source_project_dataset_tables
] if not isinstance(source_project_dataset_tables, list) else
source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId':
source_project,
'datasetId':
source_dataset,
'tableId':
source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
if labels:
configuration['labels'] = labels
return self.run_with_configuration(configuration)
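    # A minimal usage sketch of run_copy; `cursor` and all table names are
    # placeholders, and the destination uses the (project:)<dataset>.<table> form:
    #
    #   cursor.run_copy(
    #       source_project_dataset_tables=['my_dataset.table_a', 'my_dataset.table_b'],
    #       destination_project_dataset_table='other-project:my_dataset.table_merged',
    #       write_disposition='WRITE_TRUNCATE')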
def run_load(self,
destination_project_dataset_table,
source_uris,
schema_fields=None,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
schema_update_options=(),
src_fmt_configs=None,
time_partitioning=None,
cluster_fields=None,
autodetect=False):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table>($<partition>) BigQuery
table to load data into. If <project> is not included, project will be the
project defined in the connection json. If a partition is specified the
operator will automatically append the data, create a new partition or create
a new DAY partitioned table.
:type destination_project_dataset_table: str
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
Required if autodetect=False; optional if autodetect=True.
:type schema_fields: list
:param autodetect: Attempt to autodetect the schema for CSV and JSON
source files.
:type autodetect: bool
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single wildcard
            per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: str
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: str
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: str
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: str
:param max_bad_records: The maximum number of bad records that BigQuery can
ignore when running the job.
:type max_bad_records: int
:param quote_character: The value that is used to quote data sections in a CSV
file.
:type quote_character: str
:param ignore_unknown_values: [Optional] Indicates if BigQuery should allow
extra values that are not represented in the table schema.
If true, the extra values are ignored. If false, records with extra columns
are treated as bad records, and if there are too many bad records, an
invalid error is returned in the job result.
:type ignore_unknown_values: bool
:param allow_quoted_newlines: Whether to allow quoted newlines (true) or not
(false).
:type allow_quoted_newlines: bool
:param allow_jagged_rows: Accept rows that are missing trailing optional columns.
The missing values are treated as nulls. If false, records with missing
trailing columns are treated as bad records, and if there are too many bad
records, an invalid error is returned in the job result. Only applicable when
            source_format is CSV.
:type allow_jagged_rows: bool
        :param schema_update_options: Allows the schema of the destination
table to be updated as a side effect of the load job.
:type schema_update_options: tuple
:param src_fmt_configs: configure optional fields specific to the source format
:type src_fmt_configs: dict
:param time_partitioning: configure optional time partitioning fields i.e.
partition by field, type and expiration as per API specifications.
:type time_partitioning: dict
:param cluster_fields: Request that the result of this load be stored sorted
by one or more columns. This is only available in combination with
time_partitioning. The order of columns given determines the sort order.
:type cluster_fields: list of str
"""
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
if schema_fields is None and not autodetect:
raise ValueError(
'You must either pass a schema or autodetect=True.')
if src_fmt_configs is None:
src_fmt_configs = {}
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(
set(schema_update_options)):
raise ValueError(
"{0} contains invalid schema update options."
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options))
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'autodetect': autodetect,
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
'ignoreUnknownValues': ignore_unknown_values
}
}
time_partitioning = _cleanse_time_partitioning(
destination_project_dataset_table,
time_partitioning
)
if time_partitioning:
configuration['load'].update({
'timePartitioning': time_partitioning
})
if cluster_fields:
configuration['load'].update({'clustering': {'fields': cluster_fields}})
if schema_fields:
configuration['load']['schema'] = {'fields': schema_fields}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
else:
self.log.info(
"Adding experimental "
"'schemaUpdateOptions': {0}".format(schema_update_options))
configuration['load'][
'schemaUpdateOptions'] = schema_update_options
if max_bad_records:
configuration['load']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if 'ignoreUnknownValues' not in src_fmt_configs:
src_fmt_configs['ignoreUnknownValues'] = ignore_unknown_values
if quote_character is not None:
src_fmt_configs['quote'] = quote_character
if allow_quoted_newlines:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
src_fmt_to_configs_mapping = {
'CSV': [
'allowJaggedRows', 'allowQuotedNewlines', 'autodetect',
'fieldDelimiter', 'skipLeadingRows', 'ignoreUnknownValues',
'nullMarker', 'quote'
],
'DATASTORE_BACKUP': ['projectionFields'],
'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'],
'PARQUET': ['autodetect', 'ignoreUnknownValues'],
'AVRO': [],
}
valid_configs = src_fmt_to_configs_mapping[source_format]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
configuration['load'].update(src_fmt_configs)
if allow_jagged_rows:
configuration['load']['allowJaggedRows'] = allow_jagged_rows
return self.run_with_configuration(configuration)
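    # A minimal usage sketch of run_load; the URIs, schema and partitioned
    # table name are placeholders:
    #
    #   cursor.run_load(
    #       destination_project_dataset_table='my_dataset.events$20180101',
    #       source_uris=['gs://my-bucket/events/2018-01-01/*.json'],
    #       source_format='NEWLINE_DELIMITED_JSON',
    #       schema_fields=[{'name': 'event_id', 'type': 'STRING', 'mode': 'REQUIRED'},
    #                      {'name': 'ts', 'type': 'TIMESTAMP', 'mode': 'NULLABLE'}],
    #       write_disposition='WRITE_APPEND')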
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {'configuration': configuration}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute()
self.running_job_id = query_reply['jobReference']['jobId']
# Wait for query to finish.
keep_polling_job = True
while keep_polling_job:
try:
if self.location:
job = jobs.get(
projectId=self.project_id,
jobId=self.running_job_id,
location=self.location).execute()
else:
job = jobs.get(
projectId=self.project_id,
jobId=self.running_job_id).execute()
if job['status']['state'] == 'DONE':
keep_polling_job = False
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
'BigQuery job failed. Final error was: {}. The job was: {}'.
format(job['status']['errorResult'], job))
else:
self.log.info('Waiting for job to complete : %s, %s',
self.project_id, self.running_job_id)
time.sleep(5)
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error, waiting for job to complete: %s',
err.resp.status, self.running_job_id)
time.sleep(5)
else:
raise Exception(
'BigQuery job status check failed. Final error was: %s',
err.resp.status)
return self.running_job_id
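    # A minimal sketch of a raw `configuration` payload for run_with_configuration;
    # field names follow the BigQuery Jobs API and the query text is a placeholder:
    #
    #   job_id = cursor.run_with_configuration({
    #       'query': {
    #           'query': 'SELECT 1',
    #           'useLegacySql': False,
    #       }
    #   })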
def poll_job_complete(self, job_id):
jobs = self.service.jobs()
try:
if self.location:
job = jobs.get(projectId=self.project_id,
jobId=job_id,
location=self.location).execute()
else:
job = jobs.get(projectId=self.project_id,
jobId=job_id).execute()
if job['status']['state'] == 'DONE':
return True
except HttpError as err:
if err.resp.status in [500, 503]:
self.log.info(
'%s: Retryable error while polling job with id %s',
err.resp.status, job_id)
else:
raise Exception(
'BigQuery job status check failed. Final error was: %s',
err.resp.status)
return False
def cancel_query(self):
"""
Cancel all started queries that have not yet completed
"""
jobs = self.service.jobs()
if (self.running_job_id and
not self.poll_job_complete(self.running_job_id)):
self.log.info('Attempting to cancel job : %s, %s', self.project_id,
self.running_job_id)
if self.location:
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id,
location=self.location).execute()
else:
jobs.cancel(
projectId=self.project_id,
jobId=self.running_job_id).execute()
else:
self.log.info('No running BigQuery jobs to cancel.')
return
# Wait for all the calls to cancel to finish
max_polling_attempts = 12
polling_attempts = 0
job_complete = False
while polling_attempts < max_polling_attempts and not job_complete:
polling_attempts = polling_attempts + 1
job_complete = self.poll_job_complete(self.running_job_id)
if job_complete:
self.log.info('Job successfully canceled: %s, %s',
self.project_id, self.running_job_id)
elif polling_attempts == max_polling_attempts:
self.log.info(
"Stopping polling due to timeout. Job with id %s "
"has not completed cancel and may or may not finish.",
self.running_job_id)
else:
self.log.info('Waiting for canceled job with id %s to finish.',
self.running_job_id)
time.sleep(5)
def get_schema(self, dataset_id, table_id):
"""
        Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute()
return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, selected_fields=None, page_token=None,
start_index=None):
"""
Get the data of a given dataset.table and optionally with selected columns.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param selected_fields: List of fields to return (comma-separated). If
unspecified, all fields are returned.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if selected_fields:
optional_params['selectedFields'] = selected_fields
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (self.service.tabledata().list(
projectId=self.project_id,
datasetId=dataset_id,
tableId=table_id,
**optional_params).execute())
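    # A minimal paging sketch for get_tabledata; dataset/table names are
    # placeholders and rows are available under the 'rows' key of each page:
    #
    #   page = cursor.get_tabledata('my_dataset', 'my_table', max_results=1000)
    #   while page.get('pageToken'):
    #       page = cursor.get_tabledata('my_dataset', 'my_table',
    #                                   max_results=1000,
    #                                   page_token=page['pageToken'])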
def run_table_delete(self, deletion_dataset_table,
ignore_if_missing=False):
"""
Delete an existing table from the dataset;
If the table does not exist, return an error unless ignore_if_missing
is set to True.
:param deletion_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: bool
:return:
"""
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute()
self.log.info('Deleted table %s:%s.%s.', deletion_project,
deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception('Table deletion failed. Table does not exist.')
else:
self.log.info('Table does not exist. Skipping.')
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
creates a new, empty table in the dataset;
If the table already exists, update the existing table.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(
projectId=project_id, datasetId=dataset_id).execute()
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
self.log.info('Table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(
projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute()
# If there is no next page, then the table doesn't exist.
else:
# do insert
self.log.info('Table %s:%s.%s does not exist. creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(
projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
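    # A minimal sketch of a `table_resource` payload for run_table_upsert
    # (placeholder names; see the tables#resource docs linked above for the
    # full schema):
    #
    #   cursor.run_table_upsert(
    #       dataset_id='my_dataset',
    #       table_resource={
    #           'tableReference': {'tableId': 'my_table'},
    #           'schema': {'fields': [{'name': 'id', 'type': 'STRING'}]},
    #       })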
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project=None,
view_project=None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(
projectId=source_project, datasetId=source_dataset).execute()
access = source_dataset_resource[
'access'] if 'access' in source_dataset_resource else []
view_access = {
'view': {
'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table
}
}
# check to see if the view we want to add already exists.
if view_access not in access:
self.log.info(
'Granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project,
source_dataset)
access.append(view_access)
return self.service.datasets().patch(
projectId=source_project,
datasetId=source_dataset,
body={
'access': access
}).execute()
else:
# if view is already in access, do nothing.
self.log.info(
'Table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table, source_project, source_dataset)
return source_dataset_resource
def create_empty_dataset(self, dataset_id="", project_id="",
dataset_reference=None):
"""
Create a new empty dataset:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/insert
:param project_id: The name of the project where we want to create
            an empty dataset. Not needed if projectId is given in dataset_reference.
:type project_id: str
        :param dataset_id: The id of the dataset. Not needed if datasetId
            is given in dataset_reference.
:type dataset_id: str
:param dataset_reference: Dataset reference that could be provided
with request body. More info:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
:type dataset_reference: dict
"""
if dataset_reference:
_validate_value('dataset_reference', dataset_reference, dict)
else:
dataset_reference = {}
if "datasetReference" not in dataset_reference:
dataset_reference["datasetReference"] = {}
if not dataset_reference["datasetReference"].get("datasetId") and not dataset_id:
            raise ValueError(
                "dataset_id not provided and datasetId not present in "
                "dataset_reference. Impossible to create dataset")
dataset_required_params = [(dataset_id, "datasetId", ""),
(project_id, "projectId", self.project_id)]
for param_tuple in dataset_required_params:
param, param_name, param_default = param_tuple
if param_name not in dataset_reference['datasetReference']:
if param_default and not param:
self.log.info("{} was not specified. Will be used default "
"value {}.".format(param_name,
param_default))
param = param_default
dataset_reference['datasetReference'].update(
{param_name: param})
elif param:
_api_resource_configs_duplication_check(
param_name, param,
dataset_reference['datasetReference'], 'dataset_reference')
dataset_id = dataset_reference.get("datasetReference").get("datasetId")
dataset_project_id = dataset_reference.get("datasetReference").get(
"projectId")
self.log.info('Creating Dataset: %s in project: %s ', dataset_id,
dataset_project_id)
try:
self.service.datasets().insert(
projectId=dataset_project_id,
body=dataset_reference).execute()
self.log.info('Dataset created successfully: In project %s '
'Dataset %s', dataset_project_id, dataset_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
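    # A minimal usage sketch of create_empty_dataset; either pass the ids
    # directly or a full dataset_reference (all values below are placeholders):
    #
    #   cursor.create_empty_dataset(dataset_id='my_new_dataset')
    #   cursor.create_empty_dataset(
    #       dataset_reference={'datasetReference': {'datasetId': 'my_new_dataset',
    #                                               'projectId': 'my-project'}})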
def delete_dataset(self, project_id, dataset_id):
"""
        Delete a dataset in BigQuery in your project.
        :param project_id: The name of the project where we have the dataset.
        :type project_id: str
        :param dataset_id: The dataset to be deleted.
:type dataset_id: str
:return:
"""
project_id = project_id if project_id is not None else self.project_id
self.log.info('Deleting from project: %s Dataset:%s',
project_id, dataset_id)
try:
self.service.datasets().delete(
projectId=project_id,
datasetId=dataset_id).execute()
self.log.info('Dataset deleted successfully: In project %s '
'Dataset %s', project_id, dataset_id)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
def get_dataset(self, dataset_id, project_id=None):
"""
        Method returns dataset_resource if the dataset exists
        and raises a 404 error if the dataset does not exist
:param dataset_id: The BigQuery Dataset ID
:type dataset_id: str
:param project_id: The GCP Project ID
:type project_id: str
:return: dataset_resource
.. seealso::
For more information, see Dataset Resource content:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets#resource
"""
if not dataset_id or not isinstance(dataset_id, str):
raise ValueError("dataset_id argument must be provided and has "
"a type 'str'. You provided: {}".format(dataset_id))
dataset_project_id = project_id if project_id else self.project_id
try:
dataset_resource = self.service.datasets().get(
datasetId=dataset_id, projectId=dataset_project_id).execute()
self.log.info("Dataset Resource: {}".format(dataset_resource))
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content))
return dataset_resource
def get_datasets_list(self, project_id=None):
"""
Method returns full list of BigQuery datasets in the current project
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/datasets/list
:param project_id: Google Cloud Project for which you
try to get all datasets
:type project_id: str
:return: datasets_list
Example of returned datasets_list: ::
{
"kind":"bigquery#dataset",
"location":"US",
"id":"your-project:dataset_2_test",
"datasetReference":{
"projectId":"your-project",
"datasetId":"dataset_2_test"
}
},
{
"kind":"bigquery#dataset",
"location":"US",
"id":"your-project:dataset_1_test",
"datasetReference":{
"projectId":"your-project",
"datasetId":"dataset_1_test"
}
}
]
"""
dataset_project_id = project_id if project_id else self.project_id
try:
datasets_list = self.service.datasets().list(
projectId=dataset_project_id).execute()['datasets']
self.log.info("Datasets List: {}".format(datasets_list))
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content))
return datasets_list
def insert_all(self, project_id, dataset_id, table_id,
rows, ignore_unknown_values=False,
skip_invalid_rows=False, fail_on_error=False):
"""
Method to stream data into BigQuery one record at a time without needing
to run a load job
.. seealso::
For more information, see:
https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll
:param project_id: The name of the project where we have the table
:type project_id: str
:param dataset_id: The name of the dataset where we have the table
:type dataset_id: str
:param table_id: The name of the table
:type table_id: str
:param rows: the rows to insert
:type rows: list
        **Example of rows**:
rows=[{"json": {"a_key": "a_value_0"}}, {"json": {"a_key": "a_value_1"}}]
:param ignore_unknown_values: [Optional] Accept rows that contain values
that do not match the schema. The unknown values are ignored.
The default value is false, which treats unknown values as errors.
:type ignore_unknown_values: bool
:param skip_invalid_rows: [Optional] Insert all valid rows of a request,
even if invalid rows exist. The default value is false, which causes
the entire request to fail if any invalid rows exist.
:type skip_invalid_rows: bool
:param fail_on_error: [Optional] Force the task to fail if any errors occur.
The default value is false, which indicates the task should not fail
even if any insertion errors occur.
:type fail_on_error: bool
"""
dataset_project_id = project_id if project_id else self.project_id
body = {
"rows": rows,
"ignoreUnknownValues": ignore_unknown_values,
"kind": "bigquery#tableDataInsertAllRequest",
"skipInvalidRows": skip_invalid_rows,
}
try:
self.log.info('Inserting {} row(s) into Table {}:{}.{}'.format(
len(rows), dataset_project_id,
dataset_id, table_id))
resp = self.service.tabledata().insertAll(
projectId=dataset_project_id, datasetId=dataset_id,
tableId=table_id, body=body
).execute()
if 'insertErrors' not in resp:
self.log.info('All row(s) inserted successfully: {}:{}.{}'.format(
dataset_project_id, dataset_id, table_id))
else:
error_msg = '{} insert error(s) occurred: {}:{}.{}. Details: {}'.format(
len(resp['insertErrors']),
dataset_project_id, dataset_id, table_id, resp['insertErrors'])
if fail_on_error:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(error_msg)
)
self.log.info(error_msg)
except HttpError as err:
raise AirflowException(
'BigQuery job failed. Error was: {}'.format(err.content)
)
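    # A minimal usage sketch of insert_all; project/dataset/table names and
    # row payloads are placeholders:
    #
    #   cursor.insert_all(
    #       project_id='my-project', dataset_id='my_dataset', table_id='my_table',
    #       rows=[{'json': {'a_key': 'a_value_0'}},
    #             {'json': {'a_key': 'a_value_1'}}],
    #       fail_on_error=True)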
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id, use_legacy_sql=True, location=None):
super(BigQueryCursor, self).__init__(
service=service,
project_id=project_id,
use_legacy_sql=use_legacy_sql,
location=location,
)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: str
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
sql = _bind_parameters(operation,
parameters) if parameters else operation
self.job_id = self.run_query(sql)
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: str
:param seq_of_parameters: List of dictionary parameters to substitute into the
query.
:type seq_of_parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (self.service.jobs().getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token).execute())
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.page_token = None
self.job_id = None
self.page_token = None
return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of sequences
(e.g. a list of tuples). An empty sequence is returned when no more rows are
available. The number of rows to fetch per call is specified by the parameter.
If it is not given, the cursor's arraysize determines the number of rows to be
fetched. The method should try to fetch as many rows as indicated by the size
parameter. If this is not possible due to the specified number of rows not being
available, fewer rows may be returned. An :py:class:`~pyhive.exc.Error`
(or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of
sequences (e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def get_arraysize(self):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
return self._buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
""" Does nothing by default """
pass
def setoutputsize(self, size, column=None):
""" Does nothing by default """
pass
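# A minimal PEP 249-style usage sketch of BigQueryCursor; the connection id,
# dataset and table names are placeholders, and parameters are bound with
# pyformat-style %(name)s markers as handled by _bind_parameters below:
#
#   cursor = BigQueryHook(bigquery_conn_id='my_gcp_conn').get_conn().cursor()
#   cursor.execute('SELECT name FROM `my_dataset.users` WHERE id = %(id)s',
#                  {'id': 42})
#   rows = cursor.fetchmany(10)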
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in iteritems(parameters):
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER':
return int(string_field)
elif bq_type == 'FLOAT' or bq_type == 'TIMESTAMP':
return float(string_field)
elif bq_type == 'BOOLEAN':
if string_field not in ['true', 'false']:
raise ValueError("{} must have value 'true' or 'false'".format(
string_field))
return string_field == 'true'
else:
return string_field
def _split_tablename(table_input, default_project_id, var_name=None):
if '.' not in table_input:
raise ValueError(
'Expected target table name in the format of '
'<dataset>.<table>. Got: {}'.format(table_input))
if not default_project_id:
raise ValueError("INTERNAL: No default project is specified")
def var_print(var_name):
if var_name is None:
return ""
else:
return "Format exception for {var}: ".format(var=var_name)
if table_input.count('.') + table_input.count(':') > 3:
raise Exception(('{var}Use either : or . to specify project '
'got {input}').format(
var=var_print(var_name), input=table_input))
cmpt = table_input.rsplit(':', 1)
project_id = None
rest = table_input
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2 and cmpt[0].count(':') <= 1:
if cmpt[-1].count('.') != 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception(('{var}Expect format of (<project:)<dataset>.<table>, '
'got {input}').format(
var=var_print(var_name), input=table_input))
cmpt = rest.split('.')
if len(cmpt) == 3:
if project_id:
raise ValueError(
"{var}Use either : or . to specify project".format(
var=var_print(var_name)))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception(
('{var}Expect format of (<project.|<project:)<dataset>.<table>, '
'got {input}').format(var=var_print(var_name), input=table_input))
if project_id is None:
if var_name is not None:
log = LoggingMixin().log
log.info('Project not included in {var}: {input}; '
'using project "{project}"'.format(
var=var_name,
input=table_input,
project=default_project_id))
project_id = default_project_id
return project_id, dataset_id, table_id
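# A behaviour sketch for _split_tablename with hypothetical inputs:
#
#   _split_tablename('my-project:my_dataset.my_table', 'default-project')
#   -> ('my-project', 'my_dataset', 'my_table')
#   _split_tablename('my_dataset.my_table', 'default-project')
#   -> ('default-project', 'my_dataset', 'my_table')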
def _cleanse_time_partitioning(destination_dataset_table, time_partitioning_in):
# if it is a partitioned table ($ is in the table name) add partition load option
if time_partitioning_in is None:
time_partitioning_in = {}
time_partitioning_out = {}
if destination_dataset_table and '$' in destination_dataset_table:
time_partitioning_out['type'] = 'DAY'
time_partitioning_out.update(time_partitioning_in)
return time_partitioning_out
def _validate_value(key, value, expected_type):
""" function to check expected type and raise
error if type is not correct """
if not isinstance(value, expected_type):
raise TypeError("{} argument must have a type {} not {}".format(
key, expected_type, type(value)))
def _api_resource_configs_duplication_check(key, value, config_dict,
config_dict_name='api_resource_configs'):
if key in config_dict and value != config_dict[key]:
raise ValueError("Values of {param_name} param are duplicated. "
"{dict_name} contained {param_name} param "
"in `query` config and {param_name} was also provided "
"with arg to run_query() method. Please remove duplicates."
.format(param_name=key, dict_name=config_dict_name))
| apache-2.0 |
Groovy-Dragon/tcRIP | ST_pTuple.py | 1 | 2015 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 4 10:56:51 2017
@author: lewismoffat
This script is focused on statistics; it calculates the most common pTuples
without clipping and with clipping
"""
#==============================================================================
# Module Imports
#==============================================================================
import numpy as np
import matplotlib.pyplot as plt
import dataProcessing as dp
import pdb
import seaborn as sns
from collections import defaultdict
from collections import Counter
from sklearn.manifold import TSNE
from sklearn.cluster import KMeans
from matplotlib import pylab
#==============================================================================
# Get the data
#==============================================================================
# are we doing the full set
singlePatient=False
# which patient to get data from
patient=['Complete']
chain = "beta"
if singlePatient:
print('Patient: '+patient[0])
delim = ["naive",chain]+patient #other delimiters
else:
print('Patient: All')
delim = ["naive",chain] #other delimiters
seqs, vj = dp.loadAllPatients(delim) # this gets all the sequences and vj values
#==============================================================================
# Clipping the data
#==============================================================================
# filter out joint sequences
seqs[0], seqs[1], vj[0], vj[1], joint = dp.removeDup(seqs[0], seqs[1], vj[0], vj[1])
# filter to 14 long, together its still 200k seqs
seqs[0]=dp.filtr(seqs[0], 14)
seqs[1]=dp.filtr(seqs[1], 14)
# clip the sequences
for idx, group in enumerate(seqs):
for idx2, seq in enumerate(group):
group[idx2]=seq[4:10]
# get tuples, list is already flat
seqs[0]=dp.expandTuples(seqs[0],n=4)
seqs[1]=dp.expandTuples(seqs[1],n=4)
# make counters
c4=Counter(seqs[0])
c8=Counter(seqs[1])
print(c4.most_common(n=10))
print()
print(c8.most_common(n=10))
| mit |
HIIT/hybra-core | setup.py | 1 | 1551 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name = 'hybra-core',
version = '0.1.2a1',
description = 'Toolkit for data management and analysis.',
keywords = ['data management', 'data analysis'],
url = 'https://github.com/HIIT/hybra-core',
author = 'Matti Nelimarkka, Juho Pääkkönen, Arto Kekkonen',
author_email = 'matti.nelimarkka@aalto.fi, juho.paakkonen@aalto.fi, arto.kekkonen@helsinki.fi',
packages = find_packages(exclude=['docs', 'test']),
package_data={
'hybra.timeline' : ['*.js', '*.css', '*.html'],
'hybra.network' : ['*.js', '*.css', '*.html'],
'hybra.analysis' : ['*.r'],
'hybra.analysis.topicmodel' : ['*.r', '*.txt']
},
    license = 'MIT',
install_requires=[
'dateparser>=0.5.1',
'GitPython>=2.0.6',
'jupyter>=1.0.0',
'jupyter_client>=4.3.0',
'jupyter_console>=4.1.1',
'jupyter_core>=4.1.0',
'matplotlib>=1.5.3',
'nbstripout>=0.2.9',
'networkx>=1.11',
'numpy>=1.11.0',
'requests>=2.9.1',
'scikit-learn>=0.17.1',
'scipy>=0.17.1',
'XlsxWriter>=0.9.6',
'wordcloud>=1.2.1',
'tldextract>=2.1.0',
'pandas>=0.22.0',
'rpy2<=2.8.6'
],
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Programming Language :: JavaScript'
]
)
| mit |
justanothercoder/LSTM-Optimizer-TF | tf_optimizees/mlp_classifier.py | 1 | 3308 | import numpy as np
import tensorflow as tf
from sklearn.datasets import load_digits, fetch_mldata
from sklearn.preprocessing import StandardScaler
from sklearn import utils
class MLPClassifier:
def __init__(self, num_units=20, num_layers=1, activation='sigmoid'):
self.num_units = num_units
self.num_layers = num_layers
self.activation = getattr(tf.nn, activation)
def build(self, optimizer):
x, y = self.inputs()
pred = self.inference(x)
loss, acc = self.loss(pred, y)
train_op = self.train_op(loss, optimizer)
self.ops = {
'x': x, 'y': y,
'pred': pred,
'loss': self.ema.average(loss),
'acc': self.ema.average(acc),
'train_op': train_op
}
return self.ops
def inputs(self):
x = tf.placeholder(tf.float32, shape=[None, self.X.shape[1]])
y = tf.placeholder(tf.int32, shape=[None])
return x, y
def inference(self, x):
pred = x
with tf.variable_scope('inference') as self.scope:
for _ in range(self.num_layers):
pred = tf.layers.dense(pred, self.num_units, activation=None)
pred = tf.layers.batch_normalization(pred)
pred = self.activation(pred)
pred = tf.layers.dense(pred, 10)
return pred
def loss(self, logits, y):
loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=logits), axis=-1)
p = tf.cast(tf.argmax(tf.nn.softmax(logits), axis=1), tf.int32)
acc = tf.reduce_mean(tf.cast(tf.equal(p, y), tf.float32))
self.ema = tf.train.ExponentialMovingAverage(decay=0.95)
self.average_op = self.ema.apply([loss, acc])
return loss, acc
def train_op(self, loss, optimizer='adam'):
if optimizer == 'adam':
optimizer = tf.train.AdamOptimizer(1e-3)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
all_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=self.scope.name)
train_op = optimizer.minimize(loss, var_list=all_vars)
with tf.control_dependencies([train_op]):
train_op = tf.group(self.average_op)
return train_op
def prepare_data(self, dataset_name):
self.dataset_name = dataset_name
if dataset_name == 'digits':
dataset = load_digits(n_class=10)
elif dataset_name == 'mnist':
dataset = fetch_mldata('MNIST original', data_home='/srv/hd1/data/vyanush/')
self.X, self.Y = dataset.data, dataset.target
self.X, self.Y = utils.shuffle(self.X, self.Y)
if dataset_name == 'mnist':
self.X = self.X[:50000]
self.Y = self.Y[:50000]
self.X = StandardScaler().fit_transform(self.X.astype(np.float32))
def batch_iterator(self, n_epochs, batch_size):
for epoch in range(n_epochs):
indices = np.arange(self.X.shape[0])
np.random.shuffle(indices)
for pos in range(0, self.X.shape[0] - batch_size + 1, batch_size):
ind = indices[pos: pos + batch_size]
yield self.X[ind], self.Y[ind]
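# A minimal usage sketch of MLPClassifier, assuming the TF 1.x graph-mode API
# used above; hyperparameter values are placeholders:
#
#   clf = MLPClassifier(num_units=20, num_layers=1, activation='sigmoid')
#   clf.prepare_data('digits')
#   ops = clf.build(optimizer='adam')
#   with tf.Session() as sess:
#       sess.run(tf.global_variables_initializer())
#       for x_batch, y_batch in clf.batch_iterator(n_epochs=5, batch_size=64):
#           _, loss = sess.run([ops['train_op'], ops['loss']],
#                              feed_dict={ops['x']: x_batch, ops['y']: y_batch})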
| mit |
IssamLaradji/scikit-learn | examples/datasets/plot_random_multilabel_dataset.py | 15 | 3460 | """
==============================================
Plot randomly generated multilabel dataset
==============================================
This illustrates the `datasets.make_multilabel_classification` dataset
generator. Each sample consists of counts of two features (up to 50 in
total), which are differently distributed in each of two classes.
Points are labeled as follows, where Y means the class is present:
===== ===== ===== ======
1 2 3 Color
===== ===== ===== ======
Y N N Red
N Y N Blue
N N Y Yellow
Y Y N Purple
Y N Y Orange
N Y Y Green
Y Y Y Brown
===== ===== ===== ======
A star marks the expected sample for each class; its size reflects the
probability of selecting that class label.
The left and right examples highlight the ``n_labels`` parameter:
more of the samples in the right plot have 2 or 3 labels.
Note that this two-dimensional example is very degenerate:
generally the number of features would be much greater than the
"document length", while here we have much larger documents than vocabulary.
Similarly, with ``n_classes > n_features``, it is much less likely that a
feature distinguishes a particular class.
"""
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets import make_multilabel_classification as make_ml_clf
print(__doc__)
COLORS = np.array(['!',
'#FF3333', # red
'#0198E1', # blue
'#BF5FFF', # purple
'#FCD116', # yellow
'#FF7216', # orange
'#4DBD33', # green
'#87421F' # brown
])
# Use same random seed for multiple calls to make_multilabel_classification to
# ensure same distributions
RANDOM_SEED = np.random.randint(2 ** 10)
def plot_2d(ax, n_labels=1, n_classes=3, length=50):
X, Y, p_c, p_w_c = make_ml_clf(n_samples=150, n_features=2,
n_classes=n_classes, n_labels=n_labels,
length=length, allow_unlabeled=False,
return_indicator=True,
return_distributions=True,
random_state=RANDOM_SEED)
ax.scatter(X[:, 0], X[:, 1], color=COLORS.take((Y * [1, 2, 4]
).sum(axis=1)),
marker='.')
ax.scatter(p_w_c[0] * length, p_w_c[1] * length,
marker='*', linewidth=.5, edgecolor='black',
s=20 + 1500 * p_c ** 2,
color=COLORS.take([1, 2, 4]))
ax.set_xlabel('Feature 0 count')
return p_c, p_w_c
_, (ax1, ax2) = plt.subplots(1, 2, sharex='row', sharey='row', figsize=(8, 4))
plt.subplots_adjust(bottom=.15)
p_c, p_w_c = plot_2d(ax1, n_labels=1)
ax1.set_title('n_labels=1, length=50')
ax1.set_ylabel('Feature 1 count')
plot_2d(ax2, n_labels=3)
ax2.set_title('n_labels=3, length=50')
ax2.set_xlim(left=0, auto=True)
ax2.set_ylim(bottom=0, auto=True)
plt.show()
print('The data was generated from (random_state=%d):' % RANDOM_SEED)
print('Class', 'P(C)', 'P(w0|C)', 'P(w1|C)', sep='\t')
for k, p, p_w in zip(['red', 'blue', 'yellow'], p_c, p_w_c.T):
print('%s\t%0.2f\t%0.2f\t%0.2f' % (k, p, p_w[0], p_w[1]))
| bsd-3-clause |
zorojean/scikit-learn | sklearn/datasets/mldata.py | 309 | 7838 | """Automatically download MLdata datasets."""
# Copyright (c) 2011 Pietro Berkes
# License: BSD 3 clause
import os
from os.path import join, exists
import re
import numbers
try:
# Python 2
from urllib2 import HTTPError
from urllib2 import quote
from urllib2 import urlopen
except ImportError:
# Python 3+
from urllib.error import HTTPError
from urllib.parse import quote
from urllib.request import urlopen
import numpy as np
import scipy as sp
from scipy import io
from shutil import copyfileobj
from .base import get_data_home, Bunch
MLDATA_BASE_URL = "http://mldata.org/repository/data/download/matlab/%s"
def mldata_filename(dataname):
"""Convert a raw name for a data set in a mldata.org filename."""
dataname = dataname.lower().replace(' ', '-')
return re.sub(r'[().]', '', dataname)
def fetch_mldata(dataname, target_name='label', data_name='data',
transpose_data=True, data_home=None):
"""Fetch an mldata.org data set
If the file does not exist yet, it is downloaded from mldata.org .
mldata.org does not have an enforced convention for storing data or
naming the columns in a data set. The default behavior of this function
works well with the most common cases:
1) data values are stored in the column 'data', and target values in the
column 'label'
2) alternatively, the first column stores target values, and the second
data values
    3) the data array is stored as `n_features x n_samples`, and thus needs
to be transposed to match the `sklearn` standard
Keyword arguments allow to adapt these defaults to specific data sets
(see parameters `target_name`, `data_name`, `transpose_data`, and
the examples below).
mldata.org data sets may have multiple columns, which are stored in the
Bunch object with their original name.
Parameters
----------
dataname:
Name of the data set on mldata.org,
e.g.: "leukemia", "Whistler Daily Snowfall", etc.
The raw name is automatically converted to a mldata.org URL .
target_name: optional, default: 'label'
Name or index of the column containing the target values.
data_name: optional, default: 'data'
Name or index of the column containing the data.
transpose_data: optional, default: True
If True, transpose the downloaded data array.
data_home: optional, default: None
Specify another download and cache folder for the data sets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
Returns
-------
data : Bunch
Dictionary-like object, the interesting attributes are:
'data', the data to learn, 'target', the classification labels,
'DESCR', the full description of the dataset, and
'COL_NAMES', the original names of the dataset columns.
Examples
--------
Load the 'iris' dataset from mldata.org:
>>> from sklearn.datasets.mldata import fetch_mldata
>>> import tempfile
>>> test_data_home = tempfile.mkdtemp()
>>> iris = fetch_mldata('iris', data_home=test_data_home)
>>> iris.target.shape
(150,)
>>> iris.data.shape
(150, 4)
Load the 'leukemia' dataset from mldata.org, which needs to be transposed
to respects the sklearn axes convention:
>>> leuk = fetch_mldata('leukemia', transpose_data=True,
... data_home=test_data_home)
>>> leuk.data.shape
(72, 7129)
Load an alternative 'iris' dataset, which has different names for the
columns:
>>> iris2 = fetch_mldata('datasets-UCI iris', target_name=1,
... data_name=0, data_home=test_data_home)
>>> iris3 = fetch_mldata('datasets-UCI iris',
... target_name='class', data_name='double0',
... data_home=test_data_home)
>>> import shutil
>>> shutil.rmtree(test_data_home)
"""
# normalize dataset name
dataname = mldata_filename(dataname)
# check if this data set has been already downloaded
data_home = get_data_home(data_home=data_home)
data_home = join(data_home, 'mldata')
if not exists(data_home):
os.makedirs(data_home)
matlab_name = dataname + '.mat'
filename = join(data_home, matlab_name)
# if the file does not exist, download it
if not exists(filename):
urlname = MLDATA_BASE_URL % quote(dataname)
try:
mldata_url = urlopen(urlname)
except HTTPError as e:
if e.code == 404:
e.msg = "Dataset '%s' not found on mldata.org." % dataname
raise
# store Matlab file
try:
with open(filename, 'w+b') as matlab_file:
copyfileobj(mldata_url, matlab_file)
except:
os.remove(filename)
raise
mldata_url.close()
# load dataset matlab file
with open(filename, 'rb') as matlab_file:
matlab_dict = io.loadmat(matlab_file, struct_as_record=True)
# -- extract data from matlab_dict
# flatten column names
col_names = [str(descr[0])
for descr in matlab_dict['mldata_descr_ordering'][0]]
# if target or data names are indices, transform then into names
if isinstance(target_name, numbers.Integral):
target_name = col_names[target_name]
if isinstance(data_name, numbers.Integral):
data_name = col_names[data_name]
# rules for making sense of the mldata.org data format
# (earlier ones have priority):
# 1) there is only one array => it is "data"
# 2) there are multiple arrays
# a) copy all columns in the bunch, using their column name
# b) if there is a column called `target_name`, set "target" to it,
# otherwise set "target" to first column
# c) if there is a column called `data_name`, set "data" to it,
# otherwise set "data" to second column
dataset = {'DESCR': 'mldata.org dataset: %s' % dataname,
'COL_NAMES': col_names}
# 1) there is only one array => it is considered data
if len(col_names) == 1:
data_name = col_names[0]
dataset['data'] = matlab_dict[data_name]
# 2) there are multiple arrays
else:
for name in col_names:
dataset[name] = matlab_dict[name]
if target_name in col_names:
del dataset[target_name]
dataset['target'] = matlab_dict[target_name]
else:
del dataset[col_names[0]]
dataset['target'] = matlab_dict[col_names[0]]
if data_name in col_names:
del dataset[data_name]
dataset['data'] = matlab_dict[data_name]
else:
del dataset[col_names[1]]
dataset['data'] = matlab_dict[col_names[1]]
# set axes to sklearn conventions
if transpose_data:
dataset['data'] = dataset['data'].T
if 'target' in dataset:
if not sp.sparse.issparse(dataset['target']):
dataset['target'] = dataset['target'].squeeze()
return Bunch(**dataset)
# The following is used by nosetests to setup the docstring tests fixture
def setup_module(module):
# setup mock urllib2 module to avoid downloading from mldata.org
from sklearn.utils.testing import install_mldata_mock
install_mldata_mock({
'iris': {
'data': np.empty((150, 4)),
'label': np.empty(150),
},
'datasets-uci-iris': {
'double0': np.empty((150, 4)),
'class': np.empty((150,)),
},
'leukemia': {
'data': np.empty((72, 7129)),
},
})
def teardown_module(module):
from sklearn.utils.testing import uninstall_mldata_mock
uninstall_mldata_mock()
| bsd-3-clause |
I2Cvb/data_balancing | pipeline/feature-classification/classification_melanoma_naive_bayes.py | 1 | 7583 | #title :classification_imbalanced_study.py
#description :Naive Bayes classification of melanoma features under several class-balancing strategies.
#author :Guillaume Lemaitre, Mojdeh Rastgoo
#date :2016/01/19
#version :0.1
#notes :
#python_version :2.7.6
#==============================================================================
# Import the needed libraries
# Numpy library
import numpy as np
import pandas as pd
import h5py
# Joblib library
### Module to performed parallel processing
from joblib import Parallel, delayed
### Module to performed parallel processing
import multiprocessing
# OS library
import os
from os.path import join, isdir, isfile
# sys library
import sys
# Scikit-learn library
from sklearn.datasets import make_classification
from sklearn.cross_validation import KFold
from sklearn.cross_validation import StratifiedKFold
from sklearn.preprocessing import MinMaxScaler
from protoclass.classification.classification import Classify
# Initialization to the data paths
dataPath = sys.argv[1]
path_to_save = sys.argv[2]
#fread = pd.read_csv(dataPath.__add__('feature.csv'))
fread = pd.read_csv(join(dataPath, 'feature.csv'))
FeatureLists = fread.values
FeatureLists = FeatureLists[:,0]
#f= h5py.File(dataPath.__add__('PH2_Train_Test_80_20.mat'), 'r')
f = h5py.File(join(dataPath, 'PH2_Train_Test_80_20.mat'), 'r')
#CVIdx = sio.loadmat(datapath.__add__('TrainTestIndex_117_39_80.mat'))
trainIdx = np.asmatrix(f.get('trainingIdx'))
trainIdx = trainIdx.T
trainIdx = trainIdx - 1.
testIdx = np.asmatrix(f.get('testingIdx'))
testIdx = testIdx.T
testIdx = testIdx - 1.
Labels = np.asmatrix(f.get('BinaryLabels'))
Labels = Labels.T
ntree = 100
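# Classifier configurations to evaluate: a plain naive Bayes baseline with
# fixed class priors, followed by naive Bayes combined with different
# over-/under-sampling and ensemble balancing strategies.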
config = [{'classifier_str' : 'naive-bayes', 'class_prior' : np.array([.2, .8])},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'random-over-sampling'},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'smote', 'kind_smote' : 'regular'},
#{'classifier_str' : 'naive-bayes',
#'balancing_criterion' : 'smote', 'kind_smote' : 'borderline1'},
#{'classifier_str' : 'naive-bayes',
#'balancing_criterion' : 'smote', 'kind_smote' : 'borderline2'},
#{'classifier_str' : 'naive-bayes',
#'balancing_criterion' : 'smote', 'kind_smote' : 'svm'},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'random-under-sampling', 'replacement' : True},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'tomek_links'},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'clustering'},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'nearmiss', 'version_nearmiss' : 1, 'size_ngh': 3},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'nearmiss', 'version_nearmiss' : 2, 'size_ngh': 3},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'nearmiss', 'version_nearmiss' : 3, 'size_ngh': 3, 'ver3_samp_ngh' : 3},
#{'classifier_str' : 'naive-bayes',
#'balancing_criterion' : 'cnn', 'size_ngh' : 3, 'n_seeds_S' :1},
#{'classifier_str' : 'naive-bayes',
#'balancing_criterion' : 'one-sided-selection', 'size_ngh' : 1, 'n_seeds_S' :1},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'ncr', 'size_ngh' : 3},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'easy-ensemble', 'n_subsets' : 10},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'balance-cascade', 'n_max_subset' : 100,
'balancing_classifier' : 'knn', 'bootstrap' : True},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'smote-enn', 'size_ngh' : 3},
{'classifier_str' : 'naive-bayes', 'class_prior' : None,
'balancing_criterion' : 'smote-tomek'}]
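# Each row of FeaturesIdx is a binary mask over six feature files from
# FeatureLists; the selected feature matrices are concatenated column-wise
# into the combined feature matrix FV for that experiment.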
FeaturesIdx = np.array([[1,0,0,0,0,0], [0,1,0,0,0,0], [0,0,1,0,0,0], [0,0,0,1,0,0], [0,0,0,0,1,0], [0,0,0,0,0,1],\
[1,1,0,0,0,0], [1,0,1,0,0,0], [1,0,0,1,0,0], [0,1,1,0,0,0], [0,1,0,1,0,0], [0,0,1,1,0,0],\
[0,0,0,0,1,1], [1,1,1,1,0,0], [1,0,0,0,1,1], [0,1,0,0,1,1], [0,0,1,0,1,1], [0,0,0,1,1,1],\
[1,1,0,0,1,1], [1,0,1,0,1,1], [1,0,0,1,1,1], [0,1,1,0,1,1], [0,1,0,1,1,1], [0,0,1,1,1,1]])
#[0,1,0,0],[0,0,1,0],[0,0,0,1] , [0,0,1,1], [1,1,0,0],[1,0,1,1],[0,1,1,1],[1,1,1,1]])
for I in range (0, FeaturesIdx.shape[0]):
NonzeroIdx = np.ravel(np.nonzero(FeaturesIdx[I]))
FVcombined = np.empty(shape = [193, 0])
for PIdx in range (0, NonzeroIdx.shape[0]):
f= h5py.File(join(dataPath,FeatureLists[NonzeroIdx[PIdx]]), 'r')
#f = sio.loadmat(join(featurePath, FeatureLists[NonzeroIdx[PIdx]]))
FV = np.asmatrix(f.get('FV'))
FV = FV.T
FVcombined = np.append(FVcombined, FV, axis = 1)
del FV
FV = FVcombined
rocs = []
gt_labels = []
pred_labels = []
pred_probs = []
# Apply the classification for each fold
for CV in range (0, trainIdx.shape[1]):
print 'Iteration #{}'.format(CV)
# Extract the data
### Training
train_data = FV[np.ravel(trainIdx[:,CV].astype(int)), :]
train_label = np.ravel(Labels[np.ravel(trainIdx[:,CV].astype(int))])
### Testing
test_data = FV[np.ravel(testIdx[:,CV].astype(int)), :]
test_label = np.ravel(Labels[np.ravel(testIdx[:,CV].astype(int))])
config_roc = []
config_pred_label = []
config_pred_prob = []
config_gt_label = []
for c in config:
print c
pred_label, pred_prob, roc = Classify(train_data, train_label, test_data, test_label, **c)
config_roc.append(roc)
config_pred_label.append(pred_label)
config_pred_prob.append(pred_prob)
config_gt_label.append(test_label)
rocs.append(config_roc)
pred_labels.append(config_pred_label)
pred_probs.append(config_pred_prob)
gt_labels.append(config_gt_label)
# Convert the data to store to numpy data
rocs = np.array(rocs)
pred_labels = np.array(pred_labels)
pred_probs = np.array(pred_probs)
gt_labels = np.array(gt_labels)
# Reshape the array to have the first index corresponding to the
# configuration, the second index to the iteration of the k-fold
# and the last index to the data themselve.
rocs = np.swapaxes(rocs, 0, 1)
pred_labels = np.swapaxes(pred_labels, 0, 1)
pred_probs = np.swapaxes(pred_probs, 0, 1)
gt_labels = np.swapaxes(gt_labels, 0, 1)
# Save the results somewhere
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
from os.path import basename
saving_filename = 'melanoma_imbalanced_80_20_' + str(ntree) + '_' + str(I)
saving_path = join(path_to_save, saving_filename)
np.savez(saving_path, gt_labels=gt_labels, pred_labels=pred_labels, pred_probs=pred_probs, rocs=rocs)
tosave={}
tosave['rocs'] = rocs
tosave['pred_labels'] = pred_labels
tosave['pred_probs'] = pred_probs
tosave['gt_labels'] = gt_labels
saving_path = join(path_to_save, saving_filename)
from scipy.io import savemat
savemat(saving_path, tosave)
| mit |
stormsson/procedural_city_generation_wrapper | vendor/stormsson/pcg_wrapper/roadMapGenerator.py | 1 | 4240 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import division
from copy import copy
import matplotlib.pyplot as plt
from pcg_wrapper.configurationInstance import ConfigurationInstance
from procedural_city_generation.roadmap.getSuggestion import getSuggestion
from procedural_city_generation.roadmap.check import check
from procedural_city_generation.additional_stuff.pickletools import save_vertexlist
gui=None
class RoadMapGenerator():
def __init__(self, input_dir_path, temp_dir_path):
self.input_dir_path = input_dir_path
self.temp_dir_path = temp_dir_path
self.configurationInstance = ConfigurationInstance(self.input_dir_path, self.temp_dir_path)
def getConfigurationInstance(self):
return self.configurationInstance
def generateRoadMap(self, rule_image_path, density_image_path, seed=False, plotMap=False, plotVertexes=False):
""" Generate a roadmap and return a vertex list
Params:
rule_image_path (string) path to the image that controls how the city growth is executed
density_image_path (string) image path that defines the population density
seed (integer) seed to use for random generation
plotMap (bool) show the map?
plotVertexes (bool) show the single vertexes?
Returns:
vertex list (list)
"""
self.configurationInstance.setSeed(seed)
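# Hypothetical usage sketch (the paths and seed below are placeholders,
# not part of the original project):
#   generator = RoadMapGenerator('input/', 'temp/')
#   vertexes = generator.generateRoadMap('rule.png', 'density.png', seed=42)
# Each returned entry is a dict with 'coords', 'neighbours' and 'minor_road'.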
self.singleton = self.configurationInstance.getRoadmapSingleton(rule_image_path, density_image_path)
front=copy(self.singleton.global_lists.vertex_list)
front.pop(0)
front.pop()
vertex_queue = copy(self.singleton.global_lists.vertex_queue)
self.singleton.iterationszaehler=0
if plotMap:
plt.close()
fig=plt.figure()
ax=plt.subplot(111)
fig.canvas.draw()
ax.set_xlim((-self.singleton.border[0], self.singleton.border[0]))
ax.set_ylim((-self.singleton.border[1], self.singleton.border[1]))
i=0
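# Grow the road network until both the active front and the queued
# minor-road vertices are exhausted.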
while (front!=[] or self.singleton.global_lists.vertex_queue !=[]):
i+=1
front=self._iteration(front)
if plotMap == 1:
if i%self.singleton.plot_counter == 0:
# plt.pause(0.001)
try:
fig.canvas.blit(ax.bbox)
fig.canvas.flush_events()
except:
fig.canvas.draw()
self.singleton.iterationszaehler=0
vertexes = [];
for v in self.singleton.global_lists.vertex_list:
neighbours = [ n.coords for n in v.neighbours ]
vertexes.append({
'coords': v.coords,
'neighbours': neighbours,
'minor_road': v.minor_road
})
if plotMap and plotVertexes:
plt.plot(
[v['coords'][0] for v in vertexes],
[v['coords'][1] for v in vertexes],
'o', color="red")
# print("Roadmap is complete!")
if plotMap:
plt.show()
return vertexes
def _iteration(self, front):
"""
Gets called in the main loop.
Manages the front, the newfront and the vertex queue.
Parameters
----------
front : list<Vertex>
Returns
-------
newfront : list<Vertex>
"""
newfront=[]
for vertex in front:
for suggested_vertex in getSuggestion(vertex):
newfront=check(suggested_vertex, vertex, newfront)
# Increments the delay counter of each element in the queue
self.singleton.global_lists.vertex_queue=[[x[0], x[1]+1] for x in self.singleton.global_lists.vertex_queue]
# Finds elements in the queue which are to be added to the newfront
while self.singleton.global_lists.vertex_queue!=[] and self.singleton.global_lists.vertex_queue[0][1]>=self.singleton.minor_road_delay:
newfront.append(self.singleton.global_lists.vertex_queue.pop(0)[0])
return newfront | mpl-2.0 |
0x0all/nupic | external/linux32/lib/python2.6/site-packages/matplotlib/units.py | 70 | 4810 | """
The classes here provide support for using custom classes with
matplotlib, eg those that do not expose the array interface but know
how to convert themselves to arrays. It also supports classes with
units and units conversion. Use cases include converters for custom
objects, eg a list of datetime objects, as well as for objects that
are unit aware. We don't assume any particular units implementation,
rather a units implementation must provide a ConversionInterface, and
then register with the Registry converter dictionary. For example,
here is a complete implementation which supports plotting with native
datetime objects
import matplotlib.units as units
import matplotlib.dates as dates
import matplotlib.ticker as ticker
import datetime
class DateConverter(units.ConversionInterface):
def convert(value, unit):
'convert value to a scalar or array'
return dates.date2num(value)
convert = staticmethod(convert)
def axisinfo(unit):
'return major and minor tick locators and formatters'
if unit!='date': return None
majloc = dates.AutoDateLocator()
majfmt = dates.AutoDateFormatter(majloc)
return AxisInfo(majloc=majloc,
majfmt=majfmt,
label='date')
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return 'date'
default_units = staticmethod(default_units)
# finally we register our object type with a converter
units.registry[datetime.date] = DateConverter()
"""
import numpy as np
from matplotlib.cbook import iterable, is_numlike
class AxisInfo:
'information to support default axis labeling and tick labeling'
def __init__(self, majloc=None, minloc=None,
majfmt=None, minfmt=None, label=None):
"""
majloc and minloc: TickLocators for the major and minor ticks
majfmt and minfmt: TickFormatters for the major and minor ticks
label: the default axis label
If any of the above are None, the axis will simply use the default
"""
self.majloc = majloc
self.minloc = minloc
self.majfmt = majfmt
self.minfmt = minfmt
self.label = label
class ConversionInterface:
"""
The minimal interface for a converter to take custom instances (or
sequences) and convert them to values mpl can use
"""
def axisinfo(unit):
'return an units.AxisInfo instance for unit'
return None
axisinfo = staticmethod(axisinfo)
def default_units(x):
'return the default unit for x or None'
return None
default_units = staticmethod(default_units)
def convert(obj, unit):
"""
convert obj using unit. If obj is a sequence, return the
converted sequence. The output must be a sequence of scalars
that can be used by the numpy array layer
"""
return obj
convert = staticmethod(convert)
def is_numlike(x):
"""
The matplotlib datalim, autoscaling, locators etc work with
scalars which are the units converted to floats given the
current unit. The converter may be passed these floats, or
arrays of them, even when units are set. Derived conversion
interfaces may opt to pass plain old unitless numbers through
the conversion interface and this is a helper function for
them.
"""
if iterable(x):
for thisx in x:
return is_numlike(thisx)
else:
return is_numlike(x)
is_numlike = staticmethod(is_numlike)
class Registry(dict):
"""
register types with conversion interface
"""
def __init__(self):
dict.__init__(self)
self._cached = {}
def get_converter(self, x):
'get the converter interface instance for x, or None'
if not len(self): return None # nothing registered
#DISABLED idx = id(x)
#DISABLED cached = self._cached.get(idx)
#DISABLED if cached is not None: return cached
converter = None
classx = getattr(x, '__class__', None)
if classx is not None:
converter = self.get(classx)
if converter is None and iterable(x):
# if this is anything but an object array, we'll assume
# there are no custom units
if isinstance(x, np.ndarray) and x.dtype != np.object:
return None
for thisx in x:
converter = self.get_converter( thisx )
return converter
#DISABLED self._cached[idx] = converter
return converter
registry = Registry()
| gpl-3.0 |
ElDeveloper/qiita | qiita_db/test/test_util.py | 2 | 56304 | # -----------------------------------------------------------------------------
# Copyright (c) 2014--, The Qiita Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
from unittest import TestCase, main
from tempfile import mkstemp, mkdtemp, NamedTemporaryFile, TemporaryFile
from os import close, remove, mkdir
from os.path import join, exists, basename
from shutil import rmtree
from datetime import datetime
from functools import partial
from string import punctuation
import h5py
from six import StringIO, BytesIO
import pandas as pd
from qiita_core.util import qiita_test_checker
import qiita_db as qdb
@qiita_test_checker()
class DBUtilTestsBase(TestCase):
def setUp(self):
self.table = 'study'
self.required = [
'study_title', 'mixs_compliant',
'metadata_complete', 'study_description', 'first_contact',
'reprocess', 'timeseries_type_id', 'study_alias',
'study_abstract', 'principal_investigator_id', 'email']
self.files_to_remove = []
def tearDown(self):
for fp in self.files_to_remove:
if exists(fp):
remove(fp)
class DBUtilTests(DBUtilTestsBase):
def test_filepath_id_to_object_id(self):
# filepaths 1, 2 belongs to artifact 1
self.assertEqual(qdb.util.filepath_id_to_object_id(1), 1)
self.assertEqual(qdb.util.filepath_id_to_object_id(2), 1)
# filepaths 3, 4 belongs to artifact 2
self.assertEqual(qdb.util.filepath_id_to_object_id(3), 2)
self.assertEqual(qdb.util.filepath_id_to_object_id(4), 2)
# filepaths 9 belongs to artifact 4
self.assertEqual(qdb.util.filepath_id_to_object_id(9), 4)
# filepath 16 belongs to analysis 1
self.assertEqual(qdb.util.filepath_id_to_object_id(16), 1)
# filepath 18 belongs to study 1
self.assertIsNone(qdb.util.filepath_id_to_object_id(18))
# filepath 22 belongs to analysis/artifact 7
self.assertEqual(qdb.util.filepath_id_to_object_id(22), 7)
def test_check_required_columns(self):
# Doesn't do anything if correct info passed, only errors if wrong info
qdb.util.check_required_columns(self.required, self.table)
def test_check_required_columns_fail(self):
self.required.remove('study_title')
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.util.check_required_columns(self.required, self.table)
def test_check_table_cols(self):
# Doesn't do anything if correct info passed, only errors if wrong info
qdb.util.check_table_cols(self.required, self.table)
def test_check_table_cols_fail(self):
self.required.append('BADTHINGNOINHERE')
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.util.check_table_cols(self.required, self.table)
def test_get_table_cols(self):
obs = qdb.util.get_table_cols("qiita_user")
exp = {"email", "user_level_id", "password", "name", "affiliation",
"address", "phone", "user_verify_code", "pass_reset_code",
"pass_reset_timestamp"}
self.assertEqual(set(obs), exp)
def test_exists_table(self):
"""Correctly checks if a table exists"""
# True cases
self.assertTrue(qdb.util.exists_table("filepath"))
self.assertTrue(qdb.util.exists_table("qiita_user"))
self.assertTrue(qdb.util.exists_table("analysis"))
self.assertTrue(qdb.util.exists_table("prep_1"))
self.assertTrue(qdb.util.exists_table("sample_1"))
# False cases
self.assertFalse(qdb.util.exists_table("sample_2"))
self.assertFalse(qdb.util.exists_table("prep_3"))
self.assertFalse(qdb.util.exists_table("foo_table"))
self.assertFalse(qdb.util.exists_table("bar_table"))
def test_convert_to_id(self):
"""Tests that ids are returned correctly"""
self.assertEqual(
qdb.util.convert_to_id("directory", "filepath_type"), 8)
self.assertEqual(
qdb.util.convert_to_id("private", "visibility", "visibility"), 3)
self.assertEqual(
qdb.util.convert_to_id("EMP", "portal_type", "portal"), 2)
def test_convert_to_id_bad_value(self):
"""Tests that ids are returned correctly"""
with self.assertRaises(qdb.exceptions.QiitaDBLookupError):
qdb.util.convert_to_id("FAKE", "filepath_type")
def test_get_artifact_types(self):
obs = qdb.util.get_artifact_types()
exp = {'SFF': 1, 'FASTA_Sanger': 2, 'FASTQ': 3, 'FASTA': 4,
'per_sample_FASTQ': 5, 'Demultiplexed': 6, 'BIOM': 7,
'beta_div_plots': 8, 'rarefaction_curves': 9,
'taxa_summary': 10}
self.assertEqual(obs, exp)
obs = qdb.util.get_artifact_types(key_by_id=True)
exp = {v: k for k, v in exp.items()}
self.assertEqual(obs, exp)
def test_get_filepath_types(self):
"""Tests that get_filepath_types works with valid arguments"""
obs = qdb.util.get_filepath_types()
exp = {'raw_forward_seqs': 1, 'raw_reverse_seqs': 2,
'raw_barcodes': 3, 'preprocessed_fasta': 4,
'preprocessed_fastq': 5, 'preprocessed_demux': 6, 'biom': 7,
'directory': 8, 'plain_text': 9, 'reference_seqs': 10,
'reference_tax': 11, 'reference_tree': 12, 'log': 13,
'sample_template': 14, 'prep_template': 15, 'qiime_map': 16,
}
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add("SELECT filepath_type,filepath_type_id "
"FROM qiita.filepath_type")
exp = dict(qdb.sql_connection.TRN.execute_fetchindex())
self.assertEqual(obs, exp)
obs = qdb.util.get_filepath_types(key='filepath_type_id')
exp = {v: k for k, v in exp.items()}
self.assertEqual(obs, exp)
def test_get_filepath_types_fail(self):
"""Tests that get_Filetypes fails with invalid argument"""
with self.assertRaises(qdb.exceptions.QiitaDBColumnError):
qdb.util.get_filepath_types(key='invalid')
def test_get_data_types(self):
"""Tests that get_data_types works with valid arguments"""
obs = qdb.util.get_data_types()
exp = {'16S': 1, '18S': 2, 'ITS': 3, 'Proteomic': 4, 'Metabolomic': 5,
'Metagenomic': 6, 'Multiomic': 7, 'Metatranscriptomics': 8,
'Viromics': 9, 'Genomics': 10, 'Transcriptomics': 11}
self.assertEqual(obs, exp)
obs = qdb.util.get_data_types(key='data_type_id')
exp = {v: k for k, v in exp.items()}
self.assertEqual(obs, exp)
def test_create_rand_string(self):
set_punct = set(punctuation)
obs = qdb.util.create_rand_string(200)
self.assertEqual(len(obs), 200)
self.assertTrue(set_punct.intersection(set(obs)))
obs = qdb.util.create_rand_string(400, punct=False)
self.assertEqual(len(obs), 400)
self.assertFalse(set_punct.intersection(set(obs)))
def test_get_count(self):
"""Checks that get_count retrieves proper count"""
self.assertEqual(qdb.util.get_count('qiita.study_person'), 3)
def test_check_count(self):
"""Checks that check_count returns True and False appropriately"""
self.assertTrue(qdb.util.check_count('qiita.study_person', 3))
self.assertFalse(qdb.util.check_count('qiita.study_person', 2))
def test_insert_filepaths(self):
fd, fp = mkstemp()
close(fd)
with open(fp, "w") as f:
f.write("\n")
self.files_to_remove.append(fp)
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(
"SELECT last_value FROM qiita.filepath_filepath_id_seq")
exp_new_id = 1 + qdb.sql_connection.TRN.execute_fetchflatten()[0]
obs = qdb.util.insert_filepaths([(fp, 1)], 2, "raw_data")
self.assertEqual(obs, [exp_new_id])
# Check that the files have been copied correctly
exp_fp = join(qdb.util.get_db_files_base_dir(), "raw_data",
"2_%s" % basename(fp))
self.assertTrue(exists(exp_fp))
self.assertFalse(exists(fp))
self.files_to_remove.append(exp_fp)
# Check that the filepaths have been added to the DB
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add("SELECT * FROM qiita.filepath "
"WHERE filepath_id=%d" % exp_new_id)
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp_fp = "2_%s" % basename(fp)
exp = [[exp_new_id, exp_fp, 1, '852952723', 1, 5, 1]]
self.assertEqual(obs, exp)
qdb.util.purge_filepaths()
def test_insert_filepaths_copy(self):
fd, fp = mkstemp()
close(fd)
with open(fp, "w") as f:
f.write("\n")
self.files_to_remove.append(fp)
# The id's in the database are bigserials, i.e. they get
# autoincremented for each element introduced.
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(
"SELECT last_value FROM qiita.filepath_filepath_id_seq")
exp_new_id = 1 + qdb.sql_connection.TRN.execute_fetchflatten()[0]
obs = qdb.util.insert_filepaths([(fp, 1)], 2, "raw_data", copy=True)
self.assertEqual(obs, [exp_new_id])
# Check that the files have been copied correctly
exp_fp = join(qdb.util.get_db_files_base_dir(), "raw_data",
"2_%s" % basename(fp))
self.assertTrue(exists(exp_fp))
self.assertTrue(exists(fp))
self.files_to_remove.append(exp_fp)
# Check that the filepaths have been added to the DB
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add("SELECT * FROM qiita.filepath "
"WHERE filepath_id=%d" % exp_new_id)
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp_fp = "2_%s" % basename(fp)
exp = [[exp_new_id, exp_fp, 1, '852952723', 1, 5, 1]]
self.assertEqual(obs, exp)
qdb.util.purge_filepaths()
def test_insert_filepaths_string(self):
fd, fp = mkstemp()
close(fd)
with open(fp, "w") as f:
f.write("\n")
self.files_to_remove.append(fp)
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(
"SELECT last_value FROM qiita.filepath_filepath_id_seq")
exp_new_id = 1 + qdb.sql_connection.TRN.execute_fetchflatten()[0]
obs = qdb.util.insert_filepaths(
[(fp, "raw_forward_seqs")], 2, "raw_data")
self.assertEqual(obs, [exp_new_id])
# Check that the files have been copied correctly
exp_fp = join(qdb.util.get_db_files_base_dir(), "raw_data",
"2_%s" % basename(fp))
self.assertTrue(exists(exp_fp))
self.files_to_remove.append(exp_fp)
# Check that the filepaths have been added to the DB
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add("SELECT * FROM qiita.filepath "
"WHERE filepath_id=%d" % exp_new_id)
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp_fp = "2_%s" % basename(fp)
exp = [[exp_new_id, exp_fp, 1, '852952723', 1, 5, 1]]
self.assertEqual(obs, exp)
qdb.util.purge_filepaths()
def test_retrieve_filepaths(self):
obs = qdb.util.retrieve_filepaths('artifact_filepath',
'artifact_id', 1)
path_builder = partial(
join, qdb.util.get_db_files_base_dir(), "raw_data")
exp = [{'fp_id': 1,
'fp': path_builder("1_s_G1_L001_sequences.fastq.gz"),
'fp_type': "raw_forward_seqs",
'checksum': '2125826711',
'fp_size': 58},
{'fp_id': 2,
'fp': path_builder("1_s_G1_L001_sequences_barcodes.fastq.gz"),
'fp_type': "raw_barcodes",
'checksum': '2125826711',
'fp_size': 58}]
self.assertEqual(obs, exp)
def test_retrieve_filepaths_sort(self):
obs = qdb.util.retrieve_filepaths(
'artifact_filepath', 'artifact_id', 1, sort='descending')
path_builder = partial(
join, qdb.util.get_db_files_base_dir(), "raw_data")
exp = [{'fp_id': 2,
'fp': path_builder("1_s_G1_L001_sequences_barcodes.fastq.gz"),
'fp_type': "raw_barcodes",
'checksum': '2125826711',
'fp_size': 58},
{'fp_id': 1,
'fp': path_builder("1_s_G1_L001_sequences.fastq.gz"),
'fp_type': "raw_forward_seqs",
'checksum': '2125826711',
'fp_size': 58}]
self.assertEqual(obs, exp)
def test_retrieve_filepaths_type(self):
obs = qdb.util.retrieve_filepaths(
'artifact_filepath', 'artifact_id', 1, sort='descending',
fp_type='raw_barcodes')
path_builder = partial(
join, qdb.util.get_db_files_base_dir(), "raw_data")
exp = [{'fp_id': 2,
'fp': path_builder("1_s_G1_L001_sequences_barcodes.fastq.gz"),
'fp_type': "raw_barcodes",
'checksum': '2125826711',
'fp_size': 58}]
self.assertEqual(obs, exp)
obs = qdb.util.retrieve_filepaths(
'artifact_filepath', 'artifact_id', 1, fp_type='raw_barcodes')
path_builder = partial(
join, qdb.util.get_db_files_base_dir(), "raw_data")
exp = [{'fp_id': 2,
'fp': path_builder("1_s_G1_L001_sequences_barcodes.fastq.gz"),
'fp_type': "raw_barcodes",
'checksum': '2125826711',
'fp_size': 58}]
self.assertEqual(obs, exp)
obs = qdb.util.retrieve_filepaths(
'artifact_filepath', 'artifact_id', 1, fp_type='biom')
path_builder = partial(
join, qdb.util.get_db_files_base_dir(), "raw_data")
self.assertEqual(obs, [])
def test_retrieve_filepaths_error(self):
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.util.retrieve_filepaths('artifact_filepath', 'artifact_id', 1,
sort='Unknown')
def test_empty_trash_upload_folder(self):
# creating file to delete so we know it actually works
study_id = '1'
uploads_fp = join(qdb.util.get_mountpoint("uploads")[0][1], study_id)
trash = join(uploads_fp, 'trash')
if not exists(trash):
mkdir(trash)
fp = join(trash, 'my_file_to_delete.txt')
open(fp, 'w').close()
self.assertTrue(exists(fp))
qdb.util.empty_trash_upload_folder()
self.assertFalse(exists(fp))
def test_move_filepaths_to_upload_folder(self):
# we are going to test the move_filepaths_to_upload_folder indirectly
# by creating an artifact and deleting it. To accomplish this we need
# to create a new prep info file, attach a biom with html_summary and
# then delete it. However, we will do this twice to ensure that
# there are no conflicts with this
study_id = 1
# creating the 2 sets of files for the 2 artifacts
fd, seqs_fp1 = mkstemp(suffix='_seqs.fastq')
close(fd)
html_fp1 = mkdtemp()
html_fp1 = join(html_fp1, 'support_files')
mkdir(html_fp1)
with open(join(html_fp1, 'index.html'), 'w') as fp:
fp.write(">AAA\nAAA")
fd, seqs_fp2 = mkstemp(suffix='_seqs.fastq')
close(fd)
html_fp2 = mkdtemp()
html_fp2 = join(html_fp2, 'support_files')
mkdir(html_fp2)
with open(join(html_fp2, 'index.html'), 'w') as fp:
fp.write(">AAA\nAAA")
# creating new prep info file
metadata_dict = {
'SKB8.640193': {'center_name': 'ANL',
'primer': 'GTGCCAGCMGCCGCGGTAA',
'barcode': 'GTCCGCAAGTTA',
'run_prefix': "s_G1_L001_sequences",
'platform': 'Illumina',
'instrument_model': 'Illumina MiSeq',
'library_construction_protocol': 'AAAA',
'experiment_design_description': 'BBBB'}}
metadata = pd.DataFrame.from_dict(
metadata_dict, orient='index', dtype=str)
pt1 = qdb.metadata_template.prep_template.PrepTemplate.create(
metadata, qdb.study.Study(study_id), "16S")
pt2 = qdb.metadata_template.prep_template.PrepTemplate.create(
metadata, qdb.study.Study(study_id), "16S")
# inserting artifact 1
artifact1 = qdb.artifact.Artifact.create(
[(seqs_fp1, 1), (html_fp1, 'html_summary')], "FASTQ",
prep_template=pt1)
# inserting artifact 2
artifact2 = qdb.artifact.Artifact.create(
[(seqs_fp2, 1), (html_fp2, 'html_summary')], "FASTQ",
prep_template=pt2)
# retrieving filepaths
filepaths = artifact1.filepaths
filepaths.extend(artifact2.filepaths)
# delete artifacts
qdb.artifact.Artifact.delete(artifact1.id)
qdb.artifact.Artifact.delete(artifact2.id)
# now let's create another artifact with the same filenames as
# artifact1 so we can test successful overlapping of names
with open(seqs_fp1, 'w') as fp:
fp.write(">AAA\nAAA")
mkdir(html_fp1)
with open(join(html_fp1, 'index.html'), 'w') as fp:
fp.write(">AAA\nAAA")
artifact3 = qdb.artifact.Artifact.create(
[(seqs_fp1, 1), (html_fp1, 'html_summary')], "FASTQ",
prep_template=pt1)
filepaths.extend(artifact2.filepaths)
qdb.artifact.Artifact.delete(artifact3.id)
# check that they do not exist in the old path but do in the new one
path_for_removal = join(qdb.util.get_mountpoint("uploads")[0][1],
str(study_id))
for x in filepaths:
self.assertFalse(exists(x['fp']))
new_fp = join(path_for_removal, basename(x['fp']))
if x['fp_type'] == 'html_summary':
# The html summary gets removed, not moved
self.assertFalse(exists(new_fp))
else:
self.assertTrue(exists(new_fp))
self.files_to_remove.append(new_fp)
def test_get_mountpoint(self):
exp = [(5, join(qdb.util.get_db_files_base_dir(), 'raw_data'))]
obs = qdb.util.get_mountpoint("raw_data")
self.assertEqual(obs, exp)
exp = [(1, join(qdb.util.get_db_files_base_dir(), 'analysis'))]
obs = qdb.util.get_mountpoint("analysis")
self.assertEqual(obs, exp)
exp = [(2, join(qdb.util.get_db_files_base_dir(), 'job'))]
obs = qdb.util.get_mountpoint("job")
self.assertEqual(obs, exp)
# inserting new ones so we can test that it retrieves these and
# doesn't alter other ones
qdb.sql_connection.perform_as_transaction(
"UPDATE qiita.data_directory SET active=false WHERE "
"data_directory_id=1")
count = qdb.util.get_count('qiita.data_directory')
sql = """INSERT INTO qiita.data_directory (data_type, mountpoint,
subdirectory, active)
VALUES ('analysis', 'analysis_tmp', true, true),
('raw_data', 'raw_data_tmp', true, false)"""
qdb.sql_connection.perform_as_transaction(sql)
# this should have been updated
exp = [(count + 1, join(qdb.util.get_db_files_base_dir(),
'analysis_tmp'))]
obs = qdb.util.get_mountpoint("analysis")
self.assertEqual(obs, exp)
# these 2 shouldn't
exp = [(5, join(qdb.util.get_db_files_base_dir(), 'raw_data'))]
obs = qdb.util.get_mountpoint("raw_data")
self.assertEqual(obs, exp)
exp = [(2, join(qdb.util.get_db_files_base_dir(), 'job'))]
obs = qdb.util.get_mountpoint("job")
self.assertEqual(obs, exp)
# testing multi returns
exp = [(5, join(qdb.util.get_db_files_base_dir(), 'raw_data')),
(count + 2, join(qdb.util.get_db_files_base_dir(),
'raw_data_tmp'))]
obs = qdb.util.get_mountpoint("raw_data", retrieve_all=True)
self.assertEqual(obs, exp)
# testing retrieve subdirectory
exp = [
(5, join(qdb.util.get_db_files_base_dir(), 'raw_data'), False),
(count + 2, join(qdb.util.get_db_files_base_dir(), 'raw_data_tmp'),
True)]
obs = qdb.util.get_mountpoint("raw_data", retrieve_all=True,
retrieve_subdir=True)
self.assertEqual(obs, exp)
def test_get_mountpoint_path_by_id(self):
exp = join(qdb.util.get_db_files_base_dir(), 'raw_data')
obs = qdb.util.get_mountpoint_path_by_id(5)
self.assertEqual(obs, exp)
exp = join(qdb.util.get_db_files_base_dir(), 'analysis')
obs = qdb.util.get_mountpoint_path_by_id(1)
self.assertEqual(obs, exp)
exp = join(qdb.util.get_db_files_base_dir(), 'job')
obs = qdb.util.get_mountpoint_path_by_id(2)
self.assertEqual(obs, exp)
# inserting new ones so we can test that it retrieves these and
# doesn't alter other ones
qdb.sql_connection.perform_as_transaction(
"UPDATE qiita.data_directory SET active=false WHERE "
"data_directory_id=1")
count = qdb.util.get_count('qiita.data_directory')
sql = """INSERT INTO qiita.data_directory (data_type, mountpoint,
subdirectory, active)
VALUES ('analysis', 'analysis_tmp', true, true),
('raw_data', 'raw_data_tmp', true, false)"""
qdb.sql_connection.perform_as_transaction(sql)
# this should have been updated
exp = join(qdb.util.get_db_files_base_dir(), 'analysis_tmp')
obs = qdb.util.get_mountpoint_path_by_id(count + 1)
self.assertEqual(obs, exp)
# these 2 shouldn't
exp = join(qdb.util.get_db_files_base_dir(), 'raw_data')
obs = qdb.util.get_mountpoint_path_by_id(5)
self.assertEqual(obs, exp)
exp = join(qdb.util.get_db_files_base_dir(), 'job')
obs = qdb.util.get_mountpoint_path_by_id(2)
self.assertEqual(obs, exp)
def test_get_files_from_uploads_folders(self):
# something has been uploaded; hidden files/folders and folders
# are ignored
exp = (7, 'uploaded_file.txt', '0B')
obs = qdb.util.get_files_from_uploads_folders("1")
self.assertIn(exp, obs)
# nothing has been uploaded
exp = []
obs = qdb.util.get_files_from_uploads_folders("2")
self.assertEqual(obs, exp)
def test_move_upload_files_to_trash(self):
test_filename = 'this_is_a_test_file.txt'
# create file to move to trash
fid, folder = qdb.util.get_mountpoint("uploads")[0]
test_fp = join(folder, '1', test_filename)
with open(test_fp, 'w') as f:
f.write('test')
self.files_to_remove.append(test_fp)
exp = (fid, 'this_is_a_test_file.txt', '4B')
obs = qdb.util.get_files_from_uploads_folders("1")
self.assertIn(exp, obs)
# move file
qdb.util.move_upload_files_to_trash(1, [(fid, test_filename)])
obs = qdb.util.get_files_from_uploads_folders("1")
self.assertNotIn(exp, obs)
# if the file doesn't exist, don't raise any errors
qdb.util.move_upload_files_to_trash(1, [(fid, test_filename)])
# testing errors
# - study doesn't exist
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.util.move_upload_files_to_trash(100, [(fid, test_filename)])
# - fid doesn't exist
with self.assertRaises(qdb.exceptions.QiitaDBError):
qdb.util.move_upload_files_to_trash(1, [(10, test_filename)])
# removing trash folder
rmtree(join(folder, '1', 'trash'))
def test_get_environmental_packages(self):
obs = qdb.util.get_environmental_packages()
exp = [['air', 'ep_air'],
['built environment', 'ep_built_environment'],
['host-associated', 'ep_host_associated'],
['human-amniotic-fluid', 'ep_human_amniotic_fluid'],
['human-associated', 'ep_human_associated'],
['human-blood', 'ep_human_blood'],
['human-gut', 'ep_human_gut'],
['human-oral', 'ep_human_oral'],
['human-skin', 'ep_human_skin'],
['human-urine', 'ep_human_urine'],
['human-vaginal', 'ep_human_vaginal'],
['microbial mat/biofilm', 'ep_microbial_mat_biofilm'],
['miscellaneous natural or artificial environment',
'ep_misc_artif'],
['plant-associated', 'ep_plant_associated'],
['sediment', 'ep_sediment'],
['soil', 'ep_soil'],
['wastewater/sludge', 'ep_wastewater_sludge'],
['water', 'ep_water']]
self.assertEqual(sorted(obs), sorted(exp))
def test_get_timeseries_types(self):
obs = qdb.util.get_timeseries_types()
exp = [[1, 'None', 'None'],
[2, 'real', 'single intervention'],
[3, 'real', 'multiple intervention'],
[4, 'real', 'combo intervention'],
[5, 'pseudo', 'single intervention'],
[6, 'pseudo', 'multiple intervention'],
[7, 'pseudo', 'combo intervention'],
[8, 'mixed', 'single intervention'],
[9, 'mixed', 'multiple intervention'],
[10, 'mixed', 'combo intervention']]
self.assertEqual(obs, exp)
def test_get_filepath_information(self):
obs = qdb.util.get_filepath_information(1)
# This path is machine specific. Just checking that it is not empty
self.assertIsNotNone(obs.pop('fullpath'))
exp = {'filepath_id': 1, 'filepath': '1_s_G1_L001_sequences.fastq.gz',
'filepath_type': 'raw_forward_seqs', 'checksum': '2125826711',
'data_type': 'raw_data', 'mountpoint': 'raw_data',
'subdirectory': False, 'active': True}
self.assertEqual(obs, exp)
def test_filepath_id_to_rel_path(self):
obs = qdb.util.filepath_id_to_rel_path(1)
exp = 'raw_data/1_s_G1_L001_sequences.fastq.gz'
self.assertEqual(obs, exp)
obs = qdb.util.filepath_id_to_rel_path(3)
exp = 'preprocessed_data/1_seqs.fna'
self.assertEqual(obs, exp)
fd, fp = mkstemp()
close(fd)
with open(fp, 'w') as f:
f.write('\n')
self.files_to_remove.append(fp)
test = qdb.util.insert_filepaths(
[(fp, "raw_forward_seqs")], 2, "FASTQ")[0]
sql = """INSERT INTO qiita.artifact_filepath
(artifact_id, filepath_id)
VALUES (%s, %s)"""
qdb.sql_connection.perform_as_transaction(sql, [2, test])
obs = qdb.util.filepath_id_to_rel_path(test)
exp = 'FASTQ/2/%s' % basename(fp)
self.assertEqual(obs, exp)
def test_filepath_ids_to_rel_paths(self):
fd, fp = mkstemp()
close(fd)
with open(fp, 'w') as f:
f.write('\n')
self.files_to_remove.append(fp)
test = qdb.util.insert_filepaths(
[(fp, "raw_forward_seqs")], 2, "FASTQ")[0]
sql = """INSERT INTO qiita.artifact_filepath
(artifact_id, filepath_id)
VALUES (%s, %s)"""
qdb.sql_connection.perform_as_transaction(sql, [2, test])
obs = qdb.util.filepath_ids_to_rel_paths([1, 3, test])
exp = {1: 'raw_data/1_s_G1_L001_sequences.fastq.gz',
3: 'preprocessed_data/1_seqs.fna',
test: 'FASTQ/2/%s' % basename(fp)}
self.assertEqual(obs, exp)
def test_add_message(self):
count = qdb.util.get_count('qiita.message') + 1
user = qdb.user.User.create('new@test.bar', 'password')
users = [user]
qdb.util.add_message("TEST MESSAGE", users)
obs = [[x[0], x[1]] for x in user.messages()]
exp = [[count, 'TEST MESSAGE']]
self.assertEqual(obs, exp)
def test_add_system_message(self):
count = qdb.util.get_count('qiita.message') + 1
qdb.util.add_system_message("SYS MESSAGE",
datetime(2015, 8, 5, 19, 41))
obs = [[x[0], x[1]]
for x in qdb.user.User('shared@foo.bar').messages()]
exp = [[count, 'SYS MESSAGE'], [1, 'message 1']]
self.assertEqual(obs, exp)
obs = [[x[0], x[1]] for x in qdb.user.User('admin@foo.bar').messages()]
exp = [[count, 'SYS MESSAGE']]
self.assertEqual(obs, exp)
sql = "SELECT expiration from qiita.message WHERE message_id = %s"
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(sql, [count])
obs = qdb.sql_connection.TRN.execute_fetchindex()
exp = [[datetime(2015, 8, 5, 19, 41)]]
self.assertEqual(obs, exp)
def test_clear_system_messages(self):
message_id = qdb.util.get_count('qiita.message') + 1
user = qdb.user.User.create('csm@test.bar', 'password')
obs = [[x[0], x[1]] for x in user.messages()]
exp = []
self.assertEqual(obs, exp)
qdb.util.add_system_message("SYS MESSAGE",
datetime(2015, 8, 5, 19, 41))
obs = [[x[0], x[1]] for x in user.messages()]
exp = [[message_id, 'SYS MESSAGE']]
self.assertCountEqual(obs, exp)
qdb.util.clear_system_messages()
obs = [[x[0], x[1]] for x in user.messages()]
exp = []
self.assertEqual(obs, exp)
# Run again with no system messages to make sure no errors
qdb.util.clear_system_messages()
def test_supported_filepath_types(self):
obs = qdb.util.supported_filepath_types("FASTQ")
exp = [["raw_forward_seqs", True], ["raw_reverse_seqs", False],
["raw_barcodes", True]]
self.assertCountEqual(obs, exp)
obs = qdb.util.supported_filepath_types("BIOM")
exp = [["biom", True], ["directory", False], ["log", False]]
self.assertCountEqual(obs, exp)
def test_generate_analysis_list(self):
self.assertEqual(qdb.util.generate_analysis_list([]), [])
obs = qdb.util.generate_analysis_list([1, 2, 3, 5])
exp = [{'mapping_files': [
(16, qdb.util.get_filepath_information(16)['fullpath'])],
'description': 'A test analysis', 'artifacts': [9], 'name':
'SomeAnalysis', 'analysis_id': 1, 'visibility': 'private'},
{'mapping_files': [], 'description': 'Another test analysis',
'artifacts': [], 'name': 'SomeSecondAnalysis',
'analysis_id': 2, 'visibility': 'private'}]
# removing timestamp for testing
for i in range(len(obs)):
del obs[i]['timestamp']
self.assertEqual(obs, exp)
self.assertEqual(
qdb.util.generate_analysis_list([1, 2, 3, 5], True), [])
@qiita_test_checker()
class UtilTests(TestCase):
"""Tests for the util functions that do not need to access the DB"""
def setUp(self):
fh, self.filepath = mkstemp()
close(fh)
with open(self.filepath, "w") as f:
f.write("Some text so we can actually compute a checksum")
def test_compute_checksum(self):
"""Correctly returns the file checksum"""
obs = qdb.util.compute_checksum(self.filepath)
exp = 1719580229
self.assertEqual(obs, exp)
def test_scrub_data_nothing(self):
"""Returns the same string without changes"""
self.assertEqual(qdb.util.scrub_data("nothing_changes"),
"nothing_changes")
def test_scrub_data_semicolon(self):
"""Correctly removes the semicolon from the string"""
self.assertEqual(qdb.util.scrub_data("remove_;_char"), "remove__char")
def test_scrub_data_single_quote(self):
"""Correctly removes single quotes from the string"""
self.assertEqual(qdb.util.scrub_data("'quotes'"), "quotes")
def test_get_visibilities(self):
obs = qdb.util.get_visibilities()
exp = ['awaiting_approval', 'sandbox', 'private', 'public']
self.assertEqual(obs, exp)
def test_infer_status(self):
obs = qdb.util.infer_status([])
self.assertEqual(obs, 'sandbox')
obs = qdb.util.infer_status([['private']])
self.assertEqual(obs, 'private')
obs = qdb.util.infer_status([['private'], ['public']])
self.assertEqual(obs, 'public')
obs = qdb.util.infer_status([['sandbox'], ['awaiting_approval']])
self.assertEqual(obs, 'awaiting_approval')
obs = qdb.util.infer_status([['sandbox'], ['sandbox']])
self.assertEqual(obs, 'sandbox')
def test_get_pubmed_ids_from_dois(self):
exp = {'10.100/123456': '123456'}
obs = qdb.util.get_pubmed_ids_from_dois(['', '10.100/123456'])
self.assertEqual(obs, exp)
def test_generate_study_list(self):
USER = qdb.user.User
STUDY = qdb.study.Study
PREP = qdb.metadata_template.prep_template.PrepTemplate
UTIL = qdb.util
# testing owner email as name
user = USER('test@foo.bar')
username = user.info['name']
# test without changes
self.assertDictEqual(
STUDY_INFO, UTIL.generate_study_list(user, 'user')[0])
# change user's name to None and tests again
user.info = {'name': None}
exp = STUDY_INFO.copy()
exp['owner'] = 'test@foo.bar'
self.assertDictEqual(
exp, qdb.util.generate_study_list(user, 'user')[0])
# returning original name
user.info = {'name': username}
# creating a new study to make sure that empty studies are also
# returned
info = {"timeseries_type_id": 1, "metadata_complete": True,
"mixs_compliant": True, "study_alias": "TST",
"study_description": "Some description of the study goes here",
"study_abstract": "Some abstract goes here",
"principal_investigator_id": qdb.study.StudyPerson(1),
"lab_person_id": qdb.study.StudyPerson(1)}
new_study = STUDY.create(
USER('shared@foo.bar'), 'test_study_1', info=info)
snew_info = {
'status': 'sandbox', 'study_title': 'test_study_1',
'metadata_complete': True, 'publication_pid': [],
'artifact_biom_ids': [], 'autoloaded': False,
'ebi_submission_status': 'not submitted',
'study_id': new_study.id, 'ebi_study_accession': None,
'owner': 'Shared', 'shared': [],
'study_abstract': 'Some abstract goes here',
'pi': ('lab_dude@foo.bar', 'LabDude'), 'publication_doi': [],
'study_alias': 'TST', 'study_tags': None,
'preparation_data_types': [], 'number_samples_collected': 0}
exp1 = [STUDY_INFO]
exp2 = [snew_info]
exp_both = [STUDY_INFO, snew_info]
# let's make sure that everything is private for study 1
for a in STUDY(1).artifacts():
a.visibility = 'private'
# owner of study
obs = UTIL.generate_study_list(USER('test@foo.bar'), 'user')
self.assertEqual(len(obs), 1)
self.assertDictEqual(obs[0], exp1[0])
# shared with
obs = UTIL.generate_study_list(USER('shared@foo.bar'), 'user')
self.assertEqual(len(obs), 2)
self.assertDictEqual(obs[0], exp_both[0])
self.assertDictEqual(obs[1], exp_both[1])
# admin
obs = UTIL.generate_study_list(USER('admin@foo.bar'), 'user')
self.assertEqual(obs, exp_both)
# no access/hidden
obs = UTIL.generate_study_list(USER('demo@microbio.me'), 'user')
self.assertEqual(obs, [])
# public - none for everyone
obs = UTIL.generate_study_list(USER('test@foo.bar'), 'public')
self.assertEqual(obs, [])
obs = UTIL.generate_study_list(USER('shared@foo.bar'), 'public')
self.assertEqual(obs, [])
obs = UTIL.generate_study_list(USER('admin@foo.bar'), 'public')
self.assertEqual(obs, [])
obs = UTIL.generate_study_list(USER('demo@microbio.me'), 'public')
self.assertEqual(obs, [])
def _avoid_duplicated_tests(all_artifacts=False):
# nothing should change for owner, shared
obs = UTIL.generate_study_list(USER('test@foo.bar'), 'user')
self.assertEqual(obs, exp1)
obs = UTIL.generate_study_list(USER('shared@foo.bar'), 'user')
self.assertEqual(obs, exp_both)
# for admin it should be shown in public and user because there are
# 2 preps and only one is public
obs = UTIL.generate_study_list(USER('admin@foo.bar'), 'user')
if not all_artifacts:
self.assertEqual(obs, exp_both)
else:
self.assertEqual(obs, exp2)
obs = UTIL.generate_study_list(USER('demo@microbio.me'), 'user')
self.assertEqual(obs, [])
# for the public query, everything should be the same for owner, shared
# and admin, but demo should now see it as public with limited
# artifacts
obs = UTIL.generate_study_list(USER('test@foo.bar'), 'public')
self.assertEqual(obs, [])
obs = UTIL.generate_study_list(USER('shared@foo.bar'), 'public')
self.assertEqual(obs, [])
obs = UTIL.generate_study_list(USER('admin@foo.bar'), 'public')
if not all_artifacts:
exp1[0]['artifact_biom_ids'] = [7]
self.assertEqual(obs, exp1)
obs = UTIL.generate_study_list(USER('demo@microbio.me'), 'public')
self.assertEqual(obs, exp1)
# returning artifacts
exp1[0]['artifact_biom_ids'] = [4, 5, 6, 7]
# make artifacts of prep 2 public
PREP(2).artifact.visibility = 'public'
exp1[0]['status'] = 'public'
exp_both[0]['status'] = 'public'
_avoid_duplicated_tests()
# make artifacts of prep 1 awaiting_approval
PREP(1).artifact.visibility = 'awaiting_approval'
_avoid_duplicated_tests()
# making all studies public
PREP(1).artifact.visibility = 'public'
_avoid_duplicated_tests(True)
# deleting the new study and returning artifact status
qdb.study.Study.delete(new_study.id)
PREP(1).artifact.visibility = 'private'
PREP(2).artifact.visibility = 'private'
def test_generate_study_list_errors(self):
with self.assertRaises(ValueError):
qdb.util.generate_study_list(qdb.user.User('test@foo.bar'), 'bad')
def test_generate_study_list_without_artifacts(self):
# creating a new study to make sure that empty studies are also
# returned
info = {"timeseries_type_id": 1, "metadata_complete": True,
"mixs_compliant": True, "study_alias": "TST",
"study_description": "Some description of the study goes here",
"study_abstract": "Some abstract goes here",
"principal_investigator_id": qdb.study.StudyPerson(1),
"lab_person_id": qdb.study.StudyPerson(1)}
new_study = qdb.study.Study.create(
qdb.user.User('shared@foo.bar'), 'test_study_1', info=info)
exp_info = [
{'status': 'private', 'study_title': (
'Identification of the Microbiomes for Cannabis Soils'),
'metadata_complete': True, 'publication_pid': [
'123456', '7891011'], 'ebi_submission_status': 'submitted',
'study_id': 1, 'ebi_study_accession': 'EBI123456-BB',
'autoloaded': False,
'study_abstract': (
'This is a preliminary study to examine the microbiota '
'associated with the Cannabis plant. Soils samples from '
'the bulk soil, soil associated with the roots, and the '
'rhizosphere were extracted and the DNA sequenced. Roots '
'from three independent plants of different strains were '
'examined. These roots were obtained November 11, 2011 from '
'plants that had been harvested in the summer. Future studies '
'will attempt to analyze the soils and rhizospheres from the '
'same location at different time points in the plant '
'lifecycle.'), 'pi': ('PI_dude@foo.bar', 'PIDude'),
'publication_doi': ['10.100/123456', '10.100/7891011'],
'study_alias': 'Cannabis Soils', 'number_samples_collected': 27},
{'status': 'sandbox', 'study_title': 'test_study_1',
'metadata_complete': True, 'publication_pid': [],
'ebi_submission_status': 'not submitted', 'autoloaded': False,
'study_id': new_study.id, 'ebi_study_accession': None,
'study_abstract': 'Some abstract goes here',
'pi': ('lab_dude@foo.bar', 'LabDude'), 'publication_doi': [],
'study_alias': 'TST', 'number_samples_collected': 0}]
obs_info = qdb.util.generate_study_list_without_artifacts([1, 2, 3, 4])
self.assertEqual(obs_info, exp_info)
obs_info = qdb.util.generate_study_list_without_artifacts(
[1, 2, 3, 4], 'EMP')
self.assertEqual(obs_info, [])
# deleting the new study
qdb.study.Study.delete(new_study.id)
def test_get_artifacts_information(self):
# we are going to test that it ignores 1 and 2 because they are not biom,
# 4 has all information and 7 and 8 don't
obs = qdb.util.get_artifacts_information([1, 2, 4, 6, 7, 8])
# not testing timestamp
for i in range(len(obs)):
del obs[i]['timestamp']
exp = [
{'artifact_id': 6, 'target_subfragment': ['V4'],
'prep_samples': 27, 'platform': 'Illumina',
'target_gene': '16S rRNA', 'name': 'BIOM', 'data_type': '16S',
'parameters': {'reference': '2', 'similarity': '0.97',
'sortmerna_e_value': '1',
'sortmerna_max_pos': '10000', 'threads': '1',
'sortmerna_coverage': '0.97'},
'algorithm': 'Pick closed-reference OTUs | Split libraries FASTQ',
'algorithm_az': 'd480799a0a7a2fbe0e9022bc9c602018',
'deprecated': False, 'active': True,
'files': ['1_study_1001_closed_reference_otu_table_Silva.biom']},
{'artifact_id': 4, 'target_subfragment': ['V4'],
'prep_samples': 27, 'platform': 'Illumina',
'target_gene': '16S rRNA', 'name': 'BIOM', 'data_type': '18S',
'parameters': {'reference': '1', 'similarity': '0.97',
'sortmerna_e_value': '1',
'sortmerna_max_pos': '10000', 'threads': '1',
'sortmerna_coverage': '0.97'},
'algorithm': 'Pick closed-reference OTUs | Split libraries FASTQ',
'algorithm_az': 'd480799a0a7a2fbe0e9022bc9c602018',
'deprecated': False, 'active': True,
'files': ['1_study_1001_closed_reference_otu_table.biom']},
{'artifact_id': 7, 'target_subfragment': ['V4'],
'prep_samples': 27, 'platform': 'Illumina',
'target_gene': '16S rRNA', 'name': 'BIOM', 'data_type': '16S',
'parameters': {}, 'algorithm': '', 'algorithm_az': '',
'deprecated': None, 'active': None, 'files': ['biom_table.biom']},
{'artifact_id': 8, 'target_subfragment': [], 'prep_samples': 0,
'platform': 'not provided', 'target_gene': 'not provided', 'name':
'noname', 'data_type': '18S', 'parameters': {}, 'algorithm': '',
'algorithm_az': '', 'deprecated': None, 'active': None,
'files': ['biom_table.biom']}]
self.assertCountEqual(obs, exp)
exp = exp[1:]
# now let's test that the order given by the commands actually gives the
# correct results
with qdb.sql_connection.TRN:
# setting up database changes for just checking commands
qdb.sql_connection.TRN.add(
"""UPDATE qiita.command_parameter SET check_biom_merge = True
WHERE parameter_name = 'reference'""")
qdb.sql_connection.TRN.execute()
# testing that it works as expected
obs = qdb.util.get_artifacts_information([1, 2, 4, 7, 8])
# not testing timestamp
for i in range(len(obs)):
del obs[i]['timestamp']
exp[0]['algorithm'] = ('Pick closed-reference OTUs (reference: 1) '
'| Split libraries FASTQ')
exp[0]['algorithm_az'] = '33fed1b35728417d7ba4139b8f817d44'
self.assertCountEqual(obs, exp)
# setting up database changes for also command output
qdb.sql_connection.TRN.add(
"UPDATE qiita.command_output SET check_biom_merge = True")
qdb.sql_connection.TRN.execute()
obs = qdb.util.get_artifacts_information([1, 2, 4, 7, 8])
# not testing timestamp
for i in range(len(obs)):
del obs[i]['timestamp']
exp[0]['algorithm'] = ('Pick closed-reference OTUs (reference: 1, '
'BIOM: 1_study_1001_closed_reference_'
'otu_table.biom) | Split libraries FASTQ')
exp[0]['algorithm_az'] = 'de5b794a2cacd428f36fea86df196bfd'
self.assertCountEqual(obs, exp)
# let's test that we ignore the parent_info
qdb.sql_connection.TRN.add("""UPDATE qiita.software_command
SET ignore_parent_command = True""")
qdb.sql_connection.TRN.execute()
obs = qdb.util.get_artifacts_information([1, 2, 4, 7, 8])
# not testing timestamp
for i in range(len(obs)):
del obs[i]['timestamp']
exp[0]['algorithm'] = ('Pick closed-reference OTUs (reference: 1, '
'BIOM: 1_study_1001_closed_reference_'
'otu_table.biom)')
exp[0]['algorithm_az'] = '7f59a45b2f0d30cd1ed1929391c26e07'
self.assertCountEqual(obs, exp)
# let's test that we ignore the parent_info
qdb.sql_connection.TRN.add("""UPDATE qiita.software_command
SET ignore_parent_command = True""")
qdb.sql_connection.TRN.execute()
obs = qdb.util.get_artifacts_information([1, 2, 4, 7, 8])
# not testing timestamp
for i in range(len(obs)):
del obs[i]['timestamp']
exp[0]['algorithm'] = ('Pick closed-reference OTUs (reference: 1, '
'BIOM: 1_study_1001_closed_reference_'
'otu_table.biom)')
exp[0]['algorithm_az'] = '7f59a45b2f0d30cd1ed1929391c26e07'
self.assertCountEqual(obs, exp)
# returning database as it was
qdb.sql_connection.TRN.add(
"UPDATE qiita.command_output SET check_biom_merge = False")
qdb.sql_connection.TRN.add("""UPDATE qiita.software_command
SET ignore_parent_command = False""")
qdb.sql_connection.TRN.add(
"""UPDATE qiita.command_parameter SET check_biom_merge = False
WHERE parameter_name = 'reference'""")
qdb.sql_connection.TRN.execute()
class TestFilePathOpening(TestCase):
"""Tests adapted from scikit-bio's skbio.io.util tests"""
def test_is_string_or_bytes(self):
self.assertTrue(qdb.util._is_string_or_bytes('foo'))
self.assertTrue(qdb.util._is_string_or_bytes(u'foo'))
self.assertTrue(qdb.util._is_string_or_bytes(b'foo'))
self.assertFalse(qdb.util._is_string_or_bytes(StringIO('bar')))
self.assertFalse(qdb.util._is_string_or_bytes([1]))
def test_file_closed(self):
"""File gets closed in decorator"""
f = NamedTemporaryFile('r')
filepath = f.name
with qdb.util.open_file(filepath) as fh:
pass
self.assertTrue(fh.closed)
def test_file_closed_harder(self):
"""File gets closed in decorator, even if exceptions happen."""
f = NamedTemporaryFile('r')
filepath = f.name
try:
with qdb.util.open_file(filepath) as fh:
raise TypeError
except TypeError:
self.assertTrue(fh.closed)
else:
# If we're here, no exceptions have been raised inside the
# try clause, so the context manager swallowed them. No
# good.
raise Exception("`open_file` didn't propagate exceptions")
def test_filehandle(self):
"""Filehandles slip through untouched"""
with TemporaryFile('r') as fh:
with qdb.util.open_file(fh) as ffh:
self.assertTrue(fh is ffh)
# And it doesn't close the file-handle
self.assertFalse(fh.closed)
def test_StringIO(self):
"""StringIO (useful e.g. for testing) slips through."""
f = StringIO("File contents")
with qdb.util.open_file(f) as fh:
self.assertTrue(fh is f)
def test_BytesIO(self):
"""BytesIO (useful e.g. for testing) slips through."""
f = BytesIO(b"File contents")
with qdb.util.open_file(f) as fh:
self.assertTrue(fh is f)
def test_hdf5IO(self):
"""This tests that if we send a file handler it returns it"""
f = h5py.File('test', driver='core', backing_store=False, mode='w')
with qdb.util.open_file(f) as fh:
self.assertTrue(fh is f)
def test_hdf5IO_open(self):
with NamedTemporaryFile(delete=False) as fh:
name = fh.name
fh.close()
h5file = h5py.File(name, 'w')
h5file.close()
with qdb.util.open_file(name) as fh_inner:
self.assertTrue(isinstance(fh_inner, h5py.File))
remove(name)
class PurgeFilepathsTests(DBUtilTestsBase):
def _get_current_filepaths(self):
sql_fp = "SELECT filepath_id FROM qiita.filepath"
with qdb.sql_connection.TRN:
qdb.sql_connection.TRN.add(sql_fp)
results = qdb.sql_connection.TRN.execute_fetchflatten()
return [qdb.util.get_filepath_information(_id)['fullpath']
for _id in results]
def _create_files(self, files):
# format is: [mp_id, fp_type_id, file_name]
sql = """INSERT INTO qiita.filepath (
data_directory_id, filepath_type_id, filepath, checksum,
checksum_algorithm_id)
VALUES (%s, %s, %s, '852952723', 1) RETURNING filepath_id"""
with qdb.sql_connection.TRN:
for f in files:
qdb.sql_connection.TRN.add(sql, tuple(f))
fid = qdb.sql_connection.TRN.execute_fetchflatten()[0]
qdb.util.get_filepath_information(fid)
def test_purge_filepaths_test(self):
# Get all the filepaths so we can test if they've been removed or not
fps_expected = self._get_current_filepaths()
# Make sure that the files exist - especially for travis
for fp in fps_expected:
if not exists(fp):
with open(fp, 'w') as f:
f.write('\n')
self.files_to_remove.append(fp)
# nothing should be removed
qdb.util.purge_filepaths()
fps_viewed = self._get_current_filepaths()
self.assertCountEqual(fps_expected, fps_viewed)
# testing study filepath delete by inserting a new study sample info
# and making sure it gets deleted
mp_id, mp = qdb.util.get_mountpoint('templates')[0]
txt_id = qdb.util.convert_to_id('sample_template', "filepath_type")
self._create_files([[mp_id, txt_id, '100_filepath.txt']])
qdb.util.purge_filepaths()
fps_viewed = self._get_current_filepaths()
self.assertCountEqual(fps_expected, fps_viewed)
# testing artifact [A], creating a folder with an artifact that
# doesn't exist
_, mp = qdb.util.get_mountpoint('per_sample_FASTQ')[0]
not_an_artifact_fp = join(mp, '10000')
mkdir(not_an_artifact_fp)
# now let's add test for [B] by creating 2 filepaths without a
# link to the artifacts tables
mp_id, mp = qdb.util.get_mountpoint('BIOM')[0]
biom_id = qdb.util.convert_to_id('biom', "filepath_type")
self._create_files([
[mp_id, txt_id, 'artifact_filepath.txt'],
[mp_id, biom_id, 'my_biom.biom']
])
# adding files to tests
qdb.util.purge_filepaths()
fps_viewed = self._get_current_filepaths()
self.assertCountEqual(fps_expected, fps_viewed)
self.assertFalse(exists(not_an_artifact_fp))
# testing analysis filepath delete by filepaths for 2 different files
# and making sure they get deleted
mp_id, mp = qdb.util.get_mountpoint('analysis')[0]
biom_id = qdb.util.convert_to_id('biom', "filepath_type")
self._create_files([
[mp_id, txt_id, '10000_my_analysis_map.txt'],
[mp_id, biom_id, '10000_my_analysis_biom.biom']
])
qdb.util.purge_filepaths()
fps_viewed = self._get_current_filepaths()
self.assertCountEqual(fps_expected, fps_viewed)
STUDY_INFO = {
'study_id': 1,
'owner': 'Dude',
'study_alias': 'Cannabis Soils',
'status': 'private',
'study_abstract':
'This is a preliminary study to examine the microbiota '
'associated with the Cannabis plant. Soils samples '
'from the bulk soil, soil associated with the roots, '
'and the rhizosphere were extracted and the DNA '
'sequenced. Roots from three independent plants of '
'different strains were examined. These roots were '
'obtained November 11, 2011 from plants that had been '
'harvested in the summer. Future studies will attempt '
'to analyze the soils and rhizospheres from the same '
'location at different time points in the plant '
'lifecycle.',
'metadata_complete': True,
'autoloaded': False,
'ebi_study_accession': 'EBI123456-BB',
'ebi_submission_status': 'submitted',
'study_title':
'Identification of the Microbiomes for Cannabis Soils',
'number_samples_collected': 27,
'shared': [('shared@foo.bar', 'Shared')],
'publication_doi': ['10.100/123456', '10.100/7891011'],
'publication_pid': ['123456', '7891011'],
'pi': ('PI_dude@foo.bar', 'PIDude'),
'artifact_biom_ids': [4, 5, 6, 7],
'preparation_data_types': ['18S'],
'study_tags': None,
}
if __name__ == '__main__':
main()
| bsd-3-clause |
habi/GlobalDiagnostiX | CalculateDetector.py | 1 | 19688 | # -*- coding: utf-8 -*-
"""
Script to "calculate" the detector.
The script estimates the number of photons landing on the scintillator
from the source and the number of photons reaching the detector.
Also it displays the geometrical situation depending no the chosen parameters.
You can run this script to produce several frames of output as so:
(or use the command at the end of the script to also start Fiji and do some
more stuff)
for f in {10..15..1};
do for o in {45..50..1};
do for s in {5..10..1};
do ./CalculateDetector.py -f $f -o $o -s $s -p;
done;
done;
done
"""
import numpy
from scipy import constants
from scipy import integrate
import matplotlib.pylab as plt
from matplotlib.patches import Wedge, Rectangle
from optparse import OptionParser
import sys
import os
# ##################### SETUP ######################
# Use Pythons Optionparser to define and read the options, and also
# give some help to the user
parser = OptionParser()
usage = "usage: %prog [options] arg"
parser.add_option('-s', '--ScreenSize', dest='FOV', type='float', default=4.5,
help='Field of view in centimeters, i.e. desired screen '
'size (default=4.5 cm)', metavar='43')
parser.add_option('-o', '--OpeningAngle', dest='OpeningAngle', default=90.0,
type='float',
help='Opening angle of the lens in degrees (default=90)',
metavar='45')
parser.add_option('-n', '--NumericalAperture', dest='NA', default=0.4,
type='float',
help='Numerical Aperture of the lens',
metavar='0.6')
parser.add_option('-f', '--FStop', dest='FStop', default=1.2, type='float',
help='F-Stop of the lens',
metavar='0.8')
parser.add_option('-c', '--CCDSize', dest='SensorSize', default=3.0,
type='float',
help='Size of the CCD/CMOS sensor (in millimeters!), '
'Default=3 mm/0.3 cm', metavar='7')
parser.add_option('-e', '--Energy', dest='InputEnergy', default=50.4,
type='float',
help='Energy of the x-ray photons in kV (default=50 kV)',
metavar='120')
parser.add_option('-l', '--LinePairs', dest='LinePairs', default=5.0,
type='float',
help='Desired resolution in lp/mm (default=5 lp/mm)',
metavar='4')
parser.add_option('-p', '--print', dest='Output', default=False,
action='store_true',
help='Save/Print output files to disk', metavar=1)
(options, args) = parser.parse_args()
options.SensorSize /= 10
options.InputEnergy *= 1000
# show the help if some important parameters are not given
if options.FOV is None \
or options.OpeningAngle is None \
or options.SensorSize is None \
or options.InputEnergy is None \
or options.LinePairs is None:
parser.print_help()
print 'Example:'
print 'The command below shows you the configuration for a setup with a ' \
'screen size of 20.5 cm (half the required size), a lens with an ' \
'opening angle of 45 deg, a small sensor of 7 mm and an x-ray ' \
'energy of 50 kV:'
print ''
print sys.argv[0], '-s 20.5 -o 45 -c 7 -e 50'
print ''
sys.exit(1)
print 80 * '_'
# CALCULATE
# Intensifying screen
# http://www.sprawls.org/ppmi2/FILMSCR/:
# > Although the total energy of the light emitted by a screen is much less
# than the total x-ray energy the screen receives, the light energy is much
# more efficient in exposing film because it is "repackaged" into a much larger
# number of photons. If we assume a 5% energy conversion efficiency, then one
# 50-keV x-ray photon can produce 1,000 blue-green light photons with an energy
# of 2.5 eV each.
ScreenAbsorption = 0.1
ScreenConversion = 0.5
ScreenEmission = 1
ScreenOutput = ScreenAbsorption * ScreenConversion * ScreenEmission
# nm (green according to http://is.gd/AWmNpp)
Wavelength = 500e-9
# E = h * nu, nu = c / lambda
PhotonEnergyJ = constants.h * constants.c / Wavelength
PhotonEnergyeV = PhotonEnergyJ / constants.eV
# print 'Visible light photons with a wavelength of',int(Wavelength*1e9),\
# 'nm have an energy of',round(PhotonEnergyJ,22),'J or',\
# round(PhotonEnergyeV,3),'eV.'
PhotonsAfterScintillator = options.InputEnergy / PhotonEnergyeV * ScreenOutput
print 'For each', options.InputEnergy / 1000, 'kV x-ray photon'
print ' * we have', int(round(PhotonsAfterScintillator)), 'visible light', \
'photons after the scintillator (with a'
print ' conversion efficiency of', ScreenOutput * 100, '%).'
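# Worked example of the numbers printed above (values follow from the chosen
# options): a 500 nm photon carries E = h*c/lambda ~ 3.97e-19 J ~ 2.48 eV, so
# a 50 keV x-ray photon converted at the default ScreenOutput of 5% yields
# roughly 50e3 eV * 0.05 / 2.48 eV ~ 1000 visible-light photons, which is what
# PhotonsAfterScintillator evaluates to.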
# Lens
LensReflectance = 0.02
LensAbsorption = 0.02
# Assume a set of double plano-convex lenses, with 4% loss per lens
LensTransmission = 1 - (2 * LensReflectance) - (2 * LensAbsorption)
PhotonsAfterLens = PhotonsAfterScintillator * LensTransmission
# ~ tan(\alpha/2) = (FOV/2) / Distance
# ~ Distance = (FOV/2)/tan(\alpha/2)
WorkingDistance = (options.FOV / 2) / numpy.tan(
numpy.deg2rad(options.OpeningAngle) / 2)
print ' * we have', int(round(PhotonsAfterLens)), 'visible light photons', \
'after the lens couple (with a'
print ' transmission of', LensTransmission * 100, '%).'
# Sensor
QESensor = 0.4
ProducedElectrons = PhotonsAfterLens * QESensor
Demagnification = options.FOV / options.SensorSize
SensorPosition = WorkingDistance / Demagnification
print ' * we get', int(round(ProducedElectrons)), 'electrons on the', \
'detector (with a QE of', str(QESensor) + ').'
# LinePairs
LinePairsScintillator = options.FOV * 10 * options.LinePairs
PixelsNeeded = LinePairsScintillator * 2
SensorPixelSize = options.SensorSize / PixelsNeeded
# Comparison with Flatpanel detectors
FlatPanelPixelSize = 0.194 # mm
ScintillatorThickness = 1.0 # mm
ConversionEfficiency = 1.0
NumericalApertureCalculated = FlatPanelPixelSize / (ScintillatorThickness / 2)
NumericalApertureAverage = \
integrate.quad(lambda x: numpy.arctan(FlatPanelPixelSize / (2 * x)),
0.01, 1)[0]
NumericalApertureDetermined = (SensorPosition * 10) / (
options.FStop * 2 * SensorPosition * 10 / (1 / Demagnification))
FStopJBAG = 0.8
NumericalApertureJBAG = 1 / (2 * FStopJBAG)
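# Note on the numerical apertures above: the sweep plotted below uses
# NA(m) = m / (2 * F * (1 + m)) for magnification m and f-number F, and
# NA = 1/(2*F) is its large-m limit, which is how NumericalApertureJBAG
# (0.625 for F = 0.8) is obtained.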
# PLOT
# Plot optical configuration
# Draw the stuff we calculated above
fig = plt.figure(1, figsize=(32, 18))
Thickness = 1.0
SupportThickness = 0.5
XRaySourcePosition = 25
# Optical Configuration
plt.subplot(211)
plt.axis('equal')
# axes = plt.gca()
# axes.axes.get_yaxis().set_ticks([])
plt.title('Angular opening: ' + str('%.2f' % options.OpeningAngle) +
', Screen size: ' + str('%.2f' % options.FOV) +
'cm, Working Distance: ' + str('%.2f' % round(WorkingDistance, 2)) +
'cm\nScintillator Efficiency: ' + str(round(ScreenOutput, 2) * 100)
+ '%, Lens transmission: ' + str(round(LensTransmission, 2) * 100)
+ '%, QE sensor: ' + str(QESensor))
plt.xlabel('Distance [cm]')
plt.ylabel('Distance [cm]')
# Optical Axis
plt.axhline(color='k', linestyle='--')
# X-rays
x = numpy.arange(0, XRaySourcePosition - Thickness - SupportThickness, 0.1)
for yshift in numpy.arange(-options.FOV / 2,
options.FOV / 2,
options.FOV / 10.0):
plt.plot(-x - Thickness - SupportThickness, numpy.sin(x) + yshift, 'k')
# Scintillator
ScintillatorSupport = Rectangle(
(-Thickness - SupportThickness, (options.FOV / 2) + SupportThickness),
Thickness + SupportThickness, -options.FOV - SupportThickness * 2,
facecolor="black")
plt.gca().add_patch(ScintillatorSupport)
Scintillator = Rectangle((-Thickness, options.FOV / 2), Thickness,
-options.FOV, facecolor="lightgreen")
plt.gca().add_patch(Scintillator)
# Light-Cone
# Opening angle
wedgecolor = 'r'
Wedge = Wedge((WorkingDistance, 0), -WorkingDistance * .25,
-(options.OpeningAngle / 2), (options.OpeningAngle / 2),
fill=False, color=wedgecolor)
plt.gca().add_patch(Wedge)
# Light Beams
beamcolor = wedgecolor
# Scintillator - Lens
plt.plot([0, WorkingDistance], [options.FOV / 2, 0], beamcolor)
plt.plot([0, WorkingDistance], [-options.FOV / 2, 0], beamcolor)
# Lens - Sensor
plt.plot([WorkingDistance, WorkingDistance + SensorPosition],
[0, options.FOV / 2 / Demagnification], beamcolor)
plt.plot([WorkingDistance, WorkingDistance + SensorPosition],
[0, -options.FOV / 2 / Demagnification], beamcolor)
# Camera
Sensor = Rectangle((WorkingDistance + SensorPosition, options.SensorSize / 2),
Thickness / 4, -options.SensorSize, facecolor="black")
plt.gca().add_patch(Sensor)
Housing = Rectangle((WorkingDistance + SensorPosition + Thickness / 4,
options.SensorSize / 2 / .618), Thickness / 4 / .618,
-options.SensorSize / .618, facecolor="black")
plt.gca().add_patch(Housing)
# Text
step = options.FOV / 8.0
plt.text(1.618 * WorkingDistance, options.FOV / 2,
'- 1 ' + str(options.InputEnergy / 1000) + ' kV x-ray photon')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - step,
'- ' + str(int(PhotonsAfterScintillator)) + ' ' + str(
Wavelength * 1e9) + ' nm photons after scintillator')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 2 * step,
'- ' + str(int(PhotonsAfterLens)) + ' ' + str(
Wavelength * 1e9) + ' nm photons after lens')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 3 * step,
'- ' + str(int(ProducedElectrons)) + ' electrons on sensor')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 4 * step,
'- Opening Angle: ' + str(
options.OpeningAngle) + ' deg') # http://is.gd/pxodor
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 5 * step,
'- Sensorsize: ' + str(options.SensorSize) + ' cm')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 6 * step,
'- Demagnification: ' + str('%.2f' % Demagnification) + 'x')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 7 * step,
'- To achieve ' + str('%.2f' % options.LinePairs) + ' lp/mm, we need')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 8 * step,
' a sensor with ' + str(
round(PixelsNeeded ** 2 / 1e6, 2)) + ' Mpx (' + str(
int(PixelsNeeded)) + 'x' + str(int(PixelsNeeded)) + ' px)')
plt.text(1.618 * WorkingDistance, options.FOV / 2 - 9 * step,
' resulting in a pixelsize of ' + str(
'%.2f' % (SensorPixelSize * 1000)) + ' um.')
# Plot NA
plt.subplot(234)
plt.axis('equal')
Magnification = numpy.arange(0, 1.01, 0.01)
for FStop in [0.5, 0.8, 1, 1.2, 1.4, 2]:
plt.plot(Magnification, Magnification / (2 * FStop * (1 + Magnification)),
label='f/' + str('%0.2f' % FStop))
plt.plot(Magnification,
Magnification / (2 * options.FStop * (1 + Magnification)), 'g--',
linewidth=5, label='f/' + str('%0.2f' % options.FStop))
plt.legend(loc='upper left')
plt.hlines(NumericalApertureAverage, 0, 1)
plt.text(0.618, NumericalApertureAverage, 'NA flat panel')
plt.hlines(NumericalApertureDetermined, 0, 1)
plt.text(0.618, NumericalApertureDetermined, 'simulated NA of our lens')
plt.hlines(NumericalApertureJBAG, 0, 1)
plt.text(0.618, NumericalApertureJBAG, 'NA JBAG (?)')
plt.vlines(1 / Demagnification, 0, 1, 'g', '--')
plt.text(1 / Demagnification + 0.25, 0.8, 'Our calculated\nDemagnification: ' +
str(Demagnification) + 'x=' + str(round(1 / Demagnification, 3)))
plt.title('NA')
plt.xlabel('Magnification')
plt.ylabel('NA')
plt.xlim([0, 1])
# Plot X-ray spectra
plt.subplot(235)
# http://stackoverflow.com/a/11249430/323100
Spectra = [
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_040kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_046kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_053kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_060kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_070kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_080kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_090kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_100kV.txt')),
(os.path.join(os.getcwd(), 'Spectra/Xray-Spectrum_120kV.txt'))
]
AnodeMaterial = [str(open(FileName).readlines()[1].split()[3]) for FileName in
Spectra]
Energy = [int(open(FileName).readlines()[2].split()[4]) for FileName in
Spectra]
Ripple = [float(open(FileName).readlines()[3].split()[4]) for FileName in
Spectra]
AirKerma = [float(open(FileName).readlines()[4].split()[3]) for FileName in
Spectra]
MeanEnergy = [float(open(FileName).readlines()[5].split()[3]) for FileName in
Spectra]
FilterMaterial = [str(open(FileName).readlines()[9].split()[1]) for FileName in
Spectra]
FilterThickness = [int(open(FileName).readlines()[9].split()[2]) for FileName
in Spectra]
Data = [(numpy.loadtxt(FileName)) for FileName in Spectra]
for i in range(len(Spectra)):
plt.plot(Data[i][:, 0], Data[i][:, 1],
label=str(Energy[i]) + 'kV, Mean=' + str(
round(MeanEnergy[i], 2)) + 'keV')
# plt.plot( Data[i][:,0], Data[i][:,1], label=str(Energy[i]) +'kV')
plt.legend(loc='best')
plt.title(
'X-ray spectra for ' + AnodeMaterial[0] + ' Anode,\n' + FilterMaterial[
0] + ' Filter with ' + str(FilterThickness[0]) + ' mm Thickness')
plt.xlabel('Energy [kV]')
plt.ylabel('Photons')
# Plot of Ball Lenses
plt.subplot(236)
Dia = numpy.arange(0, 15, 0.2)
NA = (0.918919 * (-1.0 + Dia)) / Dia
FNo = (0.544118 * Dia) / (-1.0 + Dia)
plt.plot(Dia, NA, 'r', label='NA')
plt.plot(Dia, FNo, 'g', label='FNo')
plt.legend(loc='best')
plt.xlim([1.5, 10])
plt.ylim([0.3, 1.2])
for i in (2, 8):
plt.axvline(i, color='k')
if i > 3:
plt.axhline(NA[numpy.where(Dia == i)], color='k')
plt.axhline(FNo[numpy.where(Dia == i)], color='k')
plt.savefig('CalculateDetector.png')
# OUTPUT
if options.Output:
Prefix = 'Config'
try:
os.mkdir(os.path.join(os.getcwd(), Prefix))
except OSError:
print 'Directory', os.path.join(os.getcwd(),
Prefix), 'already exists, did not ' \
'create it...'
print
# We should probably do something more clever with "print "%10.4f" %
# options" than the stuff below
SaveName = Prefix + str(options).replace('{', '_').replace('}', ''). \
replace("'", '').replace(': ', '_').replace(', ', '-'). \
replace('-Output_True', '').replace('9999999999999', '')
# getting the output of 'options' and doing some string-replacement to get
# a nice filename for the output.
# FIGURE
plt.savefig(os.path.join(Prefix, ''.join([SaveName, '.png'])),
dpi=fig.dpi)
print 'Figure saved to ' + os.path.join(Prefix,
''.join([SaveName, '.png']))
print
# LOGFILE
# Redirect console-output to a file according to
# http://stackoverflow.com/a/4829801/323100
# open the result file in write mode
logfile = open(os.path.join(Prefix, ''.join([SaveName, '.txt'])), 'w')
# store the default system handler to be able to restore it
old_stdout = sys.stdout
# Now your file is used by print as destination
sys.stdout = logfile
print 'Call the script with the commandline below to get the same result.'
print ' '.join(sys.argv)
print 80 * '-'
print 'If we define the intensifying screen:'
print '\t- to have an absorption of', 100 * ScreenAbsorption, '%'
print '\t- to convert', 100 * ScreenConversion, \
'% of the incoming x-rays to visible light'
print '\t- and to have an emittance of', 100 * ScreenEmission, \
'% of all converted photons'
print 'we have a total efficiency of the screen of ', 100 * ScreenOutput, \
'%.'
print
print 'One incoming', options.InputEnergy / 1000, \
'keV x-ray photon will thus produce:'
print '\t-', int(round(PhotonsAfterScintillator)), \
'photons with a wavelength of', \
int(Wavelength * 1e9), 'nm (or', round(PhotonEnergyeV, 3), 'eV).'
print '\t-', int(round(PhotonsAfterLens)), 'of these photons (' + \
str(
LensTransmission * 100) + \
' %) will arrive at the sensor'
print '\t- which will produce', int(round(ProducedElectrons)), \
'electrons on a sensor with a QE of', QESensor
print 'To achieve', options.LinePairs, 'lp/mm on a', options.FOV, \
'cm scintillator, we need a sensor with', \
round(int(PixelsNeeded) ** 2 / 1e6, 1), 'Mpx (' + \
str(int(PixelsNeeded)) + 'x' + str(int(PixelsNeeded)), \
'px), which results in pixels with a physical size of', \
round(SensorPixelSize * 1000, 2), 'um on a', options.SensorSize, \
'cm sensor.'
print 'For the chosen optical configuration of:'
print '\t- FOV =', '%.2f' % options.FOV, 'cm and'
print '\t- Opening angle =', '%.2f' % options.OpeningAngle + 'deg we get a'
print '\t- Working distance of', '%.2f' % WorkingDistance, 'cm'
print
print 'Numerical Aperture:'
print '\t- calculated NA:', NumericalApertureCalculated, \
'(central element in scintillator layer of FPD)'
print '\t- average NA:', NumericalApertureAverage, \
'(average NA on optical axis assuming 10 um distance between ' \
'scintillator and detector)'
print '\t- NA JBAG lenses:', NumericalApertureJBAG, \
'(assuming F=1/2NA -> NA = 1/2F, with F =', FStopJBAG, ')'
print '\t- NA for our sensor:', NumericalApertureDetermined, \
'(according to Rene = SensorDistance / (FStop * 2 * SensorDistance/' \
'Magnification)'
sys.stdout = old_stdout # here we restore the default behavior
logfile.close() # do not forget to close your file
print 'Logfile saved to ' + os.path.join(Prefix,
''.join([SaveName, '.txt']))
print
else:
plt.show()
print 'The options were:'
# getting the output of 'options' and doing some string-replacement to get a
# nice filename for the output.
print str(options).replace('{', '').replace('}', '').replace("'", '').replace(
', ', '\n')
print 80 * '_'
print 'Call the script with the commandline below to get the same result...'
print ' '.join(sys.argv)
if options.Output:
print
print 'use the command below to open all the generated .png images with ' \
'Fiji.'
viewcommand = '/home/scratch/Apps/Fiji.app/fiji-linux -eval \'run("Image ' \
'Sequence...", "open=' + os.getcwd() + \
' starting=1 increment=1 scale=100 file=png or=[] ' \
'sort");\' &'
print viewcommand
print 80 * '_'
# # kill all runnig fiji jobs
# killall fiji-linux;
# # remove all calculated images
# rm *.png;
# # calculate some stuff
# for f in {10..43..15}; # Field of View
# do echo FOV $f;
# for o in {10..150..15}; # Opening Angle
# do echo OpeningAngle $o;
# for s in {5..25..15}; # Sensor Size
# do echo SensorSize $s;
# ./CalculateDetector.py -f $f -o $o -s $s -p;
# done;
# done;
# done
# # open fiji
# /home/scratch/Apps/Fiji.app/fiji-linux -eval 'run("Image Sequence...",
# "open=/afs/psi.ch/project/EssentialMed/Dev starting=1 increment=1 scale=100
# file=png or=[] sort");' & # start fiji
| unlicense |
xyguo/scikit-learn | sklearn/utils/validation.py | 15 | 25983 | """Utilities for input validation"""
# Authors: Olivier Grisel
# Gael Varoquaux
# Andreas Mueller
# Lars Buitinck
# Alexandre Gramfort
# Nicolas Tresegnie
# License: BSD 3 clause
import warnings
import numbers
import numpy as np
import scipy.sparse as sp
from ..externals import six
from ..utils.fixes import signature
from .deprecation import deprecated
from ..exceptions import DataConversionWarning as _DataConversionWarning
from ..exceptions import NonBLASDotWarning as _NonBLASDotWarning
from ..exceptions import NotFittedError as _NotFittedError
@deprecated("DataConversionWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class DataConversionWarning(_DataConversionWarning):
pass
@deprecated("NonBLASDotWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class NonBLASDotWarning(_NonBLASDotWarning):
pass
@deprecated("NotFittedError has been moved into the sklearn.exceptions module."
" It will not be available here from version 0.19")
class NotFittedError(_NotFittedError):
pass
FLOAT_DTYPES = (np.float64, np.float32, np.float16)
# Silenced by default to reduce verbosity. Turn on at runtime for
# performance profiling.
warnings.simplefilter('ignore', _NonBLASDotWarning)
def _assert_all_finite(X):
"""Like assert_all_finite, but only for ndarray."""
X = np.asanyarray(X)
# First try an O(n) time, O(1) space solution for the common case that
# everything is finite; fall back to O(n) space np.isfinite to prevent
# false positives from overflow in sum method.
if (X.dtype.char in np.typecodes['AllFloat'] and not np.isfinite(X.sum())
and not np.isfinite(X).all()):
raise ValueError("Input contains NaN, infinity"
" or a value too large for %r." % X.dtype)
def assert_all_finite(X):
"""Throw a ValueError if X contains NaN or infinity.
Input MUST be an np.ndarray instance or a scipy.sparse matrix."""
_assert_all_finite(X.data if sp.issparse(X) else X)
def as_float_array(X, copy=True, force_all_finite=True):
"""Converts an array-like to an array of floats
The new dtype will be np.float32 or np.float64, depending on the original
type. The function can create a copy or modify the argument depending
on the argument copy.
Parameters
----------
X : {array-like, sparse matrix}
copy : bool, optional
If True, a copy of X will be created. If False, a copy may still be
returned if X's dtype is not a floating point type.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
XT : {array, sparse matrix}
An array of type np.float
"""
if isinstance(X, np.matrix) or (not isinstance(X, np.ndarray)
and not sp.issparse(X)):
return check_array(X, ['csr', 'csc', 'coo'], dtype=np.float64,
copy=copy, force_all_finite=force_all_finite,
ensure_2d=False)
elif sp.issparse(X) and X.dtype in [np.float32, np.float64]:
return X.copy() if copy else X
elif X.dtype in [np.float32, np.float64]: # is numpy array
return X.copy('F' if X.flags['F_CONTIGUOUS'] else 'C') if copy else X
else:
return X.astype(np.float32 if X.dtype == np.int32 else np.float64)
def _is_arraylike(x):
"""Returns whether the input is array-like"""
return (hasattr(x, '__len__') or
hasattr(x, 'shape') or
hasattr(x, '__array__'))
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensembles length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
def _shape_repr(shape):
"""Return a platform independent representation of an array shape
Under Python 2, the `long` type introduces an 'L' suffix when using the
default %r format for tuples of integers (typically used to store the shape
of an array).
Under Windows 64 bit (and Python 2), the `long` type is used by default
in numpy shapes even when the integer dimensions are well below 32 bit.
The platform specific type causes string messages or doctests to change
from one platform to another which is not desirable.
Under Python 3, there is no more `long` type so the `L` suffix is never
introduced in string representation.
>>> _shape_repr((1, 2))
'(1, 2)'
>>> one = 2 ** 64 / 2 ** 64 # force an upcast to `long` under Python 2
>>> _shape_repr((one, 2 * one))
'(1, 2)'
>>> _shape_repr((1,))
'(1,)'
>>> _shape_repr(())
'()'
"""
if len(shape) == 0:
return "()"
joined = ", ".join("%d" % e for e in shape)
if len(shape) == 1:
# special notation for singleton tuples
joined += ','
return "(%s)" % joined
def check_consistent_length(*arrays):
"""Check that all arrays have consistent first dimensions.
Checks whether all objects in arrays have the same shape or length.
Parameters
----------
*arrays : list or tuple of input objects.
Objects that will be checked for consistent length.
"""
uniques = np.unique([_num_samples(X) for X in arrays if X is not None])
if len(uniques) > 1:
raise ValueError("Found arrays with inconsistent numbers of samples: "
"%s" % str(uniques))
def indexable(*iterables):
"""Make arrays indexable for cross-validation.
Checks consistent length, passes through None, and ensures that everything
can be indexed by converting sparse matrices to csr and converting
non-iterable objects to arrays.
Parameters
----------
*iterables : lists, dataframes, arrays, sparse matrices
List of objects to ensure sliceability.
"""
result = []
for X in iterables:
if sp.issparse(X):
result.append(X.tocsr())
elif hasattr(X, "__getitem__") or hasattr(X, "iloc"):
result.append(X)
elif X is None:
result.append(X)
else:
result.append(np.array(X))
check_consistent_length(*result)
return result
def _ensure_sparse_format(spmatrix, accept_sparse, dtype, copy,
force_all_finite):
"""Convert a sparse matrix to a given format.
Checks the sparse format of spmatrix and converts if necessary.
Parameters
----------
spmatrix : scipy sparse matrix
Input to validate and convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats ('csc',
'csr', 'coo', 'dok', 'bsr', 'lil', 'dia'). None means that sparse
matrix input will raise an error. If the input is sparse but not in
the allowed format, it will be converted to the first listed format.
dtype : string, type or None (default=none)
Data type of result. If None, the dtype of the input is preserved.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
Returns
-------
spmatrix_converted : scipy sparse matrix.
Matrix that is ensured to have an allowed type.
"""
if accept_sparse in [None, False]:
raise TypeError('A sparse matrix was passed, but dense '
'data is required. Use X.toarray() to '
'convert to a dense numpy array.')
if dtype is None:
dtype = spmatrix.dtype
changed_format = False
if (isinstance(accept_sparse, (list, tuple))
and spmatrix.format not in accept_sparse):
# create new with correct sparse
spmatrix = spmatrix.asformat(accept_sparse[0])
changed_format = True
if dtype != spmatrix.dtype:
# convert dtype
spmatrix = spmatrix.astype(dtype)
elif copy and not changed_format:
# force copy
spmatrix = spmatrix.copy()
if force_all_finite:
if not hasattr(spmatrix, "data"):
warnings.warn("Can't check %s sparse matrix for nan or inf."
% spmatrix.format)
else:
_assert_all_finite(spmatrix.data)
return spmatrix
def check_array(array, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, ensure_min_samples=1, ensure_min_features=1,
warn_on_dtype=False, estimator=None):
"""Input validation on an array, list, sparse matrix or similar.
By default, the input is converted to an at least 2D numpy array.
If the dtype of the array is object, attempt converting to float,
raising on failure.
Parameters
----------
array : object
Input object to check / convert.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
When order is None (default), then if copy=False, nothing is ensured
about the memory layout of the output array; otherwise (copy=True)
the memory layout of the returned array is kept as close as possible
to the original array.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
ensure_min_samples : int (default=1)
Make sure that the array has a minimum number of samples in its first
axis (rows for a 2D array). Setting to 0 disables this check.
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when the input data has effectively 2
dimensions or is originally 1D and ``ensure_2d`` is True. Setting to 0
disables this check.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
"""
if isinstance(accept_sparse, str):
accept_sparse = [accept_sparse]
# store whether originally we wanted numeric dtype
dtype_numeric = dtype == "numeric"
dtype_orig = getattr(array, "dtype", None)
if not hasattr(dtype_orig, 'kind'):
# not a data type (e.g. a column named dtype in a pandas DataFrame)
dtype_orig = None
if dtype_numeric:
if dtype_orig is not None and dtype_orig.kind == "O":
# if input is object, convert to float.
dtype = np.float64
else:
dtype = None
if isinstance(dtype, (list, tuple)):
if dtype_orig is not None and dtype_orig in dtype:
# no dtype conversion required
dtype = None
else:
# dtype conversion required. Let's select the first element of the
# list of accepted types.
dtype = dtype[0]
if estimator is not None:
if isinstance(estimator, six.string_types):
estimator_name = estimator
else:
estimator_name = estimator.__class__.__name__
else:
estimator_name = "Estimator"
context = " by %s" % estimator_name if estimator is not None else ""
if sp.issparse(array):
array = _ensure_sparse_format(array, accept_sparse, dtype, copy,
force_all_finite)
else:
array = np.array(array, dtype=dtype, order=order, copy=copy)
if ensure_2d:
if array.ndim == 1:
if ensure_min_samples >= 2:
raise ValueError("%s expects at least 2 samples provided "
"in a 2 dimensional array-like input"
% estimator_name)
warnings.warn(
"Passing 1d arrays as data is deprecated in 0.17 and will "
"raise ValueError in 0.19. Reshape your data either using "
"X.reshape(-1, 1) if your data has a single feature or "
"X.reshape(1, -1) if it contains a single sample.",
DeprecationWarning)
array = np.atleast_2d(array)
# To ensure that array flags are maintained
array = np.array(array, dtype=dtype, order=order, copy=copy)
# make sure we actually converted to numeric:
if dtype_numeric and array.dtype.kind == "O":
array = array.astype(np.float64)
if not allow_nd and array.ndim >= 3:
raise ValueError("Found array with dim %d. %s expected <= 2."
% (array.ndim, estimator_name))
if force_all_finite:
_assert_all_finite(array)
shape_repr = _shape_repr(array.shape)
if ensure_min_samples > 0:
n_samples = _num_samples(array)
if n_samples < ensure_min_samples:
raise ValueError("Found array with %d sample(s) (shape=%s) while a"
" minimum of %d is required%s."
% (n_samples, shape_repr, ensure_min_samples,
context))
if ensure_min_features > 0 and array.ndim == 2:
n_features = array.shape[1]
if n_features < ensure_min_features:
raise ValueError("Found array with %d feature(s) (shape=%s) while"
" a minimum of %d is required%s."
% (n_features, shape_repr, ensure_min_features,
context))
if warn_on_dtype and dtype_orig is not None and array.dtype != dtype_orig:
msg = ("Data with input dtype %s was converted to %s%s."
% (dtype_orig, array.dtype, context))
warnings.warn(msg, _DataConversionWarning)
return array
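# Illustrative usage sketch for check_array (not part of the original module;
# `some_csr` is a placeholder for any scipy.sparse CSR matrix):
#
#     from sklearn.utils import check_array
#     X = check_array([[1, 2], [3, 4]])                 # validated 2-D ndarray
#     X = check_array(some_csr, accept_sparse=['csr'])  # CSR is kept as-is
#
# Passing a 1-D array warns here (DeprecationWarning) and is slated to become
# an error, as implemented in the ensure_2d branch above.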
def check_X_y(X, y, accept_sparse=None, dtype="numeric", order=None,
copy=False, force_all_finite=True, ensure_2d=True,
allow_nd=False, multi_output=False, ensure_min_samples=1,
ensure_min_features=1, y_numeric=False,
warn_on_dtype=False, estimator=None):
"""Input validation for standard estimators.
Checks X and y for consistent length, enforces X 2d and y 1d.
Standard input checks are only applied to y, such as checking that y
does not have np.nan or np.inf targets. For multi-label y, set
multi_output=True to allow 2d and sparse y. If the dtype of X is
object, attempt converting to float, raising on failure.
Parameters
----------
X : nd-array, list or sparse matrix
Input data.
y : nd-array, list or sparse matrix
Labels.
accept_sparse : string, list of string or None (default=None)
String[s] representing allowed sparse matrix formats, such as 'csc',
'csr', etc. None means that sparse matrix input will raise an error.
If the input is sparse but not in the allowed format, it will be
converted to the first listed format.
dtype : string, type, list of types or None (default="numeric")
Data type of result. If None, the dtype of the input is preserved.
If "numeric", dtype is preserved unless array.dtype is object.
If dtype is a list of types, conversion on the first type is only
performed if the dtype of the input is not in the list.
order : 'F', 'C' or None (default=None)
Whether an array will be forced to be fortran or c-style.
copy : boolean (default=False)
Whether a forced copy will be triggered. If copy=False, a copy might
be triggered by a conversion.
force_all_finite : boolean (default=True)
Whether to raise an error on np.inf and np.nan in X. This parameter
does not influence whether y can have np.inf or np.nan values.
ensure_2d : boolean (default=True)
Whether to make X at least 2d.
allow_nd : boolean (default=False)
Whether to allow X.ndim > 2.
multi_output : boolean (default=False)
Whether to allow 2-d y (array or sparse matrix). If false, y will be
validated as a vector. y cannot have np.nan or np.inf values if
multi_output=True.
ensure_min_samples : int (default=1)
Make sure that X has a minimum number of samples in its first
axis (rows for a 2D array).
ensure_min_features : int (default=1)
Make sure that the 2D array has some minimum number of features
(columns). The default value of 1 rejects empty datasets.
This check is only enforced when X has effectively 2 dimensions or
is originally 1D and ``ensure_2d`` is True. Setting to 0 disables
this check.
y_numeric : boolean (default=False)
Whether to ensure that y has a numeric type. If dtype of y is object,
it is converted to float64. Should only be used for regression
algorithms.
warn_on_dtype : boolean (default=False)
Raise DataConversionWarning if the dtype of the input data structure
does not match the requested dtype, causing a memory copy.
estimator : str or estimator instance (default=None)
If passed, include the name of the estimator in warning messages.
Returns
-------
X_converted : object
The converted and validated X.
y_converted : object
The converted and validated y.
"""
X = check_array(X, accept_sparse, dtype, order, copy, force_all_finite,
ensure_2d, allow_nd, ensure_min_samples,
ensure_min_features, warn_on_dtype, estimator)
if multi_output:
y = check_array(y, 'csr', force_all_finite=True, ensure_2d=False,
dtype=None)
else:
y = column_or_1d(y, warn=True)
_assert_all_finite(y)
if y_numeric and y.dtype.kind == 'O':
y = y.astype(np.float64)
check_consistent_length(X, y)
return X, y
def column_or_1d(y, warn=False):
""" Ravel column or 1d numpy array, else raises an error
Parameters
----------
y : array-like
warn : boolean, default False
To control display of warnings.
Returns
-------
y : array
"""
shape = np.shape(y)
if len(shape) == 1:
return np.ravel(y)
if len(shape) == 2 and shape[1] == 1:
if warn:
warnings.warn("A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
_DataConversionWarning, stacklevel=2)
return np.ravel(y)
raise ValueError("bad input shape {0}".format(shape))
def check_random_state(seed):
"""Turn seed into a np.random.RandomState instance
If seed is None, return the RandomState singleton used by np.random.
If seed is an int, return a new RandomState instance seeded with seed.
If seed is already a RandomState instance, return it.
Otherwise raise ValueError.
"""
if seed is None or seed is np.random:
return np.random.mtrand._rand
if isinstance(seed, (numbers.Integral, np.integer)):
return np.random.RandomState(seed)
if isinstance(seed, np.random.RandomState):
return seed
raise ValueError('%r cannot be used to seed a numpy.random.RandomState'
' instance' % seed)
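# Usage sketch for check_random_state, restating the docstring above:
#
#     rng = check_random_state(0)      # new RandomState seeded with 0
#     rng = check_random_state(None)   # the global numpy singleton
#     rng = check_random_state(rng)    # existing RandomState passes through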
def has_fit_parameter(estimator, parameter):
"""Checks whether the estimator's fit method supports the given parameter.
Examples
--------
>>> from sklearn.svm import SVC
>>> has_fit_parameter(SVC(), "sample_weight")
True
"""
return parameter in signature(estimator.fit).parameters
def check_symmetric(array, tol=1E-10, raise_warning=True,
raise_exception=False):
"""Make sure that array is 2D, square and symmetric.
If the array is not symmetric, then a symmetrized version is returned.
Optionally, a warning or exception is raised if the matrix is not
symmetric.
Parameters
----------
array : nd-array or sparse matrix
Input object to check / convert. Must be two-dimensional and square,
otherwise a ValueError will be raised.
tol : float
Absolute tolerance for equivalence of arrays. Default = 1E-10.
raise_warning : boolean (default=True)
If True then raise a warning if conversion is required.
raise_exception : boolean (default=False)
If True then raise an exception if array is not symmetric.
Returns
-------
array_sym : ndarray or sparse matrix
Symmetrized version of the input array, i.e. the average of array
and array.transpose(). If sparse, then duplicate entries are first
summed and zeros are eliminated.
"""
if (array.ndim != 2) or (array.shape[0] != array.shape[1]):
raise ValueError("array must be 2-dimensional and square. "
"shape = {0}".format(array.shape))
if sp.issparse(array):
diff = array - array.T
# only csr, csc, and coo have `data` attribute
if diff.format not in ['csr', 'csc', 'coo']:
diff = diff.tocsr()
symmetric = np.all(abs(diff.data) < tol)
else:
symmetric = np.allclose(array, array.T, atol=tol)
if not symmetric:
if raise_exception:
raise ValueError("Array must be symmetric")
if raise_warning:
warnings.warn("Array is not symmetric, and will be converted "
"to symmetric by average with its transpose.")
if sp.issparse(array):
conversion = 'to' + array.format
array = getattr(0.5 * (array + array.T), conversion)()
else:
array = 0.5 * (array + array.T)
return array
def check_is_fitted(estimator, attributes, msg=None, all_or_any=all):
"""Perform is_fitted validation for estimator.
Checks if the estimator is fitted by verifying the presence of
"all_or_any" of the passed attributes and raises a NotFittedError with the
given message.
Parameters
----------
estimator : estimator instance.
estimator instance for which the check is performed.
attributes : attribute name(s) given as string or a list/tuple of strings
Eg. : ["coef_", "estimator_", ...], "coef_"
msg : string
The default error message is, "This %(name)s instance is not fitted
yet. Call 'fit' with appropriate arguments before using this method."
For custom messages if "%(name)s" is present in the message string,
it is substituted for the estimator name.
Eg. : "Estimator, %(name)s, must be fitted before sparsifying".
all_or_any : callable, {all, any}, default all
Specify whether all or any of the given attributes must exist.
"""
if msg is None:
msg = ("This %(name)s instance is not fitted yet. Call 'fit' with "
"appropriate arguments before using this method.")
if not hasattr(estimator, 'fit'):
raise TypeError("%s is not an estimator instance." % (estimator))
if not isinstance(attributes, (list, tuple)):
attributes = [attributes]
if not all_or_any([hasattr(estimator, attr) for attr in attributes]):
# FIXME NotFittedError_ --> NotFittedError in 0.19
raise _NotFittedError(msg % {'name': type(estimator).__name__})
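# Usage sketch for check_is_fitted inside an estimator method (the attribute
# names below are hypothetical):
#
#     def predict(self, X):
#         check_is_fitted(self, 'coef_')  # raises NotFittedError before fit()
#         check_is_fitted(self, ['coef_', 'intercept_'], all_or_any=any)
#         ...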
def check_non_negative(X, whom):
"""
Check if there is any negative value in an array.
Parameters
----------
X : array-like or sparse matrix
Input data.
whom : string
Who passed X to this function.
"""
X = X.data if sp.issparse(X) else X
if (X < 0).any():
raise ValueError("Negative values in data passed to %s" % whom)
| bsd-3-clause |
tomlouden/SPIDERMAN | spiderman/test.py | 2 | 3795 | import spiderman as sp
import numpy as np
import matplotlib.pyplot as plt
import time as timing
def plot_test():
spider_params = sp.ModelParams(brightness_model='zhang')
spider_params.n_layers= 20
spider_params.t0= 200 # Central time of PRIMARY transit [days]
spider_params.per= 0.81347753 # Period [days]
spider_params.a_abs= 0.01526 # The absolute value of the semi-major axis [AU]
spider_params.inc= 82.33 # Inclination [degrees]
spider_params.ecc= 0.0 # Eccentricity
spider_params.w= 90 # Argument of periastron
spider_params.rp= 0.1594 # Planet to star radius ratio
spider_params.a= 4.855 # Semi-major axis scaled by stellar radius
spider_params.p_u1= 0 # Planetary limb darkening parameter
spider_params.p_u2= 0 # Planetary limb darkening parameter
spider_params.xi= 0.3 # Ratio of radiative to advective timescale
spider_params.T_n= 1128 # Temperature of nightside
spider_params.delta_T= 942 # Day-night temperature contrast
spider_params.T_s = 5000 # Temperature of the star
spider_params.l1 = 1.3e-6 # start of integration channel in microns
spider_params.l2 = 1.6e-6 # end of integration channel in microns
t= spider_params.t0 + np.linspace(0, + spider_params.per,100)
lc = sp.lightcurve(t,spider_params)
plt.plot(t,lc)
plt.show()
def time_test(nlayers=5,tpoints=100,nreps=1000):
spider_params = sp.ModelParams(brightness_model='zhang')
# spider_params = sp.ModelParams(brightness_model='uniform brightness')
spider_params.n_layers= nlayers
spider_params.t0= 200 # Central time of PRIMARY transit [days]
spider_params.per= 0.81347753 # Period [days]
spider_params.a_abs= 0.01526 # The absolute value of the semi-major axis [AU]
spider_params.inc= 82.33 # Inclination [degrees]
spider_params.ecc= 0.0 # Eccentricity
spider_params.w= 90 # Argument of periastron
spider_params.rp= 0.1594 # Planet to star radius ratio
spider_params.a= 4.855 # Semi-major axis scaled by stellar radius
spider_params.p_u1= 0 # Planetary limb darkening parameter
spider_params.p_u2= 0 # Planetary limb darkening parameter
spider_params.xi= 0.3 # Ratio of radiative to advective timescale
spider_params.T_n= 1128 # Temperature of nightside
spider_params.delta_T= 942 # Day-night temperature contrast
spider_params.T_s = 4500 # Temperature of the star
spider_params.l1 = 1.3e-6 # start of integration channel in microns
spider_params.l2 = 1.6e-6 # end of integration channel in microns
spider_params.pb = 0.01 # planet relative brightness
t= spider_params.t0 + np.linspace(0, + spider_params.per,tpoints)
print('')
print('About to generate {} lightcurves with {} layers and {} timepoints'.format(nreps,spider_params.n_layers,tpoints))
print('')
start = timing.time()
star_grid = sp.stellar_grid.gen_grid(spider_params.l1,spider_params.l2)
ends = []
for i in range(0,nreps):
lc = sp.lightcurve(t,spider_params,stellar_grid=star_grid)
ends += [timing.time()]
ends = np.array(ends)
exec_times = np.diff(ends)
total = ends[-1] - start
medtime = np.median(exec_times)
stdtimes = np.std(exec_times)
print('In total it took {} seconds'.format(round(total,2)))
print('Each function call was between {:.2E} and {:.2E} seconds'.format(np.min(exec_times), np.max(exec_times)))
print('Median execution time was {:.2E} seconds'.format(medtime))
print('Standard deviation was {:.2E} seconds'.format(stdtimes))
print('{} lightcurves generated per second!'.format(round(1.0/medtime),1))
print('')
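# Minimal sketch of how these helpers are typically invoked (assumed entry
# point, not part of the published module):
#
#     if __name__ == '__main__':
#         plot_test()                                    # one phase curve
#         time_test(nlayers=5, tpoints=100, nreps=1000)  # benchmark lightcurve()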
| mit |
DataUSA/datausa-site | mobilitycovid19.py | 2 | 4125 | import pandas as pd
import os
stateToFips = {"AL": "04000US01", "AK": "04000US02", "AZ": "04000US04", "AR": "04000US05", "CA": "04000US06",
"CO": "04000US08", "CT": "04000US09", "DE": "04000US10", "DC": "04000US11", "FL": "04000US12",
"GA": "04000US13", "HI": "04000US15", "ID": "04000US16", "IL": "04000US17", "IN": "04000US18",
"IA": "04000US19", "KS": "04000US20", "KY": "04000US21", "LA": "04000US22", "ME": "04000US23",
"MD": "04000US24", "MA": "04000US25", "MI": "04000US26", "MN": "04000US27", "MS": "04000US28",
"MO": "04000US29", "MT": "04000US30", "NE": "04000US31", "NV": "04000US32", "NH": "04000US33",
"NJ": "04000US34", "NM": "04000US35", "NY": "04000US36", "NC": "04000US37", "ND": "04000US38",
"OH": "04000US39", "OK": "04000US40", "OR": "04000US41", "PA": "04000US42", "RI": "04000US44",
"SC": "04000US45", "SD": "04000US46", "TN": "04000US47", "TX": "04000US48", "UT": "04000US49",
"VT": "04000US50", "VA": "04000US51", "WA": "04000US53", "WV": "04000US54", "WI": "04000US55",
"WY": "04000US56"}
states = {"Alabama": "AL", "Alaska": "AK", "Arizona": "AZ", "Arkansas": "AR", "California": "CA", "Colorado": "CO",
"Connecticut": "CT", "District of Columbia": "DC", "Delaware": "DE", "Florida": "FL", "Georgia": "GA",
"Hawaii": "HI", "Idaho": "ID", "Illinois": "IL", "Indiana": "IN", "Iowa": "IA", "Kansas": "KS",
"Kentucky": "KY", "Louisiana": "LA", "Maine": "ME", "Maryland": "MD", "Massachusetts": "MA", "Michigan": "MI",
"Minnesota": "MN", "Mississippi": "MS", "Missouri": "MO", "Montana": "MT", "Nebraska": "NE", "Nevada": "NV",
"New Hampshire": "NH", "New Jersey": "NJ", "New Mexico": "NM", "New York": "NY", "North Carolina": "NC",
"North Dakota": "ND", "Ohio": "OH", "Oklahoma": "OK", "Oregon": "OR", "Pennsylvania": "PA",
"Rhode Island": "RI", "South Carolina": "SC", "South Dakota": "SD", "Tennessee": "TN", "Texas": "TX",
"Utah": "UT", "Vermont": "VT", "Virginia": "VA", "Washington": "WA", "West Virginia": "WV",
"Wisconsin": "WI", "Wyoming": "WY", "Chicago": "IL"}
df_google = pd.read_csv("https://www.gstatic.com/covid19/mobility/Global_Mobility_Report.csv", low_memory=False)
df_google = df_google[df_google["country_region_code"] == "US"]
df_google = df_google[(~df_google["sub_region_1"].isna()) & (df_google["sub_region_2"].isna())]
df_google = df_google.melt(
id_vars=["country_region", "sub_region_1", "date"],
value_vars=[
"retail_and_recreation_percent_change_from_baseline",
"grocery_and_pharmacy_percent_change_from_baseline",
"parks_percent_change_from_baseline",
"transit_stations_percent_change_from_baseline",
"workplaces_percent_change_from_baseline",
"residential_percent_change_from_baseline"
]
)
df_google["variable"] = df_google["variable"].replace({
"retail_and_recreation_percent_change_from_baseline": "Retail and Recreation",
"grocery_and_pharmacy_percent_change_from_baseline": "Grocery and Pharmacy",
"parks_percent_change_from_baseline": "Parks",
"transit_stations_percent_change_from_baseline": "Transit Stations",
"workplaces_percent_change_from_baseline": "Workplaces",
"residential_percent_change_from_baseline": "Residential"
})
df_google = df_google.drop(columns=["country_region"])
df_google = df_google.rename(columns={
"sub_region_1": "Geography",
"date": "Date",
"variable": "Type",
"value": "Percent Change from Baseline"
})
df_google = df_google[~df_google["Geography"].isna()]
df_google["ID Geography"] = df_google["Geography"].replace(states).replace(stateToFips)
df_google["Date"] = df_google["Date"].str.replace("-", "/")
path = os.path.dirname(os.path.abspath("__file__")) + "/static/mobilitycovid19.json"
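# Only overwrite the cached JSON when the freshly downloaded table has more
# rows than the previously saved one, so a partial or failed download never
# replaces a longer existing extract.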
previous = pd.read_json(path) if os.path.exists(path) else pd.DataFrame([])
if len(df_google) > len(previous):
df_google.to_json(path, orient="records")
| agpl-3.0 |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/svm/tests/test_bounds.py | 49 | 2386 | import warnings
import numpy as np
from scipy import sparse as sp
from sklearn.svm.bounds import l1_min_c
from sklearn.svm import LinearSVC
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.utils.testing import assert_true, raises
from sklearn.utils.testing import assert_raise_message
dense_X = [[-1, 0], [0, 1], [1, 1], [1, 1]]
sparse_X = sp.csr_matrix(dense_X)
Y1 = [0, 1, 1, 1]
Y2 = [2, 1, 0, 0]
def test_l1_min_c():
losses = ['squared_hinge', 'log']
Xs = {'sparse': sparse_X, 'dense': dense_X}
Ys = {'two-classes': Y1, 'multi-class': Y2}
intercepts = {'no-intercept': {'fit_intercept': False},
'fit-intercept': {'fit_intercept': True,
'intercept_scaling': 10}}
for loss in losses:
for X_label, X in Xs.items():
for Y_label, Y in Ys.items():
for intercept_label, intercept_params in intercepts.items():
check = lambda: check_l1_min_c(X, Y, loss,
**intercept_params)
check.description = ('Test l1_min_c loss=%r %s %s %s' %
(loss, X_label, Y_label,
intercept_label))
yield check
# loss='l2' should raise ValueError
assert_raise_message(ValueError, "loss type not in",
l1_min_c, dense_X, Y1, "l2")
def check_l1_min_c(X, y, loss, fit_intercept=True, intercept_scaling=None):
min_c = l1_min_c(X, y, loss, fit_intercept, intercept_scaling)
clf = {
'log': LogisticRegression(penalty='l1'),
'squared_hinge': LinearSVC(loss='squared_hinge',
penalty='l1', dual=False),
}[loss]
clf.fit_intercept = fit_intercept
clf.intercept_scaling = intercept_scaling
clf.C = min_c
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) == 0).all())
assert_true((np.asarray(clf.intercept_) == 0).all())
clf.C = min_c * 1.01
clf.fit(X, y)
assert_true((np.asarray(clf.coef_) != 0).any() or
(np.asarray(clf.intercept_) != 0).any())
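# Property verified above: l1_min_c returns the largest C at which the
# L1-penalised model is still entirely sparse, so fitting at exactly min_c
# must leave every coefficient and intercept at zero, while C = 1.01 * min_c
# must yield at least one non-zero coefficient or intercept.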
@raises(ValueError)
def test_ill_posed_min_c():
X = [[0, 0], [0, 0]]
y = [0, 1]
l1_min_c(X, y)
@raises(ValueError)
def test_unsupported_loss():
l1_min_c(dense_X, Y1, 'l1')
| mit |
jorisvandenbossche/numpy | numpy/lib/npyio.py | 1 | 88059 | from __future__ import division, absolute_import, print_function
import sys
import os
import re
import functools
import itertools
import warnings
import weakref
import contextlib
from operator import itemgetter, index as opindex
import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
from numpy.core.overrides import set_module
from numpy.core._internal import recursive
from ._iotools import (
LineSplitter, NameValidator, StringConverter, ConverterError,
ConverterLockError, ConversionWarning, _is_string_like,
has_nested_fields, flatten_dtype, easy_dtype, _decode_line
)
from numpy.compat import (
asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike,
pickle, contextlib_nullcontext
)
if sys.version_info[0] >= 3:
from collections.abc import Mapping
else:
from future_builtins import map
from collections import Mapping
@set_module('numpy')
def loads(*args, **kwargs):
# NumPy 1.15.0, 2017-12-10
warnings.warn(
"np.loads is deprecated, use pickle.loads instead",
DeprecationWarning, stacklevel=2)
return pickle.loads(*args, **kwargs)
__all__ = [
'savetxt', 'loadtxt', 'genfromtxt', 'ndfromtxt', 'mafromtxt',
'recfromtxt', 'recfromcsv', 'load', 'loads', 'save', 'savez',
'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
]
array_function_dispatch = functools.partial(
overrides.array_function_dispatch, module='numpy')
class BagObj(object):
"""
BagObj(obj)
Convert attribute look-ups to getitems on the object passed in.
Parameters
----------
obj : class instance
Object on which attribute look-up is performed.
Examples
--------
>>> from numpy.lib.npyio import BagObj as BO
>>> class BagDemo(object):
... def __getitem__(self, key): # An instance of BagObj(BagDemo)
... # will call this method when any
... # attribute look-up is required
... result = "Doesn't matter what you want, "
... return result + "you're gonna get this"
...
>>> demo_obj = BagDemo()
>>> bagobj = BO(demo_obj)
>>> bagobj.hello_there
"Doesn't matter what you want, you're gonna get this"
>>> bagobj.I_can_be_anything
"Doesn't matter what you want, you're gonna get this"
"""
def __init__(self, obj):
# Use weakref to make NpzFile objects collectable by refcount
self._obj = weakref.proxy(obj)
def __getattribute__(self, key):
try:
return object.__getattribute__(self, '_obj')[key]
except KeyError:
raise AttributeError(key)
def __dir__(self):
"""
Enables dir(bagobj) to list the files in an NpzFile.
This also enables tab-completion in an interpreter or IPython.
"""
return list(object.__getattribute__(self, '_obj').keys())
def zipfile_factory(file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if not hasattr(file, 'read'):
file = os_fspath(file)
import zipfile
kwargs['allowZip64'] = True
return zipfile.ZipFile(file, *args, **kwargs)
class NpzFile(Mapping):
"""
NpzFile(fid)
A dictionary-like object with lazy-loading of files in the zipped
archive provided on construction.
`NpzFile` is used to load files in the NumPy ``.npz`` data archive
format. It assumes that files in the archive have a ``.npy`` extension,
other files are ignored.
The arrays and file strings are lazily loaded on either
getitem access using ``obj['key']`` or attribute lookup using
``obj.f.key``. A list of all files (without ``.npy`` extensions) can
be obtained with ``obj.files`` and the ZipFile object itself using
``obj.zip``.
Attributes
----------
files : list of str
List of all files in the archive with a ``.npy`` extension.
zip : ZipFile instance
The ZipFile object initialized with the zipped archive.
f : BagObj instance
An object on which attribute lookup can be performed as an alternative
to getitem access on the `NpzFile` instance itself.
allow_pickle : bool, optional
Allow loading pickled data. Default: False
.. versionchanged:: 1.16.3
Made default False in response to CVE-2019-6446.
pickle_kwargs : dict, optional
Additional keyword arguments to pass on to pickle.load.
These are only useful when loading object arrays saved on
Python 2 when using Python 3.
Parameters
----------
fid : file or str
The zipped archive to open. This is either a file-like object
or a string containing the path to the archive.
own_fid : bool, optional
Whether NpzFile should close the file handle.
Requires that `fid` is a file-like object.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
>>> np.savez(outfile, x=x, y=y)
>>> _ = outfile.seek(0)
>>> npz = np.load(outfile)
>>> isinstance(npz, np.lib.io.NpzFile)
True
>>> sorted(npz.files)
['x', 'y']
>>> npz['x'] # getitem access
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> npz.f.x # attribute lookup
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
def __init__(self, fid, own_fid=False, allow_pickle=False,
pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an
# optional component of the so-called standard library.
_zip = zipfile_factory(fid)
self._files = _zip.namelist()
self.files = []
self.allow_pickle = allow_pickle
self.pickle_kwargs = pickle_kwargs
for x in self._files:
if x.endswith('.npy'):
self.files.append(x[:-4])
else:
self.files.append(x)
self.zip = _zip
self.f = BagObj(self)
if own_fid:
self.fid = fid
else:
self.fid = None
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.close()
def close(self):
"""
Close the file.
"""
if self.zip is not None:
self.zip.close()
self.zip = None
if self.fid is not None:
self.fid.close()
self.fid = None
self.f = None # break reference cycle
def __del__(self):
self.close()
# Implement the Mapping ABC
def __iter__(self):
return iter(self.files)
def __len__(self):
return len(self.files)
def __getitem__(self, key):
# FIXME: This seems like it will copy strings around
# more than is strictly necessary. The zipfile
# will read the string and then
# the format.read_array will copy the string
# to another place in memory.
# It would be better if the zipfile could read
# (or at least uncompress) the data
# directly into the array memory.
member = False
if key in self._files:
member = True
elif key in self.files:
member = True
key += '.npy'
if member:
bytes = self.zip.open(key)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
if magic == format.MAGIC_PREFIX:
bytes = self.zip.open(key)
return format.read_array(bytes,
allow_pickle=self.allow_pickle,
pickle_kwargs=self.pickle_kwargs)
else:
return self.zip.read(key)
else:
raise KeyError("%s is not a file in the archive" % key)
if sys.version_info.major == 3:
# deprecate the python 2 dict apis that we supported by accident in
# python 3. We forgot to implement itervalues() at all in earlier
        # versions of numpy, so no need to deprecate it here.
def iteritems(self):
# Numpy 1.15, 2018-02-20
            warnings.warn(
                "NpzFile.iteritems is deprecated in python 3, to match the "
                "removal of dict.iteritems. Use .items() instead.",
DeprecationWarning, stacklevel=2)
return self.items()
def iterkeys(self):
# Numpy 1.15, 2018-02-20
warnings.warn(
"NpzFile.iterkeys is deprecated in python 3, to match the "
"removal of dict.iterkeys. Use .keys() instead.",
DeprecationWarning, stacklevel=2)
return self.keys()
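# Illustrative sketch (not part of NumPy): the lazy, dictionary-like access
# that NpzFile provides, as described in its docstring above.  Array data is
# only read when a key is looked up.  The helper name is hypothetical.
def _example_lazy_npz_access():
    """Save two arrays into an in-memory .npz archive and read one back."""
    import io
    import numpy as np
    buf = io.BytesIO()
    np.savez(buf, x=np.arange(3), y=np.ones(2))
    buf.seek(0)
    with np.load(buf) as npz:          # NpzFile supports the context manager
        names = sorted(npz.files)      # ['x', 'y'] -- no arrays loaded yet
        x = npz['x']                   # the 'x' member is read on this access
    return names, x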
@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
encoding='ASCII'):
"""
Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.
.. warning:: Loading files that contain object arrays uses the ``pickle``
module, which is not secure against erroneous or maliciously
constructed data. Consider passing ``allow_pickle=False`` to
load data that is known not to contain object arrays for the
safer handling of untrusted sources.
Parameters
----------
file : file-like object, string, or pathlib.Path
The file to read. File-like objects must support the
``seek()`` and ``read()`` methods. Pickled files require that the
file-like object support the ``readline()`` method as well.
mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
If not None, then memory-map the file, using the given mode (see
`numpy.memmap` for a detailed description of the modes). A
memory-mapped array is kept on disk. However, it can be accessed
and sliced like any ndarray. Memory mapping is especially useful
for accessing small fragments of large files without reading the
entire file into memory.
allow_pickle : bool, optional
Allow loading pickled object arrays stored in npy files. Reasons for
disallowing pickles include security, as loading pickled data can
execute arbitrary code. If pickles are disallowed, loading object
arrays will fail. Default: False
.. versionchanged:: 1.16.3
Made default False in response to CVE-2019-6446.
fix_imports : bool, optional
Only useful when loading Python 2 generated pickled files on Python 3,
which includes npy/npz files containing object arrays. If `fix_imports`
is True, pickle will try to map the old Python 2 names to the new names
used in Python 3.
encoding : str, optional
What encoding to use when reading Python 2 strings. Only useful when
loading Python 2 generated pickled files in Python 3, which includes
npy/npz files containing object arrays. Values other than 'latin1',
'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
data. Default: 'ASCII'
Returns
-------
result : array, tuple, dict, etc.
Data stored in the file. For ``.npz`` files, the returned instance
        of the NpzFile class must be closed to avoid leaking file descriptors.
Raises
------
IOError
If the input file does not exist or cannot be read.
ValueError
The file contains an object array, but allow_pickle=False given.
See Also
--------
save, savez, savez_compressed, loadtxt
memmap : Create a memory-map to an array stored in a file on disk.
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
- If the file contains pickle data, then whatever object is stored
in the pickle is returned.
- If the file is a ``.npy`` file, then a single array is returned.
- If the file is a ``.npz`` file, then a dictionary-like object is
returned, containing ``{filename: array}`` key-value pairs, one for
each file in the archive.
- If the file is a ``.npz`` file, the returned value supports the
context manager protocol in a similar fashion to the open function::
with load('foo.npz') as data:
a = data['a']
The underlying file descriptor is closed when exiting the 'with'
block.
Examples
--------
Store data to disk, and load it again:
>>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
>>> np.load('/tmp/123.npy')
array([[1, 2, 3],
[4, 5, 6]])
Store compressed data to disk, and load it again:
>>> a=np.array([[1, 2, 3], [4, 5, 6]])
>>> b=np.array([1, 2])
>>> np.savez('/tmp/123.npz', a=a, b=b)
>>> data = np.load('/tmp/123.npz')
>>> data['a']
array([[1, 2, 3],
[4, 5, 6]])
>>> data['b']
array([1, 2])
>>> data.close()
Mem-map the stored array, and then access the second row
directly from disk:
>>> X = np.load('/tmp/123.npy', mmap_mode='r')
>>> X[1, :]
memmap([4, 5, 6])
"""
if encoding not in ('ASCII', 'latin1', 'bytes'):
# The 'encoding' value for pickle also affects what encoding
# the serialized binary data of NumPy arrays is loaded
# in. Pickle does not pass on the encoding information to
# NumPy. The unpickling code in numpy.core.multiarray is
# written to assume that unicode data appearing where binary
# should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
#
# Other encoding values can corrupt binary data, and we
# purposefully disallow them. For the same reason, the errors=
# argument is not exposed, as values other than 'strict'
# result can similarly silently corrupt numerical data.
raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")
if sys.version_info[0] >= 3:
pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = {}
# TODO: Use contextlib.ExitStack once we drop Python 2
if hasattr(file, 'read'):
fid = file
own_fid = False
else:
fid = open(os_fspath(file), "rb")
own_fid = True
try:
        # Code to distinguish NumPy binary files from pickles.
_ZIP_PREFIX = b'PK\x03\x04'
_ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
# zip-file (assume .npz)
# Transfer file ownership to NpzFile
ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
own_fid = False
return ret
elif magic == format.MAGIC_PREFIX:
# .npy file
if mmap_mode:
return format.open_memmap(file, mode=mmap_mode)
else:
return format.read_array(fid, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Try a pickle
if not allow_pickle:
raise ValueError("Cannot load file containing pickled data "
"when allow_pickle=False")
try:
return pickle.load(fid, **pickle_kwargs)
except Exception:
raise IOError(
"Failed to interpret file %s as a pickle" % repr(file))
finally:
if own_fid:
fid.close()
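# Illustrative sketch (not part of NumPy): the same format sniffing that
# ``load`` performs above -- a leading zip signature means an ``.npz`` archive,
# the ``.npy`` magic means a single array, and anything else is treated as a
# pickle.  The helper name is hypothetical.
def _example_sniff_file_format(fid):
    """Return 'npz', 'npy' or 'pickle' for an open binary file object."""
    from numpy.lib import format as _format
    n = len(_format.MAGIC_PREFIX)
    magic = fid.read(n)
    fid.seek(-min(n, len(magic)), 1)   # rewind so the caller can still read
    if magic.startswith(b'PK\x03\x04') or magic.startswith(b'PK\x05\x06'):
        return 'npz'
    if magic == _format.MAGIC_PREFIX:
        return 'npy'
    return 'pickle'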
def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
return (arr,)
@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
"""
Save an array to a binary file in NumPy ``.npy`` format.
Parameters
----------
file : file, str, or pathlib.Path
File or filename to which the data is saved. If file is a file-object,
then the filename is unchanged. If file is a string or Path, a ``.npy``
extension will be appended to the filename if it does not already
have one.
arr : array_like
Array data to be saved.
allow_pickle : bool, optional
Allow saving object arrays using Python pickles. Reasons for disallowing
pickles include security (loading pickled data can execute arbitrary
code) and portability (pickled objects may not be loadable on different
Python installations, for example if the stored objects require libraries
that are not available, and not all pickled data is compatible between
Python 2 and Python 3).
Default: True
fix_imports : bool, optional
Only useful in forcing objects in object arrays on Python 3 to be
pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
will try to map the new Python 3 names to the old module names used in
Python 2, so that the pickle data stream is readable with Python 2.
See Also
--------
savez : Save several arrays into a ``.npz`` archive
savetxt, load
Notes
-----
For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
Any data saved to the file is appended to the end of the file.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> np.save(outfile, x)
>>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> np.load(outfile)
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
>>> with open('test.npy', 'wb') as f:
... np.save(f, np.array([1, 2]))
... np.save(f, np.array([1, 3]))
>>> with open('test.npy', 'rb') as f:
... a = np.load(f)
... b = np.load(f)
>>> print(a, b)
# [1 2] [1 3]
"""
own_fid = False
if hasattr(file, 'write'):
fid = file
else:
file = os_fspath(file)
if not file.endswith('.npy'):
file = file + '.npy'
fid = open(file, "wb")
own_fid = True
if sys.version_info[0] >= 3:
pickle_kwargs = dict(fix_imports=fix_imports)
else:
# Nothing to do on Python 2
pickle_kwargs = None
try:
arr = np.asanyarray(arr)
format.write_array(fid, arr, allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
finally:
if own_fid:
fid.close()
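# Illustrative sketch (not part of NumPy): ``save`` appends a ``.npy``
# extension to string paths that lack one, as documented above.  The path
# handling below is an assumption made only for this example.
def _example_save_adds_extension(tmpdir):
    """Save to ``<tmpdir>/data`` and load the resulting ``data.npy`` back."""
    import os
    import numpy as np
    target = os.path.join(tmpdir, "data")      # no .npy suffix given
    np.save(target, np.arange(4))
    return np.load(target + ".npy")            # the suffix was added on save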
def _savez_dispatcher(file, *args, **kwds):
for a in args:
yield a
for v in kwds.values():
yield v
@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
"""Save several arrays into a single file in uncompressed ``.npz`` format.
If arguments are passed in with no keywords, the corresponding variable
names, in the ``.npz`` file, are 'arr_0', 'arr_1', etc. If keyword
arguments are given, the corresponding variable names, in the ``.npz``
file will match the keyword names.
Parameters
----------
file : str or file
Either the filename (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
save : Save a single array to a binary file in NumPy format.
savetxt : Save an array to a file as plain text.
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is not compressed and each file
in the archive contains one variable in ``.npy`` format. For a
description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
When saving dictionaries, the dictionary keys become filenames
inside the ZIP archive. Therefore, keys should be valid filenames.
E.g., avoid keys that begin with ``/`` or contain ``.``.
Examples
--------
>>> from tempfile import TemporaryFile
>>> outfile = TemporaryFile()
>>> x = np.arange(10)
>>> y = np.sin(x)
Using `savez` with \\*args, the arrays are saved with default names.
>>> np.savez(outfile, x, y)
>>> _ = outfile.seek(0) # Only needed here to simulate closing & reopening file
>>> npzfile = np.load(outfile)
>>> npzfile.files
['arr_0', 'arr_1']
>>> npzfile['arr_0']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
Using `savez` with \\**kwds, the arrays are saved with the keyword names.
>>> outfile = TemporaryFile()
>>> np.savez(outfile, x=x, y=y)
>>> _ = outfile.seek(0)
>>> npzfile = np.load(outfile)
>>> sorted(npzfile.files)
['x', 'y']
>>> npzfile['x']
array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
"""
_savez(file, args, kwds, False)
def _savez_compressed_dispatcher(file, *args, **kwds):
for a in args:
yield a
for v in kwds.values():
yield v
@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
"""
Save several arrays into a single file in compressed ``.npz`` format.
If keyword arguments are given, then filenames are taken from the keywords.
If arguments are passed in with no keywords, then stored filenames are
arr_0, arr_1, etc.
Parameters
----------
file : str or file
Either the filename (string) or an open file (file-like object)
where the data will be saved. If file is a string or a Path, the
``.npz`` extension will be appended to the filename if it is not
already there.
args : Arguments, optional
Arrays to save to the file. Since it is not possible for Python to
know the names of the arrays outside `savez`, the arrays will be saved
with names "arr_0", "arr_1", and so on. These arguments can be any
expression.
kwds : Keyword arguments, optional
Arrays to save to the file. Arrays will be saved in the file with the
keyword names.
Returns
-------
None
See Also
--------
numpy.save : Save a single array to a binary file in NumPy format.
numpy.savetxt : Save an array to a file as plain text.
numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
numpy.load : Load the files created by savez_compressed.
Notes
-----
The ``.npz`` file format is a zipped archive of files named after the
variables they contain. The archive is compressed with
``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
in ``.npy`` format. For a description of the ``.npy`` format, see
:py:mod:`numpy.lib.format`.
When opening the saved ``.npz`` file with `load` a `NpzFile` object is
returned. This is a dictionary-like object which can be queried for
its list of arrays (with the ``.files`` attribute), and for the arrays
themselves.
Examples
--------
>>> test_array = np.random.rand(3, 2)
>>> test_vector = np.random.rand(4)
>>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
>>> loaded = np.load('/tmp/123.npz')
>>> print(np.array_equal(test_array, loaded['a']))
True
>>> print(np.array_equal(test_vector, loaded['b']))
True
"""
_savez(file, args, kwds, True)
def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
# Import is postponed to here since zipfile depends on gzip, an optional
# component of the so-called standard library.
import zipfile
if not hasattr(file, 'write'):
file = os_fspath(file)
if not file.endswith('.npz'):
file = file + '.npz'
namedict = kwds
for i, val in enumerate(args):
key = 'arr_%d' % i
if key in namedict.keys():
raise ValueError(
"Cannot use un-named variables and keyword %s" % key)
namedict[key] = val
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zipfile_factory(file, mode="w", compression=compression)
if sys.version_info >= (3, 6):
# Since Python 3.6 it is possible to write directly to a ZIP file.
for key, val in namedict.items():
fname = key + '.npy'
val = np.asanyarray(val)
# always force zip64, gh-10776
with zipf.open(fname, 'w', force_zip64=True) as fid:
format.write_array(fid, val,
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
else:
# Stage arrays in a temporary file on disk, before writing to zip.
# Import deferred for startup time improvement
import tempfile
        # Since the target file might be big enough to exceed the capacity of
        # a global temporary directory, create the temp file side-by-side with
        # the target file.
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
fd, tmpfile = tempfile.mkstemp(prefix=file_prefix, dir=file_dir, suffix='-numpy.npy')
os.close(fd)
try:
for key, val in namedict.items():
fname = key + '.npy'
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val),
allow_pickle=allow_pickle,
pickle_kwargs=pickle_kwargs)
fid.close()
fid = None
zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
zipf.close()
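# Illustrative sketch (not part of NumPy): ``_savez`` stores every array as a
# ``<name>.npy`` member of the archive, naming positional arguments ``arr_0``,
# ``arr_1``, and so on.  The helper name is hypothetical.
def _example_npz_member_names():
    """Show the member names written into an uncompressed .npz archive."""
    import io
    import zipfile
    import numpy as np
    buf = io.BytesIO()
    np.savez(buf, np.zeros(2), weights=np.ones(3))
    buf.seek(0)
    with zipfile.ZipFile(buf) as zf:
        return zf.namelist()           # contains 'arr_0.npy' and 'weights.npy'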
def _getconv(dtype):
""" Find the correct dtype converter. Adapted from matplotlib """
    def floatconv(x):
        # normalise case so that hex literals like '0X1.8p3' are recognised
        x = x.lower()
        if '0x' in x:
            return float.fromhex(x)
        return float(x)
typ = dtype.type
if issubclass(typ, np.bool_):
return lambda x: bool(int(x))
if issubclass(typ, np.uint64):
return np.uint64
if issubclass(typ, np.int64):
return np.int64
if issubclass(typ, np.integer):
return lambda x: int(float(x))
elif issubclass(typ, np.longdouble):
return np.longdouble
elif issubclass(typ, np.floating):
return floatconv
elif issubclass(typ, complex):
return lambda x: complex(asstr(x).replace('+-', '-'))
elif issubclass(typ, np.bytes_):
return asbytes
elif issubclass(typ, np.unicode_):
return asunicode
else:
return asstr
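# Illustrative sketch (not part of NumPy): how the per-column converters chosen
# by ``_getconv`` above behave on raw text tokens.  The helper name is
# hypothetical.
def _example_field_conversion():
    """Convert text tokens the way loadtxt's default column converters do."""
    def int_conv(x):
        return int(float(x))                   # integer columns accept '3.0'
    def bool_conv(x):
        return bool(int(x))                    # boolean columns accept '0'/'1'
    def complex_conv(x):
        return complex(x.replace('+-', '-'))   # matches the complex branch
    return int_conv('3.0'), bool_conv('0'), complex_conv('1+-2j')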
# amount of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000
@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
converters=None, skiprows=0, usecols=None, unpack=False,
ndmin=0, encoding='bytes', max_rows=None):
"""
Load data from a text file.
Each row in the text file must have the same number of values.
Parameters
----------
fname : file, str, or pathlib.Path
File, filename, or generator to read. If the filename extension is
``.gz`` or ``.bz2``, the file is first decompressed. Note that
generators should return byte strings for Python 3k.
dtype : data-type, optional
Data-type of the resulting array; default: float. If this is a
structured data-type, the resulting array will be 1-dimensional, and
each row will be interpreted as an element of the array. In this
case, the number of columns used must match the number of fields in
the data-type.
comments : str or sequence of str, optional
The characters or list of characters used to indicate the start of a
comment. None implies no comments. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is '#'.
delimiter : str, optional
The string used to separate values. For backwards compatibility, byte
strings will be decoded as 'latin1'. The default is whitespace.
converters : dict, optional
A dictionary mapping column number to a function that will parse the
column string into the desired value. E.g., if column 0 is a date
string: ``converters = {0: datestr2num}``. Converters can also be
used to provide a default value for missing data (but see also
`genfromtxt`): ``converters = {3: lambda s: float(s.strip() or 0)}``.
Default: None.
skiprows : int, optional
Skip the first `skiprows` lines, including comments; default: 0.
usecols : int or sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
The default, None, results in all columns being read.
.. versionchanged:: 1.11.0
When a single column has to be read it is possible to use
an integer instead of a tuple. E.g ``usecols = 3`` reads the
fourth column the same way as ``usecols = (3,)`` would.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
unpacked using ``x, y, z = loadtxt(...)``. When used with a structured
data-type, arrays are returned for each field. Default is False.
ndmin : int, optional
The returned array will have at least `ndmin` dimensions.
Otherwise mono-dimensional axes will be squeezed.
Legal values: 0 (default), 1 or 2.
.. versionadded:: 1.6.0
encoding : str, optional
Encoding used to decode the inputfile. Does not apply to input streams.
The special value 'bytes' enables backward compatibility workarounds
        that ensure you receive byte arrays as results if possible and pass
'latin1' encoded strings to converters. Override this value to receive
unicode arrays and pass strings as input to converters. If set to None
the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
max_rows : int, optional
Read `max_rows` lines of content after `skiprows` lines. The default
is to read all the lines.
.. versionadded:: 1.16.0
Returns
-------
out : ndarray
Data read from the text file.
See Also
--------
load, fromstring, fromregex
genfromtxt : Load data with missing values handled as specified.
scipy.io.loadmat : reads MATLAB data files
Notes
-----
This function aims to be a fast reader for simply formatted files. The
`genfromtxt` function provides more sophisticated handling of, e.g.,
lines with missing values.
.. versionadded:: 1.10.0
The strings produced by the Python float.hex method can be used as
input for floats.
Examples
--------
>>> from io import StringIO # StringIO behaves like a file object
>>> c = StringIO(u"0 1\\n2 3")
>>> np.loadtxt(c)
array([[0., 1.],
[2., 3.]])
>>> d = StringIO(u"M 21 72\\nF 35 58")
>>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
... 'formats': ('S1', 'i4', 'f4')})
array([(b'M', 21, 72.), (b'F', 35, 58.)],
dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
>>> c = StringIO(u"1,0,2\\n3,0,4")
>>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
>>> x
array([1., 3.])
>>> y
array([2., 4.])
"""
# Type conversions for Py3 convenience
if comments is not None:
if isinstance(comments, (basestring, bytes)):
comments = [comments]
comments = [_decode_line(x) for x in comments]
# Compile regex for comments beforehand
comments = (re.escape(comment) for comment in comments)
regex_comments = re.compile('|'.join(comments))
if delimiter is not None:
delimiter = _decode_line(delimiter)
user_converters = converters
if encoding == 'bytes':
encoding = None
byte_converters = True
else:
byte_converters = False
if usecols is not None:
# Allow usecols to be a single int or a sequence of ints
try:
usecols_as_list = list(usecols)
except TypeError:
usecols_as_list = [usecols]
for col_idx in usecols_as_list:
try:
opindex(col_idx)
except TypeError as e:
e.args = (
"usecols must be an int or a sequence of ints but "
"it contains at least one element of type %s" %
type(col_idx),
)
raise
# Fall back to existing code
usecols = usecols_as_list
fown = False
try:
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if _is_string_like(fname):
fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fencoding = getattr(fh, 'encoding', 'latin1')
fh = iter(fh)
fown = True
else:
fh = iter(fname)
fencoding = getattr(fname, 'encoding', 'latin1')
except TypeError:
raise ValueError('fname must be a string, file handle, or generator')
# input may be a python2 io stream
if encoding is not None:
fencoding = encoding
# we must assume local encoding
# TODO emit portability warning?
elif fencoding is None:
import locale
fencoding = locale.getpreferredencoding()
# not to be confused with the flatten_dtype we import...
@recursive
def flatten_dtype_internal(self, dt):
"""Unpack a structured data-type, and produce re-packing info."""
if dt.names is None:
# If the dtype is flattened, return.
# If the dtype has a shape, the dtype occurs
# in the list more than once.
shape = dt.shape
if len(shape) == 0:
return ([dt.base], None)
else:
packing = [(shape[-1], list)]
if len(shape) > 1:
for dim in dt.shape[-2::-1]:
packing = [(dim*packing[0][0], packing*dim)]
return ([dt.base] * int(np.prod(dt.shape)), packing)
else:
types = []
packing = []
for field in dt.names:
tp, bytes = dt.fields[field]
flat_dt, flat_packing = self(tp)
types.extend(flat_dt)
# Avoid extra nesting for subarrays
if tp.ndim > 0:
packing.extend(flat_packing)
else:
packing.append((len(flat_dt), flat_packing))
return (types, packing)
@recursive
def pack_items(self, items, packing):
"""Pack items into nested lists based on re-packing info."""
if packing is None:
return items[0]
elif packing is tuple:
return tuple(items)
elif packing is list:
return list(items)
else:
start = 0
ret = []
for length, subpacking in packing:
ret.append(self(items[start:start+length], subpacking))
start += length
return tuple(ret)
def split_line(line):
"""Chop off comments, strip, and split at delimiter. """
line = _decode_line(line, encoding=encoding)
if comments is not None:
line = regex_comments.split(line, maxsplit=1)[0]
line = line.strip('\r\n')
if line:
return line.split(delimiter)
else:
return []
def read_data(chunk_size):
"""Parse each line, including the first.
        The file being read, `fh`, is defined in the enclosing scope above.
Parameters
----------
chunk_size : int
At most `chunk_size` lines are read at a time, with iteration
until all lines are read.
"""
X = []
line_iter = itertools.chain([first_line], fh)
line_iter = itertools.islice(line_iter, max_rows)
for i, line in enumerate(line_iter):
vals = split_line(line)
if len(vals) == 0:
continue
if usecols:
vals = [vals[j] for j in usecols]
if len(vals) != N:
line_num = i + skiprows + 1
raise ValueError("Wrong number of columns at line %d"
% line_num)
# Convert each value according to its column and store
items = [conv(val) for (conv, val) in zip(converters, vals)]
# Then pack it according to the dtype's nesting
items = pack_items(items, packing)
X.append(items)
if len(X) > chunk_size:
yield X
X = []
if X:
yield X
try:
# Make sure we're dealing with a proper dtype
dtype = np.dtype(dtype)
defconv = _getconv(dtype)
# Skip the first `skiprows` lines
for i in range(skiprows):
next(fh)
# Read until we find a line with some values, and use
# it to estimate the number of columns, N.
first_vals = None
try:
while not first_vals:
first_line = next(fh)
first_vals = split_line(first_line)
except StopIteration:
# End of lines reached
first_line = ''
first_vals = []
warnings.warn('loadtxt: Empty input file: "%s"' % fname, stacklevel=2)
N = len(usecols or first_vals)
dtype_types, packing = flatten_dtype_internal(dtype)
if len(dtype_types) > 1:
# We're dealing with a structured array, each field of
# the dtype matches a column
converters = [_getconv(dt) for dt in dtype_types]
else:
# All fields have the same dtype
converters = [defconv for i in range(N)]
if N > 1:
packing = [(N, tuple)]
# By preference, use the converters specified by the user
for i, conv in (user_converters or {}).items():
if usecols:
try:
i = usecols.index(i)
except ValueError:
# Unused converter specified
continue
if byte_converters:
# converters may use decode to workaround numpy's old behaviour,
# so encode the string again before passing to the user converter
def tobytes_first(x, conv):
if type(x) is bytes:
return conv(x)
return conv(x.encode("latin1"))
converters[i] = functools.partial(tobytes_first, conv=conv)
else:
converters[i] = conv
converters = [conv if conv is not bytes else
lambda x: x.encode(fencoding) for conv in converters]
# read data in chunks and fill it into an array via resize
# over-allocating and shrinking the array later may be faster but is
# probably not relevant compared to the cost of actually reading and
# converting the data
X = None
for x in read_data(_loadtxt_chunksize):
if X is None:
X = np.array(x, dtype)
else:
nshape = list(X.shape)
pos = nshape[0]
nshape[0] += len(x)
X.resize(nshape, refcheck=False)
X[pos:, ...] = x
finally:
if fown:
fh.close()
if X is None:
X = np.array([], dtype)
# Multicolumn data are returned with shape (1, N, M), i.e.
# (1, 1, M) for a single row - remove the singleton dimension there
if X.ndim == 3 and X.shape[:2] == (1, 1):
X.shape = (1, -1)
# Verify that the array has at least dimensions `ndmin`.
# Check correctness of the values of `ndmin`
if ndmin not in [0, 1, 2]:
raise ValueError('Illegal value of ndmin keyword: %s' % ndmin)
# Tweak the size and shape of the arrays - remove extraneous dimensions
if X.ndim > ndmin:
X = np.squeeze(X)
# and ensure we have the minimum number of dimensions asked for
# - has to be in this order for the odd case ndmin=1, X.squeeze().ndim=0
if X.ndim < ndmin:
if ndmin == 1:
X = np.atleast_1d(X)
elif ndmin == 2:
X = np.atleast_2d(X).T
if unpack:
if len(dtype_types) > 1:
# For structured arrays, return an array for each field.
return [X[field] for field in dtype.names]
else:
return X.T
else:
return X
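# Illustrative sketch (not part of NumPy): the chunked read-and-resize pattern
# used inside ``loadtxt`` above -- parse one block of rows at a time, then grow
# the output array in place instead of concatenating.  The helper name is
# hypothetical and ``chunks`` is assumed to be an iterable of row lists.
def _example_chunked_fill(chunks, dtype=float):
    """Accumulate an iterable of row chunks into a single array via resize."""
    import numpy as np
    X = None
    for chunk in chunks:
        if X is None:
            X = np.array(chunk, dtype)
        else:
            nshape = list(X.shape)
            pos = nshape[0]
            nshape[0] += len(chunk)
            X.resize(nshape, refcheck=False)
            X[pos:, ...] = chunk
    return np.array([], dtype) if X is None else X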
def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
header=None, footer=None, comments=None,
encoding=None):
return (X,)
@array_function_dispatch(_savetxt_dispatcher)
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
footer='', comments='# ', encoding=None):
"""
Save an array to a text file.
Parameters
----------
fname : filename or file handle
If the filename ends in ``.gz``, the file is automatically saved in
compressed gzip format. `loadtxt` understands gzipped files
transparently.
X : 1D or 2D array_like
Data to be saved to a text file.
fmt : str or sequence of strs, optional
A single format (%10.5f), a sequence of formats, or a
multi-format string, e.g. 'Iteration %d -- %10.5f', in which
case `delimiter` is ignored. For complex `X`, the legal options
for `fmt` are:
* a single specifier, `fmt='%.4e'`, resulting in numbers formatted
like `' (%s+%sj)' % (fmt, fmt)`
* a full string specifying every real and imaginary part, e.g.
`' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
* a list of specifiers, one per column - in this case, the real
and imaginary part must have separate specifiers,
e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
delimiter : str, optional
String or character separating columns.
newline : str, optional
String or character separating lines.
.. versionadded:: 1.5.0
header : str, optional
String that will be written at the beginning of the file.
.. versionadded:: 1.7.0
footer : str, optional
String that will be written at the end of the file.
.. versionadded:: 1.7.0
comments : str, optional
String that will be prepended to the ``header`` and ``footer`` strings,
to mark them as comments. Default: '# ', as expected by e.g.
``numpy.loadtxt``.
.. versionadded:: 1.7.0
encoding : {None, str}, optional
Encoding used to encode the outputfile. Does not apply to output
streams. If the encoding is something other than 'bytes' or 'latin1'
you will not be able to load the file in NumPy versions < 1.14. Default
is 'latin1'.
.. versionadded:: 1.14.0
See Also
--------
save : Save an array to a binary file in NumPy ``.npy`` format
savez : Save several arrays into an uncompressed ``.npz`` archive
savez_compressed : Save several arrays into a compressed ``.npz`` archive
Notes
-----
Further explanation of the `fmt` parameter
(``%[flag]width[.precision]specifier``):
flags:
``-`` : left justify
``+`` : Forces to precede result with + or -.
``0`` : Left pad the number with zeros instead of space (see width).
width:
Minimum number of characters to be printed. The value is not truncated
if it has more characters.
precision:
- For integer specifiers (eg. ``d,i,o,x``), the minimum number of
digits.
- For ``e, E`` and ``f`` specifiers, the number of digits to print
after the decimal point.
- For ``g`` and ``G``, the maximum number of significant digits.
- For ``s``, the maximum number of characters.
specifiers:
``c`` : character
``d`` or ``i`` : signed decimal integer
``e`` or ``E`` : scientific notation with ``e`` or ``E``.
``f`` : decimal floating point
``g,G`` : use the shorter of ``e,E`` or ``f``
``o`` : signed octal
``s`` : string of characters
``u`` : unsigned decimal integer
``x,X`` : unsigned hexadecimal integer
This explanation of ``fmt`` is not complete, for an exhaustive
specification see [1]_.
References
----------
.. [1] `Format Specification Mini-Language
<https://docs.python.org/library/string.html#format-specification-mini-language>`_,
Python Documentation.
Examples
--------
>>> x = y = z = np.arange(0.0,5.0,1.0)
>>> np.savetxt('test.out', x, delimiter=',') # X is an array
>>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays
>>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation
"""
# Py3 conversions first
if isinstance(fmt, bytes):
fmt = asstr(fmt)
delimiter = asstr(delimiter)
class WriteWrap(object):
"""Convert to unicode in py2 or to bytes on bytestream inputs.
"""
def __init__(self, fh, encoding):
self.fh = fh
self.encoding = encoding
self.do_write = self.first_write
def close(self):
self.fh.close()
def write(self, v):
self.do_write(v)
def write_bytes(self, v):
if isinstance(v, bytes):
self.fh.write(v)
else:
self.fh.write(v.encode(self.encoding))
def write_normal(self, v):
self.fh.write(asunicode(v))
def first_write(self, v):
try:
self.write_normal(v)
self.write = self.write_normal
except TypeError:
# input is probably a bytestream
self.write_bytes(v)
self.write = self.write_bytes
own_fh = False
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if _is_string_like(fname):
# datasource doesn't support creating a new file ...
open(fname, 'wt').close()
fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
own_fh = True
# need to convert str to unicode for text io output
if sys.version_info[0] == 2:
fh = WriteWrap(fh, encoding or 'latin1')
elif hasattr(fname, 'write'):
# wrap to handle byte output streams
fh = WriteWrap(fname, encoding or 'latin1')
else:
raise ValueError('fname must be a string or file handle')
try:
X = np.asarray(X)
# Handle 1-dimensional arrays
if X.ndim == 0 or X.ndim > 2:
raise ValueError(
"Expected 1D or 2D array, got %dD array instead" % X.ndim)
elif X.ndim == 1:
# Common case -- 1d array of numbers
if X.dtype.names is None:
X = np.atleast_2d(X).T
ncol = 1
# Complex dtype -- each field indicates a separate column
else:
ncol = len(X.dtype.names)
else:
ncol = X.shape[1]
iscomplex_X = np.iscomplexobj(X)
# `fmt` can be a string with multiple insertion points or a
        # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
if type(fmt) in (list, tuple):
if len(fmt) != ncol:
raise AttributeError('fmt has wrong shape. %s' % str(fmt))
format = asstr(delimiter).join(map(asstr, fmt))
elif isinstance(fmt, basestring):
n_fmt_chars = fmt.count('%')
error = ValueError('fmt has wrong number of %% formats: %s' % fmt)
if n_fmt_chars == 1:
if iscomplex_X:
fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
else:
fmt = [fmt, ] * ncol
format = delimiter.join(fmt)
elif iscomplex_X and n_fmt_chars != (2 * ncol):
raise error
elif ((not iscomplex_X) and n_fmt_chars != ncol):
raise error
else:
format = fmt
else:
raise ValueError('invalid fmt: %r' % (fmt,))
if len(header) > 0:
header = header.replace('\n', '\n' + comments)
fh.write(comments + header + newline)
if iscomplex_X:
for row in X:
row2 = []
for number in row:
row2.append(number.real)
row2.append(number.imag)
s = format % tuple(row2) + newline
fh.write(s.replace('+-', '-'))
else:
for row in X:
try:
v = format % tuple(row) + newline
except TypeError:
raise TypeError("Mismatch between array dtype ('%s') and "
"format specifier ('%s')"
% (str(X.dtype), format))
fh.write(v)
if len(footer) > 0:
footer = footer.replace('\n', '\n' + comments)
fh.write(comments + footer + newline)
finally:
if own_fh:
fh.close()
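# Illustrative sketch (not part of NumPy): how a single ``fmt`` specifier is
# expanded for complex input in ``savetxt`` above -- every column becomes a
# ``(real+imagj)`` pair and any ``'+-'`` produced by negative imaginary parts
# is collapsed to ``'-'``.  The helper name is hypothetical.
def _example_complex_row_format(row, fmt='%.4e', delimiter=' '):
    """Format one row of complex numbers the way savetxt does."""
    per_col = ' (%s+%sj)' % (fmt, fmt)
    line_fmt = delimiter.join([per_col] * len(row))
    flat = []
    for number in row:
        flat.append(number.real)
        flat.append(number.imag)
    return (line_fmt % tuple(flat)).replace('+-', '-')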
@set_module('numpy')
def fromregex(file, regexp, dtype, encoding=None):
"""
Construct an array from a text file, using regular expression parsing.
The returned array is always a structured array, and is constructed from
all matches of the regular expression in the file. Groups in the regular
expression are converted to fields of the structured array.
Parameters
----------
file : str or file
Filename or file object to read.
regexp : str or regexp
Regular expression used to parse the file.
Groups in the regular expression correspond to fields in the dtype.
dtype : dtype or list of dtypes
Dtype for the structured array.
encoding : str, optional
Encoding used to decode the inputfile. Does not apply to input streams.
.. versionadded:: 1.14.0
Returns
-------
output : ndarray
The output array, containing the part of the content of `file` that
was matched by `regexp`. `output` is always a structured array.
Raises
------
TypeError
When `dtype` is not a valid dtype for a structured array.
See Also
--------
fromstring, loadtxt
Notes
-----
Dtypes for structured arrays can be specified in several forms, but all
forms specify at least the data type and field name. For details see
`doc.structured_arrays`.
Examples
--------
>>> f = open('test.dat', 'w')
>>> _ = f.write("1312 foo\\n1534 bar\\n444 qux")
>>> f.close()
>>> regexp = r"(\\d+)\\s+(...)" # match [digits, whitespace, anything]
>>> output = np.fromregex('test.dat', regexp,
... [('num', np.int64), ('key', 'S3')])
>>> output
array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
dtype=[('num', '<i8'), ('key', 'S3')])
>>> output['num']
array([1312, 1534, 444])
"""
own_fh = False
if not hasattr(file, "read"):
file = np.lib._datasource.open(file, 'rt', encoding=encoding)
own_fh = True
try:
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
content = file.read()
if isinstance(content, bytes) and isinstance(regexp, np.compat.unicode):
regexp = asbytes(regexp)
elif isinstance(content, np.compat.unicode) and isinstance(regexp, bytes):
regexp = asstr(regexp)
if not hasattr(regexp, 'match'):
regexp = re.compile(regexp)
seq = regexp.findall(content)
if seq and not isinstance(seq[0], tuple):
# Only one group is in the regexp.
# Create the new array as a single data-type and then
# re-interpret as a single-field structured array.
newdtype = np.dtype(dtype[dtype.names[0]])
output = np.array(seq, dtype=newdtype)
output.dtype = dtype
else:
output = np.array(seq, dtype=dtype)
return output
finally:
if own_fh:
file.close()
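# Illustrative sketch (not part of NumPy): the single-group case handled by
# ``fromregex`` above -- matches of a one-group pattern are first collected
# with the plain field dtype and then re-viewed as a one-field structured
# array.  The helper name and sample text are assumptions for this example.
def _example_single_group_records():
    """Extract the numeric column of 'name value' lines as a structured array."""
    import re
    import numpy as np
    text = "alpha 10\nbeta 20\ngamma 30"
    values = re.findall(r"\w+ (\d+)", text)        # ['10', '20', '30']
    dtype = np.dtype([('value', np.int64)])
    out = np.array(values, dtype=dtype['value'])   # plain int64 array first
    out.dtype = dtype                              # reinterpret as one field
    return out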
#####--------------------------------------------------------------------------
#---- --- ASCII functions ---
#####--------------------------------------------------------------------------
@set_module('numpy')
def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
skip_header=0, skip_footer=0, converters=None,
missing_values=None, filling_values=None, usecols=None,
names=None, excludelist=None,
deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
replace_space='_', autostrip=False, case_sensitive=True,
defaultfmt="f%i", unpack=None, usemask=False, loose=True,
invalid_raise=True, max_rows=None, encoding='bytes'):
"""
Load data from a text file, with missing values handled as specified.
Each line past the first `skip_header` lines is split at the `delimiter`
character, and characters following the `comments` character are discarded.
Parameters
----------
fname : file, str, pathlib.Path, list of str, generator
File, filename, list, or generator to read. If the filename
extension is `.gz` or `.bz2`, the file is first decompressed. Note
that generators must return byte strings in Python 3k. The strings
in a list or produced by a generator are treated as lines.
dtype : dtype, optional
Data type of the resulting array.
If None, the dtypes will be determined by the contents of each
column, individually.
comments : str, optional
The character used to indicate the start of a comment.
All the characters occurring on a line after a comment are discarded
delimiter : str, int, or sequence, optional
The string used to separate values. By default, any consecutive
whitespaces act as delimiter. An integer or sequence of integers
can also be provided as width(s) of each field.
skiprows : int, optional
`skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
skip_header : int, optional
The number of lines to skip at the beginning of the file.
skip_footer : int, optional
The number of lines to skip at the end of the file.
converters : variable, optional
The set of functions that convert the data of a column to a value.
The converters can also be used to provide a default value
for missing data: ``converters = {3: lambda s: float(s or 0)}``.
missing : variable, optional
`missing` was removed in numpy 1.10. Please use `missing_values`
instead.
missing_values : variable, optional
The set of strings corresponding to missing data.
filling_values : variable, optional
The set of values to be used as default when the data are missing.
usecols : sequence, optional
Which columns to read, with 0 being the first. For example,
``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
names : {None, True, str, sequence}, optional
If `names` is True, the field names are read from the first line after
        the first `skip_header` lines. This line can optionally be preceded
        by a comment delimiter. If `names` is a sequence or a single string of
comma-separated names, the names will be used to define the field names
in a structured dtype. If `names` is None, the names of the dtype
fields will be used, if any.
excludelist : sequence, optional
A list of names to exclude. This list is appended to the default list
        ['return','file','print']. Excluded names have an underscore appended:
for example, `file` would become `file_`.
deletechars : str, optional
A string combining invalid characters that must be deleted from the
names.
defaultfmt : str, optional
A format used to define default field names, such as "f%i" or "f_%02i".
autostrip : bool, optional
Whether to automatically strip white spaces from the variables.
replace_space : char, optional
Character(s) used in replacement of white spaces in the variables
names. By default, use a '_'.
case_sensitive : {True, False, 'upper', 'lower'}, optional
If True, field names are case sensitive.
If False or 'upper', field names are converted to upper case.
If 'lower', field names are converted to lower case.
unpack : bool, optional
If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = genfromtxt(...)``
usemask : bool, optional
If True, return a masked array.
If False, return a regular array.
loose : bool, optional
If True, do not raise errors for invalid values.
invalid_raise : bool, optional
If True, an exception is raised if an inconsistency is detected in the
number of columns.
If False, a warning is emitted and the offending lines are skipped.
max_rows : int, optional
The maximum number of rows to read. Must not be used with skip_footer
at the same time. If given, the value must be at least 1. Default is
to read the entire file.
.. versionadded:: 1.10.0
encoding : str, optional
Encoding used to decode the inputfile. Does not apply when `fname` is
a file object. The special value 'bytes' enables backward compatibility
workarounds that ensure that you receive byte arrays when possible
        and pass latin1 encoded strings to converters. Override this value to
receive unicode arrays and pass strings as input to converters. If set
to None the system default is used. The default value is 'bytes'.
.. versionadded:: 1.14.0
Returns
-------
out : ndarray
Data read from the text file. If `usemask` is True, this is a
masked array.
See Also
--------
numpy.loadtxt : equivalent function when no data is missing.
Notes
-----
* When spaces are used as delimiters, or when no delimiter has been given
as input, there should not be any missing data between two fields.
    * When the variables are named (either by a flexible dtype or with `names`),
there must not be any header in the file (else a ValueError
exception is raised).
* Individual values are not stripped of spaces by default.
When using a custom converter, make sure the function does remove spaces.
References
----------
.. [1] NumPy User Guide, section `I/O with NumPy
<https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
Examples
    --------
>>> from io import StringIO
>>> import numpy as np
Comma delimited file with mixed dtype
>>> s = StringIO(u"1,1.3,abcde")
>>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
... ('mystring','S5')], delimiter=",")
>>> data
array((1, 1.3, b'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
Using dtype = None
>>> _ = s.seek(0) # needed for StringIO example only
>>> data = np.genfromtxt(s, dtype=None,
... names = ['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, b'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
Specifying dtype and names
>>> _ = s.seek(0)
>>> data = np.genfromtxt(s, dtype="i8,f8,S5",
... names=['myint','myfloat','mystring'], delimiter=",")
>>> data
array((1, 1.3, b'abcde'),
dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
An example with fixed-width columns
>>> s = StringIO(u"11.3abcde")
>>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
... delimiter=[1,3,5])
>>> data
array((1, 1.3, b'abcde'),
dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
An example to show comments
>>> f = StringIO('''
... text,# of chars
... hello world,11
... numpy,5''')
>>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
dtype=[('f0', 'S12'), ('f1', 'S12')])
"""
if max_rows is not None:
if skip_footer:
raise ValueError(
"The keywords 'skip_footer' and 'max_rows' can not be "
"specified at the same time.")
if max_rows < 1:
raise ValueError("'max_rows' must be at least 1.")
if usemask:
from numpy.ma import MaskedArray, make_mask_descr
# Check the input dictionary of converters
user_converters = converters or {}
if not isinstance(user_converters, dict):
        raise TypeError(
            "The input argument 'converters' should be a valid dictionary "
"(got '%s' instead)" % type(user_converters))
if encoding == 'bytes':
encoding = None
byte_converters = True
else:
byte_converters = False
# Initialize the filehandle, the LineSplitter and the NameValidator
try:
if isinstance(fname, os_PathLike):
fname = os_fspath(fname)
if isinstance(fname, basestring):
fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
fid_ctx = contextlib.closing(fid)
else:
fid = fname
fid_ctx = contextlib_nullcontext(fid)
fhd = iter(fid)
except TypeError:
raise TypeError(
"fname must be a string, filehandle, list of strings, "
"or generator. Got %s instead." % type(fname))
with fid_ctx:
split_line = LineSplitter(delimiter=delimiter, comments=comments,
autostrip=autostrip, encoding=encoding)
validate_names = NameValidator(excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Skip the first `skip_header` rows
try:
for i in range(skip_header):
next(fhd)
# Keep on until we find the first valid values
first_values = None
while not first_values:
first_line = _decode_line(next(fhd), encoding)
if (names is True) and (comments is not None):
if comments in first_line:
first_line = (
''.join(first_line.split(comments)[1:]))
first_values = split_line(first_line)
except StopIteration:
# return an empty array if the datafile is empty
first_line = ''
first_values = []
warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
# Should we take the first values as names ?
if names is True:
fval = first_values[0].strip()
if comments is not None:
if fval in comments:
del first_values[0]
# Check the columns to use: make sure `usecols` is a list
if usecols is not None:
try:
usecols = [_.strip() for _ in usecols.split(",")]
except AttributeError:
try:
usecols = list(usecols)
except TypeError:
usecols = [usecols, ]
nbcols = len(usecols or first_values)
# Check the names and overwrite the dtype.names if needed
if names is True:
names = validate_names([str(_.strip()) for _ in first_values])
first_line = ''
elif _is_string_like(names):
names = validate_names([_.strip() for _ in names.split(',')])
elif names:
names = validate_names(names)
# Get the dtype
if dtype is not None:
dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
excludelist=excludelist,
deletechars=deletechars,
case_sensitive=case_sensitive,
replace_space=replace_space)
# Make sure the names is a list (for 2.5)
if names is not None:
names = list(names)
if usecols:
for (i, current) in enumerate(usecols):
# if usecols is a list of names, convert to a list of indices
if _is_string_like(current):
usecols[i] = names.index(current)
elif current < 0:
usecols[i] = current + len(first_values)
# If the dtype is not None, make sure we update it
if (dtype is not None) and (len(dtype) > nbcols):
descr = dtype.descr
dtype = np.dtype([descr[_] for _ in usecols])
names = list(dtype.names)
# If `names` is not None, update the names
elif (names is not None) and (len(names) > nbcols):
names = [names[_] for _ in usecols]
elif (names is not None) and (dtype is not None):
names = list(dtype.names)
# Process the missing values ...............................
# Rename missing_values for convenience
user_missing_values = missing_values or ()
if isinstance(user_missing_values, bytes):
user_missing_values = user_missing_values.decode('latin1')
# Define the list of missing_values (one column: one list)
missing_values = [list(['']) for _ in range(nbcols)]
# We have a dictionary: process it field by field
if isinstance(user_missing_values, dict):
# Loop on the items
for (key, val) in user_missing_values.items():
# Is the key a string ?
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped
continue
# Redefine the key as needed if it's a column number
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Transform the value as a list of string
if isinstance(val, (list, tuple)):
val = [str(_) for _ in val]
else:
val = [str(val), ]
# Add the value(s) to the current list of missing
if key is None:
# None acts as default
for miss in missing_values:
miss.extend(val)
else:
missing_values[key].extend(val)
# We have a sequence : each item matches a column
elif isinstance(user_missing_values, (list, tuple)):
for (value, entry) in zip(user_missing_values, missing_values):
value = str(value)
if value not in entry:
entry.append(value)
# We have a string : apply it to all entries
elif isinstance(user_missing_values, basestring):
user_value = user_missing_values.split(",")
for entry in missing_values:
entry.extend(user_value)
# We have something else: apply it to all entries
else:
for entry in missing_values:
entry.extend([str(user_missing_values)])
# Process the filling_values ...............................
# Rename the input for convenience
user_filling_values = filling_values
if user_filling_values is None:
user_filling_values = []
# Define the default
filling_values = [None] * nbcols
# We have a dictionary : update each entry individually
if isinstance(user_filling_values, dict):
for (key, val) in user_filling_values.items():
if _is_string_like(key):
try:
# Transform it into an integer
key = names.index(key)
except ValueError:
# We couldn't find it: the name must have been dropped,
continue
# Redefine the key if it's a column number and usecols is defined
if usecols:
try:
key = usecols.index(key)
except ValueError:
pass
# Add the value to the list
filling_values[key] = val
# We have a sequence : update on a one-to-one basis
elif isinstance(user_filling_values, (list, tuple)):
n = len(user_filling_values)
if (n <= nbcols):
filling_values[:n] = user_filling_values
else:
filling_values = user_filling_values[:nbcols]
# We have something else : use it for all entries
else:
filling_values = [user_filling_values] * nbcols
# Initialize the converters ................................
if dtype is None:
# Note: we can't use a [...]*nbcols, as we would have 3 times the same
# ... converter, instead of 3 different converters.
converters = [StringConverter(None, missing_values=miss, default=fill)
for (miss, fill) in zip(missing_values, filling_values)]
else:
dtype_flat = flatten_dtype(dtype, flatten_base=True)
# Initialize the converters
if len(dtype_flat) > 1:
# Flexible type : get a converter from each dtype
zipit = zip(dtype_flat, missing_values, filling_values)
converters = [StringConverter(dt, locked=True,
missing_values=miss, default=fill)
for (dt, miss, fill) in zipit]
else:
# Set to a default converter (but w/ different missing values)
zipit = zip(missing_values, filling_values)
converters = [StringConverter(dtype, locked=True,
missing_values=miss, default=fill)
for (miss, fill) in zipit]
# Update the converters to use the user-defined ones
uc_update = []
for (j, conv) in user_converters.items():
# If the converter is specified by column names, use the index instead
if _is_string_like(j):
try:
j = names.index(j)
i = j
except ValueError:
continue
elif usecols:
try:
i = usecols.index(j)
except ValueError:
# Unused converter specified
continue
else:
i = j
# Find the value to test - first_line is not filtered by usecols:
if len(first_line):
testing_value = first_values[j]
else:
testing_value = None
if conv is bytes:
user_conv = asbytes
elif byte_converters:
# converters may use decode to workaround numpy's old behaviour,
# so encode the string again before passing to the user converter
def tobytes_first(x, conv):
if type(x) is bytes:
return conv(x)
return conv(x.encode("latin1"))
user_conv = functools.partial(tobytes_first, conv=conv)
else:
user_conv = conv
converters[i].update(user_conv, locked=True,
testing_value=testing_value,
default=filling_values[i],
missing_values=missing_values[i],)
uc_update.append((i, user_conv))
# Make sure we have the corrected keys in user_converters...
user_converters.update(uc_update)
# Fixme: possible error as following variable never used.
# miss_chars = [_.missing_values for _ in converters]
# Initialize the output lists ...
# ... rows
rows = []
append_to_rows = rows.append
# ... masks
if usemask:
masks = []
append_to_masks = masks.append
# ... invalid
invalid = []
append_to_invalid = invalid.append
# Parse each line
for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
values = split_line(line)
nbvalues = len(values)
# Skip an empty line
if nbvalues == 0:
continue
if usecols:
# Select only the columns we need
try:
values = [values[_] for _ in usecols]
except IndexError:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
elif nbvalues != nbcols:
append_to_invalid((i + skip_header + 1, nbvalues))
continue
# Store the values
append_to_rows(tuple(values))
if usemask:
append_to_masks(tuple([v.strip() in m
for (v, m) in zip(values,
missing_values)]))
if len(rows) == max_rows:
break
# Upgrade the converters (if needed)
if dtype is None:
for (i, converter) in enumerate(converters):
current_column = [itemgetter(i)(_m) for _m in rows]
try:
converter.iterupgrade(current_column)
except ConverterLockError:
errmsg = "Converter #%i is locked and cannot be upgraded: " % i
current_column = map(itemgetter(i), rows)
for (j, value) in enumerate(current_column):
try:
converter.upgrade(value)
except (ConverterError, ValueError):
errmsg += "(occurred line #%i for value '%s')"
errmsg %= (j + 1 + skip_header, value)
raise ConverterError(errmsg)
# Check that we don't have invalid values
nbinvalid = len(invalid)
if nbinvalid > 0:
nbrows = len(rows) + nbinvalid - skip_footer
# Construct the error message
template = " Line #%%i (got %%i columns instead of %i)" % nbcols
if skip_footer > 0:
nbinvalid_skipped = len([_ for _ in invalid
if _[0] > nbrows + skip_header])
invalid = invalid[:nbinvalid - nbinvalid_skipped]
skip_footer -= nbinvalid_skipped
#
# nbrows -= skip_footer
# errmsg = [template % (i, nb)
# for (i, nb) in invalid if i < nbrows]
# else:
errmsg = [template % (i, nb)
for (i, nb) in invalid]
if len(errmsg):
errmsg.insert(0, "Some errors were detected !")
errmsg = "\n".join(errmsg)
# Raise an exception ?
if invalid_raise:
raise ValueError(errmsg)
# Issue a warning ?
else:
warnings.warn(errmsg, ConversionWarning, stacklevel=2)
# Strip the last skip_footer data
if skip_footer > 0:
rows = rows[:-skip_footer]
if usemask:
masks = masks[:-skip_footer]
# Convert each value according to the converter:
# We want to modify the list in place to avoid creating a new one...
if loose:
rows = list(
zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
else:
rows = list(
zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
for (i, conv) in enumerate(converters)]))
# Reset the dtype
data = rows
if dtype is None:
# Get the dtypes from the types of the converters
column_types = [conv.type for conv in converters]
# Find the columns with strings...
strcolidx = [i for (i, v) in enumerate(column_types)
if v == np.unicode_]
if byte_converters and strcolidx:
# convert strings back to bytes for backward compatibility
warnings.warn(
"Reading unicode strings without specifying the encoding "
"argument is deprecated. Set the encoding, use None for the "
"system default.",
np.VisibleDeprecationWarning, stacklevel=2)
def encode_unicode_cols(row_tup):
row = list(row_tup)
for i in strcolidx:
row[i] = row[i].encode('latin1')
return tuple(row)
try:
data = [encode_unicode_cols(r) for r in data]
except UnicodeEncodeError:
pass
else:
for i in strcolidx:
column_types[i] = np.bytes_
# Update string types to be the right length
sized_column_types = column_types[:]
for i, col_type in enumerate(column_types):
if np.issubdtype(col_type, np.character):
n_chars = max(len(row[i]) for row in data)
sized_column_types[i] = (col_type, n_chars)
if names is None:
# If the dtype is uniform (before sizing strings)
base = {
c_type
for c, c_type in zip(converters, column_types)
if c._checked}
if len(base) == 1:
uniform_type, = base
(ddtype, mdtype) = (uniform_type, bool)
else:
ddtype = [(defaultfmt % i, dt)
for (i, dt) in enumerate(sized_column_types)]
if usemask:
mdtype = [(defaultfmt % i, bool)
for (i, dt) in enumerate(sized_column_types)]
else:
ddtype = list(zip(names, sized_column_types))
mdtype = list(zip(names, [bool] * len(sized_column_types)))
output = np.array(data, dtype=ddtype)
if usemask:
outputmask = np.array(masks, dtype=mdtype)
else:
# Overwrite the initial dtype names if needed
if names and dtype.names is not None:
dtype.names = names
# Case 1. We have a structured type
if len(dtype_flat) > 1:
# Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
# First, create the array using a flattened dtype:
# [('a', int), ('b1', int), ('b2', float)]
# Then, view the array using the specified dtype.
if 'O' in (_.char for _ in dtype_flat):
if has_nested_fields(dtype):
raise NotImplementedError(
"Nested fields involving objects are not supported...")
else:
output = np.array(data, dtype=dtype)
else:
rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
output = rows.view(dtype)
# Now, process the rowmasks the same way
if usemask:
rowmasks = np.array(
masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
# Construct the new dtype
mdtype = make_mask_descr(dtype)
outputmask = rowmasks.view(mdtype)
# Case #2. We have a basic dtype
else:
# We used some user-defined converters
if user_converters:
ishomogeneous = True
descr = []
for i, ttype in enumerate([conv.type for conv in converters]):
# Keep the dtype of the current converter
if i in user_converters:
ishomogeneous &= (ttype == dtype.type)
if np.issubdtype(ttype, np.character):
ttype = (ttype, max(len(row[i]) for row in data))
descr.append(('', ttype))
else:
descr.append(('', dtype))
# So we changed the dtype ?
if not ishomogeneous:
# We have more than one field
if len(descr) > 1:
dtype = np.dtype(descr)
# We have only one field: drop the name if not needed.
else:
dtype = np.dtype(ttype)
#
output = np.array(data, dtype)
if usemask:
if dtype.names is not None:
mdtype = [(_, bool) for _ in dtype.names]
else:
mdtype = bool
outputmask = np.array(masks, dtype=mdtype)
# Try to take care of the missing data we missed
names = output.dtype.names
if usemask and names:
for (name, conv) in zip(names, converters):
missing_values = [conv(_) for _ in conv.missing_values
if _ != '']
for mval in missing_values:
outputmask[name] |= (output[name] == mval)
# Construct the final array
if usemask:
output = output.view(MaskedArray)
output._mask = outputmask
if unpack:
return output.squeeze().T
return output.squeeze()
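# Illustrative usage sketch (not part of the original module): the masked-array
# path implemented above, exercised on a small in-memory table. The values and
# the StringIO buffer are made up for the example.
#
#   from io import StringIO
#   buf = StringIO(u"1,2,3\n4,,6")
#   arr = np.genfromtxt(buf, delimiter=",", usemask=True)
#   # arr is a 2x3 MaskedArray; the empty field in the second row is masked.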
def ndfromtxt(fname, **kwargs):
"""
Load ASCII data stored in a file and return it as a single array.
.. deprecated:: 1.17
    `ndfromtxt` is a deprecated alias of `genfromtxt` which
overwrites the ``usemask`` argument with `False` even when
explicitly called as ``ndfromtxt(..., usemask=True)``.
Use `genfromtxt` instead.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function.
"""
kwargs['usemask'] = False
# Numpy 1.17
warnings.warn(
"np.ndfromtxt is a deprecated alias of np.genfromtxt, "
"prefer the latter.",
DeprecationWarning, stacklevel=2)
return genfromtxt(fname, **kwargs)
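# Migration sketch (illustrative): the deprecated wrapper above only forces
# ``usemask=False``, so calls can be rewritten directly against genfromtxt.
# The filename below is hypothetical.
#
#   data = np.genfromtxt("table.txt", usemask=False)   # instead of np.ndfromtxt("table.txt")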
def mafromtxt(fname, **kwargs):
"""
Load ASCII data stored in a text file and return a masked array.
.. deprecated:: 1.17
np.mafromtxt is a deprecated alias of `genfromtxt` which
overwrites the ``usemask`` argument with `True` even when
explicitly called as ``mafromtxt(..., usemask=False)``.
Use `genfromtxt` instead.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
"""
kwargs['usemask'] = True
# Numpy 1.17
warnings.warn(
"np.mafromtxt is a deprecated alias of np.genfromtxt, "
"prefer the latter.",
DeprecationWarning, stacklevel=2)
return genfromtxt(fname, **kwargs)
def recfromtxt(fname, **kwargs):
"""
Load ASCII data from a file and return it in a record array.
    If ``usemask=False`` a standard `recarray` is returned;
    if ``usemask=True`` a MaskedRecords array is returned.
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
kwargs.setdefault("dtype", None)
usemask = kwargs.get('usemask', False)
output = genfromtxt(fname, **kwargs)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
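# Illustrative sketch (not part of the original module): because ``dtype``
# defaults to None, field types are inferred and the result supports attribute
# access through the recarray view created above. The data is made up.
#
#   from io import StringIO
#   rec = recfromtxt(StringIO(u"1 abc 2.5\n2 def 3.5"))
#   # rec.f0 -> array([1, 2]); rec.f1 -> the string column; rec.f2 -> array([2.5, 3.5])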
def recfromcsv(fname, **kwargs):
"""
Load ASCII data stored in a comma-separated file.
The returned array is a record array (if ``usemask=False``, see
`recarray`) or a masked record array (if ``usemask=True``,
see `ma.mrecords.MaskedRecords`).
Parameters
----------
fname, kwargs : For a description of input parameters, see `genfromtxt`.
See Also
--------
numpy.genfromtxt : generic function to load ASCII data.
Notes
-----
By default, `dtype` is None, which means that the data-type of the output
array will be determined from the data.
"""
# Set default kwargs for genfromtxt as relevant to csv import.
kwargs.setdefault("case_sensitive", "lower")
kwargs.setdefault("names", True)
kwargs.setdefault("delimiter", ",")
kwargs.setdefault("dtype", None)
output = genfromtxt(fname, **kwargs)
usemask = kwargs.get("usemask", False)
if usemask:
from numpy.ma.mrecords import MaskedRecords
output = output.view(MaskedRecords)
else:
output = output.view(np.recarray)
return output
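# Illustrative sketch (not part of the original module): recfromcsv lower-cases
# the header names (``case_sensitive="lower"`` above), so fields are reached
# through lower-case attributes. The CSV content is made up.
#
#   from io import StringIO
#   rec = recfromcsv(StringIO(u"Name,Value\nfoo,1\nbar,2"))
#   # rec.name -> the string column; rec.value -> array([1, 2])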
| bsd-3-clause |
phdowling/scikit-learn | examples/linear_model/plot_polynomial_interpolation.py | 251 | 1895 | #!/usr/bin/env python
"""
========================
Polynomial interpolation
========================
This example demonstrates how to approximate a function with a polynomial of
degree n_degree by using ridge regression. Concretely, from n_samples 1d
points, it suffices to build the Vandermonde matrix, which is n_samples x
n_degree+1 and has the following form:
[[1, x_1, x_1 ** 2, x_1 ** 3, ...],
[1, x_2, x_2 ** 2, x_2 ** 3, ...],
...]
Intuitively, this matrix can be interpreted as a matrix of pseudo features (the
points raised to some power). The matrix is akin to (but different from) the
matrix induced by a polynomial kernel.
This example shows that you can do non-linear regression with a linear model,
using a pipeline to add non-linear features. Kernel methods extend this idea
and can induce very high (even infinite) dimensional feature spaces.
"""
print(__doc__)
# Author: Mathieu Blondel
# Jake Vanderplas
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
def f(x):
""" function to approximate by polynomial interpolation"""
return x * np.sin(x)
# generate points used to plot
x_plot = np.linspace(0, 10, 100)
# generate points and keep a subset of them
x = np.linspace(0, 10, 100)
rng = np.random.RandomState(0)
rng.shuffle(x)
x = np.sort(x[:20])
y = f(x)
# create matrix versions of these arrays
X = x[:, np.newaxis]
X_plot = x_plot[:, np.newaxis]
plt.plot(x_plot, f(x_plot), label="ground truth")
plt.scatter(x, y, label="training points")
for degree in [3, 4, 5]:
model = make_pipeline(PolynomialFeatures(degree), Ridge())
model.fit(X, y)
y_plot = model.predict(X_plot)
plt.plot(x_plot, y_plot, label="degree %d" % degree)
plt.legend(loc='lower left')
plt.show()
| bsd-3-clause |
LinkedEarth/Pyleoclim_util | pyleoclim/tests/test_ui_MultiplePSD.py | 1 | 3493 | ''' Tests for pyleoclim.core.ui.MultiplePSD
Naming rules:
1. class: Test{filename}{Class}{method} with appropriate camel case
2. function: test_{method}_t{test_id}
Notes on how to test:
0. Make sure [pytest](https://docs.pytest.org) has been installed: `pip install pytest`
1. execute `pytest {directory_path}` in terminal to perform all tests in all testing files inside the specified directory
2. execute `pytest {file_path}` in terminal to perform all tests in the specified file
3. execute `pytest {file_path}::{TestClass}::{test_method}` in terminal to perform a specific test class/method inside the specified file
4. after `pip install pytest-xdist`, one may execute "pytest -n 4" to test in parallel with number of workers specified by `-n`
5. for more details, see https://docs.pytest.org/en/stable/usage.html
'''
import numpy as np
import pandas as pd
from numpy.testing import assert_array_equal
from pandas.testing import assert_frame_equal
import pytest
import pyleoclim as pyleo
from pyleoclim.utils.tsmodel import (
ar1_sim,
colored_noise,
)
# a collection of useful functions
def gen_normal(loc=0, scale=1, nt=100):
''' Generate random data with a Gaussian distribution
'''
t = np.arange(nt)
v = np.random.normal(loc=loc, scale=scale, size=nt)
return t, v
def gen_colored_noise(alpha=1, nt=100, f0=None, m=None, seed=None):
''' Generate colored noise
'''
t = np.arange(nt)
v = colored_noise(alpha=alpha, t=t, f0=f0, m=m, seed=seed)
return t, v
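# Both helpers above return (t, v) arrays that the test classes below wrap in
# pyleo.Series; e.g. (illustrative)
#   t, v = gen_colored_noise(alpha=1.0, nt=500)
#   ts = pyleo.Series(time=t, value=v)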
# Tests below
class TestUiMultiplePsdBetaEst:
''' Tests for MultiplePSD.beta_est()
'''
def test_beta_est_t0(self, eps=0.3):
''' Test MultiplePSD.beta_est() of a list of colored noise
'''
alphas = np.arange(0.5, 1.5, 0.1)
t, v = {}, {}
series_list = []
for idx, alpha in enumerate(alphas):
t[idx], v[idx] = gen_colored_noise(nt=1000, alpha=alpha)
series_list.append(pyleo.Series(time=t[idx], value=v[idx]))
ts_surrs = pyleo.MultipleSeries(series_list=series_list)
psds = ts_surrs.spectral(method='mtm')
betas = psds.beta_est()['beta']
for idx, beta in enumerate(betas):
assert np.abs(beta-alphas[idx]) < eps
class TestUiMultiplePsdPlot:
''' Tests for MultiplePSD.plot()
'''
def test_plot_t0(self):
''' Test MultiplePSD.plot() of a list of colored noise
'''
alphas = np.arange(0.5, 1.5, 0.1)
t, v = {}, {}
series_list = []
for idx, alpha in enumerate(alphas):
t[idx], v[idx] = gen_colored_noise(nt=1000, alpha=alpha)
series_list.append(pyleo.Series(time=t[idx], value=v[idx]))
ts_surrs = pyleo.MultipleSeries(series_list=series_list)
psds = ts_surrs.spectral(method='mtm')
fig, ax = psds.plot(mute=True)
class TestUiMultiplePsdPlotEnvelope:
''' Tests for MultiplePSD.plot()
'''
def test_plot_envelope_t0(self):
''' Test MultiplePSD.plot() of a list of colored noise
'''
alphas = np.arange(0.5, 1.5, 0.1)
t, v = {}, {}
series_list = []
for idx, alpha in enumerate(alphas):
t[idx], v[idx] = gen_colored_noise(nt=1000, alpha=alpha)
series_list.append(pyleo.Series(time=t[idx], value=v[idx]))
ts_surrs = pyleo.MultipleSeries(series_list=series_list)
psds = ts_surrs.spectral(method='mtm')
fig, ax = psds.plot_envelope(mute=True) | gpl-3.0 |
precedenceguo/mxnet | example/named_entity_recognition/src/preprocess.py | 10 | 2002 | # !/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
#read in csv of NER training data
df = pd.read_csv("../data/ner_dataset.csv", encoding="ISO-8859-1")
#rename columns
df = df.rename(columns = {"Sentence #" : "utterance_id",
"Word" : "token",
"POS" : "POS_tag",
"Tag" : "BILOU_tag"})
#clean utterance_id column
df.loc[:, "utterance_id"] = df["utterance_id"].str.replace('Sentence: ', '')
#fill np.nan utterance IDs with the last valid entry
df = df.fillna(method='ffill')
df.loc[:, "utterance_id"] = df["utterance_id"].apply(int)
#melt BILOU tags and tokens into an array per utterance
df1 = df.groupby("utterance_id")["BILOU_tag"].apply(lambda x: np.array(x)).to_frame().reset_index()
df2 = df.groupby("utterance_id")["token"].apply(lambda x: np.array(x)).to_frame().reset_index()
df3 = df.groupby("utterance_id")["POS_tag"].apply(lambda x: np.array(x)).to_frame().reset_index()
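#the three groupby/apply calls above all collapse one column into an array per
#utterance; on a toy frame (illustrative only, not part of this script):
#   toy = pd.DataFrame({"utterance_id": [1, 1, 2], "token": ["New", "York", "hi"]})
#   toy.groupby("utterance_id")["token"].apply(lambda x: np.array(x))
#   # -> id 1: array(['New', 'York']), id 2: array(['hi'])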
#join the results on utterance id
df = df1.merge(df2.merge(df3, how = "left", on = "utterance_id"), how = "left", on = "utterance_id")
#save the dataframe to a csv file
df.to_pickle("../data/ner_data.pkl") | apache-2.0 |
bucricket/projectMASlst | processlst/landsatTools.py | 2 | 15909 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 5 13:03:17 2017
@author: mschull
"""
from __future__ import division, print_function, absolute_import
__author__ = 'jwely'
__all__ = ["landsat_metadata"]
# standard imports
import os.path
import numpy as np
import logging
from .utils import RasterError, _test_outside
logging.basicConfig(level=logging.DEBUG)
LOGGER = logging.getLogger('pydisalexi.geotiff')
from osgeo import gdal, osr
try:
from pyproj import Proj
except ImportError:
LOGGER.warning(
"PROJ4 is not available. " +
"Any method requiring coordinate transform will fail.")
from datetime import datetime
import inspect
class landsat_metadata:
"""
    A landsat metadata object. This class builds its attributes
    from the names of each tag in the .MTL metadata files that
come with landsat data. So, any tag that appears in the MTL file
will populate as an attribute of landsat_metadata.
    You can access and explore these attributes, for example:
.. code-block:: python
from dnppy import landsat
meta = landsat.landsat_metadata(my_filepath) # create object
from pprint import pprint # import pprint
        pprint(vars(meta)) # pretty print output
scene_id = meta.LANDSAT_SCENE_ID # access specific attribute
:param filename: the filepath to an MTL file.
"""
def __init__(self, filename):
"""
There are several critical attributes that keep a common
naming convention between all landsat versions, so they are
initialized in this class for good record keeping and reference
"""
# custom attribute additions
self.FILEPATH = filename
self.DATETIME_OBJ = None
# product metadata attributes
self.LANDSAT_SCENE_ID = None
self.DATA_TYPE = None
self.ELEVATION_SOURCE = None
self.OUTPUT_FORMAT = None
self.SPACECRAFT_ID = None
self.SENSOR_ID = None
self.WRS_PATH = None
self.WRS_ROW = None
self.NADIR_OFFNADIR = None
self.TARGET_WRS_PATH = None
self.TARGET_WRS_ROW = None
self.DATE_ACQUIRED = None
self.SCENE_CENTER_TIME = None
# image attributes
self.CLOUD_COVER = None
self.IMAGE_QUALITY_OLI = None
self.IMAGE_QUALITY_TIRS = None
self.ROLL_ANGLE = None
self.SUN_AZIMUTH = None
self.SUN_ELEVATION = None
self.EARTH_SUN_DISTANCE = None # calculated for Landsats before 8.
# read the file and populate the MTL attributes
self._read(filename)
def _read(self, filename):
""" reads the contents of an MTL file """
# if the "filename" input is actually already a metadata class object, return it back.
if inspect.isclass(filename):
return filename
fields = []
values = []
metafile = open(filename, 'r')
metadata = metafile.readlines()
for line in metadata:
# skips lines that contain "bad flags" denoting useless data AND lines
# greater than 1000 characters. 1000 character limit works around an odd LC5
# issue where the metadata has 40,000+ characters of whitespace
bad_flags = ["END", "GROUP"]
if not any(x in line for x in bad_flags) and len(line) <= 1000:
try:
                    line = line.replace("  ", "")
line = line.replace("\n", "")
field_name, field_value = line.split(' = ')
fields.append(field_name)
values.append(field_value)
except:
pass
for i in range(len(fields)):
# format fields without quotes,dates, or times in them as floats
if not any(['"' in values[i], 'DATE' in fields[i], 'TIME' in fields[i]]):
setattr(self, fields[i], float(values[i]))
else:
values[i] = values[i].replace('"', '')
setattr(self, fields[i], values[i])
# create datetime_obj attribute (drop decimal seconds)
dto_string = self.DATE_ACQUIRED + self.SCENE_CENTER_TIME
self.DATETIME_OBJ = datetime.strptime(dto_string.split(".")[0], "%Y-%m-%d%H:%M:%S")
# only landsat 8 includes sun-earth-distance in MTL file, so calculate it
# for the Landsats 4,5,7 using solar module.
# if not self.SPACECRAFT_ID == "LANDSAT_8":
#
# # use 0s for lat and lon, sun_earth_distance is not a function of any one location on earth.
# s = solar(0, 0, self.DATETIME_OBJ, 0)
# self.EARTH_SUN_DISTANCE = s.get_rad_vector()
print("Scene {0} center time is {1}".format(self.LANDSAT_SCENE_ID, self.DATETIME_OBJ))
class GeoTIFF(object):
"""
Represents a GeoTIFF file for data access and processing and provides
a number of useful methods and attributes.
Arguments:
filepath (str): the full or relative file path
"""
def __init__(self, filepath):
try:
self.dataobj = gdal.Open(filepath)
except RuntimeError as err:
LOGGER.error("Could not open %s: %s" % (filepath, err.message))
raise
self.filepath = filepath
self.ncol = self.dataobj.RasterXSize
self.nrow = self.dataobj.RasterYSize
self.nbands = self.dataobj.RasterCount
self._gtr = self.dataobj.GetGeoTransform()
# see http://www.gdal.org/gdal_datamodel.html
self.ulx = self._gtr[0]
self.uly = self._gtr[3]
self.lrx = (self.ulx + self.ncol * self._gtr[1]
+ self.nrow * self._gtr[2])
self.lry = (self.uly + self.ncol * self._gtr[4]
+ self.nrow * self._gtr[5])
if self._gtr[2] != 0 or self._gtr[4] != 0:
LOGGER.warning(
"The dataset is not north-up. The geotransform is given "
+ "by: (%s). " % ', '.join([str(item) for item in self._gtr])
+ "Northing and easting values will not have expected meaning."
)
self.dataobj = None
@property
def data(self):
"""2D numpy array for single-band GeoTIFF file data. Otherwise, 3D. """
if not self.dataobj:
self.dataobj = gdal.Open(self.filepath)
dat = self.dataobj.ReadAsArray()
self.dataobj = None
return dat
@property
def projection(self):
"""The dataset's coordinate reference system as a Well-Known String"""
if not self.dataobj:
self.dataobj = gdal.Open(self.filepath)
dat = self.dataobj.GetProjection()
self.dataobj = None
return dat
@property
def proj4(self):
"""The dataset's coordinate reference system as a PROJ4 string"""
osrref = osr.SpatialReference()
osrref.ImportFromWkt(self.projection)
return osrref.ExportToProj4()
@property
def coordtrans(self):
"""A PROJ4 Proj object, which is able to perform coordinate
transformations"""
return Proj(self.proj4)
@property
def delx(self):
"""The sampling distance in x-direction, in physical units
(eg metres)"""
return self._gtr[1]
@property
def dely(self):
"""The sampling distance in y-direction, in physical units
(eg metres). Negative in northern hemisphere."""
return self._gtr[5]
@property
def easting(self):
"""The x-coordinates of first row pixel corners,
as a numpy array: upper-left corner of upper-left pixel
to upper-right corner of upper-right pixel (ncol+1)."""
delta = np.abs(
(self.lrx-self.ulx)/self.ncol
- self.delx
)
if delta > 10e-2:
LOGGER.warn(
"GeoTIFF issue: E-W grid step differs from "
+ "deltaX by more than 1% ")
return np.linspace(self.ulx, self.lrx, self.ncol+1)
@property
def northing(self):
"""The y-coordinates of first column pixel corners,
as a numpy array: lower-left corner of lower-left pixel to
upper-left corner of upper-left pixel (nrow+1)."""
# check if data grid step is consistent
delta = np.abs(
(self.lry-self.uly)/self.nrow
- self.dely
)
if delta > 10e-2:
LOGGER.warn(
"GeoTIFF issue: N-S grid step differs from "
+ "deltaY by more than 1% ")
return np.linspace(self.lry, self.uly, self.nrow+1)
@property
def x_pxcenter(self):
"""The x-coordinates of pixel centers, as a numpy array ncol."""
return np.linspace(
self.ulx + self.delx/2,
self.lrx - self.delx/2,
self.ncol)
@property
def y_pxcenter(self):
"""y-coordinates of pixel centers, nrow."""
return np.linspace(
self.lry - self.dely/2,
self.uly + self.dely/2,
self.nrow)
@property
def _XY(self):
"""Meshgrid of nrow+1, ncol+1 corner xy coordinates"""
return np.meshgrid(self.easting, self.northing)
@property
def _XY_pxcenter(self):
"""Meshgrid of nrow, ncol center xy coordinates"""
return np.meshgrid(self.x_pxcenter, self.y_pxcenter)
@property
def _LonLat_pxcorner(self):
"""Meshgrid of nrow+1, ncol+1 corner Lon/Lat coordinates"""
return self.coordtrans(*self._XY, inverse=True)
@property
def _LonLat_pxcenter(self):
"""Meshgrid of nrow, ncol center Lon/Lat coordinates"""
return self.coordtrans(*self._XY_pxcenter, inverse=True)
@property
def Lon(self):
"""Longitude coordinate of each pixel corner, as an array"""
return self._LonLat_pxcorner[0]
@property
def Lat(self):
"""Latitude coordinate of each pixel corner, as an array"""
return self._LonLat_pxcorner[1]
@property
def Lon_pxcenter(self):
"""Longitude coordinate of each pixel center, as an array"""
return self._LonLat_pxcenter[0]
@property
def Lat_pxcenter(self):
"""Latitude coordinate of each pixel center, as an array"""
return self._LonLat_pxcenter[1]
def ij2xy(self, i, j):
"""
Converts array index pair(s) to easting/northing coordinate pairs(s).
NOTE: array coordinate origin is in the top left corner whereas
easting/northing origin is in the bottom left corner. Easting and
northing are floating point numbers, and refer to the top-left corner
coordinate of the pixel. i runs from 0 to nrow-1, j from 0 to ncol-1.
For i=nrow and j=ncol, the bottom-right corner coordinate of the
bottom-right pixel will be returned. This is identical to the bottom-
right corner.
Arguments:
i (int): scalar or array of row coordinate index
j (int): scalar or array of column coordinate index
Returns:
x (float): scalar or array of easting coordinates
y (float): scalar or array of northing coordinates
"""
if (_test_outside(i, 0, self.nrow)
or _test_outside(j, 0, self.ncol)):
raise RasterError(
"Coordinates %d, %d out of bounds" % (i, j))
x = self.easting[0] + j * self.delx
y = self.northing[-1] + i * self.dely
return x, y
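    # Worked example (illustrative, not part of the original class): for a
    # north-up grid with ulx=500000.0, uly=4650000.0, delx=30.0, dely=-30.0,
    # ij2xy(0, 0) returns (500000.0, 4650000.0), i.e. the upper-left corner,
    # since easting[0] == ulx and northing[-1] == uly. xy2ij() below performs
    # the inverse mapping back to (row, column) indices.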
def xy2ij(self, x, y, precise=False):
"""
Convert easting/northing coordinate pair(s) to array coordinate
pairs(s).
NOTE: see note at ij2xy()
Arguments:
x (float): scalar or array of easting coordinates
y (float): scalar or array of northing coordinates
precise (bool): if true, return fractional array coordinates
Returns:
i (int, or float): scalar or array of row coordinate index
j (int, or float): scalar or array of column coordinate index
"""
if (_test_outside(x, self.easting[0], self.easting[-1]) or
_test_outside(y, self.northing[0], self.northing[-1])):
raise RasterError("Coordinates out of bounds")
i = (1 - (y - self.northing[0]) /
(self.northing[-1] - self.northing[0])) * self.nrow
j = ((x - self.easting[0]) /
(self.easting[-1] - self.easting[0])) * self.ncol
if precise:
return i, j
else:
return int(np.floor(i)), int(np.floor(j))
def simpleplot(self):
"""Quick and dirty plot of each band (channel, dataset) in the image.
Requires Matplotlib."""
import matplotlib.pyplot as plt
numbands = self.nbands
if numbands == 1:
plt.figure(figsize=(15, 10))
plt.imshow(self.data[:, :], cmap='bone')
elif numbands > 1:
for idx in range(numbands):
plt.figure(figsize=(15, 10))
plt.imshow(self.data[idx, :, :], cmap='bone')
return True
def clone(self, newpath, newdata):
"""
Creates new GeoTIFF object from existing: new data, same georeference.
Arguments:
newpath: valid file path
newdata: numpy array, 2 or 3-dim
Returns:
A raster.GeoTIFF object
"""
# convert Numpy dtype objects to GDAL type codes
# see https://gist.github.com/chryss/8366492
NPDTYPE2GDALTYPECODE = {
"uint8": 1,
"int8": 1,
"uint16": 2,
"int16": 3,
"uint32": 4,
"int32": 5,
"float32": 6,
"float64": 7,
"complex64": 10,
"complex128": 11,
}
# check if newpath is potentially a valid file path to save data
dirname, fname = os.path.split(newpath)
if dirname:
if not os.path.isdir(dirname):
print("%s is not a valid directory to save file to " % dirname)
if os.path.isdir(newpath):
LOGGER.warning(
"%s is a directory." % dirname + " Choose a name "
+ "that is suitable for writing a dataset to.")
if (newdata.shape != self.data.shape
and newdata.shape != self.data[0, ...].shape):
raise RasterError(
"New and cloned GeoTIFF dataset must be the same shape.")
dims = newdata.ndim
if dims == 2:
bands = 1
elif dims > 2:
bands = newdata.shape[0]
else:
raise RasterError(
"New data array has only %s dimensions." % dims)
try:
LOGGER.info(newdata.dtype.name)
LOGGER.info(NPDTYPE2GDALTYPECODE)
LOGGER.info(NPDTYPE2GDALTYPECODE[newdata.dtype.name])
gdaltype = NPDTYPE2GDALTYPECODE[newdata.dtype.name]
except KeyError as err:
raise RasterError(
"Data type in array %s " % newdata.dtype.name
+ "cannot be converted to GDAL data type: \n%s" % err.message)
proj = self.projection
geotrans = self._gtr
gtiffdr = gdal.GetDriverByName('GTiff')
gtiff = gtiffdr.Create(newpath, self.ncol, self.nrow, bands, gdaltype)
gtiff.SetProjection(proj)
gtiff.SetGeoTransform(geotrans)
if dims == 2:
gtiff.GetRasterBand(1).WriteArray(newdata)
else:
for idx in range(dims):
gtiff.GetRasterBand(idx+1).WriteArray(newdata[idx, :, :])
gtiff = None
return GeoTIFF(newpath) | bsd-3-clause |
rebeccabilbro/viz | diagnostics/pycon/code/anscombe.py | 1 | 1462 | # Writing the Anscombe visualization code
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
# Create the Anscombe arrays
i = np.array([
[10.0, 8.0, 13.0, 9.0, 11.0, 14.0, 6.0, 4.0, 12.0, 7.0, 5.0],
[8.04, 6.95, 7.58, 8.81, 8.33, 9.96, 7.24, 4.26, 10.84, 4.82, 5.68]
])
ii = np.array([
[10.0, 8.0, 13.0, 9.0, 11.0, 14.0, 6.0, 4.0, 12.0, 7.0, 5.0],
[9.14, 8.14, 8.74, 8.77, 9.26, 8.10, 6.13, 3.10, 9.13, 7.26, 4.74]
])
iii = np.array([
[10.0, 8.0, 13.0, 9.0, 11.0, 14.0, 6.0, 4.0, 12.0, 7.0, 5.0],
[7.46, 6.77, 12.74, 7.11, 7.81, 8.84, 6.08, 5.39, 8.15, 6.42, 5.73]
])
iv = np.array([
[8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 8.0, 19.0, 8.0, 8.0, 8.0],
[6.58, 5.76, 7.71, 8.84, 8.47, 7.04, 5.25, 12.50, 5.56, 7.91, 6.89]
])
def make_plot(a, b, c, d):
"""
Creates a grid plot of four datasets such that (a, b) and (c,d) share
the same Y axis and (a,c) and (b,d) share the same X axis.
"""
fig, ((axa, axb), (axc, axd)) = plt.subplots(2, 2, sharex='col', sharey='row')
for arr, ax in ((a, axa), (b, axb), (c, axc), (d, axd)):
x = arr[0]
y = arr[1]
ax.scatter(x, y, c='#2B94E9')
m,b = np.polyfit(x, y, 1)
X = np.linspace(ax.get_xlim()[0], ax.get_xlim()[1], 100)
ax.plot(X, m*X+b, '-', c='#666666')
plt.savefig("/Users/pepper/Desktop/Projects/DDL/pycon2016/vizMLslides/images/anscombe.png",transparent=True)
make_plot(i, ii, iii, iv)
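# Note (illustrative): each Anscombe dataset produces nearly the same fit from
# np.polyfit above, slope ~0.500 and intercept ~3.00, which is the point of
# drawing all four panels together; e.g.
#   m, b = np.polyfit(i[0], i[1], 1)   # -> (~0.500, ~3.00); likewise for ii, iii, iv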
| mit |
00krishna-research/py_university_gender_dynamics_pkg | pyugend/ReplicationModel.py | 1 | 4940 | """
Replication Module
------------------
This module simulates the original mathematical model from the paper [FILL
IN]. The model is deterministic: it does not assign individuals to
promotion or hiring through a stochastic process.
"""
__author__ = 'krishnab'
from operator import neg, truediv
import numpy as np
import pandas as pd
from numpy.random import binomial
from pyugend.Models import Base_model
class Replication_model(Base_model):
def __init__(self, **kwds):
Base_model.__init__(self, **kwds)
self.name = "Replication Model(mathmod_orig)"
self.label = "Replication m"
def run_model(self):
self.res = np.zeros([self.duration,12], dtype=np.float32)
self.res[0,0] = self.nf1
self.res[0,1] = self.nf2
self.res[0,2] = self.nf3
self.res[0,3] = self.nm1
self.res[0,4] = self.nm2
self.res[0,5] = self.nm3
self.res[0,6] = self.vac3
self.res[0,7] = self.vac2
self.res[0,8] = self.vac1
self.res[0,9] = np.float32(self.nf1/(self.nf1 + self.nm1))
self.res[0,10] = np.float32(self.nf2/(self.nf2 + self.nm2))
self.res[0, 11] = np.float32(
sum(list([self.nf1, self.nf2, self.nf3])) / sum(list([self.nf1,
self.nf2,
self.nf3,
self.nm1,
self.nm2,
self.nm3])))
for i in range(1,self.duration):
#set promotion probabilities
# level 3
female_leave_3 = self.res[i-1,2]*self.df3
male_leave_3 = self.res[i-1, 5]*self.dm3
self.res[i,6] = total_vacancies = female_leave_3 + male_leave_3
hiring_female3 = total_vacancies*self.phire3*self.bf3
hiring_male3 = total_vacancies*self.phire3*(1-self.bf3)
hiring_total3 = total_vacancies*self.phire3
left_to_prom3 = total_vacancies - hiring_total3
self.res[i,2] = self.res[i-1,2] - female_leave_3 + hiring_female3 + \
left_to_prom3*np.float32(self.res[i-1,10])
self.res[i,5] = self.res[i-1,5] - male_leave_3 + hiring_male3 + left_to_prom3*(
1-np.float32(self.res[i-1,10]))
#level 2
f_go_2 = self.res[i-1,1]*self.df2 + left_to_prom3*np.float32(self.res[i-1, 10])
m_go_2 = self.res[i-1,4]*self.dm2 + left_to_prom3*np.float32((1 - self.res[i-1,
10]))
self.res[i,7] = f_go_2 + m_go_2
hiring_female2 = self.res[i,7]*self.phire2*self.bf2
hiring_male2 = self.res[i,7]*self.phire2*(1-self.bf2)
hiring_total2 = self.res[i,7]*self.phire2
left_to_prom2 = self.res[i,7] - hiring_total2
# Update the male/female level 2 numbers for the current iteration.
self.res[i,1] = self.res[i-1,1] - f_go_2 + hiring_female2 + \
left_to_prom2*np.float32(self.res[
i-1, 9])
self.res[i,4] = self.res[i-1,4] - m_go_2 + hiring_male2 + \
left_to_prom2*np.float32((1-self.res[
i-1,9]))
## update the promotion probability for level 2 -> 3 with new data
self.res[i,10] = np.float32(self.res[i,1]/(self.res[i,1] + self.res[i,4]))
## Level 1
f_go_1 = self.res[i-1, 0]*self.df1 + left_to_prom2*np.float32(self.res[i-1,9])
m_go_1 = self.res[i-1, 3]*self.dm1 + left_to_prom2*np.float32((1 - self.res[i-1,
9]))
self.res[i,8] = f_go_1 + m_go_1
self.res[i,0] = self.res[i-1,0] - f_go_1 + self.res[i,8]*self.bf1
self.res[i,3] = self.res[i-1,3] - m_go_1 + self.res[i,8]*(1-self.bf1)
self.res[i,9] = np.float32(self.res[i,0]/(self.res[i,0] + self.res[i,3]))
# Update overall department gender balance.
self.res[i, 11] = np.sum(self.res[i, 0:3]) / np.sum(self.res[i, 0:6])
## Print Data matrix
df_ = pd.DataFrame(self.res)
df_.columns = ['f1',
'f2',
'f3',
'm1',
'm2',
'm3',
't3',
't2',
't1',
'prom1',
'prom2',
'gendprop']
# print(df_)
recarray_results = df_.to_records(index=True)
self.run = recarray_results
return recarray_results
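# Minimal usage sketch (hypothetical; the full keyword list is defined by
# Base_model in pyugend.Models and is not shown in this file):
#   model = Replication_model(nf1=3, nf2=3, nf3=2, nm1=11, nm2=12, nm3=9,
#                             vac1=0, vac2=0, vac3=0, duration=40,
#                             **other_rate_and_hiring_parameters)
#   res = model.run_model()   # recarray with columns f1..m3, t1..t3, prom1, prom2, gendprop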
| mit |
hlin117/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 66 | 8261 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
rng = check_random_state(42)
X = rng.randint(-100, 20, np.product(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
Xdense = X.A
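# The transform above mimics sublinear tf weighting: every stored count c
# becomes 1 + log(c), so a count of 1 maps to 1.0 and a count of 19 to ~3.94.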
def test_algorithms():
svd_a = TruncatedSVD(30, algorithm="arpack")
svd_r = TruncatedSVD(30, algorithm="randomized", random_state=42)
Xa = svd_a.fit_transform(X)[:, :6]
Xr = svd_r.fit_transform(X)[:, :6]
assert_array_almost_equal(Xa, Xr, decimal=5)
comp_a = np.abs(svd_a.components_)
comp_r = np.abs(svd_r.components_)
# All elements are equal, but some elements are more equal than others.
assert_array_almost_equal(comp_a[:9], comp_r[:9])
assert_array_almost_equal(comp_a[9:], comp_r[9:], decimal=2)
def test_attributes():
for n_components in (10, 25, 41):
tsvd = TruncatedSVD(n_components).fit(X)
assert_equal(tsvd.n_components, n_components)
assert_equal(tsvd.components_.shape, (n_components, n_features))
def test_too_many_components():
for algorithm in ["arpack", "randomized"]:
for n_components in (n_features, n_features + 1):
tsvd = TruncatedSVD(n_components=n_components, algorithm=algorithm)
assert_raises(ValueError, tsvd.fit, X)
def test_sparse_formats():
for fmt in ("array", "csr", "csc", "coo", "lil"):
Xfmt = Xdense if fmt == "dense" else getattr(X, "to" + fmt)()
tsvd = TruncatedSVD(n_components=11)
Xtrans = tsvd.fit_transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
Xtrans = tsvd.transform(Xfmt)
assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
for algo in ("arpack", "randomized"):
# We need a lot of components for the reconstruction to be "almost
# equal" in all positions. XXX Test means or sums instead?
tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
Xt = tsvd.fit_transform(X)
Xinv = tsvd.inverse_transform(Xt)
assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
Xint = X.astype(np.int64)
tsvd = TruncatedSVD(n_components=6)
Xtrans = tsvd.fit_transform(Xint)
assert_equal(Xtrans.shape, (n_samples, tsvd.n_components))
def test_explained_variance():
# Test sparse data
svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
# Test dense data
svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
# helper arrays for tests below
svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
svd_r_10_de, svd_a_20_de, svd_r_20_de)
svds_trans = (
(svd_a_10_sp, X_trans_a_10_sp),
(svd_r_10_sp, X_trans_r_10_sp),
(svd_a_20_sp, X_trans_a_20_sp),
(svd_r_20_sp, X_trans_r_20_sp),
(svd_a_10_de, X_trans_a_10_de),
(svd_r_10_de, X_trans_r_10_de),
(svd_a_20_de, X_trans_a_20_de),
(svd_r_20_de, X_trans_r_20_de),
)
svds_10_v_20 = (
(svd_a_10_sp, svd_a_20_sp),
(svd_r_10_sp, svd_r_20_sp),
(svd_a_10_de, svd_a_20_de),
(svd_r_10_de, svd_r_20_de),
)
svds_sparse_v_dense = (
(svd_a_10_sp, svd_a_10_de),
(svd_a_20_sp, svd_a_20_de),
(svd_r_10_sp, svd_r_10_de),
(svd_r_20_sp, svd_r_20_de),
)
# Assert the 1st component is equal
for svd_10, svd_20 in svds_10_v_20:
assert_array_almost_equal(
svd_10.explained_variance_ratio_,
svd_20.explained_variance_ratio_[:10],
decimal=5,
)
# Assert that 20 components has higher explained variance than 10
for svd_10, svd_20 in svds_10_v_20:
assert_greater(
svd_20.explained_variance_ratio_.sum(),
svd_10.explained_variance_ratio_.sum(),
)
# Assert that all the values are greater than 0
for svd in svds:
assert_array_less(0.0, svd.explained_variance_ratio_)
# Assert that total explained variance is less than 1
for svd in svds:
assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
# Compare sparse vs. dense
for svd_sparse, svd_dense in svds_sparse_v_dense:
assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
svd_dense.explained_variance_ratio_)
# Test that explained_variance is correct
for svd, transformed in svds_trans:
total_variance = np.var(X.toarray(), axis=0).sum()
variances = np.var(transformed, axis=0)
true_explained_variance_ratio = variances / total_variance
assert_array_almost_equal(
svd.explained_variance_ratio_,
true_explained_variance_ratio,
)
def test_singular_values():
# Check that the TruncatedSVD output has the correct singular values
rng = np.random.RandomState(0)
n_samples = 100
n_features = 80
X = rng.randn(n_samples, n_features)
apca = TruncatedSVD(n_components=2, algorithm='arpack',
random_state=rng).fit(X)
    rpca = TruncatedSVD(n_components=2, algorithm='randomized',
random_state=rng).fit(X)
assert_array_almost_equal(apca.singular_values_, rpca.singular_values_, 12)
# Compare to the Frobenius norm
X_apca = apca.transform(X)
X_rpca = rpca.transform(X)
assert_array_almost_equal(np.sum(apca.singular_values_**2.0),
np.linalg.norm(X_apca, "fro")**2.0, 12)
assert_array_almost_equal(np.sum(rpca.singular_values_**2.0),
np.linalg.norm(X_rpca, "fro")**2.0, 12)
# Compare to the 2-norms of the score vectors
assert_array_almost_equal(apca.singular_values_,
np.sqrt(np.sum(X_apca**2.0, axis=0)), 12)
assert_array_almost_equal(rpca.singular_values_,
np.sqrt(np.sum(X_rpca**2.0, axis=0)), 12)
# Set the singular values and see what we get back
rng = np.random.RandomState(0)
n_samples = 100
n_features = 110
X = rng.randn(n_samples, n_features)
apca = TruncatedSVD(n_components=3, algorithm='arpack',
random_state=rng)
rpca = TruncatedSVD(n_components=3, algorithm='randomized',
random_state=rng)
X_apca = apca.fit_transform(X)
X_rpca = rpca.fit_transform(X)
X_apca /= np.sqrt(np.sum(X_apca**2.0, axis=0))
X_rpca /= np.sqrt(np.sum(X_rpca**2.0, axis=0))
X_apca[:, 0] *= 3.142
X_apca[:, 1] *= 2.718
X_rpca[:, 0] *= 3.142
X_rpca[:, 1] *= 2.718
X_hat_apca = np.dot(X_apca, apca.components_)
X_hat_rpca = np.dot(X_rpca, rpca.components_)
apca.fit(X_hat_apca)
rpca.fit(X_hat_rpca)
assert_array_almost_equal(apca.singular_values_, [3.142, 2.718, 1.0], 14)
assert_array_almost_equal(rpca.singular_values_, [3.142, 2.718, 1.0], 14)
| bsd-3-clause |
NigelCleland/pyspd | docs/conf.py | 1 | 8940 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# complexity documentation build configuration file, created by
# sphinx-quickstart on Tue Jul 9 22:26:36 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# Get the project root dir, which is the parent dir of this
cwd = os.getcwd()
project_root = os.path.dirname(cwd)
# Insert the project root dir as the first element in the PYTHONPATH.
# This lets us ensure that the source package is imported, and that its
# version is used.
sys.path.insert(0, project_root)
class Mock(object):
def __init__(self, *args, **kwargs):
pass
def __call__(self, *args, **kwargs):
return Mock()
@classmethod
def __getattr__(cls, name):
if name in ('__file__', '__path__'):
return '/dev/null'
elif name[0] == name[0].upper():
mockType = type(name, (), {})
mockType.__module__ = __name__
return mockType
else:
return Mock()
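# The mock above lets attribute chains resolve during the docs build even
# though the heavy dependencies are absent; e.g. (illustrative)
#   Mock()("anything").some_attr   # -> another Mock instance
#   Mock().SomeClass               # -> a dynamically created placeholder type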
MOCK_MODULES = ['numpy', 'pulp', 'pandas', 'matplotlib']
for mod_name in MOCK_MODULES:
sys.modules[mod_name] = Mock()
import pyspd
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.viewcode', 'sphinx.ext.pngmath', 'numpydoc']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'pyspd'
copyright = u'2013, Nige Cleland'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = pyspd.__version__
# The full version, including alpha/beta/rc tags.
release = pyspd.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'pyspddoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'pyspd.tex', u'pyspd Documentation',
u'Nige Cleland', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pyspd', u'pyspd Documentation',
[u'Nige Cleland'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'pyspd', u'pyspd Documentation',
u'Nige Cleland', 'pyspd', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| bsd-3-clause |
rhodges/watershed-priorities | util/scale_sensitivity.py | 3 | 6157 | from django.core.management import setup_environ
import os
import sys
sys.path.append(os.path.dirname(os.path.join('..','wp',__file__)))
import settings
setup_environ(settings)
#==================================#
from arp.models import WatershedPrioritization, ConservationFeature, PlanningUnit, Cost, PuVsCf, PuVsCost
from django.contrib.auth.models import User
from django.utils import simplejson as json
from django.conf import settings
import time
import random
def mean(alist):
floatNums = [float(x) for x in alist]
return sum(floatNums) / len(alist)
user, created = User.objects.get_or_create(username='tester')
scalefactors = []
num_species = []
num_units = []
factors = [0.2, 0.5, 0.75, 1.0, 1.25, 2, 4, 8]
numspecies = ['all', 'all', 1, 2, 4, 8, 16, 32]
numcosts = [1,2,3]
# these are random
targets = [0.25, 0.5, 0.75]
penalties = [0.01, 0.25, 0.5, 0.75, 1.0]
settings.MARXAN_NUMREPS = 3
#MODE = 'hardcoded'
#MODE = 'query'
MODE = 'create'
if MODE == 'query':
wp = WatershedPrioritization.objects.filter(name__startswith="Auto Test Scale Factor")
for w in wp:
print "Querying", w.name, w
scalefactors.append(w.input_scalefactor)
r = w.results
num_species.append(r['num_met'])
num_units.append(r['num_units'])
w.kml
COUNT = 0
def create_wp(target_dict, penalties_dict, costs_dict, sf):
global COUNT
COUNT += 1
wp = WatershedPrioritization(input_targets = json.dumps(
target_dict
),
input_penalties = json.dumps(
penalties_dict
),
input_relativecosts=json.dumps(
costs_dict
),
input_scalefactor=sf,
name="Auto Test Scale Factor %s" % sf, user=user)
return wp
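# Example call for the helper above (hypothetical values; target/penalty keys
# must be ConservationFeature level strings and the cost keys are fixed):
#   wp = create_wp({"widespread": 0.5}, {"widespread": 0.25},
#                  {"watershed-condition": 1, "invasives": 0, "climate": 0}, 1.0)
#   wp.save()   # the loop below polls wp.done until the optimization finishes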
if MODE == 'create':
wp = WatershedPrioritization.objects.filter(name__startswith="Auto Test Scale Factor")
wp.delete()
cfs = ConservationFeature.objects.all()
keys = []
for c in cfs:
a = c.level_string
while a.endswith('---'):
a = a[:-3]
keys.append(a)
fh = open("/home/mperry/results.csv", 'w+')
    fh.write('ncosts, nspecies, sumpenalties, meanpenalties, scalefactor, numspeciesmet, numplanningunits')
fh.write('\n')
fh.flush()
for f in factors:
for nc in numcosts:
for n in numspecies:
for i in range(2):
try:
n = int(n)
target_dict = {}
penalty_dict = {}
# pick n random species
selected_key = random.sample(keys, n) #'blah---blah'
if random.choice([True,False]):
t = random.choice(targets)
p = random.choice(penalties)
else:
t = None
p = None
for key in selected_key:
if t and p:
# Use the predetermined for ALL species
target_dict[key] = t
penalty_dict[key] = p
else:
# random for each species
target_dict[key] = random.choice(targets)
penalty_dict[key] = random.choice(penalties)
except ValueError:
# ALL species
t = random.choice(targets)
p = random.choice(penalties)
t2 = random.choice(targets)
p2 = random.choice(penalties)
target_dict = { "locally-endemic":t, "widespread":t2 }
penalty_dict = { "locally-endemic":p, "widespread":p2 }
costs_dict = { "watershed-condition":0, "invasives":0, "climate":0 }
for a in random.sample(costs_dict.keys(), nc):
costs_dict[a] = 1
sf = f
wp = create_wp(target_dict, penalty_dict, costs_dict, sf)
############
print "####################################"
print 'targets', wp.input_targets
print 'penalties', wp.input_penalties
print 'costs', wp.input_relativecosts
wp.save()
while not wp.done:
time.sleep(2)
print " ", wp.status_html
inpenalties = json.loads(wp.input_penalties)
if 'widespread' in inpenalties.keys():
nspecies = 71
else:
nspecies = len(inpenalties.keys())
r = wp.results
                    #'ncosts, nspecies, sumpenalties, meanpenalties, scalefactor, numspeciesmet, numplanningunits'
fh.write(','.join([str(x) for x in [
sum(json.loads(wp.input_relativecosts).values()),
nspecies,
sum(inpenalties.values()),
mean(inpenalties.values()),
wp.input_scalefactor,
r['num_met'],
r['num_units']
]]))
fh.write('\n')
fh.flush()
if MODE == 'hardcoded':
scalefactors = [0.1, 0.2, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0, 1.1, 1.25, 1.5, 2, 4, 8, 16, 32]
num_units = [0, 3, 9, 17, 46, 57, 63, 73, 76, 79, 81, 82, 82, 83, 85, 90, 92, 93, 91]
num_species = [0, 1, 4, 10, 27, 38, 37, 54, 57, 58, 63, 59, 62, 66, 66, 69, 71, 71, 71]
assert len(scalefactors) == len(num_species) == len(num_units)
print scalefactors
print num_units
print num_species
#import matplotlib.pyplot as plt
#fig = plt.figure()
#plt.xlabel('Scale Factor')
#plt.ylabel('Number of Species Goals Met')
#ax = fig.add_subplot(111)
#ax.scatter(scalefactors, num_species)
#ax.set_xscale('log')
#plt.show()
| bsd-3-clause |
jzt5132/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 59 | 35368 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
import scipy
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
from sklearn.metrics import log_loss
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
sp_version = tuple([int(s) for s in scipy.__version__.split('.')])
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raise_message(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raise_message(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = "Tolerance for stopping criteria must be positive"
assert_raise_message(ValueError, msg, LR(tol=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raise_message(ValueError, msg, LR(max_iter=-1).fit, X, Y1)
assert_raise_message(ValueError, msg, LR(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='sag', tol=1e-2,
multi_class='ovr', random_state=42)]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_check_solver_option():
X, y = iris.data, iris.target
for LR in [LogisticRegression, LogisticRegressionCV]:
msg = ("Logistic Regression supports only liblinear, newton-cg, lbfgs"
" and sag solvers, got wrong_name")
lr = LR(solver="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = "multi_class should be either multinomial or ovr, got wrong_name"
lr = LR(solver='newton-cg', multi_class="wrong_name")
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solver except 'newton-cg' and 'lfbgs'
for solver in ['liblinear', 'sag']:
msg = ("Solver %s does not support a multinomial backend." %
solver)
lr = LR(solver=solver, multi_class='multinomial')
assert_raise_message(ValueError, msg, lr.fit, X, y)
# all solvers except 'liblinear'
for solver in ['newton-cg', 'lbfgs', 'sag']:
msg = ("Solver %s supports only l2 penalties, got l1 penalty." %
solver)
lr = LR(solver=solver, penalty='l1')
assert_raise_message(ValueError, msg, lr.fit, X, y)
msg = ("Solver %s supports only dual=False, got dual=True" %
solver)
lr = LR(solver=solver, dual=True)
assert_raise_message(ValueError, msg, lr.fit, X, y)
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
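# shorthand: wrapping the call in ignore_warnings below silences the warnings
# (e.g. convergence warnings) raised while tracing the regularization path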
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-5, solver=solver,
random_state=0)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-5,
random_state=0)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4,
err_msg="with solver = %s" % solver)
# test for fit_intercept=True
for solver in ('lbfgs', 'newton-cg', 'liblinear', 'sag'):
Cs = [1e3]
coefs, Cs, _ = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-6, solver=solver,
intercept_scaling=10000., random_state=0)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000., random_state=0)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4,
err_msg="with solver = %s" % solver)
def test_liblinear_dual_random_state():
# random_state is relevant for liblinear solver only if dual=True
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0, dual=True, max_iter=1, tol=1e-15)
lr2.fit(X, y)
lr3 = LogisticRegression(random_state=8, dual=True, max_iter=1, tol=1e-15)
lr3.fit(X, y)
# same result for same random state
assert_array_almost_equal(lr1.coef_, lr2.coef_)
# different results for different random states
msg = "Arrays are not almost equal to 6 decimals"
assert_raise_message(AssertionError, msg,
assert_array_almost_equal, lr1.coef_, lr3.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
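# the fitted slopes approximate d(grad)/dt along `vector`, i.e. an estimate of
# the Hessian-vector product that hess(vector) should return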
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
# Do not fit intercept. This can be considered equivalent to adding
# a feature vector of ones, i.e. a column of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
# Use pre-defined folds so that the fits on different y below share the same splits
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
lib = LogisticRegression(fit_intercept=False)
sag = LogisticRegression(solver='sag', fit_intercept=False,
random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=3)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
tol = 1e-6
ncg = LogisticRegression(solver='newton-cg', fit_intercept=False, tol=tol)
lbf = LogisticRegression(solver='lbfgs', fit_intercept=False, tol=tol)
lib = LogisticRegression(fit_intercept=False, tol=tol)
sag = LogisticRegression(solver='sag', fit_intercept=False, tol=tol,
max_iter=1000, random_state=42)
ncg.fit(X, y)
lbf.fit(X, y)
sag.fit(X, y)
lib.fit(X, y)
assert_array_almost_equal(ncg.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(lib.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(ncg.coef_, lbf.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lib.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, ncg.coef_, decimal=4)
assert_array_almost_equal(sag.coef_, lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
# Test that liblinear fails when a class_weight dict is provided
# for a multiclass problem. It can, however, handle binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=balanced
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='balanced')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='balanced')
clf_lib.fit(X, y)
clf_sag = LogisticRegressionCV(solver='sag', fit_intercept=False,
class_weight='balanced', max_iter=2000)
clf_sag.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_sag.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_sag.coef_, decimal=4)
def test_logistic_regression_sample_weights():
X, y = make_classification(n_samples=20, n_features=5, n_informative=3,
n_classes=2, random_state=0)
for LR in [LogisticRegression, LogisticRegressionCV]:
# Test that liblinear fails when sample weights are provided
clf_lib = LR(solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y,
sample_weight=np.ones(y.shape[0]))
# Test that passing sample_weight as ones is the same as
# not passing them at all (default None)
clf_sw_none = LR(solver='lbfgs', fit_intercept=False)
clf_sw_none.fit(X, y)
clf_sw_ones = LR(solver='lbfgs', fit_intercept=False)
clf_sw_ones.fit(X, y, sample_weight=np.ones(y.shape[0]))
assert_array_almost_equal(clf_sw_none.coef_, clf_sw_ones.coef_, decimal=4)
# Test that sample weights work the same with the lbfgs,
# newton-cg, and 'sag' solvers
clf_sw_lbfgs = LR(solver='lbfgs', fit_intercept=False)
clf_sw_lbfgs.fit(X, y, sample_weight=y + 1)
clf_sw_n = LR(solver='newton-cg', fit_intercept=False)
clf_sw_n.fit(X, y, sample_weight=y + 1)
clf_sw_sag = LR(solver='sag', fit_intercept=False,
max_iter=2000, tol=1e-7)
clf_sw_sag.fit(X, y, sample_weight=y + 1)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_n.coef_, decimal=4)
assert_array_almost_equal(clf_sw_lbfgs.coef_, clf_sw_sag.coef_, decimal=4)
# Test that passing class_weight={0: 1, 1: 2} is the same as
# leaving class weights at their default and setting sample weights
# to 2 for all instances of class 1
clf_cw_12 = LR(solver='lbfgs', fit_intercept=False,
class_weight={0: 1, 1: 2})
clf_cw_12.fit(X, y)
sample_weight = np.ones(y.shape[0])
sample_weight[y == 1] = 2
clf_sw_12 = LR(solver='lbfgs', fit_intercept=False)
clf_sw_12.fit(X, y, sample_weight=sample_weight)
assert_array_almost_equal(clf_cw_12.coef_, clf_sw_12.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
# Test that the path gives almost the same results. However, since in this
# case we take the average of the coefs after fitting across all the
# folds, it need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
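# Y is now a one-hot encoding of the class with the largest linear score X.dot(w.T)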
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
def test_logreg_cv_penalty():
# Test that the correct penalty is passed to the final fit.
X, y = make_classification(n_samples=50, n_features=20, random_state=0)
lr_cv = LogisticRegressionCV(penalty="l1", Cs=[1.0], solver='liblinear')
lr_cv.fit(X, y)
lr = LogisticRegression(penalty="l1", C=1.0, solver='liblinear')
lr.fit(X, y)
assert_equal(np.count_nonzero(lr_cv.coef_), np.count_nonzero(lr.coef_))
def test_logreg_predict_proba_multinomial():
X, y = make_classification(n_samples=10, n_features=20, random_state=0,
n_classes=3, n_informative=10)
# Predicted probabilities using the true-entropy loss should give a
# smaller loss than those using the ovr method.
clf_multi = LogisticRegression(multi_class="multinomial", solver="lbfgs")
clf_multi.fit(X, y)
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_ovr = LogisticRegression(multi_class="ovr", solver="lbfgs")
clf_ovr.fit(X, y)
clf_ovr_loss = log_loss(y, clf_ovr.predict_proba(X))
assert_greater(clf_ovr_loss, clf_multi_loss)
# Predicted probabilities using the soft-max function should give a
# smaller loss than those using the logistic function.
clf_multi_loss = log_loss(y, clf_multi.predict_proba(X))
clf_wrong_loss = log_loss(y, clf_multi._predict_proba_lr(X))
assert_greater(clf_wrong_loss, clf_multi_loss)
@ignore_warnings
def test_max_iter():
# Test that the maximum number of iterations is reached
X, y_bin = iris.data, iris.target.copy()
y_bin[y_bin == 2] = 0
solvers = ['newton-cg', 'liblinear', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for max_iter in range(1, 5):
for solver in solvers:
lr = LogisticRegression(max_iter=max_iter, tol=1e-15,
random_state=0, solver=solver)
lr.fit(X, y_bin)
assert_equal(lr.n_iter_[0], max_iter)
def test_n_iter():
# Test that self.n_iter_ has the correct format.
X, y = iris.data, iris.target
y_bin = y.copy()
y_bin[y_bin == 2] = 0
n_Cs = 4
n_cv_fold = 2
for solver in ['newton-cg', 'liblinear', 'sag', 'lbfgs']:
# OvR case
n_classes = 1 if solver == 'liblinear' else np.unique(y).shape[0]
clf = LogisticRegression(tol=1e-2, multi_class='ovr',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
n_classes = np.unique(y).shape[0]
clf = LogisticRegressionCV(tol=1e-2, multi_class='ovr',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
# multinomial case
n_classes = 1
if solver in ('liblinear', 'sag'):
break
clf = LogisticRegression(tol=1e-2, multi_class='multinomial',
solver=solver, C=1.,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes,))
clf = LogisticRegressionCV(tol=1e-2, multi_class='multinomial',
solver=solver, Cs=n_Cs, cv=n_cv_fold,
random_state=42, max_iter=100)
clf.fit(X, y)
assert_equal(clf.n_iter_.shape, (n_classes, n_cv_fold, n_Cs))
clf.fit(X, y_bin)
assert_equal(clf.n_iter_.shape, (1, n_cv_fold, n_Cs))
@ignore_warnings
def test_warm_start():
# A 1-iteration second fit on same data should give almost same result
# with warm starting, and quite different result without warm starting.
# Warm starting does not work with liblinear solver.
X, y = iris.data, iris.target
solvers = ['newton-cg', 'sag']
# old scipy doesn't have maxiter
if sp_version >= (0, 12):
solvers.append('lbfgs')
for warm_start in [True, False]:
for fit_intercept in [True, False]:
for solver in solvers:
for multi_class in ['ovr', 'multinomial']:
if solver == 'sag' and multi_class == 'multinomial':
break
clf = LogisticRegression(tol=1e-4, multi_class=multi_class,
warm_start=warm_start,
solver=solver,
random_state=42, max_iter=100,
fit_intercept=fit_intercept)
clf.fit(X, y)
coef_1 = clf.coef_
clf.max_iter = 1
with ignore_warnings():
clf.fit(X, y)
cum_diff = np.sum(np.abs(coef_1 - clf.coef_))
msg = ("Warm starting issue with %s solver in %s mode "
"with fit_intercept=%s and warm_start=%s"
% (solver, multi_class, str(fit_intercept),
str(warm_start)))
if warm_start:
assert_greater(2.0, cum_diff, msg)
else:
assert_greater(cum_diff, 2.0, msg)
| bsd-3-clause |
sunshineDrizzle/FreeROI | froi/widgets/unused/voxeltimepointcurvedialog.py | 6 | 4212 | __author__ = 'zhouguangfu'
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from PyQt4.QtCore import *
from PyQt4.QtGui import *
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QTAgg as NavigationToolbar
from matplotlib.ticker import MultipleLocator
class VoxelTimePointCurveDialog(QDialog):
"""
A dialog for displaying the voxel time point curve.
"""
def __init__(self, model, is_voxel=True, mask_row=None,parent=None):
super(VoxelTimePointCurveDialog, self).__init__(parent)
self._model = model
self._is_voxel = is_voxel
self._mask_row = mask_row
self._init_gui()
self._create_actions()
self._plot()
def _init_gui(self):
"""
Initialize GUI.
"""
# a figure instance to plot on
self.figure = plt.figure()
# this is the Canvas Widget that displays the `figure`
# it takes the `figure` instance as a parameter to __init__
self.canvas = FigureCanvas(self.figure)
# this is the Navigation widget,it takes the Canvas widget and a parent
self.toolbar = NavigationToolbar(self.canvas, self)
self.meanlabel = QLabel("Mean:")
self.varlabel = QLabel("Variance:")
self.save_data = QPushButton("Save Data")
# set the layout
layout = QVBoxLayout()
layout.addWidget(self.toolbar)
layout.addWidget(self.canvas)
hlayout = QHBoxLayout()
hlayout.addWidget(self.meanlabel)
hlayout.addWidget(self.varlabel)
hlayout.addWidget(self.save_data)
layout.addLayout(hlayout)
self.setLayout(layout)
def _create_actions(self):
''' create actions.'''
self._model.cross_pos_changed.connect(self._plot)
self.save_data.clicked.connect(self._save_data)
def _save_data(self):
"""
Save points data to a txt file.
"""
index = self._model.currentIndex()
temp_dir = str(QDir.currentPath())
filename = QFileDialog.getSaveFileName(
self,
'Save data as...',
temp_dir,
'Txt files (*.txt,*.csv)')
if not filename.isEmpty():
np.savetxt(str(filename), self.points, fmt="%f", delimiter="\n")
else:
print "Path is empty!"
def _plot(self):
''' plot time time point curve.'''
xyz = self._model.get_cross_pos()
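# note: the cross position appears to come back as (x, y, z) while the data
# model below is indexed as [y, x, z], hence the swapped first two coordinates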
if self._is_voxel:
self.points = self._model.get_current_value([xyz[1], xyz[0], xyz[2]],time_course=True)
if not self._model.data(self._model.currentIndex(),Qt.UserRole + 8):
self.points = np.array((self.points,))
else:
volume_data = self._model.data(self._model.currentIndex(),Qt.UserRole + 5)
mask_data = self._model.data(self._model.index(self._mask_row),Qt.UserRole + 5)
if self._model.data(self._model.currentIndex(),Qt.UserRole + 8):
self.points = volume_data[mask_data == self._model.get_row_value(
[xyz[1], xyz[0], xyz[2]],self._mask_row)].mean(axis=0)
else:
self.points = volume_data[mask_data == self._model.get_row_value(
[xyz[1], xyz[0], xyz[2]],self._mask_row)].mean()
self.points = np.array((self.points,))
self.meanlabel.setText("Mean:"+str(self.points.mean()))
self.varlabel.setText("Variance:"+str(self.points.var()))
# create an axis
ax = self.figure.add_subplot(111)
ax.hold(False)
ax.plot(self.points, '*-')
ax.xaxis.set_major_locator(MultipleLocator(2))
plt.xlabel("Time Point")
if(isinstance(self.points,np.ndarray)):
plt.xlim(0,self.points.shape[0])
else:
plt.xlim(0,1)
plt.ylabel("Intensity")
plt.grid()
self.canvas.draw()
def closeEvent(self, QCloseEvent):
self._model.cross_pos_changed.disconnect(self._plot)
| bsd-3-clause |
Kirubaharan/hydrology | ch_591/ch_591_wb_half_hourly.py | 2 | 39302 | __author__ = 'kiruba'
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import itertools
from spread import spread
from scipy.optimize import curve_fit
import math
from matplotlib import rc
import email.utils as eutils
import time
import datetime
from datetime import timedelta
import scipy as sp
import meteolib as met
import evaplib
from bisect import bisect_left
import matplotlib as mpl
import Pysolar as ps
# latex parameters
rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
rc('text', usetex=True)
plt.rc('text', usetex=True)
plt.rc('font', family='serif', size=18)
# Weather file
weather_file = '/media/kiruba/New Volume/ACCUWA_Data/weather_station/smgollahalli/corrected_weather.csv'
# Rain file
rain_file = '/media/kiruba/New Volume/ACCUWA_Data/weather_station/smgollahalli/corrected_rain.csv'
# convert to pandas dataframe
weather_df = pd.read_csv(weather_file, sep=',', header=0)
# set index
date_format = '%Y-%m-%d %H:%M:%S'
# print weather_df.columns.values[0]
# weather_df.columns.values[0] = 'Date_Time'
# print weather_df.head()
weather_df['Date_Time'] = pd.to_datetime(weather_df['Date_Time'], format=date_format)
weather_df.set_index(weather_df['Date_Time'], inplace=True)
# sort based on index
weather_df.sort_index(inplace=True)
# drop date time column
weather_df = weather_df.drop('Date_Time', 1)
# print weather_df.head()
# print weather_df['2014-06-30']
# print weather_df.head()
# Rain data frame
rain_df = pd.read_csv(rain_file, sep=',', header=0)
# set index
rain_df['Date_Time'] = pd.to_datetime(rain_df['Date_Time'], format=date_format)
rain_df.set_index(rain_df['Date_Time'], inplace=True)
# sort based on index
rain_df.sort_index(inplace=True)
# drop date time column
rain_df = rain_df.drop('Date_Time', 1)
# print rain_df.head()
"""
Check dam calibration
"""
# Polynomial fitting function
def polyfit(x, y, degree):
results = {}
coeffs = np.polyfit(x, y, degree)
results['polynomial'] = coeffs.tolist()
#r squared
p = np.poly1d(coeffs)
yhat = p(x)
ybar = np.sum(y)/len(y)
ssreg = np.sum((yhat-ybar)**2)
sstot = np.sum((y-ybar)**2)
results['determination'] = ssreg/sstot
return results
#check dam calibration values
y_cal = [10, 40, 100, 160, 225, 275, 300]
x_cal = [2036, 2458, 3025, 4078, 5156, 5874, 6198]
a_stage = polyfit(x_cal, y_cal, 1)
# coefficients of polynomial are stored in following list
coeff_cal = a_stage['polynomial']
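# note: coeff_cal = [slope, intercept] of the stage (cm) vs raw-reading line, so a
# raw logger value r converts as stage_cm = coeff_cal[0]*r + coeff_cal[1];
# read_correct_ch_dam_data() below applies exactly this and divides by 100 for metres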
def read_correct_ch_dam_data(csv_file):
"""
Function to read, calibrate and convert time format (day1 24:00:00
to day 2 00:00:00) in check dam data
:param csv_file:
:return: calibrated and time corrected data
"""
water_level = pd.read_csv(csv_file, skiprows=9, sep=',', header=0, names=['scan no', 'date', 'time', 'raw value', 'calibrated value'])
water_level['calibrated value'] = (water_level['raw value'] *coeff_cal[0]) + coeff_cal[1] #in cm
water_level['calibrated value'] /= 100 #convert to metre
# #change the column name
water_level.columns.values[4] = 'stage(m)'
# create date time index
format = '%d/%m/%Y %H:%M:%S'
c_str = ' 24:00:00'
for index, row in water_level.iterrows():
x_str = row['time']
if x_str == c_str:
# convert string to datetime object
r_date = pd.to_datetime(row['date'], format='%d/%m/%Y ')
# add 1 day
c_date = r_date + timedelta(days=1)
# convert datetime to string
c_date = c_date.strftime('%d/%m/%Y ')
c_time = ' 00:00:00'
water_level['date'][index] = c_date
water_level['time'][index] = c_time
water_level['date_time'] = pd.to_datetime(water_level['date'] + water_level['time'], format=format)
water_level.set_index(water_level['date_time'], inplace=True)
# # drop unnecessary columns before datetime aggregation
water_level.drop(['scan no', 'date', 'time', 'raw value', 'date_time'], inplace=True, axis=1)
return water_level
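# Minimal sketch of the midnight roll-over handled above (illustrative date only,
# not taken from the logger files): a row stamped '24:00:00' on 14/05/2014 is
# re-stamped as 00:00:00 on 15/05/2014.
_rollover_demo = pd.to_datetime('14/05/2014 ', format='%d/%m/%Y ') + timedelta(days=1)
# _rollover_demo -> Timestamp('2014-05-15 00:00:00')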
## Read check dam data
block_1 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_001.CSV'
water_level_1 = read_correct_ch_dam_data(block_1)
# print min(water_level_1.index), max(water_level_1.index)
# print water_level_1['stage(m)'][max(water_level_1.index)]
# print water_level_1.tail(20)
# water_level_1['stage(m)'][max(water_level_1.index)] = 0.5*(water_level_1['stage(m)'][max(water_level_1.index)])
block_2 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_002.CSV'
water_level_2 = read_correct_ch_dam_data(block_2)
# print water_level_2.head()
# print water_level_2.tail()
block_3 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_003.CSV'
water_level_3 = read_correct_ch_dam_data(block_3)
# print water_level_3.head()
# print water_level_3.tail()
block_4 = '/media/kiruba/New Volume/ACCUWA_Data/check_dam_water_level/2525_008_004.CSV'
water_level_4 = read_correct_ch_dam_data(block_4)
# print water_level_4.head()
# print water_level_4.tail()
water_level = pd.concat([water_level_1, water_level_2, water_level_3, water_level_4], axis=0)
# print water_level.head(20)
water_level = water_level['2014-05-14 18:30:00':'2014-09-10 23:30:00']
wl_05_30 = water_level['2014-06-24 00:00:00': '2014-06-24 23:30:00']
wl_05_31 = water_level['2014-06-25 00:00:00': '2014-06-25 23:30:00']
time_index = pd.date_range(start='00:00:00', end='23:30:00', freq='30min')
print time_index
print wl_05_30.head()
fig = plt.figure()
plt.plot(time_index, wl_05_30['stage(m)'], 'bo-', label='Negative Inflow')
plt.plot(time_index, wl_05_31['stage(m)'], 'ro-', label='No inflow ')
plt.legend()
# plt.title("May 30 and 31")
plt.annotate("Sharp Decline", xy=("06:30:00", 0.634), xycoords='data',
xytext=(-50,30), textcoords='offset points',
bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="->", connectionstyle='angle, angleA=0, angleB=90, rad=10', ec="k", shrinkA=5, shrinkB=0.1))
fig.autofmt_xdate(rotation=90)
plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/may_30_31_check')
plt.show()
wl_08_31 = water_level['2014-08-31 00:00:00': '2014-08-31 23:30:00']
wl_09_01 = water_level['2014-09-01 00:00:00': '2014-09-01 23:30:00']
wl_09_02 = water_level['2014-09-09 00:00:00': '2014-09-09 23:30:00']
time_index = pd.date_range(start='00:00:00', end='23:30:00', freq='30min')
print time_index
print wl_09_02.head()
fig = plt.figure()
# plt.plot(time_index, wl_09_01['stage(m)'], 'bo-', label='Negative Inflow')
plt.plot(time_index, wl_09_02['stage(m)'], 'ro-', label='Negative inflow ')
plt.legend()
plt.title("Sep 01 and 02")
plt.annotate("Sharp Decline", xy=("09:00:00", 1.794), xycoords='data',
xytext=(-50,30), textcoords='offset points',
bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="->", connectionstyle='angle, angleA=0, angleB=90, rad=10', ec="k", shrinkA=5, shrinkB=0.1))
plt.annotate("Sharp Decline", xy=("12:30:00", 1.744), xycoords='data',
xytext=(-50,30), textcoords='offset points',
bbox=dict(boxstyle="round", fc="0.8"),
arrowprops=dict(arrowstyle="->", connectionstyle='angle, angleA=0, angleB=90, rad=10', ec="k", shrinkA=5, shrinkB=0.1))
fig.autofmt_xdate(rotation=90)
plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/sep_01_02_check')
plt.show()
#
"""
Fill in missing values by time interpolation
"""
new_index = pd.date_range(start='2014-05-14 18:30:00', end='2014-09-10 23:30:00', freq='30min' )
water_level = water_level.reindex(new_index, method=None)
water_level = water_level.interpolate(method='time')
water_level.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/stage_30min.CSV')
"""
Join weather and rain data
"""
weather_df = weather_df.join(rain_df, how='right')
weather_df = weather_df['2014-05-14':'2014-09-10']
# print weather_df['2014-06-30']
# weather_df = weather_df[min(water_level.index): max(water_level.index)]
weather_df = weather_df.join(water_level, how='right')
# print weather_df['2014-06-30']
# print weather_df.head(20)
#
# """
# Evaporation from open water
# Equation according to J.D. Valiantzas (2006). Simplified versions
# for the Penman evaporation equation using routine weather data.
# J. Hydrology 331: 690-702. Following Penman (1948,1956). Albedo set
# at 0.06 for open water.
# Input (measured at 2 m height):
# - airtemp: (array of) average air temperatures [Celsius]
# - rh: (array of) average relative humidity [%]
# - airpress: (array of) average air pressure data [Pa]
# - Rs: (array of) incoming solar radiation [J/m2/day]
# - N: (array of) maximum daily sunshine hours [h]
# - Rext: (array of) daily extraterrestrial radiation [J/m2/day]
# - u: (array of) daily average wind speed at 2 m [m/s]
# - Z: (array of) site elevation [m a.s.l.], default is zero...
#
# Output:
# - E0: (array of) Penman open water evaporation values [mm/day]
#
# """
# """
# air pressure (Pa) = 101325(1-2.25577 10^-5 h)^5.25588
# h = altitude above sea level (m)
# http://www.engineeringtoolbox.com/air-altitude-pressure-d_462.html
# mean elevation over watershed = 803.441589 m
# Elevation at the check dam = 799 m
# """
# z = 799
# p = (1-(2.25577*(10**-5)*z))
# air_p_pa = 101325*(p**5.25588)
# # give air pressure value
# weather_df['AirPr(Pa)'] = air_p_pa
# """
# Half hourly Extraterrestrial Radiation Calculation(J/m2/30min)
# """
#
# SC_default = 1367.0 # Solar constant in W/m^2 is 1367.0.
#
#
# def extraterrestrial_irrad(local_datetime, latitude_deg, longitude_deg):
# """
# Calculates extraterrestrial radiation in MJ/m2/timeperiod
# :param local_datetime: datetime object
# :param latitude_deg: in decimal degree
# :param longitude_deg: in decimal degree
# :return: Extra terrestrial radiation in MJ/m2/timeperiod
# """
#
# S = 0.0820 # MJ m-2 min-1
# lat_rad = latitude_deg*(math.pi/180)
# day = ps.solar.GetDayOfYear(local_datetime)
# hour = float(local_datetime.hour)
# minute = float(local_datetime.minute)
# b = ((2*math.pi)*(day-81))/364
# sc = 0.1645*(math.sin(2*b)) - 0.1255*(math.cos(b)) - 0.025*(math.sin(b)) # seasonal correction in hour
# lz = 270 # for India longitude of local time zone in degrees west of greenwich
# lm = (180+(180-longitude_deg)) # longitude of measurement site
# t = (hour + (minute/60)) - 0.25
# t1 = 0.5 # 0.5 for 30 minute 1 for hourly period
# w = (math.pi/12)*((t + (0.0667*(lz-lm))+ sc) - 12)
# w1 = w - ((math.pi*t1)/24) # solar time angle at beginning of period [rad]
# w2 = w + ((math.pi*t1)/24) # solar time angle at end of period [rad]
# dr = 1 + (0.033*math.cos((2*math.pi*day)/365)) # inverse relative distance Earth-Sun
# dt = 0.409*math.sin(((2*math.pi*day)/365) - 1.39) # solar declination in radian
# ws = math.acos(-math.tan(lat_rad)*math.tan(dt))
# if (w > ws) or (w < -ws):
# Rext = 0.0
# else:
# Rext = ((12*60)/math.pi)*S*dr*(((w2-w1)*math.sin(lat_rad)*math.sin(dt))+(math.cos(lat_rad)*math.cos(dt)*(math.sin(w2) - math.sin(w1)))) # MJm-2(30min)-1
# return Rext
#
# ch_591_lat = 13.260196
# ch_591_long = 77.512085
# weather_df['Rext (MJ/m2/30min)'] = 0.000
# for i in weather_df.index:
# weather_df['Rext (MJ/m2/30min)'][i.strftime('%Y-%m-%d %H:%M:%S')] = (extraterrestrial_irrad(local_datetime=i,
# latitude_deg=ch_591_lat,
# longitude_deg=ch_591_long))
#
#
# # weather_df['Rext (MJ/m2/30min)'] =
# # weather_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/weather.csv')
# """
# wind speed from km/h to m/s
# 1 kmph = 0.277778 m/s
# """
# weather_df['Wind Speed (mps)'] = weather_df['Wind Speed (kmph)'] * 0.277778
# """
# Radiation unit conversion
# """
# # the radiation units are actually in W/m2 and
# # not in W/mm2 as given by weather station,
# # so multiply with 30*60 seconds
# # to convert to MJ divide by 10^6
# weather_df['Solar Radiation (MJ/m2/30min)'] = (weather_df['Solar Radiation (W/mm2)'] * 1800)/(10**6)
# """
# Average Temperature Calculation
# """
# weather_df['Average Temp (C)'] = 0.5*(weather_df['Min Air Temperature (C)'] + weather_df['Max Air Temperature (C)'])
#
# weather_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/weather.csv')
#
#
# """
# Open water evaporation function for half hour
# Modified from evaplib.py
# http://python.hydrology-amsterdam.nl/moduledoc/index.html#module-evaplib
# """
#
#
# def delta_calc(airtemp):
# """
# Calculates slope of saturation vapour pressure curve at air temperature [kPa/Celsius]
# http://www.fao.org/docrep/x0490e/x0490e07.htm
# :param airtemp: Temperature in Celsius
# :return: slope of saturation vapour pressure curve [kPa/Celsius]
# """
# l = sp.size(airtemp)
# if l < 2:
# temp_kelvin = airtemp + 237.3
# b = 0.6108*(math.exp((17.27*airtemp)/temp_kelvin))
# delta = (4098*b)/(temp_kelvin**2)
# else:
# delta = sp.zeros(l)
# for i in range(0, l):
# temp_kelvin = airtemp[i] + 237.3
# b = 0.6108*(math.exp(17.27*airtemp[i])/temp_kelvin)
# delta[i] = (4098*b)/(temp_kelvin**2)
# return delta
#
#
# def half_hour_E0(airtemp = sp.array([]),
# rh = sp.array([]),
# airpress = sp.array([]),
# Rs = sp.array([]),
# Rext = sp.array([]),
# u =sp.array([]),
# Z=0.0):
# """
# Function to calculate daily Penman open water evaporation (in mm/30min).
# Equation according to
# Shuttleworth, W. J. 2007. "Putting the 'Vap' into Evaporation."
# Hydrology and Earth System Sciences 11 (1): 210-44. doi:10.5194/hess-11-210-2007.
#
# :param airtemp: average air temperature [Celsius]
# :param rh: relative humidity[%]
# :param airpress: average air pressure[Pa]
# :param Rs: Incoming solar radiation [MJ/m2/30min]
# :param Rext: Extraterrestrial radiation [MJ/m2/30min]
# :param u: average wind speed at 2 m from ground [m/s]
# :param Z: site elevation, default is zero [metre]
# :return: Penman open water evaporation values [mm/30min]
# """
# # Set constants
# albedo = 0.06 # open water albedo
# # Stefan boltzmann constant = 5.670373*10-8 J/m2/k4/s
# # http://en.wikipedia.org/wiki/Stefan-Boltzmann_constant
# # sigma = 5.670373*(10**-8) # J/m2/K4/s
# sigma = (1.02066714*(10**-10)) #Stefan Boltzmann constant MJ/m2/K4/30min
# # Calculate Delta, gamma and lambda
# DELTA = delta_calc(airtemp) # [Kpa/C]
# # Lambda = met.L_calc(airtemp)/(10**6) # [MJ/Kg]
# # gamma = met.gamma_calc(airtemp, rh, airpress)/1000
# # Lambda = 2.501 -(0.002361*airtemp) # [MJ/kg]
# # gamma = (0.0016286 *(airpress/1000))/Lambda
# # Calculate saturated and actual water vapour pressure
# es = met.es_calc(airtemp) # [Pa]
# ea = met.ea_calc(airtemp,rh) # [Pa]
# #Determine length of array
# l = sp.size(airtemp)
# #Check if we have a single value or an array
# if l < 2:
# Lambda = 2.501 -(0.002361*airtemp) # [MJ/kg]
# gamma = (0.0016286 *(airpress/1000))/Lambda
# Rns = (1.0 - albedo)* Rs # shortwave component [MJ/m2/30min]
# #calculate clear sky radiation Rs0
# Rs0 = (0.75+(2E-5*Z))*Rext
# f = (1.35*(Rs/Rs0))-0.35
# epsilom = 0.34-(-0.14*sp.sqrt(ea/1000))
# Rnl = f*epsilom*sigma*(airtemp+273.16)**4 # Longwave component [MJ/m2/30min]
# Rnet = Rns - Rnl
# Ea = (1 + (0.536*u))*((es/1000)-(ea/1000))
# E0 = ((DELTA*Rnet) + gamma*(6.43*Ea))/(Lambda*(DELTA+gamma))
# else:
# # Inititate output array
# E0 = sp.zeros(l)
# Rns = sp.zeros(l)
# Rs0 = sp.zeros(l)
# f = sp.zeros(l)
# epsilom = sp.zeros(l)
# Rnl = sp.zeros(l)
# Rnet = sp.zeros(l)
# Ea = sp.zeros(l)
# Lambda = sp.zeros(l)
# gamma = sp.zeros(l)
# for i in range(0,l):
# Lambda[i] = 2.501 -(0.002361*airtemp[i])
# gamma[i] = (0.0016286 *(airpress[i]/1000))/Lambda[i]
# # calculate longwave radiation (MJ/m2/30min)
# Rns[i] = (1.0 - albedo) * Rs[i]
# # calculate clear sky radiation Rs0
# Rs0[i] = (0.75 + (2E-5*Z))
# f[i] = (1.35*(Rs[i]/Rs0[i]))-0.35
# epsilom[i] = 0.34-(-0.14*sp.sqrt(ea[i]/1000))
# Rnl[i] = f[i]*epsilom[i]*sigma*(airtemp[i]+273.16)**4 # Longwave component [MJ/m2/30min]
# Rnet[i] = Rns[i] - Rnl[i]
# Ea[i] = (1 + (0.536*u[i]))*((es[i]/1000)-(ea[i]/1000))
# E0[i] = ((DELTA[i]*Rnet[i]) + gamma[i]*(6.43*Ea[i]))/(Lambda[i]*(DELTA[i]+gamma[i]))
# return E0
#
#
# """
# Half hourly Evaporation calculation
# """
# airtemp = weather_df['Average Temp (C)']
# hum = weather_df['Humidity (%)']
# airpress = weather_df['AirPr(Pa)']
# rs = weather_df['Solar Radiation (MJ/m2/30min)']
# rext = weather_df['Rext (MJ/m2/30min)']
# wind_speed = weather_df['Wind Speed (mps)']
# weather_df['Evaporation (mm/30min)'] = half_hour_E0(airtemp=airtemp, rh=hum, airpress=airpress,
# Rs=rs, Rext=rext, u=wind_speed, Z=z)
# """
# Plot Evaporation
# """
# fig = plt.figure(figsize=(11.69, 8.27))
# plt.plot_date(weather_df.index, weather_df['Evaporation (mm/30min)'], '-g', label='Evaporation (mm/30min)')
# plt.ylabel(r'\textbf{Evaporation ($mm/30min$)}')
# fig.autofmt_xdate(rotation=90)
# plt.title(r"Daily Evaporation for Check Dam - 591", fontsize=20)
# plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/evaporation_591_30min')
# # bar plots
# weather_sel_df = weather_df['2014-05-20':'2014-05-22']
# fig = plt.figure(figsize=(11.69, 8.27))
# plt.plot_date(weather_sel_df.index, weather_sel_df['Evaporation (mm/30min)'], '-g')
# fig.autofmt_xdate(rotation=90)
# plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/evaporation_591_may20_22')
# # plt.show()
# """
# Remove Duplicates
# """
# # check for duplicates
# # df2 = dry_weather.groupby(level=0).filter(lambda x: len(x) > 1)
# # print(df2)
# weather_df['index'] = weather_df.index
# weather_df.drop_duplicates(subset='index', take_last=True, inplace=True)
# del weather_df['index']
# weather_df = weather_df.sort()
"""
Stage Volume relation estimation from survey data
"""
# necessary functions
def pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2,s3), ..."""
a, b = itertools.tee(iterable)
next(b, None)
return itertools.izip(a, b)
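# quick illustration of pairwise(), which is used in the (currently commented-out)
# daily change-in-storage loop further down:
assert list(pairwise([1, 2, 3, 4])) == [(1, 2), (2, 3), (3, 4)]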
"""
Select data where stage is available
"""
weather_stage_avl_df = weather_df[min(water_level.index):max(water_level.index)]
"""
Convert observed stage to volume by linear interpolation
"""
# set stage as index
stage_vol_df = pd.read_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/stage_vol.csv',
sep=',', header=0, names=['sno', 'stage_m', 'total_vol_cu_m'])
# print stage_vol_df
stage_vol_df.drop('sno', inplace=True, axis=1)
stage_vol_df.set_index(stage_vol_df['stage_m'], inplace=True)
# function to find containing intervals
def find_range(array, ab):
if ab < max(array):
start = bisect_left(array, ab)
return array[start-1], array[start]
else:
return min(array), max(array)
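# quick illustration with made-up breakpoints: an observed stage of 0.7 m falls
# between the surveyed stages 0.5 and 1.0; a value at or above the last
# breakpoint falls back to the full (min, max) survey range
assert find_range([0.0, 0.5, 1.0, 1.5], 0.7) == (0.5, 1.0)
assert find_range([0.0, 0.5, 1.0, 1.5], 2.0) == (0.0, 1.5)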
# print weather_stage_avl_df.head()
# water_balance_df = weather_stage_avl_df[['Rain Collection (mm)', 'Evaporation (mm/30min)', 'stage(m)']]
water_balance_df = weather_stage_avl_df[['Rain Collection (mm)', 'stage(m)']]
# print find_range(stage_vol_df['stage_m'].tolist(), max(water_balance_df['stage(m)']))
water_balance_df['volume (cu.m)'] = 0.000
for index, row in water_balance_df.iterrows():
# print index
obs_stage = row['stage(m)'] # observed stage
x1, x2 = find_range(stage_vol_df['stage_m'].tolist(), obs_stage)
x_diff = x2-x1
y1 = stage_vol_df['total_vol_cu_m'][x1]
y2 = stage_vol_df['total_vol_cu_m'][x2]
y_diff = y2 - y1
slope = y_diff/x_diff
y_intercept = y2 - (slope*x2)
water_balance_df['volume (cu.m)'][index.strftime('%Y-%m-%d %H:%M:%S')] = (slope*obs_stage) + y_intercept
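# The loop above is a piecewise-linear lookup between the surveyed (stage, volume)
# pairs. An equivalent vectorised sketch (assuming stage_vol_df is sorted by stage)
# would be:
# water_balance_df['volume (cu.m)'] = np.interp(water_balance_df['stage(m)'],
#                                               stage_vol_df['stage_m'],
#                                               stage_vol_df['total_vol_cu_m'])
# except that np.interp clamps outside the surveyed range, whereas find_range()
# falls back to a line through the overall (min, max) survey points for stages
# above the last breakpoint.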
# # fig = plt.figure(figsize=(11.69, 8.27))
# # plt.plot_date(water_balance_df.index, water_balance_df['volume (cu.m)'], '-g')
# # plt.hlines(stage_vol_df['total_vol_cu_m'][1.9], min(water_balance_df.index), max(water_balance_df.index))
# # plt.title('before overflow correction')
water_balance_df['diff_volume_cu.m'] = 0.000
for index, row in water_balance_df.iterrows():
if index > min(water_balance_df.index):
previous_time = index - timedelta(seconds=1800)
v1 = water_balance_df['volume (cu.m)'][previous_time.strftime('%Y-%m-%d %H:%M:%S')]
v2 = water_balance_df['volume (cu.m)'][index.strftime('%Y-%m-%d %H:%M:%S')]
water_balance_df['diff_volume_cu.m'][index.strftime('%Y-%m-%d %H:%M:%S')] = v1 - v2
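# The same backward difference could be written with pandas directly, e.g.
# water_balance_df['diff_volume_cu.m'] = -water_balance_df['volume (cu.m)'].diff().fillna(0.0)
# (sign flipped so that a drop in stored volume shows up as a positive value)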
vol_df = water_balance_df[water_balance_df['diff_volume_cu.m'] > 15]
# fig = plt.figure()
# plt.plot(water_balance_df.index, water_balance_df['diff_volume_cu.m'], 'b-')
# fig.autofmt_xdate(rotation=90)
# plt.show()
vol_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/pumping_volume.csv')
# """
# Overflow
# """
# full_vol = stage_vol_df['total_vol_cu_m'][1.9]
# # print full_vol
# water_balance_df['overflow(cu.m)'] = 0.000
# for index, row in water_balance_df.iterrows():
# obs_vol = row['volume (cu.m)']
# if obs_vol > full_vol:
# # print obs_vol
# water_balance_df['overflow(cu.m)'][index.strftime('%Y-%m-%d %H:%M:%S')] = obs_vol - full_vol
# water_balance_df['volume (cu.m)'][index.strftime('%Y-%m-%d %H:%M:%S')] = full_vol
# water_balance_df['stage(m)'][index.strftime('%Y-%m-%d %H:%M:%S')] = 1.9
#
# # start from May 15
# water_balance_df = water_balance_df["2014-05-15":]
# # print water_balance_df.head()
#
# # print ch_storage_df.head()
#
# # fig = plt.figure(figsize=(11.69, 8.27))
# # plt.plot_date(water_balance_df.index, water_balance_df['volume (cu.m)'], '-g')
# # plt.hlines(stage_vol_df['total_vol_cu_m'][1.9], min(water_balance_df.index), max(water_balance_df.index))
# # plt.title('after overflow correction')
# """
# Stage vs area linear relationship
# """
# stage_area_df = pd.read_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/cont_area.csv',
# sep=',', header=0, names=['sno', 'stage_m', 'total_area_sq_m'])
# stage_area_df.drop('sno', inplace=True, axis=1)
# # set stage as index
# stage_area_df.set_index(stage_area_df['stage_m'], inplace=True)
# # print max(water_balance_df['stage(m)'])
# # print find_range(stage_area_df['stage_m'].tolist(), max(water_balance_df['stage(m)']))
# #create empty column
# water_balance_df['ws_area(sq.m)'] = 0.000
# for index, row in water_balance_df.iterrows():
# obs_stage = row['stage(m)'] # observed stage
# x1, x2 = find_range(stage_area_df['stage_m'].tolist(), obs_stage)
# x_diff = x2-x1
# y1 = stage_area_df['total_area_sq_m'][x1]
# y2 = stage_area_df['total_area_sq_m'][x2]
# y_diff = y2 - y1
# slope = y_diff/x_diff
# y_intercept = y2 - (slope*x2)
# water_balance_df['ws_area(sq.m)'][index.strftime('%Y-%m-%d %H:%M:%S')] = (slope*obs_stage) + y_intercept
# """
# Evaporation Volume estimation
# """
# water_balance_df['Evaporation (cu.m)'] = (water_balance_df['Evaporation (mm/30min)'] * 0.001) * water_balance_df['ws_area(sq.m)']
# # start from May 15
# """
# Daily Totals of Rain, Evaporation, Overflow
# """
# sum_df = water_balance_df[['Rain Collection (mm)', 'Evaporation (cu.m)', 'Evaporation (mm/30min)', 'overflow(cu.m)']]
# sum_df = sum_df.resample('D', how=np.sum)
# # print sum_df.head(10)
# """
# Daily average of Stage
# """
# stage_df = water_balance_df[['stage(m)', 'ws_area(sq.m)']]
# stage_df = stage_df.resample('D', how=np.mean)
# # print stage_df.head()
# water_balance_daily_df = sum_df.join(stage_df, how='left')
# print water_balance_daily_df.head(10)
# """
# Change in storage
# """
# # separate out 23:30 readings
# hour = water_balance_df.index.hour
# minute = water_balance_df.index.minute
# ch_storage_df = water_balance_df[['volume (cu.m)']][((hour == 23) & (minute == 30))]
# ch_storage_df = ch_storage_df.resample('D', how=np.mean)
# water_balance_daily_df['change_storage(cu.m)'] = 0.000
# # print water_balance_daily_df.head()
# for d1, d2 in pairwise(ch_storage_df.index):
# if d2 > d1:
# diff = (d2-d1).days
# if diff == 1:
# d1_storage = ch_storage_df['volume (cu.m)'][d1.strftime('%Y-%m-%d')]
# d2_storage = ch_storage_df['volume (cu.m)'][d2.strftime('%Y-%m-%d')]
# water_balance_daily_df['change_storage(cu.m)'][d2.strftime('%Y-%m-%d')] = d2_storage - d1_storage
#
# # print water_balance_daily_df.head()
#
# # new_index = pd.date_range(start='2014-05-15', end='2014-09-10', freq='D' )
# # print new_index
# # print ch_storage_df.head()
# """
# Separate out no inflow/ non rainy days
# two continuous days of no rain
# """
# water_balance_daily_df['status'] = "Y"
# # water_balance_daily_df['total_outflow (cu.m)'] = water_balance_daily_df['Evaporation (cu.m)'] + water_balance_daily_df['overflow(cu.m)']
# # dry_water_balance_df = water_balance_daily_df[water_balance_daily_df['total_outflow (cu.m)'] > water_balance_daily_df['change_storage(cu.m)']]
# # rain_water_balance_df = water_balance_daily_df[water_balance_daily_df['total_outflow (cu.m)'] < water_balance_daily_df['change_storage(cu.m)']]
# no_rain_df = water_balance_daily_df[water_balance_daily_df['Rain Collection (mm)'] == 0]
# # no_rain_df['status'] = "Y"
# for index in no_rain_df.index:
# initial_time_stamp = pd.to_datetime("2014-05-16", format="%Y-%m-%d")
# if index > initial_time_stamp:
# start_date = index - timedelta(days=2)
# two_days_rain_df = water_balance_daily_df['Rain Collection (mm)'][start_date.strftime('%Y-%m-%d'):index.strftime('%Y-%m-%d')]
# sum_df = two_days_rain_df.sum(axis=0)
# if sum_df == 0:
# water_balance_daily_df['status'][index.strftime('%Y-%m-%d')] = "N"
#
# # print no_rain_df.head()
# water_balance_daily_df.to_csv("/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/water_bal.csv")
# no_rain_df.to_csv("/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/no_rain_df.csv")
# dry_water_balance_df = water_balance_daily_df[water_balance_daily_df['status'] == "N"]
# rain_water_balance_df = water_balance_daily_df[water_balance_daily_df['status'] == "Y"]
# # print dry_water_balance_df.head()
# # print rain_water_balance_df.head()
# # # b = plot_date(dry_water_balance_df, 'change_storage(cu.m)')
# """
# # Calculate infiltration
# # """
# # # calculate infiltration
# # dry_water_balance_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/dry_wb_check.CSV')
# # # print dry_water_balance_df.head()
# dry_water_balance_df['infiltration(cu.m)'] = 0.000
# delta_s = water_balance_daily_df['change_storage(cu.m)']
# evap = water_balance_daily_df['Evaporation (cu.m)']
# outflow = water_balance_daily_df['overflow(cu.m)']
# # for t1, t2 in pairwise(dry_water_balance_df.index):
# # diff = abs((t2-t1).seconds)
# # if diff == 1800:
# # # print t1, t2
# # dry_water_balance_df['infiltration(cu.m)'][t1.strftime('%Y-%m-%d %H:%M:%S')] = -1*(delta_s[t2.strftime('%Y-%m-%d %H:%M:%S')] + evap[t2.strftime('%Y-%m-%d %H:%M:%S')] + outflow[t2.strftime('%Y-%m-%d %H:%M:%S')])
# for index, row in dry_water_balance_df.iterrows():
# if index > min(dry_water_balance_df.index):
# t_1 = index - timedelta(days=1)
# if t_1 < max(dry_water_balance_df.index):
# diff = abs((index-t_1).days)
# if diff == 1:
# # print index
# # print t_1
# dry_water_balance_df['infiltration(cu.m)'][index.strftime('%Y-%m-%d')] = -1.0*(delta_s[index.strftime('%Y-%m-%d')] + evap[t_1.strftime('%Y-%m-%d')] + outflow[t_1.strftime('%Y-%m-%d')])
# # print row
# #
# #
# # dry_water_balance_df['infiltration(cu.m)'] = -1.0*(evap + outflow + delta_s)
# # # print dry_water_balance_df.head()
# # # fig = plt.figure(figsize=(11.69, 8.27))
# # # plt.plot(dry_water_balance_df['average_stage_m'], dry_water_balance_df['infiltration(cu.m)'], 'bo')
# # # plt.show()
# dry_water_balance_df['infiltration rate (m/day)'] = dry_water_balance_df['infiltration(cu.m)']/dry_water_balance_df['ws_area(sq.m)']
# print dry_water_balance_df.head(10)
# """
# Dry infiltration vs rainfall
# """
# fig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(11.69, 8.27))
# # fig.subplots_adjust(right=0.8)
# line1 = ax1.bar(water_balance_daily_df.index, water_balance_daily_df['Rain Collection (mm)'], 0.35, label=r'Rainfall(mm)')
# plt.gca().invert_yaxis()
# ax1.xaxis.tick_bottom()
# ax1.yaxis.tick_left()
# for t1 in ax1.get_yticklabels():
# t1.set_color('b')
# # plt.legend(loc='upper left')
# ax2 = ax1.twinx()
# cmap, norm = mpl.colors.from_levels_and_colors([0, 0.05, 1, 1.5, 2.0], ['red', 'yellow', 'green', 'blue'])
# line2 = ax2.scatter(dry_water_balance_df.index, dry_water_balance_df['infiltration(cu.m)'], label='Infiltration (cu.m)', c=dry_water_balance_df['stage(m)'], cmap=cmap, norm=norm)
# plt.hlines(0, min(dry_water_balance_df.index), max(dry_water_balance_df.index))
# ax2.xaxis.tick_bottom()
# ax2.yaxis.tick_right()
# for t1 in ax2.get_yticklabels():
# t1.set_color('r')
# # # plt.legend(loc='upper right')
# # # fig.autofmt_xdate(rotation=90)
# # # fig.subplots_adjust(right=0.8)
# # ax3 = ax2.twiny()
# # line3 = ax3.line(water_balance_daily_df.index, water_balance_daily_df['Evaporation (cu.m)'], 0.35, 'g', alpha=0.5, label='Evaporation (cu.m)' )
# # ax3.tick_params(axis='x',
# # which='both',
# # top='off',
# # bottom='off',
# # labeltop='off')
# # # ax3.xaxis.tick_bottom()
# # ax3.yaxis.tick_right()
# fig.autofmt_xdate(rotation=90)
# # lns = line1+line3
# # labs = [l.get_label() for l in lns]
# # ax3.legend(lns, labs, loc='upper center', fancybox=True, ncol=3, bbox_to_anchor=(0.5, 1.15))
# # ax3.set_xlim([min(dry_water_balance_df.index), max(dry_water_balance_df.index)])
# fig.subplots_adjust(right=0.8)
# cbar_ax = fig.add_axes([0.85, 0.50, 0.05, 0.3]) #first one distance from plot, second height
# # cax, kw = mpl.colorbar.make_axes([ax for ax in ax1.flat()])
# cbar = fig.colorbar(line2, cax=cbar_ax)
# cbar.ax.set_ylabel('Stage (m)')
# plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/dry_rain_infiltration_stage_591_30min')
# # plt.show()
# """
# Fitting exponential function
# """
# # dry_water_balance_df = dry_water_balance_df[dry_water_balance_df['stage(m)'] > 0.1]
# stage_cal = dry_water_balance_df['stage(m)']
# # stage_cal = dry_water_balance_df['average_stage_m']
# inf_cal = dry_water_balance_df['infiltration rate (m/day)']
# # print dry_water_balance_df.shape
#
# # fig = plt.figure(figsize=(11.69, 8.27))
# # plt.plot(stage_cal, inf_cal, 'bo', label=r'Observation')
# # plt.vlines(1.9, 0, max(inf_cal), 'g')
# # plt.hlines(0, min(stage_cal), max(stage_cal), 'y')
# # plt.legend(loc='upper left')
# # plt.xlabel(r'\textbf{Stage} (m)')
# # plt.ylabel(r'\textbf{Infiltration} ($m^3/day$)')
# # plt.title(r"Stage - Infiltration relationship during no inflow for 591 check dam")
# # plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/infiltration_stage_591_30min')
# # plt.show()
#
# def func(h, alpha, beta):
# return (alpha*(h**beta))
#
# popt, pcov = curve_fit(f=func, xdata=stage_cal, ydata=inf_cal)
#
# # print popt
# # print pcov
# # # print np.diag(pcov)
# # print np.sqrt(np.diag(pcov))
# ###Log plot
#
#
# def polyfit(x, y, degree):
# results = {}
# coeffs = np.polyfit(x, y, degree)
# results['polynomial'] = coeffs.tolist()
# #r squared
# p = np.poly1d(coeffs)
# yhat = p(x)
# ybar = np.sum(y)/len(y)
# ssreg = np.sum((yhat-ybar)**2)
# sstot = np.sum((y-ybar)**2)
# results['determination'] = ssreg/sstot
# return results
#
# # plot
# stage_cal_new = np.linspace(min(stage_cal), max(stage_cal), 50)
# inf_cal_new = func(stage_cal_new, *popt)
# fig = plt.figure(figsize=(11.69, 8.27))
# plt.plot(stage_cal, inf_cal, 'bo', label=r'Observation')
# plt.plot(stage_cal_new, inf_cal_new, 'r-', label='Prediction')
# plt.vlines(1.9, 0, max(inf_cal), 'g')
# plt.hlines(0, min(stage_cal), max(stage_cal), 'y')
# plt.legend(loc='upper left')
# plt.xlabel(r'\textbf{Stage} (m))')
# plt.ylabel(r'\textbf{Infiltration Rate} ($m/day$)')
# plt.title(r"No inflow day's stage - infiltration relationship for 591 check dam")
# plt.text(x=0.75, y=.03, fontsize=15, s=r'$Infiltration = {0:.2f}{{h_{{av}}}}^{{{1:.2f}}}$'.format(popt[0], popt[1]))
# plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/stage_inf_exp_dry_591_30min')
#
#
#
#
#
# # plt.show()
# # print dry_water_balance_df
# # print dry_water_balance_df[dry_water_balance_df['infiltration(cu.m)'] < 0]
# # plot rainfall vs stage
# #
# # fig, ax1 = plt.subplots(figsize=(11.69, 8.27))
# # ax1.bar(water_balance_daily_df.index, water_balance_daily_df['Rain Collection (mm)'], 0.35, color='b', label=r'Rainfall(mm)')
# # plt.gca().invert_yaxis()
# # for t1 in ax1.get_yticklabels():
# # t1.set_color('b')
# # ax1.set_ylabel('Rainfall(mm)')
# # plt.legend(loc='upper left')
# # ax2 = ax1.twinx()
# # ax2.plot_date(water_balance_daily_df.index, water_balance_daily_df['stage(m)'], 'r', label='stage (m)')
# # for t1 in ax2.get_yticklabels():
# # t1.set_color('r')
# # plt.legend(loc='upper right')
# # fig.autofmt_xdate(rotation=90)
# # plt.show()
#
# """
# Rainy day infiltration
# """
# rain_water_balance_df['infiltration(cu.m)'] = (popt[0]*(rain_water_balance_df['stage(m)']**popt[1]))* rain_water_balance_df['ws_area(sq.m)']
# fig = plt.figure(figsize=(11.69, 8.27))
# plt.plot(rain_water_balance_df['stage(m)'], rain_water_balance_df['infiltration(cu.m)'], 'bo', label='Predicted Infiltration' )
# plt.vlines(1.9, 0, 100, 'g')
# # plt.xlim([-1, 2.0])
# plt.legend(loc='upper left')
# plt.xlabel(r'\textbf{Stage} (m)')
# plt.ylabel(r'\textbf{Infiltration} ($m^3/day$)')
# plt.title(r"Inflow day's stage - infiltration relationship for 591 check dam")
# plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/rain_inf_591_30min')
#
# # plt.show()
#
# """
# Inflow calculation
# """
# rain_water_balance_df['Inflow (cu.m)'] = 0.000
# dry_water_balance_df['Inflow (cu.m)'] = 0.000
# delta_s_rain = rain_water_balance_df['change_storage(cu.m)']
# inf_rain = rain_water_balance_df['infiltration(cu.m)']
# evap_rain = rain_water_balance_df['Evaporation (cu.m)']
# outflow_rain = rain_water_balance_df['overflow(cu.m)']
# for i in rain_water_balance_df.index:
# rain_water_balance_df['Inflow (cu.m)'][i.strftime("%Y-%m-%d")] = (delta_s_rain[i.strftime("%Y-%m-%d")] + inf_rain[i.strftime("%Y-%m-%d")] + evap_rain[i.strftime("%Y-%m-%d")] + outflow_rain[i.strftime("%Y-%m-%d")])
#
# fig = plt.figure(figsize=(11.69, 8.27))
# plt.plot(rain_water_balance_df['Rain Collection (mm)'], rain_water_balance_df['Inflow (cu.m)'], 'bo', label='Predicted Inflow' )
# # # plt.vlines(1.9, 0, 100, 'g')
# # # plt.xlim([-1, 2.0])
# # # plt.legend(loc='upper left')
# plt.xlabel(r'\textbf{Rainfall} (mm)')
# plt.ylabel(r'\textbf{Inflow} ($m^3/day$)')
# plt.title(r"Inflow day's Rainfall-Inflow relationship for 591 check dam")
# plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/rain_inflow_591_30min')
#
# """
# Inflow vs Rainfall
# """
# fig, ax1 = plt.subplots(nrows=1, ncols=1, figsize=(11.69, 8.27))
# # fig.subplots_adjust(right=0.8)
# line1 = ax1.bar(rain_water_balance_df.index, rain_water_balance_df['Rain Collection (mm)'], 0.45, label=r'Rainfall(mm)')
# plt.gca().invert_yaxis()
# ax1.xaxis.tick_bottom()
# ax1.yaxis.tick_left()
# for t1 in ax1.get_yticklabels():
# t1.set_color('b')
# # plt.legend(loc='upper left')
# ax2 = ax1.twinx()
# line2 = ax2.bar(rain_water_balance_df.index, rain_water_balance_df['Inflow (cu.m)'], 0.45, color='r', label=r'\textbf{Inflow ($m^3/day$)}')
# plt.hlines(0, min(rain_water_balance_df.index), max(rain_water_balance_df.index))
# ax2.xaxis.tick_bottom()
# ax2.yaxis.tick_right()
# for t1 in ax2.get_yticklabels():
# t1.set_color('r')
# lns = [line1, line2]
# labs = [r'\textbf{Rainfall(mm)}', r'\textbf{Inflow ($m^3/day$)}']
# ax2.legend(lns, labs, loc='upper center', fancybox=True, ncol=3, bbox_to_anchor=(0.5, 1.15))
# fig.autofmt_xdate(rotation=90)
# plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/rain_inflow_591_30min')
# # plt.show()
# # plt.show()
#
# merged_water_balance = pd.concat([dry_water_balance_df, rain_water_balance_df])
# # pd.PeriodIndex(ch_storage_df.index, freq='D')
# merged_water_balance = merged_water_balance.join(ch_storage_df, how='left')
# merged_water_balance.sort_index(inplace=True)
# dry_water_balance_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/dry_wb_30min.CSV')
# rain_water_balance_df.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/rain_wb_30min.CSV')
# merged_water_balance.to_csv('/media/kiruba/New Volume/ACCUWA_Data/Checkdam_water_balance/591/wb_30min.CSV')
# """
# Evaporation vs infiltration
# """
# fig, ax1 = plt.subplots(figsize=(11.69, 8.27))
# line1 = ax1.bar(merged_water_balance.index, merged_water_balance['Evaporation (cu.m)'], 0.45, color='r', label=r"\textbf{Evaporation ($m^3/day$)}")
# # plt.title("Evaporation vs Infiltration for Check dam 591")
# for t1 in ax1.get_yticklabels():
# t1.set_color('r')
# ax2 = ax1.twiny()
# line2 = ax2.bar(merged_water_balance.index, merged_water_balance['infiltration(cu.m)'], 0.45, color='g', alpha=0.5, label=r"\textbf{Infiltration ($m^3/day$}")
# for t1 in ax2.get_yticklabels():
# t1.set_color('g')
# lns = [line1, line2]
# lab = [r"\textbf{Evaporation ($m^3/day$)}", r"\textbf{Infiltration ($m^3/day$}" ]
# # ax2.legend(lns, lab, loc='upper center', fancybox=True, ncol=2, bbox_to_anchor=(0.5, 1.15))
# fig.autofmt_xdate(rotation=90)
# plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/evap_infilt_591_30min')
# # plt.show()
# """
# half hour stage vs daily inflow
# """
# # print weather_df.head()
# # print merged_water_balance.head() # Inflow (cu.m)
# stage_30min_df = weather_df[['stage(m)']]
# fig1, ax1 = plt.subplots(figsize=(11.69, 8.27))
# line1 = ax1.plot(weather_df.index, weather_df['Rain Collection (mm)'],'r-')
# ax2 = ax1.twinx()
# line2 = ax2.plot(stage_30min_df.index, stage_30min_df['stage(m)'], 'b-')
# fig1.autofmt_xdate(rotation=90)
# plt.savefig('/media/kiruba/New Volume/ACCUWA_Data/python_plots/check_dam_591/inflow_stage_591_30min')
# plt.show(fig1) | gpl-3.0 |
bkimmig/fiery_llama | scripts/basic_phot_filter.py | 1 | 2542 | import numpy as np
import pandas as pd
import argparse
from astropy.io import fits
from fiery_llama.matched_filters import PointFilter, cubeify
parser = argparse.ArgumentParser()
help_data = "Must be .h5 or .fits file type."
parser.add_argument("data", help=help_data)
table_help = "if .h5 file provide table name"
parser.add_argument("--data-table", help=table_help)
help_signal = "Must be .h5 or .fits file type."
parser.add_argument("signal", help=help_signal)
parser.add_argument("--signal-table", help=table_help)
parser.add_argument("--nra", default=100)
parser.add_argument("--ndec", default=100)
_help = "the columns to filter on, if not given defaults to all filter columns"
parser.add_argument("--signal-columns", nargs="*", help=_help)
parser.add_argument("--create-image")
if __name__ == "__main__":
args = parser.parse_args()
if args.data_table is not None:
data = pd.read_hdf(args.data, args.data_table)
else:
hdul = fits.open(args.data)
data = pd.DataFrame(hdul[1].data)
if args.signal_table is not None:
signal_pts = pd.read_hdf(args.signal, args.signal_table)
else:
hdul = fits.open(args.signal)
signal_pts = pd.DataFrame(hdul[1].data)
signal_columns = args.signal_columns
if signal_columns is None:
signal_columns = signal_pts.columns
signal_filter = PointFilter(
signal_pts,
filtered_columns=signal_columns,
sigma_vec=np.repeat(0.2, len(signal_columns)))
dsr = data
weights = signal_filter.get_weights(dsr)
dsr['weights'] = weights
dsr.to_hdf('output.h5', 'photometry')
if args.create_image is not None:
out_img = cubeify(
dsr,
n=(int(args.nra), int(args.ndec)),
columns=['RA', 'DEC'],
target='weights')
cards = [
# fits.Card(keyword='NAXIS1', value=out_img.shape[0]),
# fits.Card(keyword='NAXIS2', value=out_img.shape[1]),
fits.Card(keyword='RAINMIN', value=np.min(dsr['RA'])),
fits.Card(keyword='RAINMAX', value=np.max(dsr['RA'])),
fits.Card(keyword='DECINMIN', value=np.min(dsr['DEC'])),
fits.Card(keyword='DECINMAX', value=np.max(dsr['DEC'])),
]
header = fits.Header(cards=cards)
primary_hdu = fits.PrimaryHDU(header=header)
img_hdu = fits.ImageHDU(out_img)
hdulist = fits.HDUList(
[primary_hdu, img_hdu]
)
hdulist.writeto(args.create_image)
| mit |
hitszxp/scikit-learn | sklearn/metrics/tests/test_common.py | 2 | 42318 | from __future__ import division, print_function
from functools import partial
from itertools import product
import numpy as np
import scipy.sparse as sp
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import LabelBinarizer, MultiLabelBinarizer
from sklearn.utils.multiclass import type_of_target
from sklearn.utils.validation import check_random_state
from sklearn.utils import shuffle
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import ignore_warnings
from sklearn.metrics import accuracy_score
from sklearn.metrics import average_precision_score
from sklearn.metrics import confusion_matrix
from sklearn.metrics import coverage_error
from sklearn.metrics import explained_variance_score
from sklearn.metrics import f1_score
from sklearn.metrics import fbeta_score
from sklearn.metrics import hamming_loss
from sklearn.metrics import hinge_loss
from sklearn.metrics import jaccard_similarity_score
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import log_loss
from sklearn.metrics import matthews_corrcoef
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
from sklearn.metrics import median_absolute_error
from sklearn.metrics import precision_score
from sklearn.metrics import r2_score
from sklearn.metrics import recall_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import zero_one_loss
# TODO Curves are currently not covered by the invariance tests
# from sklearn.metrics import precision_recall_curve
# from sklearn.metrics import roc_curve
from sklearn.metrics.base import _average_binary_score
# Note toward developers about metric testing
# -------------------------------------------
# It is often possible to write one general test for several metrics:
#
# - invariance properties, e.g. invariance to sample order
# - common behavior for an argument, e.g. the "normalize" with value True
# will return the mean of the metrics and with value False will return
# the sum of the metrics.
#
# In order to improve the overall metric testing, it is a good idea to write
# first a specific test for the given metric and then add a general test for
# all metrics that have the same behavior.
#
# Two types of data structures are used in order to implement this system:
# dictionaries of metrics and lists of metrics with common properties.
#
# Dictionaries of metrics
# ------------------------
# The goal of having those dictionaries is to have an easy way to call a
# particular metric and associate a name to each function:
#
# - REGRESSION_METRICS: all regression metrics.
# - CLASSIFICATION_METRICS: all classification metrics
# which compare a ground truth and the estimated targets as returned by a
# classifier.
# - THRESHOLDED_METRICS: all classification metrics which
# compare a ground truth and a score, e.g. estimated probabilities or
# decision function (format might vary)
#
# Those dictionaries will be used to test systematically some invariance
# properties, e.g. invariance toward several input layouts.
#
REGRESSION_METRICS = {
"mean_absolute_error": mean_absolute_error,
"mean_squared_error": mean_squared_error,
"median_absolute_error": median_absolute_error,
"explained_variance_score": explained_variance_score,
"r2_score": r2_score,
}
CLASSIFICATION_METRICS = {
"accuracy_score": accuracy_score,
"unnormalized_accuracy_score": partial(accuracy_score, normalize=False),
"confusion_matrix": confusion_matrix,
"hamming_loss": hamming_loss,
"jaccard_similarity_score": jaccard_similarity_score,
"unnormalized_jaccard_similarity_score":
partial(jaccard_similarity_score, normalize=False),
"zero_one_loss": zero_one_loss,
"unnormalized_zero_one_loss": partial(zero_one_loss, normalize=False),
"precision_score": precision_score,
"recall_score": recall_score,
"f1_score": f1_score,
"f2_score": partial(fbeta_score, beta=2),
"f0.5_score": partial(fbeta_score, beta=0.5),
"matthews_corrcoef_score": matthews_corrcoef,
"weighted_f0.5_score": partial(fbeta_score, average="weighted", beta=0.5),
"weighted_f1_score": partial(f1_score, average="weighted"),
"weighted_f2_score": partial(fbeta_score, average="weighted", beta=2),
"weighted_precision_score": partial(precision_score, average="weighted"),
"weighted_recall_score": partial(recall_score, average="weighted"),
"micro_f0.5_score": partial(fbeta_score, average="micro", beta=0.5),
"micro_f1_score": partial(f1_score, average="micro"),
"micro_f2_score": partial(fbeta_score, average="micro", beta=2),
"micro_precision_score": partial(precision_score, average="micro"),
"micro_recall_score": partial(recall_score, average="micro"),
"macro_f0.5_score": partial(fbeta_score, average="macro", beta=0.5),
"macro_f1_score": partial(f1_score, average="macro"),
"macro_f2_score": partial(fbeta_score, average="macro", beta=2),
"macro_precision_score": partial(precision_score, average="macro"),
"macro_recall_score": partial(recall_score, average="macro"),
"samples_f0.5_score": partial(fbeta_score, average="samples", beta=0.5),
"samples_f1_score": partial(f1_score, average="samples"),
"samples_f2_score": partial(fbeta_score, average="samples", beta=2),
"samples_precision_score": partial(precision_score, average="samples"),
"samples_recall_score": partial(recall_score, average="samples"),
}
THRESHOLDED_METRICS = {
"coverage_error": coverage_error,
"log_loss": log_loss,
"unnormalized_log_loss": partial(log_loss, normalize=False),
"hinge_loss": hinge_loss,
"roc_auc_score": roc_auc_score,
"weighted_roc_auc": partial(roc_auc_score, average="weighted"),
"samples_roc_auc": partial(roc_auc_score, average="samples"),
"micro_roc_auc": partial(roc_auc_score, average="micro"),
"macro_roc_auc": partial(roc_auc_score, average="macro"),
"average_precision_score": average_precision_score,
"weighted_average_precision_score":
partial(average_precision_score, average="weighted"),
"samples_average_precision_score":
partial(average_precision_score, average="samples"),
"micro_average_precision_score":
partial(average_precision_score, average="micro"),
"macro_average_precision_score":
partial(average_precision_score, average="macro"),
"label_ranking_average_precision_score":
label_ranking_average_precision_score,
}
ALL_METRICS = dict()
ALL_METRICS.update(THRESHOLDED_METRICS)
ALL_METRICS.update(CLASSIFICATION_METRICS)
ALL_METRICS.update(REGRESSION_METRICS)
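# Illustrative example (not part of the test suite itself): every entry in the
# dictionaries above can be called through one uniform signature, which is what
# the generic invariance tests below rely on, e.g.
#
#     metric = ALL_METRICS["accuracy_score"]
#     score = metric([0, 1, 1, 0], [0, 1, 0, 0])   # -> 0.75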
# Lists of metrics with common properties
# ---------------------------------------
# Lists of metrics with common properties are used to test systematically some
# functionalities and invariance, e.g. SYMMETRIC_METRICS lists all metrics that
# are symmetric with respect to their input argument y_true and y_pred.
#
# When you add a new metric or functionality, check if a general test
# is already written.
# Metrics undefined with "binary" or "multiclass" input
METRIC_UNDEFINED_MULTICLASS = [
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
# Those metrics don't support multiclass outputs
"average_precision_score", "weighted_average_precision_score",
"micro_average_precision_score", "macro_average_precision_score",
"samples_average_precision_score",
"label_ranking_average_precision_score",
"roc_auc_score", "micro_roc_auc", "weighted_roc_auc",
"macro_roc_auc", "samples_roc_auc",
"coverage_error",
]
# Metrics with an "average" argument
METRICS_WITH_AVERAGING = [
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score"
]
# Threshold-based metrics with an "average" argument
THRESHOLDED_METRICS_WITH_AVERAGING = [
"roc_auc_score", "average_precision_score",
]
# Metrics with a "pos_label" argument
METRICS_WITH_POS_LABEL = [
"roc_curve", "hinge_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "labels" argument
# XXX: Handle multi_class metrics that have a labels argument as well as a
# decision function argument, e.g. hinge_loss
METRICS_WITH_LABELS = [
"confusion_matrix",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
]
# Metrics with a "normalize" option
METRICS_WITH_NORMALIZE_OPTION = [
"accuracy_score",
"jaccard_similarity_score",
"zero_one_loss",
]
# Threshold-based metrics with "multilabel-indicator" format support
THRESHOLDED_MULTILABEL_METRICS = [
"log_loss",
"unnormalized_log_loss",
"roc_auc_score", "weighted_roc_auc", "samples_roc_auc",
"micro_roc_auc", "macro_roc_auc",
"average_precision_score", "weighted_average_precision_score",
"samples_average_precision_score", "micro_average_precision_score",
"macro_average_precision_score",
"coverage_error",
]
# Classification metrics with "multilabel-indicator" and
# "multilabel-sequence" format support
MULTILABELS_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"precision_score", "recall_score", "f1_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f1_score", "weighted_f2_score",
"weighted_precision_score", "weighted_recall_score",
"micro_f0.5_score", "micro_f1_score", "micro_f2_score",
"micro_precision_score", "micro_recall_score",
"macro_f0.5_score", "macro_f1_score", "macro_f2_score",
"macro_precision_score", "macro_recall_score",
"samples_f0.5_score", "samples_f1_score", "samples_f2_score",
"samples_precision_score", "samples_recall_score",
]
# Regression metrics with "multioutput-continuous" format support
MULTIOUTPUT_METRICS = [
"mean_absolute_error", "mean_squared_error", "r2_score",
]
# Symmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) == metric(y_pred, y_true).
SYMMETRIC_METRICS = [
"accuracy_score", "unnormalized_accuracy_score",
"hamming_loss",
"jaccard_similarity_score", "unnormalized_jaccard_similarity_score",
"zero_one_loss", "unnormalized_zero_one_loss",
"f1_score", "weighted_f1_score", "micro_f1_score", "macro_f1_score",
"matthews_corrcoef_score", "mean_absolute_error", "mean_squared_error",
"median_absolute_error"
]
# Asymmetric with respect to their input arguments y_true and y_pred
# metric(y_true, y_pred) != metric(y_pred, y_true).
NOT_SYMMETRIC_METRICS = [
"explained_variance_score",
"r2_score",
"confusion_matrix",
"precision_score", "recall_score", "f2_score", "f0.5_score",
"weighted_f0.5_score", "weighted_f2_score", "weighted_precision_score",
"weighted_recall_score",
"micro_f0.5_score", "micro_f2_score", "micro_precision_score",
"micro_recall_score",
"macro_f0.5_score", "macro_f2_score", "macro_precision_score",
"macro_recall_score", "log_loss", "hinge_loss"
]
# No Sample weight support
METRICS_WITHOUT_SAMPLE_WEIGHT = [
"confusion_matrix",
"hamming_loss",
"matthews_corrcoef_score",
"median_absolute_error",
]
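# Illustrative sketch (hypothetical, not part of scikit-learn): a new metric
# would be registered in the dictionaries above and its name appended to every
# property list that applies, so that the generic tests below pick it up, e.g.
#
#     CLASSIFICATION_METRICS["f3_score"] = partial(fbeta_score, beta=3)
#     ALL_METRICS["f3_score"] = CLASSIFICATION_METRICS["f3_score"]
#     NOT_SYMMETRIC_METRICS.append("f3_score")    # fbeta is not symmetric
#     METRICS_WITH_AVERAGING.append("f3_score")
#     METRICS_WITH_POS_LABEL.append("f3_score")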
def test_symmetry():
"""Test the symmetry of score and loss functions"""
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
# We shouldn't forget any metrics
assert_equal(set(SYMMETRIC_METRICS).union(NOT_SYMMETRIC_METRICS,
THRESHOLDED_METRICS,
METRIC_UNDEFINED_MULTICLASS),
set(ALL_METRICS))
assert_equal(
set(SYMMETRIC_METRICS).intersection(set(NOT_SYMMETRIC_METRICS)),
set([]))
# Symmetric metric
for name in SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_pred, y_true),
err_msg="%s is not symmetric" % name)
# Not symmetric metrics
for name in NOT_SYMMETRIC_METRICS:
metric = ALL_METRICS[name]
assert_true(np.any(metric(y_true, y_pred) != metric(y_pred, y_true)),
msg="%s seems to be symmetric" % name)
def test_sample_order_invariance():
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(20, ))
y_pred = random_state.randint(0, 2, size=(20, ))
y_true_shuffle, y_pred_shuffle = shuffle(y_true, y_pred, random_state=0)
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
def test_sample_order_invariance_multilabel_and_multioutput():
random_state = check_random_state(0)
# Generate some data
y_true = random_state.randint(0, 2, size=(20, 25))
y_pred = random_state.randint(0, 2, size=(20, 25))
y_score = random_state.normal(size=y_true.shape)
y_true_shuffle, y_pred_shuffle, y_score_shuffle = shuffle(y_true,
y_pred,
y_score,
random_state=0)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in THRESHOLDED_MULTILABEL_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_almost_equal(metric(y_true, y_score),
metric(y_true_shuffle, y_score_shuffle),
err_msg="%s is not sample order invariant"
% name)
assert_almost_equal(metric(y_true, y_pred),
metric(y_true_shuffle, y_pred_shuffle),
err_msg="%s is not sample order invariant"
% name)
def test_format_invariance_with_1d_vectors():
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_list = list(y1)
y2_list = list(y2)
y1_1d, y2_1d = np.array(y1), np.array(y2)
assert_equal(y1_1d.ndim, 1)
assert_equal(y2_1d.ndim, 1)
y1_column = np.reshape(y1_1d, (-1, 1))
y2_column = np.reshape(y2_1d, (-1, 1))
y1_row = np.reshape(y1_1d, (1, -1))
y2_row = np.reshape(y2_1d, (1, -1))
for name, metric in ALL_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure = metric(y1, y2)
assert_almost_equal(metric(y1_list, y2_list), measure,
err_msg="%s is not representation invariant "
"with list" % name)
assert_almost_equal(metric(y1_1d, y2_1d), measure,
err_msg="%s is not representation invariant "
"with np-array-1d" % name)
assert_almost_equal(metric(y1_column, y2_column), measure,
err_msg="%s is not representation invariant "
"with np-array-column" % name)
# Mix format support
assert_almost_equal(metric(y1_1d, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_list, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and list" % name)
assert_almost_equal(metric(y1_1d, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_1d), measure,
err_msg="%s is not representation invariant "
"with mix np-array-1d and np-array-column"
% name)
assert_almost_equal(metric(y1_list, y2_column), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
assert_almost_equal(metric(y1_column, y2_list), measure,
err_msg="%s is not representation invariant "
"with mix list and np-array-column"
% name)
# These mix representations aren't allowed
assert_raises(ValueError, metric, y1_1d, y2_row)
assert_raises(ValueError, metric, y1_row, y2_1d)
assert_raises(ValueError, metric, y1_list, y2_row)
assert_raises(ValueError, metric, y1_row, y2_list)
assert_raises(ValueError, metric, y1_column, y2_row)
assert_raises(ValueError, metric, y1_row, y2_column)
# NB: We do not test for y1_row, y2_row as these may be
# interpreted as multilabel or multioutput data.
if (name not in (MULTIOUTPUT_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTILABELS_METRICS)):
assert_raises(ValueError, metric, y1_row, y2_row)
def test_invariance_string_vs_numbers_labels():
"""Ensure that classification metrics with string labels"""
random_state = check_random_state(0)
y1 = random_state.randint(0, 2, size=(20, ))
y2 = random_state.randint(0, 2, size=(20, ))
y1_str = np.array(["eggs", "spam"])[y1]
y2_str = np.array(["eggs", "spam"])[y2]
pos_label_str = "spam"
labels_str = ["eggs", "spam"]
for name, metric in CLASSIFICATION_METRICS.items():
if name in METRIC_UNDEFINED_MULTICLASS:
continue
measure_with_number = metric(y1, y2)
# Ugly, but handle case with a pos_label and label
metric_str = metric
if name in METRICS_WITH_POS_LABEL:
metric_str = partial(metric_str, pos_label=pos_label_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number invariance "
"test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
if name in METRICS_WITH_LABELS:
metric_str = partial(metric_str, labels=labels_str)
measure_with_str = metric_str(y1_str, y2_str)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric_str(y1_str.astype('O'),
y2_str.astype('O'))
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string vs number "
"invariance test".format(name))
for name, metric in THRESHOLDED_METRICS.items():
if name in ("log_loss", "hinge_loss", "unnormalized_log_loss"):
measure_with_number = metric(y1, y2)
measure_with_str = metric(y1_str, y2)
assert_array_equal(measure_with_number, measure_with_str,
err_msg="{0} failed string vs number "
"invariance test".format(name))
measure_with_strobj = metric(y1_str.astype('O'), y2)
assert_array_equal(measure_with_number, measure_with_strobj,
err_msg="{0} failed string object vs number "
"invariance test".format(name))
else:
            # TODO these metrics don't support string labels yet
assert_raises(ValueError, metric, y1_str, y2)
assert_raises(ValueError, metric, y1_str.astype('O'), y2)
@ignore_warnings
def check_single_sample(name):
"""Non-regression test: scores should work with a single sample.
This is important for leave-one-out cross validation.
Score functions tested are those that formerly called np.squeeze,
which turns an array of size 1 into a 0-d array (!).
"""
metric = ALL_METRICS[name]
# assert that no exception is thrown
for i, j in product([0, 1], repeat=2):
metric([i], [j])
@ignore_warnings
def check_single_sample_multioutput(name):
metric = ALL_METRICS[name]
for i, j, k, l in product([0, 1], repeat=4):
metric(np.array([[i, j]]), np.array([[k, l]]))
def test_single_sample():
for name in ALL_METRICS:
if name in METRIC_UNDEFINED_MULTICLASS or name in THRESHOLDED_METRICS:
# Those metrics are not always defined with one sample
# or in multiclass classification
continue
yield check_single_sample, name
for name in MULTIOUTPUT_METRICS + MULTILABELS_METRICS:
yield check_single_sample_multioutput, name
def test_multioutput_number_of_output_differ():
y_true = np.array([[1, 0, 0, 1], [0, 1, 1, 1], [1, 1, 0, 1]])
y_pred = np.array([[0, 0], [1, 0], [0, 0]])
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
assert_raises(ValueError, metric, y_true, y_pred)
def test_multioutput_regression_invariance_to_dimension_shuffling():
# test invariance to dimension shuffling
random_state = check_random_state(0)
y_true = random_state.uniform(0, 2, size=(20, 5))
y_pred = random_state.uniform(0, 2, size=(20, 5))
for name in MULTIOUTPUT_METRICS:
metric = ALL_METRICS[name]
error = metric(y_true, y_pred)
for _ in range(3):
perm = random_state.permutation(y_true.shape[1])
assert_almost_equal(metric(y_true[:, perm], y_pred[:, perm]),
error,
err_msg="%s is not dimension shuffling "
"invariant" % name)
def test_multilabel_representation_invariance():
# Generate some data
n_classes = 4
n_samples = 50
# using sequence of sequences is deprecated, but still tested
make_ml = ignore_warnings(make_multilabel_classification)
_, y1 = make_ml(n_features=1, n_classes=n_classes, random_state=0,
n_samples=n_samples)
_, y2 = make_ml(n_features=1, n_classes=n_classes, random_state=1,
n_samples=n_samples)
# Be sure to have at least one empty label
y1 += ([], )
y2 += ([], )
# NOTE: The "sorted" trick is necessary to shuffle labels, because it
# allows to return the shuffled tuple.
rng = check_random_state(42)
shuffled = lambda x: sorted(x, key=lambda *args: rng.rand())
y1_shuffle = [shuffled(x) for x in y1]
y2_shuffle = [shuffled(x) for x in y2]
# Let's have redundant labels
y2_redundant = [x * rng.randint(1, 4) for x in y2]
# Binary indicator matrix format
lb = MultiLabelBinarizer().fit([range(n_classes)])
y1_binary_indicator = lb.transform(y1)
y2_binary_indicator = lb.transform(y2)
y1_sparse_indicator = sp.coo_matrix(y1_binary_indicator)
y2_sparse_indicator = sp.coo_matrix(y2_binary_indicator)
y1_shuffle_binary_indicator = lb.transform(y1_shuffle)
y2_shuffle_binary_indicator = lb.transform(y2_shuffle)
for name in MULTILABELS_METRICS:
metric = ALL_METRICS[name]
# XXX cruel hack to work with partial functions
if isinstance(metric, partial):
metric.__module__ = 'tmp'
metric.__name__ = name
measure = metric(y1_binary_indicator, y2_binary_indicator)
# Check representation invariance
assert_almost_equal(metric(y1_sparse_indicator,
y2_sparse_indicator),
measure,
err_msg="%s failed representation invariance "
"between dense and sparse indicator "
"formats." % name)
# Check shuffling invariance with dense binary indicator matrix
assert_almost_equal(metric(y1_shuffle_binary_indicator,
y2_shuffle_binary_indicator), measure,
err_msg="%s failed shuffling invariance "
" with dense binary indicator format."
% name)
# Check deprecation warnings related to sequence of sequences
deprecated_metric = partial(assert_warns, DeprecationWarning, metric)
# Check representation invariance
assert_almost_equal(deprecated_metric(y1, y2),
measure,
err_msg="%s failed representation invariance "
"between list of list of labels "
"format and dense binary indicator "
"format." % name)
# Check invariance with redundant labels with list of labels
assert_almost_equal(deprecated_metric(y1, y2_redundant), measure,
err_msg="%s failed rendundant label invariance"
% name)
# Check shuffling invariance with list of labels
assert_almost_equal(deprecated_metric(y1_shuffle, y2_shuffle), measure,
err_msg="%s failed shuffling invariance "
"with list of list of labels format."
% name)
# Check raises error with mix input representation
assert_raises(ValueError, deprecated_metric, y1, y2_binary_indicator)
assert_raises(ValueError, deprecated_metric, y1_binary_indicator, y2)
def test_normalize_option_binary_classification(n_samples=20):
# Test in the binary case
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multiclasss_classification():
# Test in the multiclass case
random_state = check_random_state(0)
y_true = random_state.randint(0, 4, size=(20, ))
y_pred = random_state.randint(0, 4, size=(20, ))
n_samples = y_true.shape[0]
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
measure = metrics(y_true, y_pred, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true, y_pred, normalize=False)
/ n_samples, measure)
def test_normalize_option_multilabel_classification():
# Test in the multilabel case
n_classes = 4
n_samples = 100
# using sequence of sequences is deprecated, but still tested
make_ml = ignore_warnings(make_multilabel_classification)
_, y_true = make_ml(n_features=1, n_classes=n_classes,
random_state=0, n_samples=n_samples)
_, y_pred = make_ml(n_features=1, n_classes=n_classes,
random_state=1, n_samples=n_samples)
# Be sure to have at least one empty label
y_true += ([], )
y_pred += ([], )
n_samples += 1
lb = MultiLabelBinarizer().fit([range(n_classes)])
y_true_binary_indicator = lb.transform(y_true)
y_pred_binary_indicator = lb.transform(y_pred)
for name in METRICS_WITH_NORMALIZE_OPTION:
metrics = ALL_METRICS[name]
# List of list of labels
measure = assert_warns(DeprecationWarning, metrics, y_true, y_pred,
normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(ignore_warnings(metrics)(y_true, y_pred,
normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
# Indicator matrix format
measure = metrics(y_true_binary_indicator,
y_pred_binary_indicator, normalize=True)
assert_greater(measure, 0,
msg="We failed to test correctly the normalize option")
assert_almost_equal(metrics(y_true_binary_indicator,
y_pred_binary_indicator, normalize=False)
/ n_samples, measure,
err_msg="Failed with %s" % name)
@ignore_warnings
def _check_averaging(metric, y_true, y_pred, y_true_binarize, y_pred_binarize,
is_multilabel):
n_samples, n_classes = y_true_binarize.shape
# No averaging
label_measure = metric(y_true, y_pred, average=None)
assert_array_almost_equal(label_measure,
[metric(y_true_binarize[:, i],
y_pred_binarize[:, i])
for i in range(n_classes)])
# Micro measure
micro_measure = metric(y_true, y_pred, average="micro")
assert_almost_equal(micro_measure, metric(y_true_binarize.ravel(),
y_pred_binarize.ravel()))
# Macro measure
macro_measure = metric(y_true, y_pred, average="macro")
assert_almost_equal(macro_measure, np.mean(label_measure))
# Weighted measure
weights = np.sum(y_true_binarize, axis=0, dtype=int)
if np.sum(weights) != 0:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, np.average(label_measure,
weights=weights))
else:
weighted_measure = metric(y_true, y_pred, average="weighted")
assert_almost_equal(weighted_measure, 0)
# Sample measure
if is_multilabel:
sample_measure = metric(y_true, y_pred, average="samples")
assert_almost_equal(sample_measure,
np.mean([metric(y_true_binarize[i],
y_pred_binarize[i])
for i in range(n_samples)]))
assert_raises(ValueError, metric, y_true, y_pred, average="unknown")
assert_raises(ValueError, metric, y_true, y_pred, average="garbage")
def check_averaging(name, y_true, y_true_binarize, y_pred, y_pred_binarize,
y_score):
is_multilabel = type_of_target(y_true).startswith("multilabel")
metric = ALL_METRICS[name]
if name in METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel)
elif name in THRESHOLDED_METRICS_WITH_AVERAGING:
_check_averaging(metric, y_true, y_score, y_true_binarize,
y_score, is_multilabel)
else:
raise ValueError("Metric is not recorded as having an average option")
def test_averaging_multiclass(n_samples=50, n_classes=3):
random_state = check_random_state(0)
y_true = random_state.randint(0, n_classes, size=(n_samples, ))
y_pred = random_state.randint(0, n_classes, size=(n_samples, ))
y_score = random_state.uniform(size=(n_samples, n_classes))
lb = LabelBinarizer().fit(y_true)
y_true_binarize = lb.transform(y_true)
y_pred_binarize = lb.transform(y_pred)
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel(n_classes=5, n_samples=40):
_, y = make_multilabel_classification(n_features=1, n_classes=n_classes,
random_state=5, n_samples=n_samples,
return_indicator=True,
allow_unlabeled=False)
y_true = y[:20]
y_pred = y[20:]
y_score = check_random_state(0).normal(size=(20, n_classes))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING + THRESHOLDED_METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
def test_averaging_multilabel_all_zeroes():
y_true = np.zeros((20, 3))
y_pred = np.zeros((20, 3))
y_score = np.zeros((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
# Test _average_binary_score for weight.sum() == 0
binary_metric = (lambda y_true, y_score, average="macro":
_average_binary_score(
precision_score, y_true, y_score, average))
_check_averaging(binary_metric, y_true, y_pred, y_true_binarize,
y_pred_binarize, is_multilabel=True)
def test_averaging_multilabel_all_ones():
y_true = np.ones((20, 3))
y_pred = np.ones((20, 3))
y_score = np.ones((20, 3))
y_true_binarize = y_true
y_pred_binarize = y_pred
for name in METRICS_WITH_AVERAGING:
yield (check_averaging, name, y_true, y_true_binarize, y_pred,
y_pred_binarize, y_score)
@ignore_warnings
def check_sample_weight_invariance(name, metric, y1, y2):
rng = np.random.RandomState(0)
sample_weight = rng.randint(1, 10, size=len(y1))
# check that unit weights gives the same score as no weight
unweighted_score = metric(y1, y2, sample_weight=None)
assert_almost_equal(
unweighted_score,
metric(y1, y2, sample_weight=np.ones(shape=len(y1))),
err_msg="For %s sample_weight=None is not equivalent to "
"sample_weight=ones" % name)
# check that the weighted and unweighted scores are unequal
weighted_score = metric(y1, y2, sample_weight=sample_weight)
assert_not_equal(
unweighted_score, weighted_score,
msg="Unweighted and weighted scores are unexpectedly "
"equal (%f) for %s" % (weighted_score, name))
# check that sample_weight can be a list
weighted_score_list = metric(y1, y2,
sample_weight=sample_weight.tolist())
assert_almost_equal(
weighted_score, weighted_score_list,
err_msg="Weighted scores for array and list sample_weight input are "
"not equal (%f != %f) for %s" % (
weighted_score, weighted_score_list, name))
# check that integer weights is the same as repeated samples
repeat_weighted_score = metric(
np.repeat(y1, sample_weight, axis=0),
np.repeat(y2, sample_weight, axis=0), sample_weight=None)
assert_almost_equal(
weighted_score, repeat_weighted_score,
err_msg="Weighting %s is not equal to repeating samples" % name)
# check that ignoring a fraction of the samples is equivalent to setting
# the corresponding weights to zero
sample_weight_subset = sample_weight[1::2]
sample_weight_zeroed = np.copy(sample_weight)
sample_weight_zeroed[::2] = 0
y1_subset = y1[1::2]
y2_subset = y2[1::2]
weighted_score_subset = metric(y1_subset, y2_subset,
sample_weight=sample_weight_subset)
weighted_score_zeroed = metric(y1, y2,
sample_weight=sample_weight_zeroed)
assert_almost_equal(
weighted_score_subset, weighted_score_zeroed,
err_msg=("Zeroing weights does not give the same result as "
"removing the corresponding samples (%f != %f) for %s" %
(weighted_score_zeroed, weighted_score_subset, name)))
if not name.startswith('unnormalized'):
# check that the score is invariant under scaling of the weights by a
# common factor
for scaling in [2, 0.3]:
assert_almost_equal(
weighted_score,
metric(y1, y2, sample_weight=sample_weight * scaling),
err_msg="%s sample_weight is not invariant "
"under scaling" % name)
# Check that if sample_weight.shape[0] != y_true.shape[0], it raised an
# error
assert_raises(Exception, metric, y1, y2,
sample_weight=np.hstack([sample_weight, sample_weight]))
def test_sample_weight_invariance(n_samples=50):
random_state = check_random_state(0)
# binary output
random_state = check_random_state(0)
y_true = random_state.randint(0, 2, size=(n_samples, ))
y_pred = random_state.randint(0, 2, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples,))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multiclass
random_state = check_random_state(0)
y_true = random_state.randint(0, 5, size=(n_samples, ))
y_pred = random_state.randint(0, 5, size=(n_samples, ))
y_score = random_state.random_sample(size=(n_samples, 5))
for name in ALL_METRICS:
if (name in METRICS_WITHOUT_SAMPLE_WEIGHT or
name in METRIC_UNDEFINED_MULTICLASS):
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield check_sample_weight_invariance, name, metric, y_true, y_score
else:
yield check_sample_weight_invariance, name, metric, y_true, y_pred
# multilabel sequence
y_true = 2 * [(1, 2, ), (1, ), (0, ), (0, 1), (1, 2)]
y_pred = 2 * [(0, 2, ), (2, ), (0, ), (2, ), (1,)]
y_score = random_state.randn(10, 3)
for name in MULTILABELS_METRICS:
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
# multilabel indicator
_, ya = make_multilabel_classification(
n_features=1, n_classes=20,
random_state=0, n_samples=100,
return_indicator=True, allow_unlabeled=False)
_, yb = make_multilabel_classification(
n_features=1, n_classes=20,
random_state=1, n_samples=100,
return_indicator=True, allow_unlabeled=False)
y_true = np.vstack([ya, yb])
y_pred = np.vstack([ya, ya])
y_score = random_state.randint(1, 4, size=y_true.shape)
for name in (MULTILABELS_METRICS + THRESHOLDED_MULTILABEL_METRICS +
MULTIOUTPUT_METRICS):
if name in METRICS_WITHOUT_SAMPLE_WEIGHT:
continue
metric = ALL_METRICS[name]
if name in THRESHOLDED_METRICS:
yield (check_sample_weight_invariance, name, metric, y_true,
y_score)
else:
yield (check_sample_weight_invariance, name, metric, y_true,
y_pred)
| bsd-3-clause |
aaronroodman/DeconvolvePSF | src/afterburner.py | 1 | 22347 | #!/nfs/slac/g/ki/ki06/roodman/Software/anaconda/bin/python
# @Author Sean McLaughlin
desc = '''
Arguments:\n
- expid: the exposure ID of the exposure to run against\n
- output_dir: the directory in which to create a subdirectory for temporary files and final outputs.\n
Requirements:\n
-WavefrontPSF\n
-numpy, pandas, astropy or pyfits\n
-a psfex installation and python binding\n
\n
This module is the main module for my project for the winter quarter of 2016 in Aaron Roodman's group.\n
This is built on top of previous work conducted by Aaron and his graduate student Chris Davis. They have\n
developed WavefrontPSF, which estimates the optical contribution of the PSF with a relatively simple model.\n
It was found that the optical portion was not as significant a fraction of the total PSF as hoped,\n
so some sort of afterburner is going to need to be added. This module deconvolves the optical portion of the\n
psf from the observed stars. After the optical portion has been deconvolved\n
from the stars (using Richardson-Lucy deconvolution), the remainder is treated as the "atmospheric"\n
portion of the psf. This module loads in preprocessed observed stars, runs WavefrontPSF on them, deconvolves\n
the optical PSF, then runs PSFEx (a packaged PSF modeler) on the residual.\n
'''
if __name__ == '__main__':
# TODO flag to delete temp files, or use them if they're already there, and other options?
from argparse import ArgumentParser
parser = ArgumentParser(description=desc)
parser.add_argument('expid', metavar='expid', type=int, help= \
'ID of the exposure to analyze')
# May want to rename to tmp
parser.add_argument('output_dir', metavar='output_dir', type=str, help= \
'Directory to store outputs.')
parser.add_argument('-v', '--verbose', action='store_true', help='Print progress updates.')
args = vars(parser.parse_args())
expid = args['expid']
output_dir = args['output_dir']
verbose = args['verbose']
# Ensure provided dir exists
from os import path, mkdir
if not path.isdir(output_dir):
raise IOError("The directory %s does not exist." % output_dir)
if output_dir[-1] != '/':
output_dir += '/'
# Make new dir to store files from this run
if not path.isdir(output_dir + '00%d/' % expid):
try:
mkdir(output_dir + '00%d/' % expid)
except OSError:
if verbose:
print 'Failed making directory; using original output directory.'
else:
output_dir += '00%d/' % expid
else:
output_dir += '00%d/' % expid
    # Do imports here instead of before the argparse so users will be able to access
    # the help script without these packages installed, and more quickly
from WavefrontPSF.psf_interpolator import Mesh_Interpolator
from WavefrontPSF.digestor import Digestor
from WavefrontPSF.donutengine import DECAM_Model_Wavefront
try:
from astropy.io import fits # TODO change to fitsio?
except ImportError:
import pyfits as fits # should have the same API
import numpy as np
from psfex import PSFEx # Move into one function that uses it?
from glob import glob
from itertools import izip
from subprocess import call
from src.optical_model import get_optical_psf
from src.lucy import deconvolve, convolve
# Value with which to mask failed deconvolutions
MASK_VAL = -9999
def get_hdu_idxs(meta_hdulist):
"""
helper function to calculate the start/end idxs of each HDUlist in the 1D flattened case
:param meta_hdulist:
:return: hdu_idxs
"""
hdu_lengths = np.zeros((62,))
for ccd, hdulist in enumerate(meta_hdulist):
hdu_lengths[ccd] = hdulist[2].data.shape[0]
hdu_idxs = hdu_lengths.cumsum()
return np.insert(hdu_idxs, 0, 0) # insert 0 as first elem
def get_ccd_idx(idx_1d, hdu_idxs):
"""
return the ccd and ccd idx given a 1d idx of a star.
:param idx_1d: the idx in the flattened case
:param hdu_idxs: output of getHDULength, the start/end idxs of each hdu in the 1D flattened case
:return: ccd_num, ccd_idx
"""
last_idx = 0
for ccd_num, hdu_idx in enumerate(hdu_idxs):
        # use >= so a star whose flattened index falls exactly on an hdu
        # boundary is attributed to the hdu it belongs to, not the previous one
        if idx_1d >= hdu_idx:
last_idx = hdu_idx
continue
break
return ccd_num, idx_1d - last_idx
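# Worked example (illustrative): if the first two catalogs of an exposure hold
# 3 and 2 stars respectively, the cumulative index array begins [0, 3, 5, ...],
# so catalog 0 occupies flattened indices 0-2 and catalog 1 occupies 3-4.
# get_ccd_idx(4, hdu_idxs) then maps flattened index 4 back to
# (ccd_num=2, ccd_idx=1), i.e. the second star of the second catalog
# (ccd_num is 1-based because of the leading 0 inserted into hdu_idxs).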
def get_vignettes(NObj, meta_hdulist, hdu_idxs=None):
"""
Get the vignettes from the hdulists as a numpy array datacube
:param NObj: number of stars
:param meta_hdulist: list of hdulists with the snippets in ['VIGNET']
:param hdu_idxs: (Optional) Defines the idxs where the hdus start/end in the 1D flattened case
:return: vignettes (nObj, 32,32) datacube of star vignettes
"""
if hdu_idxs is None:
hdu_idxs = get_hdu_idxs(meta_hdulist)
vignettes = np.zeros((NObj, 32, 32))
for ccd_num, hdulist in enumerate(meta_hdulist):
sliced_vig = hdulist[2].data['VIGNET'][:, 15:47, 15:47] # slice to same size as stamps
# TODO more clever interpolations?
sliced_vig[sliced_vig < -1000] = 0 # set really negative values to 0; it's a mask
sliced_vig = sliced_vig / sliced_vig.sum((1, 2))[:, None, None] # normalize
vignettes[hdu_idxs[ccd_num]:hdu_idxs[ccd_num + 1]] = sliced_vig
return vignettes
def deconv_optpsf(NObj, optpsf_arr, vignettes):
"""
deconvolves the optical model from the given vignettes.
Returns the residuals and a boolean array of which
deconvolutions were successful according the the LR deconv algorithm
:param NObj: number of stars
:param optpsf_arr: (nObj, 32,32) datacube of the optical model of the psf
:param vignettes: (nObj, 32,32) datacube of the star vignettes
:return: resid_arr and deconv_successful, (nObj, 63,63) array of residuals and
deconv_successful, a boolean array if a deconvolution was successful
"""
resid_arr = np.zeros((NObj, 63, 63))
deconv_successful = np.ones((NObj,), dtype=bool)
for idx, (optpsf, vignette) in enumerate(izip(optpsf_arr, vignettes)):
        # background is all pixels below the mean plus 1 std. Could vary but won't make much difference.
background = vignette[vignette < vignette.mean() + vignette.std()]
try:
# this makes initial guess be all ones; could guess vignette,
# result isn't all that different
resid_small = deconvolve(optpsf, vignette, mask=None, mu0=background.mean(),
niterations=50, convergence=1e-2, extra=False)
resid_arr[idx, 15:47, 15:47] = resid_small
except RuntimeError: # Some will fail
resid_arr[idx] += MASK_VAL # forcing a mask
deconv_successful[idx] = False
        # If I wanted to store bad stars by ccd and ccd_idx, I'd call get_ccd_idx here
return resid_arr, deconv_successful
def write_resid(output_dir, meta_hdulist, resid_arr, hdu_idxs=None):
"""
Take the calculated residuals, insert them into the existing hdulists, and write them to file.
Returns the filenames written to.
:param output_dir: the directory to store the output and temp files
:param meta_hdulist: list of hdulists to insert residuals into
:param resid_arr: residuals from deconvolution.
:param hdu_idxs: (Optional) Defines the idxs where the hdus start/end in the 1D flattened case
:return: fnames, the filenames the hdulists were written to
"""
if hdu_idxs is None:
hdu_idxs = get_hdu_idxs(meta_hdulist)
fnames = []
for ccd_num, hdulist in enumerate(meta_hdulist):
hdulist[2].data['VIGNET'] = resid_arr[hdu_idxs[ccd_num]:hdu_idxs[ccd_num + 1]]
# Make new filename from old one.
original_fname = hdulist.filename().split('/')[-1] # just get the filename, not the path
original_fname_split = original_fname.split('_')
original_fname_split[-1] = 'seldeconv.fits'
fname = output_dir + '_'.join(original_fname_split)
hdulist.writeto(fname, clobber=True)
fnames.append(fname)
return fnames
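# A minimal sketch (not part of the original driver) of how the helpers above
# chain together for one exposure.  `meta_hdulist` is assumed to be the list of
# opened star-catalog HDULists and `optpsf_arr` the matching (NObj, 32, 32)
# optical-model datacube (e.g. from get_optical_psf).
def _example_deconvolve_exposure(meta_hdulist, optpsf_arr, output_dir):
    # per-CCD offsets into the flattened star list
    hdu_idxs = get_hdu_idxs(meta_hdulist)
    n_obj = int(hdu_idxs[-1])
    # cut out, clean and normalize the observed postage stamps
    vignettes = get_vignettes(n_obj, meta_hdulist, hdu_idxs)
    # deconvolve the optical model; stars that fail are masked with MASK_VAL
    resid_arr, deconv_successful = deconv_optpsf(n_obj, optpsf_arr, vignettes)
    # write the residuals back into the catalogs so PSFEx can model them
    fnames = write_resid(output_dir, meta_hdulist, resid_arr, hdu_idxs)
    return fnames, deconv_successful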
# TODO include as an option in write_resid, or keep as a separate function?
# Can't decide on the balance between duplicated code and the two different purposes.
def write_resid_new_file(output_dir, meta_hdulist, resid_arr, deconv_successful, hdu_idxs=None):
"""
Similar to write_resid, but removes stars where deconvolution failed.
Creates new HDUlists to do this. NOTE currently not compatible with PSFEx
    :param output_dir: the directory to store the output and temp files
    :param meta_hdulist: list of hdulists to insert residuals into
:param resid_arr: residuals from deconvolution.
:param deconv_successful: a boolean array defining which deconvolutions were successful
    :param hdu_idxs: (Optional) Defines the idxs where the hdus start/end in the 1D flattened case
:return: fnames, the filenames the hdulists were written to.
Also new_meta_hdulist, a list of the new hdulists
"""
if hdu_idxs is None:
hdu_idxs = get_hdu_idxs(meta_hdulist)
new_meta_hdulist = []
fnames = []
for ccd_num, hdulist in enumerate(meta_hdulist):
hdulist[2].data['VIGNET'] = resid_arr[hdu_idxs[ccd_num]:hdu_idxs[ccd_num + 1]]
# make a new hdulist, removing the stars we've masked.
# NOTE currently not working with PSFEx
primary_table = hdulist[0].copy() # will shallow copy work?
imhead = hdulist[1].copy()
data = hdulist[2].data[deconv_successful[hdu_idxs[ccd_num]:hdu_idxs[ccd_num + 1]]]
objects = fits.BinTableHDU(data=data,
header=hdulist[2].header,
name=hdulist[2].name)
# Not sure these do anything, but trying
objects.header.set('EXTNAME', 'LDAC_OBJECTS', 'a name')
objects.header.set('NAXIS2',
str(deconv_successful[hdu_idxs[ccd_num]:hdu_idxs[ccd_num + 1]].sum()),
'Trying this...')
new_hdulist = fits.HDUList(hdus=[primary_table, imhead, objects])
new_meta_hdulist.append(new_hdulist)
# Make new filename from old one.
original_fname = hdulist.filename().split('/')[-1] # just get the filename, not the path
original_fname_split = original_fname.split('_')
original_fname_split[-1] = 'seldeconv.fits'
fname = output_dir + '_'.join(original_fname_split)
new_hdulist.writeto(fname, clobber=True)
fnames.append(fname)
return fnames, new_meta_hdulist
def call_psfex(expid, output_dir, fnames=None):
"""
calls psfex on ki-ls on the files. returns True if the call executed without error.
:param expid: The id of the exposure being studied
:param output_dir: the directory to store the output and temp files
:param fnames: (Optional) filenames to call psfex on.
If omitted, will be called on all fits files in output_dir.
:return: psfex_success, True if the call didn't return an error
"""
psfex_path = '/nfs/slac/g/ki/ki22/roodman/EUPS_DESDM/eups/packages/Linux64/psfex/3.17.3+0/bin/psfex'
psfex_config = '/afs/slac.stanford.edu/u/ec/roodman/Astrophysics/PSF/desdm-plus.psfex'
outcat_name = output_dir + '%d_outcat.cat' % expid
if fnames is None:
file_string = output_dir + '*.fits'
else:
file_string = " ".join(fnames)
command_list = [psfex_path, file_string, "-c", psfex_config, "-OUTCAT_NAME", outcat_name]
# If shell != True, the wildcard won't work
psfex_return = call(' '.join(command_list), shell=True)
return True if psfex_return == 0 else False
def load_psfex(psf_files, NObj, meta_hdulist):
"""
Loads output files from PSFEx for given stars
:param psf_files: the output files from psfex
:param NObj: the number of objects that will be loaded
:param meta_hdulist: the list of HDULists
:return: psfex_arr: a (NObj, 32,32) datacube
"""
    psfex_arr = np.zeros((NObj, 32, 32))
    idx = 0  # running star index across all catalogs (not the file index)
    for psf_file, hdulist in izip(psf_files, meta_hdulist):
        pex = PSFEx(psf_file)
        for yimage, ximage in izip(hdulist[2].data['Y_IMAGE'], hdulist[2].data['X_IMAGE']):
            # psfex has a tendency to return images of weird and varying sizes
            # This scheme ensures that they will all be the same 32x32 by zero padding
            # assumes the images are square and smaller than 32x32
            # Proof god is real and hates observational astronomers.
            psfex_loaded = pex.get_rec(yimage, ximage)
            atm_shape = psfex_loaded.shape[0]  # assumed to be square
            if atm_shape < psfex_arr.shape[1]:
                pad_amount = int((psfex_arr.shape[1] - psfex_loaded.shape[0]) / 2)
                pad_amount_upper = pad_amount + psfex_loaded.shape[0]
                psfex_arr[idx, pad_amount:pad_amount_upper, pad_amount:pad_amount_upper] = psfex_loaded
            elif atm_shape > psfex_arr.shape[1]:
                # now we have to cut psf for... reasons
                # TODO: I am 95% certain we don't care if the psf is centered; let us worry anyways
                center = int(atm_shape / 2)
                lower = center - int(psfex_arr.shape[1] / 2)
                upper = lower + psfex_arr.shape[1]
                psfex_arr[idx] = psfex_loaded[lower:upper, lower:upper]
            else:
                # sizes already match, so copy the reconstruction through unchanged
                psfex_arr[idx] = psfex_loaded
            idx += 1
    return psfex_arr
def make_stars(NObj, optpsf_arr, atmpsf_arr, deconv_successful=None):
"""
convolve the optical and psf models to make a full model for the psf of the stars
:param NObj: number of stars
:param optpsf_arr: array of optical psf models
:param atmpsf_arr: array of atmospheric psf models
:param deconv_successful: boolean array denoting if the deconvolution converged.
If passed in, will be used to slice bad indexs from optpsf_arr
:return: stars, (nObj, 32,32) array of star psf estimates.
"""
stars = np.zeros((NObj, 32, 32))
# Note that atmpsf_arr will already have the bad stars removed if the user is using that scheme.
if deconv_successful is not None:
# TODO make sure this isn't modifying the outer object
optpsf_arr = optpsf_arr[deconv_successful] # slice off failed ones.
for idx, (optpsf, atmpsf) in enumerate(izip(optpsf_arr, atmpsf_arr)):
try:
stars[idx] = convolve(optpsf, atmpsf)
except ValueError:
raise ValueError('Convolve failed on object (1D Index) #%d' % (idx))
return stars
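# Hedged sketch (illustrative only) of the convolution step on toy Gaussian kernels.
# The original file imports `convolve` elsewhere (likely astropy.convolution); here
# scipy.signal.fftconvolve is assumed purely for the illustration.
def _example_convolve_opt_atm():
    from scipy.signal import fftconvolve
    y, x = np.mgrid[-16:16, -16:16]
    optpsf = np.exp(-(x ** 2 + y ** 2) / (2.0 * 2.0 ** 2))  # toy optical PSF
    atmpsf = np.exp(-(x ** 2 + y ** 2) / (2.0 * 3.0 ** 2))  # toy atmospheric PSF
    optpsf /= optpsf.sum()
    atmpsf /= atmpsf.sum()
    return fftconvolve(optpsf, atmpsf, mode='same')  # 32x32 model stamp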
def evaluate_stamps_and_combine_with_data(WF, stamps, data):
eval_data = WF.evaluate_psf(stamps)
eval_data.index = data.index
combined_df = eval_data.combine_first(data)
return combined_df
def make_wavefront(expid, output_dir, optpsf=None, atmpsf=None, starminusopt=None, model=None):
"""
Make a wavefront, useful for diagnostic plots
:param expid: the id of the exposure being studied
:param output_dir: the directory to store the output and temp files
:param optpsf: (Optional) the optical psf in a datacube
    :param atmpsf: (Optional) the atmospheric psf in a datacube
:param starminusopt: (Optional) the residual when the optpsf is deconvolved
    :param model: (Optional) the convolution of optpsf and atmpsf
:return: None
"""
# these give the deconvolved stars
# Wish I knew how to loop this
if optpsf is None:
deconvopt_loc = output_dir + '{0:08d}/{0}_opt.npy'.format(expid)
optpsf = np.load(deconvopt_loc)
if atmpsf is None:
deconvatm_loc = output_dir + '{0:08d}/{0}_atm.npy'.format(expid)
atmpsf = np.load(deconvatm_loc)
if starminusopt is None:
deconvstarsminusopt_loc = output_dir + '{0:08d}/{0}_stars_minus_opt.npy'.format(expid)
# set the shape to be right
starminusopt = np.load(deconvstarsminusopt_loc)[:, 15:47, 15:47]
if model is None:
deconvmodel_loc = output_dir + '{0:08d}/{0}_stars.npy'.format(expid)
model = np.load(deconvmodel_loc)
mesh_directory = '/nfs/slac/g/ki/ki22/roodman/ComboMeshesv20'
# directory containing the input data files
base_directory = '/nfs/slac/g/ki/ki18/des/cpd/psfex_catalogs/SVA1_FINALCUT/psfcat/'
# set up objects. make sure I get the right mesh
digestor = Digestor()
mesh_name = 'Science-20121120s1-v20i2_All'
PSF_Interpolator = Mesh_Interpolator(mesh_name=mesh_name, directory=mesh_directory)
# This will be our main wavefront
WF = DECAM_Model_Wavefront(PSF_Interpolator=PSF_Interpolator)
# load up data
expid_path = '/{0:08d}/{1:08d}'.format(expid - expid % 1000, expid)
data_directory = base_directory + expid_path
files = sorted(glob(data_directory + '/*{0}'.format('_selpsfcat.fits')))
data_df = digestor.digest_fits(files[0], do_exclude=False)
# Can't use the new one above, because we're calling on different data.
meta_hdulist = [fits.open(files[0])]
for file in files[1:]:
tmpData = digestor.digest_fits(file, do_exclude=False)
data_df = data_df.append(tmpData)
meta_hdulist.append(fits.open(file))
hdu_idxs = get_hdu_idxs(meta_hdulist)
NObj = hdu_idxs[-1]
# make the psfex models for both portions
psf_files = sorted(glob(data_directory + '/*{0}'.format('psfcat_validation_subtracted.psf')))
psfexpsf = load_psfex(psf_files, NObj, meta_hdulist)
stars = get_vignettes(NObj, meta_hdulist, hdu_idxs)
stars_df = evaluate_stamps_and_combine_with_data(WF, stars, data_df)
psfexpsf_df = evaluate_stamps_and_combine_with_data(WF, psfexpsf, data_df)
atmpsf_df = evaluate_stamps_and_combine_with_data(WF, atmpsf, data_df)
optpsf_df = evaluate_stamps_and_combine_with_data(WF, optpsf, data_df)
starminusopt_df = evaluate_stamps_and_combine_with_data(WF, starminusopt, data_df)
model_df = evaluate_stamps_and_combine_with_data(WF, model, data_df)
combinekeys = ['e0', 'e1', 'e2', 'E1norm', 'E2norm', 'delta1', 'delta2', 'zeta1', 'zeta2']
# make a big df with all the above columns combined
df = stars_df.copy()
names = ['model', 'psfex', 'starminusopt', 'opt', 'atm', 'psfex_flip']
df_list = [model_df, psfexpsf_df, starminusopt_df, optpsf_df, atmpsf_df]
# names += ['opt_load']
# df_list += [optpsf_load_df]
# names += ['atm_make']
# df_list += [atmpsf_make_df]
for key in combinekeys:
# add the other medsub
if key == 'E1norm':
df[key] = df['e1'] / df['e0']
elif key == 'E2norm':
df[key] = df['e2'] / df['e0']
df['{0}_medsub'.format(key)] = df[key] - df[key].median()
for name, psf in zip(names, df_list):
if key == 'E1norm':
psf[key] = psf['e1'] / psf['e0']
elif key == 'E2norm':
psf[key] = psf['e2'] / psf['e0']
df['{0}_{1}'.format(name, key)] = psf[key]
# add medsub
df['{0}_{1}_medsub'.format(name, key)] = df['{0}_{1}'.format(name, key)] - df[
'{0}_{1}'.format(name, key)].median()
df['{0}_{1}_diff'.format(name, key)] = df['{0}_{1}'.format(name, key)] - df[key]
df['{0}_{1}_medsub_diff'.format(name, key)] = df['{0}_{1}_medsub'.format(name, key)]\
- df['{0}_medsub'.format(key)]
np.save(output_dir + '{0:08d}/{0}_psfexalone.npy'.format(expid), psfexpsf)
np.save(output_dir + '{0:08d}/{0}_data.npy'.format(expid), stars)
df.to_hdf(output_dir + '{0:08d}/results.h5'.format(expid),
key='table_{0:08d}'.format(expid),
mode='a', format='table', append=False)
if __name__ == '__main__':
if verbose:
print 'Starting.'
# get optical PSF
optpsf_stamps, meta_hdulist = get_optical_psf(expid)
    NObj = optpsf_stamps.shape[0]  # I'm undecided about the use of this variable
    # The number of stars is contained implicitly in the other objects passed around;
    # still, being explicit costs next to nothing and is clear to the user.
hdu_idxs = get_hdu_idxs(meta_hdulist)
# np.save(output_dir+'%s_opt_test.npy'%expid, optpsf_stamps)
if verbose:
print 'Opts Calculated.'
# extract star vignettes from the hdulists
vignettes = get_vignettes(NObj, meta_hdulist, hdu_idxs)
# deconvolve the optical model from the observed stars
resid_arr, deconv_successful = deconv_optpsf(NObj, optpsf_stamps, vignettes)
if verbose:
print 'Deconv done.'
# now, insert the atmospheric portion back into the hdulists, and write them to disk
# PSFEx needs the information in those lists to run correctly.
resid_fnames = write_resid(output_dir, meta_hdulist, resid_arr, hdu_idxs)
# resid_fnames, new_meta_hdulist = write_resid_new_file(meta_hdulist, resid_arr, deconv_successful, hdu_idxs)
# if making the new HDUlist, the number of objects has changed. Make sure to account.
# NObj = deconv_successful.sum()
if verbose:
print 'Copy and write done.'
psfex_success = call_psfex(expid, output_dir, resid_fnames)
# no use continuing if the psfex call failed.
if not psfex_success:
print 'Call to PSFEx failed; Exiting.'
from sys import exit
exit(1)
elif verbose:
print 'PSFEx call successful'
# It'd be nice if I could get these from call_psfex
psf_files = sorted(glob(output_dir + '*.psf'))
atmpsf_arr = load_psfex(psf_files, NObj, meta_hdulist)
# atmpsf_arr = load_atmpsf(psf_files, NObj, new_meta_hdulist)
stars = make_stars(NObj, optpsf_stamps, atmpsf_arr)
# stars = make_stars(NObj, optpsf_stamps, atmpsf_arr, deconv_successful)
# TODO what to save?
    # TODO save deconv_successful sliced arrays?
    # Note that these won't all have the same dimensions without a slice by deconv_successful
np.save(output_dir + '%s_stars.npy' % expid, stars)
np.save(output_dir + '%s_opt.npy' % expid, optpsf_stamps)
np.save(output_dir + '%s_atm.npy' % expid, atmpsf_arr)
np.save(output_dir + '%d_stars_minus_opt.npy' % expid, resid_arr)
    np.save(output_dir + '%s_deconv_successful.npy' % expid, deconv_successful)
if verbose:
print 'Done'
# optpsf_stamps = optpsf_stamps[deconv_successful]
# resid_arr = resid_arr[deconv_successful]
# TODO optional make_wavefront call
| mit |
rogerallen/kaggle | dogscats/run.py | 1 | 2489 | #!/usr/bin/env python
import os, sys, math
sys.path.insert(1, os.path.join(sys.path[0], '../utils'))
from utils import *
from vgg16bn import Vgg16BN
from sklearn.metrics import confusion_matrix
from keras.callbacks import ModelCheckpoint, LearningRateScheduler
current_dir = os.getcwd()
LESSON_HOME_DIR = current_dir
DATA_HOME_DIR = current_dir+'/data'
# or '/sample/'
path = DATA_HOME_DIR + '/'
test_path = DATA_HOME_DIR + '/test/' #We use all the test data
results_path=DATA_HOME_DIR + '/results/'
train_path=path + '/train/'
valid_path=path + '/valid/'
vgg = Vgg16BN()
batch_size = 64
# ADJUST THIS
no_of_epochs = 30
latest_weights_filename = None
run_index = 23
#learning_rate = 0.0001 # was 0.01. reduced at 23 again at 28
# augment images
gen = image.ImageDataGenerator(
rotation_range=15,
width_shift_range=0.1,
height_shift_range=0.1,
zoom_range=0.1,
horizontal_flip=True)
batches = vgg.get_batches(train_path, gen, batch_size=batch_size)
val_batches = vgg.get_batches(valid_path, batch_size=batch_size*2)
vgg.finetune(batches)
#vgg.model.optimizer.lr = learning_rate
INIT_LR=0.2
EPOCHS_DROP=5.0
DROP=0.5
def step_decay(epoch, initial_lrate = INIT_LR, epochs_drop = EPOCHS_DROP, drop = DROP):
lrate = initial_lrate * math.pow(drop, math.floor((1+epoch)/epochs_drop))
return lrate
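# Hedged illustration of the schedule above (not part of the original script): with
# INIT_LR=0.2, DROP=0.5 and EPOCHS_DROP=5.0 the learning rate halves every 5 epochs
# (epochs 0-3 -> 0.2, epochs 4-8 -> 0.1, epochs 9-13 -> 0.05, ...). Call by hand to inspect.
def _example_print_lr_schedule(n_epochs=15):
    for epoch in range(n_epochs):
        print "epoch %2d -> lr %.4f" % (epoch, step_decay(epoch))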
if latest_weights_filename != None:
print "loading %s"%(results_path+latest_weights_filename)
vgg.model.load_weights(results_path+latest_weights_filename)
filepath=results_path+"run-%02d-weights-{epoch:02d}-{val_acc:.2f}.hdf5"%(run_index)
history_filepath=results_path+"run-%02d-history.csv"%(run_index)
checkpoint = ModelCheckpoint(filepath,
                             # seemed to get worse results with val_acc
#monitor='val_acc', mode='max',
monitor='val_loss', mode='min',
verbose=1,
save_weights_only=True, save_best_only=True)
lr_scheduler = LearningRateScheduler(step_decay)
callbacks = [checkpoint,lr_scheduler]
history = vgg.fit(batches, val_batches, no_of_epochs, callbacks)
val_batches, probs = vgg.test(valid_path, batch_size = batch_size)
filenames = val_batches.filenames
expected_labels = val_batches.classes #0 or 1
#Round our predictions to 0/1 to generate labels
our_predictions = probs[:,0]
our_labels = np.round(1-our_predictions)
cm = confusion_matrix(expected_labels, our_labels)
print "Confusion Matrix"
print cm
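# Hedged follow-up (illustrative addition, not in the original script): the overall
# validation accuracy can be read off the confusion matrix as its trace over the total.
val_accuracy = float(cm.trace()) / cm.sum()
print "Validation accuracy: %.4f" % val_accuracy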
| apache-2.0 |
LiaoPan/scikit-learn | sklearn/tests/test_calibration.py | 213 | 12219 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
        # As the weights are used for the calibration, they should still yield
        # different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
khkaminska/scikit-learn | sklearn/mixture/gmm.py | 68 | 31091 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from time import time
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
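# Hedged usage sketch for sample_gaussian (illustrative, not part of the original
# module): draw 5 samples from a 2-D Gaussian with a full covariance matrix.
def _example_sample_gaussian():
    mean = np.array([0.0, 10.0])
    covar = np.array([[1.0, 0.3], [0.3, 2.0]])
    X = sample_gaussian(mean, covar, covariance_type='full', n_samples=5,
                        random_state=0)
    return X  # shape (2, 5): one column per sample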
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Read more in the :ref:`User Guide <gmm>`.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best results are kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
verbose : int, default: 0
Enable verbose output. If 1 then it always prints the current
initialization and iteration step. If greater than 1 then
it prints additionally the change and time needed for each step.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001, verbose=0)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc',
verbose=0):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
self.verbose = verbose
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on ``cvtype``::
(n_states, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_states, n_features) if 'diag',
(n_states, n_features, n_features) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type) +
np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit_predict(self, X, y=None):
"""Fit and then predict labels for data.
Warning: due to the final maximization step in the EM algorithm,
with low iterations the prediction may not be 100% accurate
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,) component memberships
"""
return self._fit(X, y).argmax(axis=1)
def _fit(self, X, y=None, do_prediction=False):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
responsibilities : array, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation.
"""
# initialization step
X = check_array(X, dtype=np.float64, ensure_min_samples=2)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
if self.verbose > 0:
print('Expectation-maximization algorithm started.')
for init in range(self.n_init):
if self.verbose > 0:
print('Initialization ' + str(init + 1))
start_init_time = time()
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if self.verbose > 1:
print('\tMeans have been initialized.')
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if self.verbose > 1:
print('\tWeights have been initialized.')
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
if self.verbose > 1:
print('\tCovariance matrices have been initialized.')
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
if self.verbose > 0:
print('\tEM iteration ' + str(i + 1))
start_iter_time = time()
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
# (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if self.verbose > 1:
print('\t\tChange: ' + str(change))
if change < tol:
self.converged_ = True
if self.verbose > 0:
print('\t\tEM algorithm converged.')
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
if self.verbose > 1:
print('\t\tEM iteration ' + str(i + 1) + ' took {0:.5f}s'.format(
time() - start_iter_time))
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
if self.verbose > 1:
print('\tBetter parameters were found.')
if self.verbose > 1:
print('\tInitialization ' + str(init + 1) + ' took {0:.5f}s'.format(
time() - start_init_time))
# check the existence of an init param that was not subject to
# likelihood computation issue.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
else: # self.n_iter == 0 occurs when using GMM within HMM
# Need to make sure that there are responsibilities to output
# Output zeros because it was just a quick initialization
responsibilities = np.zeros((X.shape[0], self.n_components))
return responsibilities
def fit(self, X, y=None):
"""Estimate model parameters with the EM algorithm.
        An initialization step is performed before entering the
expectation-maximization (EM) algorithm. If you want to avoid
this step, set the keyword argument init_params to the empty
string '' when creating the GMM object. Likewise, if you would
like just to do an initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
self
"""
self._fit(X, y)
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weights
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
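# Hedged sketch (not part of the original module): selecting n_components by BIC,
# the usual pattern for which the bic() method above is provided.
def _example_select_n_components_by_bic(X, max_components=5):
    best_gmm, best_bic = None, np.infty
    for k in range(1, max_components + 1):
        candidate = GMM(n_components=k, covariance_type='full').fit(X)
        candidate_bic = candidate.bic(X)
        if candidate_bic < best_bic:
            best_gmm, best_bic = candidate, candidate_bic
    return best_gmm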
#########################################################################
# some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices."""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
# The model is most probably stuck in a component with too
# few observations, we need to reinitialize this components
try:
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
except linalg.LinAlgError:
raise ValueError("'covars' must be symmetric, "
"positive-definite")
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
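# Hedged cross-check (illustrative only, not in the original module): the Cholesky-based
# log-density above should agree with scipy.stats.multivariate_normal.logpdf;
# scipy >= 0.14 is assumed here solely for the comparison.
def _example_check_full_density():
    from scipy.stats import multivariate_normal
    rng = np.random.RandomState(0)
    X = rng.randn(4, 3)
    mean = np.zeros(3)
    cov = 2.0 * np.eye(3)
    ours = _log_multivariate_normal_density_full(X, mean[np.newaxis, :],
                                                 cov[np.newaxis, :, :])[:, 0]
    reference = multivariate_normal(mean=mean, cov=cov).logpdf(X)
    return np.allclose(ours, reference)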
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
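# Hedged illustration (not in the original module) of the shapes produced above for a
# 3-feature template and 2 components:
#   'spherical' -> (2, 3), 'tied' -> (3, 3), 'diag' -> (2, 3), 'full' -> (2, 3, 3)
def _example_covar_shapes():
    tied_cv = np.eye(3)
    return {ct: distribute_covar_matrix_to_match_covariance_type(tied_cv, ct, 2).shape
            for ct in ('spherical', 'tied', 'diag', 'full')}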
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
ahoyosid/scikit-learn | examples/bicluster/plot_spectral_coclustering.py | 276 | 1736 | """
==============================================
A demo of the Spectral Co-Clustering algorithm
==============================================
This example demonstrates how to generate a dataset and bicluster it
using the Spectral Co-Clustering algorithm.
The dataset is generated using the ``make_biclusters`` function, which
creates a matrix of small values and implants biclusters with large
values. The rows and columns are then shuffled and passed to the
Spectral Co-Clustering algorithm. Rearranging the shuffled matrix to
make biclusters contiguous shows how accurately the algorithm found
the biclusters.
"""
print(__doc__)
# Author: Kemal Eren <kemal@kemaleren.com>
# License: BSD 3 clause
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import make_biclusters
from sklearn.datasets import samples_generator as sg
from sklearn.cluster.bicluster import SpectralCoclustering
from sklearn.metrics import consensus_score
data, rows, columns = make_biclusters(
shape=(300, 300), n_clusters=5, noise=5,
shuffle=False, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Original dataset")
data, row_idx, col_idx = sg._shuffle(data, random_state=0)
plt.matshow(data, cmap=plt.cm.Blues)
plt.title("Shuffled dataset")
model = SpectralCoclustering(n_clusters=5, random_state=0)
model.fit(data)
score = consensus_score(model.biclusters_,
(rows[:, row_idx], columns[:, col_idx]))
print("consensus score: {:.3f}".format(score))
fit_data = data[np.argsort(model.row_labels_)]
fit_data = fit_data[:, np.argsort(model.column_labels_)]
plt.matshow(fit_data, cmap=plt.cm.Blues)
plt.title("After biclustering; rearranged to show biclusters")
plt.show()
| bsd-3-clause |
Laurae2/LightGBM | python-package/lightgbm/sklearn.py | 1 | 37359 | # coding: utf-8
# pylint: disable = invalid-name, W0105, C0111, C0301
"""Scikit-Learn Wrapper interface for LightGBM."""
from __future__ import absolute_import
import numpy as np
import warnings
try:
import pandas as pd
_IS_PANDAS_INSTALLED = True
except ImportError:
_IS_PANDAS_INSTALLED = False
from .basic import Dataset, LightGBMError
from .compat import (SKLEARN_INSTALLED, _LGBMClassifierBase,
LGBMNotFittedError, _LGBMLabelEncoder, _LGBMModelBase,
_LGBMRegressorBase, _LGBMCheckXY, _LGBMCheckArray, _LGBMCheckConsistentLength,
_LGBMCheckClassificationTargets, _LGBMComputeSampleWeight,
argc_, range_, LGBMDeprecationWarning)
from .engine import train
def _objective_function_wrapper(func):
"""Decorate an objective function
    Note: for multi-class task, the y_pred is grouped by class_id first, then by row_id.
    To get the i-th row of y_pred for the j-th class, access y_pred[j * num_data + i],
    and grad and hess should be grouped in the same way.
Parameters
----------
func: callable
Expects a callable with signature ``func(y_true, y_pred)`` or ``func(y_true, y_pred, group):
y_true: array-like of shape = [n_samples]
The target values.
y_pred: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class)
The predicted values.
group: array-like
Group/query data, used for ranking task.
Returns
-------
new_func: callable
The new objective function as expected by ``lightgbm.engine.train``.
The signature is ``new_func(preds, dataset)``:
preds: array-like of shape = [n_samples] or shape = [n_samples * n_classes]
The predicted values.
dataset: ``dataset``
The training set from which the labels will be extracted using
``dataset.get_label()``.
"""
def inner(preds, dataset):
"""internal function"""
labels = dataset.get_label()
argc = argc_(func)
if argc == 2:
grad, hess = func(labels, preds)
elif argc == 3:
grad, hess = func(labels, preds, dataset.get_group())
else:
raise TypeError("Self-defined objective function should have 2 or 3 arguments, got %d" % argc)
"""weighted for objective"""
weight = dataset.get_weight()
if weight is not None:
"""only one class"""
if len(weight) == len(grad):
grad = np.multiply(grad, weight)
hess = np.multiply(hess, weight)
else:
num_data = len(weight)
num_class = len(grad) // num_data
if num_class * num_data != len(grad):
raise ValueError("Length of grad and hess should equal to num_class * num_data")
for k in range_(num_class):
for i in range_(num_data):
idx = k * num_data + i
grad[idx] *= weight[i]
hess[idx] *= weight[i]
return grad, hess
return inner
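# Hedged example (not part of the original module) of the kind of user-supplied objective
# the wrapper above expects: an L2 loss returning per-sample gradient and hessian of
# 0.5 * (y_pred - y_true) ** 2. The name is hypothetical; it could be passed as
# ``objective=_example_l2_objective`` to one of the estimators below.
def _example_l2_objective(y_true, y_pred):
    grad = y_pred - y_true
    hess = np.ones_like(y_true, dtype=np.float64)
    return grad, hess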
def _eval_function_wrapper(func):
"""Decorate an eval function
    Note: for multi-class task, the y_pred is grouped by class_id first, then by row_id.
    To get the i-th row of y_pred for the j-th class, access y_pred[j * num_data + i].
Parameters
----------
func: callable
Expects a callable with following functions:
``func(y_true, y_pred)``,
``func(y_true, y_pred, weight)``
or ``func(y_true, y_pred, weight, group)``
and return (eval_name->str, eval_result->float, is_bigger_better->Bool):
y_true: array-like of shape = [n_samples]
The target values.
y_pred: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class)
The predicted values.
weight: array_like of shape = [n_samples]
The weight of samples.
group: array-like
Group/query data, used for ranking task.
Returns
-------
new_func: callable
The new eval function as expected by ``lightgbm.engine.train``.
The signature is ``new_func(preds, dataset)``:
preds: array-like of shape = [n_samples] or shape = [n_samples * n_classes]
The predicted values.
dataset: ``dataset``
The training set from which the labels will be extracted using
``dataset.get_label()``.
"""
def inner(preds, dataset):
"""internal function"""
labels = dataset.get_label()
argc = argc_(func)
if argc == 2:
return func(labels, preds)
elif argc == 3:
return func(labels, preds, dataset.get_weight())
elif argc == 4:
return func(labels, preds, dataset.get_weight(), dataset.get_group())
else:
raise TypeError("Self-defined eval function should have 2, 3 or 4 arguments, got %d" % argc)
return inner
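# Hedged example (illustrative only) of a custom eval metric compatible with the wrapper
# above: RMSE reported as the expected (eval_name, eval_result, is_bigger_better) tuple.
def _example_rmse_metric(y_true, y_pred):
    return 'rmse', float(np.sqrt(np.mean((y_pred - y_true) ** 2))), False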
class LGBMModel(_LGBMModelBase):
"""Implementation of the scikit-learn API for LightGBM."""
def __init__(self, boosting_type="gbdt", num_leaves=31, max_depth=-1,
learning_rate=0.1, n_estimators=100,
subsample_for_bin=200000, objective=None, class_weight=None,
min_split_gain=0., min_child_weight=1e-3, min_child_samples=20,
subsample=1., subsample_freq=1, colsample_bytree=1.,
reg_alpha=0., reg_lambda=0., random_state=None,
n_jobs=-1, silent=True, **kwargs):
"""Construct a gradient boosting model.
Parameters
----------
boosting_type : string, optional (default="gbdt")
'gbdt', traditional Gradient Boosting Decision Tree.
'dart', Dropouts meet Multiple Additive Regression Trees.
'goss', Gradient-based One-Side Sampling.
'rf', Random Forest.
num_leaves : int, optional (default=31)
Maximum tree leaves for base learners.
max_depth : int, optional (default=-1)
Maximum tree depth for base learners, -1 means no limit.
learning_rate : float, optional (default=0.1)
Boosting learning rate.
n_estimators : int, optional (default=100)
Number of boosted trees to fit.
        subsample_for_bin : int, optional (default=200000)
Number of samples for constructing bins.
objective : string, callable or None, optional (default=None)
Specify the learning task and the corresponding learning objective or
a custom objective function to be used (see note below).
default: 'regression' for LGBMRegressor, 'binary' or 'multiclass' for LGBMClassifier, 'lambdarank' for LGBMRanker.
class_weight : dict, 'balanced' or None, optional (default=None)
Weights associated with classes in the form ``{class_label: weight}``.
Use this parameter only for multi-class classification task;
for binary classification task you may use ``is_unbalance`` or ``scale_pos_weight`` parameters.
The 'balanced' mode uses the values of y to automatically adjust weights
inversely proportional to class frequencies in the input data as ``n_samples / (n_classes * np.bincount(y))``.
If None, all classes are supposed to have weight one.
Note that these weights will be multiplied with ``sample_weight`` (passed through the fit method)
if ``sample_weight`` is specified.
min_split_gain : float, optional (default=0.)
Minimum loss reduction required to make a further partition on a leaf node of the tree.
min_child_weight : float, optional (default=1e-3)
            Minimum sum of instance weight (hessian) needed in a child (leaf).
        min_child_samples : int, optional (default=20)
            Minimum number of data needed in a child (leaf).
subsample : float, optional (default=1.)
Subsample ratio of the training instance.
subsample_freq : int, optional (default=1)
            Frequency of subsample, <=0 means disabled.
colsample_bytree : float, optional (default=1.)
Subsample ratio of columns when constructing each tree.
reg_alpha : float, optional (default=0.)
L1 regularization term on weights.
reg_lambda : float, optional (default=0.)
L2 regularization term on weights.
random_state : int or None, optional (default=None)
Random number seed.
Will use default seeds in c++ code if set to None.
n_jobs : int, optional (default=-1)
Number of parallel threads.
silent : bool, optional (default=True)
Whether to print messages while running boosting.
**kwargs : other parameters
Check http://lightgbm.readthedocs.io/en/latest/Parameters.html for more parameters.
Note
----
\*\*kwargs is not supported in sklearn, it may cause unexpected issues.
Attributes
----------
n_features_ : int
The number of features of fitted model.
classes_ : array of shape = [n_classes]
The class label array (only for classification problem).
n_classes_ : int
The number of classes (only for classification problem).
best_score_ : dict or None
The best score of fitted model.
best_iteration_ : int or None
The best iteration of fitted model if ``early_stopping_rounds`` has been specified.
objective_ : string or callable
The concrete objective used while fitting this model.
booster_ : Booster
The underlying Booster of this model.
evals_result_ : dict or None
The evaluation results if ``early_stopping_rounds`` has been specified.
feature_importances_ : array of shape = [n_features]
The feature importances (the higher, the more important the feature).
Note
----
A custom objective function can be provided for the ``objective``
parameter. In this case, it should have the signature
``objective(y_true, y_pred) -> grad, hess`` or
``objective(y_true, y_pred, group) -> grad, hess``:
y_true: array-like of shape = [n_samples]
The target values.
y_pred: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The predicted values.
group: array-like
Group/query data, used for ranking task.
grad: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the gradient for each sample point.
hess: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class task)
The value of the second derivative for each sample point.
        For multi-class task, the y_pred is grouped by class_id first, then by row_id.
        To get the i-th row of y_pred for the j-th class, access y_pred[j * num_data + i],
        and grad and hess should be grouped in the same way.
"""
if not SKLEARN_INSTALLED:
raise LightGBMError('Scikit-learn is required for this module')
self.boosting_type = boosting_type
self.objective = objective
self.num_leaves = num_leaves
self.max_depth = max_depth
self.learning_rate = learning_rate
self.n_estimators = n_estimators
self.subsample_for_bin = subsample_for_bin
self.min_split_gain = min_split_gain
self.min_child_weight = min_child_weight
self.min_child_samples = min_child_samples
self.subsample = subsample
self.subsample_freq = subsample_freq
self.colsample_bytree = colsample_bytree
self.reg_alpha = reg_alpha
self.reg_lambda = reg_lambda
self.random_state = random_state
self.n_jobs = n_jobs
self.silent = silent
self._Booster = None
self._evals_result = None
self._best_score = None
self._best_iteration = None
self._other_params = {}
self._objective = objective
self.class_weight = class_weight
self._n_features = None
self._classes = None
self._n_classes = None
self.set_params(**kwargs)
def get_params(self, deep=True):
params = super(LGBMModel, self).get_params(deep=deep)
params.update(self._other_params)
return params
# minor change to support `**kwargs`
def set_params(self, **params):
for key, value in params.items():
setattr(self, key, value)
if hasattr(self, '_' + key):
setattr(self, '_' + key, value)
self._other_params[key] = value
return self
def fit(self, X, y,
sample_weight=None, init_score=None, group=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_class_weight=None, eval_init_score=None, eval_group=None,
eval_metric=None, early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto', callbacks=None):
"""Build a gradient boosting model from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input feature matrix.
y : array-like of shape = [n_samples]
The target values (class labels in classification, real numbers in regression).
sample_weight : array-like of shape = [n_samples] or None, optional (default=None)
Weights of training data.
init_score : array-like of shape = [n_samples] or None, optional (default=None)
Init score of training data.
group : array-like of shape = [n_samples] or None, optional (default=None)
Group data of training data.
eval_set : list or None, optional (default=None)
A list of (X, y) tuple pairs to use as a validation sets for early-stopping.
eval_names : list of strings or None, optional (default=None)
Names of eval_set.
eval_sample_weight : list of arrays or None, optional (default=None)
Weights of eval data.
eval_class_weight : list or None, optional (default=None)
Class weights of eval data.
eval_init_score : list of arrays or None, optional (default=None)
Init score of eval data.
eval_group : list of arrays or None, optional (default=None)
Group data of eval data.
eval_metric : string, list of strings, callable or None, optional (default=None)
If string, it should be a built-in evaluation metric to use.
If callable, it should be a custom evaluation metric, see note for more details.
early_stopping_rounds : int or None, optional (default=None)
Activates early stopping. The model will train until the validation score stops improving.
Validation error needs to decrease at least every ``early_stopping_rounds`` round(s)
to continue training.
verbose : bool, optional (default=True)
If True and an evaluation set is used, writes the evaluation progress.
feature_name : list of strings or 'auto', optional (default="auto")
Feature names.
If 'auto' and data is pandas DataFrame, data columns names are used.
categorical_feature : list of strings or int, or 'auto', optional (default="auto")
Categorical features.
If list of int, interpreted as indices.
If list of strings, interpreted as feature names (need to specify ``feature_name`` as well).
If 'auto' and data is pandas DataFrame, pandas categorical columns are used.
callbacks : list of callback functions or None, optional (default=None)
List of callback functions that are applied at each iteration.
See Callbacks in Python API for more information.
Returns
-------
self : object
Returns self.
Note
----
Custom eval function expects a callable with following functions:
``func(y_true, y_pred)``, ``func(y_true, y_pred, weight)`` or
``func(y_true, y_pred, weight, group)``.
Returns (eval_name, eval_result, is_bigger_better) or
list of (eval_name, eval_result, is_bigger_better)
y_true: array-like of shape = [n_samples]
The target values.
y_pred: array-like of shape = [n_samples] or shape = [n_samples * n_classes] (for multi-class)
The predicted values.
weight: array-like of shape = [n_samples]
The weight of samples.
group: array-like
Group/query data, used for ranking task.
eval_name: str
The name of evaluation.
eval_result: float
The eval result.
is_bigger_better: bool
                Whether a higher eval result is better, e.g. AUC is bigger_better.
        For multi-class task, y_pred is grouped by class_id first, then by row_id.
        To access the prediction for the i-th row in the j-th class, use y_pred[j * num_data + i].
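        For illustration only, a custom RMSE eval metric could be written along
        these lines (the function name is hypothetical, not part of this
        package)::

            import numpy as np

            def rmse_metric(y_true, y_pred):
                # lower RMSE is better, hence is_bigger_better is False
                return 'rmse', np.sqrt(np.mean((y_pred - y_true) ** 2)), False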
"""
if self._objective is None:
if isinstance(self, LGBMRegressor):
self._objective = "regression"
elif isinstance(self, LGBMClassifier):
self._objective = "binary"
elif isinstance(self, LGBMRanker):
self._objective = "lambdarank"
else:
raise ValueError("Unknown LGBMModel type.")
if callable(self._objective):
self._fobj = _objective_function_wrapper(self._objective)
else:
self._fobj = None
evals_result = {}
params = self.get_params()
# sklearn interface has another naming convention
params.setdefault('seed', params.pop('random_state'))
params.setdefault('nthread', params.pop('n_jobs'))
# user can set verbose with kwargs, it has higher priority
if 'verbose' not in params and self.silent:
params['verbose'] = 0
params.pop('silent', None)
params.pop('n_estimators', None)
params.pop('class_weight', None)
if self._n_classes is not None and self._n_classes > 2:
params['num_class'] = self._n_classes
if hasattr(self, '_eval_at'):
params['ndcg_eval_at'] = self._eval_at
params['objective'] = self._objective
if self._fobj:
params['objective'] = 'None' # objective = nullptr for unknown objective
if callable(eval_metric):
feval = _eval_function_wrapper(eval_metric)
else:
feval = None
params['metric'] = eval_metric
if not _IS_PANDAS_INSTALLED or not isinstance(X, pd.DataFrame):
X, y = _LGBMCheckXY(X, y, accept_sparse=True, force_all_finite=False, ensure_min_samples=2)
_LGBMCheckConsistentLength(X, y, sample_weight)
if self.class_weight is not None:
class_sample_weight = _LGBMComputeSampleWeight(self.class_weight, y)
if sample_weight is None or len(sample_weight) == 0:
sample_weight = class_sample_weight
else:
sample_weight = np.multiply(sample_weight, class_sample_weight)
self._n_features = X.shape[1]
def _construct_dataset(X, y, sample_weight, init_score, group, params):
ret = Dataset(X, label=y, weight=sample_weight, group=group, params=params)
ret.set_init_score(init_score)
return ret
train_set = _construct_dataset(X, y, sample_weight, init_score, group, params)
valid_sets = []
if eval_set is not None:
if isinstance(eval_set, tuple):
eval_set = [eval_set]
for i, valid_data in enumerate(eval_set):
# reduce cost for prediction training data
if valid_data[0] is X and valid_data[1] is y:
valid_set = train_set
else:
def get_meta_data(collection, i):
if collection is None:
return None
elif isinstance(collection, list):
return collection[i] if len(collection) > i else None
elif isinstance(collection, dict):
return collection.get(i, None)
else:
raise TypeError('eval_sample_weight, eval_class_weight, eval_init_score, and eval_group should be dict or list')
valid_weight = get_meta_data(eval_sample_weight, i)
if get_meta_data(eval_class_weight, i) is not None:
valid_class_sample_weight = _LGBMComputeSampleWeight(get_meta_data(eval_class_weight, i), valid_data[1])
if valid_weight is None or len(valid_weight) == 0:
valid_weight = valid_class_sample_weight
else:
valid_weight = np.multiply(valid_weight, valid_class_sample_weight)
valid_init_score = get_meta_data(eval_init_score, i)
valid_group = get_meta_data(eval_group, i)
valid_set = _construct_dataset(valid_data[0], valid_data[1], valid_weight, valid_init_score, valid_group, params)
valid_sets.append(valid_set)
self._Booster = train(params, train_set,
self.n_estimators, valid_sets=valid_sets, valid_names=eval_names,
early_stopping_rounds=early_stopping_rounds,
evals_result=evals_result, fobj=self._fobj, feval=feval,
verbose_eval=verbose, feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks)
if evals_result:
self._evals_result = evals_result
if early_stopping_rounds is not None:
self._best_iteration = self._Booster.best_iteration
self._best_score = self._Booster.best_score
# free dataset
self.booster_.free_dataset()
del train_set, valid_sets
return self
def predict(self, X, raw_score=False, num_iteration=0):
"""Return the predicted value for each sample.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input features matrix.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
num_iteration : int, optional (default=0)
Limit number of iterations in the prediction; defaults to 0 (use all trees).
Returns
-------
predicted_result : array-like of shape = [n_samples] or shape = [n_samples, n_classes]
The predicted values.
"""
if self._n_features is None:
raise LGBMNotFittedError("Estimator not fitted, call `fit` before exploiting the model.")
if not _IS_PANDAS_INSTALLED or not isinstance(X, pd.DataFrame):
X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
n_features = X.shape[1]
if self._n_features != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features_ is %s and "
"input n_features is %s "
% (self._n_features, n_features))
return self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration)
    def apply(self, X, num_iteration=0):
        """Return the predicted leaf of every tree for each sample.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input features matrix.
num_iteration : int, optional (default=0)
Limit number of iterations in the prediction; defaults to 0 (use all trees).
Returns
-------
X_leaves : array-like of shape = [n_samples, n_trees]
            The predicted leaf of every tree for each sample.
"""
if self._n_features is None:
raise LGBMNotFittedError("Estimator not fitted, call `fit` before exploiting the model.")
if not _IS_PANDAS_INSTALLED or not isinstance(X, pd.DataFrame):
X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
n_features = X.shape[1]
if self._n_features != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features_ is %s and "
"input n_features is %s "
% (self._n_features, n_features))
return self.booster_.predict(X, pred_leaf=True, num_iteration=num_iteration)
@property
def n_features_(self):
"""Get the number of features of fitted model."""
if self._n_features is None:
raise LGBMNotFittedError('No n_features found. Need to call fit beforehand.')
return self._n_features
@property
def best_score_(self):
"""Get the best score of fitted model."""
if self._n_features is None:
raise LGBMNotFittedError('No best_score found. Need to call fit beforehand.')
return self._best_score
@property
def best_iteration_(self):
"""Get the best iteration of fitted model."""
if self._n_features is None:
raise LGBMNotFittedError('No best_iteration found. Need to call fit with early_stopping_rounds beforehand.')
return self._best_iteration
@property
def objective_(self):
"""Get the concrete objective used while fitting this model."""
if self._n_features is None:
raise LGBMNotFittedError('No objective found. Need to call fit beforehand.')
return self._objective
@property
def booster_(self):
"""Get the underlying lightgbm Booster of this model."""
if self._Booster is None:
raise LGBMNotFittedError('No booster found. Need to call fit beforehand.')
return self._Booster
@property
def evals_result_(self):
"""Get the evaluation results."""
if self._n_features is None:
raise LGBMNotFittedError('No results found. Need to call fit with eval_set beforehand.')
return self._evals_result
@property
def feature_importances_(self):
"""Get feature importances.
Note
----
        Feature importance in the sklearn interface used to be normalized to sum to 1;
        this is deprecated after 2.0.4 and it is now the same as Booster.feature_importance().
"""
if self._n_features is None:
raise LGBMNotFittedError('No feature_importances found. Need to call fit beforehand.')
return self.booster_.feature_importance()
class LGBMRegressor(LGBMModel, _LGBMRegressorBase):
"""LightGBM regressor."""
def fit(self, X, y,
sample_weight=None, init_score=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_init_score=None, eval_metric="l2", early_stopping_rounds=None,
verbose=True, feature_name='auto', categorical_feature='auto', callbacks=None):
super(LGBMRegressor, self).fit(X, y, sample_weight=sample_weight,
init_score=init_score, eval_set=eval_set,
eval_names=eval_names,
eval_sample_weight=eval_sample_weight,
eval_init_score=eval_init_score,
eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose, feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks)
return self
base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_class_weight :')] +
base_doc[base_doc.find('eval_init_score :'):])
base_doc = fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')] +
'eval_metric : string, list of strings, callable or None, optional (default="l2")\n' +
base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):])
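    # Illustrative usage sketch (the variable names below are hypothetical and
    # not part of this module); early stopping makes ``best_iteration_`` available:
    #
    #     reg = LGBMRegressor(n_estimators=100, learning_rate=0.1)
    #     reg.fit(X_train, y_train, eval_set=[(X_valid, y_valid)],
    #             eval_metric='l1', early_stopping_rounds=10)
    #     pred = reg.predict(X_valid, num_iteration=reg.best_iteration_)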
class LGBMClassifier(LGBMModel, _LGBMClassifierBase):
"""LightGBM classifier."""
def fit(self, X, y,
sample_weight=None, init_score=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_class_weight=None, eval_init_score=None, eval_metric="logloss",
early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto', callbacks=None):
_LGBMCheckClassificationTargets(y)
self._le = _LGBMLabelEncoder().fit(y)
_y = self._le.transform(y)
self._classes = self._le.classes_
self._n_classes = len(self._classes)
if self._n_classes > 2:
# Switch to using a multiclass objective in the underlying LGBM instance
if self._objective != "multiclassova" and not callable(self._objective):
self._objective = "multiclass"
if eval_metric == 'logloss' or eval_metric == 'binary_logloss':
eval_metric = "multi_logloss"
elif eval_metric == 'error' or eval_metric == 'binary_error':
eval_metric = "multi_error"
else:
if eval_metric == 'logloss' or eval_metric == 'multi_logloss':
eval_metric = 'binary_logloss'
elif eval_metric == 'error' or eval_metric == 'multi_error':
eval_metric = 'binary_error'
if eval_set is not None:
if isinstance(eval_set, tuple):
eval_set = [eval_set]
for i, (valid_x, valid_y) in enumerate(eval_set):
if valid_x is X and valid_y is y:
eval_set[i] = (valid_x, _y)
else:
eval_set[i] = (valid_x, self._le.transform(valid_y))
super(LGBMClassifier, self).fit(X, _y, sample_weight=sample_weight,
init_score=init_score, eval_set=eval_set,
eval_names=eval_names,
eval_sample_weight=eval_sample_weight,
eval_class_weight=eval_class_weight,
eval_init_score=eval_init_score,
eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose, feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks)
return self
base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')] +
'eval_metric : string, list of strings, callable or None, optional (default="logloss")\n' +
base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):])
def predict(self, X, raw_score=False, num_iteration=0):
class_probs = self.predict_proba(X, raw_score, num_iteration)
class_index = np.argmax(class_probs, axis=1)
return self._le.inverse_transform(class_index)
def predict_proba(self, X, raw_score=False, num_iteration=0):
"""Return the predicted probability for each class for each sample.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
Input features matrix.
raw_score : bool, optional (default=False)
Whether to predict raw scores.
num_iteration : int, optional (default=0)
Limit number of iterations in the prediction; defaults to 0 (use all trees).
Returns
-------
predicted_probability : array-like of shape = [n_samples, n_classes]
The predicted probability for each class for each sample.
"""
if self._n_features is None:
raise LGBMNotFittedError("Estimator not fitted, call `fit` before exploiting the model.")
if not _IS_PANDAS_INSTALLED or not isinstance(X, pd.DataFrame):
X = _LGBMCheckArray(X, accept_sparse=True, force_all_finite=False)
n_features = X.shape[1]
if self._n_features != n_features:
raise ValueError("Number of features of the model must "
"match the input. Model n_features_ is %s and "
"input n_features is %s "
% (self._n_features, n_features))
class_probs = self.booster_.predict(X, raw_score=raw_score, num_iteration=num_iteration)
if self._n_classes > 2:
return class_probs
else:
return np.vstack((1. - class_probs, class_probs)).transpose()
@property
def classes_(self):
"""Get the class label array."""
if self._classes is None:
raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
return self._classes
@property
def n_classes_(self):
"""Get the number of classes."""
if self._n_classes is None:
raise LGBMNotFittedError('No classes found. Need to call fit beforehand.')
return self._n_classes
class LGBMRanker(LGBMModel):
"""LightGBM ranker."""
def fit(self, X, y,
sample_weight=None, init_score=None, group=None,
eval_set=None, eval_names=None, eval_sample_weight=None,
eval_init_score=None, eval_group=None, eval_metric='ndcg',
eval_at=[1], early_stopping_rounds=None, verbose=True,
feature_name='auto', categorical_feature='auto', callbacks=None):
# check group data
if group is None:
raise ValueError("Should set group for ranking task")
if eval_set is not None:
if eval_group is None:
raise ValueError("Eval_group cannot be None when eval_set is not None")
elif len(eval_group) != len(eval_set):
raise ValueError("Length of eval_group should be equal to eval_set")
elif (isinstance(eval_group, dict) and any(i not in eval_group or eval_group[i] is None for i in range_(len(eval_group)))) \
or (isinstance(eval_group, list) and any(group is None for group in eval_group)):
raise ValueError("Should set group for all eval datasets for ranking task; "
"if you use dict, the index should start from 0")
self._eval_at = eval_at
super(LGBMRanker, self).fit(X, y, sample_weight=sample_weight,
init_score=init_score, group=group,
eval_set=eval_set, eval_names=eval_names,
eval_sample_weight=eval_sample_weight,
eval_init_score=eval_init_score, eval_group=eval_group,
eval_metric=eval_metric,
early_stopping_rounds=early_stopping_rounds,
verbose=verbose, feature_name=feature_name,
categorical_feature=categorical_feature,
callbacks=callbacks)
return self
base_doc = LGBMModel.fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_class_weight :')] +
base_doc[base_doc.find('eval_init_score :'):])
base_doc = fit.__doc__
fit.__doc__ = (base_doc[:base_doc.find('eval_metric :')] +
'eval_metric : string, list of strings, callable or None, optional (default="ndcg")\n' +
base_doc[base_doc.find(' If string, it should be a built-in evaluation metric to use.'):base_doc.find('early_stopping_rounds :')] +
'eval_at : list of int, optional (default=[1])\n'
' The evaluation positions of NDCG.\n' +
base_doc[base_doc.find(' early_stopping_rounds :'):])
| mit |
hertogp/jabs | jabs/ilf/gen_numbers.py | 1 | 7292 | #!/usr/bin/env python3
'''
Helper script:
- reads IANA IPv4 proto numbers & services
- writes numbers.py
'''
import sys
import argparse
import logging
import pandas as pd
import numpy as np
__version__ = '0.1'
log = logging.getLogger(__name__)
log.setLevel(logging.WARNING)
URL_BASE = 'https://www.iana.org/assignments'
URL_PROTOCOLS = '{}/protocol-numbers/protocol-numbers-1.csv'.format(URL_BASE)
# URL_SERVICES = '{}/service-names-port-numbers/service-names-port-numbers.csv'.format(URL_BASE)
URL_SERVICES = '{0}/{1}/{1}.csv'.format(URL_BASE, 'service-names-port-numbers')
PY_OUTFILE = 'numbers.py'
def console_logging(log_level):
'set console logging to level given by args.v'
console_fmt = logging.Formatter('%(funcName)s %(levelname)s: %(message)s')
console_hdl = logging.StreamHandler(stream=sys.stderr)
console_hdl.set_name('console')
console_hdl.setFormatter(console_fmt)
console_hdl.setLevel(log_level)
log.setLevel(log_level)
log.addHandler(console_hdl)
def load_csv(url):
'load a csv into a df and normalize column names somewhat'
df = pd.read_csv(url)
df.columns = df.columns.str.lower()
df.columns = df.columns.str.replace(r'\s+', '_')
log.info('done reading url')
return df
def load_protocols(url):
'load protocol numbers from iana'
try:
df = load_csv(url)
cols = 'decimal keyword protocol'.split()
df = df[cols]
except KeyError:
raise Exception('Unexpected/different data, wrong url {}?'.format(url))
# clean up values
log.info('cleaning up strings')
df['protocol'] = df['protocol'].str.replace(r'\s+', ' ') # clean spaces
df['keyword'] = df['keyword'].str.strip()
df['keyword'] = df['keyword'].str.replace(r'\s.*$', '') # 1st word
df['keyword'] = df['keyword'].str.lower()
df['decimal'] = df['decimal'].astype(str) # ensure they're all strings!
df['decimal'] = df['decimal'].str.replace(r'\s+', '') # no whitespace
df = df.drop_duplicates(subset='decimal', keep='first') # drop dups
# eliminate protocol-ranges by making them explicit
log.info('making protocol ranges explicit')
rows = []
for idx, row in df[df['decimal'].str.contains('-')].iterrows():
parts = row['decimal'].split('-')
start = int(parts[0])
stop = int(parts[-1])
proto = row['protocol']
orgkey = row['keyword']
for num in range(start, stop+1):
keyw = 'ip{}'.format(num) if pd.isnull(orgkey) else orgkey
rows.append({'decimal': str(num),
'keyword': keyw,
'protocol': proto})
df = df.append(rows, ignore_index=True)
df = df[~df['decimal'].str.contains('-')] # drop the 'start-max' entries
# set any remaining NaN keywords to <nr>
    # do not use '{}/ip'.format(df['decimal']) <-- it would insert the whole decimal column!
log.info('filling empty strings (if any) with sane defaults')
df['keyword'] = np.where(df['keyword'].isnull(),
'ip' + df['decimal'],
df['keyword'])
# set any remaining NaN protocols to keyword
df['protocol'] = np.where(df['protocol'].isnull(),
df['keyword'],
df['protocol'])
return df
def load_services(url):
'load ip4 services from iana'
cols = 'port_number transport_protocol service_name'.split()
df = load_csv(URL_SERVICES)
log.info('keep only columns {!r}'.format(cols))
df = df[cols]
df = df.dropna() # if any field is nan, drop the row
log.info('cleaning up strings')
for col in cols:
df[col] = df[col].astype(str) # ensure strings
df[col] = df[col].str.lower() # lowercase
df[col] = df[col].str.replace(r'\s.*$', '') # 1st word only
df[col] = df[col].str.replace('_', '-') # aliased names -/_
# eliminate port-ranges by making them explicit
log.info('make port-ranges explicit')
rows = []
for idx, row in df[df['port_number'].str.contains('-')].iterrows():
parts = row['port_number'].split('-')
start = int(parts[0])
stop = int(parts[-1])
proto = row['transport_protocol']
if not proto:
continue
service = row['service_name']
for num in range(start, stop+1):
srv = service if service else 'p-{}'.format(num)
rows.append(dict(zip(cols, [str(num), proto, srv])))
df = df.append(rows, ignore_index=True)
df = df[~df['port_number'].str.contains('-')]
log.info('{} entries after clean up'.format(len(df.index)))
return df
def protocol_topy(df, fh):
'write protocols dict'
df['decimal'] = df['decimal'].astype('int64')
dd = df.set_index('decimal')
dd = dd.drop_duplicates()
dct = dict(zip(dd.index, zip(dd['keyword'], dd['protocol'])))
print("", file=fh)
print('IP4PROTOCOLS = {', file=fh)
for k, v in sorted(dct.items()):
print(' {}: {},'.format(k, v), file=fh)
print('}', file=fh)
log.info('wrote {} protocol numbers to {}'.format(len(dct), fh.name))
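# For illustration, the generated IP4PROTOCOLS dict maps an integer protocol
# number to a (keyword, protocol) tuple, e.g. entries like
# 6: ('tcp', 'Transmission Control') or 17: ('udp', 'User Datagram').
# These values are examples only; the real entries come from the IANA download.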
def services_topy(df, fh):
'write services dict'
dd = df.copy()
pt = 'port_number transport_protocol'.split()
dd['port'] = dd[pt].apply(lambda g: '/'.join(x for x in g), axis=1)
dct = dict(zip(dd['port'], dd['service_name']))
print("", file=fh)
print('IP4SERVICES = {', file=fh)
for k, v in sorted(dct.items()):
print(' {!r}: {!r},'.format(k, v), file=fh)
print('}', file=fh)
log.info('wrote {} service entries to {}'.format(len(dct), fh.name))
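# For illustration, the generated IP4SERVICES dict is keyed by 'port/protocol'
# strings mapped to service names, e.g. '80/tcp': 'http' or '53/udp': 'domain'.
# These values are examples only; the real entries come from the IANA download.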
def parse_args(argv):
'parse command line arguments'
p = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description=__doc__)
padd = p.add_argument
padd('-v', '--verbose', action='store_const', dest='log_level',
const=logging.INFO, default=logging.WARNING,
help='show informational messages')
padd('-d', '--debug', action='store_const', dest='log_level',
const=logging.DEBUG, help='show debug messages')
padd('-V', '--Version', action='version',
version='{} {}'.format(argv[0], __version__))
arg = p.parse_args(argv[1:])
arg.prog = argv[0]
return arg
def main():
with open(PY_OUTFILE, 'w') as outf:
print("'''", file=outf)
print('This file is generated by ' + __file__, file=outf)
        print('Do not edit; override entries via objects:', file=outf)
print(' - ilf.IP4Protocols', file=outf)
print(' - ilf.IP4Services', file=outf)
print('Data retrieved from:', file=outf)
print(' - {}'.format(URL_PROTOCOLS), file=outf)
print(' - {}'.format(URL_SERVICES), file=outf)
print("'''", file=outf)
log.info('retrieving protocols, url {}'.format(URL_PROTOCOLS))
dfp = load_protocols(URL_PROTOCOLS)
protocol_topy(dfp, outf)
log.info('retrieving services, url {}'.format(URL_SERVICES))
dfs = load_services(URL_SERVICES)
services_topy(dfs, outf)
log.info('done!')
if __name__ == '__main__':
args = parse_args(sys.argv)
console_logging(args.log_level)
sys.exit(main())
| mit |
dhruv13J/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 85 | 6377 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed up of LSHForest queries compared to the brute force
method of exact nearest neighbors is measured for the
aforementioned settings. In general, the speed up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
p1 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[0])
p2 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[1])
p3 = plt.Rectangle((0, 0), 0.1, 0.1, fc=colors[2])
labels = ['n_estimators=' + str(params_list[0]['n_estimators']) +
', n_candidates=' + str(params_list[0]['n_candidates']),
'n_estimators=' + str(params_list[1]['n_estimators']) +
', n_candidates=' + str(params_list[1]['n_candidates']),
'n_estimators=' + str(params_list[2]['n_estimators']) +
', n_candidates=' + str(params_list[2]['n_candidates'])]
# Plot precision
plt.figure()
plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend((p1, p2, p3), (labels[0], labels[1], labels[2]),
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
tomsilver/NAB | nab/scorer.py | 1 | 14681 | # ----------------------------------------------------------------------
# Copyright (C) 2014-2015, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import math
import os
import pandas
from nab.util import (convertResultsPathToDataPath,
convertAnomalyScoresToDetections)
class Window(object):
"""Class to store a single window in a datafile."""
def __init__(self, windowId, limits, data):
"""
@param windowId (int) An integer id for the window.
@limits (tuple) (start timestamp, end timestamp).
@data (pandas.Series) Raw rows of the whole datafile.
"""
self.id = windowId
self.t1, self.t2 = limits
temp = data[data["timestamp"] >= self.t1]
self.window = temp[temp["timestamp"] <= self.t2]
self.indices = self.window.index
self.length = len(self.indices)
def __repr__(self):
"""
String representation of Window. For debugging.
"""
s = "WINDOW id=" + str(self.id)
s += ", limits: [" + str(self.t1) + ", " + str(self.t2) + "]"
s += ", length: " + str(self.length)
s += "\nwindow data:\n" + str(self.window)
return s
def getFirstTruePositive(self):
"""Get the index of the first true positive within a window.
@return (int) Index of the first occurrence of the true positive within
the window. -1 if there are none.
"""
tp = self.window[self.window["type"] == "tp"]
if len(tp) > 0:
return tp.iloc[0].name
else:
return -1
class Scorer(object):
"""Class used to score a datafile."""
def __init__(self,
timestamps,
predictions,
labels,
windowLimits,
costMatrix,
probationaryPeriod):
"""
@param predictions (pandas.Series) Detector predictions of
whether each record is anomalous or
not. predictions[
0:probationaryPeriod] are ignored.
@param labels (pandas.DataFrame) Ground truth for each record.
For each record there should be a 1
or a 0. A 1 implies this record is
within an anomalous window.
@param windowLimits (list) All the window limits in tuple
form: (timestamp start, timestamp
end).
@param costmatrix (dict) Dictionary containing the
cost matrix for this profile.
type: True positive (tp)
False positive (fp)
True Negative (tn)
False Negative (fn)
@param probationaryPeriod
(int) Row index after which predictions
are scored.
"""
self.data = pandas.DataFrame()
self.data["timestamp"] = timestamps
self.data["label"] = labels
self.probationaryPeriod = probationaryPeriod
self.costMatrix = costMatrix
self.totalCount = len(self.data["label"])
self.counts = {
"tp": 0,
"tn": 0,
"fp": 0,
"fn": 0}
self.score = None
self.length = len(predictions)
self.data["type"] = self.getAlertTypes(predictions)
self.windows = self.getWindows(windowLimits)
def getWindows(self, limits):
"""Create list of windows for this datafile.
    @return (list)    Window objects built from the given limits, in order.
"""
# Sort windows before putting them into list
windows = [Window(i, limit, self.data) for i, limit in enumerate(limits)]
return windows
def getAlertTypes(self, predictions):
"""For each record, decide whether it is a tp, fp, tn, or fn. Populate
counts dictionary with the total number of records in each category.
Return a list of strings containing each prediction type."""
types = []
for i, row in self.data.iterrows():
if i < self.probationaryPeriod:
types.append("probationaryPeriod")
continue
pred = predictions[int(i)]
diff = abs(pred - row["label"])
category = str()
category += "f" if bool(diff) else "t"
category += "p" if bool(pred) else "n"
self.counts[category] += 1
types.append(category)
return types
def getScore(self):
"""Score the entire datafile and return a single floating point score.
The position in a given window is calculated as the distance from the end
    of the window, normalized [-1,0]. I.e. positions -1.0 and 0.0 are at the very
front and back of the anomaly window, respectively.
@return (float) Score at each timestamp of the datafile.
"""
# Scoring section (i) handles TP and FN, (ii) handles FP, and TN are 0.
# Input to the scoring function is var position: within a given window, the
# position relative to the true anomaly.
scores = pandas.DataFrame([0]*len(self.data), columns=["S(t)"])
# (i) Calculate the score for each window. Each window will either have one
# or more true positives or no predictions (i.e. a false negative). FNs
# lead to a negative contribution, TPs a positive one.
tpScore = 0
fnScore = 0
for window in self.windows:
tpIndex = window.getFirstTruePositive()
if tpIndex == -1:
# False negative; mark once for the whole window (at the start)
thisFN = -self.costMatrix["fnWeight"]
scores.iloc[window.indices[0]] = thisFN
fnScore += thisFN
else:
# True positive
position = -(window.indices[-1] - tpIndex + 1)/float(window.length)
thisTP = scaledSigmoid(position)*self.costMatrix["tpWeight"] / 0.98661
scores.iloc[window.indices[0]] = thisTP
tpScore += thisTP
# Go through each false positive and score it. Each FP leads to a negative
# contribution dependent on how far it is from the previous window.
fpLabels = self.data[self.data["type"] == "fp"]
fpScore = 0
for i in fpLabels.index:
windowId = self.getClosestPrecedingWindow(i)
if windowId == -1:
thisFP = -self.costMatrix["fpWeight"]
scores.iloc[i] = thisFP
fpScore += thisFP
else:
window = self.windows[windowId]
position = abs(window.indices[-1] - i)/float(window.length-1)
thisFP = scaledSigmoid(position)*self.costMatrix["fpWeight"]
scores.iloc[i] = thisFP
fpScore += thisFP
self.score = tpScore + fpScore + fnScore
return (scores, self.score)
def getClosestPrecedingWindow(self, index):
"""Given a record index, find the closest preceding window.
This helps score false positives.
@param index (int) Index of a record.
@return (int) Window id for the last window preceding the given
index.
"""
minDistance = float("inf")
windowId = -1
for window in self.windows:
if window.indices[-1] < index:
dist = index - window.indices[-1]
if dist < minDistance:
minDistance = dist
windowId = window.id
return windowId
def sigmoid(x):
"""Standard sigmoid function."""
return 1 / (1 + math.exp(-x))
def scaledSigmoid(relativePositionInWindow):
"""Return a scaled sigmoid function given a relative position within a
labeled window. The function is computed as follows:
A relative position of -1.0 is the far left edge of the anomaly window and
corresponds to S = 2*sigmoid(5) - 1.0 = 0.98661. This is the earliest to be
counted as a true positive.
A relative position of -0.5 is halfway into the anomaly window and
corresponds to S = 2*sigmoid(0.5*5) - 1.0 = 0.84828.
  A relative position of 0.0 is at the right edge of the window and
corresponds to S = 2*sigmoid(0) - 1 = 0.0.
Relative positions > 0 correspond to false positives increasingly far away
from the right edge of the window. A relative position of 1.0 is past the
right edge of the window and corresponds to a score of 2*sigmoid(-5) - 1.0 =
-0.98661.
@param relativePositionInWindow (float) A relative position
within a window calculated per the
rules above.
@return (float)
"""
if relativePositionInWindow > 3.0:
# FP well behind window
return -1.0
else:
return 2*sigmoid(-5*relativePositionInWindow) - 1.0
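# Illustrative spot checks of scaledSigmoid (rounded values, for reference only):
#   scaledSigmoid(-1.0) ->  0.98661  (earliest point inside the window)
#   scaledSigmoid(-0.5) ->  0.84828  (halfway into the window)
#   scaledSigmoid( 0.0) ->  0.0      (right edge of the window)
#   scaledSigmoid( 1.0) -> -0.98661  (false positive past the window)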
def scoreCorpus(threshold, args):
"""Scores the corpus given a detector's results and a user profile.
Scores the corpus in parallel.
@param threshold (float) Threshold value to convert an anomaly score value
to a detection.
@param args (tuple) Contains:
pool (multiprocessing.Pool) Pool of processes to perform
tasks in parallel.
detectorName (string) Name of detector.
profileName (string) Name of scoring profile.
costMatrix (dict) Cost matrix to weight the
true positives, false negatives,
and false positives during
scoring.
resultsDetectorDir (string) Directory for the results CSVs.
resultsCorpus (nab.Corpus) Corpus object that holds the per
record anomaly scores for a
given detector.
corpusLabel (nab.CorpusLabel) Ground truth anomaly labels for
the NAB corpus.
probationaryPercent (float) Percent of each data file not
to be considered during scoring.
"""
(pool,
detectorName,
profileName,
costMatrix,
resultsDetectorDir,
resultsCorpus,
corpusLabel,
probationaryPercent,
scoreFlag) = args
args = []
for relativePath, dataSet in resultsCorpus.dataFiles.iteritems():
if "_scores.csv" in relativePath:
continue
# relativePath: raw dataset file,
# e.g. 'artificialNoAnomaly/art_noisy.csv'
relativePath = convertResultsPathToDataPath( \
os.path.join(detectorName, relativePath))
# outputPath: dataset results file,
# e.g. 'results/detector/artificialNoAnomaly/detector_art_noisy.csv'
relativeDir, fileName = os.path.split(relativePath)
fileName = detectorName + "_" + fileName
outputPath = os.path.join(resultsDetectorDir, relativeDir, fileName)
windows = corpusLabel.windows[relativePath]
labels = corpusLabel.labels[relativePath]
probationaryPeriod = math.floor(probationaryPercent * labels.shape[0])
predicted = convertAnomalyScoresToDetections(
dataSet.data["anomaly_score"], threshold)
args.append((
detectorName,
profileName,
relativePath,
outputPath,
threshold,
predicted,
windows,
labels,
costMatrix,
probationaryPeriod,
scoreFlag))
results = pool.map(scoreDataSet, args)
# Total the 6 scoring metrics for all data files
totals = [None]*3 + [0]*6
for row in results:
for i in xrange(6):
totals[i+3] += row[i+4]
results.append(["Totals"] + totals)
resultsDF = pandas.DataFrame(data=results,
columns=("Detector", "Profile", "File",
"Threshold", "Score", "TP", "TN",
"FP", "FN", "Total_Count"))
return resultsDF
def scoreDataSet(args):
"""Function called to score each dataset in the corpus.
@param args (tuple) Arguments to get the detection score for a dataset.
@return (tuple) Contains:
detectorName (string) Name of detector used to get anomaly scores.
profileName (string) Name of profile used to weight each detection type.
(tp, tn, fp, fn)
relativePath (string) Path of dataset scored.
threshold (float) Threshold used to convert anomaly scores to
detections.
score (float) The score of the dataset.
counts, tp (int) The number of true positive records.
counts, tn (int) The number of true negative records.
counts, fp (int) The number of false positive records.
counts, fn (int) The number of false negative records.
total count (int) The total number of records.
"""
(detectorName,
profileName,
relativePath,
outputPath,
threshold,
predicted,
windows,
labels,
costMatrix,
probationaryPeriod,
scoreFlag) = args
scorer = Scorer(
timestamps=labels["timestamp"],
predictions=predicted,
labels=labels["label"],
windowLimits=windows,
costMatrix=costMatrix,
probationaryPeriod=probationaryPeriod)
(scores,_) = scorer.getScore()
if scoreFlag:
# Append scoring function values to the respective results file
df_csv = pandas.read_csv(outputPath, header=0, parse_dates=[0])
df_csv["S(t)_%s" % profileName] = scores
df_csv.to_csv(outputPath, index=False)
counts = scorer.counts
return (detectorName, profileName, relativePath, threshold, scorer.score,
counts["tp"], counts["tn"], counts["fp"], counts["fn"], scorer.length)
| gpl-3.0 |
DarkEnergyScienceCollaboration/Twinkles | python/desc/twinkles/cleanupspectra.py | 2 | 2612 | from __future__ import absolute_import, division, print_function
import tarfile
import pandas as pd
import gzip
import shutil
import os
import time
def namelist(fname):
"""
    Return the tar file name and the tar.gz file name.
"""
basename = fname.split('.')[0]
tarfilename = basename + '.tar'
targzname = tarfilename + '.gz'
return tarfilename, targzname
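# For example (illustrative file name):
#   namelist('phosim_input_230.txt') -> ('phosim_input_230.tar',
#                                        'phosim_input_230.tar.gz')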
def tarfilelist(lst, fname):
"""
tar up all the filenames in lst into base.tar where base = basename for
fname
"""
outfname, _ = namelist(fname)
f = tarfile.open(outfname, 'w')
for s in lst:
f.add(s)
f.close()
return outfname
def snspectralist(fname, logffname=None):
"""
List all the spectra files associated with a phosim instance catalog
"""
x = []
with open(fname, 'r') as f:
for line in f:
if 'spectra_file' in line:
x.append(line.split()[5])
return x
def listFiles(logfile, prefix='InstanceCatalogs/phosim_input_'):
"""
    Read the log file to get the list of phosim instance catalogs already done.
"""
df = pd.read_csv(logfile)
fileList = [prefix + str(x) + '.txt' for x in df.obsHistID.values]
    return fileList
def gziptarfile(fname, prefix=''):
"""
gzip a tarred up file
"""
tarfilename, targzname = namelist(fname)
targzname = prefix + targzname
with open(tarfilename, 'rb') as f_in, gzip.open(targzname, 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
def cleanup(fname):
l = snspectralist(fname)
tarfilename, _ = namelist(fname)
for file in l:
os.remove(file)
os.remove(tarfilename)
if __name__=='__main__':
import pandas as pd
import sys
import gzip
logfilename = 'run.log'
filenames = listFiles(logfilename, prefix='InstanceCatalogs/phosim_input_')
for fname in filenames:
starttime = time.time()
print(fname)
tgzfile = fname.split('.')[0] + '.tar.gz'
if os.path.exists(tgzfile):
continue
with open(fname, 'rb') as fin, gzip.open(fname + '.gz', 'wb') as fout:
shutil.copyfileobj(fin, fout)
x = snspectralist(fname)
listtime = time.time()
print(len(x))
tarfiles = tarfilelist(x, fname)
tartime = time.time()
gziptarfile(fname)
ziptime = time.time()
totaltime = ziptime - starttime
zippingtime = ziptime - starttime
tarringtime = tartime - starttime
print(totaltime, zippingtime, tarringtime)
cleanup(fname)
        print(fname, tarfiles, x)
| mit |
olafhauk/mne-python | examples/visualization/plot_evoked_topomap.py | 14 | 5554 | # -*- coding: utf-8 -*-
"""
.. _ex-evoked-topomap:
========================================
Plotting topographic maps of evoked data
========================================
Load evoked data and plot topomaps for selected time points using multiple
additional options.
"""
# Authors: Christian Brodbeck <christianbrodbeck@nyu.edu>
# Tal Linzen <linzen@nyu.edu>
# Denis A. Engeman <denis.engemann@gmail.com>
# Mikołaj Magnuski <mmagnuski@swps.edu.pl>
# Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
# sphinx_gallery_thumbnail_number = 5
import numpy as np
import matplotlib.pyplot as plt
from mne.datasets import sample
from mne import read_evokeds
print(__doc__)
path = sample.data_path()
fname = path + '/MEG/sample/sample_audvis-ave.fif'
# load evoked corresponding to a specific condition
# from the fif file and subtract baseline
condition = 'Left Auditory'
evoked = read_evokeds(fname, condition=condition, baseline=(None, 0))
###############################################################################
# Basic :func:`~mne.viz.plot_topomap` options
# -------------------------------------------
#
# We plot evoked topographies using :func:`mne.Evoked.plot_topomap`. The first
# argument, ``times``, lets us specify the time instants (in seconds!) for which
# topographies will be shown. We select timepoints from 50 to 150 ms with a
# step of 20ms and plot magnetometer data:
times = np.arange(0.05, 0.151, 0.02)
evoked.plot_topomap(times, ch_type='mag', time_unit='s')
###############################################################################
# If ``times`` is set to None, at most 10 regularly spaced topographies will be
# shown:
evoked.plot_topomap(ch_type='mag', time_unit='s')
###############################################################################
# We can use ``nrows`` and ``ncols`` parameter to create multiline plots
# with more timepoints.
all_times = np.arange(-0.2, 0.5, 0.03)
evoked.plot_topomap(all_times, ch_type='mag', time_unit='s',
ncols=8, nrows='auto')
###############################################################################
# Instead of showing topographies at specific time points we can compute
# averages of 50 ms bins centered on these time points to reduce the noise in
# the topographies:
evoked.plot_topomap(times, ch_type='mag', average=0.05, time_unit='s')
###############################################################################
# We can plot gradiometer data (plots the RMS for each pair of gradiometers)
evoked.plot_topomap(times, ch_type='grad', time_unit='s')
###############################################################################
# Additional :func:`~mne.viz.plot_topomap` options
# ------------------------------------------------
#
# We can also use a range of various :func:`mne.viz.plot_topomap` arguments
# that control how the topography is drawn. For example:
#
# * ``cmap`` - to specify the color map
# * ``res`` - to control the resolution of the topographies (lower resolution
# means faster plotting)
# * ``outlines='skirt'`` to see the topography stretched beyond the head circle
# * ``contours`` to define how many contour lines should be plotted
evoked.plot_topomap(times, ch_type='mag', cmap='Spectral_r', res=32,
outlines='skirt', contours=4, time_unit='s')
###############################################################################
# If you look at the edges of the head circle of a single topomap you'll see
# the effect of extrapolation. There are three extrapolation modes:
#
# - ``extrapolate='local'`` extrapolates only to points close to the sensors.
# - ``extrapolate='head'`` extrapolates out to the head circle.
# - ``extrapolate='box'`` extrapolates to a large box stretching beyond the
# head circle.
#
# The default value ``extrapolate='auto'`` will use ``'local'`` for MEG sensors
# and ``'head'`` otherwise. Here we show each option:
extrapolations = ['local', 'head', 'box']
fig, axes = plt.subplots(figsize=(7.5, 4.5), nrows=2, ncols=3)
# Here we look at EEG channels, and use a custom head sphere to get all the
# sensors to be well within the drawn head surface
for axes_row, ch_type in zip(axes, ('mag', 'eeg')):
for ax, extr in zip(axes_row, extrapolations):
evoked.plot_topomap(0.1, ch_type=ch_type, size=2, extrapolate=extr,
axes=ax, show=False, colorbar=False,
sphere=(0., 0., 0., 0.09))
ax.set_title('%s %s' % (ch_type.upper(), extr), fontsize=14)
fig.tight_layout()
###############################################################################
# More advanced usage
# -------------------
#
# Now we plot magnetometer data as topomap at a single time point: 100 ms
# post-stimulus, add channel labels, title and adjust plot margins:
evoked.plot_topomap(0.1, ch_type='mag', show_names=True, colorbar=False,
size=6, res=128, title='Auditory response',
time_unit='s')
plt.subplots_adjust(left=0.01, right=0.99, bottom=0.01, top=0.88)
###############################################################################
# Animating the topomap
# ---------------------
#
# Instead of using a still image we can plot magnetometer data as an animation,
# which animates properly only in matplotlib interactive mode.
# sphinx_gallery_thumbnail_number = 9
times = np.arange(0.05, 0.151, 0.01)
fig, anim = evoked.animate_topomap(
times=times, ch_type='mag', frame_rate=2, time_unit='s', blit=False)
| bsd-3-clause |
mirca/fsopy | fsopy/examples/plot_th_roc.py | 1 | 1042 | from fsopy.receiver_operating_characteristic import th_roc_glq, th_roc_num
from matplotlib import pyplot as plt
# ook modulation
mod_order = 2
# signal to noise ratio in dB
snr_db = 10
# number of transmitted symbols
n_samples = 20
# number of points to make the ROC
n_thresh = 1000
# number of terms for the GL quadrature
n_terms = 90
# fading type
fading = 'gamma_gamma'
# fading parameters
alpha = 1
eta = 1
beta = 1
Pf, Pm1 = th_roc_glq(mod_order, snr_db, n_samples, n_thresh, n_terms, fading,
beta, alpha)
Pf, Pm2 = th_roc_num(mod_order, snr_db, n_samples, n_thresh, fading, beta, alpha)
plt.figure()
plt.loglog(Pf, Pm1)
plt.figure()
plt.semilogx(Pf, Pm1-Pm2)
plt.show()
fading = 'exp_weibull'
Pf, Pm1 = th_roc_glq(mod_order, snr_db, n_samples, n_thresh, n_terms, fading,
beta, alpha, eta)
Pf, Pm2 = th_roc_num(mod_order, snr_db, n_samples, n_thresh, fading, beta,
alpha, eta)
plt.figure()
plt.loglog(Pf, Pm1)
plt.figure()
plt.semilogx(Pf, Pm1-Pm2)
plt.show()
| mit |
GbalsaC/bitnamiP | venv/lib/python2.7/site-packages/sklearn/neighbors/classification.py | 2 | 11846 | """Nearest Neighbor Classification"""
# Authors: Jake Vanderplas <vanderplas@astro.washington.edu>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Sparseness support by Lars Buitinck <L.J.Buitinck@uva.nl>
#
# License: BSD, (C) INRIA, University of Amsterdam
import numpy as np
from scipy import stats
from ..utils.extmath import weighted_mode
from .base import \
_check_weights, _get_weights, \
NeighborsBase, KNeighborsMixin,\
RadiusNeighborsMixin, SupervisedIntegerMixin
from ..base import ClassifierMixin
from ..utils import atleast2d_or_csr
class KNeighborsClassifier(NeighborsBase, KNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing the k-nearest neighbors vote.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`scipy.spatial.cKDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
warn_on_equidistant : boolean, optional. Defaults to True.
Generate a warning if equidistant neighbors are discarded.
For classification or regression based on k-neighbors, if
neighbor k and neighbor k+1 have identical distances but
different labels, then the result will be dependent on the
ordering of the training data.
If the fit method is ``'kd_tree'``, no warnings will be generated.
p: integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import KNeighborsClassifier
>>> neigh = KNeighborsClassifier(n_neighbors=3)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
KNeighborsClassifier(...)
>>> print(neigh.predict([[1.1]]))
[0]
>>> print(neigh.predict_proba([[0.9]]))
[[ 0.66666667 0.33333333]]
See also
--------
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
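    A callable passed as ``weights`` simply maps an array of distances to an
    array of weights with the same shape. For illustration only (not part of
    this module)::

        def inverse_distance(dist):
            return 1.0 / (dist + 1e-8)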
"""
def __init__(self, n_neighbors=5,
weights='uniform',
algorithm='auto', leaf_size=30,
warn_on_equidistant=True, p=2):
self._init_params(n_neighbors=n_neighbors,
algorithm=algorithm,
leaf_size=leaf_size,
warn_on_equidistant=warn_on_equidistant,
p=p)
self.weights = _check_weights(weights)
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X: array
A 2-D array representing the test points.
Returns
-------
labels: array
List of class labels (one for each data sample).
"""
X = atleast2d_or_csr(X)
neigh_dist, neigh_ind = self.kneighbors(X)
pred_labels = self._y[neigh_ind]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
mode, _ = stats.mode(pred_labels, axis=1)
else:
mode, _ = weighted_mode(pred_labels, weights, axis=1)
return mode.flatten().astype(self._y.dtype)
def predict_proba(self, X):
"""Return probability estimates for the test data X.
Parameters
----------
X: array, shape = (n_samples, n_features)
A 2-D array representing the test points.
Returns
-------
probabilities : array, shape = [n_samples, n_classes]
Probabilities of the samples for each class in the model,
where classes are ordered arithmetically.
"""
X = atleast2d_or_csr(X)
neigh_dist, neigh_ind = self.kneighbors(X)
pred_labels = self._y[neigh_ind]
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
weights = np.ones_like(pred_labels)
probabilities = np.zeros((X.shape[0], self._classes.size))
# Translate class label to a column index in probabilities array.
# This may not be needed provided classes labels are guaranteed to be
# np.arange(n_classes) (e.g. consecutive and starting with 0)
pred_indices = pred_labels.copy()
for k, c in enumerate(self._classes):
pred_indices[pred_labels == c] = k
# a simple ':' index doesn't work right
all_rows = np.arange(X.shape[0])
for i, idx in enumerate(pred_indices.T): # loop is O(n_neighbors)
probabilities[all_rows, idx] += weights[:, i]
# normalize 'votes' into real [0,1] probabilities
probabilities = (probabilities.T / probabilities.sum(axis=1)).T
return probabilities
class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin,
SupervisedIntegerMixin, ClassifierMixin):
"""Classifier implementing a vote among neighbors within a given radius
Parameters
----------
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
weights : str or callable
weight function used in prediction. Possible values:
- 'uniform' : uniform weights. All points in each neighborhood
are weighted equally.
- 'distance' : weight points by the inverse of their distance.
in this case, closer neighbors of a query point will have a
greater influence than neighbors which are further away.
- [callable] : a user-defined function which accepts an
array of distances, and returns an array of the same shape
containing the weights.
Uniform weights are used by default.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
- 'kd_tree' will use :class:`scipy.spatial.cKDtree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or cKDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
p: integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
outlier_label: int, optional (default = None)
Label, which is given for outlier samples (samples with no
neighbors on given radius).
If set to None, ValueError is raised, when outlier is detected.
Examples
--------
>>> X = [[0], [1], [2], [3]]
>>> y = [0, 0, 1, 1]
>>> from sklearn.neighbors import RadiusNeighborsClassifier
>>> neigh = RadiusNeighborsClassifier(radius=1.0)
>>> neigh.fit(X, y) # doctest: +ELLIPSIS
RadiusNeighborsClassifier(...)
>>> print(neigh.predict([[1.5]]))
[0]
See also
--------
KNeighborsClassifier
RadiusNeighborsRegressor
KNeighborsRegressor
NearestNeighbors
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, radius=1.0, weights='uniform',
algorithm='auto', leaf_size=30, p=2, outlier_label=None):
self._init_params(radius=radius,
algorithm=algorithm,
leaf_size=leaf_size,
p=p)
self.weights = _check_weights(weights)
self.outlier_label = outlier_label
def predict(self, X):
"""Predict the class labels for the provided data
Parameters
----------
X: array
A 2-D array representing the test points.
Returns
-------
labels: array
List of class labels (one for each data sample).
"""
X = atleast2d_or_csr(X)
neigh_dist, neigh_ind = self.radius_neighbors(X)
pred_labels = [self._y[ind] for ind in neigh_ind]
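        # Samples with no neighbour inside `radius` either receive
        # `outlier_label` (with a tiny placeholder distance so the weighting
        # below stays well defined) or trigger the ValueError below.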
        if self.outlier_label is not None:
outlier_label = np.array((self.outlier_label, ))
small_value = np.array((1e-6, ))
for i, pl in enumerate(pred_labels):
# Check that all have at least 1 neighbor
if len(pl) < 1:
pred_labels[i] = outlier_label
neigh_dist[i] = small_value
else:
for pl in pred_labels:
# Check that all have at least 1 neighbor
if len(pl) < 1:
raise ValueError('no neighbors found for a test sample, '
'you can try using larger radius, '
'give a label for outliers, '
'or consider removing them in your '
'dataset')
weights = _get_weights(neigh_dist, self.weights)
if weights is None:
mode = np.array([stats.mode(pl)[0] for pl in pred_labels],
dtype=self._y.dtype)
else:
mode = np.array([weighted_mode(pl, w)[0]
for (pl, w) in zip(pred_labels, weights)],
dtype=self._y.dtype)
return mode.flatten()
| agpl-3.0 |
mark-r-g/hydrus | hydrus/preprocess.py | 1 | 3668 | # Mark Gatheman <markrg@protonmail.com>
#
# This file is part of Hydrus.
#
# Hydrus is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Hydrus is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Hydrus. If not, see <http://www.gnu.org/licenses/>.
import os
import logging
from numpy import nan, where
from pandas import read_sas
from hydrus.utility import set_config, winsorize
from hydrus import constants
def preprocess(infile=None, settings_file=None, cfg=None):
"""
Preprocess CMS's raw data file. Remove non-qualifying data according to
CMS's specifications, standardize each measure score, and winsorize the
scores at three standard deviations. Return the new DataFrame and a dict
of the final measures for each measure group.
"""
if cfg is None:
if settings_file:
cfg = set_config(settings_file)
else:
logging.warning("Using default configuration file.")
cfg = set_config()
if infile is None:
infile = os.path.join(constants.IN, cfg.INFILE)
# Load CMS's SAS data file.
df = read_sas(infile, index='PROVIDER_ID')
df.index = df.index.astype(str)
# Combine measures IMM-3 and OP-27.
mask = df['IMM_3'].notnull()
df['IMM_3_OP_27'] = where(mask, df['IMM_3'], df['OP_27'])
df['IMM_3_OP_27_DEN'] = where(mask, df['IMM_3_DEN'], df['OP_27_DEN'])
for x in ['IMM_3', 'OP_27', 'IMM_3_DEN', 'OP_27_DEN']:
df.drop(x, axis=1, inplace=True)
# Remove columns where <= 100 hospitals have data.
incl_meas, incl_den = [], []
counts = df.count() # nonnull hospitals per measure
for k, v in counts.items():
if k.endswith('_DEN'):
continue
if v <= 100:
df.drop(k, axis=1, inplace=True)
df.drop(k+'_DEN', axis=1, inplace=True)
logging.info(f'dropped {k} (<=100 hospitals have data)')
else:
incl_meas.append(k)
incl_den.append(k+'_DEN')
# Create special denominators for patient experience group.
patientexp_denom = df['H_NUMB_COMP'] * df['H_RESP_RATE_P'] / 100
for col_name in cfg.PATIENTEXP_DENOM_COLS:
df[col_name] = patientexp_denom
# For each measure, if the denominator is NAN, make the numerator NAN too.
for x, y in zip(incl_meas, incl_den):
if y in df.columns: # skips H_RESP_RATE_P and H_NUMB_COMP
df.loc[df[y].isnull(), x] = nan
# Create final list of measures for each measure group.
final_meas = {}
for g in cfg.GROUPS:
final_meas[g] = (
[x for x in incl_meas if x in cfg.MEAS_GROUPS[g]],
[y for x, y in zip(incl_meas, incl_den) if x in cfg.MEAS_GROUPS[g]]
)
# Remove hospitals with no final measures.
df = df.dropna(thresh=1)
# Convert to z-scores.
for col_name in incl_meas:
col = df[col_name]
df[col_name] = (col - col.mean()) / col.std()
# Switch sign of measures for which a lower score is good.
for col_name in cfg.FLIPPED_MEASURES:
df[col_name] = -1 * df[col_name]
# Winsorize z-scores at +/-3.
for col_name in incl_meas:
df[col_name] = df[col_name].map(winsorize)
return df, final_meas
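# Minimal usage sketch (added example, not part of the original module).
# It assumes the default Hydrus configuration and the raw SAS input file
# referenced by `constants` are available on disk.
if __name__ == "__main__":
    df, final_meas = preprocess()
    for group, (measures, denominators) in final_meas.items():
        logging.info("%s: %d measures kept", group, len(measures))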
| gpl-3.0 |
dparks1134/PhyloRank | setup.py | 1 | 1876 | import os
import re
from setuptools import setup, find_packages
def read_meta():
path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'phylorank/__init__.py')
with open(path) as fh:
hits = re.findall(r'__(\w+)__ ?= ?["\'](.+)["\']\n', fh.read())
return {k: v for k, v in hits}
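# For example, a line such as __title__ = 'PhyloRank' in phylorank/__init__.py
# yields the entry {'title': 'PhyloRank'} in the returned dict.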
def readme():
with open('README.md') as fh:
return fh.read()
meta = read_meta()
setup(
author=meta['author'],
author_email=meta['author_email'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: Scientific/Engineering :: Bio-Informatics',
],
data_files=[("", ["LICENSE"])],
description=meta['description'],
entry_points={
'console_scripts': [
'phylorank = phylorank.__main__:main'
]
},
install_requires=['biolib >= 0.1.0', 'numpy', 'matplotlib',
'dendropy>=4.1.0', 'scipy', 'mpld3>=0.5.2'],
keywords='phylogenetics taxonomy relative evolutionary divergence tree '
'decorate decoration',
license=meta['license'],
long_description=readme(),
long_description_content_type='text/markdown',
name=meta['title'],
packages=find_packages(),
project_urls={
"Bug Tracker": "https://github.com/dparks1134/PhyloRank/issues",
"Documentation": "https://github.com/dparks1134/PhyloRank",
"Source Code": "https://github.com/dparks1134/PhyloRank",
},
python_requires='>=3.6',
url=meta['url'],
version=meta['version']
)
| gpl-3.0 |
fspaolo/scikit-learn | examples/manifold/plot_compare_methods.py | 8 | 3592 | """
=========================================
Comparison of Manifold Learning methods
=========================================
An illustration of dimensionality reduction on the S-curve dataset
with various manifold learning methods.
For a discussion and comparison of these algorithms, see the
:ref:`manifold module page <manifold>`
For a similar example, where the methods are applied to a
sphere dataset, see :ref:`example_manifold_plot_manifold_sphere.py`
Note that the purpose of MDS is to find a low-dimensional
representation of the data (here 2D) in which the distances respect well
the distances in the original high-dimensional space; unlike other
manifold-learning algorithms, it does not seek an isotropic
representation of the data in the low-dimensional space.
"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
print(__doc__)
from time import time
import pylab as pl
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import NullFormatter
from sklearn import manifold, datasets
# Next line to silence pyflakes. This import is needed.
Axes3D
n_points = 1000
X, color = datasets.samples_generator.make_s_curve(n_points, random_state=0)
n_neighbors = 10
n_components = 2
fig = pl.figure(figsize=(15, 8))
pl.suptitle("Manifold Learning with %i points, %i neighbors"
% (1000, n_neighbors), fontsize=14)
try:
# compatibility matplotlib < 1.0
ax = fig.add_subplot(241, projection='3d')
ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=color, cmap=pl.cm.Spectral)
ax.view_init(4, -72)
except:
ax = fig.add_subplot(241, projection='3d')
pl.scatter(X[:, 0], X[:, 2], c=color, cmap=pl.cm.Spectral)
methods = ['standard', 'ltsa', 'hessian', 'modified']
labels = ['LLE', 'LTSA', 'Hessian LLE', 'Modified LLE']
for i, method in enumerate(methods):
t0 = time()
Y = manifold.LocallyLinearEmbedding(n_neighbors, n_components,
eigen_solver='auto',
method=method).fit_transform(X)
t1 = time()
print("%s: %.2g sec" % (methods[i], t1 - t0))
ax = fig.add_subplot(242 + i)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("%s (%.2g sec)" % (labels[i], t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
t0 = time()
Y = manifold.Isomap(n_neighbors, n_components).fit_transform(X)
t1 = time()
print("Isomap: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(246)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("Isomap (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
t0 = time()
mds = manifold.MDS(n_components, max_iter=100, n_init=1)
Y = mds.fit_transform(X)
t1 = time()
print("MDS: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(247)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("MDS (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
t0 = time()
se = manifold.SpectralEmbedding(n_components=n_components,
n_neighbors=n_neighbors)
Y = se.fit_transform(X)
t1 = time()
print("SpectralEmbedding: %.2g sec" % (t1 - t0))
ax = fig.add_subplot(248)
pl.scatter(Y[:, 0], Y[:, 1], c=color, cmap=pl.cm.Spectral)
pl.title("SpectralEmbedding (%.2g sec)" % (t1 - t0))
ax.xaxis.set_major_formatter(NullFormatter())
ax.yaxis.set_major_formatter(NullFormatter())
pl.axis('tight')
pl.show()
| bsd-3-clause |
rafwiewiora/msmbuilder | msmbuilder/project_templates/cluster/cluster-plot.py | 9 | 1089 | """Plot cluster centers on tICA coordinates
{{header}}
"""
# ? include "plot_header.template"
# ? from "plot_macros.template" import xdg_open with context
import numpy as np
import seaborn as sns
from matplotlib import pyplot as plt
from msmbuilder.io import load_trajs, load_generic
sns.set_style('ticks')
colors = sns.color_palette()
## Load
kmeans = load_generic('kmeans.pickl')
meta, ktrajs = load_trajs('ktrajs')
meta, ttrajs = load_trajs('ttrajs', meta)
txx = np.concatenate(list(ttrajs.values()))
def plot_cluster_centers(ax):
ax.hexbin(txx[:, 0], txx[:, 1],
cmap=sns.cubehelix_palette(as_cmap=True),
mincnt=1,
bins='log',
)
ax.scatter(kmeans.cluster_centers_[:, 0],
kmeans.cluster_centers_[:, 1],
s=40, c=colors[0],
)
ax.set_xlabel("tIC 1", fontsize=16)
ax.set_ylabel("tIC 2", fontsize=16)
## Plot 1
fig, ax = plt.subplots(figsize=(7, 5))
plot_cluster_centers(ax)
fig.tight_layout()
fig.savefig('kmeans-centers.pdf')
# {{xdg_open('kmeans-centers.pdf')}}
| lgpl-2.1 |
phil-mansfield/gotetra | render/scripts/plot_savgol_profile.py | 1 | 8413 | from __future__ import division
import sys
import matplotlib.pyplot as plt
import numpy as np
import scipy.signal as signal
import scipy.stats as stats
import scipy.interpolate as intr
import deriv
class SplinedBinConverter(object):
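    """Rebin irregularly sampled (xs, ys) data onto `bins` equal-width bins
    spanning [low, high]: each output value is the integral of an
    interpolating spline over the bin divided by the covered width."""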
def __init__(self, low, high, bins):
self.low, self.high, self.bins = low, high, bins
dx = (self.high - self.low) / bins
self.edges = np.linspace(low, high, bins + 1)
def dx(self):
return (self.high - self.low) / self.bins
def convert(self, xs, ys):
ys = ys[(self.edges[0] <= xs) & (xs <= self.edges[-1])]
xs = xs[(self.edges[0] <= xs) & (xs <= self.edges[-1])]
if len(xs) <= 3 or len(ys) <= 3: return None, None
low_edge_idx = np.searchsorted(self.edges, xs[0])
if low_edge_idx == 0: low_edge_idx = 1
high_edge_idx = np.searchsorted(self.edges, xs[-1])
sp = intr.UnivariateSpline(xs, ys, s=0)
if high_edge_idx == low_edge_idx:
return (np.array([xs[0], xs[-1]]),
np.array([sp.integral(xs[0], xs[-1]) / (xs[-1] - xs[0])]))
edges = self.edges[low_edge_idx - 1: high_edge_idx + 1]
first = self._first_bin(edges, xs, sp)
mid = self._mid_bins(edges, xs, sp)
last = self._last_bin(edges, xs, sp)
return edges, append(first, mid, last)
def _first_bin(self, edges, xs, sp):
if xs[0] == edges[1]: return []
return [sp.integral(xs[0], edges[1]) / (edges[1] - xs[0])]
def _mid_bins(self, edges, xs, sp):
vals = np.zeros(len(edges) - 3)
for i in xrange(len(vals)):
start_edge, end_edge = edges[i + 1], edges[i + 2]
vals[i] = (sp.integral(start_edge, end_edge) /
(end_edge - start_edge))
return vals
def _last_bin(self, edges, xs, sp):
if xs[-1] == edges[-2]: return []
return [sp.integral(edges[-2], xs[-1]) / (xs[-1] - edges[-2])]
def append(*arrays):
out, idx = np.zeros(sum(map(len, arrays))), 0
for array in arrays:
for i in xrange(len(array)):
out[idx] = array[i]
idx += 1
return out
def pretty_fig(n):
""" pretty_fig(n) is equivalent to plt.figure(n), except it also
sets a number of options which make the resulting plot look nicer.
"""
plt.figure(n)
plt.rc('text', usetex=True)
plt.rc('font',size=19)
plt.rc('xtick.major',pad=5); plt.rc('xtick.minor',pad=5)
plt.rc('ytick.major',pad=5); plt.rc('ytick.minor',pad=5)
def nan_split(rs, rhos):
""" nan_split(rs, rhos) splits up rs and rhos into lists of contiguous
    non-NaN sequences of values.
"""
rs_group, rhos_group = [], []
start_i = 0
prev_nan = False
for i in xrange(len(rs)):
if np.isnan(rhos[i]):
if not prev_nan:
if i != start_i:
rs_group.append(np.array(rs[start_i: i]))
rhos_group.append(np.array(rhos[start_i: i]))
prev_nan = True
else:
if prev_nan:
start_i = i
prev_nan = False
if not prev_nan:
rs_group.append(np.array(rs[start_i:len(rs)]))
rhos_group.append(np.array(rhos[start_i:len(rhos)]))
return rs_group, rhos_group
def r_sp(rs, rhos, derivs, lim=-5):
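    """Return the radius of the steepest local minimum of the slope profile
    `derivs` that lies below `lim` and occurs where `rhos` equals its running
    minimum; return NaN if no such point exists."""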
curr_min = rhos <= np.minimum.accumulate(rhos)
idxs = signal.argrelextrema(derivs, np.less)[0]
idxs = np.array([idx for idx in idxs if idx != 0 and idx != len(rs) - 1])
if len(idxs) == 0: return np.nan
idxs = idxs[curr_min[idxs]]
if len(idxs) == 0: return np.nan
idxs = idxs[derivs[idxs] < lim]
if len(idxs) == 0: return np.nan
min_idx = idxs[np.argmin(derivs[idxs])]
return rs[min_idx]
# rs must be evenly spaced.
def find_splashback(rs, rhos, sg_window):
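    """Smooth the (log) density profile and its derivative with a fourth-order
    Savitzky-Golay filter of length `sg_window`, then locate the splashback
    radius with `r_sp`."""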
sm_rhos = signal.savgol_filter(rhos, sg_window, 4)
dr = (rs[-1] - rs[0]) / (len(rs) - 1)
sm_derivs = signal.savgol_filter(rhos, sg_window, 4, deriv=1, delta=dr)
return r_sp(rs, sm_rhos, sm_derivs, lim=-5)
def splashback_range(rs, rhos, range_max=101):
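    """Estimate the splashback radius for successive Savitzky-Golay window
    lengths (5, 7, 9, ...), stopping at the first window that is too wide or
    yields no detection; the estimates are returned in linear units."""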
if rs[0] <= 0: rs, rhos = rs[1:], rhos[1:]
r_sps = []
lrs, lrhos = np.log10(rs), np.log10(rhos)
for sg_window in xrange(2, range_max//2):
sg_window = sg_window * 2 + 1
if sg_window >= len(lrs): break
r_sp = find_splashback(lrs, lrhos, sg_window)
if np.isnan(r_sp): break
r_sps.append(r_sp)
return 10**np.array(r_sps)
cs = ["k", "r", "b", "g", "pink", "orange",
"brown", "y", "DarkMagenta", "LightSlateGray"]
if __name__ == "__main__":
rows = np.loadtxt(sys.argv[1])
vec_xs, vec_ys, vec_zs = rows[:3]
idxs = np.arange(len(vec_xs) // 2) * 2
vec_xs, vec_ys, vec_zs = vec_xs[idxs], vec_ys[idxs], vec_zs[idxs]
rows = rows[3:]
cols = map(lambda *x: x, *rows)
profile_count = len(cols) // 2
bins = 200
range_max = 100
log_low, log_high = -2, 0
log_converter = SplinedBinConverter(log_low, log_high, bins)
max_cutoff, std_cutoff = 0.1, 0.1
n, m = 0, 0
maxes, stds, valid_rs = [], [], []
for i in xrange(profile_count):
rs_group, rhos_group = nan_split(cols[2*i], cols[2*i + 1])
for rs, rhos in zip(rs_group, rhos_group):
R = np.nan
if rs[0] <= 0: rs, rhos = rs[1:], rhos[1:]
edges, vals = log_converter.convert(np.log10(rs), np.log10(rhos))
if edges is None: continue
rs, rhos = 10**((edges[:-1] + edges[1:]) / 2), 10**vals
if len(rs) <= 21: continue
rs_range = splashback_range(rs, rhos, range_max)
if len(rs_range) * 2 <= 21: continue
r_mean, r_std = np.mean(rs_range), np.std(rs_range)
drs = np.abs(rs_range[1:] - rs_range[:-1])
dr_max, dr_max_mean = np.max(drs), np.mean(drs)
dr_sign_max_mean = np.mean(rs_range[1:] - rs_range[:-1])
rs_range_diff = np.max(rs_range) - np.min(rs_range)
            # Guard against degenerate cases where the estimates do not vary
            # at all across the smoothing windows.
if dr_max == 0 or r_std == 0: continue
m += 1
maxes.append(dr_max / r_mean)
stds.append(r_std / r_mean)
# Figure 0
plt.figure(0)
c = cs[i] if i < len(cs) else "w"
plt.plot(dr_max / r_mean, r_std / r_mean, "o")
if i < 10:
plt.figure(4)
windows = np.arange(2, len(rs_range) + 2)*2 + 1
plt.plot(windows, rs_range, c=c, lw=3)
is_good = dr_max/r_mean<max_cutoff and r_std/r_mean<std_cutoff
plt.plot(windows, rs_range, "o", c=c, lw=3,
label="%.3f %.3f %.3f %.5f %s" %
(dr_max / r_mean, r_std / r_mean,
rs_range_diff / r_mean,
np.abs(dr_sign_max_mean / r_mean),
"*" if is_good else ""))
if dr_max / r_mean < max_cutoff and r_std / r_mean < std_cutoff:
n += 1
R = rs_range[0]
valid_rs.append(R)
print ("%9.4g %9.4g %9.4g %9.4g %9d" %
(vec_xs[i], vec_ys[i], vec_zs[i], R, i))
sys.stderr.write("%d LoS's found, %d are good\n" % (m, n))
# Figure 1
plt.figure(1)
plt.hist(np.log10(maxes), bins=40, range=(-2, 0))
# Figure 2
plt.figure(2)
plt.hist(np.log10(stds), bins=40, range=(-2, 0))
# Figure 3
plt.figure(3)
plt.hist(valid_rs, bins=40, range=(0, 1))
########################
# Do all the plotting. #
########################
# Figure 0
pretty_fig(0)
plt.ylabel(r"stdev($R_{\rm sp}$) [Mpc/$h$]")
plt.ylim((1e-2, 1))
plt.yscale("log")
plt.xlabel(r"max($R_{\rm sp}$) [Mpc/$h$]")
plt.xlim((1e-2, 1))
plt.xscale("log")
plt.grid()
plt.savefig("%s_std_max1.png" % sys.argv[2])
#Figure 1
pretty_fig(1)
plt.xlabel(r"$\log_{10}\ $max($R_{\rm sp}$) [Mpc/$h$]")
plt.savefig("%s_max_hist.png" % sys.argv[2])
# Figure 2
pretty_fig(2)
plt.xlabel(r"$\log_{10}\ $stdev($\Delta R_{\rm sp}$) [Mpc/$h$]")
plt.savefig("%s_std_hist.png" % sys.argv[2])
#Figure 3
pretty_fig(3)
plt.xlabel(r"$R_{\rm sp}$ [Mpc/$h$]")
plt.savefig("%s_r_hist.png" % sys.argv[2])
# Figure 4
pretty_fig(4)
plt.xlim((0, 125))
plt.ylabel(r"$R_{\rm sp}$ [Mpc/$h$]")
plt.ylim((0, 1))
plt.legend(fontsize=12)
plt.savefig("%s_r_window2.png" % sys.argv[2])
| mit |
gertingold/scipy | scipy/optimize/_shgo_lib/triangulation.py | 9 | 26925 | import numpy as np
import copy
try:
from functools import lru_cache # For Python 3 only
except ImportError: # Python 2:
import time
import functools
import collections
# Note to avoid using external packages such as functools32 we use this code
# only using the standard library
def lru_cache(maxsize=255, timeout=None):
"""
Thanks to ilialuk @ https://stackoverflow.com/users/2121105/ilialuk for
this code snippet. Modifications by S. Endres
"""
class LruCacheClass(object):
def __init__(self, input_func, max_size, timeout):
self._input_func = input_func
self._max_size = max_size
self._timeout = timeout
# This will store the cache for this function,
# format - {caller1 : [OrderedDict1, last_refresh_time1],
# caller2 : [OrderedDict2, last_refresh_time2]}.
# In case of an instance method - the caller is the instance,
# in case called from a regular function - the caller is None.
self._caches_dict = {}
def cache_clear(self, caller=None):
# Remove the cache for the caller, only if exists:
if caller in self._caches_dict:
del self._caches_dict[caller]
self._caches_dict[caller] = [collections.OrderedDict(),
time.time()]
def __get__(self, obj, objtype):
""" Called for instance methods """
return_func = functools.partial(self._cache_wrapper, obj)
return_func.cache_clear = functools.partial(self.cache_clear,
obj)
# Return the wrapped function and wraps it to maintain the
# docstring and the name of the original function:
return functools.wraps(self._input_func)(return_func)
def __call__(self, *args, **kwargs):
""" Called for regular functions """
return self._cache_wrapper(None, *args, **kwargs)
# Set the cache_clear function in the __call__ operator:
__call__.cache_clear = cache_clear
def _cache_wrapper(self, caller, *args, **kwargs):
# Create a unique key including the types (in order to
# differentiate between 1 and '1'):
kwargs_key = "".join(map(
lambda x: str(x) + str(type(kwargs[x])) + str(kwargs[x]),
sorted(kwargs)))
key = "".join(
map(lambda x: str(type(x)) + str(x), args)) + kwargs_key
# Check if caller exists, if not create one:
if caller not in self._caches_dict:
self._caches_dict[caller] = [collections.OrderedDict(),
time.time()]
else:
# Validate in case the refresh time has passed:
if self._timeout is not None:
if (time.time() - self._caches_dict[caller][1]
> self._timeout):
self.cache_clear(caller)
# Check if the key exists, if so - return it:
cur_caller_cache_dict = self._caches_dict[caller][0]
if key in cur_caller_cache_dict:
return cur_caller_cache_dict[key]
# Validate we didn't exceed the max_size:
if len(cur_caller_cache_dict) >= self._max_size:
# Delete the first item in the dict:
try:
cur_caller_cache_dict.popitem(False)
except KeyError:
pass
# Call the function and store the data in the cache (call it
# with the caller in case it's an instance function)
if caller is not None:
args = (caller,) + args
cur_caller_cache_dict[key] = self._input_func(*args, **kwargs)
return cur_caller_cache_dict[key]
# Return the decorator wrapping the class (also wraps the instance to
# maintain the docstring and the name of the original function):
return (lambda input_func: functools.wraps(input_func)(
LruCacheClass(input_func, maxsize, timeout)))
class Complex:
def __init__(self, dim, func, func_args=(), symmetry=False, bounds=None,
g_cons=None, g_args=()):
self.dim = dim
self.bounds = bounds
self.symmetry = symmetry # TODO: Define the functions to be used
# here in init to avoid if checks
self.gen = 0
self.perm_cycle = 0
# Every cell is stored in a list of its generation,
# ex. the initial cell is stored in self.H[0]
# 1st get new cells are stored in self.H[1] etc.
# When a cell is subgenerated it is removed from this list
self.H = [] # Storage structure of cells
# Cache of all vertices
self.V = VertexCache(func, func_args, bounds, g_cons, g_args)
# Generate n-cube here:
self.n_cube(dim, symmetry=symmetry)
# TODO: Assign functions to a the complex instead
if symmetry:
self.generation_cycle = 1
# self.centroid = self.C0()[-1].x
# self.C0.centroid = self.centroid
else:
self.add_centroid()
self.H.append([])
self.H[0].append(self.C0)
self.hgr = self.C0.homology_group_rank()
self.hgrd = 0 # Complex group rank differential
# self.hgr = self.C0.hg_n
# Build initial graph
self.graph_map()
self.performance = []
self.performance.append(0)
self.performance.append(0)
def __call__(self):
return self.H
def n_cube(self, dim, symmetry=False, printout=False):
"""
Generate the simplicial triangulation of the n dimensional hypercube
containing 2**n vertices
"""
origin = list(np.zeros(dim, dtype=int))
self.origin = origin
supremum = list(np.ones(dim, dtype=int))
self.supremum = supremum
# tuple versions for indexing
origintuple = tuple(origin)
supremumtuple = tuple(supremum)
x_parents = [origintuple]
if symmetry:
self.C0 = Simplex(0, 0, 0, self.dim) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
i_s = 0
self.perm_symmetry(i_s, x_parents, origin)
self.C0.add_vertex(self.V[supremumtuple])
else:
self.C0 = Cell(0, 0, origin, supremum) # Initial cell object
self.C0.add_vertex(self.V[origintuple])
self.C0.add_vertex(self.V[supremumtuple])
i_parents = []
self.perm(i_parents, x_parents, origin)
if printout:
print("Initial hyper cube:")
for v in self.C0():
v.print_out()
def perm(self, i_parents, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
# Construct required iterator
iter_range = [x for x in range(self.dim) if x not in i_parents]
for i in iter_range:
i2_parents = copy.copy(i_parents)
i2_parents.append(i)
xi2 = copy.copy(xi)
xi2[i] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbours and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
# Permutate
self.perm(i2_parents, x_parents2, xi2)
def perm_symmetry(self, i_s, x_parents, xi):
# TODO: Cut out of for if outside linear constraint cutting planes
xi_t = tuple(xi)
xi2 = copy.copy(xi)
xi2[i_s] = 1
# Make new vertex list a hashable tuple
xi2_t = tuple(xi2)
# Append to cell
self.C0.add_vertex(self.V[xi2_t])
# Connect neighbours and vice versa
# Parent point
self.V[xi2_t].connect(self.V[xi_t])
# Connect all family of simplices in parent containers
for x_ip in x_parents:
self.V[xi2_t].connect(self.V[x_ip])
x_parents2 = copy.copy(x_parents)
x_parents2.append(xi_t)
i_s += 1
if i_s == self.dim:
return
# Permutate
self.perm_symmetry(i_s, x_parents2, xi2)
def add_centroid(self):
"""Split the central edge between the origin and supremum of
a cell and add the new vertex to the complex"""
self.centroid = list(
(np.array(self.origin) + np.array(self.supremum)) / 2.0)
self.C0.add_vertex(self.V[tuple(self.centroid)])
self.C0.centroid = self.centroid
# Disconnect origin and supremum
self.V[tuple(self.origin)].disconnect(self.V[tuple(self.supremum)])
# Connect centroid to all other vertices
for v in self.C0():
self.V[tuple(self.centroid)].connect(self.V[tuple(v.x)])
self.centroid_added = True
return
# Construct incidence array:
def incidence(self):
if self.centroid_added:
self.structure = np.zeros([2 ** self.dim + 1, 2 ** self.dim + 1],
dtype=int)
else:
self.structure = np.zeros([2 ** self.dim, 2 ** self.dim],
dtype=int)
for v in self.HC.C0():
for v2 in v.nn:
self.structure[v.index, v2.index] = 1
return
# A more sparse incidence generator:
def graph_map(self):
""" Make a list of size 2**n + 1 where an entry is a vertex
incidence, each list element contains a list of indexes
corresponding to that entries neighbours"""
self.graph = [[v2.index for v2 in v.nn] for v in self.C0()]
# Graph structure method:
# 0. Capture the indices of the initial cell.
# 1. Generate new origin and supremum scalars based on current generation
# 2. Generate a new set of vertices corresponding to a new
# "origin" and "supremum"
# 3. Connected based on the indices of the previous graph structure
# 4. Disconnect the edges in the original cell
def sub_generate_cell(self, C_i, gen):
"""Subgenerate a cell `C_i` of generation `gen` and
homology group rank `hgr`."""
origin_new = tuple(C_i.centroid)
centroid_index = len(C_i()) - 1
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# Generate subcubes using every extreme vertex in C_i as a supremum
# and the centroid of C_i as the origin
H_new = [] # list storing all the new cubes split from C_i
for i, v in enumerate(C_i()[:-1]):
supremum = tuple(v.x)
H_new.append(
self.construct_hypercube(origin_new, supremum, gen, C_i.hg_n))
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
if i == centroid_index: # Break out of centroid
break
for j in connections:
C_i()[i].disconnect(C_i()[j])
# Destroy the old cell
if C_i is not self.C0: # Garbage collector does this anyway; not needed
del C_i
# TODO: Recalculate all the homology group ranks of each cell
return H_new
def split_generation(self):
"""
Run sub_generate_cell for every cell in the current complex self.gen
"""
no_splits = False # USED IN SHGO
try:
for c in self.H[self.gen]:
if self.symmetry:
# self.sub_generate_cell_symmetry(c, self.gen + 1)
self.split_simplex_symmetry(c, self.gen + 1)
else:
self.sub_generate_cell(c, self.gen + 1)
except IndexError:
no_splits = True # USED IN SHGO
self.gen += 1
return no_splits # USED IN SHGO
# @lru_cache(maxsize=None)
def construct_hypercube(self, origin, supremum, gen, hgr,
printout=False):
"""
Build a hypercube with triangulations symmetric to C0.
Parameters
----------
origin : vec
supremum : vec (tuple)
gen : generation
hgr : parent homology group rank
"""
# Initiate new cell
C_new = Cell(gen, hgr, origin, supremum)
C_new.centroid = tuple(
(np.array(origin) + np.array(supremum)) / 2.0)
# Build new indexed vertex list
V_new = []
# Cached calculation
for i, v in enumerate(self.C0()[:-1]):
t1 = self.generate_sub_cell_t1(origin, v.x)
t2 = self.generate_sub_cell_t2(supremum, v.x)
vec = t1 + t2
vec = tuple(vec)
C_new.add_vertex(self.V[vec])
V_new.append(vec)
# Add new centroid
C_new.add_vertex(self.V[C_new.centroid])
V_new.append(C_new.centroid)
# Connect new vertices #TODO: Thread into other loop; no need for V_new
for i, connections in enumerate(self.graph):
# Present vertex V_new[i]; connect to all connections:
for j in connections:
self.V[V_new[i]].connect(self.V[V_new[j]])
if printout:
print("A sub hyper cube with:")
print("origin: {}".format(origin))
print("supremum: {}".format(supremum))
for v in C_new():
v.print_out()
# Append the new cell to the to complex
self.H[gen].append(C_new)
return C_new
def split_simplex_symmetry(self, S, gen):
"""
Split a hypersimplex S into two sub simplices by building a hyperplane
which connects to a new vertex on an edge (the longest edge in
dim = {2, 3}) and every other vertex in the simplex that is not
connected to the edge being split.
This function utilizes the knowledge that the problem is specified
        with symmetric constraints.
        The longest edge is tracked by an ordering of the
        vertices in every simplex: the edge between the first and second
        vertex is the longest edge to be split in the next iteration.
"""
# If not gen append
try:
self.H[gen]
except IndexError:
self.H.append([])
# Find new vertex.
# V_new_x = tuple((np.array(C()[0].x) + np.array(C()[1].x)) / 2.0)
s = S()
firstx = s[0].x
lastx = s[-1].x
V_new = self.V[tuple((np.array(firstx) + np.array(lastx)) / 2.0)]
# Disconnect old longest edge
self.V[firstx].disconnect(self.V[lastx])
# Connect new vertices to all other vertices
for v in s[:]:
v.connect(self.V[V_new.x])
# New "lower" simplex
S_new_l = Simplex(gen, S.hg_n, self.generation_cycle,
self.dim)
S_new_l.add_vertex(s[0])
S_new_l.add_vertex(V_new) # Add new vertex
for v in s[1:-1]: # Add all other vertices
S_new_l.add_vertex(v)
# New "upper" simplex
S_new_u = Simplex(gen, S.hg_n, S.generation_cycle, self.dim)
# First vertex on new long edge
S_new_u.add_vertex(s[S_new_u.generation_cycle + 1])
for v in s[1:-1]: # Remaining vertices
S_new_u.add_vertex(v)
for k, v in enumerate(s[1:-1]): # iterate through inner vertices
if k == S.generation_cycle:
S_new_u.add_vertex(V_new)
else:
S_new_u.add_vertex(v)
S_new_u.add_vertex(s[-1]) # Second vertex on new long edge
self.H[gen].append(S_new_l)
self.H[gen].append(S_new_u)
return
@lru_cache(maxsize=None)
def generate_sub_cell_2(self, origin, supremum, v_x_t): # No hits
"""
Use the origin and supremum vectors to find a new cell in that
subspace direction
NOTE: NOT CURRENTLY IN USE!
Parameters
----------
origin : tuple vector (hashable)
supremum : tuple vector (hashable)
Returns
-------
"""
t1 = self.generate_sub_cell_t1(origin, v_x_t)
t2 = self.generate_sub_cell_t2(supremum, v_x_t)
vec = t1 + t2
return tuple(vec)
@lru_cache(maxsize=None)
def generate_sub_cell_t1(self, origin, v_x):
# TODO: Calc these arrays outside
v_o = np.array(origin)
return v_o - v_o * np.array(v_x)
@lru_cache(maxsize=None)
def generate_sub_cell_t2(self, supremum, v_x):
v_s = np.array(supremum)
return v_s * np.array(v_x)
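    # Note (added): t1 + t2 maps a template corner v_x in {0, 1}**dim to
    # origin * (1 - v_x) + supremum * v_x, i.e. the matching corner of the
    # sub-cell spanned by `origin` and `supremum`.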
# Plots
def plot_complex(self):
"""
Here C is the LIST of simplexes S in the
        2- or 3-dimensional complex.
        To plot a single simplex S in a set C, use e.g. [C[0]]
"""
from matplotlib import pyplot
if self.dim == 2:
pyplot.figure()
for C in self.H:
for c in C:
for v in c():
if self.bounds is None:
x_a = np.array(v.x, dtype=float)
else:
x_a = np.array(v.x, dtype=float)
for i in range(len(self.bounds)):
x_a[i] = (x_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('v.x_a = {}'.format(x_a))
pyplot.plot([x_a[0]], [x_a[1]], 'o')
xlines = []
ylines = []
for vn in v.nn:
if self.bounds is None:
xn_a = np.array(vn.x, dtype=float)
else:
xn_a = np.array(vn.x, dtype=float)
for i in range(len(self.bounds)):
xn_a[i] = (xn_a[i] * (self.bounds[i][1]
- self.bounds[i][0])
+ self.bounds[i][0])
# logging.info('vn.x = {}'.format(vn.x))
xlines.append(xn_a[0])
ylines.append(xn_a[1])
xlines.append(x_a[0])
ylines.append(x_a[1])
pyplot.plot(xlines, ylines)
if self.bounds is None:
pyplot.ylim([-1e-2, 1 + 1e-2])
pyplot.xlim([-1e-2, 1 + 1e-2])
else:
pyplot.ylim(
[self.bounds[1][0] - 1e-2, self.bounds[1][1] + 1e-2])
pyplot.xlim(
[self.bounds[0][0] - 1e-2, self.bounds[0][1] + 1e-2])
pyplot.show()
elif self.dim == 3:
fig = pyplot.figure()
ax = fig.add_subplot(111, projection='3d')
for C in self.H:
for c in C:
for v in c():
x = []
y = []
z = []
# logging.info('v.x = {}'.format(v.x))
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
for vn in v.nn:
x.append(vn.x[0])
y.append(vn.x[1])
z.append(vn.x[2])
x.append(v.x[0])
y.append(v.x[1])
z.append(v.x[2])
# logging.info('vn.x = {}'.format(vn.x))
ax.plot(x, y, z, label='simplex')
pyplot.show()
else:
print("dimension higher than 3 or wrong complex format")
return
class VertexGroup(object):
def __init__(self, p_gen, p_hgr):
self.p_gen = p_gen # parent generation
self.p_hgr = p_hgr # parent homology group rank
self.hg_n = None
self.hg_d = None
# Maybe add parent homology group rank total history
# This is the sum off all previously split cells
# cumulatively throughout its entire history
self.C = []
def __call__(self):
return self.C
def add_vertex(self, V):
if V not in self.C:
self.C.append(V)
def homology_group_rank(self):
"""
Returns the homology group order of the current cell
"""
if self.hg_n is None:
self.hg_n = sum(1 for v in self.C if v.minimiser())
return self.hg_n
def homology_group_differential(self):
"""
Returns the difference between the current homology group of the
        cell and its parent group
"""
if self.hg_d is None:
            self.hg_d = self.hg_n - self.p_hgr
        return self.hg_d
def polytopial_sperner_lemma(self):
"""
Returns the number of stationary points theoretically contained in the
        cell based on information currently known about the cell
"""
pass
def print_out(self):
"""
Print the current cell to console
"""
for v in self():
v.print_out()
class Cell(VertexGroup):
"""
Contains a cell that is symmetric to the initial hypercube triangulation
"""
def __init__(self, p_gen, p_hgr, origin, supremum):
super(Cell, self).__init__(p_gen, p_hgr)
self.origin = origin
self.supremum = supremum
self.centroid = None # (Not always used)
# TODO: self.bounds
class Simplex(VertexGroup):
"""
Contains a simplex that is symmetric to the initial symmetry constrained
hypersimplex triangulation
"""
def __init__(self, p_gen, p_hgr, generation_cycle, dim):
super(Simplex, self).__init__(p_gen, p_hgr)
self.generation_cycle = (generation_cycle + 1) % (dim - 1)
class Vertex:
def __init__(self, x, bounds=None, func=None, func_args=(), g_cons=None,
g_cons_args=(), nn=None, index=None):
self.x = x
self.order = sum(x)
x_a = np.array(x, dtype=float)
if bounds is not None:
for i, (lb, ub) in enumerate(bounds):
x_a[i] = x_a[i] * (ub - lb) + lb
# TODO: Make saving the array structure optional
self.x_a = x_a
# Note Vertex is only initiated once for all x so only
# evaluated once
if func is not None:
self.feasible = True
if g_cons is not None:
for g, args in zip(g_cons, g_cons_args):
if g(self.x_a, *args) < 0.0:
self.f = np.inf
self.feasible = False
break
if self.feasible:
self.f = func(x_a, *func_args)
if nn is not None:
self.nn = nn
else:
self.nn = set()
self.fval = None
self.check_min = True
# Index:
if index is not None:
self.index = index
def __hash__(self):
return hash(self.x)
def connect(self, v):
if v is not self and v not in self.nn:
self.nn.add(v)
v.nn.add(self)
if self.minimiser():
v._min = False
v.check_min = False
# TEMPORARY
self.check_min = True
v.check_min = True
def disconnect(self, v):
if v in self.nn:
self.nn.remove(v)
v.nn.remove(self)
self.check_min = True
v.check_min = True
def minimiser(self):
"""Check whether this vertex is strictly less than all its neighbours"""
if self.check_min:
self._min = all(self.f < v.f for v in self.nn)
self.check_min = False
return self._min
def print_out(self):
print("Vertex: {}".format(self.x))
constr = 'Connections: '
for vc in self.nn:
constr += '{} '.format(vc.x)
print(constr)
print('Order = {}'.format(self.order))
class VertexCache:
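    """Memoised factory for Vertex objects keyed on the point tuple ``x``:
    each unique point is evaluated (objective and constraints) only once,
    with the number of objective evaluations tracked in ``nfev``."""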
def __init__(self, func, func_args=(), bounds=None, g_cons=None,
g_cons_args=(), indexed=True):
self.cache = {}
self.func = func
self.g_cons = g_cons
self.g_cons_args = g_cons_args
self.func_args = func_args
self.bounds = bounds
self.nfev = 0
self.size = 0
if indexed:
self.index = -1
def __getitem__(self, x, indexed=True):
try:
return self.cache[x]
except KeyError:
if indexed:
self.index += 1
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args,
index=self.index)
else:
xval = Vertex(x, bounds=self.bounds,
func=self.func, func_args=self.func_args,
g_cons=self.g_cons,
g_cons_args=self.g_cons_args)
# logging.info("New generated vertex at x = {}".format(x))
# NOTE: Surprisingly high performance increase if logging is commented out
self.cache[x] = xval
# TODO: Check
if self.func is not None:
if self.g_cons is not None:
if xval.feasible:
self.nfev += 1
self.size += 1
else:
self.size += 1
else:
self.nfev += 1
self.size += 1
return self.cache[x]
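# Minimal usage sketch (added example, not part of the original module):
# triangulate the unit square for a simple quadratic objective and refine the
# complex once.
if __name__ == '__main__':
    HC = Complex(2, lambda x: (x[0] - 0.5) ** 2 + (x[1] - 0.5) ** 2)
    HC.split_generation()  # subdivide every cell of the current generation
    print('{} objective evaluations cached'.format(HC.V.nfev))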
| bsd-3-clause |
seckcoder/lang-learn | python/sklearn/examples/semi_supervised/plot_label_propagation_structure.py | 5 | 2399 | """
==============================================
Label Propagation learning a complex structure
==============================================
Example of LabelPropagation learning a complex internal structure
to demonstrate "manifold learning". The outer circle should be
labeled "red" and the inner circle "blue". Because both label groups
lie inside their own distinct shape, we can see that the labels
propagate correctly around the circle.
"""
print __doc__
# Authors: Clay Woolam <clay@woolam.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Licence: BSD
import numpy as np
import pylab as pl
from sklearn.semi_supervised import label_propagation
from sklearn.datasets import make_circles
# generate ring with inner box
n_samples = 200
X, y = make_circles(n_samples=n_samples, shuffle=False)
outer, inner = 0, 1
labels = -np.ones(n_samples)
labels[0] = outer
labels[-1] = inner
###############################################################################
# Learn with LabelSpreading
label_spread = label_propagation.LabelSpreading(kernel='knn', alpha=1.0)
label_spread.fit(X, labels)
###############################################################################
# Plot output labels
output_labels = label_spread.transduction_
pl.figure(figsize=(8.5, 4))
pl.subplot(1, 2, 1)
plot_outer_labeled, = pl.plot(X[labels == outer, 0],
X[labels == outer, 1], 'rs')
plot_unlabeled, = pl.plot(X[labels == -1, 0], X[labels == -1, 1], 'g.')
plot_inner_labeled, = pl.plot(X[labels == inner, 0],
X[labels == inner, 1], 'bs')
pl.legend((plot_outer_labeled, plot_inner_labeled, plot_unlabeled),
('Outer Labeled', 'Inner Labeled', 'Unlabeled'), 'upper left',
numpoints=1, shadow=False)
pl.title("Raw data (2 classes=red and blue)")
pl.subplot(1, 2, 2)
output_label_array = np.asarray(output_labels)
outer_numbers = np.where(output_label_array == outer)[0]
inner_numbers = np.where(output_label_array == inner)[0]
plot_outer, = pl.plot(X[outer_numbers, 0], X[outer_numbers, 1], 'rs')
plot_inner, = pl.plot(X[inner_numbers, 0], X[inner_numbers, 1], 'bs')
pl.legend((plot_outer, plot_inner), ('Outer Learned', 'Inner Learned'),
'upper left', numpoints=1, shadow=False)
pl.title("Labels learned with Label Spreading (KNN)")
pl.subplots_adjust(left=0.07, bottom=0.07, right=0.93, top=0.92)
pl.show()
| unlicense |
sp-etx/example-model | example_data.py | 1 | 1304 | # -*- coding: utf-8 -*-
import pandas as pd
import numpy as np
from pandas.tseries.offsets import YearBegin
def make_data():
index = pd.DatetimeIndex(
start='2013-01-01 00:00:00',
end='2015-01-01 00:00:00',
freq='h',
tz='UTC')
index.name = 'Time (UTC)'
year_start = index + YearBegin(normalize=True) - YearBegin(normalize=True)
next_year_start = index + YearBegin(normalize=True)
year_length_values = next_year_start.values - year_start.values
year_progression_values = index.values - year_start.values
relative_year_progression = year_progression_values / year_length_values
annual_cosine = np.cos(relative_year_progression * 2 * np.pi) * 0.5 + 0.5
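    # relative_year_progression runs from 0.0 on 1 January towards 1.0 at the
    # end of the year, so annual_cosine peaks (1.0) at the turn of the year
    # and bottoms out (0.0) mid-year.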
(pd.DataFrame
.from_dict({
'Renova CHP': pd.Series(data=150 + 40 * annual_cosine, index=index),
'Other': pd.Series(data=10 + 1000 * annual_cosine, index=index)
})
.to_csv('data/heat_history.csv'))
(pd.Series(
data=180 + 0 * annual_cosine, index=index, name='Power price')
.to_csv('data/power_price.csv', header=True))
(pd.Series(
data=550 + 100 * annual_cosine, index=index, name='Power demand')
.to_csv('data/power_demand.csv', header=True))
if __name__ == '__main__':
    make_data()
| lgpl-3.0 |
hrjn/scikit-learn | sklearn/cluster/tests/test_mean_shift.py | 48 | 3653 | """
Testing for mean shift clustering methods
"""
import numpy as np
import warnings
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raise_message
from sklearn.cluster import MeanShift
from sklearn.cluster import mean_shift
from sklearn.cluster import estimate_bandwidth
from sklearn.cluster import get_bin_seeds
from sklearn.datasets.samples_generator import make_blobs
n_clusters = 3
centers = np.array([[1, 1], [-1, -1], [1, -1]]) + 10
X, _ = make_blobs(n_samples=300, n_features=2, centers=centers,
cluster_std=0.4, shuffle=True, random_state=11)
def test_estimate_bandwidth():
# Test estimate_bandwidth
bandwidth = estimate_bandwidth(X, n_samples=200)
assert_true(0.9 <= bandwidth <= 1.5)
def test_mean_shift():
# Test MeanShift algorithm
bandwidth = 1.2
ms = MeanShift(bandwidth=bandwidth)
labels = ms.fit(X).labels_
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
cluster_centers, labels = mean_shift(X, bandwidth=bandwidth)
labels_unique = np.unique(labels)
n_clusters_ = len(labels_unique)
assert_equal(n_clusters_, n_clusters)
def test_parallel():
ms1 = MeanShift(n_jobs=2)
ms1.fit(X)
ms2 = MeanShift()
ms2.fit(X)
assert_array_equal(ms1.cluster_centers_, ms2.cluster_centers_)
assert_array_equal(ms1.labels_, ms2.labels_)
def test_meanshift_predict():
# Test MeanShift.predict
ms = MeanShift(bandwidth=1.2)
labels = ms.fit_predict(X)
labels2 = ms.predict(X)
assert_array_equal(labels, labels2)
def test_meanshift_all_orphans():
# init away from the data, crash with a sensible warning
ms = MeanShift(bandwidth=0.1, seeds=[[-9, -9], [-10, -10]])
msg = "No point was within bandwidth=0.1"
assert_raise_message(ValueError, msg, ms.fit, X,)
def test_unfitted():
# Non-regression: before fit, there should be not fitted attributes.
ms = MeanShift()
assert_false(hasattr(ms, "cluster_centers_"))
assert_false(hasattr(ms, "labels_"))
def test_bin_seeds():
# Test the bin seeding technique which can be used in the mean shift
# algorithm
# Data is just 6 points in the plane
X = np.array([[1., 1.], [1.4, 1.4], [1.8, 1.2],
[2., 1.], [2.1, 1.1], [0., 0.]])
# With a bin coarseness of 1.0 and min_bin_freq of 1, 3 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.), (0., 0.)])
test_bins = get_bin_seeds(X, 1, 1)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin coarseness of 1.0 and min_bin_freq of 2, 2 bins should be
# found
ground_truth = set([(1., 1.), (2., 1.)])
test_bins = get_bin_seeds(X, 1, 2)
test_result = set([tuple(p) for p in test_bins])
assert_true(len(ground_truth.symmetric_difference(test_result)) == 0)
# With a bin size of 0.01 and min_bin_freq of 1, 6 bins should be found
# we bail and use the whole data here.
with warnings.catch_warnings(record=True):
test_bins = get_bin_seeds(X, 0.01, 1)
assert_array_equal(test_bins, X)
# tight clusters around [0, 0] and [1, 1], only get two bins
X, _ = make_blobs(n_samples=100, n_features=2, centers=[[0, 0], [1, 1]],
cluster_std=0.1, random_state=0)
test_bins = get_bin_seeds(X, 1)
assert_array_equal(test_bins, [[0, 0], [1, 1]])
| bsd-3-clause |
arahuja/scikit-learn | sklearn/linear_model/tests/test_logistic.py | 2 | 24391 | import numpy as np
import scipy.sparse as sp
from scipy import linalg, optimize, sparse
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_raise_message
from sklearn.utils import ConvergenceWarning
from sklearn.linear_model.logistic import (
LogisticRegression,
logistic_regression_path, LogisticRegressionCV,
_logistic_loss_and_grad, _logistic_grad_hess,
_multinomial_grad_hess, _logistic_loss,
)
from sklearn.cross_validation import StratifiedKFold
from sklearn.datasets import load_iris, make_classification
X = [[-1, 0], [0, 1], [1, 1]]
X_sp = sp.csr_matrix(X)
Y1 = [0, 1, 1]
Y2 = [2, 1, 0]
iris = load_iris()
def check_predictions(clf, X, y):
"""Check that the model is able to fit the classification data"""
n_samples = len(y)
classes = np.unique(y)
n_classes = classes.shape[0]
predicted = clf.fit(X, y).predict(X)
assert_array_equal(clf.classes_, classes)
assert_equal(predicted.shape, (n_samples,))
assert_array_equal(predicted, y)
probabilities = clf.predict_proba(X)
assert_equal(probabilities.shape, (n_samples, n_classes))
assert_array_almost_equal(probabilities.sum(axis=1), np.ones(n_samples))
assert_array_equal(probabilities.argmax(axis=1), y)
def test_predict_2_classes():
# Simple sanity check on a 2 classes dataset
# Make sure it predicts the correct result on simple datasets.
check_predictions(LogisticRegression(random_state=0), X, Y1)
check_predictions(LogisticRegression(random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X, Y1)
check_predictions(LogisticRegression(C=100, random_state=0), X_sp, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X, Y1)
check_predictions(LogisticRegression(fit_intercept=False,
random_state=0), X_sp, Y1)
def test_error():
# Test for appropriate exception on errors
msg = "Penalty term must be positive"
assert_raises_regexp(ValueError, msg,
LogisticRegression(C=-1).fit, X, Y1)
assert_raises_regexp(ValueError, msg,
LogisticRegression(C="test").fit, X, Y1)
msg = "Tolerance for stopping criteria must be positive"
assert_raises_regexp(ValueError, msg,
LogisticRegression(tol=-1).fit, X, Y1)
assert_raises_regexp(ValueError, msg,
LogisticRegression(tol="test").fit, X, Y1)
msg = "Maximum number of iteration must be positive"
assert_raises_regexp(ValueError, msg,
LogisticRegression(max_iter=-1).fit, X, Y1)
assert_raises_regexp(ValueError, msg,
LogisticRegression(max_iter="test").fit, X, Y1)
def test_predict_3_classes():
check_predictions(LogisticRegression(C=10), X, Y2)
check_predictions(LogisticRegression(C=10), X_sp, Y2)
def test_predict_iris():
# Test logistic regression with the iris dataset
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
# Test that both multinomial and OvR solvers handle
# multiclass data correctly and give good accuracy
# score (>0.95) for the training data.
for clf in [LogisticRegression(C=len(iris.data)),
LogisticRegression(C=len(iris.data), solver='lbfgs',
multi_class='multinomial'),
LogisticRegression(C=len(iris.data), solver='newton-cg',
multi_class='multinomial')]:
clf.fit(iris.data, target)
assert_array_equal(np.unique(target), clf.classes_)
pred = clf.predict(iris.data)
assert_greater(np.mean(pred == target), .95)
probabilities = clf.predict_proba(iris.data)
assert_array_almost_equal(probabilities.sum(axis=1),
np.ones(n_samples))
pred = iris.target_names[probabilities.argmax(axis=1)]
assert_greater(np.mean(pred == target), .95)
def test_multinomial_validation():
for solver in ['lbfgs', 'newton-cg']:
lr = LogisticRegression(C=-1, solver=solver, multi_class='multinomial')
assert_raises(ValueError, lr.fit, [[0, 1], [1, 0]], [0, 1])
def test_multinomial_binary():
# Test multinomial LR on a binary problem.
target = (iris.target > 0).astype(np.intp)
target = np.array(["setosa", "not-setosa"])[target]
for solver in ['lbfgs', 'newton-cg']:
clf = LogisticRegression(solver=solver, multi_class='multinomial')
clf.fit(iris.data, target)
assert_equal(clf.coef_.shape, (1, iris.data.shape[1]))
assert_equal(clf.intercept_.shape, (1,))
assert_array_equal(clf.predict(iris.data), target)
mlr = LogisticRegression(solver=solver, multi_class='multinomial',
fit_intercept=False)
mlr.fit(iris.data, target)
pred = clf.classes_[np.argmax(clf.predict_log_proba(iris.data),
axis=1)]
assert_greater(np.mean(pred == target), .9)
def test_sparsify():
# Test sparsify and densify members.
n_samples, n_features = iris.data.shape
target = iris.target_names[iris.target]
clf = LogisticRegression(random_state=0).fit(iris.data, target)
pred_d_d = clf.decision_function(iris.data)
clf.sparsify()
assert_true(sp.issparse(clf.coef_))
pred_s_d = clf.decision_function(iris.data)
sp_data = sp.coo_matrix(iris.data)
pred_s_s = clf.decision_function(sp_data)
clf.densify()
pred_d_s = clf.decision_function(sp_data)
assert_array_almost_equal(pred_d_d, pred_s_d)
assert_array_almost_equal(pred_d_d, pred_s_s)
assert_array_almost_equal(pred_d_d, pred_d_s)
def test_inconsistent_input():
# Test that an exception is raised on inconsistent input
rng = np.random.RandomState(0)
X_ = rng.random_sample((5, 10))
y_ = np.ones(X_.shape[0])
y_[0] = 0
clf = LogisticRegression(random_state=0)
# Wrong dimensions for training data
y_wrong = y_[:-1]
assert_raises(ValueError, clf.fit, X, y_wrong)
# Wrong dimensions for test data
assert_raises(ValueError, clf.fit(X_, y_).predict,
rng.random_sample((3, 12)))
def test_write_parameters():
# Test that we can write to coef_ and intercept_
clf = LogisticRegression(random_state=0)
clf.fit(X, Y1)
clf.coef_[:] = 0
clf.intercept_[:] = 0
assert_array_almost_equal(clf.decision_function(X), 0)
@raises(ValueError)
def test_nan():
# Test proper NaN handling.
# Regression test for Issue #252: fit used to go into an infinite loop.
Xnan = np.array(X, dtype=np.float64)
Xnan[0, 1] = np.nan
LogisticRegression(random_state=0).fit(Xnan, Y1)
def test_consistency_path():
# Test that the path algorithm is consistent
rng = np.random.RandomState(0)
X = np.concatenate((rng.randn(100, 2) + [1, 1], rng.randn(100, 2)))
y = [1] * 100 + [-1] * 100
Cs = np.logspace(0, 4, 10)
f = ignore_warnings
# can't test with fit_intercept=True since LIBLINEAR
# penalizes the intercept
for method in ('lbfgs', 'newton-cg', 'liblinear'):
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=False, tol=1e-16, solver=method)
for i, C in enumerate(Cs):
lr = LogisticRegression(C=C, fit_intercept=False, tol=1e-16)
lr.fit(X, y)
lr_coef = lr.coef_.ravel()
assert_array_almost_equal(lr_coef, coefs[i], decimal=4)
# test for fit_intercept=True
for method in ('lbfgs', 'newton-cg', 'liblinear'):
Cs = [1e3]
coefs, Cs = f(logistic_regression_path)(
X, y, Cs=Cs, fit_intercept=True, tol=1e-4, solver=method)
lr = LogisticRegression(C=Cs[0], fit_intercept=True, tol=1e-4,
intercept_scaling=10000)
lr.fit(X, y)
lr_coef = np.concatenate([lr.coef_.ravel(), lr.intercept_])
assert_array_almost_equal(lr_coef, coefs[0], decimal=4)
def test_liblinear_random_state():
X, y = make_classification(n_samples=20)
lr1 = LogisticRegression(random_state=0)
lr1.fit(X, y)
lr2 = LogisticRegression(random_state=0)
lr2.fit(X, y)
assert_array_almost_equal(lr1.coef_, lr2.coef_)
def test_logistic_loss_and_grad():
X_ref, y = make_classification(n_samples=20)
n_features = X_ref.shape[1]
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = np.zeros(n_features)
# First check that our derivation of the grad is correct
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad, approx_grad, decimal=2)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(
w, X, y, alpha=1.
)
assert_array_almost_equal(loss, loss_interp)
approx_grad = optimize.approx_fprime(
w, lambda w: _logistic_loss_and_grad(w, X, y, alpha=1.)[0], 1e-3
)
assert_array_almost_equal(grad_interp, approx_grad, decimal=2)
def test_logistic_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features = 50, 5
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
X_sp = X_ref.copy()
X_sp[X_sp < .1] = 0
X_sp = sp.csr_matrix(X_sp)
for X in (X_ref, X_sp):
w = .1 * np.ones(n_features)
# First check that _logistic_grad_hess is consistent
# with _logistic_loss_and_grad
loss, grad = _logistic_loss_and_grad(w, X, y, alpha=1.)
grad_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(grad, grad_2)
# Now check our hessian along the second direction of the grad
vector = np.zeros_like(grad)
vector[1] = 1
hess_col = hess(vector)
# Computation of the Hessian is particularly fragile to numerical
# errors when doing simple finite differences. Here we compute the
# grad along a path in the direction of the vector and then use a
# least-square regression to estimate the slope
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_logistic_loss_and_grad(w + t * vector, X, y, alpha=1.)[1]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(approx_hess_col, hess_col, decimal=3)
# Second check that our intercept implementation is good
w = np.zeros(n_features + 1)
loss_interp, grad_interp = _logistic_loss_and_grad(w, X, y, alpha=1.)
loss_interp_2 = _logistic_loss(w, X, y, alpha=1.)
grad_interp_2, hess = _logistic_grad_hess(w, X, y, alpha=1.)
assert_array_almost_equal(loss_interp, loss_interp_2)
assert_array_almost_equal(grad_interp, grad_interp_2)
def test_logistic_cv():
# test for LogisticRegressionCV object
n_samples, n_features = 50, 5
rng = np.random.RandomState(0)
X_ref = rng.randn(n_samples, n_features)
y = np.sign(X_ref.dot(5 * rng.randn(n_features)))
X_ref -= X_ref.mean()
X_ref /= X_ref.std()
lr_cv = LogisticRegressionCV(Cs=[1.], fit_intercept=False,
solver='liblinear')
lr_cv.fit(X_ref, y)
lr = LogisticRegression(C=1., fit_intercept=False)
lr.fit(X_ref, y)
assert_array_almost_equal(lr.coef_, lr_cv.coef_)
assert_array_equal(lr_cv.coef_.shape, (1, n_features))
assert_array_equal(lr_cv.classes_, [-1, 1])
assert_equal(len(lr_cv.classes_), 2)
coefs_paths = np.asarray(list(lr_cv.coefs_paths_.values()))
assert_array_equal(coefs_paths.shape, (1, 3, 1, n_features))
assert_array_equal(lr_cv.Cs_.shape, (1, ))
scores = np.asarray(list(lr_cv.scores_.values()))
assert_array_equal(scores.shape, (1, 3, 1))
def test_logistic_cv_sparse():
X, y = make_classification(n_samples=50, n_features=5,
random_state=0)
X[X < 1.0] = 0.0
csr = sp.csr_matrix(X)
clf = LogisticRegressionCV(fit_intercept=True)
clf.fit(X, y)
clfs = LogisticRegressionCV(fit_intercept=True)
clfs.fit(csr, y)
assert_array_almost_equal(clfs.coef_, clf.coef_)
assert_array_almost_equal(clfs.intercept_, clf.intercept_)
assert_equal(clfs.C_, clf.C_)
def test_intercept_logistic_helper():
n_samples, n_features = 10, 5
X, y = make_classification(n_samples=n_samples, n_features=n_features,
random_state=0)
# Fit intercept case.
alpha = 1.
w = np.ones(n_features + 1)
grad_interp, hess_interp = _logistic_grad_hess(w, X, y, alpha)
loss_interp = _logistic_loss(w, X, y, alpha)
    # Do not fit intercept. This can be considered equivalent to adding
    # a feature of ones, i.e. a column vector of ones.
X_ = np.hstack((X, np.ones(10)[:, np.newaxis]))
grad, hess = _logistic_grad_hess(w, X_, y, alpha)
loss = _logistic_loss(w, X_, y, alpha)
# In the fit_intercept=False case, the feature vector of ones is
# penalized. This should be taken care of.
assert_almost_equal(loss_interp + 0.5 * (w[-1] ** 2), loss)
# Check gradient.
assert_array_almost_equal(grad_interp[:n_features], grad[:n_features])
assert_almost_equal(grad_interp[-1] + alpha * w[-1], grad[-1])
rng = np.random.RandomState(0)
grad = rng.rand(n_features + 1)
hess_interp = hess_interp(grad)
hess = hess(grad)
assert_array_almost_equal(hess_interp[:n_features], hess[:n_features])
assert_almost_equal(hess_interp[-1] + alpha * grad[-1], hess[-1])
def test_ovr_multinomial_iris():
# Test that OvR and multinomial are correct using the iris dataset.
train, target = iris.data, iris.target
n_samples, n_features = train.shape
    # Use pre-defined folds, since the folds generated internally would
    # differ for different y
cv = StratifiedKFold(target, 3)
clf = LogisticRegressionCV(cv=cv)
clf.fit(train, target)
clf1 = LogisticRegressionCV(cv=cv)
target_copy = target.copy()
target_copy[target_copy == 0] = 1
clf1.fit(train, target_copy)
assert_array_almost_equal(clf.scores_[2], clf1.scores_[2])
assert_array_almost_equal(clf.intercept_[2:], clf1.intercept_)
assert_array_almost_equal(clf.coef_[2][np.newaxis, :], clf1.coef_)
# Test the shape of various attributes.
assert_equal(clf.coef_.shape, (3, n_features))
assert_array_equal(clf.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10, n_features + 1))
assert_equal(clf.Cs_.shape, (10, ))
scores = np.asarray(list(clf.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
# Test that for the iris data multinomial gives a better accuracy than OvR
for solver in ['lbfgs', 'newton-cg']:
clf_multi = LogisticRegressionCV(
solver=solver, multi_class='multinomial', max_iter=15
)
clf_multi.fit(train, target)
multi_score = clf_multi.score(train, target)
ovr_score = clf.score(train, target)
assert_greater(multi_score, ovr_score)
# Test attributes of LogisticRegressionCV
assert_equal(clf.coef_.shape, clf_multi.coef_.shape)
assert_array_equal(clf_multi.classes_, [0, 1, 2])
coefs_paths = np.asarray(list(clf_multi.coefs_paths_.values()))
assert_array_almost_equal(coefs_paths.shape, (3, 3, 10,
n_features + 1))
assert_equal(clf_multi.Cs_.shape, (10, ))
scores = np.asarray(list(clf_multi.scores_.values()))
assert_equal(scores.shape, (3, 3, 10))
def test_logistic_regression_solvers():
X, y = make_classification(n_features=10, n_informative=5, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=3)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=3)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=3)
def test_logistic_regression_solvers_multiclass():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
clf_n = LogisticRegression(solver='newton-cg', fit_intercept=False)
clf_n.fit(X, y)
clf_lbf = LogisticRegression(solver='lbfgs', fit_intercept=False)
clf_lbf.fit(X, y)
clf_lib = LogisticRegression(fit_intercept=False)
clf_lib.fit(X, y)
assert_array_almost_equal(clf_n.coef_, clf_lib.coef_, decimal=4)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
assert_array_almost_equal(clf_n.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regressioncv_class_weights():
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
n_classes=3, random_state=0)
    # Test that liblinear fails when a class_weight dict is provided
    # for a multiclass problem. However, it can handle binary problems.
clf_lib = LogisticRegressionCV(class_weight={0: 0.1, 1: 0.2},
solver='liblinear')
assert_raises(ValueError, clf_lib.fit, X, y)
y_ = y.copy()
y_[y == 2] = 1
clf_lib.fit(X, y_)
assert_array_equal(clf_lib.classes_, [0, 1])
# Test for class_weight=auto
X, y = make_classification(n_samples=20, n_features=20, n_informative=10,
random_state=0)
clf_lbf = LogisticRegressionCV(solver='lbfgs', fit_intercept=False,
class_weight='auto')
clf_lbf.fit(X, y)
clf_lib = LogisticRegressionCV(solver='liblinear', fit_intercept=False,
class_weight='auto')
clf_lib.fit(X, y)
assert_array_almost_equal(clf_lib.coef_, clf_lbf.coef_, decimal=4)
def test_logistic_regression_convergence_warnings():
# Test that warnings are raised if model does not converge
X, y = make_classification(n_samples=20, n_features=20)
clf_lib = LogisticRegression(solver='liblinear', max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, clf_lib.fit, X, y)
assert_equal(clf_lib.n_iter_, 2)
def test_logistic_regression_multinomial():
# Tests for the multinomial option in logistic regression
# Some basic attributes of Logistic Regression
n_samples, n_features, n_classes = 50, 20, 3
X, y = make_classification(n_samples=n_samples,
n_features=n_features,
n_informative=10,
n_classes=n_classes, random_state=0)
clf_int = LogisticRegression(solver='lbfgs', multi_class='multinomial')
clf_int.fit(X, y)
assert_array_equal(clf_int.coef_.shape, (n_classes, n_features))
clf_wint = LogisticRegression(solver='lbfgs', multi_class='multinomial',
fit_intercept=False)
clf_wint.fit(X, y)
assert_array_equal(clf_wint.coef_.shape, (n_classes, n_features))
# Similar tests for newton-cg solver option
clf_ncg_int = LogisticRegression(solver='newton-cg',
multi_class='multinomial')
clf_ncg_int.fit(X, y)
assert_array_equal(clf_ncg_int.coef_.shape, (n_classes, n_features))
clf_ncg_wint = LogisticRegression(solver='newton-cg', fit_intercept=False,
multi_class='multinomial')
clf_ncg_wint.fit(X, y)
assert_array_equal(clf_ncg_wint.coef_.shape, (n_classes, n_features))
# Compare solutions between lbfgs and newton-cg
assert_almost_equal(clf_int.coef_, clf_ncg_int.coef_, decimal=3)
assert_almost_equal(clf_wint.coef_, clf_ncg_wint.coef_, decimal=3)
assert_almost_equal(clf_int.intercept_, clf_ncg_int.intercept_, decimal=3)
    # Test that the path gives almost the same results. However, since in
    # this case we take the average of the coefs after fitting across all
    # the folds, they need not be exactly the same.
for solver in ['lbfgs', 'newton-cg']:
clf_path = LogisticRegressionCV(solver=solver,
multi_class='multinomial', Cs=[1.])
clf_path.fit(X, y)
assert_array_almost_equal(clf_path.coef_, clf_int.coef_, decimal=3)
assert_almost_equal(clf_path.intercept_, clf_int.intercept_, decimal=3)
def test_multinomial_grad_hess():
rng = np.random.RandomState(0)
n_samples, n_features, n_classes = 100, 5, 3
X = rng.randn(n_samples, n_features)
w = rng.rand(n_classes, n_features)
Y = np.zeros((n_samples, n_classes))
ind = np.argmax(np.dot(X, w.T), axis=1)
Y[range(0, n_samples), ind] = 1
w = w.ravel()
sample_weights = np.ones(X.shape[0])
grad, hessp = _multinomial_grad_hess(w, X, Y, alpha=1.,
sample_weight=sample_weights)
# extract first column of hessian matrix
vec = np.zeros(n_features * n_classes)
vec[0] = 1
hess_col = hessp(vec)
# Estimate hessian using least squares as done in
# test_logistic_grad_hess
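    # (the slope of the gradient along `vec` approximates hessp(vec))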
e = 1e-3
d_x = np.linspace(-e, e, 30)
d_grad = np.array([
_multinomial_grad_hess(w + t * vec, X, Y, alpha=1.,
sample_weight=sample_weights)[0]
for t in d_x
])
d_grad -= d_grad.mean(axis=0)
approx_hess_col = linalg.lstsq(d_x[:, np.newaxis], d_grad)[0].ravel()
assert_array_almost_equal(hess_col, approx_hess_col)
def test_liblinear_decision_function_zero():
# Test negative prediction when decision_function values are zero.
# Liblinear predicts the positive class when decision_function values
# are zero. This is a test to verify that we do not do the same.
# See Issue: https://github.com/scikit-learn/scikit-learn/issues/3600
# and the PR https://github.com/scikit-learn/scikit-learn/pull/3623
X, y = make_classification(n_samples=5, n_features=5)
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, y)
# Dummy data such that the decision function becomes zero.
X = np.zeros((5, 5))
assert_array_equal(clf.predict(X), np.zeros(5))
def test_liblinear_logregcv_sparse():
# Test LogRegCV with solver='liblinear' works for sparse matrices
X, y = make_classification(n_samples=10, n_features=5)
clf = LogisticRegressionCV(solver='liblinear')
clf.fit(sparse.csr_matrix(X), y)
def test_logreg_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
clf = LogisticRegression(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % clf.intercept_scaling)
assert_raise_message(ValueError, msg, clf.fit, X, Y1)
def test_logreg_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
clf = LogisticRegression(fit_intercept=False)
clf.fit(X, Y1)
assert_equal(clf.intercept_, 0.)
| bsd-3-clause |
IshankGulati/scikit-learn | sklearn/neural_network/tests/test_mlp.py | 28 | 22183 | """
Testing for Multi-layer Perceptron module (sklearn.neural_network)
"""
# Author: Issam H. Laradji
# License: BSD 3 clause
import sys
import warnings
import numpy as np
from numpy.testing import assert_almost_equal, assert_array_equal
from sklearn.datasets import load_digits, load_boston, load_iris
from sklearn.datasets import make_regression, make_multilabel_classification
from sklearn.exceptions import ConvergenceWarning
from sklearn.externals.six.moves import cStringIO as StringIO
from sklearn.metrics import roc_auc_score
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from scipy.sparse import csr_matrix
from sklearn.utils.testing import (assert_raises, assert_greater, assert_equal,
assert_false, ignore_warnings)
from sklearn.utils.testing import assert_raise_message
np.seterr(all='warn')
ACTIVATION_TYPES = ["identity", "logistic", "tanh", "relu"]
digits_dataset_multi = load_digits(n_class=3)
X_digits_multi = MinMaxScaler().fit_transform(digits_dataset_multi.data[:200])
y_digits_multi = digits_dataset_multi.target[:200]
digits_dataset_binary = load_digits(n_class=2)
X_digits_binary = MinMaxScaler().fit_transform(
digits_dataset_binary.data[:200])
y_digits_binary = digits_dataset_binary.target[:200]
classification_datasets = [(X_digits_multi, y_digits_multi),
(X_digits_binary, y_digits_binary)]
boston = load_boston()
Xboston = StandardScaler().fit_transform(boston.data)[: 200]
yboston = boston.target[:200]
iris = load_iris()
X_iris = iris.data
y_iris = iris.target
def test_alpha():
# Test that larger alpha yields weights closer to zero
X = X_digits_binary[:100]
y = y_digits_binary[:100]
alpha_vectors = []
alpha_values = np.arange(2)
absolute_sum = lambda x: np.sum(np.abs(x))
for alpha in alpha_values:
mlp = MLPClassifier(hidden_layer_sizes=10, alpha=alpha, random_state=1)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
alpha_vectors.append(np.array([absolute_sum(mlp.coefs_[0]),
absolute_sum(mlp.coefs_[1])]))
for i in range(len(alpha_values) - 1):
assert (alpha_vectors[i] > alpha_vectors[i + 1]).all()
def test_fit():
# Test that the algorithm solution is equal to a worked out example.
X = np.array([[0.6, 0.8, 0.7]])
y = np.array([0])
mlp = MLPClassifier(solver='sgd', learning_rate_init=0.1, alpha=0.1,
activation='logistic', random_state=1, max_iter=1,
hidden_layer_sizes=2, momentum=0)
# set weights
mlp.coefs_ = [0] * 2
mlp.intercepts_ = [0] * 2
mlp.n_outputs_ = 1
mlp.coefs_[0] = np.array([[0.1, 0.2], [0.3, 0.1], [0.5, 0]])
mlp.coefs_[1] = np.array([[0.1], [0.2]])
mlp.intercepts_[0] = np.array([0.1, 0.1])
mlp.intercepts_[1] = np.array([1.0])
mlp._coef_grads = [] * 2
mlp._intercept_grads = [] * 2
# Initialize parameters
mlp.n_iter_ = 0
mlp.learning_rate_ = 0.1
# Compute the number of layers
mlp.n_layers_ = 3
# Pre-allocate gradient matrices
mlp._coef_grads = [0] * (mlp.n_layers_ - 1)
mlp._intercept_grads = [0] * (mlp.n_layers_ - 1)
mlp.out_activation_ = 'logistic'
mlp.t_ = 0
mlp.best_loss_ = np.inf
mlp.loss_curve_ = []
mlp._no_improvement_count = 0
mlp._intercept_velocity = [np.zeros_like(intercepts) for
intercepts in
mlp.intercepts_]
mlp._coef_velocity = [np.zeros_like(coefs) for coefs in
mlp.coefs_]
mlp.partial_fit(X, y, classes=[0, 1])
# Manually worked out example
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.1 + 0.8 * 0.3 + 0.7 * 0.5 + 0.1)
# = 0.679178699175393
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.2 + 0.8 * 0.1 + 0.7 * 0 + 0.1)
# = 0.574442516811659
# o1 = g(h * W2 + b21) = g(0.679 * 0.1 + 0.574 * 0.2 + 1)
# = 0.7654329236196236
# d21 = -(0 - 0.765) = 0.765
# d11 = (1 - 0.679) * 0.679 * 0.765 * 0.1 = 0.01667
# d12 = (1 - 0.574) * 0.574 * 0.765 * 0.2 = 0.0374
# W1grad11 = X1 * d11 + alpha * W11 = 0.6 * 0.01667 + 0.1 * 0.1 = 0.0200
    # W1grad12 = X1 * d12 + alpha * W12 = 0.6 * 0.0374 + 0.1 * 0.2 = 0.04244
# W1grad21 = X2 * d11 + alpha * W13 = 0.8 * 0.01667 + 0.1 * 0.3 = 0.043336
# W1grad22 = X2 * d12 + alpha * W14 = 0.8 * 0.0374 + 0.1 * 0.1 = 0.03992
# W1grad31 = X3 * d11 + alpha * W15 = 0.6 * 0.01667 + 0.1 * 0.5 = 0.060002
# W1grad32 = X3 * d12 + alpha * W16 = 0.6 * 0.0374 + 0.1 * 0 = 0.02244
# W2grad1 = h1 * d21 + alpha * W21 = 0.679 * 0.765 + 0.1 * 0.1 = 0.5294
# W2grad2 = h2 * d21 + alpha * W22 = 0.574 * 0.765 + 0.1 * 0.2 = 0.45911
# b1grad1 = d11 = 0.01667
# b1grad2 = d12 = 0.0374
# b2grad = d21 = 0.765
# W1 = W1 - eta * [W1grad11, .., W1grad32] = [[0.1, 0.2], [0.3, 0.1],
# [0.5, 0]] - 0.1 * [[0.0200, 0.04244], [0.043336, 0.03992],
# [0.060002, 0.02244]] = [[0.098, 0.195756], [0.2956664,
# 0.096008], [0.4939998, -0.002244]]
# W2 = W2 - eta * [W2grad1, W2grad2] = [[0.1], [0.2]] - 0.1 *
# [[0.5294], [0.45911]] = [[0.04706], [0.154089]]
# b1 = b1 - eta * [b1grad1, b1grad2] = 0.1 - 0.1 * [0.01667, 0.0374]
# = [0.098333, 0.09626]
# b2 = b2 - eta * b2grad = 1.0 - 0.1 * 0.765 = 0.9235
assert_almost_equal(mlp.coefs_[0], np.array([[0.098, 0.195756],
[0.2956664, 0.096008],
[0.4939998, -0.002244]]),
decimal=3)
assert_almost_equal(mlp.coefs_[1], np.array([[0.04706], [0.154089]]),
decimal=3)
assert_almost_equal(mlp.intercepts_[0],
np.array([0.098333, 0.09626]), decimal=3)
assert_almost_equal(mlp.intercepts_[1], np.array(0.9235), decimal=3)
# Testing output
# h1 = g(X1 * W_i1 + b11) = g(0.6 * 0.098 + 0.8 * 0.2956664 +
# 0.7 * 0.4939998 + 0.098333) = 0.677
# h2 = g(X2 * W_i2 + b12) = g(0.6 * 0.195756 + 0.8 * 0.096008 +
# 0.7 * -0.002244 + 0.09626) = 0.572
# o1 = h * W2 + b21 = 0.677 * 0.04706 +
# 0.572 * 0.154089 + 0.9235 = 1.043
# prob = sigmoid(o1) = 0.739
assert_almost_equal(mlp.predict_proba(X)[0, 1], 0.739, decimal=3)
def test_gradient():
# Test gradient.
# This makes sure that the activation functions and their derivatives
# are correct. The numerical and analytical computation of the gradient
# should be close.
for n_labels in [2, 3]:
n_samples = 5
n_features = 10
X = np.random.random((n_samples, n_features))
y = 1 + np.mod(np.arange(n_samples) + 1, n_labels)
Y = LabelBinarizer().fit_transform(y)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(activation=activation, hidden_layer_sizes=10,
solver='lbfgs', alpha=1e-5,
learning_rate_init=0.2, max_iter=1,
random_state=1)
mlp.fit(X, y)
theta = np.hstack([l.ravel() for l in mlp.coefs_ +
mlp.intercepts_])
layer_units = ([X.shape[1]] + [mlp.hidden_layer_sizes] +
[mlp.n_outputs_])
activations = []
deltas = []
coef_grads = []
intercept_grads = []
activations.append(X)
for i in range(mlp.n_layers_ - 1):
activations.append(np.empty((X.shape[0],
layer_units[i + 1])))
deltas.append(np.empty((X.shape[0],
layer_units[i + 1])))
fan_in = layer_units[i]
fan_out = layer_units[i + 1]
coef_grads.append(np.empty((fan_in, fan_out)))
intercept_grads.append(np.empty(fan_out))
# analytically compute the gradients
def loss_grad_fun(t):
return mlp._loss_grad_lbfgs(t, X, Y, activations, deltas,
coef_grads, intercept_grads)
[value, grad] = loss_grad_fun(theta)
numgrad = np.zeros(np.size(theta))
n = np.size(theta, 0)
E = np.eye(n)
epsilon = 1e-5
# numerically compute the gradients
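            # central finite differences:
            # (loss(theta + eps * e_i) - loss(theta - eps * e_i)) / (2 * eps)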
for i in range(n):
dtheta = E[:, i] * epsilon
numgrad[i] = ((loss_grad_fun(theta + dtheta)[0] -
loss_grad_fun(theta - dtheta)[0]) /
(epsilon * 2.0))
assert_almost_equal(numgrad, grad)
def test_lbfgs_classification():
# Test lbfgs on classification.
# It should achieve a score higher than 0.95 for the binary and multi-class
# versions of the digits dataset.
for X, y in classification_datasets:
X_train = X[:150]
y_train = y[:150]
X_test = X[150:]
expected_shape_dtype = (X_test.shape[0], y_train.dtype.kind)
for activation in ACTIVATION_TYPES:
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X_train, y_train)
y_predict = mlp.predict(X_test)
assert_greater(mlp.score(X_train, y_train), 0.95)
assert_equal((y_predict.shape[0], y_predict.dtype.kind),
expected_shape_dtype)
def test_lbfgs_regression():
    # Test lbfgs on the boston dataset, a regression problem.
X = Xboston
y = yboston
for activation in ACTIVATION_TYPES:
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50,
max_iter=150, shuffle=True, random_state=1,
activation=activation)
mlp.fit(X, y)
if activation == 'identity':
assert_greater(mlp.score(X, y), 0.84)
else:
            # Non-linear models perform much better than a linear bottleneck:
assert_greater(mlp.score(X, y), 0.95)
def test_learning_rate_warmstart():
    # Tests that warm_start reuses past solutions.
X = [[3, 2], [1, 6], [5, 6], [-2, -4]]
y = [1, 1, 1, 0]
for learning_rate in ["invscaling", "constant"]:
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=4,
learning_rate=learning_rate, max_iter=1,
power_t=0.25, warm_start=True)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
prev_eta = mlp._optimizer.learning_rate
mlp.fit(X, y)
post_eta = mlp._optimizer.learning_rate
if learning_rate == 'constant':
assert_equal(prev_eta, post_eta)
elif learning_rate == 'invscaling':
assert_equal(mlp.learning_rate_init / pow(8 + 1, mlp.power_t),
post_eta)
def test_multilabel_classification():
# Test that multi-label classification works as expected.
# test fit method
X, y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=50, alpha=1e-5,
max_iter=150, random_state=0, activation='logistic',
learning_rate_init=0.2)
mlp.fit(X, y)
assert_equal(mlp.score(X, y), 1)
# test partial fit method
mlp = MLPClassifier(solver='sgd', hidden_layer_sizes=50, max_iter=150,
random_state=0, activation='logistic', alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=[0, 1, 2, 3, 4])
assert_greater(mlp.score(X, y), 0.9)
def test_multioutput_regression():
# Test that multi-output regression works as expected
X, y = make_regression(n_samples=200, n_targets=5)
mlp = MLPRegressor(solver='lbfgs', hidden_layer_sizes=50, max_iter=200,
random_state=1)
mlp.fit(X, y)
assert_greater(mlp.score(X, y), 0.9)
def test_partial_fit_classes_error():
# Tests that passing different classes to partial_fit raises an error
X = [[3, 2]]
y = [0]
clf = MLPClassifier(solver='sgd')
clf.partial_fit(X, y, classes=[0, 1])
assert_raises(ValueError, clf.partial_fit, X, y, classes=[1, 2])
def test_partial_fit_classification():
# Test partial_fit on classification.
# `partial_fit` should yield the same results as 'fit' for binary and
# multi-class classification.
for X, y in classification_datasets:
X = X
y = y
mlp = MLPClassifier(solver='sgd', max_iter=100, random_state=1,
tol=0, alpha=1e-5, learning_rate_init=0.2)
with ignore_warnings(category=ConvergenceWarning):
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPClassifier(solver='sgd', random_state=1, alpha=1e-5,
learning_rate_init=0.2)
for i in range(100):
mlp.partial_fit(X, y, classes=np.unique(y))
pred2 = mlp.predict(X)
assert_array_equal(pred1, pred2)
assert_greater(mlp.score(X, y), 0.95)
def test_partial_fit_unseen_classes():
    # Non-regression test for bug 6994
# Tests for labeling errors in partial fit
clf = MLPClassifier(random_state=0)
clf.partial_fit([[1], [2], [3]], ["a", "b", "c"],
classes=["a", "b", "c", "d"])
clf.partial_fit([[4]], ["d"])
assert_greater(clf.score([[1], [2], [3], [4]], ["a", "b", "c", "d"]), 0)
def test_partial_fit_regression():
# Test partial_fit on regression.
# `partial_fit` should yield the same results as 'fit' for regression.
X = Xboston
y = yboston
for momentum in [0, .9]:
mlp = MLPRegressor(solver='sgd', max_iter=100, activation='relu',
random_state=1, learning_rate_init=0.01,
batch_size=X.shape[0], momentum=momentum)
with warnings.catch_warnings(record=True):
# catch convergence warning
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp = MLPRegressor(solver='sgd', activation='relu',
learning_rate_init=0.01, random_state=1,
batch_size=X.shape[0], momentum=momentum)
for i in range(100):
mlp.partial_fit(X, y)
pred2 = mlp.predict(X)
assert_almost_equal(pred1, pred2, decimal=2)
score = mlp.score(X, y)
assert_greater(score, 0.75)
def test_partial_fit_errors():
# Test partial_fit error handling.
X = [[3, 2], [1, 6]]
y = [1, 0]
# no classes passed
assert_raises(ValueError,
MLPClassifier(solver='sgd').partial_fit, X, y, classes=[2])
# lbfgs doesn't support partial_fit
assert_false(hasattr(MLPClassifier(solver='lbfgs'), 'partial_fit'))
def test_params_errors():
# Test that invalid parameters raise value error
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier
assert_raises(ValueError, clf(hidden_layer_sizes=-1).fit, X, y)
assert_raises(ValueError, clf(max_iter=-1).fit, X, y)
assert_raises(ValueError, clf(shuffle='true').fit, X, y)
assert_raises(ValueError, clf(alpha=-1).fit, X, y)
assert_raises(ValueError, clf(learning_rate_init=-1).fit, X, y)
assert_raises(ValueError, clf(momentum=2).fit, X, y)
assert_raises(ValueError, clf(momentum=-0.5).fit, X, y)
assert_raises(ValueError, clf(nesterovs_momentum='invalid').fit, X, y)
assert_raises(ValueError, clf(early_stopping='invalid').fit, X, y)
assert_raises(ValueError, clf(validation_fraction=1).fit, X, y)
assert_raises(ValueError, clf(validation_fraction=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_1=1).fit, X, y)
assert_raises(ValueError, clf(beta_1=-0.5).fit, X, y)
assert_raises(ValueError, clf(beta_2=1).fit, X, y)
assert_raises(ValueError, clf(beta_2=-0.5).fit, X, y)
assert_raises(ValueError, clf(epsilon=-0.5).fit, X, y)
assert_raises(ValueError, clf(solver='hadoken').fit, X, y)
assert_raises(ValueError, clf(learning_rate='converge').fit, X, y)
assert_raises(ValueError, clf(activation='cloak').fit, X, y)
def test_predict_proba_binary():
# Test that predict_proba works as expected for binary class.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], 2
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
assert_equal(roc_auc_score(y, y_proba[:, 1]), 1.0)
def test_predict_proba_multiclass():
# Test that predict_proba works as expected for multi class.
X = X_digits_multi[:10]
y = y_digits_multi[:10]
clf = MLPClassifier(hidden_layer_sizes=5)
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
y_proba = clf.predict_proba(X)
y_log_proba = clf.predict_log_proba(X)
(n_samples, n_classes) = y.shape[0], np.unique(y).size
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_predict_proba_multilabel():
# Test that predict_proba works as expected for multilabel.
# Multilabel should not use softmax which makes probabilities sum to 1
X, Y = make_multilabel_classification(n_samples=50, random_state=0,
return_indicator=True)
n_samples, n_classes = Y.shape
clf = MLPClassifier(solver='lbfgs', hidden_layer_sizes=30,
random_state=0)
clf.fit(X, Y)
y_proba = clf.predict_proba(X)
assert_equal(y_proba.shape, (n_samples, n_classes))
assert_array_equal(y_proba > 0.5, Y)
y_log_proba = clf.predict_log_proba(X)
proba_max = y_proba.argmax(axis=1)
proba_log_max = y_log_proba.argmax(axis=1)
assert_greater((y_proba.sum(1) - 1).dot(y_proba.sum(1) - 1), 1e-10)
assert_array_equal(proba_max, proba_log_max)
assert_array_equal(y_log_proba, np.log(y_proba))
def test_sparse_matrices():
# Test that sparse and dense input matrices output the same results.
X = X_digits_binary[:50]
y = y_digits_binary[:50]
X_sparse = csr_matrix(X)
mlp = MLPClassifier(solver='lbfgs', hidden_layer_sizes=15,
random_state=1)
mlp.fit(X, y)
pred1 = mlp.predict(X)
mlp.fit(X_sparse, y)
pred2 = mlp.predict(X_sparse)
assert_almost_equal(pred1, pred2)
pred1 = mlp.predict(X)
pred2 = mlp.predict(X_sparse)
assert_array_equal(pred1, pred2)
def test_tolerance():
# Test tolerance.
# It should force the solver to exit the loop when it converges.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
def test_verbose_sgd():
# Test verbose.
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(solver='sgd', max_iter=2, verbose=10,
hidden_layer_sizes=2)
old_stdout = sys.stdout
sys.stdout = output = StringIO()
with ignore_warnings(category=ConvergenceWarning):
clf.fit(X, y)
clf.partial_fit(X, y)
sys.stdout = old_stdout
assert 'Iteration' in output.getvalue()
def test_early_stopping():
X = X_digits_binary[:100]
y = y_digits_binary[:100]
tol = 0.2
clf = MLPClassifier(tol=tol, max_iter=3000, solver='sgd',
early_stopping=True)
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
valid_scores = clf.validation_scores_
best_valid_score = clf.best_validation_score_
assert_equal(max(valid_scores), best_valid_score)
assert_greater(best_valid_score + tol, valid_scores[-2])
assert_greater(best_valid_score + tol, valid_scores[-1])
def test_adaptive_learning_rate():
X = [[3, 2], [1, 6]]
y = [1, 0]
clf = MLPClassifier(tol=0.5, max_iter=3000, solver='sgd',
learning_rate='adaptive')
clf.fit(X, y)
assert_greater(clf.max_iter, clf.n_iter_)
assert_greater(1e-6, clf._optimizer.learning_rate)
@ignore_warnings(RuntimeError)
def test_warm_start():
X = X_iris
y = y_iris
y_2classes = np.array([0] * 75 + [1] * 75)
y_3classes = np.array([0] * 40 + [1] * 40 + [2] * 70)
y_3classes_alt = np.array([0] * 50 + [1] * 50 + [3] * 50)
y_4classes = np.array([0] * 37 + [1] * 37 + [2] * 38 + [3] * 38)
y_5classes = np.array([0] * 30 + [1] * 30 + [2] * 30 + [3] * 30 + [4] * 30)
# No error raised
clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
warm_start=True).fit(X, y)
clf.fit(X, y)
clf.fit(X, y_3classes)
for y_i in (y_2classes, y_3classes_alt, y_4classes, y_5classes):
clf = MLPClassifier(hidden_layer_sizes=2, solver='lbfgs',
warm_start=True).fit(X, y)
message = ('warm_start can only be used where `y` has the same '
'classes as in the previous call to fit.'
' Previously got [0 1 2], `y` has %s' % np.unique(y_i))
assert_raise_message(ValueError, message, clf.fit, X, y_i)
| bsd-3-clause |
JT5D/scikit-learn | examples/svm/plot_oneclass.py | 2 | 2271 | """
==========================================
One-class SVM with non-linear kernel (RBF)
==========================================
An example using a one-class SVM for novelty detection.
:ref:`One-class SVM <svm_outlier_detection>` is an unsupervised
algorithm that learns a decision function for novelty detection:
classifying new data as similar or different to the training set.
"""
print(__doc__)
import numpy as np
import pylab as pl
import matplotlib.font_manager
from sklearn import svm
xx, yy = np.meshgrid(np.linspace(-5, 5, 500), np.linspace(-5, 5, 500))
# Generate train data
X = 0.3 * np.random.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * np.random.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = np.random.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
y_pred_train = clf.predict(X_train)
y_pred_test = clf.predict(X_test)
y_pred_outliers = clf.predict(X_outliers)
n_error_train = y_pred_train[y_pred_train == -1].size
n_error_test = y_pred_test[y_pred_test == -1].size
n_error_outliers = y_pred_outliers[y_pred_outliers == 1].size
# plot the line, the points, and the nearest vectors to the plane
Z = clf.decision_function(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
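# decision_function is positive inside the learned frontier, negative outside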
pl.title("Novelty Detection")
pl.contourf(xx, yy, Z, levels=np.linspace(Z.min(), 0, 7), cmap=pl.cm.Blues_r)
a = pl.contour(xx, yy, Z, levels=[0], linewidths=2, colors='red')
pl.contourf(xx, yy, Z, levels=[0, Z.max()], colors='orange')
b1 = pl.scatter(X_train[:, 0], X_train[:, 1], c='white')
b2 = pl.scatter(X_test[:, 0], X_test[:, 1], c='green')
c = pl.scatter(X_outliers[:, 0], X_outliers[:, 1], c='red')
pl.axis('tight')
pl.xlim((-5, 5))
pl.ylim((-5, 5))
pl.legend([a.collections[0], b1, b2, c],
["learned frontier", "training observations",
"new regular observations", "new abnormal observations"],
loc="upper left",
prop=matplotlib.font_manager.FontProperties(size=11))
pl.xlabel(
"error train: %d/200 ; errors novel regular: %d/40 ; "
"errors novel abnormal: %d/40"
% (n_error_train, n_error_test, n_error_outliers))
pl.show()
| bsd-3-clause |
tapomayukh/projects_in_python | classification/Classification_with_HMM/Single_Contact_Classification/Variable_Stiffness_Variable_Velocity/HMM/with padding 3s/hmm_crossvalidation_force_10_states.py | 1 | 27452 | # Hidden Markov Model Implementation
import pylab as pyl
import numpy as np
import matplotlib.pyplot as pp
#from enthought.mayavi import mlab
import scipy as scp
import scipy.ndimage as ni
import roslib; roslib.load_manifest('sandbox_tapo_darpa_m3')
import rospy
#import hrl_lib.mayavi2_util as mu
import hrl_lib.viz as hv
import hrl_lib.util as ut
import hrl_lib.matplotlib_util as mpu
import pickle
import ghmm
import sys
sys.path.insert(0, '/home/tapo/svn/robot1_data/usr/tapo/data_code/Classification/Data/Single_Contact_HMM/Variable_Stiffness_Variable_Velocity/with_padding_3s/')
from data_padding_hshv_3s import Fmat_original_hshv
from data_padding_hslv_3s import Fmat_original_hslv
from data_padding_lshv_3s import Fmat_original_lshv
from data_padding_lslv_3s import Fmat_original_lslv
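# Leave-one-condition-out cross-validation: each stiffness/velocity condition
# (HSHV, HSLV, LSHV, LSLV) is held out for testing in turn, while the four
# class HMMs (RF, RM, SF, SM) are trained on the other three conditions.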
# Returns mu, sigma for 10 hidden states from the feature vectors (123,35) for the RF, SF, RM, SM models
def feature_to_mu_sigma(fvec):
index = 0
m,n = np.shape(fvec)
#print m,n
mu = np.matrix(np.zeros((10,1)))
sigma = np.matrix(np.zeros((10,1)))
DIVS = m/10
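    # split the time series into 10 equal segments, one per hidden state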
while (index < 10):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),0:]
#if index == 1:
#print temp_fvec
mu[index] = scp.mean(temp_fvec)
sigma[index] = scp.std(temp_fvec)
index = index+1
return mu,sigma
# Returns sequence given raw data
def create_seq(fvec):
m,n = np.shape(fvec)
#print m,n
seq = np.matrix(np.zeros((10,n)))
DIVS = m/10
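    # average each of the 10 segments per trial (column) to build a
    # 10-observation sequence for the HMM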
for i in range(n):
index = 0
while (index < 10):
m_init = index*DIVS
temp_fvec = fvec[(m_init):(m_init+DIVS),i]
#if index == 1:
#print temp_fvec
seq[index,i] = scp.mean(temp_fvec)
index = index+1
return seq
if __name__ == '__main__':
# HMM - Implementation:
F = ghmm.Float() # emission domain of this model
# A - Transition Matrix
A = [[0.1, 0.25, 0.15, 0.15, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.1, 0.25, 0.25, 0.2, 0.1, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.1, 0.25, 0.25, 0.2, 0.05, 0.05, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.1, 0.3, 0.30, 0.20, 0.1, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.1, 0.30, 0.30, 0.20, 0.05, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.1, 0.35, 0.30, 0.20, 0.05],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.2, 0.30, 0.30, 0.20],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.2, 0.50, 0.30],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.4, 0.60],
[0.0, 0.0, 0.0, 0.0, 0.00, 0.00, 0.00, 0.00, 0.00, 1.00]]
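    # Left-to-right (upper-triangular) topology: states can only stay or move
    # forward, and the last state is absorbing.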
# pi - initial probabilities per state
pi = [0.1] * 10
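    # uniform initial-state distribution over the 10 states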
# Confusion Matrix
cmat = np.zeros((4,4))
#############################################################################################################################################
# HSHV as testing set and Rest as training set
# Checking the Data-Matrix
mu_rf_hshv,sigma_rf_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:301,0:15], Fmat_original_lshv[0:301,0:15], Fmat_original_lslv[0:301,0:15]))))
mu_rm_hshv,sigma_rm_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:301,15:30], Fmat_original_lshv[0:301,15:30], Fmat_original_lslv[0:301,15:30]))))
mu_sf_hshv,sigma_sf_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:301,30:45], Fmat_original_lshv[0:301,30:45], Fmat_original_lslv[0:301,30:45]))))
mu_sm_hshv,sigma_sm_hshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hslv[0:301,45:60], Fmat_original_lshv[0:301,45:60], Fmat_original_lslv[0:301,45:60]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hshv = np.zeros((10,2))
B_rm_hshv = np.zeros((10,2))
B_sf_hshv = np.zeros((10,2))
B_sm_hshv = np.zeros((10,2))
for num_states in range(10):
B_rf_hshv[num_states,0] = mu_rf_hshv[num_states]
B_rf_hshv[num_states,1] = sigma_rf_hshv[num_states]
B_rm_hshv[num_states,0] = mu_rm_hshv[num_states]
B_rm_hshv[num_states,1] = sigma_rm_hshv[num_states]
B_sf_hshv[num_states,0] = mu_sf_hshv[num_states]
B_sf_hshv[num_states,1] = sigma_sf_hshv[num_states]
B_sm_hshv[num_states,0] = mu_sm_hshv[num_states]
B_sm_hshv[num_states,1] = sigma_sm_hshv[num_states]
B_rf_hshv = B_rf_hshv.tolist()
B_rm_hshv = B_rm_hshv.tolist()
B_sf_hshv = B_sf_hshv.tolist()
B_sm_hshv = B_sm_hshv.tolist()
# generate RF, RM, SF, SM models from parameters
model_rf_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_hshv, pi) # Will be Trained
model_rm_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_hshv, pi) # Will be Trained
model_sf_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_hshv, pi) # Will be Trained
model_sm_hshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_hshv, pi) # Will be Trained
# For Training
total_seq_rf_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:301,0:15], Fmat_original_lshv[0:301,0:15], Fmat_original_lslv[0:301,0:15])))
total_seq_rm_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:301,15:30], Fmat_original_lshv[0:301,15:30], Fmat_original_lslv[0:301,15:30])))
total_seq_sf_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:301,30:45], Fmat_original_lshv[0:301,30:45], Fmat_original_lslv[0:301,30:45])))
total_seq_sm_hshv = np.matrix(np.column_stack((Fmat_original_hslv[0:301,45:60], Fmat_original_lshv[0:301,45:60], Fmat_original_lslv[0:301,45:60])))
train_seq_rf_hshv = (np.array(total_seq_rf_hshv).T).tolist()
train_seq_rm_hshv = (np.array(total_seq_rm_hshv).T).tolist()
train_seq_sf_hshv = (np.array(total_seq_sf_hshv).T).tolist()
train_seq_sm_hshv = (np.array(total_seq_sm_hshv).T).tolist()
#print train_seq_rf_hshv
final_ts_rf_hshv = ghmm.SequenceSet(F,train_seq_rf_hshv)
final_ts_rm_hshv = ghmm.SequenceSet(F,train_seq_rm_hshv)
final_ts_sf_hshv = ghmm.SequenceSet(F,train_seq_sf_hshv)
final_ts_sm_hshv = ghmm.SequenceSet(F,train_seq_sm_hshv)
model_rf_hshv.baumWelch(final_ts_rf_hshv)
model_rm_hshv.baumWelch(final_ts_rm_hshv)
model_sf_hshv.baumWelch(final_ts_sf_hshv)
model_sm_hshv.baumWelch(final_ts_sm_hshv)
# For Testing
total_seq_obj_hshv = Fmat_original_hshv[0:301,:]
rf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
rm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sf_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
sm_hshv = np.matrix(np.zeros(np.size(total_seq_obj_hshv,1)))
k = 0
while (k < np.size(total_seq_obj_hshv,1)):
test_seq_obj_hshv = (np.array(total_seq_obj_hshv[0:301,k]).T).tolist()
new_test_seq_obj_hshv = np.array(sum(test_seq_obj_hshv,[]))
#print new_test_seq_obj_hshv
ts_obj_hshv = new_test_seq_obj_hshv
#print np.shape(ts_obj_hshv)
final_ts_obj_hshv = ghmm.EmissionSequence(F,ts_obj_hshv.tolist())
# Find Viterbi Path
path_rf_obj_hshv = model_rf_hshv.viterbi(final_ts_obj_hshv)
path_rm_obj_hshv = model_rm_hshv.viterbi(final_ts_obj_hshv)
path_sf_obj_hshv = model_sf_hshv.viterbi(final_ts_obj_hshv)
path_sm_obj_hshv = model_sm_hshv.viterbi(final_ts_obj_hshv)
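        # classify the trial by the model with the highest Viterbi log-likelihood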
obj_hshv = max(path_rf_obj_hshv[1],path_rm_obj_hshv[1],path_sf_obj_hshv[1],path_sm_obj_hshv[1])
if obj_hshv == path_rf_obj_hshv[1]:
rf_hshv[0,k] = 1
elif obj_hshv == path_rm_obj_hshv[1]:
rm_hshv[0,k] = 1
elif obj_hshv == path_sf_obj_hshv[1]:
sf_hshv[0,k] = 1
else:
sm_hshv[0,k] = 1
k = k+1
#print rf_hshv.T
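    # accumulate the confusion matrix: rows are predictions (RF, RM, SF, SM),
    # columns are true classes (15 test trials per class)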
cmat[0][0] = cmat[0][0] + np.sum(rf_hshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hshv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hshv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hshv[0,45:60])
cmat[1][0] = cmat[1][0] + np.sum(rm_hshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hshv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hshv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hshv[0,45:60])
cmat[2][0] = cmat[2][0] + np.sum(sf_hshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hshv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hshv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hshv[0,45:60])
cmat[3][0] = cmat[3][0] + np.sum(sm_hshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hshv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hshv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hshv[0,45:60])
#print cmat
#############################################################################################################################################
# HSLV as testing set and Rest as training set
mu_rf_hslv,sigma_rf_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:301,0:15], Fmat_original_lshv[0:301,0:15], Fmat_original_lslv[0:301,0:15]))))
mu_rm_hslv,sigma_rm_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:301,15:30], Fmat_original_lshv[0:301,15:30], Fmat_original_lslv[0:301,15:30]))))
mu_sf_hslv,sigma_sf_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:301,30:45], Fmat_original_lshv[0:301,30:45], Fmat_original_lslv[0:301,30:45]))))
mu_sm_hslv,sigma_sm_hslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:301,45:60], Fmat_original_lshv[0:301,45:60], Fmat_original_lslv[0:301,45:60]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_hslv = np.zeros((10,2))
B_rm_hslv = np.zeros((10,2))
B_sf_hslv = np.zeros((10,2))
B_sm_hslv = np.zeros((10,2))
for num_states in range(10):
B_rf_hslv[num_states,0] = mu_rf_hslv[num_states]
B_rf_hslv[num_states,1] = sigma_rf_hslv[num_states]
B_rm_hslv[num_states,0] = mu_rm_hslv[num_states]
B_rm_hslv[num_states,1] = sigma_rm_hslv[num_states]
B_sf_hslv[num_states,0] = mu_sf_hslv[num_states]
B_sf_hslv[num_states,1] = sigma_sf_hslv[num_states]
B_sm_hslv[num_states,0] = mu_sm_hslv[num_states]
B_sm_hslv[num_states,1] = sigma_sm_hslv[num_states]
B_rf_hslv = B_rf_hslv.tolist()
B_rm_hslv = B_rm_hslv.tolist()
B_sf_hslv = B_sf_hslv.tolist()
B_sm_hslv = B_sm_hslv.tolist()
model_rf_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_hslv, pi) # Will be Trained
model_rm_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_hslv, pi) # Will be Trained
model_sf_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_hslv, pi) # Will be Trained
model_sm_hslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_hslv, pi) # Will be Trained
# For Training
total_seq_rf_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:301,0:15], Fmat_original_lshv[0:301,0:15], Fmat_original_lslv[0:301,0:15])))
total_seq_rm_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:301,15:30], Fmat_original_lshv[0:301,15:30], Fmat_original_lslv[0:301,15:30])))
total_seq_sf_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:301,30:45], Fmat_original_lshv[0:301,30:45], Fmat_original_lslv[0:301,30:45])))
total_seq_sm_hslv = np.matrix(np.column_stack((Fmat_original_hshv[0:301,45:60], Fmat_original_lshv[0:301,45:60], Fmat_original_lslv[0:301,45:60])))
train_seq_rf_hslv = (np.array(total_seq_rf_hslv).T).tolist()
train_seq_rm_hslv = (np.array(total_seq_rm_hslv).T).tolist()
train_seq_sf_hslv = (np.array(total_seq_sf_hslv).T).tolist()
train_seq_sm_hslv = (np.array(total_seq_sm_hslv).T).tolist()
#print train_seq_rf_hslv
final_ts_rf_hslv = ghmm.SequenceSet(F,train_seq_rf_hslv)
final_ts_rm_hslv = ghmm.SequenceSet(F,train_seq_rm_hslv)
final_ts_sf_hslv = ghmm.SequenceSet(F,train_seq_sf_hslv)
final_ts_sm_hslv = ghmm.SequenceSet(F,train_seq_sm_hslv)
model_rf_hslv.baumWelch(final_ts_rf_hslv)
model_rm_hslv.baumWelch(final_ts_rm_hslv)
model_sf_hslv.baumWelch(final_ts_sf_hslv)
model_sm_hslv.baumWelch(final_ts_sm_hslv)
# For Testing
total_seq_obj_hslv = Fmat_original_hslv[0:301,:]
rf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
rm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sf_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
sm_hslv = np.matrix(np.zeros(np.size(total_seq_obj_hslv,1)))
k = 0
while (k < np.size(total_seq_obj_hslv,1)):
test_seq_obj_hslv = (np.array(total_seq_obj_hslv[0:301,k]).T).tolist()
new_test_seq_obj_hslv = np.array(sum(test_seq_obj_hslv,[]))
#print new_test_seq_obj_hslv
ts_obj_hslv = new_test_seq_obj_hslv
#print np.shape(ts_obj_hslv)
final_ts_obj_hslv = ghmm.EmissionSequence(F,ts_obj_hslv.tolist())
# Find Viterbi Path
path_rf_obj_hslv = model_rf_hslv.viterbi(final_ts_obj_hslv)
path_rm_obj_hslv = model_rm_hslv.viterbi(final_ts_obj_hslv)
path_sf_obj_hslv = model_sf_hslv.viterbi(final_ts_obj_hslv)
path_sm_obj_hslv = model_sm_hslv.viterbi(final_ts_obj_hslv)
obj_hslv = max(path_rf_obj_hslv[1],path_rm_obj_hslv[1],path_sf_obj_hslv[1],path_sm_obj_hslv[1])
if obj_hslv == path_rf_obj_hslv[1]:
rf_hslv[0,k] = 1
elif obj_hslv == path_rm_obj_hslv[1]:
rm_hslv[0,k] = 1
elif obj_hslv == path_sf_obj_hslv[1]:
sf_hslv[0,k] = 1
else:
sm_hslv[0,k] = 1
k = k+1
#print rf_hslv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_hslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_hslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_hslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_hslv[0,45:60])
cmat[1][0] = cmat[1][0] + np.sum(rm_hslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_hslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_hslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_hslv[0,45:60])
cmat[2][0] = cmat[2][0] + np.sum(sf_hslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_hslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_hslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_hslv[0,45:60])
cmat[3][0] = cmat[3][0] + np.sum(sm_hslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_hslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_hslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_hslv[0,45:60])
#print cmat
############################################################################################################################################
# LSHV as testing set and Rest as training set
mu_rf_lshv,sigma_rf_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:301,0:15], Fmat_original_hslv[0:301,0:15], Fmat_original_lslv[0:301,0:15]))))
mu_rm_lshv,sigma_rm_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:301,15:30], Fmat_original_hslv[0:301,15:30], Fmat_original_lslv[0:301,15:30]))))
mu_sf_lshv,sigma_sf_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:301,30:45], Fmat_original_hslv[0:301,30:45], Fmat_original_lslv[0:301,30:45]))))
mu_sm_lshv,sigma_sm_lshv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:301,45:60], Fmat_original_hslv[0:301,45:60], Fmat_original_lslv[0:301,45:60]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lshv = np.zeros((10,2))
B_rm_lshv = np.zeros((10,2))
B_sf_lshv = np.zeros((10,2))
B_sm_lshv = np.zeros((10,2))
for num_states in range(10):
B_rf_lshv[num_states,0] = mu_rf_lshv[num_states]
B_rf_lshv[num_states,1] = sigma_rf_lshv[num_states]
B_rm_lshv[num_states,0] = mu_rm_lshv[num_states]
B_rm_lshv[num_states,1] = sigma_rm_lshv[num_states]
B_sf_lshv[num_states,0] = mu_sf_lshv[num_states]
B_sf_lshv[num_states,1] = sigma_sf_lshv[num_states]
B_sm_lshv[num_states,0] = mu_sm_lshv[num_states]
B_sm_lshv[num_states,1] = sigma_sm_lshv[num_states]
B_rf_lshv = B_rf_lshv.tolist()
B_rm_lshv = B_rm_lshv.tolist()
B_sf_lshv = B_sf_lshv.tolist()
B_sm_lshv = B_sm_lshv.tolist()
model_rf_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_lshv, pi) # Will be Trained
model_rm_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_lshv, pi) # Will be Trained
model_sf_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_lshv, pi) # Will be Trained
model_sm_lshv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_lshv, pi) # Will be Trained
# For Training
total_seq_rf_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:301,0:15], Fmat_original_hslv[0:301,0:15], Fmat_original_lslv[0:301,0:15])))
total_seq_rm_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:301,15:30], Fmat_original_hslv[0:301,15:30], Fmat_original_lslv[0:301,15:30])))
total_seq_sf_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:301,30:45], Fmat_original_hslv[0:301,30:45], Fmat_original_lslv[0:301,30:45])))
total_seq_sm_lshv = np.matrix(np.column_stack((Fmat_original_hshv[0:301,45:60], Fmat_original_hslv[0:301,45:60], Fmat_original_lslv[0:301,45:60])))
train_seq_rf_lshv = (np.array(total_seq_rf_lshv).T).tolist()
train_seq_rm_lshv = (np.array(total_seq_rm_lshv).T).tolist()
train_seq_sf_lshv = (np.array(total_seq_sf_lshv).T).tolist()
train_seq_sm_lshv = (np.array(total_seq_sm_lshv).T).tolist()
#print train_seq_rf_lshv
final_ts_rf_lshv = ghmm.SequenceSet(F,train_seq_rf_lshv)
final_ts_rm_lshv = ghmm.SequenceSet(F,train_seq_rm_lshv)
final_ts_sf_lshv = ghmm.SequenceSet(F,train_seq_sf_lshv)
final_ts_sm_lshv = ghmm.SequenceSet(F,train_seq_sm_lshv)
model_rf_lshv.baumWelch(final_ts_rf_lshv)
model_rm_lshv.baumWelch(final_ts_rm_lshv)
model_sf_lshv.baumWelch(final_ts_sf_lshv)
model_sm_lshv.baumWelch(final_ts_sm_lshv)
# For Testing
total_seq_obj_lshv = Fmat_original_lshv[0:301,:]
rf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
rm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sf_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
sm_lshv = np.matrix(np.zeros(np.size(total_seq_obj_lshv,1)))
k = 0
while (k < np.size(total_seq_obj_lshv,1)):
test_seq_obj_lshv = (np.array(total_seq_obj_lshv[0:301,k]).T).tolist()
new_test_seq_obj_lshv = np.array(sum(test_seq_obj_lshv,[]))
#print new_test_seq_obj_lshv
ts_obj_lshv = new_test_seq_obj_lshv
#print np.shape(ts_obj_lshv)
final_ts_obj_lshv = ghmm.EmissionSequence(F,ts_obj_lshv.tolist())
# Find Viterbi Path
path_rf_obj_lshv = model_rf_lshv.viterbi(final_ts_obj_lshv)
path_rm_obj_lshv = model_rm_lshv.viterbi(final_ts_obj_lshv)
path_sf_obj_lshv = model_sf_lshv.viterbi(final_ts_obj_lshv)
path_sm_obj_lshv = model_sm_lshv.viterbi(final_ts_obj_lshv)
obj_lshv = max(path_rf_obj_lshv[1],path_rm_obj_lshv[1],path_sf_obj_lshv[1],path_sm_obj_lshv[1])
if obj_lshv == path_rf_obj_lshv[1]:
rf_lshv[0,k] = 1
elif obj_lshv == path_rm_obj_lshv[1]:
rm_lshv[0,k] = 1
elif obj_lshv == path_sf_obj_lshv[1]:
sf_lshv[0,k] = 1
else:
sm_lshv[0,k] = 1
k = k+1
#print rf_lshv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_lshv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lshv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_lshv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_lshv[0,45:60])
cmat[1][0] = cmat[1][0] + np.sum(rm_lshv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lshv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_lshv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_lshv[0,45:60])
cmat[2][0] = cmat[2][0] + np.sum(sf_lshv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lshv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_lshv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_lshv[0,45:60])
cmat[3][0] = cmat[3][0] + np.sum(sm_lshv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lshv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_lshv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_lshv[0,45:60])
#print cmat
#############################################################################################################################################
# LSLV as testing set and Rest as training set
mu_rf_lslv,sigma_rf_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:301,0:15], Fmat_original_hslv[0:301,0:15], Fmat_original_lshv[0:301,0:15]))))
mu_rm_lslv,sigma_rm_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:301,15:30], Fmat_original_hslv[0:301,15:30], Fmat_original_lshv[0:301,15:30]))))
mu_sf_lslv,sigma_sf_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:301,30:45], Fmat_original_hslv[0:301,30:45], Fmat_original_lshv[0:301,30:45]))))
mu_sm_lslv,sigma_sm_lslv = feature_to_mu_sigma(np.matrix(np.column_stack((Fmat_original_hshv[0:301,45:60], Fmat_original_hslv[0:301,45:60], Fmat_original_lshv[0:301,45:60]))))
# B - Emission Matrix, parameters of emission distributions in pairs of (mu, sigma)
B_rf_lslv = np.zeros((10,2))
B_rm_lslv = np.zeros((10,2))
B_sf_lslv = np.zeros((10,2))
B_sm_lslv = np.zeros((10,2))
for num_states in range(10):
B_rf_lslv[num_states,0] = mu_rf_lslv[num_states]
B_rf_lslv[num_states,1] = sigma_rf_lslv[num_states]
B_rm_lslv[num_states,0] = mu_rm_lslv[num_states]
B_rm_lslv[num_states,1] = sigma_rm_lslv[num_states]
B_sf_lslv[num_states,0] = mu_sf_lslv[num_states]
B_sf_lslv[num_states,1] = sigma_sf_lslv[num_states]
B_sm_lslv[num_states,0] = mu_sm_lslv[num_states]
B_sm_lslv[num_states,1] = sigma_sm_lslv[num_states]
B_rf_lslv = B_rf_lslv.tolist()
B_rm_lslv = B_rm_lslv.tolist()
B_sf_lslv = B_sf_lslv.tolist()
B_sm_lslv = B_sm_lslv.tolist()
model_rf_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rf_lslv, pi) # Will be Trained
model_rm_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_rm_lslv, pi) # Will be Trained
model_sf_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sf_lslv, pi) # Will be Trained
model_sm_lslv = ghmm.HMMFromMatrices(F,ghmm.GaussianDistribution(F), A, B_sm_lslv, pi) # Will be Trained
# For Training
total_seq_rf_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:301,0:15], Fmat_original_hslv[0:301,0:15], Fmat_original_lshv[0:301,0:15])))
total_seq_rm_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:301,15:30], Fmat_original_hslv[0:301,15:30], Fmat_original_lshv[0:301,15:30])))
total_seq_sf_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:301,30:45], Fmat_original_hslv[0:301,30:45], Fmat_original_lshv[0:301,30:45])))
total_seq_sm_lslv = np.matrix(np.column_stack((Fmat_original_hshv[0:301,45:60], Fmat_original_hslv[0:301,45:60], Fmat_original_lshv[0:301,45:60])))
train_seq_rf_lslv = (np.array(total_seq_rf_lslv).T).tolist()
train_seq_rm_lslv = (np.array(total_seq_rm_lslv).T).tolist()
train_seq_sf_lslv = (np.array(total_seq_sf_lslv).T).tolist()
train_seq_sm_lslv = (np.array(total_seq_sm_lslv).T).tolist()
#print train_seq_rf_lslv
final_ts_rf_lslv = ghmm.SequenceSet(F,train_seq_rf_lslv)
final_ts_rm_lslv = ghmm.SequenceSet(F,train_seq_rm_lslv)
final_ts_sf_lslv = ghmm.SequenceSet(F,train_seq_sf_lslv)
final_ts_sm_lslv = ghmm.SequenceSet(F,train_seq_sm_lslv)
model_rf_lslv.baumWelch(final_ts_rf_lslv)
model_rm_lslv.baumWelch(final_ts_rm_lslv)
model_sf_lslv.baumWelch(final_ts_sf_lslv)
model_sm_lslv.baumWelch(final_ts_sm_lslv)
# For Testing
total_seq_obj_lslv = Fmat_original_lslv[0:301,:]
rf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
rm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sf_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
sm_lslv = np.matrix(np.zeros(np.size(total_seq_obj_lslv,1)))
k = 0
while (k < np.size(total_seq_obj_lslv,1)):
test_seq_obj_lslv = (np.array(total_seq_obj_lslv[0:301,k]).T).tolist()
new_test_seq_obj_lslv = np.array(sum(test_seq_obj_lslv,[]))
#print new_test_seq_obj_lslv
ts_obj_lslv = new_test_seq_obj_lslv
#print np.shape(ts_obj_lslv)
final_ts_obj_lslv = ghmm.EmissionSequence(F,ts_obj_lslv.tolist())
# Find Viterbi Path
path_rf_obj_lslv = model_rf_lslv.viterbi(final_ts_obj_lslv)
path_rm_obj_lslv = model_rm_lslv.viterbi(final_ts_obj_lslv)
path_sf_obj_lslv = model_sf_lslv.viterbi(final_ts_obj_lslv)
path_sm_obj_lslv = model_sm_lslv.viterbi(final_ts_obj_lslv)
obj_lslv = max(path_rf_obj_lslv[1],path_rm_obj_lslv[1],path_sf_obj_lslv[1],path_sm_obj_lslv[1])
if obj_lslv == path_rf_obj_lslv[1]:
rf_lslv[0,k] = 1
elif obj_lslv == path_rm_obj_lslv[1]:
rm_lslv[0,k] = 1
elif obj_lslv == path_sf_obj_lslv[1]:
sf_lslv[0,k] = 1
else:
sm_lslv[0,k] = 1
k = k+1
#print rf_lslv.T
cmat[0][0] = cmat[0][0] + np.sum(rf_lslv[0,0:15])
cmat[0][1] = cmat[0][1] + np.sum(rf_lslv[0,15:30])
cmat[0][2] = cmat[0][2] + np.sum(rf_lslv[0,30:45])
cmat[0][3] = cmat[0][3] + np.sum(rf_lslv[0,45:60])
cmat[1][0] = cmat[1][0] + np.sum(rm_lslv[0,0:15])
cmat[1][1] = cmat[1][1] + np.sum(rm_lslv[0,15:30])
cmat[1][2] = cmat[1][2] + np.sum(rm_lslv[0,30:45])
cmat[1][3] = cmat[1][3] + np.sum(rm_lslv[0,45:60])
cmat[2][0] = cmat[2][0] + np.sum(sf_lslv[0,0:15])
cmat[2][1] = cmat[2][1] + np.sum(sf_lslv[0,15:30])
cmat[2][2] = cmat[2][2] + np.sum(sf_lslv[0,30:45])
cmat[2][3] = cmat[2][3] + np.sum(sf_lslv[0,45:60])
cmat[3][0] = cmat[3][0] + np.sum(sm_lslv[0,0:15])
cmat[3][1] = cmat[3][1] + np.sum(sm_lslv[0,15:30])
cmat[3][2] = cmat[3][2] + np.sum(sm_lslv[0,30:45])
cmat[3][3] = cmat[3][3] + np.sum(sm_lslv[0,45:60])
#print cmat
############################################################################################################################################
# Plot Confusion Matrix
Nlabels = 4
fig = pp.figure()
ax = fig.add_subplot(111)
figplot = ax.matshow(cmat, interpolation = 'nearest', origin = 'upper', extent=[0, Nlabels, 0, Nlabels])
ax.set_title('Performance of HMM Models')
pp.xlabel("Targets")
pp.ylabel("Predictions")
ax.set_xticks([0.5,1.5,2.5,3.5])
ax.set_xticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
ax.set_yticks([3.5,2.5,1.5,0.5])
ax.set_yticklabels(['Rigid-Fixed', 'Rigid-Movable', 'Soft-Fixed', 'Soft-Movable'])
figbar = fig.colorbar(figplot)
i = 0
while (i < 4):
j = 0
while (j < 4):
pp.text(j+0.5,3.5-i,cmat[i][j])
j = j+1
i = i+1
pp.savefig('results_force_10_states.png')
pp.show()
| mit |
cython-testbed/pandas | pandas/tests/dtypes/test_generic.py | 3 | 3844 | # -*- coding: utf-8 -*-
from warnings import catch_warnings, simplefilter
import numpy as np
import pandas as pd
from pandas.core.dtypes import generic as gt
from pandas.util import testing as tm
class TestABCClasses(object):
tuples = [[1, 2, 2], ['red', 'blue', 'red']]
multi_index = pd.MultiIndex.from_arrays(tuples, names=('number', 'color'))
datetime_index = pd.to_datetime(['2000/1/1', '2010/1/1'])
timedelta_index = pd.to_timedelta(np.arange(5), unit='s')
period_index = pd.period_range('2000/1/1', '2010/1/1/', freq='M')
categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])
categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)
df = pd.DataFrame({'names': ['a', 'b', 'c']}, index=multi_index)
sparse_series = pd.Series([1, 2, 3]).to_sparse()
sparse_array = pd.SparseArray(np.random.randn(10))
sparse_frame = pd.SparseDataFrame({'a': [1, -1, None]})
def test_abc_types(self):
assert isinstance(pd.Index(['a', 'b', 'c']), gt.ABCIndex)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCInt64Index)
assert isinstance(pd.UInt64Index([1, 2, 3]), gt.ABCUInt64Index)
assert isinstance(pd.Float64Index([1, 2, 3]), gt.ABCFloat64Index)
assert isinstance(self.multi_index, gt.ABCMultiIndex)
assert isinstance(self.datetime_index, gt.ABCDatetimeIndex)
assert isinstance(self.timedelta_index, gt.ABCTimedeltaIndex)
assert isinstance(self.period_index, gt.ABCPeriodIndex)
assert isinstance(self.categorical_df.index, gt.ABCCategoricalIndex)
assert isinstance(pd.Index(['a', 'b', 'c']), gt.ABCIndexClass)
assert isinstance(pd.Int64Index([1, 2, 3]), gt.ABCIndexClass)
assert isinstance(pd.Series([1, 2, 3]), gt.ABCSeries)
assert isinstance(self.df, gt.ABCDataFrame)
with catch_warnings(record=True):
simplefilter('ignore', FutureWarning)
assert isinstance(self.df.to_panel(), gt.ABCPanel)
assert isinstance(self.sparse_series, gt.ABCSparseSeries)
assert isinstance(self.sparse_array, gt.ABCSparseArray)
assert isinstance(self.sparse_frame, gt.ABCSparseDataFrame)
assert isinstance(self.categorical, gt.ABCCategorical)
assert isinstance(pd.Period('2012', freq='A-DEC'), gt.ABCPeriod)
assert isinstance(pd.DateOffset(), gt.ABCDateOffset)
assert isinstance(pd.Period('2012', freq='A-DEC').freq,
gt.ABCDateOffset)
assert not isinstance(pd.Period('2012', freq='A-DEC'),
gt.ABCDateOffset)
assert isinstance(pd.Interval(0, 1.5), gt.ABCInterval)
assert not isinstance(pd.Period('2012', freq='A-DEC'), gt.ABCInterval)
def test_setattr_warnings():
# GH7175 - GOTCHA: You can't use dot notation to add a column...
d = {'one': pd.Series([1., 2., 3.], index=['a', 'b', 'c']),
'two': pd.Series([1., 2., 3., 4.], index=['a', 'b', 'c', 'd'])}
df = pd.DataFrame(d)
with catch_warnings(record=True) as w:
# successfully add new column
# this should not raise a warning
df['three'] = df.two + 1
assert len(w) == 0
assert df.three.sum() > df.two.sum()
with catch_warnings(record=True) as w:
# successfully modify column in place
# this should not raise a warning
df.one += 1
assert len(w) == 0
assert df.one.iloc[0] == 2
with catch_warnings(record=True) as w:
# successfully add an attribute to a series
# this should not raise a warning
df.two.not_an_index = [1, 2]
assert len(w) == 0
with tm.assert_produces_warning(UserWarning):
# warn when setting column to nonexistent name
df.four = df.two + 2
assert df.four.sum() > df.two.sum()
| bsd-3-clause |
moberweger/deep-prior | src/main_nyu_com_refine.py | 1 | 8681 | """This is the main file for training hand detection refinement on NYU dataset
Copyright 2015 Markus Oberweger, ICG,
Graz University of Technology <oberweger@icg.tugraz.at>
This file is part of DeepPrior.
DeepPrior is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
DeepPrior is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with DeepPrior. If not, see <http://www.gnu.org/licenses/>.
"""
import numpy
import matplotlib
matplotlib.use('Agg') # plot to file
import matplotlib.pyplot as plt
from net.scalenet import ScaleNetParams, ScaleNet
from trainer.scalenettrainer import ScaleNetTrainerParams, ScaleNetTrainer
from util.handdetector import HandDetector
import theano
import os
import cPickle
import sys
from data.importers import NYUImporter
from data.dataset import NYUDataset
from util.handpose_evaluation import NYUHandposeEvaluation
import cv2
if __name__ == '__main__':
eval_prefix = 'NYU_COM'
if not os.path.exists('./eval/'+eval_prefix+'/'):
os.makedirs('./eval/'+eval_prefix+'/')
floatX = theano.config.floatX # @UndefinedVariable
rng = numpy.random.RandomState(23455)
print("create data")
di = NYUImporter('../data/NYU/')
Seq1 = di.loadSequence('train',shuffle=True,rng=rng,docom=True)
trainSeqs = [Seq1]
Seq2_1 = di.loadSequence('test_1',docom=True)
Seq2_2 = di.loadSequence('test_2',docom=True)
testSeqs = [Seq2_1, Seq2_2]
# create training data
trainDataSet = NYUDataset(trainSeqs)
train_data, train_gt3D = trainDataSet.imgStackDepthOnly('train')
mb = (train_data.nbytes) / (1024 * 1024)
print("data size: {}Mb".format(mb))
testDataSet = NYUDataset(testSeqs)
test_data1, test_gt3D1 = testDataSet.imgStackDepthOnly('test_1')
test_data2, test_gt3D2 = testDataSet.imgStackDepthOnly('test_2')
val_data = test_data1
val_gt3D = test_gt3D1
####################################
# resize data
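    # The blocks below cut centred crops at half and quarter resolution from
    # the training, validation and both test stacks; together with the
    # full-resolution images they form the multi-scale input of ScaleNet.
    # Note that all offsets are computed from train_data.shape, so every stack
    # is assumed to share the same spatial size.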
dsize = (int(train_data.shape[2]//2), int(train_data.shape[3]//2))
xstart = int(train_data.shape[2]/2-dsize[0]/2)
xend = xstart + dsize[0]
ystart = int(train_data.shape[3]/2-dsize[1]/2)
yend = ystart + dsize[1]
train_data2 = train_data[:, :, ystart:yend, xstart:xend]
dsize = (int(train_data.shape[2]//4), int(train_data.shape[3]//4))
xstart = int(train_data.shape[2]/2-dsize[0]/2)
xend = xstart + dsize[0]
ystart = int(train_data.shape[3]/2-dsize[1]/2)
yend = ystart + dsize[1]
train_data4 = train_data[:, :, ystart:yend, xstart:xend]
dsize = (int(train_data.shape[2]//2), int(train_data.shape[3]//2))
xstart = int(train_data.shape[2]/2-dsize[0]/2)
xend = xstart + dsize[0]
ystart = int(train_data.shape[3]/2-dsize[1]/2)
yend = ystart + dsize[1]
val_data2 = val_data[:, :, ystart:yend, xstart:xend]
dsize = (int(train_data.shape[2]//4), int(train_data.shape[3]//4))
xstart = int(train_data.shape[2]/2-dsize[0]/2)
xend = xstart + dsize[0]
ystart = int(train_data.shape[3]/2-dsize[1]/2)
yend = ystart + dsize[1]
val_data4 = val_data[:, :, ystart:yend, xstart:xend]
dsize = (int(train_data.shape[2]//2), int(train_data.shape[3]//2))
xstart = int(train_data.shape[2]/2-dsize[0]/2)
xend = xstart + dsize[0]
ystart = int(train_data.shape[3]/2-dsize[1]/2)
yend = ystart + dsize[1]
test_data12 = test_data1[:, :, ystart:yend, xstart:xend]
dsize = (int(train_data.shape[2]//4), int(train_data.shape[3]//4))
xstart = int(train_data.shape[2]/2-dsize[0]/2)
xend = xstart + dsize[0]
ystart = int(train_data.shape[3]/2-dsize[1]/2)
yend = ystart + dsize[1]
test_data14 = test_data1[:, :, ystart:yend, xstart:xend]
dsize = (int(train_data.shape[2]//2), int(train_data.shape[3]//2))
xstart = int(train_data.shape[2]/2-dsize[0]/2)
xend = xstart + dsize[0]
ystart = int(train_data.shape[3]/2-dsize[1]/2)
yend = ystart + dsize[1]
test_data22 = test_data2[:, :, ystart:yend, xstart:xend]
dsize = (int(train_data.shape[2]//4), int(train_data.shape[3]//4))
xstart = int(train_data.shape[2]/2-dsize[0]/2)
xend = xstart + dsize[0]
ystart = int(train_data.shape[3]/2-dsize[1]/2)
yend = ystart + dsize[1]
test_data24 = test_data2[:, :, ystart:yend, xstart:xend]
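    # Illustrative sketch only (not called anywhere in this script): the
    # repeated slicing above amounts to a centre-crop helper such as this
    # hypothetical one.
    def center_crop(stack, factor):
        # Crop a (N, C, H, W) stack to (H//factor, W//factor) around the centre.
        h, w = stack.shape[2] // factor, stack.shape[3] // factor
        ys = stack.shape[2] // 2 - h // 2
        xs = stack.shape[3] // 2 - w // 2
        return stack[:, :, ys:ys + h, xs:xs + w]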
print train_gt3D.max(), test_gt3D1.max(), train_gt3D.min(), test_gt3D1.min()
print train_data.max(), test_data1.max(), train_data.min(), test_data1.min()
imgSizeW = train_data.shape[3]
imgSizeH = train_data.shape[2]
nChannels = train_data.shape[1]
#############################################################################
print("create network")
batchSize = 64
poseNetParams = ScaleNetParams(type=1, nChan=nChannels, wIn=imgSizeW, hIn=imgSizeH, batchSize=batchSize,
resizeFactor=2, numJoints=1, nDims=3)
poseNet = ScaleNet(rng, cfgParams=poseNetParams)
poseNetTrainerParams = ScaleNetTrainerParams()
poseNetTrainerParams.batch_size = batchSize
poseNetTrainerParams.learning_rate = 0.0005
poseNetTrainerParams.weightreg_factor = 0.0001
poseNetTrainerParams.lr_of_ep = lambda ep: poseNetTrainerParams.learning_rate/(1+0.1*ep)
print("setup trainer")
poseNetTrainer = ScaleNetTrainer(poseNet, poseNetTrainerParams, rng)
poseNetTrainer.setData(train_data, train_gt3D[:, 13, :], val_data, val_gt3D[:, 13, :])
poseNetTrainer.addStaticData({'val_data_x1': val_data2, 'val_data_x2': val_data4})
poseNetTrainer.addManagedData({'train_data_x1': train_data2, 'train_data_x2': train_data4})
poseNetTrainer.compileFunctions()
###################################################################
# TRAIN
train_res = poseNetTrainer.train(n_epochs=100, storeFilters=True)
train_costs = train_res[0]
wvals = train_res[1]
val_errs = train_res[2]
# plot cost
fig = plt.figure()
plt.semilogy(train_costs)
plt.show(block=False)
fig.savefig('./eval/'+eval_prefix+'/'+eval_prefix+'_cost.png')
fig = plt.figure()
plt.semilogy(val_errs)
plt.show(block=False)
fig.savefig('./eval/'+eval_prefix+'/'+eval_prefix+'_errs.png')
# save results
poseNet.save("./eval/{}/net_{}.pkl".format(eval_prefix, eval_prefix))
# poseNet.load("./eval/{}/net_{}.pkl".format(eval_prefix,eval_prefix))
####################################################
# TEST
print("Testing ...")
gt3D = []
joints = []
gt3D.extend([j.gt3Dorig[13].reshape(1,3) for j in testSeqs[0].data])
jts = poseNet.computeOutput([test_data1, test_data12, test_data14])
for i in xrange(test_data1.shape[0]):
joints.append(jts[i].reshape(1, 3)*(testSeqs[0].config['cube'][2]/2.) + testSeqs[0].data[i].com)
gt3D.extend([j.gt3Dorig[13].reshape(1,3) for j in testSeqs[1].data])
jts = poseNet.computeOutput([test_data2, test_data22, test_data24])
for i in range(test_data2.shape[0]):
joints.append(jts[i].reshape(1, 3)*(testSeqs[1].config['cube'][2]/2.) + testSeqs[1].data[i].com)
hpe = NYUHandposeEvaluation(gt3D, joints)
hpe.subfolder += '/'+eval_prefix+'/'
mean_error = hpe.getMeanError()
max_error = hpe.getMaxError()
print("Mean error: {}mm, max error: {}mm".format(mean_error, max_error))
# save results
cPickle.dump(joints, open("./eval/{}/result_{}_{}.pkl".format(eval_prefix,os.path.split(__file__)[1],eval_prefix), "wb"), protocol=cPickle.HIGHEST_PROTOCOL)
print "Testing baseline"
#################################
# BASELINE
# Load the evaluation
data_baseline = di.loadBaseline('../data/NYU/test/test_predictions.mat',numpy.concatenate([numpy.asarray([j.gt3Dorig for j in testSeqs[0].data]), numpy.asarray([j.gt3Dorig for j in testSeqs[1].data])]))
hpe_base = NYUHandposeEvaluation(gt3D, numpy.asarray(data_baseline)[:, 13, :].reshape((len(gt3D), 1, 3)))
hpe_base.subfolder += '/'+eval_prefix+'/'
print("Mean error: {}mm".format(hpe_base.getMeanError()))
com = [j.com for j in testSeqs[0].data]
com.extend([j.com for j in testSeqs[1].data])
hpe_com = NYUHandposeEvaluation(gt3D, numpy.asarray(com).reshape((len(gt3D),1,3)))
hpe_com.subfolder += '/'+eval_prefix+'/'
print("Mean error: {}mm".format(hpe_com.getMeanError()))
| gpl-3.0 |
mugizico/scikit-learn | sklearn/calibration.py | 137 | 18876 | """Calibration of predicted probabilities."""
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Balazs Kegl <balazs.kegl@gmail.com>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# Mathieu Blondel <mathieu@mblondel.org>
#
# License: BSD 3 clause
from __future__ import division
import inspect
import warnings
from math import log
import numpy as np
from scipy.optimize import fmin_bfgs
from .base import BaseEstimator, ClassifierMixin, RegressorMixin, clone
from .preprocessing import LabelBinarizer
from .utils import check_X_y, check_array, indexable, column_or_1d
from .utils.validation import check_is_fitted
from .isotonic import IsotonicRegression
from .svm import LinearSVC
from .cross_validation import check_cv
from .metrics.classification import _check_binary_probabilistic_predictions
class CalibratedClassifierCV(BaseEstimator, ClassifierMixin):
"""Probability calibration with isotonic regression or sigmoid.
With this class, the base_estimator is fit on the train set of the
cross-validation generator and the test set is used for calibration.
The probabilities for each of the folds are then averaged
for prediction. In case that cv="prefit" is passed to __init__,
    it is assumed that base_estimator has been
    fitted already and all data is used for calibration. Note that
    data for fitting the classifier and for calibrating it must be disjoint.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. If cv=prefit, the
classifier must have been fit already on data.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach. It is not advised to use isotonic calibration
with too few calibration samples (<<1000) since it tends to overfit.
Use sigmoids (Platt's calibration) in this case.
cv : integer or cross-validation generator or "prefit", optional
If an integer is passed, it is the number of folds (default 3).
Specific cross-validation objects can be passed, see
sklearn.cross_validation module for the list of possible objects.
If "prefit" is passed, it is assumed that base_estimator has been
fitted already and all data is used for calibration.
Attributes
----------
classes_ : array, shape (n_classes)
The class labels.
calibrated_classifiers_: list (len() equal to cv or 1 if cv == "prefit")
        The list of calibrated classifiers, one for each cross-validation fold,
which has been fitted on all but the validation fold and calibrated
on the validation fold.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
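
    Examples
    --------
    A minimal usage sketch (any classifier exposing ``decision_function`` or
    ``predict_proba`` could stand in for ``GaussianNB``)::

        from sklearn.datasets import load_iris
        from sklearn.naive_bayes import GaussianNB
        from sklearn.calibration import CalibratedClassifierCV

        iris = load_iris()
        clf = CalibratedClassifierCV(GaussianNB(), method='sigmoid', cv=3)
        clf.fit(iris.data, iris.target)
        probas = clf.predict_proba(iris.data)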
"""
def __init__(self, base_estimator=None, method='sigmoid', cv=3):
self.base_estimator = base_estimator
self.method = method
self.cv = cv
def fit(self, X, y, sample_weight=None):
"""Fit the calibrated model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X, y = check_X_y(X, y, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
X, y = indexable(X, y)
lb = LabelBinarizer().fit(y)
self.classes_ = lb.classes_
        # Check that each cross-validation fold can have at least one
# example per class
n_folds = self.cv if isinstance(self.cv, int) \
else self.cv.n_folds if hasattr(self.cv, "n_folds") else None
if n_folds and \
np.any([np.sum(y == class_) < n_folds for class_ in self.classes_]):
raise ValueError("Requesting %d-fold cross-validation but provided"
" less than %d examples for at least one class."
% (n_folds, n_folds))
self.calibrated_classifiers_ = []
if self.base_estimator is None:
# we want all classifiers that don't expose a random_state
# to be deterministic (and we don't want to expose this one).
base_estimator = LinearSVC(random_state=0)
else:
base_estimator = self.base_estimator
if self.cv == "prefit":
calibrated_classifier = _CalibratedClassifier(
base_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X, y, sample_weight)
else:
calibrated_classifier.fit(X, y)
self.calibrated_classifiers_.append(calibrated_classifier)
else:
cv = check_cv(self.cv, X, y, classifier=True)
arg_names = inspect.getargspec(base_estimator.fit)[0]
estimator_name = type(base_estimator).__name__
if (sample_weight is not None
and "sample_weight" not in arg_names):
warnings.warn("%s does not support sample_weight. Samples"
" weights are only used for the calibration"
" itself." % estimator_name)
base_estimator_sample_weight = None
else:
base_estimator_sample_weight = sample_weight
for train, test in cv:
this_estimator = clone(base_estimator)
if base_estimator_sample_weight is not None:
this_estimator.fit(
X[train], y[train],
sample_weight=base_estimator_sample_weight[train])
else:
this_estimator.fit(X[train], y[train])
calibrated_classifier = _CalibratedClassifier(
this_estimator, method=self.method)
if sample_weight is not None:
calibrated_classifier.fit(X[test], y[test],
sample_weight[test])
else:
calibrated_classifier.fit(X[test], y[test])
self.calibrated_classifiers_.append(calibrated_classifier)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
X = check_array(X, accept_sparse=['csc', 'csr', 'coo'],
force_all_finite=False)
# Compute the arithmetic mean of the predictions of the calibrated
        # classifiers
mean_proba = np.zeros((X.shape[0], len(self.classes_)))
for calibrated_classifier in self.calibrated_classifiers_:
proba = calibrated_classifier.predict_proba(X)
mean_proba += proba
mean_proba /= len(self.calibrated_classifiers_)
return mean_proba
def predict(self, X):
"""Predict the target of new samples. Can be different from the
prediction of the uncalibrated classifier.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples,)
The predicted class.
"""
check_is_fitted(self, ["classes_", "calibrated_classifiers_"])
return self.classes_[np.argmax(self.predict_proba(X), axis=1)]
class _CalibratedClassifier(object):
"""Probability calibration with isotonic regression or sigmoid.
It assumes that base_estimator has already been fit, and trains the
calibration on the input set of the fit function. Note that this class
should not be used as an estimator directly. Use CalibratedClassifierCV
with cv="prefit" instead.
Parameters
----------
base_estimator : instance BaseEstimator
The classifier whose output decision function needs to be calibrated
to offer more accurate predict_proba outputs. No default value since
it has to be an already fitted estimator.
method : 'sigmoid' | 'isotonic'
The method to use for calibration. Can be 'sigmoid' which
corresponds to Platt's method or 'isotonic' which is a
        non-parametric approach based on isotonic regression.
References
----------
.. [1] Obtaining calibrated probability estimates from decision trees
and naive Bayesian classifiers, B. Zadrozny & C. Elkan, ICML 2001
.. [2] Transforming Classifier Scores into Accurate Multiclass
Probability Estimates, B. Zadrozny & C. Elkan, (KDD 2002)
.. [3] Probabilistic Outputs for Support Vector Machines and Comparisons to
Regularized Likelihood Methods, J. Platt, (1999)
.. [4] Predicting Good Probabilities with Supervised Learning,
A. Niculescu-Mizil & R. Caruana, ICML 2005
"""
def __init__(self, base_estimator, method='sigmoid'):
self.base_estimator = base_estimator
self.method = method
def _preproc(self, X):
n_classes = len(self.classes_)
if hasattr(self.base_estimator, "decision_function"):
df = self.base_estimator.decision_function(X)
if df.ndim == 1:
df = df[:, np.newaxis]
elif hasattr(self.base_estimator, "predict_proba"):
df = self.base_estimator.predict_proba(X)
if n_classes == 2:
df = df[:, 1:]
else:
raise RuntimeError('classifier has no decision_function or '
'predict_proba method.')
idx_pos_class = np.arange(df.shape[1])
return df, idx_pos_class
def fit(self, X, y, sample_weight=None):
"""Calibrate the fitted model
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,)
Target values.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
lb = LabelBinarizer()
Y = lb.fit_transform(y)
self.classes_ = lb.classes_
df, idx_pos_class = self._preproc(X)
self.calibrators_ = []
for k, this_df in zip(idx_pos_class, df.T):
if self.method == 'isotonic':
calibrator = IsotonicRegression(out_of_bounds='clip')
elif self.method == 'sigmoid':
calibrator = _SigmoidCalibration()
else:
raise ValueError('method should be "sigmoid" or '
'"isotonic". Got %s.' % self.method)
calibrator.fit(this_df, Y[:, k], sample_weight)
self.calibrators_.append(calibrator)
return self
def predict_proba(self, X):
"""Posterior probabilities of classification
This function returns posterior probabilities of classification
according to each class on an array of test vectors X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
The samples.
Returns
-------
C : array, shape (n_samples, n_classes)
The predicted probas. Can be exact zeros.
"""
n_classes = len(self.classes_)
proba = np.zeros((X.shape[0], n_classes))
df, idx_pos_class = self._preproc(X)
for k, this_df, calibrator in \
zip(idx_pos_class, df.T, self.calibrators_):
if n_classes == 2:
k += 1
proba[:, k] = calibrator.predict(this_df)
# Normalize the probabilities
if n_classes == 2:
proba[:, 0] = 1. - proba[:, 1]
else:
proba /= np.sum(proba, axis=1)[:, np.newaxis]
# XXX : for some reason all probas can be 0
proba[np.isnan(proba)] = 1. / n_classes
# Deal with cases where the predicted probability minimally exceeds 1.0
proba[(1.0 < proba) & (proba <= 1.0 + 1e-5)] = 1.0
return proba
def _sigmoid_calibration(df, y, sample_weight=None):
"""Probability Calibration with sigmoid method (Platt 2000)
Parameters
----------
df : ndarray, shape (n_samples,)
The decision function or predict proba for the samples.
y : ndarray, shape (n_samples,)
The targets.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
a : float
The slope.
b : float
The intercept.
References
----------
Platt, "Probabilistic Outputs for Support Vector Machines"
"""
df = column_or_1d(df)
y = column_or_1d(y)
    F = df  # F follows Platt's notation
tiny = np.finfo(np.float).tiny # to avoid division by 0 warning
# Bayesian priors (see Platt end of section 2.2)
prior0 = float(np.sum(y <= 0))
prior1 = y.shape[0] - prior0
T = np.zeros(y.shape)
T[y > 0] = (prior1 + 1.) / (prior1 + 2.)
T[y <= 0] = 1. / (prior0 + 2.)
T1 = 1. - T
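    # T holds Platt's smoothed targets: positives become (N+ + 1) / (N+ + 2)
    # and negatives 1 / (N- + 2) rather than hard 0/1 labels, which
    # regularises the sigmoid fit.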
def objective(AB):
# From Platt (beginning of Section 2.2)
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
l = -(T * np.log(P + tiny) + T1 * np.log(1. - P + tiny))
if sample_weight is not None:
return (sample_weight * l).sum()
else:
return l.sum()
def grad(AB):
# gradient of the objective function
E = np.exp(AB[0] * F + AB[1])
P = 1. / (1. + E)
TEP_minus_T1P = P * (T * E - T1)
if sample_weight is not None:
TEP_minus_T1P *= sample_weight
dA = np.dot(TEP_minus_T1P, F)
dB = np.sum(TEP_minus_T1P)
return np.array([dA, dB])
AB0 = np.array([0., log((prior0 + 1.) / (prior1 + 1.))])
AB_ = fmin_bfgs(objective, AB0, fprime=grad, disp=False)
return AB_[0], AB_[1]
class _SigmoidCalibration(BaseEstimator, RegressorMixin):
"""Sigmoid regression model.
Attributes
----------
a_ : float
The slope.
b_ : float
The intercept.
"""
def fit(self, X, y, sample_weight=None):
"""Fit the model using X, y as training data.
Parameters
----------
X : array-like, shape (n_samples,)
Training data.
y : array-like, shape (n_samples,)
Training target.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted.
Returns
-------
self : object
Returns an instance of self.
"""
X = column_or_1d(X)
y = column_or_1d(y)
X, y = indexable(X, y)
self.a_, self.b_ = _sigmoid_calibration(X, y, sample_weight)
return self
def predict(self, T):
"""Predict new data by linear interpolation.
Parameters
----------
T : array-like, shape (n_samples,)
Data to predict from.
Returns
-------
T_ : array, shape (n_samples,)
The predicted data.
"""
T = column_or_1d(T)
return 1. / (1. + np.exp(self.a_ * T + self.b_))
def calibration_curve(y_true, y_prob, normalize=False, n_bins=5):
"""Compute true and predicted probabilities for a calibration curve.
Read more in the :ref:`User Guide <calibration>`.
Parameters
----------
y_true : array, shape (n_samples,)
True targets.
y_prob : array, shape (n_samples,)
Probabilities of the positive class.
normalize : bool, optional, default=False
        Whether y_prob needs to be normalized into the interval [0, 1], i.e. is not
a proper probability. If True, the smallest value in y_prob is mapped
onto 0 and the largest one onto 1.
n_bins : int
Number of bins. A bigger number requires more data.
Returns
-------
prob_true : array, shape (n_bins,)
The true probability in each bin (fraction of positives).
prob_pred : array, shape (n_bins,)
The mean predicted probability in each bin.
References
----------
Alexandru Niculescu-Mizil and Rich Caruana (2005) Predicting Good
Probabilities With Supervised Learning, in Proceedings of the 22nd
International Conference on Machine Learning (ICML).
See section 4 (Qualitative Analysis of Predictions).
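
    Examples
    --------
    A small illustrative call (the values of ``y_prob`` here are arbitrary)::

        import numpy as np
        from sklearn.calibration import calibration_curve

        y_true = np.array([0, 0, 1, 1])
        y_prob = np.array([0.1, 0.4, 0.35, 0.8])
        prob_true, prob_pred = calibration_curve(y_true, y_prob, n_bins=2)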
"""
y_true = column_or_1d(y_true)
y_prob = column_or_1d(y_prob)
if normalize: # Normalize predicted values into interval [0, 1]
y_prob = (y_prob - y_prob.min()) / (y_prob.max() - y_prob.min())
elif y_prob.min() < 0 or y_prob.max() > 1:
raise ValueError("y_prob has values outside [0, 1] and normalize is "
"set to False.")
y_true = _check_binary_probabilistic_predictions(y_true, y_prob)
bins = np.linspace(0., 1. + 1e-8, n_bins + 1)
binids = np.digitize(y_prob, bins) - 1
bin_sums = np.bincount(binids, weights=y_prob, minlength=len(bins))
bin_true = np.bincount(binids, weights=y_true, minlength=len(bins))
bin_total = np.bincount(binids, minlength=len(bins))
nonzero = bin_total != 0
prob_true = (bin_true[nonzero] / bin_total[nonzero])
prob_pred = (bin_sums[nonzero] / bin_total[nonzero])
return prob_true, prob_pred
| bsd-3-clause |
sunzhxjs/JobGIS | lib/python2.7/site-packages/pandas/tests/test_panelnd.py | 13 | 3798 | # -*- coding: utf-8 -*-
from datetime import datetime
import os
import operator
import nose
import numpy as np
from pandas.core import panelnd
from pandas.core.panel import Panel
import pandas.core.common as com
from pandas import compat
from pandas.util.testing import (assert_panel_equal,
assert_panel4d_equal,
assert_frame_equal,
assert_series_equal,
assert_almost_equal)
import pandas.util.testing as tm
class TestPanelnd(tm.TestCase):
def setUp(self):
pass
def test_4d_construction(self):
# create a 4D
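        # create_nd_panel_factory builds the class on the fly: `orders` names
        # the axes from outermost to innermost, `slicer` is the
        # lower-dimensional class obtained when indexing along the outermost
        # axis, `slices` maps the remaining axes onto the slicer's axes, and
        # `aliases`/`stat_axis` mirror the usual Panel conventions.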
Panel4D = panelnd.create_nd_panel_factory(
klass_name='Panel4D',
orders=['labels', 'items', 'major_axis', 'minor_axis'],
slices={'items': 'items', 'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel,
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel()))
def test_4d_construction_alt(self):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
klass_name='Panel4D',
orders=['labels', 'items', 'major_axis', 'minor_axis'],
slices={'items': 'items', 'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer='Panel',
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel()))
def test_4d_construction_error(self):
# create a 4D
self.assertRaises(Exception,
panelnd.create_nd_panel_factory,
klass_name='Panel4D',
orders=['labels', 'items', 'major_axis',
'minor_axis'],
slices={'items': 'items',
'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer='foo',
aliases={'major': 'major_axis',
'minor': 'minor_axis'},
stat_axis=2)
def test_5d_construction(self):
# create a 4D
Panel4D = panelnd.create_nd_panel_factory(
klass_name='Panel4D',
orders=['labels1', 'items', 'major_axis', 'minor_axis'],
slices={'items': 'items', 'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel,
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p4d = Panel4D(dict(L1=tm.makePanel(), L2=tm.makePanel()))
# create a 5D
Panel5D = panelnd.create_nd_panel_factory(
klass_name='Panel5D',
orders=['cool1', 'labels1', 'items', 'major_axis',
'minor_axis'],
slices={'labels1': 'labels1', 'items': 'items',
'major_axis': 'major_axis',
'minor_axis': 'minor_axis'},
slicer=Panel4D,
aliases={'major': 'major_axis', 'minor': 'minor_axis'},
stat_axis=2)
p5d = Panel5D(dict(C1=p4d))
# slice back to 4d
results = p5d.ix['C1', :, :, 0:3, :]
expected = p4d.ix[:, :, 0:3, :]
assert_panel_equal(results['L1'], expected['L1'])
# test a transpose
# results = p5d.transpose(1,2,3,4,0)
# expected =
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| mit |
0asa/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 134 | 7452 | """
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` is defining an eps-embedding with good probability
as defined by:
(1 - eps) ||u - v||^2 < ||p(u) - p(v)||^2 < (1 + eps) ||u - v||^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components needed to guarantee the eps-embedding is
given by:
n_components >= 4 log(n_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increased logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` drastically reduces the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positive)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups dataset, on the other hand, the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
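# A quick sanity check of the bound quoted in the docstring: for
# n_samples = 500 and a fairly tight distortion eps = 0.1 the lemma asks for
# roughly 4 * ln(500) / (0.1 ** 2 / 2 - 0.1 ** 3 / 3) ~= 24.86 / 0.00467,
# i.e. about 5300 dimensions -- which is why the 64-feature digits data
# cannot be usefully compressed this way (see the Remarks section above).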
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
lebek/reversible-raytracer | orbit_experiments/util.py | 1 | 2653 | import numpy as np
import theano
import theano.tensor as T
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
from scipy.misc import imsave
def initialize_weight(n_vis, n_hid, W_name, numpy_rng, rng_dist):
if 'uniform' in rng_dist:
W = numpy_rng.uniform(low=-np.sqrt(6. / (n_vis + n_hid)),\
high=np.sqrt(6. / (n_vis + n_hid)),
size=(n_vis, n_hid)).astype(theano.config.floatX)
elif rng_dist == 'normal':
W = 0.01 * numpy_rng.normal(size=(n_vis, n_hid)).astype(theano.config.floatX)
return theano.shared(value = W, name=W_name, borrow=True)
def initalize_conv_weight(filter_shape, poolsize, numpy_rng):
# there are "num input feature maps * filter height * filter width"
# inputs to each hidden unit
fan_in = np.prod(filter_shape[1:])
# each unit in the lower layer receives a gradient from:
# "num output feature maps * filter height * filter width" /
# pooling size
fan_out = (filter_shape[0] * np.prod(filter_shape[2:]) /
np.prod(poolsize))
# initialize weights with random weights
W_bound = np.sqrt(6. / (fan_in + fan_out))
W = theano.shared(
np.asarray(
numpy_rng.uniform(low=-W_bound, high=W_bound, size=filter_shape),
dtype=theano.config.floatX
),
borrow=True
)
return W
'''decaying learning rate'''
def get_epsilon(epsilon, n, i):
return float(epsilon / ( 1 + i/float(n)))
def broadcasted_switch(a, b, c):
return T.switch(a.dimshuffle(0, 1, 'x'), b, c)
def transNorm(transM, vec):
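    # Apply the 3x3 matrix transM to the last axis of `vec` (a 3-D tensor whose
    # last axis has length 3): transN[..., j] = sum_i vec[..., i] * transM[i][j],
    # i.e. the row-vector product vec . transM written out explicitly.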
transN = T.zeros_like(vec)
transN = T.set_subtensor(transN[:,:,0], vec[:,:,0] * transM[0][0] \
+ vec[:,:,1] * transM[1][0] + vec[:,:,2] * transM[2][0])
transN = T.set_subtensor(transN[:,:,1], vec[:,:,0] * transM[0][1] \
+ vec[:,:,1] * transM[1][1] + vec[:,:,2] * transM[2][1])
transN = T.set_subtensor(transN[:,:,2], vec[:,:,0] * transM[0][2] \
+ vec[:,:,1] * transM[1][2] + vec[:,:,2] * transM[2][2])
return transN
def drawWithMarkers(fname, im):
fig = plt.figure()
ax = fig.add_subplot(111)
ax.imshow(im, interpolation='nearest')
ax.add_patch(plt.Rectangle((85-3, 90-3), 6, 6, color='red',
linewidth=2, fill=False))
ax.add_patch(plt.Rectangle((90-3, 50-3), 6, 6, color='red',
linewidth=2, fill=False))
fig.savefig(fname, bbox_inches='tight', pad_inches=0)
def draw(fname, im):
imsave(fname, im)
def good_init_search():
pass
| mit |
VISTAS-IVES/pyvistas | plugins/graph/main.py | 1 | 7506 | from datetime import datetime
from io import BytesIO
import wx
from PIL import Image
from matplotlib import pyplot, dates
from vistas.core.color import RGBColor
from vistas.core.plugins.data import DataPlugin
from vistas.core.plugins.option import Option, OptionGroup
from vistas.core.plugins.visualization import VisualizationPlugin2D, VisualizationUpdateEvent
from vistas.core.timeline import Timeline
from vistas.ui.app import App
class GraphVisualization(VisualizationPlugin2D):
id = 'graph_visualization_plugin'
name = 'Graph Visualization'
description = 'Plots data on a 2D graph'
author = 'Conservation Biology Institute'
version = '1.0'
def __init__(self):
super().__init__()
self.data = []
self.option_groups = []
self.global_options = OptionGroup()
self.labels_option = Option(self, Option.CHECKBOX, 'Show Labels', True)
self.x_units_option = Option(self, Option.CHECKBOX, 'X-Axis Units', True)
self.y_units_option = Option(self, Option.CHECKBOX, 'Y-Axis Units', True)
self.cursor_option = Option(self, Option.CHECKBOX, 'Show Timeline Cursor', True)
self.bg_color_option = Option(self, Option.COLOR, 'Background Color', RGBColor(0, 0, 0))
self.label_color_option = Option(self, Option.COLOR, 'Label Color', RGBColor(1, 1, 1))
self.global_options.items = [
self.labels_option, self.x_units_option, self.y_units_option, self.cursor_option,
Option(self, Option.SPACER), self.bg_color_option, self.label_color_option
]
def get_group_option(self, plugin, option_name):
for group in self.option_groups:
if group.name == plugin.data_name:
for option in group.flat_list:
if option.name == option_name:
return option
return None
def get_options(self):
options = OptionGroup()
options.items.append(self.global_options)
for group in self.option_groups:
options.items.append(group)
return options
def _update_options(self):
if len(self.data) < len(self.option_groups):
# We removed a data plugin, figure out which one
while len(self.data) < len(self.option_groups):
group = None
for g in self.option_groups:
found = False
for p in self.data:
if p.data_name == g.name:
found = True
break
if not found:
group = g
break
self.option_groups.remove(group)
elif len(self.data) > len(self.option_groups):
# We added data, figure out which one
while len(self.data) > len(self.option_groups):
plugin = None
for p in self.data:
found = False
for group in self.option_groups:
if group.name == p.data_name:
found = True
break
if not found:
plugin = p
break
label = plugin.data_name
group = OptionGroup(label)
attr_option = Option(self, Option.CHOICE, 'Variable', 0)
attr_option.labels = plugin.variables
color_option = Option(self, Option.COLOR, 'Color', RGBColor(0, 0, 1))
group.items = [attr_option, color_option]
self.option_groups.append(group)
@property
def can_visualize(self):
return len(self.data) > 0
@property
def visualization_name(self):
return 'Graph Visualization' if len(self.data) == 0 else 'Graph of {}'.format(self.data[0].data_name)
@property
def data_roles(self):
return [
(DataPlugin.ARRAY, 'Data')
]
def role_supports_multiple_inputs(self, role):
if role == 0:
return True
return False
def role_size(self, role):
return len(self.data)
def set_data(self, data: DataPlugin, role):
if data is None:
self.data = []
else:
self.data.append(data)
self._update_options()
wx.PostEvent(App.get().app_controller.main_window, VisualizationUpdateEvent(plugin=self))
def remove_subdata(self, role, subrole):
if subrole < len(self.data):
self.data.pop(subrole)
self._update_options()
wx.PostEvent(App.get().app_controller.main_window, VisualizationUpdateEvent(plugin=self))
def get_data(self, role):
return self.data[0] if len(self.data) > 0 else None
def get_multiple_data(self, role):
return self.data
def fig_to_pil(self, fig):
f = BytesIO()
fig.savefig(f, format='png', facecolor=fig.get_facecolor())
f.seek(0)
return Image.open(f, 'r')
def render(self, width, height):
if self.data is None:
return
show_labels = self.labels_option.value
show_cursor = self.cursor_option.value
background_color = self.bg_color_option.value.rgb.rgb_list
label_color = self.label_color_option.value.rgb.rgb_list
fig = pyplot.figure(
figsize=(width / 100, height / 100), dpi=100, tight_layout=True,
facecolor=self.bg_color_option.value.rgb.rgb_list
)
try:
ax = fig.add_subplot(1, 1, 1, facecolor=background_color)
ax.margins(1 / width, 1 / height)
for data_plugin in self.data:
data_color = self.get_group_option(data_plugin, 'Color').value.rgb.rgb_list
data_variable = data_plugin.variables[self.get_group_option(data_plugin, 'Variable').value]
data = (data_plugin.get_data(data_variable),)
if data_plugin.time_info.is_temporal:
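                    # Temporal data: plot against day numbers derived from the
                    # timestamps and label the x-axis with calendar dates.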
data = ([(x - datetime(1, 1, 1)).days for x in data_plugin.time_info.timestamps],) + data
ax.xaxis.set_major_formatter(dates.DateFormatter('%b %d, %Y'))
ax.plot(*data, color=data_color, label=data_variable, linewidth=1)
for spine in ('right', 'top'):
ax.spines[spine].set_visible(False)
if show_labels:
legend = ax.legend(loc='best', facecolor=background_color)
legend.get_frame().set_alpha(.6)
for text in legend.get_texts():
text.set_color(label_color)
if show_cursor and any([x.time_info.is_temporal for x in self.data]):
current_time = Timeline.app().current
color = (1, 1, 1) if self.bg_color_option.value.hsv.v < .5 else (0, 0, 0)
ax.axvline(x=(current_time - datetime(1, 1, 1)).days, color=color)
ax.tick_params(axis='both', color=label_color)
for spine in ('left', 'bottom'):
ax.spines[spine].set_color(label_color)
for label in ax.get_xticklabels() + ax.get_yticklabels():
label.set_color(label_color)
return self.fig_to_pil(fig).resize((width, height))
finally:
pyplot.close(fig)
def timeline_changed(self):
wx.PostEvent(App.get().app_controller.main_window, VisualizationUpdateEvent(plugin=self))
| bsd-3-clause |
ychfan/tensorflow | tensorflow/examples/learn/text_classification_character_rnn.py | 8 | 4104 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example of recurrent neural networks over characters for DBpedia dataset.
This model is similar to one described in this paper:
"Character-level Convolutional Networks for Text Classification"
http://arxiv.org/abs/1509.01626
and is somewhat of an alternative to the Lua code from here:
https://github.com/zhangxiangxiao/Crepe
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import pandas
import tensorflow as tf
FLAGS = None
MAX_DOCUMENT_LENGTH = 100
HIDDEN_SIZE = 20
MAX_LABEL = 15
CHARS_FEATURE = 'chars' # Name of the input character feature.
def char_rnn_model(features, labels, mode):
"""Character level recurrent neural network model to predict classes."""
byte_vectors = tf.one_hot(features[CHARS_FEATURE], 256, 1., 0.)
byte_list = tf.unstack(byte_vectors, axis=1)
cell = tf.nn.rnn_cell.GRUCell(HIDDEN_SIZE)
_, encoding = tf.nn.static_rnn(cell, byte_list, dtype=tf.float32)
logits = tf.layers.dense(encoding, MAX_LABEL, activation=None)
predicted_classes = tf.argmax(logits, 1)
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(
mode=mode,
predictions={
'class': predicted_classes,
'prob': tf.nn.softmax(logits)
})
onehot_labels = tf.one_hot(labels, MAX_LABEL, 1, 0)
loss = tf.losses.softmax_cross_entropy(
onehot_labels=onehot_labels, logits=logits)
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
train_op = optimizer.minimize(loss, global_step=tf.train.get_global_step())
return tf.estimator.EstimatorSpec(mode, loss=loss, train_op=train_op)
eval_metric_ops = {
'accuracy': tf.metrics.accuracy(
labels=labels, predictions=predicted_classes)
}
return tf.estimator.EstimatorSpec(
mode=mode, loss=loss, eval_metric_ops=eval_metric_ops)
def main(unused_argv):
# Prepare training and testing data
dbpedia = tf.contrib.learn.datasets.load_dataset(
'dbpedia', test_with_fake_data=FLAGS.test_with_fake_data)
x_train = pandas.DataFrame(dbpedia.train.data)[1]
y_train = pandas.Series(dbpedia.train.target)
x_test = pandas.DataFrame(dbpedia.test.data)[1]
y_test = pandas.Series(dbpedia.test.target)
# Process vocabulary
char_processor = tf.contrib.learn.preprocessing.ByteProcessor(
MAX_DOCUMENT_LENGTH)
x_train = np.array(list(char_processor.fit_transform(x_train)))
x_test = np.array(list(char_processor.transform(x_test)))
# Build model
classifier = tf.estimator.Estimator(model_fn=char_rnn_model)
# Train.
train_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_train},
y=y_train,
batch_size=128,
num_epochs=None,
shuffle=True)
classifier.train(input_fn=train_input_fn, steps=100)
# Eval.
test_input_fn = tf.estimator.inputs.numpy_input_fn(
x={CHARS_FEATURE: x_test},
y=y_test,
num_epochs=1,
shuffle=False)
scores = classifier.evaluate(input_fn=test_input_fn)
print('Accuracy: {0:f}'.format(scores['accuracy']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--test_with_fake_data',
default=False,
help='Test the example code with fake data.',
action='store_true')
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
mugizico/scikit-learn | sklearn/linear_model/tests/test_passive_aggressive.py | 121 | 6117 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_array_almost_equal, assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.base import ClassifierMixin
from sklearn.utils import check_random_state
from sklearn.datasets import load_iris
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.linear_model import PassiveAggressiveRegressor
iris = load_iris()
random_state = check_random_state(12)
indices = np.arange(iris.data.shape[0])
random_state.shuffle(indices)
X = iris.data[indices]
y = iris.target[indices]
X_csr = sp.csr_matrix(X)
class MyPassiveAggressive(ClassifierMixin):
def __init__(self, C=1.0, epsilon=0.01, loss="hinge",
fit_intercept=True, n_iter=1, random_state=None):
self.C = C
self.epsilon = epsilon
self.loss = loss
self.fit_intercept = fit_intercept
self.n_iter = n_iter
def fit(self, X, y):
n_samples, n_features = X.shape
self.w = np.zeros(n_features, dtype=np.float64)
self.b = 0.0
for t in range(self.n_iter):
for i in range(n_samples):
p = self.project(X[i])
if self.loss in ("hinge", "squared_hinge"):
loss = max(1 - y[i] * p, 0)
else:
loss = max(np.abs(p - y[i]) - self.epsilon, 0)
sqnorm = np.dot(X[i], X[i])
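                # The step sizes below follow Crammer et al. (2006): the
                # PA-I update clips at C for the hinge/epsilon-insensitive
                # losses, while the PA-II update adds 1 / (2C) to the squared
                # norm for the squared variants.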
if self.loss in ("hinge", "epsilon_insensitive"):
step = min(self.C, loss / sqnorm)
elif self.loss in ("squared_hinge",
"squared_epsilon_insensitive"):
step = loss / (sqnorm + 1.0 / (2 * self.C))
if self.loss in ("hinge", "squared_hinge"):
step *= y[i]
else:
step *= np.sign(y[i] - p)
self.w += step * X[i]
if self.fit_intercept:
self.b += step
def project(self, X):
return np.dot(X, self.w) + self.b
def test_classifier_accuracy():
for data in (X, X_csr):
for fit_intercept in (True, False):
clf = PassiveAggressiveClassifier(C=1.0, n_iter=30,
fit_intercept=fit_intercept,
random_state=0)
clf.fit(data, y)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_partial_fit():
classes = np.unique(y)
for data in (X, X_csr):
clf = PassiveAggressiveClassifier(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(30):
clf.partial_fit(data, y, classes)
score = clf.score(data, y)
assert_greater(score, 0.79)
def test_classifier_refit():
# Classifier can be retrained on different labels and features.
clf = PassiveAggressiveClassifier().fit(X, y)
assert_array_equal(clf.classes_, np.unique(y))
clf.fit(X[:, :-1], iris.target_names[y])
assert_array_equal(clf.classes_, iris.target_names)
def test_classifier_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("hinge", "squared_hinge"):
clf1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
clf1.fit(X, y_bin)
for data in (X, X_csr):
clf2 = PassiveAggressiveClassifier(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
clf2.fit(data, y_bin)
assert_array_almost_equal(clf1.w, clf2.coef_.ravel(), decimal=2)
def test_classifier_undefined_methods():
clf = PassiveAggressiveClassifier()
for meth in ("predict_proba", "predict_log_proba", "transform"):
assert_raises(AttributeError, lambda x: getattr(clf, x), meth)
def test_regressor_mse():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
for fit_intercept in (True, False):
reg = PassiveAggressiveRegressor(C=1.0, n_iter=50,
fit_intercept=fit_intercept,
random_state=0)
reg.fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_partial_fit():
y_bin = y.copy()
y_bin[y != 1] = -1
for data in (X, X_csr):
reg = PassiveAggressiveRegressor(C=1.0,
fit_intercept=True,
random_state=0)
for t in range(50):
reg.partial_fit(data, y_bin)
pred = reg.predict(data)
assert_less(np.mean((pred - y_bin) ** 2), 1.7)
def test_regressor_correctness():
y_bin = y.copy()
y_bin[y != 1] = -1
for loss in ("epsilon_insensitive", "squared_epsilon_insensitive"):
reg1 = MyPassiveAggressive(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2)
reg1.fit(X, y_bin)
for data in (X, X_csr):
reg2 = PassiveAggressiveRegressor(C=1.0,
loss=loss,
fit_intercept=True,
n_iter=2, shuffle=False)
reg2.fit(data, y_bin)
assert_array_almost_equal(reg1.w, reg2.coef_.ravel(), decimal=2)
def test_regressor_undefined_methods():
reg = PassiveAggressiveRegressor()
for meth in ("transform",):
assert_raises(AttributeError, lambda x: getattr(reg, x), meth)
| bsd-3-clause |
galtay/data_sci_ale | project_1/explore.py | 1 | 1547 | import sys
import argparse
import pandas
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import seaborn as sns
import us_states
from geo_var_state_county import CmsGeoVarCountyTable
def make_pair_plot(fname, level):
gvct = CmsGeoVarCountyTable(fname, verbose=True)
df = gvct.select_rows(level)
pair_cols = [
'Average HCC Score',
'Standardized Per Capita Costs',
'Emergency Department Visits per 1000 Beneficiaries'
]
plt_df = df[pair_cols].dropna()
plt_df.columns = ['Avg HCC', 'Cost/Person [$1k]', 'EDD/10']
plt_df['Cost/Person [$1k]'] = plt_df['Cost/Person [$1k]'] * 1.0e-3
plt_df['EDD/10'] = plt_df['EDD/10'] * 1.0e-2
n_levels = 5
palette = list(reversed(sns.color_palette("Reds_d", n_levels)))
my_cmap = ListedColormap(palette)
g = sns.PairGrid(plt_df, size=2.5)
g.map_diag(plt.hist)
g.map_offdiag(plt.scatter, s=10, alpha=0.5)
g.map_offdiag(sns.kdeplot, cmap=my_cmap, n_levels=n_levels);
g.savefig('gvct_pairplot_{}.png'.format(level))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--fname',
type=str,
default='./data/County_All_Table_2014.csv',
help='name of geographical variation state/county file')
parser.add_argument(
'--level',
default='state',
choices=['state', 'county'],
help='rows to select from data')
args = parser.parse_args()
make_pair_plot(fname=args.fname, level=args.level)
| gpl-3.0 |
kjordahl/xray | xray/core/common.py | 2 | 15143 | import functools
import numpy as np
import pandas as pd
from .pycompat import basestring, iteritems
from . import formatting
from .utils import SortedKeysDict
class ImplementsArrayReduce(object):
@classmethod
def _reduce_method(cls, func, include_skipna, numeric_only):
if include_skipna:
def wrapped_func(self, dim=None, axis=None, skipna=None,
keep_attrs=False, **kwargs):
return self.reduce(func, dim, axis, keep_attrs=keep_attrs,
skipna=skipna, allow_lazy=True, **kwargs)
else:
def wrapped_func(self, dim=None, axis=None, keep_attrs=False,
**kwargs):
return self.reduce(func, dim, axis, keep_attrs=keep_attrs,
allow_lazy=True, **kwargs)
return wrapped_func
_reduce_extra_args_docstring = \
"""dim : str or sequence of str, optional
Dimension(s) over which to apply `{name}`.
axis : int or sequence of int, optional
Axis(es) over which to apply `{name}`. Only one of the 'dim'
and 'axis' arguments can be supplied. If neither are supplied, then
            `{name}` is calculated over all axes."""
class ImplementsDatasetReduce(object):
@classmethod
def _reduce_method(cls, func, include_skipna, numeric_only):
if include_skipna:
def wrapped_func(self, dim=None, keep_attrs=False, skipna=None,
**kwargs):
return self.reduce(func, dim, keep_attrs, skipna=skipna,
numeric_only=numeric_only, allow_lazy=True,
**kwargs)
else:
def wrapped_func(self, dim=None, keep_attrs=False, **kwargs):
return self.reduce(func, dim, keep_attrs,
numeric_only=numeric_only, allow_lazy=True,
**kwargs)
return wrapped_func
_reduce_extra_args_docstring = \
"""dim : str or sequence of str, optional
Dimension(s) over which to apply `func`. By default `func` is
applied over all dimensions."""
class AbstractArray(ImplementsArrayReduce):
def __bool__(self):
return bool(self.values)
# Python 3 uses __bool__, Python 2 uses __nonzero__
__nonzero__ = __bool__
def __float__(self):
return float(self.values)
def __int__(self):
return int(self.values)
def __complex__(self):
return complex(self.values)
def __long__(self):
return long(self.values)
def __array__(self, dtype=None):
return np.asarray(self.values, dtype=dtype)
def __repr__(self):
return formatting.array_repr(self)
def _iter(self):
for n in range(len(self)):
yield self[n]
def __iter__(self):
if self.ndim == 0:
raise TypeError('iteration over a 0-d array')
return self._iter()
@property
def T(self):
return self.transpose()
def get_axis_num(self, dim):
"""Return axis number(s) corresponding to dimension(s) in this array.
Parameters
----------
dim : str or iterable of str
Dimension name(s) for which to lookup axes.
Returns
-------
int or tuple of int
Axis number or numbers corresponding to the given dimensions.
"""
if isinstance(dim, basestring):
return self._get_axis_num(dim)
else:
return tuple(self._get_axis_num(d) for d in dim)
def _get_axis_num(self, dim):
try:
return self.dims.index(dim)
except ValueError:
raise ValueError("%r not found in array dimensions %r" %
(dim, self.dims))
class AttrAccessMixin(object):
"""Mixin class that allow getting keys with attribute access
"""
@property
def __attr_sources__(self):
"""List of places to look-up items for attribute-style access"""
return [self, self.attrs]
def __getattr__(self, name):
for source in self.__attr_sources__:
try:
return source[name]
except KeyError:
pass
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, name))
def __dir__(self):
"""Provide method name lookup and completion. Only provide 'public'
methods.
"""
extra_attrs = [item for sublist in self.__attr_sources__
for item in sublist]
return sorted(set(dir(type(self)) + extra_attrs))
class BaseDataObject(AttrAccessMixin):
def _calc_assign_results(self, kwargs):
results = SortedKeysDict()
for k, v in kwargs.items():
if callable(v):
results[k] = v(self)
else:
results[k] = v
return results
def assign_coords(self, **kwargs):
"""Assign new coordinates to this object, returning a new object
with all the original data in addition to the new coordinates.
Parameters
----------
kwargs : keyword, value pairs
            keywords are the variable names. If the values are callable, they
are computed on this object and assigned to new coordinate
variables. If the values are not callable, (e.g. a DataArray,
scalar, or array), they are simply assigned.
Returns
-------
assigned : same type as caller
A new object with the new coordinates in addition to the existing
data.
Notes
-----
Since ``kwargs`` is a dictionary, the order of your arguments may not
be preserved, and so the order of the new variables is not well
defined. Assigning multiple variables within the same ``assign_coords``
is possible, but you cannot reference other variables created within
the same ``assign_coords`` call.
See also
--------
Dataset.assign
"""
data = self.copy(deep=False)
results = data._calc_assign_results(kwargs)
data.coords.update(results)
return data
def pipe(self, func, *args, **kwargs):
"""
Apply func(self, *args, **kwargs)
This method replicates the pandas method of the same name.
Parameters
----------
func : function
function to apply to this xray object (Dataset/DataArray).
``args``, and ``kwargs`` are passed into ``func``.
Alternatively a ``(callable, data_keyword)`` tuple where
``data_keyword`` is a string indicating the keyword of
``callable`` that expects the xray object.
args : positional arguments passed into ``func``.
kwargs : a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
Use ``.pipe`` when chaining together functions that expect
xray or pandas objects, e.g., instead of writing
>>> f(g(h(ds), arg1=a), arg2=b, arg3=c)
You can write
>>> (ds.pipe(h)
... .pipe(g, arg1=a)
... .pipe(f, arg2=b, arg3=c)
... )
If you have a function that takes the data as (say) the second
argument, pass a tuple indicating which keyword expects the
data. For example, suppose ``f`` takes its data as ``arg2``:
>>> (ds.pipe(h)
... .pipe(g, arg1=a)
... .pipe((f, 'arg2'), arg1=a, arg3=c)
... )
See Also
--------
pandas.DataFrame.pipe
"""
if isinstance(func, tuple):
func, target = func
if target in kwargs:
msg = '%s is both the pipe target and a keyword argument' % target
raise ValueError(msg)
kwargs[target] = self
return func(*args, **kwargs)
else:
return func(self, *args, **kwargs)
def groupby(self, group, squeeze=True):
"""Returns a GroupBy object for performing grouped operations.
Parameters
----------
group : str, DataArray or Coordinate
Array whose unique values should be used to group this array. If a
string, must be the name of a variable contained in this dataset.
squeeze : boolean, optional
If "group" is a dimension of any arrays in this dataset, `squeeze`
controls whether the subarrays have a dimension of length 1 along
that dimension or if the dimension is squeezed out.
Returns
-------
grouped : GroupBy
A `GroupBy` object patterned after `pandas.GroupBy` that can be
iterated over in the form of `(unique_value, grouped_array)` pairs.
"""
if isinstance(group, basestring):
group = self[group]
return self.groupby_cls(self, group, squeeze=squeeze)
def resample(self, freq, dim, how='mean', skipna=None, closed=None,
label=None, base=0):
"""Resample this object to a new temporal resolution.
Handles both downsampling and upsampling. Upsampling with filling is
not yet supported; if any intervals contain no values in the original
object, they will be given the value ``NaN``.
Parameters
----------
freq : str
            String in the form '#offset' specifying the step size along the
            resampled dimension, where '#' is an (optional) integer multiplier
            (default 1) and 'offset' is any pandas date offset alias. Examples
of valid offsets include:
* 'AS': year start
* 'Q-DEC': quarter, starting on December 1
* 'MS': month start
* 'D': day
* 'H': hour
* 'Min': minute
The full list of these offset aliases is documented in pandas [1]_.
dim : str
Name of the dimension to resample along (e.g., 'time').
how : str or func, optional
Used for downsampling. If a string, ``how`` must be a valid
aggregation operation supported by xray. Otherwise, ``how`` must be
a function that can be called like ``how(values, axis)`` to reduce
ndarray values along the given axis. Valid choices that can be
provided as a string include all the usual Dataset/DataArray
aggregations (``all``, ``any``, ``argmax``, ``argmin``, ``max``,
``mean``, ``median``, ``min``, ``prod``, ``sum``, ``std`` and
``var``), as well as ``first`` and ``last``.
skipna : bool, optional
Whether to skip missing values when aggregating in downsampling.
closed : 'left' or 'right', optional
Side of each interval to treat as closed.
        label : 'left' or 'right', optional
            Side of each interval to use for labeling.
        base : int, optional
For frequencies that evenly subdivide 1 day, the "origin" of the
aggregated intervals. For example, for '24H' frequency, base could
range from 0 through 23.
Returns
-------
resampled : same type as caller
This object resampled.
References
----------
.. [1] http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases
"""
from .dataarray import DataArray
RESAMPLE_DIM = '__resample_dim__'
if isinstance(dim, basestring):
dim = self[dim]
group = DataArray(dim, name=RESAMPLE_DIM)
time_grouper = pd.TimeGrouper(freq=freq, how=how, closed=closed,
label=label, base=base)
gb = self.groupby_cls(self, group, grouper=time_grouper)
if isinstance(how, basestring):
f = getattr(gb, how)
if how in ['first', 'last']:
result = f(skipna=skipna)
else:
result = f(dim=dim.name, skipna=skipna)
else:
result = gb.reduce(how, dim=dim.name)
result = result.rename({RESAMPLE_DIM: dim.name})
return result
def where(self, cond):
"""Return an object of the same shape with all entries where cond is
True and all other entries masked.
This operation follows the normal broadcasting and alignment rules that
xray uses for binary arithmetic.
Parameters
----------
cond : boolean DataArray or Dataset
Returns
-------
same type as caller
Examples
--------
>>> import numpy as np
>>> a = xray.DataArray(np.arange(25).reshape(5, 5), dims=('x', 'y'))
>>> a.where((a > 6) & (a < 18))
<xray.DataArray (x: 5, y: 5)>
array([[ nan, nan, nan, nan, nan],
[ nan, nan, 7., 8., 9.],
[ 10., 11., 12., 13., 14.],
[ 15., 16., 17., nan, nan],
[ nan, nan, nan, nan, nan]])
Coordinates:
* y (y) int64 0 1 2 3 4
* x (x) int64 0 1 2 3 4
"""
return self._where(cond)
def squeeze(xray_obj, dims, dim=None):
"""Squeeze the dims of an xray object."""
if dim is None:
dim = [d for d, s in iteritems(dims) if s == 1]
else:
if isinstance(dim, basestring):
dim = [dim]
if any(dims[k] > 1 for k in dim):
raise ValueError('cannot select a dimension to squeeze out '
'which has length greater than one')
return xray_obj.isel(**dict((d, 0) for d in dim))
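# A minimal usage sketch of the module-level ``squeeze`` helper above. The toy
# DataArray and the relative import path are assumptions for illustration, and
# this helper is never called by the library itself.
def _squeeze_usage_sketch():
    from .dataarray import DataArray  # assumed import path within this package
    arr = DataArray(np.zeros((3, 1)), dims=['x', 'y'])
    # drop the length-1 'y' dimension, mirroring DataArray.squeeze()
    return squeeze(arr, dict(zip(arr.dims, arr.shape)), dim='y')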
def _maybe_promote(dtype):
"""Simpler equivalent of pandas.core.common._maybe_promote"""
# N.B. these casting rules should match pandas
if np.issubdtype(dtype, float):
fill_value = np.nan
elif np.issubdtype(dtype, int):
# convert to floating point so NaN is valid
dtype = float
fill_value = np.nan
elif np.issubdtype(dtype, np.datetime64):
fill_value = np.datetime64('NaT')
elif np.issubdtype(dtype, np.timedelta64):
fill_value = np.timedelta64('NaT')
else:
dtype = object
fill_value = np.nan
return np.dtype(dtype), fill_value
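# A small, hedged illustration of the casting rules implemented above; it only
# relies on the module-level numpy import and is not used elsewhere.
def _maybe_promote_examples():
    promoted = dict((name, _maybe_promote(np.dtype(name)))
                    for name in ('float64', 'int32', 'datetime64[ns]', 'bool'))
    # int32 is promoted to float64 so NaN becomes representable; bool falls
    # back to object with a NaN fill value; datetime64 keeps NaT as its fill.
    return promoted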
def _possibly_convert_objects(values):
"""Convert arrays of datetime.datetime and datetime.timedelta objects into
datetime64 and timedelta64
"""
try:
converter = functools.partial(pd.core.common._possibly_convert_objects,
convert_numeric=False)
except AttributeError:
# our fault for using a private pandas API that has gone missing
# this should do the same coercion (though it will be slower)
converter = lambda x: np.asarray(pd.Series(x))
return converter(values.ravel()).reshape(values.shape)
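# A hedged example of the object-array coercion above: datetime.datetime
# values come back as datetime64[ns]. Illustrative only; not called anywhere.
def _possibly_convert_objects_example():
    import datetime
    values = np.array([datetime.datetime(2000, 1, 1),
                       datetime.datetime(2000, 1, 2)], dtype=object)
    return _possibly_convert_objects(values).dtype  # expected: datetime64[ns]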
| apache-2.0 |
zhangfang615/Tuberculosis | simulation/TB_statistics.py | 1 | 14089 | from __future__ import division
__author__ = 'Fang'
__date__= '2016.9.16'
__email__= 'fza34@sfu.ca'
__function__ = 'Tuberculosis simulation statistics'
import os
import random
import ast
import pandas
class patient:
    def __init__(self):
self.removal = False
self.resistant = False
self.eventlist = []
self.mutation = {}
self.resistant_mutation = set()
def load_positions(position_path):
position_file=file(position_path)
position_list=[]
position=position_file.readline().strip()
while position:
position_list.append(position)
position = position_file.readline().strip()
position_file.close()
return position_list
def load_resistant_SNPs(resi_path, SNP_positions,TB_sequence):
resitant_SNPs_file=file(resi_path)
line = resitant_SNPs_file.readline().strip()
resistant_SNPs = {}
while line:
if line.startswith("Mycobacterium"):
fields=line.split("\t")
position=int(SNP_positions.index(fields[1]))
mutation=TB_sequence[position]+" "+fields[4].upper()
if position in resistant_SNPs.keys():
resistant_SNPs[position] = resistant_SNPs[position]+","+mutation
else:
resistant_SNPs[position] = mutation
line = resitant_SNPs_file.readline().strip()
resitant_SNPs_file.close()
return resistant_SNPs
def eventlist_str2list(eventlist_string):
eventlist = []
events = eventlist_string.split("\t")
for event in events:
eventlist.append(event)
return eventlist
def mutation_str2dic(mutation_string):
mutation_string = mutation_string[11:]
return ast.literal_eval(mutation_string)
def resistant_mutation_str2set(resistant_mutation_string):
resistant_mutation_string = resistant_mutation_string[20:]
# resistant_mutation_string= "set([2352, 1094, 1066, 3659, 3632, 3708])"
return eval(resistant_mutation_string)
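# Hedged sketch of the parsing helper above. The exact 20-character
# "resistant_mutation: " prefix is an assumption about the simulation dump
# format, chosen here so the slice in resistant_mutation_str2set lines up.
def _resistant_mutation_parse_demo():
    line = "resistant_mutation: set([2352, 1094, 1066])"
    return resistant_mutation_str2set(line)  # expected: set([2352, 1094, 1066])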
def resistant_str2bool(resistant_string):
resistant_string = resistant_string.split(' ')[1]
if resistant_string == "True":
return True
else:
return False
def removal_str2bool(removal_string):
removal_string = removal_string.split(' ')[1]
if removal_string == "True":
return True
else:
return False
def reconstruct_patients_list(patients, simulation_file):
text = simulation_file.readlines()
for i in range(1024):
pat = patient()
patient_text=text[7+7*i:14+7*i]
pat.eventlist = eventlist_str2list(patient_text[1].strip())
pat.mutation = mutation_str2dic(patient_text[2].strip())
pat.resistant_mutation = resistant_mutation_str2set(patient_text[3].strip())
pat.resistant = resistant_str2bool(patient_text[4].strip())
pat.removal = removal_str2bool(patient_text[5].strip())
patients.append(pat)
return patients
def patients_sampling(unremoved, kappa):
sample_number=int(kappa*len(unremoved))
patients_sampling = random.sample(unremoved, sample_number)
return patients_sampling
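# Hedged sketch of the sampling helper above on a toy population; it mirrors
# the kappa = 0.1 sampling fraction used by the main loop below but is not
# called by the pipeline itself.
def _patients_sampling_demo():
    random.seed(0)
    unremoved_demo = set(range(100))
    return len(patients_sampling(unremoved_demo, 0.1))  # expected: 10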
def if_SNP_resistant(mutate_position, TB_sequence, nucleotide_muatated, resistant_SNPs):
nucleotide=TB_sequence[mutate_position]
SNPs = resistant_SNPs[mutate_position].split(",")
for SNP in SNPs:
mutation_pair = SNP.split(" ")
if nucleotide == mutation_pair[0] and nucleotide_muatated == mutation_pair[1]:
return True
return False
def if_mutate_resistant(mutate_position, TB_sequence, nucleotide_muatated, resistant_SNPs):
if not mutate_position in resistant_SNPs:
return False
elif not if_SNP_resistant(mutate_position, TB_sequence, nucleotide_muatated, resistant_SNPs):
return False
return True
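# Hedged example of the resistance check above against a toy two-entry table.
# The short reference sequence and the SNP table are assumptions for
# illustration only.
def _if_mutate_resistant_demo():
    toy_sequence = "ACGT"
    toy_resistant_SNPs = {2: "G T,G A"}  # position 2: G->T or G->A is resistant
    return (if_mutate_resistant(2, toy_sequence, "T", toy_resistant_SNPs),
            if_mutate_resistant(2, toy_sequence, "C", toy_resistant_SNPs))
    # expected: (True, False)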
def get_resistant_eventlist(patients,n, resistant_SNPs):
resistant_eventlist = []
patient = patients[n]
while patient.eventlist:
event = patient.eventlist[-1].split(" ")
if event[0] == '4':
patient.removal = False
patient.eventlist.pop()
elif event[0] == '2':
if event[1] == str(n):
patient.eventlist.pop()
else:
patient.mutation.clear()
mutations = event[3].split(";")
mutations = mutations[0:len(mutations) - 1]
for mutation in mutations:
fields = mutation.split(":")
patient.mutation[int(fields[0])] = fields[1]
patient.resistant_mutation.clear()
res = patient.resistant # record resistant
patient.resistant = False
for mutate_position in patient.mutation.keys():
nucleotide_muatated = list(patient.mutation[mutate_position])[1]
if if_mutate_resistant(mutate_position, TB_sequence, nucleotide_muatated, resistant_SNPs):
patient.resistant_mutation.add(mutate_position)
patient.resistant = True
if res == True and patient.resistant ==False:
resistant_eventlist.append('2')
patient.eventlist.pop()
elif event[0] == '1':
SNP = list(patient.mutation[int(event[1])])
if event[2] == SNP[0] and event[3] == SNP[1]:
patient.mutation.pop(int(event[1]))
patient.eventlist.pop()
else:
print "Bug!"
break
else:
SNP = list(patient.mutation[int(event[1])])
if event[3] == SNP[1]:
if event[2] == SNP[0]:
patient.mutation.pop(int(event[1]))
else:
SNP[1] = event[2]
patient.mutation[int(event[1])] = "".join(SNP)
try:
patient.resistant_mutation.remove(int(event[1]))
except Exception, e:
# print patient.eventlist[-1]
# print event[1]+event[2]+event[3]
print Exception, ":", e
# print if_mutate_resistant(int(event[1]), TB_sequence, event[3], resistant_SNPs)
# print resistant_SNPs[int(event[1])]
resistant_eventlist.append('3')
if not patient.resistant_mutation:
patient.resistant = False
patient.eventlist.pop()
else:
print "Bug!"
break
    if patient.mutation or patient.removal or patient.resistant:
        print "failed to trace back!"
return resistant_eventlist
if __name__ == '__main__':
statistic_output = file("E:/PYTHON_PROJECTS/TB_simulation/statistics_new", 'w')
t_list = [10, 20, 30, 40, 50] # time span 10, 15, 20, 25, 30, 35, 40, 45, 50
beta_list = [0.02, 0.025, 0.03, 0.035, 0.04] # contact/reinfection rate 0.001,0.01,0.05,0.1,0.15,0.2,0.25,0.3,0.35,0.4
P_resi_list = [0.0005, 0.0008, 0.001, 0.0015, 0.002] # rate of breakdown 0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5
# Pt=0.2 # probability of seeking treatment 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8
# Pr=0.3 # probability of resistant 0.01,0.05, 0.1, 0.15, 0.2, 0.3, .0.4, 0.5, 0.6, 0.7, 1
gama_list = [0.0005, 0.001, 0.005, 0.01, 0.02] # rate of removal 0.01, 0.05, 0.1, 0.2, 0.3
for t in t_list:
for beta in beta_list:
for P_resi in P_resi_list:
for gama in gama_list:
ancestor = file("E:/PYTHON_PROJECTS/TB_simulation/ancestor.fasta")
TB_sequence = ancestor.readline().strip()
ancestor.close()
SNP_positions = load_positions("E:/PYTHON_PROJECTS/TB_simulation/mutate_SNPs.txt")
resistant_SNPs = load_resistant_SNPs("E:/PYTHON_PROJECTS/TB_simulation/resi.vcf", SNP_positions,TB_sequence)
kappa = 0.1
patients = []
filename = "simulation_0.26_"+str(t)+"_"+str(beta)+"_"+str(P_resi)+"_"+str(gama)+".txt"
simulation_file = file("E:/PYTHON_PROJECTS/TB_simulation/output/" + filename)
reconstruct_patients_list(patients, simulation_file)
unremoved = set()
for i in range(0, len(patients)):
if not patients[i].removal:
unremoved.add(i)
count_sampling = []
count_resistant = []
count_unresistant = []
count_resistant_trans = []
count_resistant_acq = []
count_resistant_mevent = []
count_unresistant_oncere = []
for i in range(0, 20):
count_resistant.append(0)
count_unresistant.append(0)
count_resistant_trans.append(0)
count_resistant_acq.append(0)
count_resistant_mevent.append(0)
count_unresistant_oncere.append(0)
seed = random.randint(0, 100000)
random.Random(seed)
sampling_patients = patients_sampling(unremoved, kappa)
count_sampling.append(len(sampling_patients))
sampling_patients.sort()
resistant_sample = set()
unresistant_sample = set()
for sample in sampling_patients: # count resistant samples and separate resistant and unresistant samples
if patients[sample].resistant:
count_resistant[i] += 1
resistant_sample.add(sample)
else:
unresistant_sample.add(sample)
for sample in resistant_sample:
resistant_eventlist = get_resistant_eventlist(patients, sample, resistant_SNPs)
if resistant_eventlist[0] == '2':
count_resistant_trans[i] += 1
else:
count_resistant_acq[i] += 1
if len(resistant_eventlist) > 1:
count_resistant_mevent[i] += 1
for sample in unresistant_sample:
count_unresistant[i] += 1
resistant_eventlist = get_resistant_eventlist(patients, sample, resistant_SNPs)
if len(resistant_eventlist) > 0:
count_unresistant_oncere[i] += 1
count_sampling_pd = pandas.Series(count_sampling)
count_resistant_pd = pandas.Series(count_resistant)
count_unresistant_pd = pandas.Series(count_unresistant)
count_resistant_trans_pd = pandas.Series(count_resistant_trans)
count_resistant_acq_pd = pandas.Series(count_resistant_acq)
count_resistant_mevent_pd = pandas.Series(count_resistant_mevent)
count_unresistant_oncere_pd = pandas.Series(count_unresistant_oncere)
count_sampling_mean = count_sampling_pd.mean()
count_resistant_mean = count_resistant_pd.mean()
count_resistant_std = count_resistant_pd.std()
resistant_ratio = count_resistant_mean / count_sampling_mean *100
count_resistant_trans_mean = count_resistant_trans_pd.mean()
count_resistant_trans_std = count_resistant_trans_pd.std()
resistant_trans_ratio = count_resistant_trans_mean / count_sampling_mean *100
count_resistant_acq_mean = count_resistant_acq_pd.mean()
count_resistant_acq_std = count_resistant_acq_pd.std()
resistant_acq_ratio = count_resistant_acq_mean / count_sampling_mean *100
count_resistant_mevent_mean = count_resistant_mevent_pd.mean()
count_resistant_mevent_std = count_resistant_mevent_pd.std()
if not count_resistant_mean == 0:
resistant_mevent_ratio = count_resistant_mevent_mean / count_resistant_mean *100
else:
resistant_mevent_ratio = 0
count_unresistant_mean = count_unresistant_pd.mean()
count_unresistant_oncere_mean = count_unresistant_oncere_pd.mean()
count_unresistant_oncere_std = count_unresistant_oncere_pd.std()
output_line = str(t) + " " + str(beta) + " " + str(P_resi) + " " + str(gama) \
+ "\t" + str(count_sampling_mean) \
+ "\t" + str(count_resistant_mean) + "\t" + str(count_resistant_std) + "\t" + str(resistant_ratio) \
+ "\t" + str(count_resistant_trans_mean) + "\t" + str(count_resistant_trans_std) + "\t" + str(resistant_trans_ratio) \
+ "\t" + str(count_resistant_acq_mean) + "\t" + str(count_resistant_acq_std) + "\t" + str(resistant_acq_ratio) \
+ "\t" + str(count_resistant_mevent_mean) + "\t" + str(count_resistant_mevent_std) + "\t" + str(resistant_mevent_ratio) \
+ "\t" + str(count_unresistant_mean) \
+ "\t" + str(count_unresistant_oncere_mean) + "\t" + str(count_unresistant_oncere_std)+"\n"
statistic_output.writelines(output_line)
    statistic_output.close()
| apache-2.0 |
466152112/scikit-learn | sklearn/linear_model/__init__.py | 270 | 3096 | """
The :mod:`sklearn.linear_model` module implements generalized linear models. It
includes Ridge regression, Bayesian Regression, Lasso and Elastic Net
estimators computed with Least Angle Regression and coordinate descent. It also
implements Stochastic Gradient Descent related algorithms.
"""
# See http://scikit-learn.sourceforge.net/modules/sgd.html and
# http://scikit-learn.sourceforge.net/modules/linear_model.html for
# complete documentation.
from .base import LinearRegression
from .bayes import BayesianRidge, ARDRegression
from .least_angle import (Lars, LassoLars, lars_path, LarsCV, LassoLarsCV,
LassoLarsIC)
from .coordinate_descent import (Lasso, ElasticNet, LassoCV, ElasticNetCV,
lasso_path, enet_path, MultiTaskLasso,
MultiTaskElasticNet, MultiTaskElasticNetCV,
MultiTaskLassoCV)
from .sgd_fast import Hinge, Log, ModifiedHuber, SquaredLoss, Huber
from .stochastic_gradient import SGDClassifier, SGDRegressor
from .ridge import (Ridge, RidgeCV, RidgeClassifier, RidgeClassifierCV,
ridge_regression)
from .logistic import (LogisticRegression, LogisticRegressionCV,
logistic_regression_path)
from .omp import (orthogonal_mp, orthogonal_mp_gram, OrthogonalMatchingPursuit,
OrthogonalMatchingPursuitCV)
from .passive_aggressive import PassiveAggressiveClassifier
from .passive_aggressive import PassiveAggressiveRegressor
from .perceptron import Perceptron
from .randomized_l1 import (RandomizedLasso, RandomizedLogisticRegression,
lasso_stability_path)
from .ransac import RANSACRegressor
from .theil_sen import TheilSenRegressor
__all__ = ['ARDRegression',
'BayesianRidge',
'ElasticNet',
'ElasticNetCV',
'Hinge',
'Huber',
'Lars',
'LarsCV',
'Lasso',
'LassoCV',
'LassoLars',
'LassoLarsCV',
'LassoLarsIC',
'LinearRegression',
'Log',
'LogisticRegression',
'LogisticRegressionCV',
'ModifiedHuber',
'MultiTaskElasticNet',
'MultiTaskElasticNetCV',
'MultiTaskLasso',
'MultiTaskLassoCV',
'OrthogonalMatchingPursuit',
'OrthogonalMatchingPursuitCV',
'PassiveAggressiveClassifier',
'PassiveAggressiveRegressor',
'Perceptron',
'RandomizedLasso',
'RandomizedLogisticRegression',
'Ridge',
'RidgeCV',
'RidgeClassifier',
'RidgeClassifierCV',
'SGDClassifier',
'SGDRegressor',
'SquaredLoss',
'TheilSenRegressor',
'enet_path',
'lars_path',
'lasso_path',
'lasso_stability_path',
'logistic_regression_path',
'orthogonal_mp',
'orthogonal_mp_gram',
'ridge_regression',
'RANSACRegressor']
| bsd-3-clause |
vlas-sokolov/pyspeckit | docs/sphinxext/inheritance_diagram.py | 98 | 13648 | """
Defines a docutils directive for inserting inheritance diagrams.
Provide the directive with one or more classes or modules (separated
by whitespace). For modules, all of the classes in that module will
be used.
Example::
Given the following classes:
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass
class E(B): pass
.. inheritance-diagram:: D E
Produces a graph like the following:
      A
     / \
    B   C
   / \ /
  E   D
The graph is inserted as a PNG+image map into HTML and a PDF in
LaTeX.
"""
import inspect
import os
import re
import subprocess
try:
from hashlib import md5
except ImportError:
from md5 import md5
from docutils.nodes import Body, Element
from docutils.parsers.rst import directives
from sphinx.roles import xfileref_role
def my_import(name):
"""Module importer - taken from the python documentation.
This function allows importing names with dots in them."""
mod = __import__(name)
components = name.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
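# A hedged illustration of why my_import exists: a bare __import__ of a dotted
# name returns the top-level package, while my_import returns the leaf module.
# This helper is illustrative only and unused by the extension.
def _my_import_demo():
    import os
    leaf = my_import('os.path')   # the os.path submodule itself
    top = __import__('os.path')   # plain __import__ stops at 'os'
    return leaf is os.path, top is os   # expected: (True, True)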
class DotException(Exception):
pass
class InheritanceGraph(object):
"""
Given a list of classes, determines the set of classes that
they inherit from all the way to the root "object", and then
is able to generate a graphviz dot graph from them.
"""
def __init__(self, class_names, show_builtins=False):
"""
*class_names* is a list of child classes to show bases from.
If *show_builtins* is True, then Python builtins will be shown
in the graph.
"""
self.class_names = class_names
self.classes = self._import_classes(class_names)
self.all_classes = self._all_classes(self.classes)
if len(self.all_classes) == 0:
raise ValueError("No classes found for inheritance diagram")
self.show_builtins = show_builtins
py_sig_re = re.compile(r'''^([\w.]*\.)? # class names
(\w+) \s* $ # optionally arguments
''', re.VERBOSE)
def _import_class_or_module(self, name):
"""
Import a class using its fully-qualified *name*.
"""
try:
path, base = self.py_sig_re.match(name).groups()
except:
raise ValueError(
"Invalid class or module '%s' specified for inheritance diagram" % name)
fullname = (path or '') + base
path = (path and path.rstrip('.'))
if not path:
path = base
try:
module = __import__(path, None, None, [])
# We must do an import of the fully qualified name. Otherwise if a
# subpackage 'a.b' is requested where 'import a' does NOT provide
# 'a.b' automatically, then 'a.b' will not be found below. This
# second call will force the equivalent of 'import a.b' to happen
# after the top-level import above.
my_import(fullname)
except ImportError:
raise ValueError(
"Could not import class or module '%s' specified for inheritance diagram" % name)
try:
todoc = module
for comp in fullname.split('.')[1:]:
todoc = getattr(todoc, comp)
except AttributeError:
raise ValueError(
"Could not find class or module '%s' specified for inheritance diagram" % name)
# If a class, just return it
if inspect.isclass(todoc):
return [todoc]
elif inspect.ismodule(todoc):
classes = []
for cls in todoc.__dict__.values():
if inspect.isclass(cls) and cls.__module__ == todoc.__name__:
classes.append(cls)
return classes
raise ValueError(
"'%s' does not resolve to a class or module" % name)
def _import_classes(self, class_names):
"""
Import a list of classes.
"""
classes = []
for name in class_names:
classes.extend(self._import_class_or_module(name))
return classes
def _all_classes(self, classes):
"""
Return a list of all classes that are ancestors of *classes*.
"""
all_classes = {}
def recurse(cls):
all_classes[cls] = None
for c in cls.__bases__:
if c not in all_classes:
recurse(c)
for cls in classes:
recurse(cls)
return all_classes.keys()
def class_name(self, cls, parts=0):
"""
Given a class object, return a fully-qualified name. This
works for things I've tested in matplotlib so far, but may not
be completely general.
"""
module = cls.__module__
if module == '__builtin__':
fullname = cls.__name__
else:
fullname = "%s.%s" % (module, cls.__name__)
if parts == 0:
return fullname
name_parts = fullname.split('.')
return '.'.join(name_parts[-parts:])
def get_all_class_names(self):
"""
Get all of the class names involved in the graph.
"""
return [self.class_name(x) for x in self.all_classes]
# These are the default options for graphviz
default_graph_options = {
"rankdir": "LR",
"size": '"8.0, 12.0"'
}
default_node_options = {
"shape": "box",
"fontsize": 10,
"height": 0.25,
"fontname": "Vera Sans, DejaVu Sans, Liberation Sans, Arial, Helvetica, sans",
"style": '"setlinewidth(0.5)"'
}
default_edge_options = {
"arrowsize": 0.5,
"style": '"setlinewidth(0.5)"'
}
def _format_node_options(self, options):
return ','.join(["%s=%s" % x for x in options.items()])
def _format_graph_options(self, options):
return ''.join(["%s=%s;\n" % x for x in options.items()])
def generate_dot(self, fd, name, parts=0, urls={},
graph_options={}, node_options={},
edge_options={}):
"""
Generate a graphviz dot graph from the classes that
were passed in to __init__.
*fd* is a Python file-like object to write to.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
*graph_options*, *node_options*, *edge_options* are
dictionaries containing key/value pairs to pass on as graphviz
properties.
"""
g_options = self.default_graph_options.copy()
g_options.update(graph_options)
n_options = self.default_node_options.copy()
n_options.update(node_options)
e_options = self.default_edge_options.copy()
e_options.update(edge_options)
fd.write('digraph %s {\n' % name)
fd.write(self._format_graph_options(g_options))
for cls in self.all_classes:
if not self.show_builtins and cls in __builtins__.values():
continue
name = self.class_name(cls, parts)
# Write the node
this_node_options = n_options.copy()
url = urls.get(self.class_name(cls))
if url is not None:
this_node_options['URL'] = '"%s"' % url
fd.write(' "%s" [%s];\n' %
(name, self._format_node_options(this_node_options)))
# Write the edges
for base in cls.__bases__:
if not self.show_builtins and base in __builtins__.values():
continue
base_name = self.class_name(base, parts)
fd.write(' "%s" -> "%s" [%s];\n' %
(base_name, name,
self._format_node_options(e_options)))
fd.write('}\n')
def run_dot(self, args, name, parts=0, urls={},
graph_options={}, node_options={}, edge_options={}):
"""
Run graphviz 'dot' over this graph, returning whatever 'dot'
writes to stdout.
*args* will be passed along as commandline arguments.
*name* is the name of the graph
*urls* is a dictionary mapping class names to http urls
Raises DotException for any of the many os and
installation-related errors that may occur.
"""
try:
dot = subprocess.Popen(['dot'] + list(args),
stdin=subprocess.PIPE, stdout=subprocess.PIPE,
close_fds=True)
except OSError:
raise DotException("Could not execute 'dot'. Are you sure you have 'graphviz' installed?")
except ValueError:
raise DotException("'dot' called with invalid arguments")
except:
raise DotException("Unexpected error calling 'dot'")
self.generate_dot(dot.stdin, name, parts, urls, graph_options,
node_options, edge_options)
dot.stdin.close()
result = dot.stdout.read()
returncode = dot.wait()
if returncode != 0:
raise DotException("'dot' returned the errorcode %d" % returncode)
return result
class inheritance_diagram(Body, Element):
"""
A docutils node to use as a placeholder for the inheritance
diagram.
"""
pass
def inheritance_diagram_directive(name, arguments, options, content, lineno,
content_offset, block_text, state,
state_machine):
"""
Run when the inheritance_diagram directive is first encountered.
"""
node = inheritance_diagram()
class_names = arguments
# Create a graph starting with the list of classes
graph = InheritanceGraph(class_names)
# Create xref nodes for each target of the graph's image map and
# add them to the doc tree so that Sphinx can resolve the
# references to real URLs later. These nodes will eventually be
# removed from the doctree after we're done with them.
for name in graph.get_all_class_names():
refnodes, x = xfileref_role(
'class', ':class:`%s`' % name, name, 0, state)
node.extend(refnodes)
# Store the graph object so we can use it to generate the
# dot file later
node['graph'] = graph
# Store the original content for use as a hash
node['parts'] = options.get('parts', 0)
node['content'] = " ".join(class_names)
return [node]
def get_graph_hash(node):
return md5(node['content'] + str(node['parts'])).hexdigest()[-10:]
def html_output_graph(self, node):
"""
Output the graph for HTML. This will insert a PNG with clickable
image map.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
path = '_images'
dest_path = os.path.join(setup.app.builder.outdir, path)
if not os.path.exists(dest_path):
os.makedirs(dest_path)
png_path = os.path.join(dest_path, name + ".png")
path = setup.app.builder.imgpath
# Create a mapping from fully-qualified class names to URLs.
urls = {}
for child in node:
if child.get('refuri') is not None:
urls[child['reftitle']] = child.get('refuri')
elif child.get('refid') is not None:
urls[child['reftitle']] = '#' + child.get('refid')
# These arguments to dot will save a PNG file to disk and write
# an HTML image map to stdout.
image_map = graph.run_dot(['-Tpng', '-o%s' % png_path, '-Tcmapx'],
name, parts, urls)
return ('<img src="%s/%s.png" usemap="#%s" class="inheritance"/>%s' %
(path, name, name, image_map))
def latex_output_graph(self, node):
"""
Output the graph for LaTeX. This will insert a PDF.
"""
graph = node['graph']
parts = node['parts']
graph_hash = get_graph_hash(node)
name = "inheritance%s" % graph_hash
dest_path = os.path.abspath(os.path.join(setup.app.builder.outdir, '_images'))
if not os.path.exists(dest_path):
os.makedirs(dest_path)
pdf_path = os.path.abspath(os.path.join(dest_path, name + ".pdf"))
graph.run_dot(['-Tpdf', '-o%s' % pdf_path],
name, parts, graph_options={'size': '"6.0,6.0"'})
return '\n\\includegraphics{%s}\n\n' % pdf_path
def visit_inheritance_diagram(inner_func):
"""
This is just a wrapper around html/latex_output_graph to make it
easier to handle errors and insert warnings.
"""
def visitor(self, node):
try:
content = inner_func(self, node)
except DotException, e:
# Insert the exception as a warning in the document
warning = self.document.reporter.warning(str(e), line=node.line)
warning.parent = node
node.children = [warning]
else:
source = self.document.attributes['source']
self.body.append(content)
node.children = []
return visitor
def do_nothing(self, node):
pass
def setup(app):
setup.app = app
setup.confdir = app.confdir
app.add_node(
inheritance_diagram,
latex=(visit_inheritance_diagram(latex_output_graph), do_nothing),
html=(visit_inheritance_diagram(html_output_graph), do_nothing))
app.add_directive(
'inheritance-diagram', inheritance_diagram_directive,
False, (1, 100, 0), parts = directives.nonnegative_int)
| mit |
drewokane/xray | xarray/core/formatting.py | 1 | 9132 | from datetime import datetime, timedelta
import functools
import numpy as np
import pandas as pd
from .options import OPTIONS
from .pycompat import iteritems, unicode_type, bytes_type, dask_array_type
def pretty_print(x, numchars):
"""Given an object `x`, call `str(x)` and format the returned string so
that it is numchars long, padding with trailing spaces or truncating with
ellipses as necessary
"""
s = str(x)
if len(s) > numchars:
return s[:(numchars - 3)] + '...'
else:
return s + ' ' * (numchars - len(s))
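# A hedged illustration of the padding/truncation behaviour above; the sample
# strings are arbitrary and this helper is not used by the module itself.
def _pretty_print_examples():
    padded = pretty_print('time', 8)          # 'time' plus four trailing spaces
    clipped = pretty_print('temperature', 8)  # 'tempe...'
    return padded, clipped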
def wrap_indent(text, start='', length=None):
if length is None:
length = len(start)
indent = '\n' + ' ' * length
return start + indent.join(x for x in text.splitlines())
def _get_indexer_at_least_n_items(shape, n_desired):
assert 0 < n_desired <= np.prod(shape)
cum_items = np.cumprod(shape[::-1])
n_steps = np.argmax(cum_items >= n_desired)
stop = int(np.ceil(float(n_desired) / np.r_[1, cum_items][n_steps]))
indexer = ((0, ) * (len(shape) - 1 - n_steps) + (slice(stop), ) +
(slice(None), ) * n_steps)
return indexer
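# A hedged example of the indexer construction above for a (2, 5) array when
# at least three items are requested; purely illustrative.
def _indexer_example():
    indexer = _get_indexer_at_least_n_items((2, 5), 3)
    # expected: (0, slice(None, 3, None)), i.e. the first three items of the
    # first row, fetched through a single sliced __getitem__ call
    return indexer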
def first_n_items(x, n_desired):
"""Returns the first n_desired items of an array"""
# Unfortunately, we can't just do x.flat[:n_desired] here because x might
# not be a numpy.ndarray. Moreover, access to elements of x could be very
# expensive (e.g. if it's only available over DAP), so go out of our way to
# get them in a single call to __getitem__ using only slices.
if n_desired < 1:
raise ValueError('must request at least one item')
if x.size == 0:
# work around for https://github.com/numpy/numpy/issues/5195
return []
if n_desired < x.size:
indexer = _get_indexer_at_least_n_items(x.shape, n_desired)
x = x[indexer]
return np.asarray(x).flat[:n_desired]
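# A hedged example of first_n_items on a small 2-D array; it relies only on
# the module-level numpy import and is not used elsewhere.
def _first_n_items_example():
    x = np.arange(12).reshape(3, 4)
    return list(first_n_items(x, 5))  # expected: [0, 1, 2, 3, 4]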
def format_timestamp(t):
"""Cast given object to a Timestamp and return a nicely formatted string"""
datetime_str = str(pd.Timestamp(t))
try:
date_str, time_str = datetime_str.split()
except ValueError:
# catch NaT and others that don't split nicely
return datetime_str
else:
if time_str == '00:00:00':
return date_str
else:
return '%sT%s' % (date_str, time_str)
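# A hedged illustration of the timestamp formatting above: midnight collapses
# to a bare date, any other time keeps an ISO 'T' separator. Illustrative only.
def _format_timestamp_examples():
    day_only = format_timestamp('2014-01-01')        # '2014-01-01'
    with_time = format_timestamp('2014-01-01 12:30')  # '2014-01-01T12:30:00'
    return day_only, with_time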
def format_timedelta(t, timedelta_format=None):
    """Cast given object to a Timedelta and return a nicely formatted string"""
timedelta_str = str(pd.Timedelta(t))
try:
days_str, time_str = timedelta_str.split(' days ')
except ValueError:
# catch NaT and others that don't split nicely
return timedelta_str
else:
if timedelta_format == 'date':
return days_str + ' days'
elif timedelta_format == 'time':
return time_str
else:
return timedelta_str
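# A hedged illustration of the three timedelta rendering modes used by
# format_items below; the sample value is arbitrary.
def _format_timedelta_examples():
    td = '1 days 06:00:00'
    return (format_timedelta(td),                           # '1 days 06:00:00'
            format_timedelta(td, timedelta_format='date'),  # '1 days'
            format_timedelta(td, timedelta_format='time'))  # '06:00:00'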
def format_item(x, timedelta_format=None, quote_strings=True):
"""Returns a succinct summary of an object as a string"""
if isinstance(x, (np.datetime64, datetime)):
return format_timestamp(x)
if isinstance(x, (np.timedelta64, timedelta)):
return format_timedelta(x, timedelta_format=timedelta_format)
elif isinstance(x, (unicode_type, bytes_type)):
return repr(x) if quote_strings else x
elif isinstance(x, (float, np.float)):
return '{0:.4}'.format(x)
else:
return str(x)
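# A hedged illustration of the scalar formatting rules above; the inputs are
# arbitrary examples and the helper is unused by the module.
def _format_item_examples():
    return (format_item(3.14159265),                  # '3.142' (4 significant digits)
            format_item('foo'),                       # "'foo'"
            format_item('foo', quote_strings=False))  # 'foo'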
def format_items(x):
    """Returns succinct summaries of all items in a sequence as strings"""
x = np.asarray(x)
timedelta_format = 'datetime'
if np.issubdtype(x.dtype, np.timedelta64):
x = np.asarray(x, dtype='timedelta64[ns]')
day_part = (x[~pd.isnull(x)]
.astype('timedelta64[D]')
.astype('timedelta64[ns]'))
time_needed = x != day_part
day_needed = day_part != np.timedelta64(0, 'ns')
if np.logical_not(day_needed).all():
timedelta_format = 'time'
elif np.logical_not(time_needed).all():
timedelta_format = 'date'
formatted = [format_item(xi, timedelta_format) for xi in x]
return formatted
def format_array_flat(items_ndarray, max_width):
    """Return a formatted string for as many items from the flattened version
    of items_ndarray as will fit within max_width characters
"""
# every item will take up at least two characters, but we always want to
# print at least one item
max_possibly_relevant = max(int(np.ceil(max_width / 2.0)), 1)
relevant_items = first_n_items(items_ndarray, max_possibly_relevant)
pprint_items = format_items(relevant_items)
cum_len = np.cumsum([len(s) + 1 for s in pprint_items]) - 1
if (max_possibly_relevant < items_ndarray.size or
(cum_len > max_width).any()):
end_padding = ' ...'
count = max(np.argmax((cum_len + len(end_padding)) > max_width), 1)
pprint_items = pprint_items[:count]
else:
end_padding = ''
pprint_str = ' '.join(pprint_items) + end_padding
return pprint_str
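# A hedged example of the truncation behaviour above: once the flattened items
# exceed the character budget, the summary ends with an ellipsis.
def _format_array_flat_example():
    return format_array_flat(np.arange(100), 13)  # expected: '0 1 2 3 4 ...'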
def _summarize_var_or_coord(name, var, col_width, show_values=True,
marker=' ', max_width=None):
if max_width is None:
max_width = OPTIONS['display_width']
first_col = pretty_print(' %s %s ' % (marker, name), col_width)
dims_str = '(%s) ' % ', '.join(map(str, var.dims)) if var.dims else ''
front_str = first_col + dims_str + ('%s ' % var.dtype)
if show_values:
values_str = format_array_flat(var, max_width - len(front_str))
else:
values_str = '...'
return front_str + values_str
def _not_remote(var):
    """Helper function to check whether an array is positively identifiable
    as coming from a remote source.
"""
source = var.encoding.get('source')
if source and source.startswith('http') and not var._in_memory:
return False
return True
def summarize_var(name, var, col_width):
show_values = _not_remote(var)
return _summarize_var_or_coord(name, var, col_width, show_values)
def summarize_coord(name, var, col_width):
is_index = name in var.dims
show_values = is_index or _not_remote(var)
marker = '*' if is_index else ' '
return _summarize_var_or_coord(name, var, col_width, show_values, marker)
def _maybe_truncate(obj, maxlen=500):
s = str(obj)
if len(s) > maxlen:
s = s[:(maxlen - 3)] + '...'
return s
def summarize_attr(key, value, col_width=None):
# ignore col_width for now to more clearly distinguish attributes
return ' %s: %s' % (key, _maybe_truncate(value))
EMPTY_REPR = ' *empty*'
def _calculate_col_width(mapping):
max_name_length = max(len(str(k)) for k in mapping) if mapping else 0
col_width = max(max_name_length, 7) + 6
return col_width
def _mapping_repr(mapping, title, summarizer, col_width=None):
if col_width is None:
col_width = _calculate_col_width(mapping)
summary = ['%s:' % title]
if mapping:
summary += [summarizer(k, v, col_width) for k, v in mapping.items()]
else:
summary += [EMPTY_REPR]
return '\n'.join(summary)
coords_repr = functools.partial(_mapping_repr, title='Coordinates',
summarizer=summarize_coord)
vars_repr = functools.partial(_mapping_repr, title='Data variables',
summarizer=summarize_var)
attrs_repr = functools.partial(_mapping_repr, title='Attributes',
summarizer=summarize_attr)
def indexes_repr(indexes):
summary = []
for k, v in indexes.items():
summary.append(wrap_indent(repr(v), '%s: ' % k))
return '\n'.join(summary)
def array_repr(arr):
# used for DataArray, Variable and Coordinate
if hasattr(arr, 'name') and arr.name is not None:
name_str = '%r ' % arr.name
else:
name_str = ''
dim_summary = ', '.join('%s: %s' % (k, v) for k, v
in zip(arr.dims, arr.shape))
summary = ['<xarray.%s %s(%s)>'
% (type(arr).__name__, name_str, dim_summary)]
if isinstance(getattr(arr, 'variable', arr)._data, dask_array_type):
summary.append(repr(arr.data))
elif arr._in_memory or arr.size < 1e5:
summary.append(repr(arr.values))
else:
summary.append('[%s values with dtype=%s]' % (arr.size, arr.dtype))
if hasattr(arr, 'coords'):
if arr.coords:
summary.append(repr(arr.coords))
if arr.attrs:
summary.append(attrs_repr(arr.attrs))
return '\n'.join(summary)
def dataset_repr(ds):
summary = ['<xarray.%s>' % type(ds).__name__]
col_width = _calculate_col_width(ds)
dims_start = pretty_print('Dimensions:', col_width)
all_dim_strings = ['%s: %s' % (k, v) for k, v in iteritems(ds.dims)]
summary.append('%s(%s)' % (dims_start, ', '.join(all_dim_strings)))
summary.append(coords_repr(ds.coords, col_width=col_width))
summary.append(vars_repr(ds.data_vars, col_width=col_width))
if ds.attrs:
summary.append(attrs_repr(ds.attrs))
return '\n'.join(summary)
| apache-2.0 |
soulmachine/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
mblondel/scikit-learn | sklearn/mixture/gmm.py | 9 | 27514 | """
Gaussian Mixture Models.
This implementation corresponds to frequentist (non-Bayesian) formulation
of Gaussian Mixture Models.
"""
# Author: Ron Weiss <ronweiss@gmail.com>
# Fabian Pedregosa <fabian.pedregosa@inria.fr>
# Bertrand Thirion <bertrand.thirion@inria.fr>
import warnings
import numpy as np
from scipy import linalg
from ..base import BaseEstimator
from ..utils import check_random_state, check_array
from ..utils.extmath import logsumexp
from ..utils.validation import check_is_fitted
from .. import cluster
from sklearn.externals.six.moves import zip
EPS = np.finfo(float).eps
def log_multivariate_normal_density(X, means, covars, covariance_type='diag'):
"""Compute the log probability under a multivariate Gaussian distribution.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row corresponds to a
single data point.
means : array_like, shape (n_components, n_features)
List of n_features-dimensional mean vectors for n_components Gaussians.
Each row corresponds to a single mean vector.
covars : array_like
List of n_components covariance parameters for each Gaussian. The shape
depends on `covariance_type`:
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
covariance_type : string
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
Returns
-------
lpr : array_like, shape (n_samples, n_components)
Array containing the log probabilities of each data point in
X under each of the n_components multivariate Gaussian distributions.
"""
log_multivariate_normal_density_dict = {
'spherical': _log_multivariate_normal_density_spherical,
'tied': _log_multivariate_normal_density_tied,
'diag': _log_multivariate_normal_density_diag,
'full': _log_multivariate_normal_density_full}
return log_multivariate_normal_density_dict[covariance_type](
X, means, covars)
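# A hedged usage sketch of the dispatcher above for a diagonal-covariance
# model; the toy numbers are assumptions for illustration only and this helper
# is not part of the scikit-learn API.
def _log_density_example():
    X = np.array([[0.0], [1.0]])
    means = np.array([[0.0], [10.0]])
    covars = np.array([[1.0], [1.0]])
    lpr = log_multivariate_normal_density(X, means, covars, 'diag')
    return lpr.shape  # (n_samples, n_components) == (2, 2)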
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1,
random_state=None):
"""Generate random samples from a Gaussian distribution.
Parameters
----------
mean : array_like, shape (n_features,)
Mean of the distribution.
covar : array_like, optional
Covariance of the distribution. The shape depends on `covariance_type`:
scalar if 'spherical',
(n_features) if 'diag',
(n_features, n_features) if 'tied', or 'full'
covariance_type : string, optional
Type of the covariance parameters. Must be one of
'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array, shape (n_features, n_samples)
Randomly generated sample
"""
rng = check_random_state(random_state)
n_dim = len(mean)
rand = rng.randn(n_dim, n_samples)
if n_samples == 1:
rand.shape = (n_dim,)
if covariance_type == 'spherical':
rand *= np.sqrt(covar)
elif covariance_type == 'diag':
rand = np.dot(np.diag(np.sqrt(covar)), rand)
else:
s, U = linalg.eigh(covar)
s.clip(0, out=s) # get rid of tiny negatives
np.sqrt(s, out=s)
U *= s
rand = np.dot(U, rand)
return (rand.T + mean).T
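# A hedged usage sketch of sample_gaussian for a 2-D spherical component; the
# parameters are illustrative and this helper is not part of the public API.
def _sample_gaussian_example():
    samples = sample_gaussian(np.zeros(2), 1.0, 'spherical',
                              n_samples=5, random_state=0)
    return samples.shape  # (n_features, n_samples) == (2, 5)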
class GMM(BaseEstimator):
"""Gaussian Mixture Model
Representation of a Gaussian mixture model probability distribution.
This class allows for easy evaluation of, sampling from, and
maximum-likelihood estimation of the parameters of a GMM distribution.
Initializes parameters such that every mixture component has zero
mean and identity covariance.
Parameters
----------
n_components : int, optional
Number of mixture components. Defaults to 1.
covariance_type : string, optional
String describing the type of covariance parameters to
use. Must be one of 'spherical', 'tied', 'diag', 'full'.
Defaults to 'diag'.
random_state: RandomState or an int seed (None by default)
A random number generator instance
min_covar : float, optional
Floor on the diagonal of the covariance matrix to prevent
overfitting. Defaults to 1e-3.
tol : float, optional
Convergence threshold. EM iterations will stop when average
gain in log-likelihood is below this threshold. Defaults to 1e-3.
n_iter : int, optional
Number of EM iterations to perform.
n_init : int, optional
        Number of initializations to perform. The best result is kept.
params : string, optional
Controls which parameters are updated in the training
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
init_params : string, optional
Controls which parameters are updated in the initialization
process. Can contain any combination of 'w' for weights,
'm' for means, and 'c' for covars. Defaults to 'wmc'.
Attributes
----------
weights_ : array, shape (`n_components`,)
This attribute stores the mixing weights for each mixture component.
means_ : array, shape (`n_components`, `n_features`)
Mean parameters for each mixture component.
covars_ : array
Covariance parameters for each mixture component. The shape
depends on `covariance_type`::
(n_components, n_features) if 'spherical',
(n_features, n_features) if 'tied',
(n_components, n_features) if 'diag',
(n_components, n_features, n_features) if 'full'
converged_ : bool
True when convergence was reached in fit(), False otherwise.
See Also
--------
DPGMM : Infinite gaussian mixture model, using the dirichlet
process, fit with a variational algorithm
VBGMM : Finite gaussian mixture model fit with a variational
algorithm, better for situations where there might be too little
data to get a good estimate of the covariance matrix.
Examples
--------
>>> import numpy as np
>>> from sklearn import mixture
>>> np.random.seed(1)
>>> g = mixture.GMM(n_components=2)
>>> # Generate random observations with two modes centered on 0
>>> # and 10 to use for training.
>>> obs = np.concatenate((np.random.randn(100, 1),
... 10 + np.random.randn(300, 1)))
>>> g.fit(obs) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001)
>>> np.round(g.weights_, 2)
array([ 0.75, 0.25])
>>> np.round(g.means_, 2)
array([[ 10.05],
[ 0.06]])
>>> np.round(g.covars_, 2) #doctest: +SKIP
array([[[ 1.02]],
[[ 0.96]]])
>>> g.predict([[0], [2], [9], [10]]) #doctest: +ELLIPSIS
array([1, 1, 0, 0]...)
>>> np.round(g.score([[0], [2], [9], [10]]), 2)
array([-2.19, -4.58, -1.75, -1.21])
>>> # Refit the model on new data (initial parameters remain the
>>> # same), this time with an even split between the two modes.
>>> g.fit(20 * [[0]] + 20 * [[10]]) # doctest: +NORMALIZE_WHITESPACE
GMM(covariance_type='diag', init_params='wmc', min_covar=0.001,
n_components=2, n_init=1, n_iter=100, params='wmc',
random_state=None, thresh=None, tol=0.001)
>>> np.round(g.weights_, 2)
array([ 0.5, 0.5])
"""
def __init__(self, n_components=1, covariance_type='diag',
random_state=None, thresh=None, tol=1e-3, min_covar=1e-3,
n_iter=100, n_init=1, params='wmc', init_params='wmc'):
if thresh is not None:
warnings.warn("'thresh' has been replaced by 'tol' in 0.16 "
" and will be removed in 0.18.",
DeprecationWarning)
self.n_components = n_components
self.covariance_type = covariance_type
self.thresh = thresh
self.tol = tol
self.min_covar = min_covar
self.random_state = random_state
self.n_iter = n_iter
self.n_init = n_init
self.params = params
self.init_params = init_params
if covariance_type not in ['spherical', 'tied', 'diag', 'full']:
raise ValueError('Invalid value for covariance_type: %s' %
covariance_type)
if n_init < 1:
raise ValueError('GMM estimation requires at least one run')
self.weights_ = np.ones(self.n_components) / self.n_components
# flag to indicate exit status of fit() method: converged (True) or
# n_iter reached (False)
self.converged_ = False
def _get_covars(self):
"""Covariance parameters for each mixture component.
The shape depends on `cvtype`::
            (`n_states`, `n_features`) if 'spherical',
(`n_features`, `n_features`) if 'tied',
(`n_states`, `n_features`) if 'diag',
(`n_states`, `n_features`, `n_features`) if 'full'
"""
if self.covariance_type == 'full':
return self.covars_
elif self.covariance_type == 'diag':
return [np.diag(cov) for cov in self.covars_]
elif self.covariance_type == 'tied':
return [self.covars_] * self.n_components
elif self.covariance_type == 'spherical':
return [np.diag(cov) for cov in self.covars_]
def _set_covars(self, covars):
"""Provide values for covariance"""
covars = np.asarray(covars)
_validate_covars(covars, self.covariance_type, self.n_components)
self.covars_ = covars
def score_samples(self, X):
"""Return the per-sample likelihood of the data under the model.
Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.
Parameters
----------
X: array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X.
responsibilities : array_like, shape (n_samples, n_components)
Posterior probabilities of each mixture component for each
observation
"""
check_is_fitted(self, 'means_')
X = check_array(X)
if X.ndim == 1:
X = X[:, np.newaxis]
if X.size == 0:
return np.array([]), np.empty((0, self.n_components))
if X.shape[1] != self.means_.shape[1]:
raise ValueError('The shape of X is not compatible with self')
lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
self.covariance_type)
+ np.log(self.weights_))
logprob = logsumexp(lpr, axis=1)
responsibilities = np.exp(lpr - logprob[:, np.newaxis])
return logprob, responsibilities
def score(self, X, y=None):
"""Compute the log probability under the model.
Parameters
----------
X : array_like, shape (n_samples, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
Returns
-------
logprob : array_like, shape (n_samples,)
Log probabilities of each data point in X
"""
logprob, _ = self.score_samples(X)
return logprob
def predict(self, X):
"""Predict label for data.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
C : array, shape = (n_samples,)
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities.argmax(axis=1)
def predict_proba(self, X):
"""Predict posterior probability of data under each Gaussian
in the model.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
responsibilities : array-like, shape = (n_samples, n_components)
Returns the probability of the sample for each Gaussian
(state) in the model.
"""
logprob, responsibilities = self.score_samples(X)
return responsibilities
def sample(self, n_samples=1, random_state=None):
"""Generate random samples from the model.
Parameters
----------
n_samples : int, optional
Number of samples to generate. Defaults to 1.
Returns
-------
X : array_like, shape (n_samples, n_features)
List of samples
"""
check_is_fitted(self, 'means_')
if random_state is None:
random_state = self.random_state
random_state = check_random_state(random_state)
weight_cdf = np.cumsum(self.weights_)
X = np.empty((n_samples, self.means_.shape[1]))
rand = random_state.rand(n_samples)
# decide which component to use for each sample
comps = weight_cdf.searchsorted(rand)
# for each component, generate all needed samples
for comp in range(self.n_components):
# occurrences of current component in X
comp_in_X = (comp == comps)
# number of those occurrences
num_comp_in_X = comp_in_X.sum()
if num_comp_in_X > 0:
if self.covariance_type == 'tied':
cv = self.covars_
elif self.covariance_type == 'spherical':
cv = self.covars_[comp][0]
else:
cv = self.covars_[comp]
X[comp_in_X] = sample_gaussian(
self.means_[comp], cv, self.covariance_type,
num_comp_in_X, random_state=random_state).T
return X
def fit(self, X, y=None):
"""Estimate model parameters with the expectation-maximization
algorithm.
        An initialization step is performed before entering the EM
algorithm. If you want to avoid this step, set the keyword
argument init_params to the empty string '' when creating the
GMM object. Likewise, if you would like just to do an
initialization, set n_iter=0.
Parameters
----------
X : array_like, shape (n, n_features)
List of n_features-dimensional data points. Each row
corresponds to a single data point.
"""
# initialization step
X = check_array(X, dtype=np.float64)
if X.shape[0] < self.n_components:
raise ValueError(
'GMM estimation with %s components, but got only %s samples' %
(self.n_components, X.shape[0]))
max_log_prob = -np.infty
for _ in range(self.n_init):
if 'm' in self.init_params or not hasattr(self, 'means_'):
self.means_ = cluster.KMeans(
n_clusters=self.n_components,
random_state=self.random_state).fit(X).cluster_centers_
if 'w' in self.init_params or not hasattr(self, 'weights_'):
self.weights_ = np.tile(1.0 / self.n_components,
self.n_components)
if 'c' in self.init_params or not hasattr(self, 'covars_'):
cv = np.cov(X.T) + self.min_covar * np.eye(X.shape[1])
if not cv.shape:
cv.shape = (1, 1)
self.covars_ = \
distribute_covar_matrix_to_match_covariance_type(
cv, self.covariance_type, self.n_components)
# EM algorithms
current_log_likelihood = None
# reset self.converged_ to False
self.converged_ = False
# this line should be removed when 'thresh' is removed in v0.18
tol = (self.tol if self.thresh is None
else self.thresh / float(X.shape[0]))
for i in range(self.n_iter):
prev_log_likelihood = current_log_likelihood
# Expectation step
log_likelihoods, responsibilities = self.score_samples(X)
current_log_likelihood = log_likelihoods.mean()
# Check for convergence.
                # (should compare to self.tol when deprecated 'thresh' is
# removed in v0.18)
if prev_log_likelihood is not None:
change = abs(current_log_likelihood - prev_log_likelihood)
if change < tol:
self.converged_ = True
break
# Maximization step
self._do_mstep(X, responsibilities, self.params,
self.min_covar)
# if the results are better, keep it
if self.n_iter:
if current_log_likelihood > max_log_prob:
max_log_prob = current_log_likelihood
best_params = {'weights': self.weights_,
'means': self.means_,
'covars': self.covars_}
        # raise if no initialization produced a finite likelihood, i.e. the
        # likelihood computation failed for every restart.
if np.isneginf(max_log_prob) and self.n_iter:
raise RuntimeError(
"EM algorithm was never able to compute a valid likelihood " +
"given initial parameters. Try different init parameters " +
"(or increasing n_init) or check for degenerate data.")
# self.n_iter == 0 occurs when using GMM within HMM
if self.n_iter:
self.covars_ = best_params['covars']
self.means_ = best_params['means']
self.weights_ = best_params['weights']
return self
def _do_mstep(self, X, responsibilities, params, min_covar=0):
""" Perform the Mstep of the EM algorithm and return the class weihgts.
"""
weights = responsibilities.sum(axis=0)
weighted_X_sum = np.dot(responsibilities.T, X)
inverse_weights = 1.0 / (weights[:, np.newaxis] + 10 * EPS)
if 'w' in params:
self.weights_ = (weights / (weights.sum() + 10 * EPS) + EPS)
if 'm' in params:
self.means_ = weighted_X_sum * inverse_weights
if 'c' in params:
covar_mstep_func = _covar_mstep_funcs[self.covariance_type]
self.covars_ = covar_mstep_func(
self, X, responsibilities, weighted_X_sum, inverse_weights,
min_covar)
return weights
def _n_parameters(self):
"""Return the number of free parameters in the model."""
ndim = self.means_.shape[1]
if self.covariance_type == 'full':
cov_params = self.n_components * ndim * (ndim + 1) / 2.
elif self.covariance_type == 'diag':
cov_params = self.n_components * ndim
elif self.covariance_type == 'tied':
cov_params = ndim * (ndim + 1) / 2.
elif self.covariance_type == 'spherical':
cov_params = self.n_components
mean_params = ndim * self.n_components
return int(cov_params + mean_params + self.n_components - 1)
def bic(self, X):
"""Bayesian information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
bic: float (the lower the better)
"""
return (-2 * self.score(X).sum() +
self._n_parameters() * np.log(X.shape[0]))
def aic(self, X):
"""Akaike information criterion for the current model fit
and the proposed data
Parameters
----------
X : array of shape(n_samples, n_dimensions)
Returns
-------
aic: float (the lower the better)
"""
return - 2 * self.score(X).sum() + 2 * self._n_parameters()
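# A minimal, hedged usage sketch of the mixture estimator defined above. The
# helper name `_gmm_usage_sketch` is illustrative only and the function is
# never called at import time; it assumes the estimator class above is named
# ``GMM`` and accepts the `n_components`, `covariance_type` and `random_state`
# keyword arguments used throughout its methods.
def _gmm_usage_sketch():
    import numpy as np
    rng = np.random.RandomState(0)
    # two well-separated blobs in 2-D
    X = np.concatenate([rng.randn(100, 2), 10 + rng.randn(100, 2)])
    g = GMM(n_components=2, covariance_type='full', random_state=0)
    g.fit(X)                 # EM, preceded by the k-means initialization step
    samples = g.sample(5)    # draw new points from the fitted mixture
    return g.bic(X), samples.shape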
#########################################################################
## some helper routines
#########################################################################
def _log_multivariate_normal_density_diag(X, means, covars):
"""Compute Gaussian log-density at X for a diagonal model"""
n_samples, n_dim = X.shape
lpr = -0.5 * (n_dim * np.log(2 * np.pi) + np.sum(np.log(covars), 1)
+ np.sum((means ** 2) / covars, 1)
- 2 * np.dot(X, (means / covars).T)
+ np.dot(X ** 2, (1.0 / covars).T))
return lpr
def _log_multivariate_normal_density_spherical(X, means, covars):
"""Compute Gaussian log-density at X for a spherical model"""
cv = covars.copy()
if covars.ndim == 1:
cv = cv[:, np.newaxis]
if covars.shape[1] == 1:
cv = np.tile(cv, (1, X.shape[-1]))
return _log_multivariate_normal_density_diag(X, means, cv)
def _log_multivariate_normal_density_tied(X, means, covars):
"""Compute Gaussian log-density at X for a tied model"""
cv = np.tile(covars, (means.shape[0], 1, 1))
return _log_multivariate_normal_density_full(X, means, cv)
def _log_multivariate_normal_density_full(X, means, covars, min_covar=1.e-7):
"""Log probability for full covariance matrices.
"""
n_samples, n_dim = X.shape
nmix = len(means)
log_prob = np.empty((n_samples, nmix))
for c, (mu, cv) in enumerate(zip(means, covars)):
try:
cv_chol = linalg.cholesky(cv, lower=True)
except linalg.LinAlgError:
            # The model is most probably stuck in a component with too
            # few observations; we need to reinitialize this component
cv_chol = linalg.cholesky(cv + min_covar * np.eye(n_dim),
lower=True)
cv_log_det = 2 * np.sum(np.log(np.diagonal(cv_chol)))
cv_sol = linalg.solve_triangular(cv_chol, (X - mu).T, lower=True).T
log_prob[:, c] = - .5 * (np.sum(cv_sol ** 2, axis=1) +
n_dim * np.log(2 * np.pi) + cv_log_det)
return log_prob
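# A small numpy-only sketch of the density computed above: with a zero mean
# and identity covariance the log-density reduces to
# -0.5 * (n_dim * log(2 * pi) + ||x||**2). The helper name is illustrative
# and the function is never called at import time.
def _log_density_full_sketch():
    rng = np.random.RandomState(0)
    X = rng.randn(4, 3)
    means = np.zeros((1, 3))
    covars = np.eye(3)[np.newaxis, :, :]
    lp = _log_multivariate_normal_density_full(X, means, covars)
    expected = -0.5 * (3 * np.log(2 * np.pi) + (X ** 2).sum(axis=1))
    return np.allclose(lp[:, 0], expected)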
def _validate_covars(covars, covariance_type, n_components):
"""Do basic checks on matrix covariance sizes and values
"""
from scipy import linalg
if covariance_type == 'spherical':
if len(covars) != n_components:
raise ValueError("'spherical' covars have length n_components")
elif np.any(covars <= 0):
raise ValueError("'spherical' covars must be non-negative")
elif covariance_type == 'tied':
if covars.shape[0] != covars.shape[1]:
raise ValueError("'tied' covars must have shape (n_dim, n_dim)")
elif (not np.allclose(covars, covars.T)
or np.any(linalg.eigvalsh(covars) <= 0)):
raise ValueError("'tied' covars must be symmetric, "
"positive-definite")
elif covariance_type == 'diag':
if len(covars.shape) != 2:
raise ValueError("'diag' covars must have shape "
"(n_components, n_dim)")
elif np.any(covars <= 0):
raise ValueError("'diag' covars must be non-negative")
elif covariance_type == 'full':
if len(covars.shape) != 3:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
elif covars.shape[1] != covars.shape[2]:
raise ValueError("'full' covars must have shape "
"(n_components, n_dim, n_dim)")
for n, cv in enumerate(covars):
if (not np.allclose(cv, cv.T)
or np.any(linalg.eigvalsh(cv) <= 0)):
raise ValueError("component %d of 'full' covars must be "
"symmetric, positive-definite" % n)
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
def distribute_covar_matrix_to_match_covariance_type(
tied_cv, covariance_type, n_components):
"""Create all the covariance matrices from a given template
"""
if covariance_type == 'spherical':
cv = np.tile(tied_cv.mean() * np.ones(tied_cv.shape[1]),
(n_components, 1))
elif covariance_type == 'tied':
cv = tied_cv
elif covariance_type == 'diag':
cv = np.tile(np.diag(tied_cv), (n_components, 1))
elif covariance_type == 'full':
cv = np.tile(tied_cv, (n_components, 1, 1))
else:
raise ValueError("covariance_type must be one of " +
"'spherical', 'tied', 'diag', 'full'")
return cv
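# A brief sketch of the shapes produced by the helper above from a single
# (n_dim, n_dim) template covariance; the helper name is illustrative and the
# function is never called at import time.
def _distribute_covar_shapes_sketch():
    tied_cv = np.eye(2)
    shapes = {}
    for ct in ('spherical', 'tied', 'diag', 'full'):
        shapes[ct] = distribute_covar_matrix_to_match_covariance_type(
            tied_cv, ct, 3).shape
    # expected: spherical (3, 2), tied (2, 2), diag (3, 2), full (3, 2, 2)
    return shapes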
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for diagonal cases"""
avg_X2 = np.dot(responsibilities.T, X * X) * norm
avg_means2 = gmm.means_ ** 2
avg_X_means = gmm.means_ * weighted_X_sum * norm
return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
def _covar_mstep_spherical(*args):
"""Performing the covariance M step for spherical cases"""
cv = _covar_mstep_diag(*args)
return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
"""Performing the covariance M step for full cases"""
# Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
n_features = X.shape[1]
cv = np.empty((gmm.n_components, n_features, n_features))
for c in range(gmm.n_components):
post = responsibilities[:, c]
mu = gmm.means_[c]
diff = X - mu
with np.errstate(under='ignore'):
# Underflow Errors in doing post * X.T are not important
avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
cv[c] = avg_cv + min_covar * np.eye(n_features)
return cv
def _covar_mstep_tied(gmm, X, responsibilities, weighted_X_sum, norm,
min_covar):
# Eq. 15 from K. Murphy, "Fitting a Conditional Linear Gaussian
# Distribution"
avg_X2 = np.dot(X.T, X)
avg_means2 = np.dot(gmm.means_.T, weighted_X_sum)
out = avg_X2 - avg_means2
out *= 1. / X.shape[0]
out.flat[::len(out) + 1] += min_covar
return out
_covar_mstep_funcs = {'spherical': _covar_mstep_spherical,
'diag': _covar_mstep_diag,
'tied': _covar_mstep_tied,
'full': _covar_mstep_full,
}
| bsd-3-clause |
owenjhwilliams/ASIIT | PODutils.py | 1 | 15849 | '''
This is a set of utility funcitons useful for analysing POD data. Plotting and data reorganization functions
'''
#Plot 2D POD modes
def plotPODmodes2D(X,Y,Umodes,Vmodes,plotModes,saveFolder = None):
'''
Plot 2D POD modes
Inputs:
X - 2D array with columns constant
Y - 2D array with rows constant
Umodes - 3D array with Umodes.shape[2] = the total number of modes plotted
    Vmodes - 3D array with Vmodes.shape[2] = the total number of modes plotted
plotModes = a list of modes to be plotted
Output:
Plots the modes corresponding to plotModes
'''
import matplotlib.pyplot as plt
#assert plotModes.max()<=Umodes.shape[2], 'You asked for more modes than were calculated'
    assert Umodes.shape[2]==Vmodes.shape[2], "There are different numbers of U and V modes. That's not right..."
for i in plotModes:
f, ax = plt.subplots(1,2)
f.set_figwidth(18)
im1 = ax[0].pcolor(X,Y,Umodes[:,:,i],cmap='RdBu_r');
im2 = ax[1].pcolor(X,Y,Vmodes[:,:,i],cmap='RdBu_r');
ax[0].set_title('U - #' + str(i+1))
ax[0].set_aspect('equal')
ax[0].set_xlim([X.min(),X.max()])
ax[0].set_ylabel('$y/\delta$', fontsize=20)
ax[0].set_xlabel('$x/\delta$', fontsize=20)
ax[0].tick_params(axis='x', labelsize=12)
ax[0].tick_params(axis='y', labelsize=12)
ax[1].set_title('V - #' + str(i+1))
ax[1].set_aspect('equal')
ax[1].set_xlim([X.min(),X.max()])
ax[1].set_ylabel('$y/\delta$', fontsize=20)
ax[1].set_xlabel('$x/\delta$', fontsize=20)
ax[1].tick_params(axis='x', labelsize=12)
ax[1].tick_params(axis='y', labelsize=12)
cbar1 = f.colorbar(im1,ax=ax[0])
im1.set_clim(-1*max(map(abs,cbar1.get_clim())), max(map(abs,cbar1.get_clim())))
cbar2 = f.colorbar(im2,ax=ax[1])
im2.set_clim(-1*max(map(abs,cbar2.get_clim())), max(map(abs,cbar2.get_clim())))
if saveFolder is not None:
f.savefig(saveFolder + '/Mode' + str(i+1) + '.tif', transparent=True, bbox_inches='tight', pad_inches=0)
del im1,im2,cbar1,cbar2
#Plot 3D POD modes
def plotPODmodes3D(X,Y,Umodes,Vmodes,Wmodes,plotModes,saveFolder=None):
'''
Plot 2D POD modes
Inputs:
X - 2D array with columns constant
Y - 2D array with rows constant
Umodes - 3D array with Umodes.shape[2] = the total number of modes plotted
    Vmodes - 3D array with Vmodes.shape[2] = the total number of modes plotted
    Wmodes - 3D array with Wmodes.shape[2] = the total number of modes plotted
plotModes = a list of modes to be plotted
Output:
Plots the modes corresponding to plotModes
'''
import matplotlib.pyplot as plt
#assert plotModes.max()<=Umodes.shape[2], 'You asked for more modes than were calculated'
    assert Umodes.shape[2]==Vmodes.shape[2], "There are different numbers of U and V modes. That's not right..."
    assert Umodes.shape[2]==Wmodes.shape[2], "There are different numbers of U and W modes. That's not right..."
for i in plotModes:
f, ax = plt.subplots(1,3)
f.set_figwidth(18)
im1 = ax[0].pcolor(X,Y,Umodes[:,:,i],cmap='RdBu_r');
im2 = ax[1].pcolor(X,Y,Vmodes[:,:,i],cmap='RdBu_r');
im3 = ax[2].pcolor(X,Y,Wmodes[:,:,i],cmap='RdBu_r');
ax[0].set_title('U - #' + str(i+1))
ax[0].set_aspect('equal')
ax[0].set_xlim([X.min(),X.max()])
ax[0].set_ylabel('y(m)')
ax[0].set_xlabel('x(m)')
ax[1].set_title('V - #' + str(i+1))
ax[1].set_aspect('equal')
ax[1].set_xlim([X.min(),X.max()])
ax[1].set_ylabel('y(m)')
ax[1].set_xlabel('x(m)')
ax[2].set_title('W - #' + str(i+1))
ax[2].set_aspect('equal')
ax[2].set_xlim([X.min(),X.max()])
ax[2].set_ylabel('y(m)')
ax[2].set_xlabel('x(m)')
cbar1 = f.colorbar(im1,ax=ax[0])
im1.set_clim(-1*max(map(abs,cbar1.get_clim())), max(map(abs,cbar1.get_clim())))
cbar2 = f.colorbar(im2,ax=ax[1])
im2.set_clim(-1*max(map(abs,cbar2.get_clim())), max(map(abs,cbar2.get_clim())))
cbar3 = f.colorbar(im3,ax=ax[2])
im3.set_clim(-1*max(map(abs,cbar3.get_clim())), max(map(abs,cbar3.get_clim())))
if saveFolder is not None:
f.savefig(saveFolder + '/Mode' + str(i+1) + '.tif', transparent=True, bbox_inches='tight', pad_inches=0)
del im1,im2,im3,cbar1,cbar2,cbar3
#Reorganize modes matrix so that the modes can be easily plotted
def reconstructPODmodes(modes,uSize,num_modes,numC):
'''
Reconstruct the mode shapes for three component single plane data
Inputs:
    modes - output from mr.compute_POD_matrices_snaps_method
uSize - size of original velocity dataset
num_modes - number of modes calculated by mr.compute_POD_matrices_snaps_method
numC - number of velocity components
Output:
Umodes, Vmodes and optionally Wmodes
'''
import numpy as np
#Rearrange mode data to get mode fields
modeSize = modes.shape
Umodes = modes[0:uSize[0]*uSize[1],:];
Umodes2 = np.zeros((uSize[0],uSize[1],num_modes))
if numC >= 2:
Vmodes = modes[uSize[0]*uSize[1]:2*uSize[0]*uSize[1],:];
Vmodes2 = np.zeros((uSize[0],uSize[1],num_modes))
if numC >= 3:
Wmodes = modes[2*uSize[0]*uSize[1]:modeSize[0]+1,:];
Wmodes2 = np.zeros((uSize[0],uSize[1],num_modes))
Umodes.shape
for i in range(num_modes):
#i=1
Umodes2[:,:,i] = np.reshape(Umodes[:,i],(uSize[0],uSize[1]))
if numC >=2:
Vmodes2[:,:,i] = np.reshape(Vmodes[:,i],(uSize[0],uSize[1]))
if numC >=3:
            Wmodes2[:,:,i] = np.reshape(Wmodes[:,i],(uSize[0],uSize[1]))
#Umodes.shape
#uSize[0]*uSize[1]
if numC == 1:
return [Umodes2]
elif numC == 2:
return [Umodes2, Vmodes2]
elif numC == 3:
return [Umodes2, Vmodes2, Wmodes2]
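# A minimal sketch of the reshaping performed by reconstructPODmodes, using
# random numbers in place of modred output; the helper name and the sizes
# below are illustrative only.
def _reconstructPODmodes_sketch():
    import numpy as np
    uSize = (4, 5)            # (rows, cols) of each snapshot
    num_modes, numC = 3, 2    # two velocity components stacked row-wise
    modes = np.random.rand(numC * uSize[0] * uSize[1], num_modes)
    Umodes, Vmodes = reconstructPODmodes(modes, uSize, num_modes, numC)
    return Umodes.shape, Vmodes.shape    # both (4, 5, 3)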
#Plot heatmaps of POD coefficients
def plotPODcoeff(C,modes,num_bins,bound=None,logscale=None,saveFolder=None):
'''
    Plot pairwise heatmaps of the POD temporal coefficients
    Inputs:
    C - matrix of coefficients (mode number, coefficient for each frame)
    modes - indices of modes to be plotted
    num_bins - number of bin edges used along each axis of the 2D histogram
    bound - the axis bound. If None, taken as the rounded maximum absolute coefficient
    logscale - if not None, plot the histogram counts on a log scale
    Output:
    plots a grid of 2D histograms, one for each pair of modes
'''
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
if bound == None:
bound = round(np.max(np.absolute(C)))
xedges = np.linspace(-1*bound, bound, num=num_bins)
yedges = xedges;
bound = 0.5*(xedges[1]+xedges[2]);
Z, xedges, yedges = np.histogram2d(C[0], C[1], bins=(xedges, yedges))
xv, yv = np.meshgrid(0.5*(xedges[1:]+xedges[:-1]), 0.5*(yedges[1:]+yedges[:-1]))
fig, axs = plt.subplots(ncols=len(modes)-1,nrows=len(modes)-1,figsize=(9, 12))
fig.subplots_adjust(hspace=0.01, left=0.01, right=1)
#print(axs.shape)
for i in range(len(modes)-1):
for j in range(len(modes)-1):
ax = axs[i,j]
if j>=i:
Z, xedges, yedges = np.histogram2d(C[i],C[j+1], bins=(xedges, yedges))
if logscale == None:
hb = ax.pcolor(xv, yv, Z, cmap='hsv')
else:
hb = ax.pcolor(xv, yv, np.log(Z+1), cmap='hsv')
ax.plot([-1*bound, bound],[0, 0],'--k')
ax.plot([0, 0],[-1*bound, bound],'--k')
if i == 0:
ax.set_xlabel('C{0}'.format(j+2))
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
ax.tick_params(axis='x', labelsize=7)
else:
ax.set_xticklabels([])
if j == len(modes)-2:
ax.yaxis.tick_right()
ax.set_ylabel('C{0}'.format(i+1))
ax.yaxis.set_label_position("right")
ax.tick_params(axis='y', labelsize=7)
else:
ax.set_yticklabels([])
ax.set_xlim(bound,-1*bound)
ax.set_ylim(bound,-1*bound)
ax.set_aspect("equal")
ax.set_adjustable("box-forced")
#fig = plt.figure(figsize = [8,3])
#hb = ax.hexbin(C[0], C[1], gridsize=10, cmap='OrRd')
#plt.axis([-1*bound, bound, -1*bound, bound])
#plt.axis('scaled')
#cb = fig.colorbar(hb, ax=ax)
#cb.set_label('counts')
#cb.set_label('log10(N)')
else:
ax.axis('off')
if saveFolder is not None:
fig.savefig(saveFolder, transparent=True, bbox_inches='tight', pad_inches=0)
#Plot heatmaps of POD coefficients
def plotYposPODcoeff(ypos,C,modes,num_bins,bound=None,logscale=None,saveFolder=None):
'''
    Plot heatmaps of POD coefficients against wall-normal position
    Inputs:
    ypos - wall-normal position of each thumbnail. [0 1]
    C - matrix of coefficients (mode number, coefficient for each frame)
    modes - indices of modes to be plotted
    num_bins - number of bin edges along the coefficient axis
    bound - the axis bound. If None, taken as the rounded maximum absolute coefficient
    logscale - if not None, plot the histogram counts on a log scale
    Output:
    plots a 2D histogram of coefficient versus wall-normal position for each mode
'''
import numpy as np
from scipy.interpolate import griddata
import matplotlib.pyplot as plt
if bound == None:
bound = round(np.max(np.absolute(C)))
#xedges = np.linspace(0, 1, num=num_bins);
xedges = ypos
xedges = np.concatenate([[xedges[0]-(xedges[1]-xedges[0])],xedges, [xedges[-1]+xedges[-1]-xedges[-2]]])
#print(xedges)
yedges = np.linspace(-1*bound, bound, num=num_bins);
#bound = 0.5*(xedges[1]+xedges[2]);
Z, xedges, yedges = np.histogram2d(C[0],C[1], bins=(xedges, yedges))
xv, yv = np.meshgrid(0.5*(xedges[1:]+xedges[:-1]),0.5*(yedges[1:]+yedges[:-1]))
fig, axs = plt.subplots(nrows=len(modes),figsize=(3, 3*len(modes)))
fig.subplots_adjust(hspace=0.1, left=0.1, right=1)
#print(axs.shape)
for i in range(len(modes)):
ax = axs[i]
Z, xedges, yedges = np.histogram2d(C[0],C[i+1], bins=(xedges, yedges))
Z = Z.T
if logscale == None:
hb = ax.pcolor(xv, yv, Z, cmap='hsv')
else:
hb = ax.pcolor(xv, yv, np.log(Z+1), cmap='hsv')
ax.plot([-1*bound, bound],[0, 0],'--k')
if i == len(modes)-1:
ax.set_xlabel('$y/\delta$')
ax.tick_params(axis='x', labelsize=7)
else:
ax.set_xticklabels([])
ax.set_ylabel('C{0}'.format(i+1))
ax.tick_params(axis='y', labelsize=7)
ax.set_xlim(0,max(ypos))
ax.set_ylim(-1*bound,bound)
if saveFolder is not None:
fig.savefig(saveFolder, transparent=True, bbox_inches='tight', pad_inches=0)
#Plot scatter plots of POD coefficients with thumbnail overlays
def plotLLEscatter(C,ypos,St,modes,bound=None,thumb_frac=None,VecDist=None,saveFolder=None):
'''
    Plot pairwise scatter plots of POD coefficients with thumbnail overlays
    Inputs:
    C - matrix of coefficients (mode number, coefficient for each frame)
    ypos - distance from the wall for each thumbnail
    St - thumbnail data (not necessarily velocity, should use swirl)
    modes - indices of modes to be plotted
    bound - the axis bound. If None, taken as the rounded maximum absolute coefficient
    thumb_frac - fraction of thumbnails to show
    VecDist - length of each thumbnail vector pointing to coefficient location
    Output:
    plots a grid of scatter plots, one for each pair of modes, with selected
    thumbnails annotated at their coefficient locations
'''
import numpy as np
#from scipy.interpolate import griddata
import matplotlib.pyplot as plt
from matplotlib import offsetbox,colors
if bound == None:
bound = round(np.max(np.absolute(C)))
if thumb_frac == None:
thumb_frac = 0.5
if VecDist == None:
VecDist = 0.05
fig, axs = plt.subplots(ncols=len(modes)-1,nrows=len(modes)-1,figsize=(9, 12))
fig.subplots_adjust(hspace=0.01, left=0.01, right=1)
colorize = dict(c=ypos, cmap=plt.cm.get_cmap('rainbow', 100))
cmap='RdBu_r'
C2 = C.copy().T
for i in range(len(modes)-1):
for j in range(len(modes)-1):
ax = axs[i,j]
if j>=i:
hb = ax.scatter(C[i], C[j+1],s=2, facecolor='0.5', lw = 0, **colorize)
ax.plot([-1*bound, bound],[0, 0],'--k')
ax.plot([0, 0],[-1*bound, bound],'--k')
if i == 0:
ax.set_xlabel('C{0}'.format(j+2))
ax.xaxis.tick_top()
ax.xaxis.set_label_position("top")
ax.tick_params(axis='x', labelsize=7)
else:
ax.set_xticklabels([])
if j == len(modes)-2:
ax.yaxis.tick_right()
ax.set_ylabel('C{0}'.format(i+1))
ax.yaxis.set_label_position("right")
ax.tick_params(axis='y', labelsize=7)
else:
ax.set_yticklabels([])
ax.set_xlim(bound,-1*bound)
ax.set_ylim(-1*bound,bound)
ax.set_aspect("equal")
ax.set_adjustable("box-forced")
min_dist_2 = (thumb_frac * max(C2.max(0) - C2.min(0))) ** 2
shown_images = np.array([2 * C2.max(0)])
for k in range(C2.shape[0]):
dist = np.sum((C2[k] - shown_images) ** 2, 1)
if np.min(dist) < min_dist_2:
# don't show points that are too close
continue
shown_images = np.vstack([shown_images, C2[k]])
vecNorm = (C2[k,i]**2 + C2[k,j+1]**2)**0.5
imagebox = offsetbox.AnnotationBbox(
offsetbox.OffsetImage(St[:,:,k], cmap=cmap, norm=colors.Normalize(-50,50),zoom=1.5),
xybox=VecDist*C2[k,[i,j+1]]/vecNorm+C2[k,[i,j+1]],xy=C2[k,[i,j+1]], arrowprops=dict(arrowstyle="->"))
ax.add_artist(imagebox)
else:
ax.axis('off')
if saveFolder is not None:
fig.savefig(saveFolder, transparent=True, bbox_inches='tight', pad_inches=0)
def minfuncVecField(params, U, V, x, y):
import numpy as np
import PIVutils
assert U.shape[0] == U.shape[1], 'Data must be a square matrix.'
assert U.shape == V.shape , 'U and V fields must be the same size'
[U2, V2] = PIVutils.genHairpinField(int((U.shape[0]-1)/2),*params,x=x,y=y)
return np.sum(((U - U2)**2 + (V-V2)**2))
def log_prior(params,bounds):
#if params[0:7] < 0:
# return -np.inf # log(0)
return 0
def log_posterior(params, U, V, x, y, bounds):
import numpy as np
params[0:8] = np.absolute(params[0:8])
return log_prior(params,bounds) + -1*minfuncVecField(params, U, V, x, y) | mit |
ran5515/DeepDecision | tensorflow/contrib/timeseries/examples/predict.py | 69 | 5579 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An example of training and predicting with a TFTS estimator."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import sys
import numpy as np
import tensorflow as tf
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example, namely train_and_predict.
HAS_MATPLOTLIB = False
FLAGS = None
def structural_ensemble_train_and_predict(csv_file_name):
# Cycle between 5 latent values over a period of 100. This leads to a very
# smooth periodic component (and a small model), which is a good fit for our
# example data. Modeling high-frequency periodic variations will require a
# higher cycle_num_latent_values.
structural = tf.contrib.timeseries.StructuralEnsembleRegressor(
periodicities=100, num_features=1, cycle_num_latent_values=5)
return train_and_predict(structural, csv_file_name, training_steps=150)
def ar_train_and_predict(csv_file_name):
# An autoregressive model, with periodicity handled as a time-based
# regression. Note that this requires windows of size 16 (input_window_size +
# output_window_size) for training.
ar = tf.contrib.timeseries.ARRegressor(
periodicities=100, input_window_size=10, output_window_size=6,
num_features=1,
# Use the (default) normal likelihood loss to adaptively fit the
# variance. SQUARED_LOSS overestimates variance when there are trends in
# the series.
loss=tf.contrib.timeseries.ARModel.NORMAL_LIKELIHOOD_LOSS)
return train_and_predict(ar, csv_file_name, training_steps=600)
def train_and_predict(estimator, csv_file_name, training_steps):
"""A simple example of training and predicting."""
# Read data in the default "time,value" CSV format with no header
reader = tf.contrib.timeseries.CSVReader(csv_file_name)
# Set up windowing and batching for training
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=16, window_size=16)
# Fit model parameters to data
estimator.train(input_fn=train_input_fn, steps=training_steps)
# Evaluate on the full dataset sequentially, collecting in-sample predictions
# for a qualitative evaluation. Note that this loads the whole dataset into
# memory. For quantitative evaluation, use RandomWindowChunker.
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=200)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, 0]
mean = np.squeeze(np.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
variance = np.squeeze(np.concatenate(
[evaluation["covariance"][0], predictions["covariance"]], axis=0))
all_times = np.concatenate([times, predictions["times"]], axis=0)
upper_limit = mean + np.sqrt(variance)
lower_limit = mean - np.sqrt(variance)
return times, observed, all_times, mean, upper_limit, lower_limit
def make_plot(name, training_times, observed, all_times, mean,
upper_limit, lower_limit):
"""Plot a time series in a new figure."""
pyplot.figure()
pyplot.plot(training_times, observed, "b", label="training series")
pyplot.plot(all_times, mean, "r", label="forecast")
pyplot.plot(all_times, upper_limit, "g", label="forecast upper bound")
pyplot.plot(all_times, lower_limit, "g", label="forecast lower bound")
pyplot.fill_between(all_times, lower_limit, upper_limit, color="grey",
alpha="0.2")
pyplot.axvline(training_times[-1], color="k", linestyle="--")
pyplot.xlabel("time")
pyplot.ylabel("observations")
pyplot.legend(loc=0)
pyplot.title(name)
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
make_plot("Structural ensemble",
*structural_ensemble_train_and_predict(FLAGS.input_filename))
make_plot("AR", *ar_train_and_predict(FLAGS.input_filename))
pyplot.show()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_filename",
type=str,
required=True,
help="Input csv file.")
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 |
suhelhammoud/omr | src/experimental/fipi.py | 1 | 1379 |
import numpy as np
from matplotlib import pyplot as plt
def foo(x_nz, scan_x_nz):
    # Stack an index axis against the scan values offset by the first and
    # last points (x_nz is currently unused).
    y0 = scan_x_nz[0]
    y1 = scan_x_nz[-1]
    a = scan_x_nz - y0
    b = scan_x_nz - y1
    x = np.arange(len(a))
    v0 = np.row_stack((x, a))
    v1 = np.row_stack((x, b))
    return v0, v1
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'::
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
rad = np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
return np.degrees(rad)
if __name__ == '__main__':
scan_x_nz = np.load('../data/pickles/a.npy')
print(scan_x_nz)
skip = 30
x = np.arange(len(scan_x_nz))
v = np.column_stack((x, scan_x_nz))
p1 = v[0]
p2 = v[-1]
v1 = v - p1
v2 = p2 - v
print(v1.shape)
print(v2.shape)
ang = [angle_between(v1[i], v2[i]) for i in x[skip:-skip]]
xmax = np.argmax(ang) + skip
print(xmax)
# plt.plot(v1[:,0], v1[:,1], 'r', v2[:,0], v2[:,1], 'g')
plt.plot(ang)
plt.show()
print('done') | apache-2.0 |
arnold-jr/sem-classify | semclassify/workflows.py | 1 | 1634 | import pandas as pd
pd.set_option('expand_frame_repr', False)
import numpy as np
from semclassify.plots import (plot_labeled_image,
PaletteController)
from semclassify.classify import *
def calc_phase_fraction():
df = pd.read_hdf("../output/store.h5",
"BG2",
where=[
"site='soi_011'",
])
print(df.info())
print(df.head())
ymax, xmax = (df.imgCol.max() + 1, df.imgRow.max() + 1)
p = PaletteController()
labeled_image = np.array(list(map(lambda x: p.num_rgb[3 * int(round(x))],
df.ANH)))\
.reshape(xmax, ymax, 3)
print(labeled_image)
print(labeled_image.shape)
    bse_image = df.Bse.values.reshape(xmax, ymax)
plot_labeled_image(bse_image, labeled_image)
def train_models(supermodel):
# TODO: pickle the supermodel
supermodel.train_all_models(
model_names=['rfc'],
where=['site=="soi_001" | site=="soi_002" | site=="soi_011"'])
supermodel.pickle_models(['rfc'])
def classify_all_anhydrous(supermodel):
clf = supermodel.get_trained_model('rfc')
return supermodel.get_model_prediction(clf,
where=["ANH==True", "site=='soi_001'"])
if __name__ == "__main__":
# calc_phase_fraction()
sm = SuperModel(
"/Users/joshuaarnold/Documents/MyApps/sem-classify/output/store.h5",
tables=['BG2'],
feat_cols=['Al', 'Ca', 'Fe', 'K', 'Mg', 'Na', 'S', 'Si'],
label_cols=['BFS', 'FAF', 'HYD', 'ILL', 'POR', 'QS'],
)
train_models(sm)
print(sm.label_encoder.inverse_transform(classify_all_anhydrous(sm)))
| mit |
larsmans/scikit-learn | sklearn/metrics/pairwise.py | 2 | 42098 | # -*- coding: utf-8 -*-
"""
The :mod:`sklearn.metrics.pairwise` submodule implements utilities to evaluate
pairwise distances, paired distances or affinity of sets of samples.
This module contains both distance metrics and kernels. A brief summary is
given on the two here.
Distance metrics are a function d(a, b) such that d(a, b) < d(a, c) if objects
a and b are considered "more similar" to objects a and c. Two objects exactly
alike would have a distance of zero.
One of the most popular examples is Euclidean distance.
To be a 'true' metric, it must obey the following four conditions::
1. d(a, b) >= 0, for all a and b
2. d(a, b) == 0, if and only if a = b, positive definiteness
3. d(a, b) == d(b, a), symmetry
4. d(a, c) <= d(a, b) + d(b, c), the triangle inequality
Kernels are measures of similarity, i.e. ``s(a, b) > s(a, c)``
if objects ``a`` and ``b`` are considered "more similar" than objects
``a`` and ``c``. A kernel must also be positive semi-definite.
There are a number of ways to convert between a distance metric and a
similarity measure, such as a kernel. Let D be the distance, and S be the
kernel:
1. ``S = np.exp(-D * gamma)``, where one heuristic for choosing
``gamma`` is ``1 / num_features``
2. ``S = 1. / (D / np.max(D))``
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Robert Layton <robertlayton@gmail.com>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Philippe Gervais <philippe.gervais@inria.fr>
# Lars Buitinck <larsmans@gmail.com>
# License: BSD 3 clause
import numpy as np
from scipy.spatial import distance
from scipy.sparse import csr_matrix
from scipy.sparse import issparse
from ..utils import check_array
from ..utils import gen_even_slices
from ..utils import gen_batches
from ..utils.extmath import row_norms, safe_sparse_dot
from ..preprocessing import normalize
from ..externals.joblib import Parallel
from ..externals.joblib import delayed
from ..externals.joblib.parallel import cpu_count
from .pairwise_fast import _chi2_kernel_fast, _sparse_manhattan
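# A small sketch of the two distance-to-kernel conversions described in the
# module docstring above; `gamma`, the sample data and the tiny regularizer
# added to avoid dividing by the zero diagonal are illustrative choices, and
# the function is never called at import time.
def _distance_to_kernel_sketch():
    X = np.array([[0., 1.], [1., 1.], [2., 0.]])
    D = euclidean_distances(X)
    gamma = 1.0 / X.shape[1]
    S_exp = np.exp(-D * gamma)                    # heuristic 1
    S_ratio = 1.0 / (D / np.max(D) + 1e-12)       # heuristic 2, regularized
    return S_exp, S_ratio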
# Utility Functions
def check_pairwise_arrays(X, Y):
""" Set X and Y appropriately and checks inputs
If Y is None, it is set as a pointer to X (i.e. not a copy).
If Y is given, this does not happen.
All distance metrics should use this function first to assert that the
given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the second dimension of the two arrays is equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
if Y is X or Y is None:
X = Y = check_array(X, accept_sparse='csr')
else:
X = check_array(X, accept_sparse='csr')
Y = check_array(Y, accept_sparse='csr')
if X.shape[1] != Y.shape[1]:
raise ValueError("Incompatible dimension for X and Y matrices: "
"X.shape[1] == %d while Y.shape[1] == %d" % (
X.shape[1], Y.shape[1]))
if not (X.dtype == Y.dtype == np.float32):
if Y is X:
X = Y = check_array(X, ['csr', 'csc', 'coo'], dtype=np.float)
else:
X = check_array(X, ['csr', 'csc', 'coo'], dtype=np.float)
Y = check_array(Y, ['csr', 'csc', 'coo'], dtype=np.float)
return X, Y
def check_paired_arrays(X, Y):
""" Set X and Y appropriately and checks inputs for paired distances
All paired distance metrics should use this function first to assert that
the given parameters are correct and safe to use.
Specifically, this function first ensures that both X and Y are arrays,
then checks that they are at least two dimensional while ensuring that
their elements are floats. Finally, the function checks that the size
of the dimensions of the two arrays are equal.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
Returns
-------
safe_X : {array-like, sparse matrix}, shape (n_samples_a, n_features)
An array equal to X, guaranteed to be a numpy array.
safe_Y : {array-like, sparse matrix}, shape (n_samples_b, n_features)
An array equal to Y if Y was not None, guaranteed to be a numpy array.
If Y was None, safe_Y will be a pointer to X.
"""
X, Y = check_pairwise_arrays(X, Y)
if X.shape != Y.shape:
raise ValueError("X and Y should be of same shape. They were "
"respectively %r and %r long." % (X.shape, Y.shape))
return X, Y
# Pairwise distances
def euclidean_distances(X, Y=None, Y_norm_squared=None, squared=False):
"""
Considering the rows of X (and Y=X) as vectors, compute the
distance matrix between each pair of vectors.
For efficiency reasons, the euclidean distance between a pair of row
vector x and y is computed as::
dist(x, y) = sqrt(dot(x, x) - 2 * dot(x, y) + dot(y, y))
This formulation has two advantages over other ways of computing distances.
First, it is computationally efficient when dealing with sparse data.
Second, if x varies but y remains unchanged, then the right-most dot
product `dot(y, y)` can be pre-computed.
However, this is not the most precise way of doing this computation, and
the distance matrix returned by this function may not be exactly
symmetric as required by, e.g., ``scipy.spatial.distance`` functions.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples_1, n_features)
Y : {array-like, sparse matrix}, shape (n_samples_2, n_features)
Y_norm_squared : array-like, shape (n_samples_2, ), optional
Pre-computed dot-products of vectors in Y (e.g.,
``(Y**2).sum(axis=1)``)
squared : boolean, optional
Return squared Euclidean distances.
Returns
-------
distances : {array, sparse matrix}, shape (n_samples_1, n_samples_2)
Examples
--------
>>> from sklearn.metrics.pairwise import euclidean_distances
>>> X = [[0, 1], [1, 1]]
>>> # distance between rows of X
>>> euclidean_distances(X, X)
array([[ 0., 1.],
[ 1., 0.]])
>>> # get distance to origin
>>> euclidean_distances(X, [[0, 0]])
array([[ 1. ],
[ 1.41421356]])
See also
--------
paired_distances : distances betweens pairs of elements of X and Y.
"""
# should not need X_norm_squared because if you could precompute that as
# well as Y, then you should just pre-compute the output and not even
# call this function.
X, Y = check_pairwise_arrays(X, Y)
if Y_norm_squared is not None:
YY = check_array(Y_norm_squared)
if YY.shape != (1, Y.shape[0]):
raise ValueError(
"Incompatible dimensions for Y and Y_norm_squared")
else:
YY = row_norms(Y, squared=True)[np.newaxis, :]
if X is Y: # shortcut in the common case euclidean_distances(X, X)
XX = YY.T
else:
XX = row_norms(X, squared=True)[:, np.newaxis]
distances = safe_sparse_dot(X, Y.T, dense_output=True)
distances *= -2
distances += XX
distances += YY
np.maximum(distances, 0, out=distances)
if X is Y:
# Ensure that distances between vectors and themselves are set to 0.0.
# This may not be the case due to floating point rounding errors.
distances.flat[::distances.shape[0] + 1] = 0.0
return distances if squared else np.sqrt(distances, out=distances)
def pairwise_distances_argmin_min(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs=None):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance). The minimal distances are
also returned.
This is mostly equivalent to calling:
(pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis),
pairwise_distances(X, Y=Y, metric=metric).min(axis=axis))
but uses much less memory, and is faster for large arrays.
Parameters
----------
X, Y : {array-like, sparse matrix}
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict, optional
Keyword arguments to pass to specified metric function.
Returns
-------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
distances : numpy.ndarray
distances[i] is the distance between the i-th row in X and the
argmin[i]-th row in Y.
See also
--------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin
"""
dist_func = None
if metric in PAIRWISE_DISTANCE_FUNCTIONS:
dist_func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
elif not callable(metric) and not isinstance(metric, str):
raise ValueError("'metric' must be a string or a callable")
X, Y = check_pairwise_arrays(X, Y)
if metric_kwargs is None:
metric_kwargs = {}
if axis == 0:
X, Y = Y, X
# Allocate output arrays
indices = np.empty(X.shape[0], dtype=np.intp)
values = np.empty(X.shape[0])
values.fill(np.infty)
for chunk_x in gen_batches(X.shape[0], batch_size):
X_chunk = X[chunk_x, :]
for chunk_y in gen_batches(Y.shape[0], batch_size):
Y_chunk = Y[chunk_y, :]
if dist_func is not None:
if metric == 'euclidean': # special case, for speed
d_chunk = safe_sparse_dot(X_chunk, Y_chunk.T,
dense_output=True)
d_chunk *= -2
d_chunk += row_norms(X_chunk, squared=True)[:, np.newaxis]
d_chunk += row_norms(Y_chunk, squared=True)[np.newaxis, :]
np.maximum(d_chunk, 0, d_chunk)
else:
d_chunk = dist_func(X_chunk, Y_chunk, **metric_kwargs)
else:
d_chunk = pairwise_distances(X_chunk, Y_chunk,
metric=metric, **metric_kwargs)
# Update indices and minimum values using chunk
min_indices = d_chunk.argmin(axis=1)
min_values = d_chunk[np.arange(chunk_x.stop - chunk_x.start),
min_indices]
flags = values[chunk_x] > min_values
indices[chunk_x] = np.where(
flags, min_indices + chunk_y.start, indices[chunk_x])
values[chunk_x] = np.where(
flags, min_values, values[chunk_x])
if metric == "euclidean" and not metric_kwargs.get("squared", False):
np.sqrt(values, values)
return indices, values
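# A short sketch of the equivalence stated in the docstring above: the
# chunked computation matches argmin/min over the full distance matrix.
# The helper name and the random data are illustrative only.
def _argmin_min_equivalence_sketch():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(5, 3), rng.rand(7, 3)
    indices, values = pairwise_distances_argmin_min(X, Y, metric="euclidean")
    D = pairwise_distances(X, Y, metric="euclidean")
    return (np.array_equal(indices, D.argmin(axis=1)),
            np.allclose(values, D.min(axis=1)))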
def pairwise_distances_argmin(X, Y, axis=1, metric="euclidean",
batch_size=500, metric_kwargs={}):
"""Compute minimum distances between one point and a set of points.
This function computes for each row in X, the index of the row of Y which
is closest (according to the specified distance).
This is mostly equivalent to calling:
pairwise_distances(X, Y=Y, metric=metric).argmin(axis=axis)
but uses much less memory, and is faster for large arrays.
This function works with dense 2D arrays only.
Parameters
    ----------
X, Y : array-like
Arrays containing points. Respective shapes (n_samples1, n_features)
and (n_samples2, n_features)
batch_size : integer
To reduce memory consumption over the naive solution, data are
processed in batches, comprising batch_size rows of X and
batch_size rows of Y. The default value is quite conservative, but
can be changed for fine-tuning. The larger the number, the larger the
memory usage.
metric : string or callable
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto', 'russellrao',
'seuclidean', 'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
metric_kwargs : dict
keyword arguments to pass to specified metric function.
Returns
    -------
argmin : numpy.ndarray
Y[argmin[i], :] is the row in Y that is closest to X[i, :].
See also
    --------
sklearn.metrics.pairwise_distances
sklearn.metrics.pairwise_distances_argmin_min
"""
return pairwise_distances_argmin_min(X, Y, axis, metric, batch_size,
metric_kwargs)[0]
def manhattan_distances(X, Y=None, sum_over_features=True,
size_threshold=5e8):
""" Compute the L1 distances between the vectors in X and Y.
With sum_over_features equal to False it returns the componentwise
distances.
Parameters
----------
X : array_like
An array with shape (n_samples_X, n_features).
Y : array_like, optional
An array with shape (n_samples_Y, n_features).
sum_over_features : bool, default=True
If True the function returns the pairwise distance matrix
else it returns the componentwise L1 pairwise-distances.
Not supported for sparse matrix inputs.
size_threshold : int, default=5e8
Unused parameter.
Returns
-------
D : array
If sum_over_features is False shape is
(n_samples_X * n_samples_Y, n_features) and D contains the
componentwise L1 pairwise-distances (ie. absolute difference),
else shape is (n_samples_X, n_samples_Y) and D contains
the pairwise L1 distances.
Examples
--------
>>> from sklearn.metrics.pairwise import manhattan_distances
>>> manhattan_distances(3, 3)#doctest:+ELLIPSIS
array([[ 0.]])
>>> manhattan_distances(3, 2)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances(2, 3)#doctest:+ELLIPSIS
array([[ 1.]])
>>> manhattan_distances([[1, 2], [3, 4]],\
[[1, 2], [0, 3]])#doctest:+ELLIPSIS
array([[ 0., 2.],
[ 4., 4.]])
>>> import numpy as np
>>> X = np.ones((1, 2))
>>> y = 2 * np.ones((2, 2))
>>> manhattan_distances(X, y, sum_over_features=False)#doctest:+ELLIPSIS
array([[ 1., 1.],
[ 1., 1.]]...)
"""
X, Y = check_pairwise_arrays(X, Y)
if issparse(X) or issparse(Y):
if not sum_over_features:
raise TypeError("sum_over_features=%r not supported"
" for sparse matrices" % sum_over_features)
X = csr_matrix(X, copy=False)
Y = csr_matrix(Y, copy=False)
D = np.zeros((X.shape[0], Y.shape[0]))
_sparse_manhattan(X.data, X.indices, X.indptr,
Y.data, Y.indices, Y.indptr,
X.shape[1], D)
return D
if sum_over_features:
return distance.cdist(X, Y, 'cityblock')
D = X[:, np.newaxis, :] - Y[np.newaxis, :, :]
D = np.abs(D, D)
return D.reshape((-1, X.shape[1]))
def cosine_distances(X, Y=None):
"""
Compute cosine distance between samples in X and Y.
Cosine distance is defined as 1.0 minus the cosine similarity.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
distance matrix : array
An array with shape (n_samples_X, n_samples_Y).
See also
--------
sklearn.metrics.pairwise.cosine_similarity
scipy.spatial.distance.cosine (dense matrices only)
"""
# 1.0 - cosine_similarity(X, Y) without copy
S = cosine_similarity(X, Y)
S *= -1
S += 1
return S
# Paired distances
def paired_euclidean_distances(X, Y):
"""
Computes the paired euclidean distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return np.sqrt(((X - Y) ** 2).sum(axis=-1))
def paired_manhattan_distances(X, Y):
"""Compute the L1 distances between the vectors in X and Y.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray (n_samples, )
"""
X, Y = check_paired_arrays(X, Y)
return np.abs(X - Y).sum(axis=-1)
def paired_cosine_distances(X, Y):
"""
Computes the paired cosine distances between X and Y
Parameters
----------
X : array-like, shape (n_samples, n_features)
Y : array-like, shape (n_samples, n_features)
Returns
-------
distances : ndarray, shape (n_samples, )
Notes
------
The cosine distance is equivalent to the half the squared
euclidean distance if each sample is normalized to unit norm
"""
X, Y = check_paired_arrays(X, Y)
X_normalized = normalize(X, copy=True)
X_normalized -= normalize(Y, copy=True)
return .5 * (X_normalized ** 2).sum(axis=-1)
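# A brief numerical sketch of the note above: on L2-normalized rows the
# cosine distance equals half the squared euclidean distance. The helper
# name and the random data are illustrative only.
def _paired_cosine_identity_sketch():
    rng = np.random.RandomState(0)
    X = normalize(rng.rand(4, 3))
    Y = normalize(rng.rand(4, 3))
    lhs = paired_cosine_distances(X, Y)
    rhs = 0.5 * paired_euclidean_distances(X, Y) ** 2
    return np.allclose(lhs, rhs)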
PAIRED_DISTANCES = {
'cosine': paired_cosine_distances,
'euclidean': paired_euclidean_distances,
'l2': paired_euclidean_distances,
'l1': paired_manhattan_distances,
'manhattan': paired_manhattan_distances,
'cityblock': paired_manhattan_distances,
}
def paired_distances(X, Y, metric="euclidean", **kwds):
"""
Computes the paired distances between X and Y.
Computes the distances between (X[0], Y[0]), (X[1], Y[1]), etc...
Parameters
----------
X, Y : ndarray (n_samples, n_features)
metric : string or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
specified in PAIRED_DISTANCES, including "euclidean",
"manhattan", or "cosine".
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
Returns
-------
distances : ndarray (n_samples, )
Examples
--------
>>> from sklearn.metrics.pairwise import paired_distances
>>> X = [[0, 1], [1, 1]]
>>> Y = [[0, 1], [2, 1]]
>>> paired_distances(X, Y)
array([ 0., 1.])
See also
--------
pairwise_distances : pairwise distances.
"""
if metric in PAIRED_DISTANCES:
func = PAIRED_DISTANCES[metric]
return func(X, Y)
elif callable(metric):
# Check the matrix first (it is usually done by the metric)
X, Y = check_paired_arrays(X, Y)
distances = np.zeros(len(X))
for i in range(len(X)):
distances[i] = metric(X[i], Y[i])
return distances
else:
raise ValueError('Unknown distance %s' % metric)
# Kernels
def linear_kernel(X, Y=None):
"""
Compute the linear kernel between X and Y.
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
return safe_sparse_dot(X, Y.T, dense_output=True)
def polynomial_kernel(X, Y=None, degree=3, gamma=None, coef0=1):
"""
Compute the polynomial kernel between X and Y::
K(X, Y) = (gamma <X, Y> + coef0)^degree
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
degree : int
Returns
-------
Gram matrix : array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
K **= degree
return K
def sigmoid_kernel(X, Y=None, gamma=None, coef0=1):
"""
Compute the sigmoid kernel between X and Y::
K(X, Y) = tanh(gamma <X, Y> + coef0)
Parameters
----------
X : array of shape (n_samples_1, n_features)
Y : array of shape (n_samples_2, n_features)
    gamma : float, default None
        If None, defaults to 1.0 / n_features
    coef0 : int, default 1
Returns
-------
Gram matrix: array of shape (n_samples_1, n_samples_2)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = linear_kernel(X, Y)
K *= gamma
K += coef0
np.tanh(K, K) # compute tanh in-place
return K
def rbf_kernel(X, Y=None, gamma=None):
"""
Compute the rbf (gaussian) kernel between X and Y::
K(x, y) = exp(-gamma ||x-y||^2)
for each pair of rows x in X and y in Y.
Parameters
----------
X : array of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
"""
X, Y = check_pairwise_arrays(X, Y)
if gamma is None:
gamma = 1.0 / X.shape[1]
K = euclidean_distances(X, Y, squared=True)
K *= -gamma
np.exp(K, K) # exponentiate K in-place
return K
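# A minimal sketch of the formula above: the RBF kernel is the exponential of
# the negative, gamma-scaled squared euclidean distance. The helper name and
# the random data are illustrative only.
def _rbf_kernel_sketch():
    rng = np.random.RandomState(0)
    X, Y = rng.rand(4, 3), rng.rand(5, 3)
    gamma = 0.5
    K = rbf_kernel(X, Y, gamma=gamma)
    K_manual = np.exp(-gamma * euclidean_distances(X, Y, squared=True))
    return np.allclose(K, K_manual)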
def cosine_similarity(X, Y=None):
"""Compute cosine similarity between samples in X and Y.
Cosine similarity, or the cosine kernel, computes similarity as the
normalized dot product of X and Y:
K(X, Y) = <X, Y> / (||X||*||Y||)
On L2-normalized data, this function is equivalent to linear_kernel.
Parameters
----------
X : array_like, sparse matrix
with shape (n_samples_X, n_features).
Y : array_like, sparse matrix (optional)
with shape (n_samples_Y, n_features).
Returns
-------
kernel matrix : array
An array with shape (n_samples_X, n_samples_Y).
"""
# to avoid recursive import
X, Y = check_pairwise_arrays(X, Y)
X_normalized = normalize(X, copy=True)
if X is Y:
Y_normalized = X_normalized
else:
Y_normalized = normalize(Y, copy=True)
K = linear_kernel(X_normalized, Y_normalized)
return K
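# A short sketch of the statement above: on L2-normalized rows the cosine
# kernel reduces to a plain dot product (linear kernel). The helper name and
# the random data are illustrative only.
def _cosine_similarity_sketch():
    rng = np.random.RandomState(0)
    X = normalize(rng.rand(4, 3))
    Y = normalize(rng.rand(5, 3))
    return np.allclose(cosine_similarity(X, Y), linear_kernel(X, Y))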
def additive_chi2_kernel(X, Y=None):
"""Computes the additive chi-squared kernel between observations in X and Y
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = -Sum [(x - y)^2 / (x + y)]
It can be interpreted as a weighted difference per entry.
Notes
-----
As the negative of a distance, this kernel is only conditionally positive
definite.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
chi2_kernel : The exponentiated version of the kernel, which is usually
preferable.
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to this kernel.
"""
if issparse(X) or issparse(Y):
raise ValueError("additive_chi2 does not support sparse matrices.")
X, Y = check_pairwise_arrays(X, Y)
if (X < 0).any():
raise ValueError("X contains negative values.")
if Y is not X and (Y < 0).any():
raise ValueError("Y contains negative values.")
result = np.zeros((X.shape[0], Y.shape[0]), dtype=X.dtype)
_chi2_kernel_fast(X, Y, result)
return result
def chi2_kernel(X, Y=None, gamma=1.):
"""Computes the exponential chi-squared kernel X and Y.
The chi-squared kernel is computed between each pair of rows in X and Y. X
and Y have to be non-negative. This kernel is most commonly applied to
histograms.
The chi-squared kernel is given by::
k(x, y) = exp(-gamma Sum [(x - y)^2 / (x + y)])
It can be interpreted as a weighted difference per entry.
Parameters
----------
X : array-like of shape (n_samples_X, n_features)
Y : array of shape (n_samples_Y, n_features)
gamma : float, default=1.
Scaling parameter of the chi2 kernel.
Returns
-------
kernel_matrix : array of shape (n_samples_X, n_samples_Y)
References
----------
* Zhang, J. and Marszalek, M. and Lazebnik, S. and Schmid, C.
Local features and kernels for classification of texture and object
categories: A comprehensive study
International Journal of Computer Vision 2007
http://eprints.pascal-network.org/archive/00002309/01/Zhang06-IJCV.pdf
See also
--------
additive_chi2_kernel : The additive version of this kernel
sklearn.kernel_approximation.AdditiveChi2Sampler : A Fourier approximation
to the additive version of this kernel.
"""
K = additive_chi2_kernel(X, Y)
K *= gamma
return np.exp(K, K)
# Helper functions - distance
PAIRWISE_DISTANCE_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'cityblock': manhattan_distances,
'cosine': cosine_distances,
'euclidean': euclidean_distances,
'l2': euclidean_distances,
'l1': manhattan_distances,
'manhattan': manhattan_distances, }
def distance_metrics():
"""Valid metrics for pairwise_distances.
This function simply returns the valid pairwise distance metrics.
It exists to allow for a description of the mapping for
each of the valid strings.
The valid distance metrics, and the function they map to, are:
============ ====================================
metric Function
============ ====================================
'cityblock' metrics.pairwise.manhattan_distances
'cosine' metrics.pairwise.cosine_distances
'euclidean' metrics.pairwise.euclidean_distances
'l1' metrics.pairwise.manhattan_distances
'l2' metrics.pairwise.euclidean_distances
'manhattan' metrics.pairwise.manhattan_distances
============ ====================================
"""
return PAIRWISE_DISTANCE_FUNCTIONS
def _parallel_pairwise(X, Y, func, n_jobs, **kwds):
"""Break the pairwise matrix in n_jobs even slices
and compute them in parallel"""
if n_jobs < 0:
n_jobs = max(cpu_count() + 1 + n_jobs, 1)
if Y is None:
Y = X
ret = Parallel(n_jobs=n_jobs, verbose=0)(
delayed(func)(X, Y[s], **kwds)
for s in gen_even_slices(Y.shape[0], n_jobs))
return np.hstack(ret)
_VALID_METRICS = ['euclidean', 'l2', 'l1', 'manhattan', 'cityblock',
'braycurtis', 'canberra', 'chebyshev', 'correlation',
'cosine', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener',
'sokalsneath', 'sqeuclidean', 'yule', "wminkowski"]
def pairwise_distances(X, Y=None, metric="euclidean", n_jobs=1, **kwds):
""" Compute the distance matrix from a vector array X and optional Y.
This method takes either a vector array or a distance matrix, and returns
a distance matrix. If the input is a vector array, the distances are
computed. If the input is a distances matrix, it is returned instead.
This method provides a safe way to take a distance matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
distance between the arrays from both X and Y.
Valid values for metric are:
- From scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']. These metrics support sparse matrix inputs.
- From scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski', 'mahalanobis',
'matching', 'minkowski', 'rogerstanimoto', 'russellrao', 'seuclidean',
'sokalmichener', 'sokalsneath', 'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics. These metrics do not support sparse matrix inputs.
Note that in the case of 'cityblock', 'cosine' and 'euclidean' (which are
valid scipy.spatial.distance metrics), the scikit-learn implementation
will be used, which is faster and has support for sparse matrices (except
for 'cityblock'). For a verbose description of the metrics from
    scikit-learn, see the __doc__ of the sklearn.metrics.pairwise.distance_metrics
function.
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise distances between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by scipy.spatial.distance.pdist for its metric parameter, or
a metric listed in pairwise.PAIRWISE_DISTANCE_FUNCTIONS.
If metric is "precomputed", X is assumed to be a distance matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
D : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A distance matrix D such that D_{i, j} is the distance between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then D_{i, j} is the distance between the ith array
from X and the jth array from Y.
"""
if (metric not in _VALID_METRICS and
not callable(metric) and metric != "precomputed"):
raise ValueError("Unknown metric %s. "
"Valid metrics are %s, or 'precomputed', or a "
"callable" % (metric, _VALID_METRICS))
if metric == "precomputed":
return X
elif metric in PAIRWISE_DISTANCE_FUNCTIONS:
func = PAIRWISE_DISTANCE_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate distance for each element in X and Y.
# FIXME: can use n_jobs here too
# FIXME: np.zeros can be replaced by np.empty
D = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# distance assumed to be symmetric.
D[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
D[j][i] = D[i][j]
return D
else:
# Note: the distance module doesn't support sparse matrices!
if type(X) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
if Y is None:
return distance.squareform(distance.pdist(X, metric=metric,
**kwds))
else:
if type(Y) is csr_matrix:
raise TypeError("scipy distance metrics do not"
" support sparse matrices.")
return distance.cdist(X, Y, metric=metric, **kwds)
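# NOTE (added illustration, not from the original module): a minimal usage
# sketch of pairwise_distances with hypothetical data; a string metric is
# dispatched to the scikit-learn implementations above, while a callable is
# applied to each pair of rows.
#
#     import numpy as np
#     from sklearn.metrics import pairwise_distances
#     X = np.array([[0., 0.], [3., 4.]])
#     pairwise_distances(X, metric='euclidean')
#     # -> array([[ 0.,  5.],
#     #           [ 5.,  0.]])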
# Helper functions - distance
PAIRWISE_KERNEL_FUNCTIONS = {
# If updating this dictionary, update the doc in both distance_metrics()
# and also in pairwise_distances()!
'additive_chi2': additive_chi2_kernel,
'chi2': chi2_kernel,
'linear': linear_kernel,
'polynomial': polynomial_kernel,
'poly': polynomial_kernel,
'rbf': rbf_kernel,
'sigmoid': sigmoid_kernel,
'cosine': cosine_similarity, }
def kernel_metrics():
""" Valid metrics for pairwise_kernels
    This function simply returns the valid pairwise kernel metrics.
It exists, however, to allow for a verbose description of the mapping for
each of the valid strings.
    The valid kernel metrics, and the function they map to, are:
=============== ========================================
metric Function
=============== ========================================
'additive_chi2' sklearn.pairwise.additive_chi2_kernel
'chi2' sklearn.pairwise.chi2_kernel
'linear' sklearn.pairwise.linear_kernel
'poly' sklearn.pairwise.polynomial_kernel
'polynomial' sklearn.pairwise.polynomial_kernel
'rbf' sklearn.pairwise.rbf_kernel
'sigmoid' sklearn.pairwise.sigmoid_kernel
'cosine' sklearn.pairwise.cosine_similarity
=============== ========================================
"""
return PAIRWISE_KERNEL_FUNCTIONS
KERNEL_PARAMS = {
"additive_chi2": (),
"chi2": (),
"cosine": (),
"exp_chi2": frozenset(["gamma"]),
"linear": (),
"poly": frozenset(["gamma", "degree", "coef0"]),
"polynomial": frozenset(["gamma", "degree", "coef0"]),
"rbf": frozenset(["gamma"]),
"sigmoid": frozenset(["gamma", "coef0"]),
}
def pairwise_kernels(X, Y=None, metric="linear", filter_params=False,
n_jobs=1, **kwds):
"""Compute the kernel between arrays X and optional array Y.
This method takes either a vector array or a kernel matrix, and returns
a kernel matrix. If the input is a vector array, the kernels are
computed. If the input is a kernel matrix, it is returned instead.
This method provides a safe way to take a kernel matrix as input, while
preserving compatibility with many other algorithms that take a vector
array.
If Y is given (default is None), then the returned matrix is the pairwise
kernel between the arrays from both X and Y.
Valid values for metric are::
['rbf', 'sigmoid', 'polynomial', 'poly', 'linear', 'cosine']
Parameters
----------
X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \
[n_samples_a, n_features] otherwise
Array of pairwise kernels between samples, or a feature array.
Y : array [n_samples_b, n_features]
A second feature array only if X has shape [n_samples_a, n_features].
metric : string, or callable
The metric to use when calculating kernel between instances in a
feature array. If metric is a string, it must be one of the metrics
in pairwise.PAIRWISE_KERNEL_FUNCTIONS.
If metric is "precomputed", X is assumed to be a kernel matrix.
Alternatively, if metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays from X as input and return a value indicating
the distance between them.
n_jobs : int
The number of jobs to use for the computation. This works by breaking
down the pairwise matrix into n_jobs even slices and computing them in
parallel.
If -1 all CPUs are used. If 1 is given, no parallel computing code is
used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but one
are used.
filter_params: boolean
Whether to filter invalid parameters or not.
`**kwds` : optional keyword parameters
Any further parameters are passed directly to the kernel function.
Returns
-------
K : array [n_samples_a, n_samples_a] or [n_samples_a, n_samples_b]
A kernel matrix K such that K_{i, j} is the kernel between the
ith and jth vectors of the given matrix X, if Y is None.
If Y is not None, then K_{i, j} is the kernel between the ith array
from X and the jth array from Y.
Notes
-----
If metric is 'precomputed', Y is ignored and X is returned.
"""
if metric == "precomputed":
return X
elif metric in PAIRWISE_KERNEL_FUNCTIONS:
if filter_params:
kwds = dict((k, kwds[k]) for k in kwds
if k in KERNEL_PARAMS[metric])
func = PAIRWISE_KERNEL_FUNCTIONS[metric]
if n_jobs == 1:
return func(X, Y, **kwds)
else:
return _parallel_pairwise(X, Y, func, n_jobs, **kwds)
elif callable(metric):
# Check matrices first (this is usually done by the metric).
X, Y = check_pairwise_arrays(X, Y)
n_x, n_y = X.shape[0], Y.shape[0]
# Calculate kernel for each element in X and Y.
K = np.zeros((n_x, n_y), dtype='float')
for i in range(n_x):
start = 0
if X is Y:
start = i
for j in range(start, n_y):
# Kernel assumed to be symmetric.
K[i][j] = metric(X[i], Y[j], **kwds)
if X is Y:
K[j][i] = K[i][j]
return K
else:
raise ValueError("Unknown kernel %r" % metric)
| bsd-3-clause |
JPFrancoia/scikit-learn | sklearn/metrics/ranking.py | 11 | 27672 | """Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import assert_all_finite
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.extmath import stable_cumsum
from ..utils.fixes import bincount
from ..utils.fixes import array_equal
from ..utils.stats import rankdata
from ..utils.sparsefuncs import count_nonzero
from ..exceptions import UndefinedMetricWarning
from .base import _average_binary_score
def auc(x, y, reorder=False):
"""Compute Area Under the Curve (AUC) using the trapezoidal rule
This is a general function, given points on a curve. For computing the
area under the ROC-curve, see :func:`roc_auc_score`.
Parameters
----------
x : array, shape = [n]
x coordinates.
y : array, shape = [n]
y coordinates.
reorder : boolean, optional (default=False)
If True, assume that the curve is ascending in the case of ties, as for
an ROC curve. If the curve is non-ascending, the result will be wrong.
Returns
-------
auc : float
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> pred = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
>>> metrics.auc(fpr, tpr)
0.75
See also
--------
roc_auc_score : Computes the area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
"""
check_consistent_length(x, y)
x = column_or_1d(x)
y = column_or_1d(y)
if x.shape[0] < 2:
raise ValueError('At least 2 points are needed to compute'
' area under curve, but x.shape = %s' % x.shape)
direction = 1
if reorder:
# reorder the data points according to the x axis and using y to
# break ties
order = np.lexsort((y, x))
x, y = x[order], y[order]
else:
dx = np.diff(x)
if np.any(dx < 0):
if np.all(dx <= 0):
direction = -1
else:
raise ValueError("Reordering is not turned on, and "
"the x array is not increasing: %s" % x)
area = direction * np.trapz(y, x)
if isinstance(area, np.memmap):
# Reductions such as .sum used internally in np.trapz do not return a
# scalar by default for numpy.memmap instances contrary to
# regular numpy.ndarray instances.
area = area.dtype.type(area)
return area
def average_precision_score(y_true, y_score, average="macro",
sample_weight=None):
"""Compute average precision (AP) from prediction scores
This score corresponds to the area under the precision-recall curve.
Note: this implementation is restricted to the binary classification task
or multilabel classification task.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
average_precision : float
References
----------
.. [1] `Wikipedia entry for the Average precision
<https://en.wikipedia.org/wiki/Average_precision>`_
See also
--------
roc_auc_score : Area under the ROC curve
precision_recall_curve :
Compute precision-recall pairs for different probability thresholds
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import average_precision_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> average_precision_score(y_true, y_scores) # doctest: +ELLIPSIS
0.79...
"""
def _binary_average_precision(y_true, y_score, sample_weight=None):
precision, recall, thresholds = precision_recall_curve(
y_true, y_score, sample_weight=sample_weight)
return auc(recall, precision)
return _average_binary_score(_binary_average_precision, y_true, y_score,
average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
"""Compute Area Under the Curve (AUC) from prediction scores
Note: this implementation is restricted to the binary classification task
or multilabel classification task in label indicator format.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
auc : float
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
See also
--------
average_precision_score : Area under the precision-recall curve
roc_curve : Compute Receiver operating characteristic (ROC)
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import roc_auc_score
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> roc_auc_score(y_true, y_scores)
0.75
"""
def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
if len(np.unique(y_true)) != 2:
raise ValueError("Only one class present in y_true. ROC AUC score "
"is not defined in that case.")
        fpr, tpr, thresholds = roc_curve(y_true, y_score,
                                         sample_weight=sample_weight)
return auc(fpr, tpr, reorder=True)
return _average_binary_score(
_binary_roc_auc_score, y_true, y_score, average,
sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
"""Calculate true and false positives per binary classification threshold.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification
y_score : array, shape = [n_samples]
Estimated probabilities or decision function
pos_label : int or str, default=None
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
fps : array, shape = [n_thresholds]
A count of false positives, at index i being the number of negative
samples assigned a score >= thresholds[i]. The total number of
negative samples is equal to fps[-1] (thus true negatives are given by
fps[-1] - fps).
tps : array, shape = [n_thresholds <= len(np.unique(y_score))]
An increasing count of true positives, at index i being the number
of positive samples assigned a score >= thresholds[i]. The total
number of positive samples is equal to tps[-1] (thus false negatives
are given by tps[-1] - tps).
thresholds : array, shape = [n_thresholds]
Decreasing score values.
"""
check_consistent_length(y_true, y_score)
y_true = column_or_1d(y_true)
y_score = column_or_1d(y_score)
assert_all_finite(y_true)
assert_all_finite(y_score)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
# ensure binary classification if pos_label is not specified
classes = np.unique(y_true)
if (pos_label is None and
not (array_equal(classes, [0, 1]) or
array_equal(classes, [-1, 1]) or
array_equal(classes, [0]) or
array_equal(classes, [-1]) or
array_equal(classes, [1]))):
raise ValueError("Data is not binary and pos_label is not specified")
elif pos_label is None:
pos_label = 1.
# make y_true a boolean vector
y_true = (y_true == pos_label)
# sort scores and corresponding truth values
desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
y_score = y_score[desc_score_indices]
y_true = y_true[desc_score_indices]
if sample_weight is not None:
weight = sample_weight[desc_score_indices]
else:
weight = 1.
# y_score typically has many tied values. Here we extract
# the indices associated with the distinct values. We also
# concatenate a value for the end of the curve.
distinct_value_indices = np.where(np.diff(y_score))[0]
threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
# accumulate the true positives with decreasing threshold
tps = stable_cumsum(y_true * weight)[threshold_idxs]
if sample_weight is not None:
fps = stable_cumsum(weight)[threshold_idxs] - tps
else:
fps = 1 + threshold_idxs - tps
return fps, tps, y_score[threshold_idxs]
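# Worked illustration (added commentary, not from the original module): for
# y_true = [0, 0, 1, 1] and y_score = [0.1, 0.4, 0.35, 0.8], sorting by
# decreasing score gives the label sequence [1, 0, 1, 0], so the function
# returns fps = [0, 1, 1, 2], tps = [1, 1, 2, 2] and
# thresholds = [0.8, 0.4, 0.35, 0.1].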
def precision_recall_curve(y_true, probas_pred, pos_label=None,
sample_weight=None):
"""Compute precision-recall pairs for different probability thresholds
Note: this implementation is restricted to the binary classification task.
The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
true positives and ``fp`` the number of false positives. The precision is
intuitively the ability of the classifier not to label as positive a sample
that is negative.
The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
true positives and ``fn`` the number of false negatives. The recall is
intuitively the ability of the classifier to find all the positive samples.
The last precision and recall values are 1. and 0. respectively and do not
have a corresponding threshold. This ensures that the graph starts on the
x axis.
Read more in the :ref:`User Guide <precision_recall_f_measure_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True targets of binary classification in range {-1, 1} or {0, 1}.
probas_pred : array, shape = [n_samples]
Estimated probabilities or decision function.
pos_label : int or str, default=None
The label of the positive class
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
precision : array, shape = [n_thresholds + 1]
Precision values such that element i is the precision of
predictions with score >= thresholds[i] and the last element is 1.
recall : array, shape = [n_thresholds + 1]
Decreasing recall values such that element i is the recall of
predictions with score >= thresholds[i] and the last element is 0.
thresholds : array, shape = [n_thresholds <= len(np.unique(probas_pred))]
Increasing thresholds on the decision function used to compute
precision and recall.
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import precision_recall_curve
>>> y_true = np.array([0, 0, 1, 1])
>>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> precision, recall, thresholds = precision_recall_curve(
... y_true, y_scores)
>>> precision # doctest: +ELLIPSIS
array([ 0.66..., 0.5 , 1. , 1. ])
>>> recall
array([ 1. , 0.5, 0.5, 0. ])
>>> thresholds
array([ 0.35, 0.4 , 0.8 ])
"""
fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
pos_label=pos_label,
sample_weight=sample_weight)
precision = tps / (tps + fps)
recall = tps / tps[-1]
# stop when full recall attained
# and reverse the outputs so recall is decreasing
last_ind = tps.searchsorted(tps[-1])
sl = slice(last_ind, None, -1)
return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None,
drop_intermediate=True):
"""Compute Receiver operating characteristic (ROC)
Note: this implementation is restricted to the binary classification task.
Read more in the :ref:`User Guide <roc_metrics>`.
Parameters
----------
y_true : array, shape = [n_samples]
True binary labels in range {0, 1} or {-1, 1}. If labels are not
binary, pos_label should be explicitly given.
y_score : array, shape = [n_samples]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
pos_label : int or str, default=None
Label considered as positive and others are considered negative.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
drop_intermediate : boolean, optional (default=True)
Whether to drop some suboptimal thresholds which would not appear
on a plotted ROC curve. This is useful in order to create lighter
ROC curves.
.. versionadded:: 0.17
parameter *drop_intermediate*.
Returns
-------
fpr : array, shape = [>2]
Increasing false positive rates such that element i is the false
positive rate of predictions with score >= thresholds[i].
tpr : array, shape = [>2]
Increasing true positive rates such that element i is the true
positive rate of predictions with score >= thresholds[i].
thresholds : array, shape = [n_thresholds]
Decreasing thresholds on the decision function used to compute
fpr and tpr. `thresholds[0]` represents no instances being predicted
and is arbitrarily set to `max(y_score) + 1`.
See also
--------
roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
Notes
-----
Since the thresholds are sorted from low to high values, they
are reversed upon returning them to ensure they correspond to both ``fpr``
and ``tpr``, which are sorted in reversed order during their calculation.
References
----------
.. [1] `Wikipedia entry for the Receiver operating characteristic
<https://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
Examples
--------
>>> import numpy as np
>>> from sklearn import metrics
>>> y = np.array([1, 1, 2, 2])
>>> scores = np.array([0.1, 0.4, 0.35, 0.8])
>>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
>>> fpr
array([ 0. , 0.5, 0.5, 1. ])
>>> tpr
array([ 0.5, 0.5, 1. , 1. ])
>>> thresholds
array([ 0.8 , 0.4 , 0.35, 0.1 ])
"""
fps, tps, thresholds = _binary_clf_curve(
y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
# Attempt to drop thresholds corresponding to points in between and
# collinear with other points. These are always suboptimal and do not
# appear on a plotted ROC curve (and thus do not affect the AUC).
# Here np.diff(_, 2) is used as a "second derivative" to tell if there
# is a corner at the point. Both fps and tps must be tested to handle
# thresholds with multiple data points (which are combined in
# _binary_clf_curve). This keeps all cases where the point should be kept,
# but does not drop more complicated cases like fps = [1, 3, 7],
# tps = [1, 2, 4]; there is no harm in keeping too many thresholds.
if drop_intermediate and len(fps) > 2:
optimal_idxs = np.where(np.r_[True,
np.logical_or(np.diff(fps, 2),
np.diff(tps, 2)),
True])[0]
fps = fps[optimal_idxs]
tps = tps[optimal_idxs]
thresholds = thresholds[optimal_idxs]
if tps.size == 0 or fps[0] != 0:
# Add an extra threshold position if necessary
tps = np.r_[0, tps]
fps = np.r_[0, fps]
thresholds = np.r_[thresholds[0] + 1, thresholds]
if fps[-1] <= 0:
warnings.warn("No negative samples in y_true, "
"false positive value should be meaningless",
UndefinedMetricWarning)
fpr = np.repeat(np.nan, fps.shape)
else:
fpr = fps / fps[-1]
if tps[-1] <= 0:
warnings.warn("No positive samples in y_true, "
"true positive value should be meaningless",
UndefinedMetricWarning)
tpr = np.repeat(np.nan, tps.shape)
else:
tpr = tps / tps[-1]
return fpr, tpr, thresholds
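# NOTE (added commentary, not from the original module): drop_intermediate
# keeps only the "corner" points of the ROC staircase. For example, three
# consecutive points with fps = [0, 0, 0] and tps = [0, 1, 2] lie on a single
# vertical segment, so the middle point is removed without changing the curve
# or its AUC.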
def label_ranking_average_precision_score(y_true, y_score):
"""Compute ranking-based average precision
Label ranking average precision (LRAP) is the average over each ground
truth label assigned to each sample, of the ratio of true vs. total
labels with lower score.
This metric is used in multilabel ranking problem, where the goal
is to give better rank to the labels associated to each sample.
The obtained score is always strictly greater than 0 and
the best value is 1.
Read more in the :ref:`User Guide <label_ranking_average_precision>`.
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
Returns
-------
score : float
Examples
--------
>>> import numpy as np
>>> from sklearn.metrics import label_ranking_average_precision_score
>>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
>>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
>>> label_ranking_average_precision_score(y_true, y_score) \
# doctest: +ELLIPSIS
0.416...
"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
    # Handle badly formatted arrays and the degenerate case with one label
y_type = type_of_target(y_true)
if (y_type != "multilabel-indicator" and
not (y_type == "binary" and y_true.ndim == 2)):
raise ValueError("{0} format is not supported".format(y_type))
y_true = csr_matrix(y_true)
y_score = -y_score
n_samples, n_labels = y_true.shape
out = 0.
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
relevant = y_true.indices[start:stop]
if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or irrelevant, the score is also
# equal to 1. The label ranking has no meaning.
out += 1.
continue
scores_i = y_score[i]
rank = rankdata(scores_i, 'max')[relevant]
L = rankdata(scores_i[relevant], 'max')
out += (L / rank).mean()
return out / n_samples
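# Worked illustration (added commentary, not from the original module) of the
# docstring example above: for the first sample the single relevant label has
# the 2nd highest score, giving a precision of 1/2; for the second sample the
# relevant label ranks 3rd of 3, giving 1/3. The mean is
# (1/2 + 1/3) / 2 = 0.41666..., matching the 0.416... shown above.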
def coverage_error(y_true, y_score, sample_weight=None):
"""Coverage error measure
Compute how far we need to go through the ranked scores to cover all
true labels. The best value is equal to the average number
of labels in ``y_true`` per sample.
Ties in ``y_scores`` are broken by giving maximal rank that would have
been assigned to all tied values.
Read more in the :ref:`User Guide <coverage_error>`.
Parameters
----------
y_true : array, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
coverage_error : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False)
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type != "multilabel-indicator":
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
coverage = (y_score >= y_min_relevant).sum(axis=1)
coverage = coverage.filled(0)
return np.average(coverage, weights=sample_weight)
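# NOTE (added illustration, not from the original module): coverage is, per
# sample, the number of top-scored labels that must be taken before all true
# labels are included. A minimal sketch with hypothetical data:
#
#     import numpy as np
#     from sklearn.metrics import coverage_error
#     y_true = np.array([[1, 0, 0], [0, 0, 1]])
#     y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
#     coverage_error(y_true, y_score)
#     # -> 2.5  (2 labels cover the first sample, 3 the second)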
def label_ranking_loss(y_true, y_score, sample_weight=None):
"""Compute Ranking loss measure
Compute the average number of label pairs that are incorrectly ordered
given y_score weighted by the size of the label set and the number of
labels not in the label set.
This is similar to the error set size, but weighted by the number of
relevant and irrelevant labels. The best performance is achieved with
a ranking loss of zero.
Read more in the :ref:`User Guide <label_ranking_loss>`.
.. versionadded:: 0.17
A function *label_ranking_loss*
Parameters
----------
y_true : array or sparse matrix, shape = [n_samples, n_labels]
True binary labels in binary indicator format.
y_score : array, shape = [n_samples, n_labels]
Target scores, can either be probability estimates of the positive
class, confidence values, or non-thresholded measure of decisions
(as returned by "decision_function" on some classifiers).
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
Returns
-------
loss : float
References
----------
.. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
Mining multi-label data. In Data mining and knowledge discovery
handbook (pp. 667-685). Springer US.
"""
y_true = check_array(y_true, ensure_2d=False, accept_sparse='csr')
y_score = check_array(y_score, ensure_2d=False)
check_consistent_length(y_true, y_score, sample_weight)
y_type = type_of_target(y_true)
if y_type not in ("multilabel-indicator",):
raise ValueError("{0} format is not supported".format(y_type))
if y_true.shape != y_score.shape:
raise ValueError("y_true and y_score have different shape")
n_samples, n_labels = y_true.shape
y_true = csr_matrix(y_true)
loss = np.zeros(n_samples)
for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
# Sort and bin the label scores
unique_scores, unique_inverse = np.unique(y_score[i],
return_inverse=True)
true_at_reversed_rank = bincount(
unique_inverse[y_true.indices[start:stop]],
minlength=len(unique_scores))
all_at_reversed_rank = bincount(unique_inverse,
minlength=len(unique_scores))
false_at_reversed_rank = all_at_reversed_rank - true_at_reversed_rank
# if the scores are ordered, it's possible to count the number of
        # incorrectly ordered pairs in linear time by cumulatively counting
# how many false labels of a given score have a score higher than the
# accumulated true labels with lower score.
loss[i] = np.dot(true_at_reversed_rank.cumsum(),
false_at_reversed_rank)
n_positives = count_nonzero(y_true, axis=1)
with np.errstate(divide="ignore", invalid="ignore"):
loss /= ((n_labels - n_positives) * n_positives)
    # When there are no positive or no negative labels, those values should
    # be considered correct, i.e. the ranking doesn't matter.
loss[np.logical_or(n_positives == 0, n_positives == n_labels)] = 0.
return np.average(loss, weights=sample_weight)
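# NOTE (added illustration, not from the original module): the loss is the
# fraction of (relevant, irrelevant) label pairs that y_score orders
# incorrectly, averaged over samples. A minimal sketch with hypothetical data:
#
#     import numpy as np
#     from sklearn.metrics import label_ranking_loss
#     y_true = np.array([[1, 0, 0], [0, 0, 1]])
#     y_score = np.array([[0.75, 0.5, 1.0], [1.0, 0.2, 0.1]])
#     label_ranking_loss(y_true, y_score)
#     # -> 0.75  (1 of 2 pairs wrong for sample 1, 2 of 2 for sample 2)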
| bsd-3-clause |
akrherz/iem | htdocs/plotting/auto/scripts100/p127.py | 1 | 5281 | """Plot or Harvest Progress"""
import calendar
import numpy as np
from pandas.io.sql import read_sql
from matplotlib import ticker
from pyiem.plot import figure_axes
from pyiem.plot import get_cmap
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.reference import state_names
from pyiem.exceptions import NoDataFound
PDICT = {
"CPR": "CORN - PROGRESS, MEASURED IN PCT SEEDBED PREPARED",
"CP": "CORN - PROGRESS, MEASURED IN PCT PLANTED",
"CE": "CORN - PROGRESS, MEASURED IN PCT EMERGED",
"CS": "CORN - PROGRESS, MEASURED IN PCT SILKING",
"CMI": "CORN - PROGRESS, MEASURED IN PCT MILK",
"CD": "CORN - PROGRESS, MEASURED IN PCT DENTED",
"CDO": "CORN - PROGRESS, MEASURED IN PCT DOUGH",
"CMA": "CORN - PROGRESS, MEASURED IN PCT MATURE",
"CH": "CORN, GRAIN - PROGRESS, MEASURED IN PCT HARVESTED",
"CSH": "CORN, SILAGE - PROGRESS, MEASURED IN PCT HARVESTED",
"SPR": "SOYBEANS - PROGRESS, MEASURED IN PCT SEEDBED PREPARED",
"SP": "SOYBEANS - PROGRESS, MEASURED IN PCT PLANTED",
"SE": "SOYBEANS - PROGRESS, MEASURED IN PCT EMERGED",
"SPO": "SOYBEANS - PROGRESS, MEASURED IN PCT FULLY PODDED",
"SB": "SOYBEANS - PROGRESS, MEASURED IN PCT BLOOMING",
"SM": "SOYBEANS - PROGRESS, MEASURED IN PCT MATURE",
"SL": "SOYBEANS - PROGRESS, MEASURED IN PCT DROPPING LEAVES",
"SS": "SOYBEANS - PROGRESS, MEASURED IN PCT SETTING PODS",
"SC": "SOYBEANS - PROGRESS, MEASURED IN PCT COLORING",
"SH": "SOYBEANS - PROGRESS, MEASURED IN PCT HARVESTED",
}
def get_description():
"""Return a dict describing how to call this plotter"""
desc = dict()
desc["data"] = True
desc["nass"] = True
desc[
"description"
] = """This chart presents the crop progress by year.
The most recent value for the current year is denoted on each of the
previous years on record.
<p><strong>Updated 15 June 2021</strong>: The options for this autoplot
    were changed and are not backwards compatible with previous URIs, sorry.</p>
"""
desc["arguments"] = [
dict(type="state", name="state", default="IA", label="Select State:"),
dict(
type="select",
name="short_desc",
default="CH",
options=PDICT,
label="Which Statistical Category?",
),
dict(type="cmap", name="cmap", default="jet", label="Color Ramp:"),
]
return desc
def plotter(fdict):
"""Go"""
pgconn = get_dbconn("coop")
ctx = get_autoplot_context(fdict, get_description())
state = ctx["state"][:2]
short_desc = PDICT[ctx["short_desc"].upper()]
df = read_sql(
"""
select year, week_ending, num_value,
extract(doy from week_ending)::int as day_of_year from nass_quickstats
where short_desc = %s and state_alpha = %s and num_value is not null
ORDER by week_ending ASC
""",
pgconn,
params=(short_desc, state),
index_col=None,
)
if df.empty:
raise NoDataFound("ERROR: No data found!")
df["yeari"] = df["year"] - df["year"].min()
year0 = int(df["year"].min())
lastyear = int(df["year"].max())
title = (
"%s %s Progress\n"
"USDA NASS %i-%i -- Daily Linear Interpolated Values "
"Between Weekly Reports"
) % (
state_names[state],
short_desc,
year0,
lastyear,
)
(fig, ax) = figure_axes(title=title)
data = np.ma.ones((df["yeari"].max() + 1, 366), "f") * -1
data.mask = np.where(data == -1, True, False)
lastrow = None
for _, row in df.iterrows():
if lastrow is None:
lastrow = row
continue
date = row["week_ending"]
ldate = lastrow["week_ending"]
val = int(row["num_value"])
lval = int(lastrow["num_value"])
d0 = int(ldate.strftime("%j"))
d1 = int(date.strftime("%j"))
if ldate.year == date.year:
delta = (val - lval) / float(d1 - d0)
for i, jday in enumerate(range(d0, d1 + 1)):
data[date.year - year0, jday] = lval + i * delta
else:
data[ldate.year - year0, d0:] = 100
lastrow = row
dlast = np.max(data[-1, :])
for year in range(year0, lastyear):
idx = np.digitize([dlast], data[year - year0, :])
ax.text(idx[0], year, "X", va="center", zorder=2, color="white")
cmap = get_cmap(ctx["cmap"])
res = ax.imshow(
data,
extent=[1, 367, lastyear + 0.5, year0 - 0.5],
aspect="auto",
interpolation="none",
cmap=cmap,
)
fig.colorbar(res)
ax.set_xticks((1, 32, 60, 91, 121, 152, 182, 213, 244, 274, 305, 335))
ax.set_xticklabels(calendar.month_abbr[1:])
# We need to compute the domain of this plot
maxv = np.max(data, 0)
minv = np.min(data, 0)
ax.set_xlim(np.argmax(maxv > 0) - 7, np.argmax(minv > 99) + 7)
ax.set_ylim(lastyear + 0.5, year0 - 0.5)
ax.yaxis.set_major_locator(ticker.MaxNLocator(integer=True))
ax.grid(True)
lastweek = df["week_ending"].max()
ax.set_xlabel(
"X denotes %s value of %.0f%%" % (lastweek.strftime("%d %b %Y"), dlast)
)
return fig, df
if __name__ == "__main__":
plotter(dict(unit_desc="CP"))
| mit |
johnmgregoire/NanoCalorimetry | BatchPlotACsingleexphpseg.py | 1 | 3756 | import os
from PnSC_h5io import *
import time, copy
import os
import sys
import numpy
import h5py
import pylab
from matplotlib.ticker import FuncFormatter
import scipy.integrate
from scipy.interpolate import griddata
import matplotlib.cm as cm
from matplotlib.ticker import FuncFormatter
def myexpformat(x, pos):
for ndigs in range(5):
lab=(('%.'+'%d' %ndigs+'e') %x).replace('e+0','e').replace('e+','e').replace('e0','').replace('e-0','e-')
print lab, eval(lab), x
if eval(lab)==x:
return lab
return lab
ExpTickLabels=FuncFormatter(myexpformat)
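# NOTE (added commentary, not from the original script): myexpformat returns
# the shortest exponent label that still evaluates back to the tick value,
# e.g. 0.0001 is rendered as '1e-4'. ExpTickLabels can be attached to an
# axis, for example: ax.yaxis.set_major_formatter(ExpTickLabels)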
#enter the h5 path here and note that in Python you should only use "/" and not "\"
p='E:/pnscpythntraining/trainingcpy.h5'
#select the segment index that you want plotted - only one segment at a time
seg=4
#select the experiment group and heat program
exp='AC'
hp='cell24_20.4to71.8to12.2dc_20.4to71.8to12.2ac_120ppc_8s8s_again2_1_of_1'
#enter the time range for the plots in seconds
tlims=(0, 7.8)
#enter the plotting interval - the program will plot a subset of the data with this interval. If there are >100,000 data points you should probably use an interval to make plotting faster. (must be integer)
interv=6
#choose the temperatures in C for the tick labels on the temperature axis
Tmks=[100, 200, 300, 400, 500, 600]
f=h5py.File(p, mode='r')
cyc=0
d=CreateHeatProgSegDictList(p, exp, hp, expandmultdim=False)[seg]
for k in d.keys():
if k!='cycletime' and isinstance(d[k], numpy.ndarray) and d[k].shape[:2]==d['cycletime'].shape:
d[k]=d[k][cyc][::interv]
k='cycletime'
d[k]=d[k][cyc][::interv]
t=d['cycletime']
t-=t[0]
T=d['sampletemperature']
Tmklabs=['%d' %x for x in Tmks]
#the first plot is temperature vs time along with a polynomial fit for calculating the temperature tick mark positions. If the fit doesn't look good, try changing these parameters.
fitinterv=200
Tfitpolyorder=4
t_Tfit=numpy.polyfit(T[::fitinterv], t[::fitinterv], Tfitpolyorder)
tmks=[numpy.polyval(t_Tfit, x) for x in Tmks]
ms=1
subadjdict={'left':.2, 'bottom':.17, 'right':.92, 'top':.85}
figsize=(5, 4)
if 1:
pylab.figure(figsize=figsize)
pylab.plot(t, T, 'b.', ms=ms)
pylab.xlabel('elapsed time (s)')
pylab.ylabel('Temperature (C)')
pylab.plot(tmks, Tmks, 'ro')
pylab.xlim(tlims)
pylab.subplots_adjust(**subadjdict)
if 0:
pylab.show()
def plot1(x=t, yk='sampletemperature',fmt='b.', ms=ms, mult=1., xlab='elapsed time (s)', ylab='Temperature (C)', \
xlims=tlims, Ttopmks=(tmks, Tmklabs), subadjdict=subadjdict, figsize=figsize):
pylab.figure(figsize=figsize)
if isinstance(yk, str):
pylab.plot(x, d[yk]*mult, fmt, ms=ms)
else:
pylab.plot(x, yk*mult, fmt, ms=ms)
pylab.xlabel(xlab)
pylab.ylabel(ylab)
pylab.xlim(xlims)
if not Ttopmks is None:
ax2=pylab.twiny()
ax2.set_xticks(Ttopmks[0])
ax2.set_xticklabels(Ttopmks[1])
ax2.set_xlabel('Temperature (C)')
ax2.set_xlim(xlims)
pylab.subplots_adjust(**subadjdict)
def plotamp(yk, freqind, **kwargs):
plot1(yk=(d[yk][:, freqind, :]**2).sum(axis=1), **kwargs)
plot1(Ttopmks=None)
plot1(yk='sampleheatrate', ylab='Heat rate (K/s)')
pylab.show()
plot1(yk='samplepowerperrate', ylab='Power / dT/dt ($\mu$J/K)', mult=1.e6)
plotamp('WinFFT_voltage', 3, ylab='1$\omega$ amplitude (V)', mult=1.)
plotamp('WinFFT_voltage', 6, ylab='2$\omega$ amplitude (mV)', mult=1.e3)
plotamp('WinFFT_filteredvoltage', 6, ylab='filtered 2$\omega$ amplitude (mV)', mult=1.e3)
plot1(yk='acheatcapacity', ylab='mC$_p$, 2$\omega$ method ($\mu$J/K)', mult=1.e6)
plot1(yk='acheatcapacity_1', ylab='mC$_p$, filtered 2$\omega$ method ($\mu$J/K)', mult=1.e6)
pylab.show()
| bsd-3-clause |