repo_name | path | copies | size | content | license
---|---|---|---|---|---
tsgit/invenio | modules/bibauthorid/lib/bibauthorid_tortoise.py | 5 | 16153 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2011, 2012 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
import os
# import cPickle as SER
import msgpack as SER
import gzip as filehandler
# This is supposed to offset some of the Python VM's performance overhead:
import sys
sys.setcheckinterval(1000000)
try:
from collections import defaultdict
except:
from invenio.containerutils import defaultdict
from invenio.bibauthorid_logutils import Logger
from invenio.bibauthorid_cluster_set import delayed_cluster_sets_from_marktables
from invenio.bibauthorid_cluster_set import delayed_cluster_sets_from_personid
from invenio.bibauthorid_wedge import wedge
from invenio.bibauthorid_name_utils import generate_last_name_cluster_str
from invenio.bibauthorid_backinterface import empty_tortoise_results_table
from invenio.bibauthorid_backinterface import remove_clusters_by_name
from invenio.bibauthorid_prob_matrix import prepare_matrix
# Scheduler is [temporarily] deprecated in favour of the much simpler schedule_workers
# from invenio.bibauthorid_scheduler import schedule, matrix_coefs
from invenio.bibauthorid_general_utils import schedule_workers
logger = Logger("tortoise")
'''
There are three main entry points to tortoise
i) tortoise
Performs disambiguation iteration.
The argument pure indicates whether to use
the claims and the rejections or not.
Use pure=True only to test the accuracy of tortoise.
ii) tortoise_from_scratch
NOT RECOMMENDED!
Use this function only if you have just
installed invenio and this is your first
disambiguation or if personid is broken.
iii) tortoise_last_name
Computes the clusters for only one last name
group. It is primarily used for testing. It
may also be used to fix a broken last name
cluster. It does not involve multiprocessing,
so it is convenient to debug with pdb.
'''
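# The sketch below is illustrative only and is not part of the original module:
# it shows how the three entry points documented above might be invoked, using
# the call signatures defined later in this file. The last name 'Ellis' is a
# hypothetical placeholder. The calls are wrapped in a function so that merely
# importing the module does not trigger a disambiguation run.
def _usage_sketch():
    # Regular disambiguation iteration on top of existing personid data.
    tortoise(pure=False)
    # First-ever disambiguation (or personid is broken) -- see the warning above.
    tortoise_from_scratch()
    # Recompute a single last-name cluster, e.g. to debug with pdb.
    tortoise_last_name('Ellis', from_mark=True)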
# Exit codes:
# The standard ones are not well documented
# so we are using random numbers.
def tortoise_from_scratch():
logger.log("Preparing cluster sets.")
cluster_sets, _lnames, sizes = delayed_cluster_sets_from_marktables()
logger.log("Building all matrices.")
cluster_sets = [(s,) for s in cluster_sets]
schedule_workers(lambda x: force_create_matrix(x, force=True), cluster_sets)
empty_tortoise_results_table()
logger.log("Preparing cluster sets.")
cluster_sets, _lnames, sizes = delayed_cluster_sets_from_marktables()
cluster_sets = [(s(),) for s in cluster_sets]
logger.log("Starting disambiguation.")
schedule_workers(wedge_and_store, cluster_sets)
def tortoise(pure=False,
force_matrix_creation=False,
skip_matrix_creation=False,
last_run=None):
assert not force_matrix_creation or not skip_matrix_creation
# The computation must be forced in case we want
# to compute pure results
force_matrix_creation = force_matrix_creation or pure
if not skip_matrix_creation:
logger.log("Preparing cluster sets.")
clusters, _lnames, sizes = delayed_cluster_sets_from_personid(pure, last_run)
logger.log("Building all matrices.")
clusters = [(s,) for s in clusters]
schedule_workers(lambda x: force_create_matrix(x, force=force_matrix_creation), clusters)
logger.log("Preparing cluster sets.")
clusters, _lnames, sizes = delayed_cluster_sets_from_personid(pure, last_run)
clusters = [(s(),) for s in clusters]
logger.log("Starting disambiguation.")
schedule_workers(wedge_and_store, clusters)
def tortoise_last_name(name, wedge_threshold=None, from_mark=True, pure=False):
logger.log('Start working on %s' % name)
assert not(from_mark and pure)
lname = generate_last_name_cluster_str(name)
if from_mark:
logger.log(' ... from mark!')
clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
logger.log(' ... delayed done')
else:
logger.log(' ... from pid, pure=%s' % str(pure))
clusters, lnames, sizes = delayed_cluster_sets_from_personid(pure)
logger.log(' ... delayed pure done!')
try:
idx = lnames.index(lname)
cluster = clusters[idx]
size = sizes[idx]
cluster_set = cluster()
logger.log("Found, %s(%s). Total number of bibs: %d." % (name, lname, size))
create_matrix(cluster_set, False)
wedge_and_store(cluster_set)
except (IndexError, ValueError):
logger.log("Sorry, %s not found in the last name clusters" % (lname))
def tortoise_last_names(names_args_list):
schedule_workers(tortoise_last_name, names_args_list, with_kwargs=True)
def _collect_statistics_lname_coeff(params):
lname = params[0]
coeff = params[1]
clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
try:
idx = lnames.index(lname)
cluster = clusters[idx]
size = sizes[idx]
logger.log("Found, %s. Total number of bibs: %d." % (lname, size))
cluster_set = cluster()
create_matrix(cluster_set, False)
bibs = cluster_set.num_all_bibs
expected = bibs * (bibs - 1) / 2
logger.log("Start working on %s. Total number of bibs: %d, "
"maximum number of comparisons: %d"
% (cluster_set.last_name, bibs, expected))
wedge(cluster_set, True, coeff)
remove_clusters_by_name(cluster_set.last_name)
except (IndexError, ValueError):
logger.log("Sorry, %s not found in the last name clusters," % (lname))
def _create_matrix(lname):
clusters, lnames, sizes = delayed_cluster_sets_from_marktables([lname])
try:
idx = lnames.index(lname)
cluster = clusters[idx]
size = sizes[idx]
logger.log("Found, %s. Total number of bibs: %d." % (lname, size))
cluster_set = cluster()
create_matrix(cluster_set, False)
bibs = cluster_set.num_all_bibs
expected = bibs * (bibs - 1) / 2
logger.log("Start working on %s. Total number of bibs: %d, "
"maximum number of comparisons: %d"
% (cluster_set.last_name, bibs, expected))
cluster_set.store()
except (IndexError, ValueError):
logger.log("Sorry, %s not found in the last name clusters, not creating matrix" % (lname))
def tortoise_tweak_coefficient(lastnames, min_coef, max_coef, stepping, build_matrix=True):
logger.log('Coefficient tweaking!')
logger.log('Cluster sets from mark...')
lnames = set([generate_last_name_cluster_str(n) for n in lastnames])
coefficients = [x / 100. for x in range(int(min_coef * 100), int(max_coef * 100), int(stepping * 100))]
if build_matrix:
schedule_workers(_create_matrix, lnames)
schedule_workers(_collect_statistics_lname_coeff, ((x, y) for x in lnames for y in coefficients))
def tortoise_coefficient_statistics(pickle_output=None, generate_graphs=True):
import matplotlib.pyplot as plt
plt.ioff()
def _gen_plot(data, filename):
plt.clf()
ax = plt.subplot(111)
ax.grid(visible=True)
x = sorted(data.keys())
w = [data[k][0] for k in x]
try:
wscf = max(w)
except:
wscf = 0
w = [float(i) / wscf for i in w]
y = [data[k][1] for k in x]
maxi = [data[k][3] for k in x]
mini = [data[k][2] for k in x]
lengs = [data[k][4] for k in x]
try:
ml = float(max(lengs))
except:
ml = 1
lengs = [k / ml for k in lengs]
normalengs = [data[k][5] for k in x]
ax.plot(x, y, '-o', label='avg')
ax.plot(x, maxi, '-o', label='max')
ax.plot(x, mini, '-o', label='min')
ax.plot(x, w, '-x', label='norm %s' % str(wscf))
ax.plot(x, lengs, '-o', label='acl %s' % str(int(ml)))
ax.plot(x, normalengs, '-o', label='ncl')
plt.ylim(ymax=1., ymin=-0.01)
plt.xlim(xmax=1., xmin=-0.01)
ax.legend(bbox_to_anchor=(0., 1.02, 1., .102), loc=3, ncol=6, mode="expand", borderaxespad=0.)
plt.savefig(filename)
files = ['/tmp/baistats/' + x for x in os.listdir('/tmp/baistats/') if x.startswith('cluster_status_report_pid')]
fnum = float(len(files))
quanta = .1 / fnum
total_stats = 0
used_coeffs = set()
used_clusters = set()
# av_counter, avg, min, max, nclus, normalized_avg
cluster_stats = defaultdict(lambda: defaultdict(lambda: [0., 0., 0., 0., 0., 0.]))
coeff_stats = defaultdict(lambda: [0., 0., 0., 0., 0., 0.])
def gen_graphs(only_synthetic=False):
logger.update_status(0, 'Generating coefficients graph...')
_gen_plot(coeff_stats, '/tmp/graphs/AAAAA-coefficients.svg')
if not only_synthetic:
cn = cluster_stats.keys()
l = float(len(cn))
for i, c in enumerate(cn):
logger.update_status(i / l, 'Generating name graphs... %s' % str(c))
_gen_plot(cluster_stats[c], '/tmp/graphs/CS-%s.png' % str(c))
for i, fi in enumerate(files):
if generate_graphs:
if i % 1000 == 0:
gen_graphs(True)
f = filehandler.open(fi, 'r')
status = i / fnum
logger.update_status(status, 'Loading ' + fi[fi.find('lastname') + 9:])
contents = SER.load(f)
f.close()
cur_coef = contents[0]
cur_clust = contents[1]
cur_maxlen = float(contents[3])
if cur_coef:
total_stats += 1
used_coeffs.add(cur_coef)
used_clusters.add(cur_clust)
logger.update_status(status + 0.2 * quanta, ' Computing averages...')
cur_clen = len(contents[2])
cur_coeffs = [x[2] for x in contents[2]]
cur_clustnumber = float(len(set([x[0] for x in contents[2]])))
assert cur_clustnumber > 0 and cur_clustnumber < cur_maxlen, "Error, found log with strange clustnumber! %s %s %s %s" % (str(cur_clust), str(cur_coef), str(cur_maxlen),
str(cur_clustnumber))
if cur_coeffs:
assert len(cur_coeffs) == cur_clen and cur_coeffs, "Error, there is a cluster without coefficients? %s %s %s" % (
str(cur_clust), str(cur_coef), str(cur_coeffs))
assert all([x >= 0 and x <= 1 for x in cur_coeffs]), "Error, a coefficient is wrong here! Check me! %s %s %s" % (
str(cur_clust), str(cur_coef), str(cur_coeffs))
cur_min = min(cur_coeffs)
cur_max = max(cur_coeffs)
cur_avg = sum(cur_coeffs) / cur_clen
logger.update_status(status + 0.4 * quanta, ' cumulative per coeff...')
avi = coeff_stats[cur_coef][0]
# number of points
coeff_stats[cur_coef][0] = avi + 1
# average of coefficients
coeff_stats[cur_coef][1] = (coeff_stats[cur_coef][1] * avi + cur_avg) / (avi + 1)
# min coeff
coeff_stats[cur_coef][2] = min(coeff_stats[cur_coef][2], cur_min)
# max coeff
coeff_stats[cur_coef][3] = max(coeff_stats[cur_coef][3], cur_max)
# avg number of clusters
coeff_stats[cur_coef][4] = (coeff_stats[cur_coef][4] * avi + cur_clustnumber) / (avi + 1)
# normalized avg number of clusters
coeff_stats[cur_coef][5] = (coeff_stats[cur_coef][5] * avi + cur_clustnumber / cur_maxlen) / (avi + 1)
logger.update_status(status + 0.6 * quanta, ' cumulative per cluster per coeff...')
avi = cluster_stats[cur_clust][cur_coef][0]
cluster_stats[cur_clust][cur_coef][0] = avi + 1
cluster_stats[cur_clust][cur_coef][1] = (
cluster_stats[cur_clust][cur_coef][1] * avi + cur_avg) / (avi + 1)
cluster_stats[cur_clust][cur_coef][2] = min(cluster_stats[cur_clust][cur_coef][2], cur_min)
cluster_stats[cur_clust][cur_coef][3] = max(cluster_stats[cur_clust][cur_coef][3], cur_max)
cluster_stats[cur_clust][cur_coef][4] = (
cluster_stats[cur_clust][cur_coef][4] * avi + cur_clustnumber) / (avi + 1)
cluster_stats[cur_clust][cur_coef][5] = (
cluster_stats[cur_clust][cur_coef][5] * avi + cur_clustnumber / cur_maxlen) / (avi + 1)
logger.update_status_final('Done!')
if generate_graphs:
gen_graphs()
if pickle_output:
logger.update_status(0, 'Dumping to file...')
f = open(pickle_output, 'w')
SER.dump(
{'cluster_stats': dict((x,
dict(cluster_stats[x])) for x in cluster_stats.iterkeys()),
'coeff_stats': dict((coeff_stats))},
f)
f.close()
def create_matrix(cluster_set, force):
bibs = cluster_set.num_all_bibs
expected = bibs * (bibs - 1) / 2
logger.log("Start building matrix for %s. Total number of bibs: %d, "
"maximum number of comparisons: %d"
% (cluster_set.last_name, bibs, expected))
return prepare_matrix(cluster_set, force)
def force_create_matrix(cluster_set, force):
logger.log("Building a cluster set.")
return create_matrix(cluster_set(), force)
def wedge_and_store(cluster_set, wedge_threshold=None):
bibs = cluster_set.num_all_bibs
expected = bibs * (bibs - 1) / 2
logger.log("Start working on %s. Total number of bibs: %d, "
"maximum number of comparisons: %d"
% (cluster_set.last_name, bibs, expected))
wedge(cluster_set, force_wedge_thrsh=wedge_threshold)
remove_clusters_by_name(cluster_set.last_name)
cluster_set.store()
return True
def force_wedge_and_store(cluster_set):
logger.log("Building a cluster set.")
return wedge_and_store(cluster_set())
#[temporarily] deprecated
# def schedule_create_matrix(cluster_sets, sizes, force):
# def create_job(cluster):
# def ret():
# return force_create_matrix(cluster, force)
# return ret
#
# memfile_path = None
# if bconfig.DEBUG_PROCESS_PEAK_MEMORY:
# tt = datetime.now()
# tt = (tt.hour, tt.minute, tt.day, tt.month, tt.year)
# memfile_path = ('%smatrix_memory_%d:%d_%d-%d-%d.log' %
# ((bconfig.TORTOISE_FILES_PATH,) + tt))
#
# return schedule(map(create_job, cluster_sets),
# sizes,
# create_approx_func(matrix_coefs),
# memfile_path)
#
#
# def schedule_wedge_and_store(cluster_sets, sizes):
# def create_job(cluster):
# def ret():
# return force_wedge_and_store(cluster)
# return ret
#
# memfile_path = None
# if bconfig.DEBUG_PROCESS_PEAK_MEMORY:
# tt = datetime.now()
# tt = (tt.hour, tt.minute, tt.day, tt.month, tt.year)
# memfile_path = ('%swedge_memory_%d:%d_%d-%d-%d.log' %
# ((bconfig.TORTOISE_FILES_PATH,) + tt))
#
# return schedule(map(create_job, cluster_sets),
# sizes,
# create_approx_func(matrix_coefs),
# memfile_path)
| gpl-2.0 |
poryfly/scikit-learn | benchmarks/bench_plot_approximate_neighbors.py | 244 | 6011 | """
Benchmark for approximate nearest neighbor search using
locality sensitive hashing forest.
There are two types of benchmarks.
First, the accuracy of LSHForest queries is measured for various
hyper-parameters and index sizes.
Second, the speed-up of LSHForest queries compared to the brute-force
method of exact nearest neighbors is measured for the
aforementioned settings. In general, the speed-up increases as
the index size grows.
"""
from __future__ import division
import numpy as np
from tempfile import gettempdir
from time import time
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors.approximate import LSHForest
from sklearn.datasets import make_blobs
from sklearn.externals.joblib import Memory
m = Memory(cachedir=gettempdir())
@m.cache()
def make_data(n_samples, n_features, n_queries, random_state=0):
"""Create index and query data."""
print('Generating random blob-ish data')
X, _ = make_blobs(n_samples=n_samples + n_queries,
n_features=n_features, centers=100,
shuffle=True, random_state=random_state)
# Keep the last samples as held out query vectors: note since we used
# shuffle=True we have ensured that index and query vectors are
# samples from the same distribution (a mixture of 100 gaussians in this
# case)
return X[:n_samples], X[n_samples:]
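# Illustrative helper (not part of the original benchmark): the "accuracy"
# reported below is the mean per-query recall of the approximate neighbors
# against the exact ones, exactly as computed inline in calc_accuracy. This
# is only a self-contained restatement of that metric.
def recall_of_approximate_neighbors(approx_neighbors, exact_neighbors):
    """Mean fraction of exact neighbors recovered by the approximate query."""
    per_query = [np.in1d(a, e).mean()
                 for a, e in zip(approx_neighbors, exact_neighbors)]
    return float(np.mean(per_query))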
def calc_exact_neighbors(X, queries, n_queries, n_neighbors):
"""Measures average times for exact neighbor queries."""
print ('Building NearestNeighbors for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
nbrs = NearestNeighbors(algorithm='brute', metric='cosine').fit(X)
average_time = 0
t0 = time()
neighbors = nbrs.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time = (time() - t0) / n_queries
return neighbors, average_time
def calc_accuracy(X, queries, n_queries, n_neighbors, exact_neighbors,
average_time_exact, **lshf_params):
"""Calculates accuracy and the speed up of LSHForest."""
print('Building LSHForest for %d samples in %d dimensions' %
(X.shape[0], X.shape[1]))
lshf = LSHForest(**lshf_params)
t0 = time()
lshf.fit(X)
lshf_build_time = time() - t0
print('Done in %0.3fs' % lshf_build_time)
accuracy = 0
t0 = time()
approx_neighbors = lshf.kneighbors(queries, n_neighbors=n_neighbors,
return_distance=False)
average_time_approx = (time() - t0) / n_queries
for i in range(len(queries)):
accuracy += np.in1d(approx_neighbors[i], exact_neighbors[i]).mean()
accuracy /= n_queries
speed_up = average_time_exact / average_time_approx
print('Average time for lshf neighbor queries: %0.3fs' %
average_time_approx)
print ('Average time for exact neighbor queries: %0.3fs' %
average_time_exact)
print ('Average Accuracy : %0.2f' % accuracy)
print ('Speed up: %0.1fx' % speed_up)
return speed_up, accuracy
if __name__ == '__main__':
import matplotlib.pyplot as plt
# Initialize index sizes
n_samples = [int(1e3), int(1e4), int(1e5), int(1e6)]
n_features = int(1e2)
n_queries = 100
n_neighbors = 10
X_index, X_query = make_data(np.max(n_samples), n_features, n_queries,
random_state=0)
params_list = [{'n_estimators': 3, 'n_candidates': 50},
{'n_estimators': 5, 'n_candidates': 70},
{'n_estimators': 10, 'n_candidates': 100}]
accuracies = np.zeros((len(n_samples), len(params_list)), dtype=float)
speed_ups = np.zeros((len(n_samples), len(params_list)), dtype=float)
for i, sample_size in enumerate(n_samples):
print ('==========================================================')
print ('Sample size: %i' % sample_size)
print ('------------------------')
exact_neighbors, average_time_exact = calc_exact_neighbors(
X_index[:sample_size], X_query, n_queries, n_neighbors)
for j, params in enumerate(params_list):
print ('LSHF parameters: n_estimators = %i, n_candidates = %i' %
(params['n_estimators'], params['n_candidates']))
speed_ups[i, j], accuracies[i, j] = calc_accuracy(
X_index[:sample_size], X_query, n_queries, n_neighbors,
exact_neighbors, average_time_exact, random_state=0, **params)
print ('')
print ('==========================================================')
# Set labels for LSHForest parameters
colors = ['c', 'm', 'y']
legend_rects = [plt.Rectangle((0, 0), 0.1, 0.1, fc=color)
for color in colors]
legend_labels = ['n_estimators={n_estimators}, '
'n_candidates={n_candidates}'.format(**p)
for p in params_list]
# Plot precision
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, accuracies[:, i], c=colors[i])
plt.plot(n_samples, accuracies[:, i], c=colors[i])
plt.ylim([0, 1.3])
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Precision@10")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Precision of first 10 neighbors with index size")
# Plot speed up
plt.figure()
plt.legend(legend_rects, legend_labels,
loc='upper left')
for i in range(len(params_list)):
plt.scatter(n_samples, speed_ups[:, i], c=colors[i])
plt.plot(n_samples, speed_ups[:, i], c=colors[i])
plt.ylim(0, np.max(speed_ups))
plt.xlim(np.min(n_samples), np.max(n_samples))
plt.semilogx()
plt.ylabel("Speed up")
plt.xlabel("Index size")
plt.grid(which='both')
plt.title("Relationship between Speed up and index size")
plt.show()
| bsd-3-clause |
PMBio/limix | limix/scripts/iSet_postprocess.py | 1 | 2451 | #! /usr/bin/env python
# Copyright(c) 2014, The mtSet developers (Francesco Paolo Casale, Barbara Rakitsch, Oliver Stegle)
# All rights reserved.
from optparse import OptionParser
from limix.mtSet.core.iset_utils import calc_emp_pv_eff
import pandas as pd
import glob
import os
import time
import sys
def entry_point():
parser = OptionParser()
parser.add_option("--resdir", dest='resdir', type=str, default='./')
parser.add_option("--outfile", dest='outfile', type=str, default=None)
#parser.add_option("--manhattan_plot", dest='manhattan',action="store_true",default=False)
parser.add_option("--tol", dest='tol', type=float, default=4e-3)
(options, args) = parser.parse_args()
resdir = options.resdir
out_file = options.outfile
tol = options.tol
print('.. load permutation results')
file_name = os.path.join(resdir, '*.iSet.perm')
files = glob.glob(file_name)
df0 = pd.DataFrame()
for _file in files:
print(_file)
df0 = df0.append(pd.read_csv(_file, index_col=0))
print('.. load real results')
file_name = os.path.join(resdir, '*.iSet.real')
files = glob.glob(file_name)
df = pd.DataFrame()
for _file in files:
print(_file)
df = df.append(pd.read_csv(_file, index_col=0))
#calculate P values for the three tests
for test in ['mtSet', 'iSet', 'iSet-het']:
df[test+' pv'] = calc_emp_pv_eff(df[test+' LLR'].values,
df0[test+' LLR0'].values)
print(('.. saving %s' % out_file+'.res'))
df.to_csv(out_file+'.res')
if 0:
if options.manhattan:
import limix.utils.plot as plot
if not os.path.exists(options.outfile):
os.makedirs(options.outfile)
def plot_manhattan(pv, out_file):
import matplotlib.pylab as PLT
import scipy as SP
posCum = SP.arange(pv.shape[0])
idx=~SP.isnan(pv)
plot.plot_manhattan(posCum[idx],pv[idx],alphaNS=1.0,alphaS=1.0)
PLT.savefig(out_file)
for test in ['mtSet', 'iSet', 'iSet-het']:
out_file = os.path.join(options.outfile,
'iSet.%s_pv.manhattan.png'\
% (test,))
print((".. saving " + out_file))
plot_manhattan(df['%s pv' % test].values, out_file)
| apache-2.0 |
SamStudio8/scikit-bio | skbio/io/format/ordination.py | 8 | 14555 | r"""
Ordination results format (:mod:`skbio.io.format.ordination`)
=============================================================
.. currentmodule:: skbio.io.format.ordination
The ordination results file format (``ordination``) stores the results of an
ordination method in a human-readable, text-based format. The format supports
storing the results of various ordination methods available in scikit-bio,
including (but not necessarily limited to) PCoA, CA, RDA, and CCA.
Format Support
--------------
**Has Sniffer: Yes**
+------+------+---------------------------------------------------------------+
|Reader|Writer| Object Class |
+======+======+===============================================================+
|Yes |Yes |:mod:`skbio.stats.ordination.OrdinationResults` |
+------+------+---------------------------------------------------------------+
Format Specification
--------------------
The format is text-based, consisting of six attributes that describe the
ordination results:
- ``Eigvals``: 1-D
- ``Proportion explained``: 1-D
- ``Species``: 2-D
- ``Site``: 2-D
- ``Biplot``: 2-D
- ``Site constraints``: 2-D
The attributes in the file *must* be in this order.
Each attribute is defined in its own section of the file, where sections are
separated by a blank (or whitespace-only) line. Each attribute begins with a
header line, which contains the attribute's name (as listed above), followed by
a tab character, followed by one or more tab-separated dimensions (integers)
that describe the shape of the attribute's data.
The attribute's data follows its header line, and is stored in tab-separated
format. ``Species``, ``Site``, and ``Site constraints`` store species and site
IDs, respectively, as the first column, followed by the 2-D data array.
An example of this file format might look like::
Eigvals<tab>4
0.36<tab>0.18<tab>0.07<tab>0.08
Proportion explained<tab>4
0.46<tab>0.23<tab>0.10<tab>0.10
Species<tab>9<tab>4
Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00
Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14
Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10
Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22
Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22
Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38
Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43
Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05
Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69
Site<tab>10<tab>4
Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24
Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69
Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11
Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66
Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61
Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28
Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42
Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00
Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17
Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28
Biplot<tab>3<tab>3
-0.16<tab>0.63<tab>0.76
-0.99<tab>0.06<tab>-0.04
0.18<tab>-0.97<tab>0.03
Site constraints<tab>10<tab>4
Site0<tab>0.69<tab>-3.08<tab>-0.32<tab>-1.24
Site1<tab>0.66<tab>-3.06<tab>0.23<tab>2.69
Site2<tab>0.63<tab>-3.04<tab>0.78<tab>-3.11
Site3<tab>1.10<tab>0.50<tab>-1.55<tab>0.66
Site4<tab>-0.97<tab>0.06<tab>-1.12<tab>-0.61
Site5<tab>1.05<tab>0.53<tab>-0.43<tab>0.28
Site6<tab>-1.02<tab>0.10<tab>-0.00<tab>-0.42
Site7<tab>0.99<tab>0.57<tab>0.67<tab>-0.00
Site8<tab>-1.08<tab>0.13<tab>1.11<tab>1.17
Site9<tab>0.94<tab>0.61<tab>1.79<tab>-1.28
If a given result attribute is not present (e.g. ``Biplot``), it should still
be defined and declare its dimensions as 0. For example::
Biplot<tab>0<tab>0
All attributes are optional except for ``Eigvals``.
Examples
--------
Assume we have the following tab-delimited text file storing the
ordination results in ``ordination`` format::
Eigvals<tab>4
0.36<tab>0.18<tab>0.07<tab>0.08
Proportion explained<tab>4
0.46<tab>0.23<tab>0.10<tab>0.10
Species<tab>9<tab>4
Species0<tab>0.11<tab>0.28<tab>-0.20<tab>-0.00
Species1<tab>0.14<tab>0.30<tab>0.39<tab>-0.14
Species2<tab>-1.01<tab>0.09<tab>-0.19<tab>-0.10
Species3<tab>-1.03<tab>0.10<tab>0.22<tab>0.22
Species4<tab>1.05<tab>0.53<tab>-0.43<tab>0.22
Species5<tab>0.99<tab>0.57<tab>0.67<tab>-0.38
Species6<tab>0.25<tab>-0.17<tab>-0.20<tab>0.43
Species7<tab>0.14<tab>-0.85<tab>-0.01<tab>0.05
Species8<tab>0.41<tab>-0.70<tab>0.21<tab>-0.69
Site<tab>10<tab>4
Site0<tab>0.71<tab>-3.08<tab>0.21<tab>-1.24
Site1<tab>0.58<tab>-3.00<tab>-0.94<tab>2.69
Site2<tab>0.76<tab>-3.15<tab>2.13<tab>-3.11
Site3<tab>1.11<tab>1.07<tab>-1.87<tab>0.66
Site4<tab>-0.97<tab>-0.06<tab>-0.69<tab>-0.61
Site5<tab>1.04<tab>0.45<tab>-0.63<tab>0.28
Site6<tab>-0.95<tab>-0.08<tab>0.13<tab>-0.42
Site7<tab>0.94<tab>-0.10<tab>0.52<tab>-0.00
Site8<tab>-1.14<tab>0.49<tab>0.47<tab>1.17
Site9<tab>1.03<tab>1.03<tab>2.74<tab>-1.28
Biplot<tab>0<tab>0
Site constraints<tab>0<tab>0
Load the ordination results from the file:
>>> from io import StringIO
>>> from skbio import OrdinationResults
>>> or_f = StringIO(
... "Eigvals\t4\n"
... "0.36\t0.18\t0.07\t0.08\n"
... "\n"
... "Proportion explained\t4\n"
... "0.46\t0.23\t0.10\t0.10\n"
... "\n"
... "Species\t9\t4\n"
... "Species0\t0.11\t0.28\t-0.20\t-0.00\n"
... "Species1\t0.14\t0.30\t0.39\t-0.14\n"
... "Species2\t-1.01\t0.09\t-0.19\t-0.10\n"
... "Species3\t-1.03\t0.10\t0.22\t0.22\n"
... "Species4\t1.05\t0.53\t-0.43\t0.22\n"
... "Species5\t0.99\t0.57\t0.67\t-0.38\n"
... "Species6\t0.25\t-0.17\t-0.20\t0.43\n"
... "Species7\t0.14\t-0.85\t-0.01\t0.05\n"
... "Species8\t0.41\t-0.70\t0.21\t-0.69\n"
... "\n"
... "Site\t10\t4\n"
... "Site0\t0.71\t-3.08\t0.21\t-1.24\n"
... "Site1\t0.58\t-3.00\t-0.94\t2.69\n"
... "Site2\t0.76\t-3.15\t2.13\t-3.11\n"
... "Site3\t1.11\t1.07\t-1.87\t0.66\n"
... "Site4\t-0.97\t-0.06\t-0.69\t-0.61\n"
... "Site5\t1.04\t0.45\t-0.63\t0.28\n"
... "Site6\t-0.95\t-0.08\t0.13\t-0.42\n"
... "Site7\t0.94\t-0.10\t0.52\t-0.00\n"
... "Site8\t-1.14\t0.49\t0.47\t1.17\n"
... "Site9\t1.03\t1.03\t2.74\t-1.28\n"
... "\n"
... "Biplot\t0\t0\n"
... "\n"
... "Site constraints\t0\t0\n")
>>> ord_res = OrdinationResults.read(or_f)
"""
# ----------------------------------------------------------------------------
# Copyright (c) 2013--, scikit-bio development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import zip
import numpy as np
import pandas as pd
from skbio._base import OrdinationResults
from skbio.io import create_format, OrdinationFormatError
ordination = create_format('ordination')
@ordination.sniffer()
def _ordination_sniffer(fh):
# Smells an ordination file if *all* of the following lines are present
# *from the beginning* of the file:
# - eigvals header (minimally parsed)
# - another line (contents ignored)
# - a whitespace-only line
# - proportion explained header (minimally parsed)
try:
_parse_header(fh, 'Eigvals', 1)
next_line = next(fh, None)
if next_line is not None:
_check_empty_line(fh)
_parse_header(fh, 'Proportion explained', 1)
return True, {}
except OrdinationFormatError:
pass
return False, {}
@ordination.reader(OrdinationResults)
def _ordination_to_ordination_results(fh):
eigvals = _parse_vector_section(fh, 'Eigvals')
if eigvals is None:
raise OrdinationFormatError("At least one eigval must be present.")
_check_empty_line(fh)
prop_expl = _parse_vector_section(fh, 'Proportion explained')
_check_length_against_eigvals(prop_expl, eigvals,
'proportion explained values')
_check_empty_line(fh)
species = _parse_array_section(fh, 'Species')
_check_length_against_eigvals(species, eigvals,
'coordinates per species')
_check_empty_line(fh)
site = _parse_array_section(fh, 'Site')
_check_length_against_eigvals(site, eigvals,
'coordinates per site')
_check_empty_line(fh)
# biplot does not have ids to parse (the other arrays do)
biplot = _parse_array_section(fh, 'Biplot', has_ids=False)
_check_empty_line(fh)
cons = _parse_array_section(fh, 'Site constraints')
if cons is not None and site is not None:
if not np.array_equal(cons.index, site.index):
raise OrdinationFormatError(
"Site constraints ids and site ids must be equal: %s != %s" %
(cons.index, site.index))
return OrdinationResults(
short_method_name='', long_method_name='', eigvals=eigvals,
features=species, samples=site, biplot_scores=biplot,
sample_constraints=cons, proportion_explained=prop_expl)
def _parse_header(fh, header_id, num_dimensions):
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for %s header." % header_id)
header = line.strip().split('\t')
# +1 for the header ID
if len(header) != num_dimensions + 1 or header[0] != header_id:
raise OrdinationFormatError("%s header not found." % header_id)
return header
def _check_empty_line(fh):
"""Check that the next line in `fh` is empty or whitespace-only."""
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for blank line separating "
"sections.")
if line.strip():
raise OrdinationFormatError("Expected an empty line.")
def _check_length_against_eigvals(data, eigvals, label):
if data is not None:
num_vals = data.shape[-1]
num_eigvals = eigvals.shape[-1]
if num_vals != num_eigvals:
raise OrdinationFormatError(
"There should be as many %s as eigvals: %d != %d" %
(label, num_vals, num_eigvals))
def _parse_vector_section(fh, header_id):
header = _parse_header(fh, header_id, 1)
# Parse how many values we are waiting for
num_vals = int(header[1])
if num_vals == 0:
# The ordination method didn't generate the vector, so set it to None
vals = None
else:
# Parse the line with the vector values
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for line containing values "
"for %s section." % header_id)
vals = pd.Series(np.asarray(line.strip().split('\t'),
dtype=np.float64))
if len(vals) != num_vals:
raise OrdinationFormatError(
"Expected %d values in %s section, but found %d." %
(num_vals, header_id, len(vals)))
return vals
def _parse_array_section(fh, header_id, has_ids=True):
"""Parse an array section of `fh` identified by `header_id`."""
# Parse the array header
header = _parse_header(fh, header_id, 2)
# Parse the dimensions of the array
rows = int(header[1])
cols = int(header[2])
ids = None
if rows == 0 and cols == 0:
# The ordination method didn't generate the array data for 'header', so
# set it to None
data = None
elif rows == 0 or cols == 0:
# Both dimensions should be 0 or none of them are zero
raise OrdinationFormatError("One dimension of %s is 0: %d x %d" %
(header_id, rows, cols))
else:
# Parse the data
data = np.empty((rows, cols), dtype=np.float64)
if has_ids:
ids = []
for i in range(rows):
# Parse the next row of data
line = next(fh, None)
if line is None:
raise OrdinationFormatError(
"Reached end of file while looking for row %d in %s "
"section." % (i + 1, header_id))
vals = line.strip().split('\t')
if has_ids:
ids.append(vals[0])
vals = vals[1:]
if len(vals) != cols:
raise OrdinationFormatError(
"Expected %d values, but found %d in row %d." %
(cols, len(vals), i + 1))
data[i, :] = np.asarray(vals, dtype=np.float64)
data = pd.DataFrame(data, index=ids)
return data
@ordination.writer(OrdinationResults)
def _ordination_results_to_ordination(obj, fh):
_write_vector_section(fh, 'Eigvals', obj.eigvals)
_write_vector_section(fh, 'Proportion explained', obj.proportion_explained)
_write_array_section(fh, 'Species', obj.features)
_write_array_section(fh, 'Site', obj.samples)
_write_array_section(fh, 'Biplot', obj.biplot_scores, has_ids=False)
_write_array_section(fh, 'Site constraints', obj.sample_constraints,
include_section_separator=False)
def _write_vector_section(fh, header_id, vector):
if vector is None:
shape = 0
else:
shape = vector.shape[0]
fh.write("%s\t%d\n" % (header_id, shape))
if vector is not None:
fh.write(_format_vector(vector.values))
fh.write("\n")
def _write_array_section(fh, header_id, data, has_ids=True,
include_section_separator=True):
# write section header
if data is None:
shape = (0, 0)
else:
shape = data.shape
fh.write("%s\t%d\t%d\n" % (header_id, shape[0], shape[1]))
# write section data
if data is not None:
if not has_ids:
for vals in data.values:
fh.write(_format_vector(vals))
else:
for id_, vals in zip(data.index, data.values):
fh.write(_format_vector(vals, id_))
if include_section_separator:
fh.write("\n")
def _format_vector(vector, id_=None):
formatted_vector = '\t'.join(np.asarray(vector, dtype=np.str))
if id_ is None:
return "%s\n" % formatted_vector
else:
return "%s\t%s\n" % (id_, formatted_vector)
| bsd-3-clause |
RPGOne/Skynet | scikit-learn-c604ac39ad0e5b066d964df3e8f31ba7ebda1e0e/sklearn/decomposition/tests/test_dict_learning.py | 40 | 7535 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10*len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
| bsd-3-clause |
mugizico/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
VaclavDedik/classifier | classifier/models.py | 1 | 5190 | import numpy as np
import operator
from sklearn import naive_bayes
from sklearn import svm, tree
from kernels import GaussianKernel
class AbstractModel(object):
"""Abstract model of a learning algorithm. When implementing a subclass,
you have to implement method ``train``. Method ``predict`` is implemented
by default.
"""
def __init__(self, feature_selector):
"""Default initializer requires only feature selector to be specified.
If you want to add additional parameters to your implementation of the
model, be sure to call this initializer in your initializer first.
:param feature_selector: Feature selector class from package
``selectors``.
"""
self.feature_selector = feature_selector
def train(self, documents):
"""Trains the model on the provided list of **labeled** documents.
This method is expected to initialize some sort of predictor field(s)
that will be used by method ``predict``, e.g. in Naive Bayes model,
the initialized fields could be ``prior`` and ``likelihood``.
:param documents: Labeled documents used to train the predictor.
"""
raise NotImplementedError()
def predict(self, document, n=1):
"""Predicts label(s) for given document.
Note that before running this method, method ``train`` must be run.
:param document: Document to be labeled.
:param n: Number of predictions, 1 by default.
:returns: Predicted label(s) of the document in descending order.
"""
raise NotImplementedError()
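# Illustrative sketch only (not part of the original module): how a concrete
# model implementing the contract above is typically driven. ``selector`` is
# assumed to be a feature selector from the ``selectors`` package, ``docs`` a
# list of labeled documents as expected by train(), and ``new_doc`` an
# unlabeled document to classify.
def _usage_sketch(selector, docs, new_doc):
    model = SVMModel(selector, kernel=GaussianKernel(), C=10)
    model.train(docs)                    # fit the predictor on labeled documents
    return model.predict(new_doc, n=3)   # top-3 labels, most likely first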
class BaselineModel(AbstractModel):
"""This baseline model always predict label that is the most frequent."""
def __init__(self, feature_selector):
super(BaselineModel, self).__init__(feature_selector)
def train(self, documents):
X, Y = self.feature_selector.build(documents)
labels_freq = {}
for document in documents:
if document.label in labels_freq:
labels_freq[document.label] += 1
else:
labels_freq[document.label] = 1
self.labels_freq = sorted(labels_freq.items(), reverse=True,
key=operator.itemgetter(1))
def predict(self, document, n=1):
top_n = self.labels_freq[:n]
labels = map(lambda x: x[0], top_n)
return labels
class NaiveBayesModel(AbstractModel):
"""Naive Bayes model. No +1 smoothing is used in this model, the selector
is expected to remove words that are not in the vocabulary.
"""
def __init__(self, feature_selector):
super(NaiveBayesModel, self).__init__(feature_selector)
def train(self, documents):
X, Y = self.feature_selector.build(documents)
nb = naive_bayes.GaussianNB()
nb.fit(X, np.concatenate(Y))
self.nb = nb
def predict(self, document, n=1):
x = self.feature_selector.get_x(document)
probs = self.nb.predict_proba([x])[0]
Y = probs.argsort()[::-1]
labels = map(self.feature_selector.get_label, Y)
return labels[:n]
def __str__(self):
return "NaiveBayesModel(feature_selector=%s)" \
% self.feature_selector
class SVMModel(AbstractModel):
"""Support Vector Machine model."""
def __init__(self, feature_selector, kernel=GaussianKernel(), C=1,
cache_size=200):
super(SVMModel, self).__init__(feature_selector)
self.C = C
self.kernel = kernel
self.cache_size = cache_size
def train(self, documents):
X, Y = self.feature_selector.build(documents)
if hasattr(self.kernel, 'sklearn_name'):
self.svm = svm.SVC(C=self.C, kernel=self.kernel.sklearn_name,
probability=True, cache_size=self.cache_size,
**self.kernel.sklearn_params)
else:
self.svm = svm.SVC(C=self.C, kernel=self.kernel.compute)
self.svm.fit(X, np.concatenate(Y))
def predict(self, document, n=1):
x = self.feature_selector.get_x(document)
probs = self.svm.predict_proba([x])[0]
Y = probs.argsort()[::-1]
labels = map(self.feature_selector.get_label, Y)
return labels[:n]
def __str__(self):
return "SVMModel(feature_selector=%s, kernel=%s, C=%s)" \
% (self.feature_selector, self.kernel, self.C)
class CARTModel(AbstractModel):
"Decision Tree Model using CART algorithm."
def __init__(self, feature_selector):
super(CARTModel, self).__init__(feature_selector)
def train(self, documents):
X, Y = self.feature_selector.build(documents)
self.clf = tree.DecisionTreeClassifier()
self.clf.fit(X, np.concatenate(Y))
def predict(self, document, n=1):
x = self.feature_selector.get_x(document)
probs = self.clf.predict_proba([x])[0]
Y = probs.argsort()[::-1]
labels = map(self.feature_selector.get_label, Y)
return labels[:n]
def __str__(self):
return "CARTModel(feature_selector=%s)" % self.feature_selector
| mit |
Noahs-ARK/ARKcat | download_20ng.py | 2 | 3942 | from sklearn.datasets import fetch_20newsgroups
import os, sys
import subprocess, random
import unicodedata
base = "/cab1/corpora/bayes_opt/20_newsgroups/"
groups = {'all_topics':['talk.religion.misc', 'comp.windows.x', 'rec.sport.baseball', 'talk.politics.mideast', 'comp.sys.mac.hardware', 'sci.space', 'talk.politics.guns', 'comp.graphics', 'comp.os.ms-windows.misc', 'soc.religion.christian', 'talk.politics.misc', 'rec.motorcycles', 'comp.sys.ibm.pc.hardware', 'rec.sport.hockey', 'misc.forsale', 'sci.crypt', 'rec.autos', 'sci.med', 'sci.electronics', 'alt.atheism'],
'science':['sci.space', 'sci.crypt', 'sci.med', 'sci.electronics'],
'religion':['talk.religion.misc', 'alt.atheism'],
'comp':['comp.windows.x', 'comp.graphics']}
random.seed(999)
def do_stuff(train_or_test):
make_dev = train_or_test == 'train'
for group in groups:
#make directory
os.system('mkdir -p ' + base + group)
examples = {}
newsgroups_data = fetch_20newsgroups(subset=train_or_test, categories=groups[group], remove=('headers'))
for i in range(len(newsgroups_data['data'])):
line = newsgroups_data['data'][i]
# line = unicode(line, errors='ignore')
unicodedata.normalize('NFKD', line).encode('ascii', 'ignore')
line = ''.join(ch for ch in line if unicodedata.category(ch)[0]!="C")
contents = line.replace('\n', ' ').replace('\r', ' ').replace('\t', ' ').replace('\0',' ').replace('*', '.').strip().replace('"', "''").replace("\\","\\\\")
examples[len(examples)] = (contents, newsgroups_data['target_names'][newsgroups_data['target'][i]])
dev_items = sorted(random.sample(xrange(len(examples)), int(.2*len(examples))))
if not make_dev:
dev_items = []
print(len(dev_items))
if make_dev:
train_json_out = open(base + group + '/train.json', 'w')
train_csv_out = open(base + group + '/train.csv', 'w')
dev_json_out = open(base + group + '/dev.json', 'w')
dev_csv_out = open(base + group + '/dev.csv', 'w')
else:
train_json_out = open(base + group + '/test.json', 'w')
train_csv_out = open(base + group + '/test.csv', 'w')
train_json_out.write('{\n')
train_csv_out.write('idontknow,whattoputhere\n')
if make_dev:
dev_csv_out.write('idontknow,whattoputhere\n')
dev_json_out.write('{\n')
dev_counter = 0
train_counter = 0
for index in examples:
if index in dev_items:
dev_counter = dev_counter + 1
if index == dev_items[len(dev_items)-1]:
dev_json_out.write(' "' + str(dev_counter) + '": "' + examples[index][0].replace('\*','*').encode('ascii', 'ignore') + '"\n')
else:
dev_json_out.write(' "' + str(dev_counter) + '": "' + examples[index][0].replace('\*','*').encode('ascii', 'ignore') + '",\n')
dev_csv_out.write(str(dev_counter) + ',' + str(examples[index][1]) + '\n')
else:
train_counter = train_counter + 1
train_json_out.write(' "' + str(train_counter) + '": "' + examples[index][0].replace('\*','*').encode('ascii', 'ignore') + '",\n')
train_csv_out.write(str(train_counter) + ',' + str(examples[index][1]) + '\n')
train_json_out.write('}')
if make_dev:
dev_json_out.write('}')
print("DON'T FORGET TO REMOVE THE EXTRA COMMA AT THE END OF TRAIN.JSON AND TEST.JSON")
train_json_out.close()
train_csv_out.close()
if make_dev:
dev_json_out.close()
dev_csv_out.close()
do_stuff('train')
do_stuff('test')
#dev_items = sorted(random.sample(xrange(len(examples)), int(.2*len(examples))))
| apache-2.0 |
jcnelson/syndicate | papers/paper-nsdi2013/data/tools/analysis/Nr1w.py | 2 | 9504 | #!/usr/bin/python
import analysis
import os
import sys
import numpy as np
import statsmodels.api as sm
import matplotlib.pyplot as plt
def eval_dict( s ):
ret = None
try:
exec("ret = " + s)
except:
return None
return ret
def cdf_compare( dists, title, xl, xr, yl, yr, labels ):
mm = min(dists[0])
ma = max(dists[0])
cnt = len(dists[0])
for i in xrange(1,len(dists)):
mm = min( mm, min(dists[i]) )
ma = max( ma, max(dists[i]) )
cnt = min( cnt, len(dists[i]) )
print "cnt = " + str(cnt)
x = np.linspace( mm, ma, cnt )
i = 0
for dist in dists:
ecdf = sm.distributions.ECDF( dist )
plt.step( x, ecdf(x), label=labels[i] )
i += 1
dist.sort()
#print dist
plt.title( title )
plt.xticks( xr )
plt.yticks( yr )
plt.xlabel( xl )
plt.ylabel( yl )
plt.legend( labels, loc=4 )
plt.show()
if __name__ == "__main__":
syndicate_data_1k = {}
syndicate_data_1M = {}
syndicate_data_50M = {}
s3_data_20k = {}
s3_data_50M = {}
s3_data_100blk = {}
s3_data_100blk_nocache = {}
plc_data_100blk = {}
syndicate_data_100blk = {}
intersection = []
for expfile in os.listdir( sys.argv[1] ):
expfd = open( os.path.join( sys.argv[1], expfile ), "r" )
expdata = analysis.parse_experiments( expfd )
expfd.close()
if len(expdata['fcdistro']) > 0 and "12" not in expdata['fcdistro']:
print >> sys.stderr, "%s: wrong distro '%s'" % (expfile, expdata['fcdistro'])
continue
syndicate_exp_1k = analysis.read_experiment_data( expdata, "Nr1w-x5-small-syndicate.py" )
syndicate_exp_1M = analysis.read_experiment_data( expdata, "Nr1w-x5-1M-syndicate.py" )
syndicate_exp_50M = analysis.read_experiment_data( expdata, "Nr1w-x5-50M-syndicate-4.py" )
syndicate_exp_100blk = analysis.read_experiment_data( expdata, "Nr1w-syndicate-3.py" )
s3_exp_20k = analysis.read_experiment_data( expdata, "Nr1w-x5.py" )
s3_exp_100blk = analysis.read_experiment_data( expdata, "Nr1w-x5-100blk-s3-cache-chunked.py" )
plc_exp_100blk = analysis.read_experiment_data( expdata, "Nr1w-x5-100blk-planetlab-cache-chunked.py" )
s3_exp_50M = analysis.read_experiment_data( expdata, "Nr1w-x5-50M.py" )
s3_exp_100blk_nocache = analysis.read_experiment_data( expdata, "Nr1w-x5-100blk-s3-chunked.py" )
intersect = True
"""
if syndicate_exp_1k != None and len(syndicate_exp_1k) > 0 and syndicate_exp_1k[0] != None:
syndicate_data_1k[expfile] = eval_dict( syndicate_exp_1k[0][0] )
else:
intersect = False
if syndicate_exp_1M != None and len(syndicate_exp_1M) > 0 and syndicate_exp_1M[0] != None:
syndicate_data_1M[expfile] = eval_dict( syndicate_exp_1M[0][0] )
else:
intersect = False
if syndicate_exp_50M != None and len(syndicate_exp_50M) > 0 and syndicate_exp_50M[0] != None:
syndicate_data_50M[expfile] = eval_dict( syndicate_exp_50M[0][0] )
else:
intersect = False
if s3_exp_20k != None and len(s3_exp_20k) > 0 and s3_exp_20k[0] != None:
s3_data_20k[expfile] = eval_dict( s3_exp_20k[0][0] )
else:
intersect = False
if s3_exp_50M != None and len(s3_exp_50M) > 0 and s3_exp_50M[0] != None:
s3_data_50M[expfile] = eval_dict( s3_exp_50M[0][0] )
else:
intersect = False
"""
if s3_exp_100blk != None and len(s3_exp_100blk) > 0 and s3_exp_100blk[0] != None:
s3_data_100blk[expfile] = eval_dict( s3_exp_100blk[0][0] )
else:
intersect = False
if plc_exp_100blk != None and len(plc_exp_100blk) > 0 and plc_exp_100blk[-1] != None:
plc_data_100blk[expfile] = eval_dict( plc_exp_100blk[-1][0] )
else:
intersect = False
if s3_exp_100blk_nocache != None and len(s3_exp_100blk_nocache) > 0 and s3_exp_100blk_nocache[-1] != None:
s3_data_100blk_nocache[expfile] = eval_dict( s3_exp_100blk_nocache[-1][0] )
else:
intersect = False
if syndicate_exp_100blk != None and len(syndicate_exp_100blk) > 0 and syndicate_exp_100blk[-1] != None:
syndicate_data_100blk[expfile] = eval_dict( syndicate_exp_100blk[-1][0] )
else:
intersect = False
if intersect:
intersection.append( expfile )
for expfile in os.listdir( sys.argv[1] ):
if expfile not in intersection:
print >> sys.stderr, "Node %s did not pass all tests" % expfile
print >> sys.stderr, "%s nodes have data" % len(intersection)
syndicate = { 'first_1k': [], 'last_1k': [], 'first_1m': [], 'last_1m': [], 'first_50m': [], 'last_50m': [], 'first_100blk': [], 'last_100blk': [] }
s3 = { 'first_20k': [], 'last_20k': [], 'first_50m': [], 'last_50m': [], 'first_100blk': [], 'last_100blk': [], 'first_100blk_nocache': [], 'last_100blk_nocache': [] }
plc = {'first_100blk' : [], 'last_100blk': [] }
num_valid = 0
slow = []
for node in intersection:
valid = True
#data_list = [("syndicate 1k", syndicate_data_1k), ("syndicate 1M", syndicate_data_1M), ("syndicate 50M", syndicate_data_50M), ("S3 20k", s3_data_20k), ("S3 50M", s3_data_50M), ("S3 100blk", s3_data_100blk), ("PLC 100blk", plc_data_100blk)]
data_list = [("S3 100blk", s3_data_100blk), ("PLC 100blk", plc_data_100blk), ("S3 nocache 100blk", s3_data_100blk_nocache), ("Syndicate 100blk", syndicate_data_100blk)]
for (data_name, data) in data_list:
if data.get(node) == None:
print >> sys.stderr, "%s: no data for %s" % (node, data_name)
valid = False
elif data[node] == None:
print >> sys.stderr, "%s: unparseable data" % (node, data_name)
valid = False
elif len(data[node]['exception']) > 0:
print >> sys.stderr, "%s: exceptions on %s" % (node, data_name)
valid = False
if not valid:
        continue
"""
syndicate['first_1k'].append( syndicate_data_1k[node]['end_recv'][0] - syndicate_data_1k[node]['start_recv'][0] )
syndicate['last_1k'].append( syndicate_data_1k[node]['end_recv'][-1] - syndicate_data_1k[node]['start_recv'][-1] )
syndicate['first_1m'].append( syndicate_data_1M[node]['end_recv'][0] - syndicate_data_1M[node]['start_recv'][0] )
syndicate['last_1m'].append( syndicate_data_1M[node]['end_recv'][-1] - syndicate_data_1M[node]['start_recv'][-1] )
syndicate['first_50m'].append( syndicate_data_50M[node]['end_recv'][0] - syndicate_data_50M[node]['start_recv'][0] )
syndicate['last_50m'].append( syndicate_data_50M[node]['end_recv'][-1] - syndicate_data_50M[node]['start_recv'][-1] )
s3['first_20k'].append( s3_data_20k[node]['end_recv'][0] - s3_data_20k[node]['start_recv'][0] )
s3['last_20k'].append( s3_data_20k[node]['end_recv'][-1] - s3_data_20k[node]['start_recv'][-1] )
s3['first_50m'].append( s3_data_50M[node]['end_recv'][0] - s3_data_50M[node]['start_recv'][0] )
s3['last_50m'].append( s3_data_50M[node]['end_recv'][-1] - s3_data_50M[node]['start_recv'][-1] )
"""
s3['first_100blk'].append( s3_data_100blk[node]['end_recv'][0] - s3_data_100blk[node]['start_recv'][0])
s3['last_100blk'].append( s3_data_100blk[node]['end_recv'][-1] - s3_data_100blk[node]['start_recv'][-1])
s3['first_100blk_nocache'].append( s3_data_100blk_nocache[node]['end_recv'][0] - s3_data_100blk_nocache[node]['start_recv'][0] )
plc['first_100blk'].append( plc_data_100blk[node]['end_recv'][0] - plc_data_100blk[node]['start_recv'][0])
plc['last_100blk'].append( plc_data_100blk[node]['end_recv'][-1] - plc_data_100blk[node]['start_recv'][-1])
syndicate['first_100blk'].append( syndicate_data_100blk[node]['end_recv'][0] - syndicate_data_100blk[node]['start_recv'][0] )
syndicate['last_100blk'].append( syndicate_data_100blk[node]['end_recv'][-1] - syndicate_data_100blk[node]['start_recv'][-1] )
if syndicate['first_100blk'][-1] > 150:
slow.append( node )
num_valid += 1
#print "s3_first_100blk = " + str(s3['first_100blk'])
#print "s3_last_100blk = " + str(s3['last_100blk'])
print "valid: " + str(num_valid)
print "slow: \n" + "\n".join(slow)
# Syndicate 100-block reads: cache miss (first read) vs cache hit (last read) vs the PLC baseline
cdf_compare( [syndicate['first_100blk'], syndicate['last_100blk'], plc['first_100blk'] ], "Syndicate One-Writer-Many-Reader Download Times", "Seconds", np.arange(0, 1000, 100), "CDF(x)", np.arange(0, 1.05, 0.05), ["Syndicate 0% Cache Hit", "Syndicate 100% Cache Hit", "Python HTTP Server and Clients"] )
#cdf_compare( [plc['first_100blk'], s3['first_100blk']], "Amazon S3 vs PLC Cache Miss Download Times", "Seconds", np.arange(0, 425, 30), "CDF(x)", np.arange(0, 1.05, 0.05) )
cdf_compare( [s3['first_100blk'], s3['first_100blk_nocache']], "Amazon S3 Cache and Direct Download Times", "Seconds", np.arange(0, 1200, 100), "CDF(x)", np.arange(0, 1.05, 0.05), ["0% hit cache hit rate", "Direct Download"] )
cdf_compare( [s3['first_100blk'], s3['last_100blk']], "Amazon S3 Cache Miss and Cache Hit Download Times", "Seconds", np.arange(0, 425, 30), "CDF(x)", np.arange(0, 1.05, 0.05) )
# The 1k/50M comparisons below only apply when the corresponding data
# collection (commented out above) is re-enabled; their lists are empty here.
#cdf_compare( [syndicate['first_1k'], syndicate['last_1k']] )
#cdf_compare( [syndicate['first_50m'], s3['first_50m']] )
#cdf_compare( [syndicate['last_50m'], s3['last_50m']] )
#cdf_compare( [syndicate['last_1m'], s3['last_20k']] )
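# ----------------------------------------------------------------------------
# `cdf_compare` (like `eval_dict` and `analysis.read_experiment_data`) is
# defined earlier in this script.  The sketch below only illustrates the kind
# of helper the calls above assume -- one empirical CDF per series plus
# optional title, tick and legend arguments -- and is named so that it does not
# shadow the real implementation; the matplotlib usage and defaults here are
# assumptions, not the original code.
def _cdf_compare_sketch(series_list, title="", xlabel="", xticks=None,
                        ylabel="CDF(x)", yticks=None, legend=None):
    import matplotlib.pyplot as plt  # local import so the sketch stands alone
    for series in series_list:
        xs = sorted(series)
        ys = [float(i + 1) / len(xs) for i in xrange(len(xs))]  # empirical CDF
        plt.plot(xs, ys)
    if title:
        plt.title(title)
    if xlabel:
        plt.xlabel(xlabel)
    if ylabel:
        plt.ylabel(ylabel)
    if xticks is not None:
        plt.xticks(xticks)
    if yticks is not None:
        plt.yticks(yticks)
    if legend is not None:
        plt.legend(legend, loc="lower right")
    plt.show()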
| apache-2.0 |
iulian787/spack | var/spack/repos/builtin/packages/py-sncosmo/package.py | 5 | 1133 | # Copyright 2013-2020 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PySncosmo(PythonPackage):
"""SNCosmo is a Python library for high-level supernova cosmology
analysis."""
homepage = "http://sncosmo.readthedocs.io/"
url = "https://pypi.io/packages/source/s/sncosmo/sncosmo-1.2.0.tar.gz"
version('1.2.0', sha256='f3969eec5b25f60c70418dbd64765a2b4735bb53c210c61d0aab68916daea588')
# Required dependencies
# py-sncosmo binaries are duplicates of those from py-astropy
extends('python', ignore=r'bin/.*')
depends_on('py-setuptools', type='build')
depends_on('py-numpy', type=('build', 'run'))
depends_on('py-scipy', type=('build', 'run'))
depends_on('py-astropy', type=('build', 'run'))
# Recommended dependencies
depends_on('py-matplotlib', type=('build', 'run'))
depends_on('py-iminuit', type=('build', 'run'))
depends_on('py-emcee', type=('build', 'run'))
depends_on('py-nestle', type=('build', 'run'))
| lgpl-2.1 |
jreback/pandas | pandas/conftest.py | 1 | 37035 | """
This file is very long and growing, but it was decided to not split it yet, as
it's still manageable (2020-03-17, ~1.1k LoC). See gh-31989
Instead of splitting it was decided to define sections here:
- Configuration / Settings
- Autouse fixtures
- Common arguments
- Missing values & co.
- Classes
- Indices
- Series'
- DataFrames
- Operators & Operations
- Data sets/files
- Time zones
- Dtypes
- Misc
"""
from collections import abc
from datetime import date, time, timedelta, timezone
from decimal import Decimal
import operator
import os
from dateutil.tz import tzlocal, tzutc
import hypothesis
from hypothesis import strategies as st
import numpy as np
import pytest
from pytz import FixedOffset, utc
import pandas.util._test_decorators as td
from pandas.core.dtypes.dtypes import DatetimeTZDtype, IntervalDtype
import pandas as pd
from pandas import DataFrame, Interval, Period, Series, Timedelta, Timestamp
import pandas._testing as tm
from pandas.core import ops
from pandas.core.indexes.api import Index, MultiIndex
# ----------------------------------------------------------------
# Configuration / Settings
# ----------------------------------------------------------------
# pytest
def pytest_configure(config):
# Register marks to avoid warnings in pandas.test()
# sync with setup.cfg
config.addinivalue_line("markers", "single: mark a test as single cpu only")
config.addinivalue_line("markers", "slow: mark a test as slow")
config.addinivalue_line("markers", "network: mark a test as network")
config.addinivalue_line(
"markers", "db: tests requiring a database (mysql or postgres)"
)
config.addinivalue_line("markers", "high_memory: mark a test as a high-memory only")
config.addinivalue_line("markers", "clipboard: mark a pd.read_clipboard test")
config.addinivalue_line(
"markers", "arm_slow: mark a test as slow for arm64 architecture"
)
def pytest_addoption(parser):
parser.addoption("--skip-slow", action="store_true", help="skip slow tests")
parser.addoption("--skip-network", action="store_true", help="skip network tests")
parser.addoption("--skip-db", action="store_true", help="skip db tests")
parser.addoption(
"--run-high-memory", action="store_true", help="run high memory tests"
)
parser.addoption("--only-slow", action="store_true", help="run only slow tests")
parser.addoption(
"--strict-data-files",
action="store_true",
help="Fail if a test is skipped for missing data file.",
)
def pytest_runtest_setup(item):
if "slow" in item.keywords and item.config.getoption("--skip-slow"):
pytest.skip("skipping due to --skip-slow")
if "slow" not in item.keywords and item.config.getoption("--only-slow"):
pytest.skip("skipping due to --only-slow")
if "network" in item.keywords and item.config.getoption("--skip-network"):
pytest.skip("skipping due to --skip-network")
if "db" in item.keywords and item.config.getoption("--skip-db"):
pytest.skip("skipping due to --skip-db")
if "high_memory" in item.keywords and not item.config.getoption(
"--run-high-memory"
):
pytest.skip("skipping high memory test since --run-high-memory was not set")
# Hypothesis
hypothesis.settings.register_profile(
"ci",
# Hypothesis timing checks are tuned for scalars by default, so we bump
# them from 200ms to 500ms per test case as the global default. If this
# is too short for a specific test, (a) try to make it faster, and (b)
# if it really is slow add `@settings(deadline=...)` with a working value,
# or `deadline=None` to entirely disable timeouts for that test.
deadline=500,
suppress_health_check=(hypothesis.HealthCheck.too_slow,),
)
hypothesis.settings.load_profile("ci")
# Registering these strategies makes them globally available via st.from_type,
# which is used for offsets in tests/tseries/offsets/test_offsets_properties.py
for name in "MonthBegin MonthEnd BMonthBegin BMonthEnd".split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(
cls, st.builds(cls, n=st.integers(-99, 99), normalize=st.booleans())
)
for name in "YearBegin YearEnd BYearBegin BYearEnd".split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(
cls,
st.builds(
cls,
n=st.integers(-5, 5),
normalize=st.booleans(),
month=st.integers(min_value=1, max_value=12),
),
)
for name in "QuarterBegin QuarterEnd BQuarterBegin BQuarterEnd".split():
cls = getattr(pd.tseries.offsets, name)
st.register_type_strategy(
cls,
st.builds(
cls,
n=st.integers(-24, 24),
normalize=st.booleans(),
startingMonth=st.integers(min_value=1, max_value=12),
),
)
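# With the registrations above, Hypothesis can build offsets straight from the
# type, e.g. (illustrative):
#
#   st.from_type(pd.tseries.offsets.QuarterBegin).example()
#
# which draws n, normalize and startingMonth from the registered strategy.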
# ----------------------------------------------------------------
# Autouse fixtures
# ----------------------------------------------------------------
@pytest.fixture(autouse=True)
def configure_tests():
"""
Configure settings for all tests and test modules.
"""
pd.set_option("chained_assignment", "raise")
@pytest.fixture(autouse=True)
def add_imports(doctest_namespace):
"""
Make `np` and `pd` names available for doctests.
"""
doctest_namespace["np"] = np
doctest_namespace["pd"] = pd
# ----------------------------------------------------------------
# Common arguments
# ----------------------------------------------------------------
@pytest.fixture(params=[0, 1, "index", "columns"], ids=lambda x: f"axis {repr(x)}")
def axis(request):
"""
Fixture for returning the axis numbers of a DataFrame.
"""
return request.param
axis_frame = axis
@pytest.fixture(params=[True, False, None])
def observed(request):
"""
Pass in the observed keyword to groupby for [True, False]
This indicates whether categoricals should return values for
values which are not in the grouper [False / None], or only values which
appear in the grouper [True]. [None] is supported for future compatibility
if we decide to change the default (and would need to warn if this
parameter is not passed).
"""
return request.param
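# Illustrative only (not a fixture): when grouping by a Categorical that has an
# unused category, `df.groupby("cat", observed=False).sum()` keeps the unused
# category in the result index, whereas `observed=True` drops it.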
@pytest.fixture(params=[True, False, None])
def ordered(request):
"""
Boolean 'ordered' parameter for Categorical.
"""
return request.param
@pytest.fixture(params=["first", "last", False])
def keep(request):
"""
Valid values for the 'keep' parameter used in
.duplicated or .drop_duplicates
"""
return request.param
@pytest.fixture(params=["left", "right", "both", "neither"])
def closed(request):
"""
Fixture for trying all interval closed parameters.
"""
return request.param
@pytest.fixture(params=["left", "right", "both", "neither"])
def other_closed(request):
"""
Secondary closed fixture to allow parametrizing over all pairs of closed.
"""
return request.param
@pytest.fixture(params=[None, "gzip", "bz2", "zip", "xz"])
def compression(request):
"""
Fixture for trying common compression types in compression tests.
"""
return request.param
@pytest.fixture(params=["gzip", "bz2", "zip", "xz"])
def compression_only(request):
"""
Fixture for trying common compression types in compression tests excluding
uncompressed case.
"""
return request.param
@pytest.fixture(params=[True, False])
def writable(request):
"""
Fixture that an array is writable.
"""
return request.param
@pytest.fixture(params=["inner", "outer", "left", "right"])
def join_type(request):
"""
Fixture for trying all types of join operations.
"""
return request.param
@pytest.fixture(params=["nlargest", "nsmallest"])
def nselect_method(request):
"""
Fixture for trying all nselect methods.
"""
return request.param
# ----------------------------------------------------------------
# Missing values & co.
# ----------------------------------------------------------------
@pytest.fixture(params=tm.NULL_OBJECTS, ids=str)
def nulls_fixture(request):
"""
Fixture for each null type in pandas.
"""
return request.param
nulls_fixture2 = nulls_fixture # Generate cartesian product of nulls_fixture
@pytest.fixture(params=[None, np.nan, pd.NaT])
def unique_nulls_fixture(request):
"""
Fixture for each null type in pandas, each null type exactly once.
"""
return request.param
# Generate cartesian product of unique_nulls_fixture:
unique_nulls_fixture2 = unique_nulls_fixture
# ----------------------------------------------------------------
# Classes
# ----------------------------------------------------------------
@pytest.fixture(params=[pd.DataFrame, pd.Series])
def frame_or_series(request):
"""
Fixture to parametrize over DataFrame and Series.
"""
return request.param
@pytest.fixture(
params=[pd.Index, pd.Series], ids=["index", "series"] # type: ignore[list-item]
)
def index_or_series(request):
"""
Fixture to parametrize over Index and Series, made necessary by a mypy
bug, giving an error:
List item 0 has incompatible type "Type[Series]"; expected "Type[PandasObject]"
See GH#29725
"""
return request.param
# Generate cartesian product of index_or_series fixture:
index_or_series2 = index_or_series
@pytest.fixture(
params=[pd.Index, pd.Series, pd.array], ids=["index", "series", "array"]
)
def index_or_series_or_array(request):
"""
Fixture to parametrize over Index, Series, and ExtensionArray
"""
return request.param
@pytest.fixture
def dict_subclass():
"""
Fixture for a dictionary subclass.
"""
class TestSubDict(dict):
def __init__(self, *args, **kwargs):
dict.__init__(self, *args, **kwargs)
return TestSubDict
@pytest.fixture
def non_dict_mapping_subclass():
"""
Fixture for a non-mapping dictionary subclass.
"""
class TestNonDictMapping(abc.Mapping):
def __init__(self, underlying_dict):
self._data = underlying_dict
def __getitem__(self, key):
return self._data.__getitem__(key)
def __iter__(self):
return self._data.__iter__()
def __len__(self):
return self._data.__len__()
return TestNonDictMapping
# ----------------------------------------------------------------
# Indices
# ----------------------------------------------------------------
@pytest.fixture
def multiindex_year_month_day_dataframe_random_data():
"""
DataFrame with 3 level MultiIndex (year, month, day) covering
first 100 business days from 2000-01-01 with random data
"""
tdf = tm.makeTimeDataFrame(100)
ymd = tdf.groupby([lambda x: x.year, lambda x: x.month, lambda x: x.day]).sum()
# use Int64Index, to make sure things work
ymd.index = ymd.index.set_levels([lev.astype("i8") for lev in ymd.index.levels])
ymd.index.set_names(["year", "month", "day"], inplace=True)
return ymd
@pytest.fixture
def multiindex_dataframe_random_data():
"""DataFrame with 2 level MultiIndex with random data"""
index = MultiIndex(
levels=[["foo", "bar", "baz", "qux"], ["one", "two", "three"]],
codes=[[0, 0, 0, 1, 1, 2, 2, 3, 3, 3], [0, 1, 2, 0, 1, 1, 2, 0, 1, 2]],
names=["first", "second"],
)
return DataFrame(
np.random.randn(10, 3), index=index, columns=Index(["A", "B", "C"], name="exp")
)
def _create_multiindex():
"""
MultiIndex used to test the general functionality of this object
"""
# See Also: tests.multi.conftest.idx
major_axis = Index(["foo", "bar", "baz", "qux"])
minor_axis = Index(["one", "two"])
major_codes = np.array([0, 0, 1, 2, 3, 3])
minor_codes = np.array([0, 1, 0, 1, 0, 1])
index_names = ["first", "second"]
return MultiIndex(
levels=[major_axis, minor_axis],
codes=[major_codes, minor_codes],
names=index_names,
verify_integrity=False,
)
def _create_mi_with_dt64tz_level():
"""
MultiIndex with a level that is a tzaware DatetimeIndex.
"""
# GH#8367 round trip with pickle
return MultiIndex.from_product(
[[1, 2], ["a", "b"], pd.date_range("20130101", periods=3, tz="US/Eastern")],
names=["one", "two", "three"],
)
indices_dict = {
"unicode": tm.makeUnicodeIndex(100),
"string": tm.makeStringIndex(100),
"datetime": tm.makeDateIndex(100),
"datetime-tz": tm.makeDateIndex(100, tz="US/Pacific"),
"period": tm.makePeriodIndex(100),
"timedelta": tm.makeTimedeltaIndex(100),
"int": tm.makeIntIndex(100),
"uint": tm.makeUIntIndex(100),
"range": tm.makeRangeIndex(100),
"float": tm.makeFloatIndex(100),
"bool": tm.makeBoolIndex(10),
"categorical": tm.makeCategoricalIndex(100),
"interval": tm.makeIntervalIndex(100),
"empty": Index([]),
"tuples": MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3])),
"mi-with-dt64tz-level": _create_mi_with_dt64tz_level(),
"multi": _create_multiindex(),
"repeats": Index([0, 0, 1, 1, 2, 2]),
}
@pytest.fixture(params=indices_dict.keys())
def index(request):
"""
Fixture for many "simple" kinds of indices.
These indices are unlikely to cover corner cases, e.g.
- no names
- no NaTs/NaNs
- no values near implementation bounds
- ...
"""
# copy to avoid mutation, e.g. setting .name
return indices_dict[request.param].copy()
# Needed to generate cartesian product of indices
index_fixture2 = index
@pytest.fixture(params=indices_dict.keys())
def index_with_missing(request):
"""
Fixture for indices with missing values
"""
if request.param in ["int", "uint", "range", "empty", "repeats"]:
pytest.xfail("missing values not supported")
# GH 35538. Use deep copy to avoid illusive bug on np-dev
# Azure pipeline that writes into indices_dict despite copy
ind = indices_dict[request.param].copy(deep=True)
vals = ind.values
if request.param in ["tuples", "mi-with-dt64tz-level", "multi"]:
# For setting missing values in the top level of MultiIndex
vals = ind.tolist()
vals[0] = (None,) + vals[0][1:]
vals[-1] = (None,) + vals[-1][1:]
return MultiIndex.from_tuples(vals)
else:
vals[0] = None
vals[-1] = None
return type(ind)(vals)
# ----------------------------------------------------------------
# Series'
# ----------------------------------------------------------------
@pytest.fixture
def empty_series():
return pd.Series([], index=[], dtype=np.float64)
@pytest.fixture
def string_series():
"""
Fixture for Series of floats with Index of unique strings
"""
s = tm.makeStringSeries()
s.name = "series"
return s
@pytest.fixture
def object_series():
"""
Fixture for Series of dtype object with Index of unique strings
"""
s = tm.makeObjectSeries()
s.name = "objects"
return s
@pytest.fixture
def datetime_series():
"""
Fixture for Series of floats with DatetimeIndex
"""
s = tm.makeTimeSeries()
s.name = "ts"
return s
def _create_series(index):
""" Helper for the _series dict """
size = len(index)
data = np.random.randn(size)
return pd.Series(data, index=index, name="a")
_series = {
f"series-with-{index_id}-index": _create_series(index)
for index_id, index in indices_dict.items()
}
@pytest.fixture
def series_with_simple_index(index):
"""
Fixture for tests on series with changing types of indices.
"""
return _create_series(index)
@pytest.fixture
def series_with_multilevel_index():
"""
Fixture with a Series with a 2-level MultiIndex.
"""
arrays = [
["bar", "bar", "baz", "baz", "qux", "qux", "foo", "foo"],
["one", "two", "one", "two", "one", "two", "one", "two"],
]
tuples = zip(*arrays)
index = MultiIndex.from_tuples(tuples)
data = np.random.randn(8)
ser = Series(data, index=index)
ser[3] = np.NaN
return ser
_narrow_dtypes = [
np.float16,
np.float32,
np.int8,
np.int16,
np.int32,
np.uint8,
np.uint16,
np.uint32,
]
_narrow_series = {
f"{dtype.__name__}-series": tm.makeFloatSeries(name="a").astype(dtype)
for dtype in _narrow_dtypes
}
@pytest.fixture(params=_narrow_series.keys())
def narrow_series(request):
"""
Fixture for Series with low precision data types
"""
# copy to avoid mutation, e.g. setting .name
return _narrow_series[request.param].copy()
_index_or_series_objs = {**indices_dict, **_series, **_narrow_series}
@pytest.fixture(params=_index_or_series_objs.keys())
def index_or_series_obj(request):
"""
Fixture for tests on indexes, series and series with a narrow dtype
copy to avoid mutation, e.g. setting .name
"""
return _index_or_series_objs[request.param].copy(deep=True)
# ----------------------------------------------------------------
# DataFrames
# ----------------------------------------------------------------
@pytest.fixture
def empty_frame():
return DataFrame()
@pytest.fixture
def int_frame():
"""
Fixture for DataFrame of ints with index of unique strings
Columns are ['A', 'B', 'C', 'D']
A B C D
vpBeWjM651 1 0 1 0
5JyxmrP1En -1 0 0 0
qEDaoD49U2 -1 1 0 0
m66TkTfsFe 0 0 0 0
EHPaNzEUFm -1 0 -1 0
fpRJCevQhi 2 0 0 0
OlQvnmfi3Q 0 0 -2 0
... .. .. .. ..
uB1FPlz4uP 0 0 0 1
EcSe6yNzCU 0 0 -1 0
L50VudaiI8 -1 1 -2 0
y3bpw4nwIp 0 -1 0 0
H0RdLLwrCT 1 1 0 0
rY82K0vMwm 0 0 0 0
1OPIUjnkjk 2 0 0 0
[30 rows x 4 columns]
"""
return DataFrame(tm.getSeriesData()).astype("int64")
@pytest.fixture
def datetime_frame():
"""
Fixture for DataFrame of floats with DatetimeIndex
Columns are ['A', 'B', 'C', 'D']
A B C D
2000-01-03 -1.122153 0.468535 0.122226 1.693711
2000-01-04 0.189378 0.486100 0.007864 -1.216052
2000-01-05 0.041401 -0.835752 -0.035279 -0.414357
2000-01-06 0.430050 0.894352 0.090719 0.036939
2000-01-07 -0.620982 -0.668211 -0.706153 1.466335
2000-01-10 -0.752633 0.328434 -0.815325 0.699674
2000-01-11 -2.236969 0.615737 -0.829076 -1.196106
... ... ... ... ...
2000-02-03 1.642618 -0.579288 0.046005 1.385249
2000-02-04 -0.544873 -1.160962 -0.284071 -1.418351
2000-02-07 -2.656149 -0.601387 1.410148 0.444150
2000-02-08 -1.201881 -1.289040 0.772992 -1.445300
2000-02-09 1.377373 0.398619 1.008453 -0.928207
2000-02-10 0.473194 -0.636677 0.984058 0.511519
2000-02-11 -0.965556 0.408313 -1.312844 -0.381948
[30 rows x 4 columns]
"""
return DataFrame(tm.getTimeSeriesData())
@pytest.fixture
def float_frame():
"""
Fixture for DataFrame of floats with index of unique strings
Columns are ['A', 'B', 'C', 'D'].
A B C D
P7GACiRnxd -0.465578 -0.361863 0.886172 -0.053465
qZKh6afn8n -0.466693 -0.373773 0.266873 1.673901
tkp0r6Qble 0.148691 -0.059051 0.174817 1.598433
wP70WOCtv8 0.133045 -0.581994 -0.992240 0.261651
M2AeYQMnCz -1.207959 -0.185775 0.588206 0.563938
QEPzyGDYDo -0.381843 -0.758281 0.502575 -0.565053
r78Jwns6dn -0.653707 0.883127 0.682199 0.206159
... ... ... ... ...
IHEGx9NO0T -0.277360 0.113021 -1.018314 0.196316
lPMj8K27FA -1.313667 -0.604776 -1.305618 -0.863999
qa66YMWQa5 1.110525 0.475310 -0.747865 0.032121
yOa0ATsmcE -0.431457 0.067094 0.096567 -0.264962
65znX3uRNG 1.528446 0.160416 -0.109635 -0.032987
eCOBvKqf3e 0.235281 1.622222 0.781255 0.392871
xSucinXxuV -1.263557 0.252799 -0.552247 0.400426
[30 rows x 4 columns]
"""
return DataFrame(tm.getSeriesData())
# ----------------------------------------------------------------
# Scalars
# ----------------------------------------------------------------
@pytest.fixture(
params=[
(Interval(left=0, right=5), IntervalDtype("int64")),
(Interval(left=0.1, right=0.5), IntervalDtype("float64")),
(Period("2012-01", freq="M"), "period[M]"),
(Period("2012-02-01", freq="D"), "period[D]"),
(
Timestamp("2011-01-01", tz="US/Eastern"),
DatetimeTZDtype(tz="US/Eastern"),
),
(Timedelta(seconds=500), "timedelta64[ns]"),
]
)
def ea_scalar_and_dtype(request):
return request.param
# ----------------------------------------------------------------
# Operators & Operations
# ----------------------------------------------------------------
_all_arithmetic_operators = [
"__add__",
"__radd__",
"__sub__",
"__rsub__",
"__mul__",
"__rmul__",
"__floordiv__",
"__rfloordiv__",
"__truediv__",
"__rtruediv__",
"__pow__",
"__rpow__",
"__mod__",
"__rmod__",
]
@pytest.fixture(params=_all_arithmetic_operators)
def all_arithmetic_operators(request):
"""
Fixture for dunder names for common arithmetic operations.
"""
return request.param
@pytest.fixture(
params=[
operator.add,
ops.radd,
operator.sub,
ops.rsub,
operator.mul,
ops.rmul,
operator.truediv,
ops.rtruediv,
operator.floordiv,
ops.rfloordiv,
operator.mod,
ops.rmod,
operator.pow,
ops.rpow,
operator.eq,
operator.ne,
operator.lt,
operator.le,
operator.gt,
operator.ge,
operator.and_,
ops.rand_,
operator.xor,
ops.rxor,
operator.or_,
ops.ror_,
]
)
def all_binary_operators(request):
"""
Fixture for operator and roperator arithmetic, comparison, and logical ops.
"""
return request.param
@pytest.fixture(
params=[
operator.add,
ops.radd,
operator.sub,
ops.rsub,
operator.mul,
ops.rmul,
operator.truediv,
ops.rtruediv,
operator.floordiv,
ops.rfloordiv,
operator.mod,
ops.rmod,
operator.pow,
ops.rpow,
]
)
def all_arithmetic_functions(request):
"""
Fixture for operator and roperator arithmetic functions.
Notes
-----
    This mirrors all_arithmetic_operators, but provides the operator and
    roperator functions rather than the dunder method names.
"""
return request.param
_all_numeric_reductions = [
"sum",
"max",
"min",
"mean",
"prod",
"std",
"var",
"median",
"kurt",
"skew",
]
@pytest.fixture(params=_all_numeric_reductions)
def all_numeric_reductions(request):
"""
Fixture for numeric reduction names.
"""
return request.param
_all_boolean_reductions = ["all", "any"]
@pytest.fixture(params=_all_boolean_reductions)
def all_boolean_reductions(request):
"""
Fixture for boolean reduction names.
"""
return request.param
_all_reductions = _all_numeric_reductions + _all_boolean_reductions
@pytest.fixture(params=_all_reductions)
def all_reductions(request):
"""
Fixture for all (boolean + numeric) reduction names.
"""
return request.param
@pytest.fixture(params=["__eq__", "__ne__", "__le__", "__lt__", "__ge__", "__gt__"])
def all_compare_operators(request):
"""
Fixture for dunder names for common compare operations
* >=
* >
* ==
* !=
* <
* <=
"""
return request.param
@pytest.fixture(params=["__le__", "__lt__", "__ge__", "__gt__"])
def compare_operators_no_eq_ne(request):
"""
Fixture for dunder names for compare operations except == and !=
* >=
* >
* <
* <=
"""
return request.param
@pytest.fixture(
params=["__and__", "__rand__", "__or__", "__ror__", "__xor__", "__rxor__"]
)
def all_logical_operators(request):
"""
Fixture for dunder names for common logical operations
* |
* &
* ^
"""
return request.param
# ----------------------------------------------------------------
# Data sets/files
# ----------------------------------------------------------------
@pytest.fixture
def strict_data_files(pytestconfig):
"""
Returns the configuration for the test setting `--strict-data-files`.
"""
return pytestconfig.getoption("--strict-data-files")
@pytest.fixture
def datapath(strict_data_files):
"""
Get the path to a data file.
Parameters
----------
path : str
Path to the file, relative to ``pandas/tests/``
Returns
-------
path including ``pandas/tests``.
Raises
------
ValueError
If the path doesn't exist and the --strict-data-files option is set.
"""
BASE_PATH = os.path.join(os.path.dirname(__file__), "tests")
def deco(*args):
path = os.path.join(BASE_PATH, *args)
if not os.path.exists(path):
if strict_data_files:
raise ValueError(
f"Could not find file {path} and --strict-data-files is set."
)
else:
pytest.skip(f"Could not find {path}.")
return path
return deco
@pytest.fixture
def iris(datapath):
"""
The iris dataset as a DataFrame.
"""
return pd.read_csv(datapath("io", "data", "csv", "iris.csv"))
# ----------------------------------------------------------------
# Time zones
# ----------------------------------------------------------------
TIMEZONES = [
None,
"UTC",
"US/Eastern",
"Asia/Tokyo",
"dateutil/US/Pacific",
"dateutil/Asia/Singapore",
"+01:15",
"-02:15",
"UTC+01:15",
"UTC-02:15",
tzutc(),
tzlocal(),
FixedOffset(300),
FixedOffset(0),
FixedOffset(-300),
timezone.utc,
timezone(timedelta(hours=1)),
timezone(timedelta(hours=-1), name="foo"),
]
TIMEZONE_IDS = [repr(i) for i in TIMEZONES]
@td.parametrize_fixture_doc(str(TIMEZONE_IDS))
@pytest.fixture(params=TIMEZONES, ids=TIMEZONE_IDS)
def tz_naive_fixture(request):
"""
Fixture for trying timezones including default (None): {0}
"""
return request.param
@td.parametrize_fixture_doc(str(TIMEZONE_IDS[1:]))
@pytest.fixture(params=TIMEZONES[1:], ids=TIMEZONE_IDS[1:])
def tz_aware_fixture(request):
"""
Fixture for trying explicit timezones: {0}
"""
return request.param
# Generate cartesian product of tz_aware_fixture:
tz_aware_fixture2 = tz_aware_fixture
@pytest.fixture(scope="module")
def datetime_tz_utc():
"""
Yields the UTC timezone object from the datetime module.
"""
return timezone.utc
@pytest.fixture(params=["utc", "dateutil/UTC", utc, tzutc(), timezone.utc])
def utc_fixture(request):
"""
Fixture to provide variants of UTC timezone strings and tzinfo objects.
"""
return request.param
# ----------------------------------------------------------------
# Dtypes
# ----------------------------------------------------------------
@pytest.fixture(params=tm.STRING_DTYPES)
def string_dtype(request):
"""
Parametrized fixture for string dtypes.
* str
* 'str'
* 'U'
"""
return request.param
@pytest.fixture(params=tm.BYTES_DTYPES)
def bytes_dtype(request):
"""
Parametrized fixture for bytes dtypes.
* bytes
* 'bytes'
"""
return request.param
@pytest.fixture(params=tm.OBJECT_DTYPES)
def object_dtype(request):
"""
Parametrized fixture for object dtypes.
* object
* 'object'
"""
return request.param
@pytest.fixture(params=tm.DATETIME64_DTYPES)
def datetime64_dtype(request):
"""
Parametrized fixture for datetime64 dtypes.
* 'datetime64[ns]'
* 'M8[ns]'
"""
return request.param
@pytest.fixture(params=tm.TIMEDELTA64_DTYPES)
def timedelta64_dtype(request):
"""
Parametrized fixture for timedelta64 dtypes.
* 'timedelta64[ns]'
* 'm8[ns]'
"""
return request.param
@pytest.fixture(params=tm.FLOAT_DTYPES)
def float_dtype(request):
"""
Parameterized fixture for float dtypes.
* float
* 'float32'
* 'float64'
"""
return request.param
@pytest.fixture(params=tm.FLOAT_EA_DTYPES)
def float_ea_dtype(request):
"""
    Parameterized fixture for nullable float dtypes.
* 'Float32'
* 'Float64'
"""
return request.param
@pytest.fixture(params=tm.FLOAT_DTYPES + tm.FLOAT_EA_DTYPES)
def any_float_allowed_nullable_dtype(request):
"""
Parameterized fixture for float dtypes.
* float
* 'float32'
* 'float64'
* 'Float32'
* 'Float64'
"""
return request.param
@pytest.fixture(params=tm.COMPLEX_DTYPES)
def complex_dtype(request):
"""
Parameterized fixture for complex dtypes.
* complex
* 'complex64'
* 'complex128'
"""
return request.param
@pytest.fixture(params=tm.SIGNED_INT_DTYPES)
def sint_dtype(request):
"""
Parameterized fixture for signed integer dtypes.
* int
* 'int8'
* 'int16'
* 'int32'
* 'int64'
"""
return request.param
@pytest.fixture(params=tm.UNSIGNED_INT_DTYPES)
def uint_dtype(request):
"""
Parameterized fixture for unsigned integer dtypes.
* 'uint8'
* 'uint16'
* 'uint32'
* 'uint64'
"""
return request.param
@pytest.fixture(params=tm.ALL_INT_DTYPES)
def any_int_dtype(request):
"""
Parameterized fixture for any integer dtype.
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
"""
return request.param
@pytest.fixture(params=tm.ALL_EA_INT_DTYPES)
def any_nullable_int_dtype(request):
"""
Parameterized fixture for any nullable integer dtype.
* 'UInt8'
* 'Int8'
* 'UInt16'
* 'Int16'
* 'UInt32'
* 'Int32'
* 'UInt64'
* 'Int64'
"""
return request.param
@pytest.fixture(params=tm.ALL_EA_INT_DTYPES + tm.FLOAT_EA_DTYPES)
def any_numeric_dtype(request):
"""
Parameterized fixture for any nullable integer dtype and
any float ea dtypes.
* 'UInt8'
* 'Int8'
* 'UInt16'
* 'Int16'
* 'UInt32'
* 'Int32'
* 'UInt64'
* 'Int64'
* 'Float32'
* 'Float64'
"""
return request.param
@pytest.fixture(params=tm.SIGNED_EA_INT_DTYPES)
def any_signed_nullable_int_dtype(request):
"""
Parameterized fixture for any signed nullable integer dtype.
* 'Int8'
* 'Int16'
* 'Int32'
* 'Int64'
"""
return request.param
@pytest.fixture(params=tm.ALL_REAL_DTYPES)
def any_real_dtype(request):
"""
Parameterized fixture for any (purely) real numeric dtype.
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
* float
* 'float32'
* 'float64'
"""
return request.param
@pytest.fixture(params=tm.ALL_NUMPY_DTYPES)
def any_numpy_dtype(request):
"""
Parameterized fixture for all numpy dtypes.
* bool
* 'bool'
* int
* 'int8'
* 'uint8'
* 'int16'
* 'uint16'
* 'int32'
* 'uint32'
* 'int64'
* 'uint64'
* float
* 'float32'
* 'float64'
* complex
* 'complex64'
* 'complex128'
* str
* 'str'
* 'U'
* bytes
* 'bytes'
* 'datetime64[ns]'
* 'M8[ns]'
* 'timedelta64[ns]'
* 'm8[ns]'
* object
* 'object'
"""
return request.param
# categoricals are handled separately
_any_skipna_inferred_dtype = [
("string", ["a", np.nan, "c"]),
("string", ["a", pd.NA, "c"]),
("bytes", [b"a", np.nan, b"c"]),
("empty", [np.nan, np.nan, np.nan]),
("empty", []),
("mixed-integer", ["a", np.nan, 2]),
("mixed", ["a", np.nan, 2.0]),
("floating", [1.0, np.nan, 2.0]),
("integer", [1, np.nan, 2]),
("mixed-integer-float", [1, np.nan, 2.0]),
("decimal", [Decimal(1), np.nan, Decimal(2)]),
("boolean", [True, np.nan, False]),
("boolean", [True, pd.NA, False]),
("datetime64", [np.datetime64("2013-01-01"), np.nan, np.datetime64("2018-01-01")]),
("datetime", [pd.Timestamp("20130101"), np.nan, pd.Timestamp("20180101")]),
("date", [date(2013, 1, 1), np.nan, date(2018, 1, 1)]),
# The following two dtypes are commented out due to GH 23554
# ('complex', [1 + 1j, np.nan, 2 + 2j]),
# ('timedelta64', [np.timedelta64(1, 'D'),
# np.nan, np.timedelta64(2, 'D')]),
("timedelta", [timedelta(1), np.nan, timedelta(2)]),
("time", [time(1), np.nan, time(2)]),
("period", [pd.Period(2013), pd.NaT, pd.Period(2018)]),
("interval", [pd.Interval(0, 1), np.nan, pd.Interval(0, 2)]),
]
ids, _ = zip(*_any_skipna_inferred_dtype) # use inferred type as fixture-id
@pytest.fixture(params=_any_skipna_inferred_dtype, ids=ids)
def any_skipna_inferred_dtype(request):
"""
Fixture for all inferred dtypes from _libs.lib.infer_dtype
The covered (inferred) types are:
* 'string'
* 'empty'
* 'bytes'
* 'mixed'
* 'mixed-integer'
* 'mixed-integer-float'
* 'floating'
* 'integer'
* 'decimal'
* 'boolean'
* 'datetime64'
* 'datetime'
* 'date'
* 'timedelta'
* 'time'
* 'period'
* 'interval'
Returns
-------
inferred_dtype : str
The string for the inferred dtype from _libs.lib.infer_dtype
values : np.ndarray
An array of object dtype that will be inferred to have
`inferred_dtype`
Examples
--------
>>> import pandas._libs.lib as lib
>>>
>>> def test_something(any_skipna_inferred_dtype):
... inferred_dtype, values = any_skipna_inferred_dtype
... # will pass
... assert lib.infer_dtype(values, skipna=True) == inferred_dtype
"""
inferred_dtype, values = request.param
values = np.array(values, dtype=object) # object dtype to avoid casting
# correctness of inference tested in tests/dtypes/test_inference.py
return inferred_dtype, values
# ----------------------------------------------------------------
# Misc
# ----------------------------------------------------------------
@pytest.fixture
def ip():
"""
Get an instance of IPython.InteractiveShell.
Will raise a skip if IPython is not installed.
"""
pytest.importorskip("IPython", minversion="6.0.0")
from IPython.core.interactiveshell import InteractiveShell
# GH#35711 make sure sqlite history file handle is not leaked
from traitlets.config import Config # isort:skip
c = Config()
c.HistoryManager.hist_file = ":memory:"
return InteractiveShell(config=c)
@pytest.fixture(params=["bsr", "coo", "csc", "csr", "dia", "dok", "lil"])
def spmatrix(request):
"""
Yields scipy sparse matrix classes.
"""
from scipy import sparse
return getattr(sparse, request.param + "_matrix")
@pytest.fixture(
params=[
getattr(pd.offsets, o)
for o in pd.offsets.__all__
if issubclass(getattr(pd.offsets, o), pd.offsets.Tick)
]
)
def tick_classes(request):
"""
Fixture for Tick based datetime offsets available for a time series.
"""
return request.param
@pytest.fixture(params=[None, lambda x: x])
def sort_by_key(request):
"""
Simple fixture for testing keys in sorting methods.
Tests None (no key) and the identity key.
"""
return request.param
@pytest.fixture()
def fsspectest():
pytest.importorskip("fsspec")
from fsspec import register_implementation
from fsspec.implementations.memory import MemoryFileSystem
from fsspec.registry import _registry as registry
class TestMemoryFS(MemoryFileSystem):
protocol = "testmem"
test = [None]
def __init__(self, **kwargs):
self.test[0] = kwargs.pop("test", None)
super().__init__(**kwargs)
register_implementation("testmem", TestMemoryFS, clobber=True)
yield TestMemoryFS()
registry.pop("testmem", None)
TestMemoryFS.test[0] = None
TestMemoryFS.store.clear()
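# Typical (illustrative) use of this fixture in the IO tests:
#   df.to_csv("testmem://afile", storage_options={"test": "csv_write"})
# the keyword passed through `storage_options` is then visible as
# `fsspectest.test[0]`.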
@pytest.fixture(
params=[
("foo", None, None),
("Egon", "Venkman", None),
("NCC1701D", "NCC1701D", "NCC1701D"),
]
)
def names(request):
"""
A 3-tuple of names, the first two for operands, the last for a result.
"""
return request.param
@pytest.fixture(params=[tm.setitem, tm.loc, tm.iloc])
def indexer_sli(request):
"""
Parametrize over __setitem__, loc.__setitem__, iloc.__setitem__
"""
return request.param
@pytest.fixture(params=[tm.setitem, tm.iloc])
def indexer_si(request):
"""
Parametrize over __setitem__, iloc.__setitem__
"""
return request.param
| bsd-3-clause |
waterponey/scikit-learn | sklearn/utils/random.py | 46 | 10523 | # Author: Hamzeh Alsalhi <ha258@cornell.edu>
#
# License: BSD 3 clause
from __future__ import division
import numpy as np
import scipy.sparse as sp
import operator
import array
from sklearn.utils import check_random_state
from sklearn.utils.fixes import astype
from ._random import sample_without_replacement
__all__ = ['sample_without_replacement', 'choice']
# This is a backport of np.random.choice from numpy 1.7
# The function can be removed when we bump the requirements to >=1.7
def choice(a, size=None, replace=True, p=None, random_state=None):
"""
choice(a, size=None, replace=True, p=None)
Generates a random sample from a given 1-D array
.. versionadded:: 1.7.0
Parameters
-----------
a : 1-D array-like or int
If an ndarray, a random sample is generated from its elements.
If an int, the random sample is generated as if a was np.arange(n)
size : int or tuple of ints, optional
Output shape. Default is None, in which case a single value is
returned.
replace : boolean, optional
Whether the sample is with or without replacement.
p : 1-D array-like, optional
The probabilities associated with each entry in a.
If not given the sample assumes a uniform distribution over all
entries in a.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
--------
samples : 1-D ndarray, shape (size,)
The generated random samples
Raises
-------
ValueError
If a is an int and less than zero, if a or p are not 1-dimensional,
if a is an array-like of size 0, if p is not a vector of
probabilities, if a and p have different lengths, or if
replace=False and the sample size is greater than the population
size
See Also
---------
randint, shuffle, permutation
Examples
---------
Generate a uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3) # doctest: +SKIP
array([0, 3, 4])
>>> #This is equivalent to np.random.randint(0,5,3)
Generate a non-uniform random sample from np.arange(5) of size 3:
>>> np.random.choice(5, 3, p=[0.1, 0, 0.3, 0.6, 0]) # doctest: +SKIP
array([3, 3, 0])
Generate a uniform random sample from np.arange(5) of size 3 without
replacement:
>>> np.random.choice(5, 3, replace=False) # doctest: +SKIP
array([3,1,0])
>>> #This is equivalent to np.random.shuffle(np.arange(5))[:3]
Generate a non-uniform random sample from np.arange(5) of size
3 without replacement:
>>> np.random.choice(5, 3, replace=False, p=[0.1, 0, 0.3, 0.6, 0])
... # doctest: +SKIP
array([2, 3, 0])
Any of the above can be repeated with an arbitrary array-like
instead of just integers. For instance:
>>> aa_milne_arr = ['pooh', 'rabbit', 'piglet', 'Christopher']
>>> np.random.choice(aa_milne_arr, 5, p=[0.5, 0.1, 0.1, 0.3])
... # doctest: +SKIP
array(['pooh', 'pooh', 'pooh', 'Christopher', 'piglet'],
dtype='|S11')
"""
random_state = check_random_state(random_state)
# Format and Verify input
a = np.array(a, copy=False)
if a.ndim == 0:
try:
# __index__ must return an integer by python rules.
pop_size = operator.index(a.item())
except TypeError:
raise ValueError("a must be 1-dimensional or an integer")
if pop_size <= 0:
raise ValueError("a must be greater than 0")
elif a.ndim != 1:
raise ValueError("a must be 1-dimensional")
else:
pop_size = a.shape[0]
        if pop_size == 0:
raise ValueError("a must be non-empty")
if p is not None:
p = np.array(p, dtype=np.double, ndmin=1, copy=False)
if p.ndim != 1:
raise ValueError("p must be 1-dimensional")
if p.size != pop_size:
raise ValueError("a and p must have same size")
if np.any(p < 0):
raise ValueError("probabilities are not non-negative")
if not np.allclose(p.sum(), 1):
raise ValueError("probabilities do not sum to 1")
shape = size
if shape is not None:
size = np.prod(shape, dtype=np.intp)
else:
size = 1
# Actual sampling
if replace:
if p is not None:
cdf = p.cumsum()
cdf /= cdf[-1]
uniform_samples = random_state.random_sample(shape)
idx = cdf.searchsorted(uniform_samples, side='right')
# searchsorted returns a scalar
idx = np.array(idx, copy=False)
else:
idx = random_state.randint(0, pop_size, size=shape)
else:
if size > pop_size:
raise ValueError("Cannot take a larger sample than "
"population when 'replace=False'")
if p is not None:
if np.sum(p > 0) < size:
raise ValueError("Fewer non-zero entries in p than size")
n_uniq = 0
p = p.copy()
            found = np.zeros(shape, dtype=int)
flat_found = found.ravel()
while n_uniq < size:
x = random_state.rand(size - n_uniq)
if n_uniq > 0:
p[flat_found[0:n_uniq]] = 0
cdf = np.cumsum(p)
cdf /= cdf[-1]
new = cdf.searchsorted(x, side='right')
_, unique_indices = np.unique(new, return_index=True)
unique_indices.sort()
new = new.take(unique_indices)
flat_found[n_uniq:n_uniq + new.size] = new
n_uniq += new.size
idx = found
else:
idx = random_state.permutation(pop_size)[:size]
if shape is not None:
idx.shape = shape
if shape is None and isinstance(idx, np.ndarray):
# In most cases a scalar will have been made an array
idx = idx.item(0)
# Use samples as indices for a if a is array-like
if a.ndim == 0:
return idx
if shape is not None and idx.ndim == 0:
# If size == () then the user requested a 0-d array as opposed to
# a scalar object when size is None. However a[idx] is always a
# scalar and not an array. So this makes sure the result is an
# array, taking into account that np.array(item) may not work
# for object arrays.
res = np.empty((), dtype=a.dtype)
res[()] = a[idx]
return res
return a[idx]
def random_choice_csc(n_samples, classes, class_probability=None,
random_state=None):
"""Generate a sparse random matrix given column class distributions
Parameters
----------
n_samples : int,
Number of samples to draw in each column.
classes : list of size n_outputs of arrays of size (n_classes,)
List of classes for each column.
class_probability : list of size n_outputs of arrays of size (n_classes,)
Optional (default=None). Class distribution of each column. If None the
uniform distribution is assumed.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Returns
-------
random_matrix : sparse csc matrix of size (n_samples, n_outputs)
"""
data = array.array('i')
indices = array.array('i')
indptr = array.array('i', [0])
for j in range(len(classes)):
classes[j] = np.asarray(classes[j])
if classes[j].dtype.kind != 'i':
raise ValueError("class dtype %s is not supported" %
classes[j].dtype)
classes[j] = astype(classes[j], np.int64, copy=False)
# use uniform distribution if no class_probability is given
if class_probability is None:
class_prob_j = np.empty(shape=classes[j].shape[0])
class_prob_j.fill(1 / classes[j].shape[0])
else:
class_prob_j = np.asarray(class_probability[j])
        if not np.isclose(np.sum(class_prob_j), 1.0):
raise ValueError("Probability array at index {0} does not sum to "
"one".format(j))
if class_prob_j.shape[0] != classes[j].shape[0]:
raise ValueError("classes[{0}] (length {1}) and "
"class_probability[{0}] (length {2}) have "
"different length.".format(j,
classes[j].shape[0],
class_prob_j.shape[0]))
# If 0 is not present in the classes insert it with a probability 0.0
if 0 not in classes[j]:
classes[j] = np.insert(classes[j], 0, 0)
class_prob_j = np.insert(class_prob_j, 0, 0.0)
# If there are nonzero classes choose randomly using class_probability
rng = check_random_state(random_state)
if classes[j].shape[0] > 1:
p_nonzero = 1 - class_prob_j[classes[j] == 0]
nnz = int(n_samples * p_nonzero)
ind_sample = sample_without_replacement(n_population=n_samples,
n_samples=nnz,
random_state=random_state)
indices.extend(ind_sample)
            # Normalize probabilities for the nonzero elements
classes_j_nonzero = classes[j] != 0
class_probability_nz = class_prob_j[classes_j_nonzero]
class_probability_nz_norm = (class_probability_nz /
np.sum(class_probability_nz))
classes_ind = np.searchsorted(class_probability_nz_norm.cumsum(),
rng.rand(nnz))
data.extend(classes[j][classes_j_nonzero][classes_ind])
indptr.append(len(indices))
return sp.csc_matrix((data, indices, indptr),
(n_samples, len(classes)),
dtype=int)
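# Minimal usage sketch for ``random_choice_csc`` (illustrative only: the class
# values and probabilities below are made up for the example).
def _random_choice_csc_example():
    classes = [np.array([0, 1, 2]), np.array([0, 3])]
    class_probability = [np.array([0.5, 0.3, 0.2]), np.array([0.9, 0.1])]
    sampled = random_choice_csc(n_samples=5, classes=classes,
                                class_probability=class_probability,
                                random_state=0)
    # `sampled` is a (5, 2) scipy.sparse CSC matrix; class 0 is never stored
    # explicitly (it is treated as the implicit background value).
    return sampled.toarray()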
| bsd-3-clause |
margulies/topography | utils_py/paths_find_similar.py | 4 | 4083 | #get_ipython().magic(u'matplotlib inline')
import matplotlib.pyplot as plt
from scipy.spatial.distance import pdist, squareform
import scipy.io as sio
import h5py
import networkx as nx
import numpy as np
import gdist
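# calcPaths: collect the all-pairs Dijkstra shortest paths of G (cutoff `num`)
# that contain exactly `num` nodes, map each path to its sequence of network
# labels, and count for every path how many paths share an identical label
# sequence.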
def calcPaths(num):
length = nx.all_pairs_dijkstra_path(G, num)
length_paths = []
for node in length:
for target in length[node]:
if len(length[node][target]) == num:
length_paths.append(length[node][target])
labeled_paths = labels[length_paths]
same_labels = (squareform(pdist(labeled_paths)) < 1e-10).sum(axis=1)
return length_paths, labeled_paths, same_labels
def uniqueRows(labeled_paths, same_labels, cutoff):
a = labeled_paths[same_labels == cutoff]
uRows = np.unique(a.view(np.dtype((np.void, a.dtype.itemsize*a.shape[1])))).view(a.dtype).reshape(-1, a.shape[1])
    print uRows
    return uRows
def removePaths(labeled_paths, same_labels, vals, row):
ind = np.in1d(labeled_paths[:,row], vals).reshape(labeled_paths[:,row].shape)
labeled_paths_new = labeled_paths[ind]
same_labels_new = same_labels[ind]
return ind, labeled_paths_new, same_labels_new
def removePathsInverse(labeled_paths, same_labels, vals, row):
ind = np.in1d(labeled_paths[:,row], vals, invert=True).reshape(labeled_paths[:,row].shape)
labeled_paths_new = labeled_paths[ind]
same_labels_new = same_labels[ind]
return ind, labeled_paths_new, same_labels_new
def printAll(labeled_paths,same_labels):
print (('1 times:\n %s \n' % uniqueRows(labeled_paths,same_labels,1)))
print (('2 times:\n %s \n' % uniqueRows(labeled_paths,same_labels,2)))
print (('3 times:\n %s \n' % uniqueRows(labeled_paths,same_labels,3)))
print (('4 times:\n %s \n' % uniqueRows(labeled_paths,same_labels,4)))
print (('5 times:\n %s \n' % uniqueRows(labeled_paths,same_labels,5)))
print (('6 times:\n %s \n' % uniqueRows(labeled_paths,same_labels,6)))
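# labelHist: for each of the `num` positions along the kept paths, histogram
# which of the 18 network labels occurs at that position; calcPlace then
# reduces each label's histogram row to its weighted-average (1-based)
# position along the path.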
def labelHist(num,labeled_paths):
a = np.zeros([num,18])
for i in xrange(0,num):
a[i] = np.histogram(labeled_paths[:,i],18, range=(1,18))[0]
return a.transpose()
def calcPlace(b):
c = np.zeros([len(b),1])
for i in xrange(0,len(b)):
if np.sum(b[i]) == 0:
c[i] = 0
else:
c[i] = np.average(np.array(xrange(0,num)) + 1, weights=b[i])
return c
'''load data:'''
fp = h5py.File('data/clus.mat')
fp.keys()
adj = fp['clus']['edge'][:]
labels = fp['clus']['edgeNet'][:].flatten()
G = nx.from_numpy_matrix(adj)
num=6
length_paths, labeled_paths, same_labels = calcPaths(num)
ind1, labeled_paths1, same_labels1 = removePaths(labeled_paths, same_labels, [7,12,14,15,16], 0)
ind2, labeled_paths2, same_labels2 = removePathsInverse(labeled_paths1, same_labels1, [7,12,14,15,16], 1)
ind3, labeled_paths3, same_labels3 = removePathsInverse(labeled_paths2, same_labels2, [7,12,14,15,16], 2)
ind4, labeled_paths4, same_labels4 = removePaths(labeled_paths3, same_labels3, [17,3,8], num-2)
ind5, labeled_paths5, same_labels5 = removePaths(labeled_paths4, same_labels4, [13], num-1)
print '\nNum = %s' % num
printAll(labeled_paths1, same_labels1)
'''histograms'''
indX, labeled_pathsX, same_labelsX = removePathsInverse(labeled_paths3, same_labels3, [7,12,14,15,16], 3)
num=5
length_paths, labeled_paths, same_labels = calcPaths(num)
ind1, labeled_paths1, same_labels1 = removePaths(labeled_paths, same_labels, [7,12,14,15,16], 0)
a = labelHist(num,labeled_paths1)
print a
order = np.argsort(calcPlace(a), axis=0) + 1
fl = np.floor(np.sort(calcPlace(a), axis=0))
print '\nNum = %s' % num
print np.hstack((order, fl))
num=6
length_paths, labeled_paths, same_labels = calcPaths(num)
ind1, labeled_paths1, same_labels1 = removePaths(labeled_paths, same_labels, [7,12,14,15,16], 0)
a = labelHist(num,labeled_paths1)
order = np.argsort(calcPlace(a), axis=0) + 1
fl = np.floor(np.sort(calcPlace(a), axis=0))
print '\nNum = %s' % num
print np.hstack((order, fl))
np.array(fl[np.argsort(order, axis=0)]).transpose()
np.array(length_paths)[same_labels == same_labels.max()]
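# The expression above selects the node paths whose label sequence is the most
# frequently repeated one among all paths of this length (ties included).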
| mit |
evenmarbles/mlpy | mlpy/knowledgerep/cbr/similarity.py | 1 | 17713 | from __future__ import division, print_function, absolute_import
import math
import numpy as np
from abc import ABCMeta, abstractmethod
from sklearn.neighbors import NearestNeighbors
from sklearn.cluster import KMeans
from sklearn.metrics.pairwise import cosine_similarity
from sklearn.neighbors.dist_metrics import METRIC_MAPPING
class Stat(object):
"""The similarity statistics container.
The similarity statistics is a container to pass the
calculated measure of similarity between the case
identified by the case id and the query case between
functions.
Parameters
----------
case_id : int
The case's id.
similarity : float
The similarity measure.
"""
__slots__ = ('_case_id', '_similarity')
@property
def case_id(self):
"""The case's id.
Returns
-------
int :
The case's id
"""
return self._case_id
@property
def similarity(self):
"""The similarity measure.
Returns
-------
float :
The similarity measure.
"""
return self._similarity
def __init__(self, case_id, similarity=None):
self._case_id = case_id
self._similarity = similarity
class SimilarityFactory(object):
"""The similarity factory.
An instance of a similarity model can be created by passing
the similarity model type.
Examples
--------
>>> from mlpy.knowledgerep.cbr.similarity import SimilarityFactory
    >>> SimilarityFactory.create('knn', **{'method_params': 3})
"""
@staticmethod
def create(_type, **kwargs):
"""
Create a feature of the given type.
Parameters
----------
_type : str
The feature type. Valid feature types are:
knn
A k-nearest-neighbor algorithm is used to determine similarity
between cases (:class:`NeighborSimilarity`). The value
``n_neighbors`` must be specified.
radius-n
Similarity between cases is determined by the nearest neighbors
within a radius (:class:`NeighborSimilarity`). The value ``radius``
must be specified.
kmeans
Similarity is determined by a KMeans clustering algorithm
(:class:`KMeansSimilarity`). The value ``n_clusters`` must be specified.
exact-match
Only exact matches are considered similar (:class:`ExactMatchSimilarity`).
cosine
A cosine similarity measure is used to determine similarity between
cases (:class:`CosineSimilarity`).
kwargs : dict, optional
Non-positional arguments to pass to the class of the given type
for initialization.
Returns
-------
ISimilarity :
A similarity instance of the given type.
"""
try:
if _type == "knn":
kwargs["n_neighbors"] = kwargs["method_params"]
elif _type == "radius-n":
kwargs["radius"] = kwargs["method_params"]
elif _type == "kmeans":
kwargs["n_cluster"] = kwargs["method_params"]
elif _type == "cosine":
kwargs["threshold"] = kwargs["method_params"]
del kwargs["method_params"]
return {
"knn": NeighborSimilarity,
"radius-n": NeighborSimilarity,
"kmeans": KMeansSimilarity,
"exact-match": ExactMatchSimilarity,
"cosine": CosineSimilarity,
}[_type](**kwargs)
except KeyError:
return None
class ISimilarity(object):
"""The similarity model interface.
The similarity model keeps an internal indexing structure of
the relevant case data to efficiently computing the similarity
measure between data points.
Notes
-----
All similarity models must inherit from this class.
"""
__metaclass__ = ABCMeta
def __init__(self):
#: The indexing structure
self._indexing_structure = None
#: The mapping of the data points to their case ids
self._id_map = None
""":ivar: dict"""
@abstractmethod
def build_indexing_structure(self, data, id_map):
"""Build the indexing structure.
Parameters
----------
data : ndarray[ndarray[float]]
The raw data points to be indexed.
id_map : dict[int, int]
The mapping from the data points to their case ids.
Raises
------
NotImplementedError
If the child class does not implement this function.
"""
raise NotImplementedError
@abstractmethod
def compute_similarity(self, data_point):
"""Computes the similarity.
Computes the similarity between the data point and the data in
the indexing structure returning the results in a collection of
similarity statistics (:class:`Stat`).
Parameters
----------
data_point : list[float]
The raw data point to compare against the data points stored in the
indexing structure.
Returns
-------
list[Stat] :
A collection of similarity statistics.
Raises
------
NotImplementedError
If the child class does not implement this function.
"""
raise NotImplementedError
class NeighborSimilarity(ISimilarity):
"""The neighborhood similarity model.
The neighbor similarity model determines similarity between the data
in the indexing structure and the query data by using the nearest
neighbor algorithm :class:`sklearn.neighbors.NearestNeighbors`.
Both a k-neighbors classifier and a radius-neighbor-classifier are implemented.
To choose between the classifiers either `n_neighbors` or `radius` must be
specified.
Parameters
----------
n_neighbors : int
The number of data points considered to be closest neighbors.
radius : int
The radius around the query data point, within which the data points
are considered closest neighbors.
algorithm : str
The internal indexing structure of the training data. Defaults to
`kd-tree`.
metric : str
The metric used to compute the distances between pairs of points.
Refer to :class:`sklearn.neighbors.DistanceMetric` for valid
identifiers. Default is `euclidean`.
metric_params : dict
Parameters relevant to the specified metric.
Raises
------
UserWarning :
        If both or neither of `n_neighbors` and `radius` are given.
See Also
--------
:class:`sklearn.neighbors.KNeighborsClassifier`, :class:`sklearn.neighbors.RadiusNeighborsClassifier`
"""
def __init__(self, n_neighbors=None, radius=None, algorithm=None, metric=None, metric_params=None):
super(NeighborSimilarity, self).__init__()
        if (n_neighbors is None) == (radius is None):
            raise UserWarning("Exactly one of n_neighbors or radius must be initialized.")
self._n_neighbors = n_neighbors
self._radius = radius
if algorithm is not None:
if algorithm not in ["ball_tree", "kd_tree", "brute", "auto"]:
raise ValueError("%s is not a valid retrieval algorithm" % algorithm)
self._algorithm = algorithm
else:
self._algorithm = "kd_tree"
if metric is not None:
if metric not in METRIC_MAPPING:
raise ValueError("%s is not a valid retrieval metric" % metric)
self._metric = metric
else:
self._metric = "euclidean"
self._metric_params = metric_params if metric_params is not None else 2
def build_indexing_structure(self, data, id_map):
"""Build the indexing structure.
Build the indexing structure by fitting the data according to the
specified algorithm.
Parameters
----------
data : ndarray[ndarray[float]]
The raw data points to be indexed.
id_map : dict[int, int]
The mapping from the data points to their case ids.
"""
self._id_map = id_map
if self._n_neighbors is not None:
self._indexing_structure = NearestNeighbors(n_neighbors=self._n_neighbors, algorithm=self._algorithm,
metric=self._metric, p=self._metric_params).fit(data)
else:
self._indexing_structure = NearestNeighbors(radius=self._radius, algorithm=self._algorithm,
metric=self._metric, p=self._metric_params).fit(data)
def compute_similarity(self, data_point):
"""Computes the similarity.
Computes the similarity between the data point and the data in
the indexing structure using the :class:`sklearn.neighbors.NearestNeighbors`
algorithm. The results are returned in a collection of similarity statistics
(:class:`Stat`).
Parameters
----------
data_point : list[float]
The raw data point to compare against the data points stored in the
indexing structure.
Returns
-------
list[Stat] :
A collection of similarity statistics.
"""
if self._n_neighbors is not None:
# noinspection PyProtectedMember
raw_data = self._indexing_structure._fit_X
if len(raw_data) < self._n_neighbors:
result = []
for i, feat in enumerate(raw_data):
dist = np.linalg.norm(np.asarray(data_point) - np.asarray(feat))
result.append(Stat(self._id_map[i], dist))
# noinspection PyShadowingNames
result = sorted(result, key=lambda x: x.similarity)
else:
d, key_lists = self._indexing_structure.kneighbors(data_point)
result = [Stat(self._id_map[x], d[0][i]) for i, x in enumerate(key_lists[0])]
else:
d, key_lists = self._indexing_structure.radius_neighbors(data_point)
result = [Stat(self._id_map[x], d[0][i]) for i, x in enumerate(key_lists[0])]
return result
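# Illustrative usage sketch (not executed here): ``data`` is assumed to be a
# 2-d ndarray of feature vectors and ``id_map`` a dict mapping row indices to
# case ids, as documented in ``build_indexing_structure``.
#
#     sim = NeighborSimilarity(n_neighbors=3)
#     sim.build_indexing_structure(data, {i: i for i in range(data.shape[0])})
#     stats = sim.compute_similarity([0.1, 0.2, 0.3])
#     # -> list of Stat objects for the 3 nearest cases, closest first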
class KMeansSimilarity(ISimilarity):
"""The KMeans similarity model.
The KMeans similarity model determines similarity between the data in the
indexing structure and the query data by using the :class:`sklearn.cluster.KMeans`
algorithm.
Parameters
----------
n_cluster : int
The number of clusters to fit the raw data in.
"""
def __init__(self, n_cluster=None):
super(KMeansSimilarity, self).__init__()
        self._n_cluster = n_cluster if n_cluster is not None else 8
def build_indexing_structure(self, data, id_map):
"""Build the indexing structure.
Build the indexing structure by fitting the data into `n_cluster`
clusters.
Parameters
----------
data : ndarray[ndarray[float]]
The raw data points to be indexed.
id_map : dict[int, int]
The mapping from the data points to their case ids.
"""
self._id_map = id_map
self._indexing_structure = KMeans(init='k-means++', n_clusters=self._n_cluster, n_init=10).fit(data)
def compute_similarity(self, data_point):
"""Computes the similarity.
Computes the similarity between the data point and the data in
the indexing structure using the :class:`sklearn.cluster.KMeans`
clustering algorithm. The results are returned in a collection
of similarity statistics (:class:`Stat`).
Parameters
----------
data_point : list[float]
The raw data point to compare against the data points stored in the
indexing structure.
Returns
-------
list[Stat] :
A collection of similarity statistics.
"""
label = self._indexing_structure.predict(data_point)
result = []
try:
# noinspection PyTypeChecker,PyUnresolvedReferences
key_lists = np.nonzero(self._indexing_structure.labels_ == label[0])[0]
result = [Stat(self._id_map[x]) for x in key_lists]
except IndexError:
pass
return result
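# Illustrative usage sketch (not executed here): the KMeans model returns the
# cases that fall into the same cluster as the query point; the Stat objects
# carry only case ids, no distances.
#
#     sim = KMeansSimilarity(n_cluster=8)
#     sim.build_indexing_structure(data, {i: i for i in range(data.shape[0])})
#     same_cluster = sim.compute_similarity(data_point)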
class ExactMatchSimilarity(ISimilarity):
"""The exact match similarity model.
    The exact match similarity model considers only exact matches between
the data in the indexing structure and the query data as similar.
"""
# noinspection PyUnusedLocal
def __init__(self, **kwargs):
super(ExactMatchSimilarity, self).__init__()
def build_indexing_structure(self, data, id_map):
"""Build the indexing structure.
        To determine exact matches a brute-force algorithm is used, thus
the data remains as is and no special indexing structure is
implemented.
Parameters
----------
data : ndarray[ndarray[float]]
The raw data points to be indexed.
id_map : dict[int, int]
The mapping from the data points to their case ids.
.. todo::
It might be worth looking into a more efficient way of determining
exact matches.
"""
self._id_map = id_map
self._indexing_structure = data
def compute_similarity(self, data_point):
"""Computes the similarity.
Computes the similarity between the data point and the data in
the indexing structure identifying exact matches. The results are
returned in a collection of similarity statistics (:class:`Stat`).
Parameters
----------
data_point : list[float]
The raw data point to compare against the data points stored in the
indexing structure.
Returns
-------
list[Stat] :
A collection of similarity statistics.
"""
result = []
for i, feat in enumerate(self._indexing_structure):
total = 0
for j, val in enumerate(data_point):
total += math.pow(val - feat[j], 2)
if total == 0.0:
result.append(Stat(self._id_map[i]))
return result
class CosineSimilarity(ISimilarity):
"""The cosine similarity model.
Cosine similarity is a measure of similarity between two vectors of an inner
product space that measures the cosine of the angle between them. The cosine
    of 0 degrees is 1, and it is less than 1 for any other angle. It is thus a
    judgement of orientation and not magnitude: two vectors with the same
orientation have a cosine similarity of 1, two vectors at 90 degrees have a
similarity of 0, and two vectors diametrically opposed have a similarity of -1,
independent of their magnitude [1]_.
The cosine model employs the
`cosine_similarity <http://scikit-learn.org/stable/modules/metrics.html#cosine-similarity>`_
function from the :mod:`sklearn.metrics.pairwise` module to determine similarity.
.. seealso::
`Machine Learning::Cosine Similarity for Vector Space Models (Part III)
<http://blog.christianperone.com/?p=2497>`_
References
----------
    .. [1] `Wikipedia::cosine_similarity <https://en.wikipedia.org/wiki/Cosine_similarity>`_
"""
# noinspection PyUnusedLocal
def __init__(self, **kwargs):
super(CosineSimilarity, self).__init__()
def build_indexing_structure(self, data, id_map):
"""Build the indexing structure.
The cosine_similarity function from :mod:`sklearn.metrics.pairwise` takes
the raw data as input. Thus the data remains as is and no special indexing
structure is implemented.
Parameters
----------
data : ndarray[ndarray[float]]
The raw data points to be indexed.
id_map : dict[int, int]
The mapping from the data points to their case ids.
"""
self._id_map = id_map
self._indexing_structure = data
def compute_similarity(self, data_point):
"""Computes the similarity.
Computes the similarity between the data point and the data in
the indexing structure using the function :func:`cosine_similarity` from
:mod:`sklearn.metrics.pairwise`.
The resulting similarity ranges from -1 meaning exactly opposite, to 1
meaning exactly the same, with 0 indicating orthogonality (decorrelation),
and in-between values indicating intermediate similarity or dissimilarity.
The results are returned in a collection of similarity statistics (:class:`Stat`).
Parameters
----------
data_point : list[float]
The raw data point to compare against the data points stored in the
indexing structure.
Returns
-------
list[Stat] :
A collection of similarity statistics.
"""
similarity = cosine_similarity(data_point, self._indexing_structure)
if not np.any(data_point):
similarity = np.array([[float(np.array_equal(data_point, m)) for m in np.array(self._indexing_structure)]])
return [Stat(self._id_map[i], x) for i, x in enumerate(similarity[0])]
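# Illustrative usage sketch (not executed here): unlike the distance-based
# models above, larger values mean more similar, with similarities ranging
# from -1 (opposite) to 1 (same orientation).
#
#     sim = CosineSimilarity()
#     sim.build_indexing_structure(data, {i: i for i in range(data.shape[0])})
#     stats = sim.compute_similarity(query_vector)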
| mit |
sanketloke/scikit-learn | sklearn/metrics/base.py | 46 | 4627 | """
Common code for all metrics
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils import check_array, check_consistent_length
from ..utils.multiclass import type_of_target
from ..exceptions import UndefinedMetricWarning as _UndefinedMetricWarning
from ..utils import deprecated
@deprecated("UndefinedMetricWarning has been moved into the sklearn.exceptions"
" module. It will not be available here from version 0.19")
class UndefinedMetricWarning(_UndefinedMetricWarning):
pass
def _average_binary_score(binary_metric, y_true, y_score, average,
sample_weight=None):
"""Average a binary metric for multilabel classification
Parameters
----------
y_true : array, shape = [n_samples] or [n_samples, n_classes]
True binary labels in binary label indicators.
y_score : array, shape = [n_samples] or [n_samples, n_classes]
Target scores, can either be probability estimates of the positive
class, confidence values, or binary decisions.
average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
If ``None``, the scores for each class are returned. Otherwise,
this determines the type of averaging performed on the data:
``'micro'``:
Calculate metrics globally by considering each element of the label
indicator matrix as a label.
``'macro'``:
Calculate metrics for each label, and find their unweighted
mean. This does not take label imbalance into account.
``'weighted'``:
Calculate metrics for each label, and find their average, weighted
by support (the number of true instances for each label).
``'samples'``:
Calculate metrics for each instance, and find their average.
sample_weight : array-like of shape = [n_samples], optional
Sample weights.
binary_metric : callable, returns shape [n_classes]
The binary metric function to use.
Returns
-------
score : float or array of shape [n_classes]
        If not ``None``, average the score, else return the score for each
        class.
"""
average_options = (None, 'micro', 'macro', 'weighted', 'samples')
if average not in average_options:
raise ValueError('average has to be one of {0}'
''.format(average_options))
y_type = type_of_target(y_true)
if y_type not in ("binary", "multilabel-indicator"):
raise ValueError("{0} format is not supported".format(y_type))
if y_type == "binary":
return binary_metric(y_true, y_score, sample_weight=sample_weight)
check_consistent_length(y_true, y_score, sample_weight)
y_true = check_array(y_true)
y_score = check_array(y_score)
not_average_axis = 1
score_weight = sample_weight
average_weight = None
if average == "micro":
if score_weight is not None:
score_weight = np.repeat(score_weight, y_true.shape[1])
y_true = y_true.ravel()
y_score = y_score.ravel()
elif average == 'weighted':
if score_weight is not None:
average_weight = np.sum(np.multiply(
y_true, np.reshape(score_weight, (-1, 1))), axis=0)
else:
average_weight = np.sum(y_true, axis=0)
if average_weight.sum() == 0:
return 0
elif average == 'samples':
# swap average_weight <-> score_weight
average_weight = score_weight
score_weight = None
not_average_axis = 0
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_score.ndim == 1:
y_score = y_score.reshape((-1, 1))
n_classes = y_score.shape[not_average_axis]
score = np.zeros((n_classes,))
for c in range(n_classes):
y_true_c = y_true.take([c], axis=not_average_axis).ravel()
y_score_c = y_score.take([c], axis=not_average_axis).ravel()
score[c] = binary_metric(y_true_c, y_score_c,
sample_weight=score_weight)
# Average the results
if average is not None:
return np.average(score, weights=average_weight)
else:
return score
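# Illustrative sketch (not part of the library code): this is the averaging
# helper behind ranking metrics such as ``roc_auc_score``. For example,
# averaging a binary metric over the columns of a multilabel indicator target:
#
#     import numpy as np
#     from sklearn.metrics import roc_auc_score
#     y_true = np.array([[1, 0], [0, 1], [1, 1], [0, 0]])
#     y_score = np.array([[.9, .2], [.3, .8], [.8, .7], [.1, .4]])
#     _average_binary_score(roc_auc_score, y_true, y_score, average='macro')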
| bsd-3-clause |
ankurankan/pgmpy | pgmpy/tests/test_estimators/test_HillClimbSearch.py | 2 | 9288 | import unittest
import pandas as pd
import numpy as np
from pgmpy.estimators import HillClimbSearch, K2Score
from pgmpy.models import BayesianNetwork
class TestHillClimbEstimator(unittest.TestCase):
def setUp(self):
self.rand_data = pd.DataFrame(
np.random.randint(0, 5, size=(int(1e4), 2)), columns=list("AB")
)
self.rand_data["C"] = self.rand_data["B"]
self.est_rand = HillClimbSearch(self.rand_data)
k2score = K2Score(self.rand_data)
self.score_rand = k2score.local_score
self.score_structure_prior = k2score.structure_prior_ratio
self.model1 = BayesianNetwork()
self.model1.add_nodes_from(["A", "B", "C"])
self.model1_possible_edges = set(
[(u, v) for u in self.model1.nodes() for v in self.model1.nodes()]
)
self.model2 = self.model1.copy()
self.model2.add_edge("A", "B")
self.model2_possible_edges = set(
[(u, v) for u in self.model2.nodes() for v in self.model2.nodes()]
)
# link to dataset: "https://www.kaggle.com/c/titanic/download/train.csv"
self.titanic_data = pd.read_csv(
"pgmpy/tests/test_estimators/testdata/titanic_train.csv"
)
self.titanic_data1 = self.titanic_data[
["Survived", "Sex", "Pclass", "Age", "Embarked"]
]
self.est_titanic1 = HillClimbSearch(self.titanic_data1)
self.score_titanic1 = K2Score(self.titanic_data1).local_score
self.titanic_data2 = self.titanic_data[["Survived", "Sex", "Pclass"]]
self.est_titanic2 = HillClimbSearch(self.titanic_data2)
self.score_titanic2 = K2Score(self.titanic_data2).local_score
def test_legal_operations(self):
model2_legal_ops = list(
self.est_rand._legal_operations(
model=self.model2,
score=self.score_rand,
structure_score=self.score_structure_prior,
tabu_list=set(),
max_indegree=float("inf"),
black_list=set(),
white_list=self.model2_possible_edges,
fixed_edges=set(),
)
)
model2_legal_ops_ref = [
(("+", ("C", "A")), -28.15602208305154),
(("+", ("A", "C")), -28.155467430966382),
(("+", ("C", "B")), 7636.947544933631),
(("+", ("B", "C")), 7937.805375579936),
(("-", ("A", "B")), 28.155467430966382),
(("flip", ("A", "B")), -0.0005546520851567038),
]
self.assertSetEqual(
set([op for op, score in model2_legal_ops]),
set([op for op, score in model2_legal_ops_ref]),
)
def test_legal_operations_blacklist_whitelist(self):
model2_legal_ops_bl = list(
self.est_rand._legal_operations(
model=self.model2,
score=self.score_rand,
structure_score=self.score_structure_prior,
tabu_list=set(),
max_indegree=float("inf"),
black_list=set([("A", "B"), ("A", "C"), ("C", "A"), ("C", "B")]),
white_list=self.model2_possible_edges,
fixed_edges=set(),
)
)
model2_legal_ops_bl_ref = [
("+", ("B", "C")),
("-", ("A", "B")),
("flip", ("A", "B")),
]
self.assertSetEqual(
set([op for op, score in model2_legal_ops_bl]), set(model2_legal_ops_bl_ref)
)
model2_legal_ops_wl = list(
self.est_rand._legal_operations(
model=self.model2,
score=self.score_rand,
structure_score=self.score_structure_prior,
tabu_list=set(),
max_indegree=float("inf"),
black_list=set(),
white_list=set([("A", "B"), ("A", "C"), ("C", "A"), ("A", "B")]),
fixed_edges=set(),
)
)
model2_legal_ops_wl_ref = [
("+", ("A", "C")),
("+", ("C", "A")),
("-", ("A", "B")),
]
self.assertSetEqual(
set([op for op, score in model2_legal_ops_wl]), set(model2_legal_ops_wl_ref)
)
def test_legal_operations_titanic(self):
start_model = BayesianNetwork(
[("Survived", "Sex"), ("Pclass", "Age"), ("Pclass", "Embarked")]
)
all_possible_edges = set(
[(u, v) for u in start_model.nodes() for v in start_model.nodes()]
)
legal_ops = self.est_titanic1._legal_operations(
model=start_model,
score=self.score_titanic1,
structure_score=self.score_structure_prior,
tabu_list=[],
max_indegree=float("inf"),
black_list=set(),
white_list=all_possible_edges,
fixed_edges=set(),
)
self.assertEqual(len(list(legal_ops)), 20)
tabu_list = [
("-", ("Survived", "Sex")),
("-", ("Survived", "Pclass")),
("flip", ("Age", "Pclass")),
]
legal_ops_tabu = self.est_titanic1._legal_operations(
model=start_model,
score=self.score_titanic1,
structure_score=self.score_structure_prior,
tabu_list=tabu_list,
max_indegree=float("inf"),
black_list=set(),
white_list=all_possible_edges,
fixed_edges=set(),
)
self.assertEqual(len(list(legal_ops_tabu)), 18)
legal_ops_indegree = self.est_titanic1._legal_operations(
model=start_model,
score=self.score_titanic1,
structure_score=self.score_structure_prior,
tabu_list=[],
max_indegree=1,
black_list=set(),
white_list=all_possible_edges,
fixed_edges=set(),
)
self.assertEqual(len(list(legal_ops_indegree)), 11)
legal_ops_both = self.est_titanic1._legal_operations(
model=start_model,
score=self.score_titanic1,
structure_score=self.score_structure_prior,
tabu_list=tabu_list,
max_indegree=1,
black_list=set(),
white_list=all_possible_edges,
fixed_edges=set(),
)
legal_ops_both_ref = {
("+", ("Embarked", "Survived")): 10.050632580087495,
("+", ("Survived", "Pclass")): 41.8886804654893,
("+", ("Age", "Survived")): -23.635716036430722,
("+", ("Pclass", "Survived")): 41.81314459373152,
("+", ("Sex", "Pclass")): 4.772261678791324,
("-", ("Pclass", "Age")): 11.546515590730905,
("-", ("Pclass", "Embarked")): -32.17148283253266,
("flip", ("Pclass", "Embarked")): 3.3563814191275583,
("flip", ("Survived", "Sex")): 0.0397370279797542,
}
self.assertSetEqual(
set([op for op, score in legal_ops_both]), set(legal_ops_both_ref)
)
for op, score in legal_ops_both:
self.assertAlmostEqual(score, legal_ops_both_ref[op])
def test_estimate_rand(self):
est1 = self.est_rand.estimate()
self.assertSetEqual(set(est1.nodes()), set(["A", "B", "C"]))
self.assertTrue(
list(est1.edges()) == [("B", "C")] or list(est1.edges()) == [("C", "B")]
)
est2 = self.est_rand.estimate(
start_dag=BayesianNetwork([("A", "B"), ("A", "C")])
)
self.assertTrue(
list(est2.edges()) == [("B", "C")] or list(est2.edges()) == [("C", "B")]
)
est3 = self.est_rand.estimate(fixed_edges=[("B", "C")])
self.assertTrue([("B", "C")] == list(est3.edges()))
def test_estimate_titanic(self):
self.assertSetEqual(
set(self.est_titanic2.estimate().edges()),
set([("Survived", "Pclass"), ("Sex", "Pclass"), ("Sex", "Survived")]),
)
self.assertTrue(
("Pclass", "Survived")
in self.est_titanic2.estimate(fixed_edges=[("Pclass", "Survived")]).edges()
)
def test_no_legal_operation(self):
data = pd.DataFrame(
[
[1, 0, 0, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 0, 0, 1, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 0, 1, 1],
[1, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 0, 1, 0, 0, 1, 1, 0, 0],
],
columns=list("ABCDEFGHI"),
)
est = HillClimbSearch(data)
best_model = est.estimate(
fixed_edges=[("A", "B"), ("B", "C")], white_list=[("F", "C")]
)
self.assertEqual(
set(best_model.edges()), set([("A", "B"), ("B", "C"), ("F", "C")])
)
def test_estimate(self):
for score in ["k2score", "bdeuscore", "bdsscore", "bicscore"]:
dag = self.est_rand.estimate(scoring_method=score)
dag = self.est_titanic1.estimate(scoring_method=score)
def tearDown(self):
del self.rand_data
del self.est_rand
del self.model1
del self.titanic_data
del self.titanic_data1
del self.titanic_data2
del self.est_titanic1
del self.est_titanic2
| mit |
cython-testbed/pandas | pandas/core/apply.py | 4 | 12744 | import warnings
import numpy as np
from pandas import compat
from pandas._libs import reduction
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.common import (
is_extension_type,
is_dict_like,
is_list_like,
is_sequence)
from pandas.util._decorators import cache_readonly
from pandas.io.formats.printing import pprint_thing
def frame_apply(obj, func, axis=0, broadcast=None,
raw=False, reduce=None, result_type=None,
ignore_failures=False,
args=None, kwds=None):
""" construct and return a row or column based frame apply object """
axis = obj._get_axis_number(axis)
if axis == 0:
klass = FrameRowApply
elif axis == 1:
klass = FrameColumnApply
return klass(obj, func, broadcast=broadcast,
raw=raw, reduce=reduce, result_type=result_type,
ignore_failures=ignore_failures,
args=args, kwds=kwds)
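# Illustrative sketch (assumption: this mirrors how ``DataFrame.apply``
# delegates to this module; the actual call site lives in pandas/core/frame.py):
#
#     import numpy as np
#     import pandas as pd
#     from pandas.core.apply import frame_apply
#
#     df = pd.DataFrame({"a": [1.0, 4.0], "b": [9.0, 16.0]})
#     op = frame_apply(df, func=np.sqrt, axis=0)
#     result = op.get_result()          # equivalent to df.apply(np.sqrt)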
class FrameApply(object):
def __init__(self, obj, func, broadcast, raw, reduce, result_type,
ignore_failures, args, kwds):
self.obj = obj
self.raw = raw
self.ignore_failures = ignore_failures
self.args = args or ()
self.kwds = kwds or {}
if result_type not in [None, 'reduce', 'broadcast', 'expand']:
raise ValueError("invalid value for result_type, must be one "
"of {None, 'reduce', 'broadcast', 'expand'}")
if broadcast is not None:
warnings.warn("The broadcast argument is deprecated and will "
"be removed in a future version. You can specify "
"result_type='broadcast' to broadcast the result "
"to the original dimensions",
FutureWarning, stacklevel=4)
if broadcast:
result_type = 'broadcast'
if reduce is not None:
warnings.warn("The reduce argument is deprecated and will "
"be removed in a future version. You can specify "
"result_type='reduce' to try to reduce the result "
"to the original dimensions",
FutureWarning, stacklevel=4)
if reduce:
if result_type is not None:
raise ValueError(
"cannot pass both reduce=True and result_type")
result_type = 'reduce'
self.result_type = result_type
# curry if needed
if ((kwds or args) and
not isinstance(func, (np.ufunc, compat.string_types))):
def f(x):
return func(x, *args, **kwds)
else:
f = func
self.f = f
# results
self.result = None
self.res_index = None
self.res_columns = None
@property
def columns(self):
return self.obj.columns
@property
def index(self):
return self.obj.index
@cache_readonly
def values(self):
return self.obj.values
@cache_readonly
def dtypes(self):
return self.obj.dtypes
@property
def agg_axis(self):
return self.obj._get_agg_axis(self.axis)
def get_result(self):
""" compute the results """
# dispatch to agg
if is_list_like(self.f) or is_dict_like(self.f):
return self.obj.aggregate(self.f, axis=self.axis,
*self.args, **self.kwds)
# all empty
if len(self.columns) == 0 and len(self.index) == 0:
return self.apply_empty_result()
# string dispatch
if isinstance(self.f, compat.string_types):
# Support for `frame.transform('method')`
# Some methods (shift, etc.) require the axis argument, others
# don't, so inspect and insert if necessary.
func = getattr(self.obj, self.f)
sig = compat.signature(func)
if 'axis' in sig.args:
self.kwds['axis'] = self.axis
return func(*self.args, **self.kwds)
# ufunc
elif isinstance(self.f, np.ufunc):
with np.errstate(all='ignore'):
results = self.f(self.values)
return self.obj._constructor(data=results, index=self.index,
columns=self.columns, copy=False)
# broadcasting
if self.result_type == 'broadcast':
return self.apply_broadcast()
# one axis empty
elif not all(self.obj.shape):
return self.apply_empty_result()
# raw
elif self.raw and not self.obj._is_mixed_type:
return self.apply_raw()
return self.apply_standard()
def apply_empty_result(self):
"""
we have an empty result; at least 1 axis is 0
we will try to apply the function to an empty
series in order to see if this is a reduction function
"""
# we are not asked to reduce or infer reduction
# so just return a copy of the existing object
if self.result_type not in ['reduce', None]:
return self.obj.copy()
# we may need to infer
reduce = self.result_type == 'reduce'
from pandas import Series
if not reduce:
EMPTY_SERIES = Series([])
try:
r = self.f(EMPTY_SERIES, *self.args, **self.kwds)
reduce = not isinstance(r, Series)
except Exception:
pass
if reduce:
return self.obj._constructor_sliced(np.nan, index=self.agg_axis)
else:
return self.obj.copy()
def apply_raw(self):
""" apply to the values as a numpy array """
try:
result = reduction.reduce(self.values, self.f, axis=self.axis)
except Exception:
result = np.apply_along_axis(self.f, self.axis, self.values)
# TODO: mixed type case
if result.ndim == 2:
return self.obj._constructor(result,
index=self.index,
columns=self.columns)
else:
return self.obj._constructor_sliced(result,
index=self.agg_axis)
def apply_broadcast(self, target):
result_values = np.empty_like(target.values)
# axis which we want to compare compliance
result_compare = target.shape[0]
for i, col in enumerate(target.columns):
res = self.f(target[col])
ares = np.asarray(res).ndim
# must be a scalar or 1d
if ares > 1:
raise ValueError("too many dims to broadcast")
elif ares == 1:
# must match return dim
if result_compare != len(res):
raise ValueError("cannot broadcast result")
result_values[:, i] = res
# we *always* preserve the original index / columns
result = self.obj._constructor(result_values,
index=target.index,
columns=target.columns)
return result
def apply_standard(self):
# try to reduce first (by default)
# this only matters if the reduction in values is of different dtype
# e.g. if we want to apply to a SparseFrame, then can't directly reduce
# we cannot reduce using non-numpy dtypes,
# as demonstrated in gh-12244
if (self.result_type in ['reduce', None] and
not self.dtypes.apply(is_extension_type).any()):
# Create a dummy Series from an empty array
from pandas import Series
values = self.values
index = self.obj._get_axis(self.axis)
labels = self.agg_axis
empty_arr = np.empty(len(index), dtype=values.dtype)
dummy = Series(empty_arr, index=index, dtype=values.dtype)
try:
result = reduction.reduce(values, self.f,
axis=self.axis,
dummy=dummy,
labels=labels)
return self.obj._constructor_sliced(result, index=labels)
except Exception:
pass
# compute the result using the series generator
self.apply_series_generator()
# wrap results
return self.wrap_results()
def apply_series_generator(self):
series_gen = self.series_generator
res_index = self.result_index
i = None
keys = []
results = {}
if self.ignore_failures:
successes = []
for i, v in enumerate(series_gen):
try:
results[i] = self.f(v)
keys.append(v.name)
successes.append(i)
except Exception:
pass
# so will work with MultiIndex
if len(successes) < len(res_index):
res_index = res_index.take(successes)
else:
try:
for i, v in enumerate(series_gen):
results[i] = self.f(v)
keys.append(v.name)
except Exception as e:
if hasattr(e, 'args'):
# make sure i is defined
if i is not None:
k = res_index[i]
e.args = e.args + ('occurred at index %s' %
pprint_thing(k), )
raise
self.results = results
self.res_index = res_index
self.res_columns = self.result_columns
def wrap_results(self):
results = self.results
# see if we can infer the results
if len(results) > 0 and is_sequence(results[0]):
return self.wrap_results_for_axis()
# dict of scalars
result = self.obj._constructor_sliced(results)
result.index = self.res_index
return result
class FrameRowApply(FrameApply):
axis = 0
def apply_broadcast(self):
return super(FrameRowApply, self).apply_broadcast(self.obj)
@property
def series_generator(self):
return (self.obj._ixs(i, axis=1)
for i in range(len(self.columns)))
@property
def result_index(self):
return self.columns
@property
def result_columns(self):
return self.index
def wrap_results_for_axis(self):
""" return the results for the rows """
results = self.results
result = self.obj._constructor(data=results)
if not isinstance(results[0], ABCSeries):
try:
result.index = self.res_columns
except ValueError:
pass
try:
result.columns = self.res_index
except ValueError:
pass
return result
class FrameColumnApply(FrameApply):
axis = 1
def apply_broadcast(self):
result = super(FrameColumnApply, self).apply_broadcast(self.obj.T)
return result.T
@property
def series_generator(self):
constructor = self.obj._constructor_sliced
return (constructor(arr, index=self.columns, name=name)
for i, (arr, name) in enumerate(zip(self.values,
self.index)))
@property
def result_index(self):
return self.index
@property
def result_columns(self):
return self.columns
def wrap_results_for_axis(self):
""" return the results for the columns """
results = self.results
# we have requested to expand
if self.result_type == 'expand':
result = self.infer_to_same_shape()
# we have a non-series and don't want inference
elif not isinstance(results[0], ABCSeries):
from pandas import Series
result = Series(results)
result.index = self.res_index
# we may want to infer results
else:
result = self.infer_to_same_shape()
return result
def infer_to_same_shape(self):
""" infer the results to the same shape as the input object """
results = self.results
result = self.obj._constructor(data=results)
result = result.T
# set the index
result.index = self.res_index
# infer dtypes
result = result.infer_objects()
return result
| bsd-3-clause |
xguse/ggplot | setup.py | 13 | 2169 | import os
from setuptools import find_packages, setup
def extract_version():
"""
Extracts version values from the main matplotlib __init__.py and
returns them as a dictionary.
"""
with open('ggplot/__init__.py') as fd:
for line in fd.readlines():
if (line.startswith('__version__')):
exec(line.strip())
return locals()["__version__"]
def get_package_data():
baseline_images = [
'tests/baseline_images/%s/*' % x
for x in os.listdir('ggplot/tests/baseline_images')]
return {
'ggplot':
baseline_images +
[
"exampledata/*.csv",
"geoms/*.png"
]}
setup(
name="ggplot",
# Increase the version in ggplot/__init__.py
version=extract_version(),
author="Greg Lamp",
author_email="greg@yhathq.com",
url="https://github.com/yhat/ggplot/",
license="BSD",
packages=find_packages(),
package_dir={"ggplot": "ggplot"},
package_data=get_package_data(),
description="ggplot for python",
# run pandoc --from=markdown --to=rst --output=README.rst README.md
long_description=open("README.rst").read(),
# numpy is here to make installing easier... Needs to be at the last position,
# as that's the first installed with "python setup.py install"
install_requires=["six", "statsmodels", "brewer2mpl", "matplotlib", "scipy",
"patsy", "pandas", "numpy"],
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3'],
zip_safe=False)
| bsd-2-clause |
mugizico/scikit-learn | sklearn/tests/test_calibration.py | 213 | 12219 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_greater, assert_almost_equal,
assert_greater_equal,
assert_array_equal,
assert_raises,
assert_warns_message)
from sklearn.datasets import make_classification, make_blobs
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier, RandomForestRegressor
from sklearn.svm import LinearSVC
from sklearn.linear_model import Ridge
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import Imputer
from sklearn.metrics import brier_score_loss, log_loss
from sklearn.calibration import CalibratedClassifierCV
from sklearn.calibration import _sigmoid_calibration, _SigmoidCalibration
from sklearn.calibration import calibration_curve
def test_calibration():
"""Test calibration objects with isotonic and sigmoid"""
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test, y_test = X[n_samples:], y[n_samples:]
# Naive-Bayes
clf = MultinomialNB().fit(X_train, y_train, sample_weight=sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
pc_clf = CalibratedClassifierCV(clf, cv=y.size + 1)
assert_raises(ValueError, pc_clf.fit, X, y)
# Naive Bayes with calibration
for this_X_train, this_X_test in [(X_train, X_test),
(sparse.csr_matrix(X_train),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv=2)
# Note that this fit overwrites the fit on the entire training
# set
pc_clf.fit(this_X_train, y_train, sample_weight=sw_train)
prob_pos_pc_clf = pc_clf.predict_proba(this_X_test)[:, 1]
# Check that brier score has improved after calibration
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
# Check invariance against relabeling [0, 1] -> [1, 2]
pc_clf.fit(this_X_train, y_train + 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [-1, 1]
pc_clf.fit(this_X_train, 2 * y_train - 1, sample_weight=sw_train)
prob_pos_pc_clf_relabeled = pc_clf.predict_proba(this_X_test)[:, 1]
assert_array_almost_equal(prob_pos_pc_clf,
prob_pos_pc_clf_relabeled)
# Check invariance against relabeling [0, 1] -> [1, 0]
pc_clf.fit(this_X_train, (y_train + 1) % 2,
sample_weight=sw_train)
prob_pos_pc_clf_relabeled = \
pc_clf.predict_proba(this_X_test)[:, 1]
if method == "sigmoid":
assert_array_almost_equal(prob_pos_pc_clf,
1 - prob_pos_pc_clf_relabeled)
else:
# Isotonic calibration is not invariant against relabeling
# but should improve in both cases
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss((y_test + 1) % 2,
prob_pos_pc_clf_relabeled))
# check that calibration can also deal with regressors that have
# a decision_function
clf_base_regressor = CalibratedClassifierCV(Ridge())
clf_base_regressor.fit(X_train, y_train)
clf_base_regressor.predict(X_test)
# Check failure cases:
# only "isotonic" and "sigmoid" should be accepted as methods
clf_invalid_method = CalibratedClassifierCV(clf, method="foo")
assert_raises(ValueError, clf_invalid_method.fit, X_train, y_train)
# base-estimators should provide either decision_function or
# predict_proba (most regressors, for instance, should fail)
clf_base_regressor = \
CalibratedClassifierCV(RandomForestRegressor(), method="sigmoid")
assert_raises(RuntimeError, clf_base_regressor.fit, X_train, y_train)
def test_sample_weight_warning():
n_samples = 100
X, y = make_classification(n_samples=2 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=len(y))
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_test = X[n_samples:]
for method in ['sigmoid', 'isotonic']:
base_estimator = LinearSVC(random_state=42)
calibrated_clf = CalibratedClassifierCV(base_estimator, method=method)
# LinearSVC does not currently support sample weights but they
# can still be used for the calibration step (with a warning)
msg = "LinearSVC does not support sample_weight."
assert_warns_message(
UserWarning, msg,
calibrated_clf.fit, X_train, y_train, sample_weight=sw_train)
probs_with_sw = calibrated_clf.predict_proba(X_test)
# As the weights are used for the calibration, they should still yield
        # different predictions
calibrated_clf.fit(X_train, y_train)
probs_without_sw = calibrated_clf.predict_proba(X_test)
diff = np.linalg.norm(probs_with_sw - probs_without_sw)
assert_greater(diff, 0.1)
def test_calibration_multiclass():
"""Test calibration for multiclass """
# test multi-class setting with classifier that implements
# only decision function
clf = LinearSVC()
X, y_idx = make_blobs(n_samples=100, n_features=2, random_state=42,
centers=3, cluster_std=3.0)
# Use categorical labels to check that CalibratedClassifierCV supports
# them correctly
target_names = np.array(['a', 'b', 'c'])
y = target_names[y_idx]
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf.fit(X_train, y_train)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=2)
cal_clf.fit(X_train, y_train)
probas = cal_clf.predict_proba(X_test)
assert_array_almost_equal(np.sum(probas, axis=1), np.ones(len(X_test)))
# Check that log-loss of calibrated classifier is smaller than
# log-loss of naively turned OvR decision function to probabilities
# via softmax
def softmax(y_pred):
e = np.exp(-y_pred)
return e / e.sum(axis=1).reshape(-1, 1)
uncalibrated_log_loss = \
log_loss(y_test, softmax(clf.decision_function(X_test)))
calibrated_log_loss = log_loss(y_test, probas)
assert_greater_equal(uncalibrated_log_loss, calibrated_log_loss)
# Test that calibration of a multiclass classifier decreases log-loss
# for RandomForestClassifier
X, y = make_blobs(n_samples=100, n_features=2, random_state=42,
cluster_std=3.0)
X_train, y_train = X[::2], y[::2]
X_test, y_test = X[1::2], y[1::2]
clf = RandomForestClassifier(n_estimators=10, random_state=42)
clf.fit(X_train, y_train)
clf_probs = clf.predict_proba(X_test)
loss = log_loss(y_test, clf_probs)
for method in ['isotonic', 'sigmoid']:
cal_clf = CalibratedClassifierCV(clf, method=method, cv=3)
cal_clf.fit(X_train, y_train)
cal_clf_probs = cal_clf.predict_proba(X_test)
cal_loss = log_loss(y_test, cal_clf_probs)
assert_greater(loss, cal_loss)
def test_calibration_prefit():
"""Test calibration for prefitted classifiers"""
n_samples = 50
X, y = make_classification(n_samples=3 * n_samples, n_features=6,
random_state=42)
sample_weight = np.random.RandomState(seed=42).uniform(size=y.size)
X -= X.min() # MultinomialNB only allows positive X
# split train and test
X_train, y_train, sw_train = \
X[:n_samples], y[:n_samples], sample_weight[:n_samples]
X_calib, y_calib, sw_calib = \
X[n_samples:2 * n_samples], y[n_samples:2 * n_samples], \
sample_weight[n_samples:2 * n_samples]
X_test, y_test = X[2 * n_samples:], y[2 * n_samples:]
# Naive-Bayes
clf = MultinomialNB()
clf.fit(X_train, y_train, sw_train)
prob_pos_clf = clf.predict_proba(X_test)[:, 1]
# Naive Bayes with calibration
for this_X_calib, this_X_test in [(X_calib, X_test),
(sparse.csr_matrix(X_calib),
sparse.csr_matrix(X_test))]:
for method in ['isotonic', 'sigmoid']:
pc_clf = CalibratedClassifierCV(clf, method=method, cv="prefit")
for sw in [sw_calib, None]:
pc_clf.fit(this_X_calib, y_calib, sample_weight=sw)
y_prob = pc_clf.predict_proba(this_X_test)
y_pred = pc_clf.predict(this_X_test)
prob_pos_pc_clf = y_prob[:, 1]
assert_array_equal(y_pred,
np.array([0, 1])[np.argmax(y_prob, axis=1)])
assert_greater(brier_score_loss(y_test, prob_pos_clf),
brier_score_loss(y_test, prob_pos_pc_clf))
def test_sigmoid_calibration():
"""Test calibration values with Platt sigmoid model"""
exF = np.array([5, -4, 1.0])
exY = np.array([1, -1, -1])
# computed from my python port of the C++ code in LibSVM
AB_lin_libsvm = np.array([-0.20261354391187855, 0.65236314980010512])
assert_array_almost_equal(AB_lin_libsvm,
_sigmoid_calibration(exF, exY), 3)
lin_prob = 1. / (1. + np.exp(AB_lin_libsvm[0] * exF + AB_lin_libsvm[1]))
sk_prob = _SigmoidCalibration().fit(exF, exY).predict(exF)
assert_array_almost_equal(lin_prob, sk_prob, 6)
# check that _SigmoidCalibration().fit only accepts 1d array or 2d column
# arrays
assert_raises(ValueError, _SigmoidCalibration().fit,
np.vstack((exF, exF)), exY)
def test_calibration_curve():
"""Check calibration_curve function"""
y_true = np.array([0, 0, 0, 1, 1, 1])
y_pred = np.array([0., 0.1, 0.2, 0.8, 0.9, 1.])
prob_true, prob_pred = calibration_curve(y_true, y_pred, n_bins=2)
prob_true_unnormalized, prob_pred_unnormalized = \
calibration_curve(y_true, y_pred * 2, n_bins=2, normalize=True)
assert_equal(len(prob_true), len(prob_pred))
assert_equal(len(prob_true), 2)
assert_almost_equal(prob_true, [0, 1])
assert_almost_equal(prob_pred, [0.1, 0.9])
assert_almost_equal(prob_true, prob_true_unnormalized)
assert_almost_equal(prob_pred, prob_pred_unnormalized)
# probabilities outside [0, 1] should not be accepted when normalize
# is set to False
assert_raises(ValueError, calibration_curve, [1.1], [-0.1],
normalize=False)
def test_calibration_nan_imputer():
"""Test that calibration can accept nan"""
X, y = make_classification(n_samples=10, n_features=2,
n_informative=2, n_redundant=0,
random_state=42)
X[0, 0] = np.nan
clf = Pipeline(
[('imputer', Imputer()),
('rf', RandomForestClassifier(n_estimators=1))])
clf_c = CalibratedClassifierCV(clf, cv=2, method='isotonic')
clf_c.fit(X, y)
clf_c.predict(X)
| bsd-3-clause |
edublancas/sklearn-evaluation | src/sklearn_evaluation/metrics.py | 2 | 4986 | # from collections import OrderedDict
import numpy as np
from sklearn.metrics import precision_score
from sklearn_evaluation.preprocessing import binarize
from sklearn_evaluation import util
from sklearn_evaluation import validate
def compute_at_thresholds(fn, y_true, y_score, n_thresholds=10, start=0.0):
"""
Given scores, binarize them at different thresholds, then compute
metrics
Examples
--------
>>> from sklearn_evaluation.metrics import compute_at_thresholds
>>> from sklearn.metrics import accuracy_score
>>> from sklearn.metrics import precision_score, recall_score, f1_score
>>> import numpy as np
>>> y_true = np.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
>>> y_score = np.array([1.0, 0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1])
>>> binarized = compute_at_thresholds([accuracy_score, precision_score,
... recall_score, f1_score],
... y_true, y_score)
"""
if util.isiter(fn):
(thresholds,
Y_pred) = binarize.scores_at_thresholds(y_score,
n_thresholds=n_thresholds)
metrics = [np.array([fn_(y_true, y_pred) for y_pred in Y_pred])
for fn_ in fn]
return thresholds, metrics
else:
(thresholds,
Y_pred) = binarize.scores_at_thresholds(y_score,
n_thresholds=n_thresholds)
metrics = np.array([fn(y_true, y_pred) for y_pred in Y_pred])
return thresholds, metrics
def confusion_matrix(y_true, y_pred, normalize):
pass
@validate.argument_is_proportion('top_proportion')
def precision_at(y_true, y_score, top_proportion, ignore_nas=False):
'''
Calculates precision at a given proportion.
Only supports binary classification.
'''
# Sort scores in descending order
scores_sorted = np.sort(y_score)[::-1]
# Based on the proportion, get the index to split the data
# if value is negative, return 0
cutoff_index = max(int(len(y_true) * top_proportion) - 1, 0)
# Get the cutoff value
cutoff_value = scores_sorted[cutoff_index]
# Convert scores to binary, by comparing them with the cutoff value
scores_binary = np.array([int(y >= cutoff_value) for y in y_score])
# Calculate precision using sklearn function
if ignore_nas:
precision = __precision(y_true, scores_binary)
else:
precision = precision_score(y_true, scores_binary)
return precision, cutoff_value
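# Illustrative sketch: with the two top-scored samples labeled positive,
# precision over the top 20 % of scores is perfect and the returned cutoff is
# the second-highest score.
#
#     y_true = np.array([1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
#     y_score = np.array([0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1, 0.0])
#     precision_at(y_true, y_score, top_proportion=0.2)   # -> (1.0, 0.8)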
def __precision(y_true, y_pred):
'''
    Precision metric tolerant to unlabeled data in y_true;
    NA values are ignored for the precision calculation
'''
# make copies of the arrays to avoid modifying the original ones
y_true = np.copy(y_true)
y_pred = np.copy(y_pred)
# precision = tp/(tp+fp)
    # True negatives do not affect precision value, so for every missing
# value in y_true, replace it with 0 and also replace the value
# in y_pred with 0
is_nan = np.isnan(y_true)
y_true[is_nan] = 0
y_pred[is_nan] = 0
precision = precision_score(y_true, y_pred)
return precision
@validate.argument_is_proportion('top_proportion')
def tp_at(y_true, y_score, top_proportion):
y_pred = binarize.scores_at_top_proportion(y_score, top_proportion)
tp = (y_pred == 1) & (y_true == 1)
return tp.sum()
@validate.argument_is_proportion('top_proportion')
def fp_at(y_true, y_score, top_proportion):
y_pred = binarize.scores_at_top_proportion(y_score, top_proportion)
fp = (y_pred == 1) & (y_true == 0)
return fp.sum()
@validate.argument_is_proportion('top_proportion')
def tn_at(y_true, y_score, top_proportion):
y_pred = binarize.scores_at_top_proportion(y_score, top_proportion)
tn = (y_pred == 0) & (y_true == 0)
return tn.sum()
@validate.argument_is_proportion('top_proportion')
def fn_at(y_true, y_score, top_proportion):
y_pred = binarize.scores_at_top_proportion(y_score, top_proportion)
fn = (y_pred == 0) & (y_true == 1)
return fn.sum()
@validate.argument_is_proportion('top_proportion')
def labels_at(y_true, y_score, top_proportion, normalize=False):
'''
Return the number of labels encountered in the top X proportion
'''
# Get indexes of scores sorted in descending order
indexes = np.argsort(y_score)[::-1]
# Sort true values in the same order
y_true_sorted = y_true[indexes]
# Grab top x proportion of true values
cutoff_index = max(int(len(y_true_sorted) * top_proportion) - 1, 0)
# add one to index to grab values including that index
y_true_top = y_true_sorted[:cutoff_index + 1]
# Count the number of non-nas in the top x proportion
# we are returning a count so it should be an int
values = int((~np.isnan(y_true_top)).sum())
if normalize:
values = float(values) / (~np.isnan(y_true)).sum()
return values
| mit |
saihttam/kaggle-axa | RobustRegressionDriver.py | 1 | 5500 | import numpy as np
from sklearn.ensemble import GradientBoostingRegressor
from random import sample, seed
from sklearn.decomposition import TruncatedSVD
from math import floor
from sklearn import cross_validation
import numpy as np
from numpy.linalg import norm, svd
def inexact_augmented_lagrange_multiplier(X, lmbda=.01, tol=1e-3,
maxiter=100, verbose=True):
"""
Inexact Augmented Lagrange Multiplier
"""
Y = X
norm_two = norm(Y.ravel(), 2)
norm_inf = norm(Y.ravel(), np.inf) / lmbda
dual_norm = np.max([norm_two, norm_inf])
Y = Y / dual_norm
A = np.zeros(Y.shape)
E = np.zeros(Y.shape)
dnorm = norm(X, 'fro')
mu = 1.25 / norm_two
rho = 1.5
sv = 10.
n = Y.shape[0]
itr = 0
while True:
Eraw = X - A + (1/mu) * Y
Eupdate = np.maximum(Eraw - lmbda / mu, 0) + np.minimum(Eraw + lmbda / mu, 0)
U, S, V = svd(X - Eupdate + (1 / mu) * Y, full_matrices=False)
        svp = np.count_nonzero(S > 1 / mu)  # number of singular values above the 1/mu threshold
if svp < sv:
sv = np.min([svp + 1, n])
else:
sv = np.min([svp + round(.05 * n), n])
Aupdate = np.dot(np.dot(U[:, :svp], np.diag(S[:svp] - 1 / mu)), V[:svp, :])
A = Aupdate
E = Eupdate
Z = X - A - E
Y = Y + mu * Z
mu = np.min([mu * rho, mu * 1e7])
itr += 1
if ((norm(Z, 'fro') / dnorm) < tol) or (itr >= maxiter):
break
if verbose:
print "Finished at iteration %d" % (itr)
return A, E
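# Illustrative usage sketch: decompose a data matrix into a low-rank part A and
# a sparse error part E so that X is approximately A + E (robust PCA).
#
#     X = np.random.randn(100, 20)
#     A, E = inexact_augmented_lagrange_multiplier(X, lmbda=0.01, verbose=False)
#     residual = norm(X - A - E, 'fro')   # should be small relative to norm(X, 'fro')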
class RegressionDriver(object):
"""Class for Regression-based analysis of Driver traces"""
def __init__(self, driver, datadict, numberofrows=40): #, numfeatures = 200):
"""Initialize by providing a (positive) driver example and a dictionary of (negative) driver references."""
seed(42)
self.driver = driver
self.numfeatures = self.driver.num_features
featurelist = []
self.__clf = GradientBoostingRegressor(n_estimators=300, max_depth=4, min_samples_leaf=2)
# gbr = GradientBoostingRegressor(n_estimators=500, max_depth=10, max_features=numfeatures, random_state=42)
# pca = PCA(whiten=True, n_components=numfeatures)
# estimators = [('polyf', PolynomialFeatures()), ('scale', MinMaxScaler()), ('pca', PCA()), ('gbr', gbr)]
# self.__clf = Pipeline(estimators)
self.__indexlist = []
for trace in self.driver.traces:
self.__indexlist.append(trace.identifier)
featurelist.append(trace.features)
# Initialize train and test np arrays
self.__traindata = np.asarray(featurelist)
self.__testdata = np.asarray(featurelist)
self.__trainlabels = np.ones((self.__traindata.shape[0],))
data = np.empty((0, self.numfeatures), float)
setkeys = datadict.keys()
if driver.identifier in setkeys:
setkeys.remove(driver.identifier)
else:
setkeys = sample(setkeys, len(setkeys) - 1)
for key in setkeys:
if key != driver.identifier:
rand_smpl = [datadict[key][i] for i in sorted(sample(xrange(len(datadict[key])), numberofrows)) ]
data = np.append(data, np.asarray(rand_smpl), axis=0)
self.__traindata = np.append(self.__traindata, data, axis=0)
self.__trainlabels = np.append(self.__trainlabels, np.zeros((data.shape[0],)), axis=0)
self.__y = np.zeros((self.__testdata.shape[0],))
def classify(self, nfolds=4):
"""Perform classification"""
components = self.__traindata.shape[1]
_, train_rpca_X_np = inexact_augmented_lagrange_multiplier(np.nan_to_num(self.__traindata))
_, test_rpca_X_np = inexact_augmented_lagrange_multiplier(np.nan_to_num(self.__testdata))
skf = cross_validation.StratifiedKFold(self.__trainlabels, n_folds=nfolds)
for train_index, _ in skf:
X_train = train_rpca_X_np[train_index]
y_train = self.__trainlabels[train_index]
self.__clf.fit(X_train, y_train)
self.__y += self.__clf.predict(test_rpca_X_np)
self.__y /= float(nfolds)
# feature_importance = self.__clf.feature_importances_
# feature_importance = 100.0 * (feature_importance / feature_importance.max())
# print feature_importance
def toKaggle(self):
"""Return string in Kaggle submission format"""
returnstring = ""
for i in xrange(len(self.__indexlist) - 1):
returnstring += "%d_%d,%.6f\n" % (self.driver.identifier, self.__indexlist[i], self.__y[i])
returnstring += "%d_%d,%.6f" % (self.driver.identifier, self.__indexlist[len(self.__indexlist)-1], self.__y[len(self.__indexlist)-1])
return returnstring
def validate(self, datadict):
from sklearn.metrics import roc_auc_score
testdata = np.empty((0, self.numfeatures), float)
y_true = np.empty((0,), float)
for key in datadict.keys():
currenttestdata = np.asarray(datadict[key])
testdata = np.append(testdata, currenttestdata, axis=0)
if key != self.driver.identifier:
y_true = np.append(y_true, np.zeros((currenttestdata.shape[0],)), axis=0)
else:
y_true = np.append(y_true, np.ones((currenttestdata.shape[0],)), axis=0)
y_score = self.__clf.predict(testdata)
result = roc_auc_score(y_true, y_score)
return result | bsd-2-clause |
AE4317group07/paparazzi | sw/tools/calibration/calibration_utils.py | 27 | 12769 |
# Copyright (C) 2010 Antoine Drouin
#
# This file is part of Paparazzi.
#
# Paparazzi is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# Paparazzi is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Paparazzi; see the file COPYING. If not, write to
# the Free Software Foundation, 59 Temple Place - Suite 330,
# Boston, MA 02111-1307, USA.
#
from __future__ import print_function, division
import re
import numpy as np
from numpy import sin, cos
from scipy import linalg, stats
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def get_ids_in_log(filename):
"""Returns available ac_id from a log."""
f = open(filename, 'r')
ids = []
pattern = re.compile("\S+ (\S+)")
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
ac_id = m.group(1)
if not ac_id in ids:
ids.append(ac_id)
return ids
def read_log(ac_id, filename, sensor):
"""Extracts raw sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_RAW (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_scaled(ac_id, filename, sensor, t_start, t_end):
"""Extracts scaled sensor measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_"+sensor+"_SCALED (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
if (float(m.group(1)) >= float(t_start)) and (float(m.group(1)) < (float(t_end)+1.0)):
list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_meas)
def read_log_mag_current(ac_id, filename):
"""Extracts raw magnetometer and current measurements from a log."""
f = open(filename, 'r')
pattern = re.compile("(\S+) "+ac_id+" IMU_MAG_CURRENT_CALIBRATION (\S+) (\S+) (\S+) (\S+)")
list_meas = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern, line)
if m:
list_meas.append([float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5))])
return np.array(list_meas)
def filter_meas(meas, window_size, noise_threshold):
"""Select only non-noisy data."""
filtered_meas = []
filtered_idx = []
for i in range(window_size, len(meas)-window_size):
noise = meas[i-window_size:i+window_size, :].std(axis=0)
if linalg.norm(noise) < noise_threshold:
filtered_meas.append(meas[i, :])
filtered_idx.append(i)
return np.array(filtered_meas), filtered_idx
def get_min_max_guess(meas, scale):
"""Initial boundary based calibration."""
max_meas = meas[:, :].max(axis=0)
min_meas = meas[:, :].min(axis=0)
range = max_meas - min_meas
# check if we would get division by zero
if range.all():
n = (max_meas + min_meas) / 2
sf = 2*scale/range
return np.array([n[0], n[1], n[2], sf[0], sf[1], sf[2]])
else:
return np.array([0, 0, 0, 0])
def scale_measurements(meas, p):
"""Scale the set of measurements."""
l_comp = []
l_norm = []
for m in meas[:, ]:
sm = (m - p[0:3])*p[3:6]
l_comp.append(sm)
l_norm.append(linalg.norm(sm))
return np.array(l_comp), np.array(l_norm)
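# Illustrative sketch: the calibration model applied above is
# calibrated = (raw - neutral) * sensitivity with p = [nx, ny, nz, sx, sy, sz],
# so a typical accelerometer run would look like:
#
#     p0 = get_min_max_guess(flt_meas, scale=9.81)
#     cp0, np0 = scale_measurements(flt_meas, p0)
#     # np0 (the norms) should cluster around 9.81 if the initial guess is reasonable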
def estimate_mag_current_relation(meas):
"""Calculate linear coefficient of magnetometer-current relation."""
coefficient = []
for i in range(0, 3):
gradient, intercept, r_value, p_value, std_err = stats.linregress(meas[:, 3], meas[:, i])
coefficient.append(gradient)
return coefficient
def print_xml(p, sensor, res):
"""Print xml for airframe file."""
print("")
print("<define name=\""+sensor+"_X_NEUTRAL\" value=\""+str(int(round(p[0])))+"\"/>")
print("<define name=\""+sensor+"_Y_NEUTRAL\" value=\""+str(int(round(p[1])))+"\"/>")
print("<define name=\""+sensor+"_Z_NEUTRAL\" value=\""+str(int(round(p[2])))+"\"/>")
print("<define name=\""+sensor+"_X_SENS\" value=\""+str(p[3]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Y_SENS\" value=\""+str(p[4]*2**res)+"\" integer=\"16\"/>")
print("<define name=\""+sensor+"_Z_SENS\" value=\""+str(p[5]*2**res)+"\" integer=\"16\"/>")
print("")
def print_imu_scaled(sensor, measurements, attrs):
print("")
print(sensor+" : Time Range("+str(measurements[:,0].min(axis=0))+" : "+str(measurements[:,0].max(axis=0))+")")
np.set_printoptions(formatter={'float': '{:-7.3f}'.format})
print(" " + attrs[2] + " " + attrs[3] + " " + attrs[4])
print("Min " + str(measurements[:,1:].min(axis=0)*attrs[0]) + " " + attrs[1])
print("Max " + str(measurements[:,1:].max(axis=0)*attrs[0]) + " " + attrs[1])
print("Mean " + str(measurements[:,1:].mean(axis=0)*attrs[0]) + " " + attrs[1])
print("StDev " + str(measurements[:,1:].std(axis=0)*attrs[0]) + " " + attrs[1])
def plot_measurements(sensor, measurements):
plt.plot(measurements[:, 0])
plt.plot(measurements[:, 1])
plt.plot(measurements[:, 2])
plt.ylabel('ADC')
plt.title("Raw %s measurements" % sensor)
plt.show()
def plot_results(sensor, measurements, flt_idx, flt_meas, cp0, np0, cp1, np1, sensor_ref, blocking=True):
"""Plot calibration results."""
# plot raw measurements with filtered ones marked as red circles
plt.subplot(3, 1, 1)
plt.plot(measurements[:, 0])
plt.plot(measurements[:, 1])
plt.plot(measurements[:, 2])
plt.plot(flt_idx, flt_meas[:, 0], 'ro')
plt.plot(flt_idx, flt_meas[:, 1], 'ro')
plt.plot(flt_idx, flt_meas[:, 2], 'ro')
plt.ylabel('ADC')
plt.title('Raw '+sensor+', red dots are actually used measurements')
plt.tight_layout()
# show scaled measurements with initial guess
plt.subplot(3, 2, 3)
plt.plot(cp0[:, 0])
plt.plot(cp0[:, 1])
plt.plot(cp0[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('scaled '+sensor+' (initial guess)')
plt.xticks([])
plt.subplot(3, 2, 4)
plt.plot(np0)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('norm of '+sensor+' (initial guess)')
plt.xticks([])
# show scaled measurements after optimization
plt.subplot(3, 2, 5)
plt.plot(cp1[:, 0])
plt.plot(cp1[:, 1])
plt.plot(cp1[:, 2])
plt.plot(-sensor_ref*np.ones(len(flt_meas)))
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('scaled '+sensor+' (optimized)')
plt.xticks([])
plt.subplot(3, 2, 6)
plt.plot(np1)
plt.plot(sensor_ref*np.ones(len(flt_meas)))
plt.title('norm of '+sensor+' (optimized)')
plt.xticks([])
# if we want to have another plot we only draw the figure (non-blocking)
# also in matplotlib before 1.0.0 there is only one call to show possible
if blocking:
plt.show()
else:
plt.draw()
def plot_imu_scaled(sensor, measurements, attrs):
"""Plot imu scaled results."""
plt.figure("Sensor Scaled")
plt.subplot(4, 1, 1)
plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0])
plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0])
plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0])
#plt.xlabel('Time (s)')
plt.ylabel(attrs[1])
plt.title(sensor)
plt.subplot(4, 1, 2)
plt.plot(measurements[:, 0], measurements[:, 1]*attrs[0], 'b')
#plt.xlabel('Time (s)')
plt.ylabel(attrs[2])
plt.subplot(4, 1, 3)
plt.plot(measurements[:, 0], measurements[:, 2]*attrs[0], 'g')
#plt.xlabel('Time (s)')
plt.ylabel(attrs[3])
plt.subplot(4, 1, 4)
plt.plot(measurements[:, 0], measurements[:, 3]*attrs[0], 'r')
plt.xlabel('Time (s)')
plt.ylabel(attrs[4])
plt.show()
def plot_imu_scaled_fft(sensor, measurements, attrs):
"""Plot imu scaled fft results."""
#dt = 0.0769
#Fs = 1/dt
Fs = 26.0
plt.figure("Sensor Scaled - FFT")
plt.subplot(3, 1, 1)
plt.magnitude_spectrum(measurements[:, 1]*attrs[0], Fs=Fs, scale='linear')
plt.ylabel(attrs[2])
plt.title(sensor)
plt.subplot(3, 1, 2)
plt.magnitude_spectrum(measurements[:, 2]*attrs[0], Fs=Fs, scale='linear')
plt.ylabel(attrs[3])
plt.subplot(3, 1, 3)
plt.magnitude_spectrum(measurements[:, 3]*attrs[0], Fs=Fs, scale='linear')
plt.xlabel('Frequency')
plt.ylabel(attrs[4])
plt.show()
def plot_mag_3d(measured, calibrated, p):
"""Plot magnetometer measurements on 3D sphere."""
# set up points for sphere and ellipsoid wireframes
u = np.r_[0:2 * np.pi:20j]
v = np.r_[0:np.pi:20j]
wx = np.outer(cos(u), sin(v))
wy = np.outer(sin(u), sin(v))
wz = np.outer(np.ones(np.size(u)), cos(v))
ex = p[0] * np.ones(np.size(u)) + np.outer(cos(u), sin(v)) / p[3]
ey = p[1] * np.ones(np.size(u)) + np.outer(sin(u), sin(v)) / p[4]
ez = p[2] * np.ones(np.size(u)) + np.outer(np.ones(np.size(u)), cos(v)) / p[5]
# measurements
mx = measured[:, 0]
my = measured[:, 1]
mz = measured[:, 2]
# calibrated values
cx = calibrated[:, 0]
cy = calibrated[:, 1]
cz = calibrated[:, 2]
# axes size
left = 0.02
bottom = 0.05
width = 0.46
height = 0.9
rect_l = [left, bottom, width, height]
rect_r = [left/2+0.5, bottom, width, height]
fig = plt.figure(figsize=plt.figaspect(0.5))
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_l)
else:
ax = fig.add_subplot(1, 2, 1, position=rect_l, projection='3d')
# plot measurements
ax.scatter(mx, my, mz)
    if hasattr(plt, 'hold'):  # plt.hold() was removed in matplotlib >= 3.0
        plt.hold(True)
# plot line from center to ellipsoid center
ax.plot([0.0, p[0]], [0.0, p[1]], [0.0, p[2]], color='black', marker='+', markersize=10)
# plot ellipsoid
ax.plot_wireframe(ex, ey, ez, color='grey', alpha=0.5)
# Create cubic bounding box to simulate equal aspect ratio
max_range = np.array([mx.max() - mx.min(), my.max() - my.min(), mz.max() - mz.min()]).max()
Xb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][0].flatten() + 0.5 * (mx.max() + mx.min())
Yb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][1].flatten() + 0.5 * (my.max() + my.min())
Zb = 0.5 * max_range * np.mgrid[-1:2:2, -1:2:2, -1:2:2][2].flatten() + 0.5 * (mz.max() + mz.min())
# add the fake bounding box:
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
ax.set_title('MAG raw with fitted ellipsoid and center offset')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
if matplotlib.__version__.startswith('0'):
ax = Axes3D(fig, rect=rect_r)
else:
ax = fig.add_subplot(1, 2, 2, position=rect_r, projection='3d')
ax.plot_wireframe(wx, wy, wz, color='grey', alpha=0.5)
    if hasattr(plt, 'hold'):  # plt.hold() was removed in matplotlib >= 3.0
        plt.hold(True)
ax.scatter(cx, cy, cz)
ax.set_title('MAG calibrated on unit sphere')
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim3d(-1, 1)
ax.set_ylim3d(-1, 1)
ax.set_zlim3d(-1, 1)
plt.show()
def read_turntable_log(ac_id, tt_id, filename, _min, _max):
""" Read a turntable log.
return an array which first column is turnatble and next 3 are gyro
"""
f = open(filename, 'r')
    pattern_g = re.compile(r"(\S+) "+str(ac_id)+r" IMU_GYRO_RAW (\S+) (\S+) (\S+)")
    pattern_t = re.compile(r"(\S+) "+str(tt_id)+r" IMU_TURNTABLE (\S+)")
last_tt = None
list_tt = []
while True:
line = f.readline().strip()
if line == '':
break
m = re.match(pattern_t, line)
if m:
last_tt = float(m.group(2))
m = re.match(pattern_g, line)
if m and last_tt and _min < last_tt < _max:
list_tt.append([last_tt, float(m.group(2)), float(m.group(3)), float(m.group(4))])
return np.array(list_tt)
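# Example usage (illustrative only -- the aircraft id, turntable id, log file
# name and rate window below are hypothetical):
#
#   tt_gyro = read_turntable_log(ac_id=1, tt_id=43, filename="turntable_run.data",
#                                _min=0.5, _max=10.0)
#   # tt_gyro[:, 0] is the turntable measurement, tt_gyro[:, 1:4] the raw gyro axes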
| gpl-2.0 |
cjayb/kingjr_natmeg_arhus | JR_toolbox/skl_king_parallel_gs.py | 2 | 15257 |
print("######################################################################")
print("# Parallel n-split k-stratified-fold continuous SVM Scikitlearn MVPA #")
print("# (c) Jean-Remi King 2012, jeanremi.king [at] gmail [dot] com #")
print("######################################################################")
# Implementation of a multivariate pattern analysis based on the scikit-learn
# toolbox (http://scikit-learn.org/stable/). It reads two .mat files
# (filenameX, filenamey) created by 'jr_classify.m'
#
# Function:
# skl_king_parallel.py filenameX filenamey [number_of_cores]
#
# Inputs:
# in filenameX:
# Xm: samples x features x classification matrix (e.g. trials x
# chans x time)
# in filenamey:
# y: vector indicating the class of each sample. Negative values
# will be used for generalization only. 0 indicates to-be-
# ignored samples.
# y2: cost/weights applied on each sample
# path: export directory
# nameX: export filename X
# namey: export filename y
# folding:type of folding(e.g. stratified)
# n_splits:number of splits
# n_folds: number of folds
# C: SVM penalization parameter
# compute_probas: compute logit fit
# compute_predict: compute traditional SVM
# fs_n: number of univariate features selected for classification
# dims: classification performed on dims dimensions
# dims_tg:classification generalized on dims_tg dimensions
#
# Ouputs:
# predict: prediction matrix (split x samples x dims x dimsg)
# predictg:same as predict for generalized samples
# probas: probas matrix (split x samples x dims x dimsg x class)
# probasg: same as probas for generalized samples
# coef: weight hyperplan vector
# all_folds:folding report (split x fold x samples)
# y_all: original y
# y: training y
# yg: generalized y
# filenameX:
# filenamey:
#
# Results are reported in: path + nameX + '_' + namey + "_results.mat"
###############################################################################
# (c) Jean-Remi King: jeanremi.king [at] gmail [dot] com
###############################################################################
# update 2013 01 03: input binary format
# update 2012 12 20: remove np.copy, add compute distance
# update 2012 11 29: fix 3rd dimension issue
# update 2012 11 13: fix bug str output on some python versions
# update 2012 11 02: change stratified kfolding y by y2
# update 2012 11 02: add np.copy to Xtrain and Xtest
# update 2012 11 01: correct feature selection coef bug when at 100 %
# update 2012 10 23: correct leaveoneout bug
# update 2012 10 23: correct major n_split new_order error
# update 2012 10 18: correct python/matlab dim incompatibility
# update 2012 10 18: correct error fs between 99 and 100 && remove Kbest
# update 2012 10 17: correct error n_features shape and add nice
# update 2012 10 01: correct prediction error+change loading results option
# update 2012 09 14: handle fs float error
# update 2012 09 14: pass n_cores to sys.arg
# version 2012 09 13: implementation of parallelization
###############################################################################
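# Example invocation (illustrative -- the .mat file names below are hypothetical
# and must point to the files written by 'jr_classify.m'):
#
#   python skl_king_parallel_gs.py my_data_X.mat my_data_y.mat 8
#
# The third argument (number of cores) is optional and defaults to -1 (all cores).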
print("LIBRARY")
import sys as sys
import numpy as np
from scipy import stats
from sklearn import svm
from sklearn.cross_validation import StratifiedKFold, LeaveOneOut, KFold
from sklearn.feature_selection import SelectPercentile, SelectKBest, f_classif
from sklearn.externals.joblib import Parallel, delayed
import scipy.io as sio
from sklearn.preprocessing import Scaler
from sklearn.grid_search import GridSearchCV
from sklearn.metrics import precision_score
###############################################################################
print("INPUT DATA")
#-- get argument to load specific file
filenameX = str(sys.argv[1])
filenamey = str(sys.argv[2])
if len(sys.argv) <= 3:
n_cores = -1
else:
n_cores = int(sys.argv[3])
print("cores: " + str(n_cores))
print(filenameX)
print(filenamey)
#-- Load data into python
if filenameX[-4:] == ".mat": # backward compatibility
mat = sio.loadmat(filenameX)
Xm_all = mat["Xm"] # data
if np.size(Xm_all.shape) == 2: # fix 3rd dimension issue
X = np.zeros(np.append(Xm_all.shape, 1))
X[:, :, 0] = Xm_all
Xm_all = X
else: # load binary
mat = sio.loadmat(filenameX[0:-4] + "_dims.mat")
Xdim = mat["Xdim"].reshape(3)
Xm_all = np.fromfile(filenameX, dtype=np.float64)
Xm_all = Xm_all.reshape(Xdim[2], Xdim[1], Xdim[0]).transpose([2, 1, 0])
#-- load classification parameters
mat = sio.loadmat(filenamey)
dims = mat["dims"] # select time windows to compute
dims = np.reshape(dims, dims.size) - 1 # reshape for skl compatibility
dims_tg = mat["dims_tg"] - 1
mat = sio.loadmat(filenamey, squeeze_me=True)
path = mat["path"]
nameX = mat["nameX"]
namey = mat["namey"]
folding = mat["folding"]
n_splits = mat["n_splits"]
n_folds = mat["n_folds"] # fold number
svm_C = mat["C"] # svm penalization parameter
compute_probas = mat["compute_probas"]
compute_predict = mat["compute_predict"]
compute_distance = mat["compute_distance"]
fs_n = mat["fs"] # feature selection
y_all = mat["y"] # class used for train and test
print(Xm_all.shape)
print(y_all.shape)
y2_all = mat["y2"] # class used for sample weights
#-- build training and generalizing classes
Xm = Xm_all[y_all > 0, :, :] # training categories
Xmg = Xm_all[y_all < 0, :, :] # generalization categories
y = y_all[y_all > 0]
yg = y_all[y_all < 0]
y2 = y2_all[y_all > 0]
n_samples, n_features, unused = Xm.shape
n_samplesg, unused, unused = Xmg.shape
n_featuresg = n_features
n_dims = dims.shape[0]
n_dimsg = n_dims
n_dims_tg = dims_tg.shape[1]
n_dimsg_tg = dims_tg.shape[1]
n_classes = np.unique(y).shape[0]
#deal with sample_weight
sample_weight = np.ones(y.shape[0])
classes = np.unique(y2)
for c in range(classes.shape[0]):
sample_weight[y2 == classes[c]] = 1. / (np.sum(y2 == classes[c]))
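# (illustrative example: with y2 = [1, 1, 1, 2] the weights become
# [1/3, 1/3, 1/3, 1], so every y2 class carries the same total weight in the fit)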
###############################################################################
print("PREPARE CLASSIFICATION")
#-- classifier
clf = GridSearchCV(svm.SVC(kernel='linear', probability=True),
{'C': svm_C}, score_func=precision_score)
#-- normalizer
scaler = Scaler()
#-- feature selection
if fs_n > 1:
fs = SelectKBest(f_classif, k=fs_n)
elif fs_n == -1:
fs = SelectKBest(f_classif, k=1)
else:
fs = SelectPercentile(f_classif, percentile=fs_n * 100)
#-- results initialization
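# (note on the idiom used below: for an array of zeros z, z ** np.nan is NaN
# element-wise, so these result arrays start out filled with NaN; similarly
# np.empty(...) ** 0 yields an array of ones)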
if compute_predict:
predict = np.zeros([n_splits, n_samples, n_dims, n_dims_tg]) ** np.nan
predictg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_folds]) ** np.nan
else:
predict = []
predictg = []
if compute_probas:
probas = np.zeros([n_splits, n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
probasg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_classes, n_folds]) ** np.nan
else:
probas = []
probasg = []
if compute_distance:
distance = np.zeros([n_splits, n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
distanceg = np.zeros([n_splits, n_samplesg, n_dimsg, n_dimsg_tg, n_classes, n_folds]) ** np.nan
else:
distance = []
distanceg = []
coef = np.empty([n_splits, n_folds, n_dims, n_classes * (n_classes - 1) / 2, n_features]) ** 0
all_folds = np.zeros([n_splits, n_folds, n_samples]) ** np.nan
###############################################################################
#-- Define parallel cross validation
def my_pipeline(train, test,
Xm_shfl, y_shfl, sw_shfl, Xmg,
dims, fs, scaler, clf,
n_samples, n_dims, n_dims_tg, n_classes):
# indicate opened fold
sys.stdout.write("<")
sys.stdout.flush()
# initialize results within a given fold
if compute_predict:
predict = np.zeros([n_samples, n_dims, n_dims_tg]) ** np.nan
predictg = np.zeros([n_samplesg, n_dimsg, n_dimsg_tg]) ** np.nan
else:
predict = []
predictg = []
if compute_probas:
probas = np.zeros([n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
probasg = np.zeros([n_samplesg, n_dimsg, n_dimsg_tg, n_classes]) ** np.nan
else:
probas = []
probasg = []
if compute_distance:
distance = np.zeros([n_samples, n_dims, n_dims_tg, n_classes]) ** np.nan
distanceg = np.zeros([n_samplesg, n_dimsg, n_dimsg_tg, n_classes]) ** np.nan
else:
distance = []
distanceg = []
coef = np.empty([n_dims, n_classes * (n_classes - 1) / 2, n_features]) ** 0
# apply different classification along dimension 0
for d in range(0, dims.shape[0]):
Xtrain = Xm_shfl[train, :, dims[d]]
ytrain = y_shfl[train]
sw_train = sw_shfl[train]
# (deal with NaN samples in training)
ytrain = ytrain[~np.isnan(np.nansum(Xtrain, axis=1))]
sw_train = sw_train[~np.isnan(np.nansum(Xtrain, axis=1))]
Xtrain = Xtrain[~np.isnan(np.nansum(Xtrain, axis=1)), :]
if np.unique(ytrain).shape[0] > 1:
# feature selection
fs.fit(Xtrain, ytrain)
Xtrain = fs.transform(Xtrain)
# normalization
scaler.fit(Xtrain)
Xtrain = scaler.transform(Xtrain)
# Grid search
clf.fit(Xtrain, ytrain, sample_weight=sw_train, cv=5)
# select best classifier
_clf = clf.best_estimator_
# print(clf.best_params_)
# retrieve features selected during univariate selection
if fs_n == -1 or (fs_n > 1):
# uni_features = sorted(range(len(fs.pvalues_)),key=lambda x:fs.pvalues_[x])
uni_features = range(0, _clf.coef_.shape[1])
else:
uni_features = fs.pvalues_ <= stats.scoreatpercentile(fs.pvalues_, fs.percentile)
# retrieve hyperplan (unselected features as 0)
coef[d, :, uni_features] = scaler.inverse_transform(_clf.coef_).T
# generalize across all time points
for d_tg in range(0, n_dims_tg):
# select data
Xtest = Xm_shfl[test, :, dims_tg[d, d_tg]]
# handles NaNs
test_nan = np.isnan(np.nansum(Xtest, axis=1))
Xtest = Xtest[~test_nan, :]
# feature selection from training
Xtest = fs.transform(Xtest)
# normalize from training
Xtest = scaler.transform(Xtest)
# generalize test samples
if (Xtest.shape[0] - np.sum(test_nan)) > 0:
if compute_predict:
predict[test[~test_nan], d, d_tg] = _clf.predict(Xtest)
if compute_probas:
probas[test[~test_nan], d, d_tg, :] = _clf.predict_proba(Xtest)
if compute_distance:
distance[test[~test_nan], d, d_tg, :] = _clf.decision_function(Xtest) # correct!
# predict on generalization sample
# select data
Xtestg = Xmg[:, :, dims_tg[d, d_tg]]
# handles NaNs
test_nan = np.isnan(np.nansum(Xtestg, axis=1))
if (Xtestg.shape[0] - np.sum(test_nan)) > 0:
Xtestg = Xtestg[~test_nan, :]
# preproc feature selection and normalization
Xtestg = fs.transform(Xtestg)
Xtestg = scaler.transform(Xtestg)
# compute prediction
if compute_predict:
predictg[~test_nan, d, d_tg] = clf.predict(Xtestg)
if compute_probas:
probasg[~test_nan, d, d_tg, :] = clf.predict_proba(Xtestg)
if compute_distance:
distanceg[~test_nan, d, d_tg, :] = clf.decision_function(Xtestg) # correct!
# summarize fold results
out = {
'coef': coef,
'predict': predict,
'predictg': predictg,
'probas': probas,
'probasg': probasg,
'distance': distance,
'distanceg': distanceg}
# indicate end of fold
sys.stdout.write(">")
sys.stdout.flush()
return out
###############################################################################
print("CLASSIFY")
#-- Shuffle split
for split in range(n_splits):
print("split " + str(split))
#-- shuffle order in case this is not the first split
new_order = np.array(range(y.shape[0]))
if split > 0:
np.random.shuffle(new_order)
y_shfl = y
y_shfl = y_shfl[new_order]
y2_shfl = y2
y2_shfl = y2_shfl[new_order]
Xm_shfl = Xm
Xm_shfl = Xm_shfl[new_order, :, :]
sw_shfl = sample_weight
sw_shfl = sw_shfl[new_order]
else:
y_shfl = y
y2_shfl = y2
Xm_shfl = Xm
sw_shfl = sample_weight
#-- define crossvalidation
if folding == 'stratified':
cv = StratifiedKFold(y2_shfl, k=n_folds)
elif folding == 'kfolding':
cv = KFold(n=y2_shfl.shape[0], k=n_folds)
elif folding == 'leaveoneout':
n_folds = y_shfl.shape[0]
cv = LeaveOneOut(n=y_shfl.shape[0])
else:
print("unknown crossvalidation method!")
# Cross-validation computed in parallel
out = Parallel(n_jobs=n_cores)(delayed(my_pipeline)(
train=train,
test=test,
Xm_shfl=Xm_shfl,
y_shfl=y_shfl,
sw_shfl=sw_shfl,
Xmg=Xmg,
dims=dims,
fs=fs,
scaler=scaler,
clf=clf,
n_samples=n_samples,
n_dims=n_dims,
n_dims_tg=n_dims_tg,
n_classes=n_classes) for fold, (train, test) in enumerate(cv))
# reorder results folds and splits
for fold, (train, test) in enumerate(cv):
all_folds[split, fold, train] = 1
all_folds[split, fold, test] = 0
coef[split, fold, :, :, :] = out[fold]['coef']
if compute_predict:
predict[split, new_order[test], :, :] = out[fold]['predict'][test, :, :]
predictg[split, :, :, :, fold] = out[fold]['predictg']
if compute_probas:
probas[split, new_order[test], :, :, :] = out[fold]['probas'][test, :, :, :]
probasg[split, :, :, :, :, fold] = out[fold]['probasg']
if compute_distance:
distance[split, new_order[test], :, :, :] = out[fold]['distance'][test, :, :, :]
distanceg[split, :, :, :, :, fold] = out[fold]['distanceg']
all_folds[split, :, new_order] = all_folds[split, :, :].T
###############################################################################
print("EXPORT DATA")
mat['predict'] = predict
mat['predictg'] = predictg
mat['probas'] = probas
mat['probasg'] = probasg
mat['distance'] = distance
mat['distanceg'] = distanceg
mat['coef'] = coef
mat['all_folds'] = all_folds
mat['y_all'] = y_all
mat['y'] = y
mat['yg'] = yg
mat['filenameX'] = filenameX
mat['filenamey'] = filenamey
print(nameX)
print(namey)
print(path)
output = str(path) + str(nameX) + '_' + str(namey) + "_results.mat"
print(output)
sio.savemat(output, mat)
| bsd-3-clause |
eickenberg/scikit-learn | sklearn/cluster/tests/test_dbscan.py | 16 | 5134 | """
Tests for DBSCAN clustering algorithm
"""
import pickle
import numpy as np
from numpy.testing import assert_raises
from scipy.spatial import distance
from sklearn.utils.testing import assert_equal
from sklearn.cluster.dbscan_ import DBSCAN, dbscan
from .common import generate_clustered_data
from sklearn.metrics.pairwise import pairwise_distances
n_clusters = 3
X = generate_clustered_data(n_clusters=n_clusters)
def test_dbscan_similarity():
"""Tests the DBSCAN algorithm with a similarity array."""
# Parameters chosen specifically for this task.
eps = 0.15
min_samples = 10
# Compute similarities
D = distance.squareform(distance.pdist(X))
D /= np.max(D)
# Compute DBSCAN
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - (1 if -1 in labels else 0)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric="precomputed", eps=eps, min_samples=min_samples)
labels = db.fit(D).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_feature():
"""Tests the DBSCAN algorithm with a feature vector array."""
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
metric = 'euclidean'
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples)
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_callable():
"""Tests the DBSCAN algorithm with a callable metric."""
# Parameters chosen specifically for this task.
# Different eps to other test, because distance is not normalised.
eps = 0.8
min_samples = 10
# metric is the function reference, not the string key.
metric = distance.euclidean
# Compute DBSCAN
# parameters chosen for task
core_samples, labels = dbscan(X, metric=metric, eps=eps,
min_samples=min_samples,
algorithm='ball_tree')
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(metric=metric, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
def test_dbscan_balltree():
"""Tests the DBSCAN algorithm with balltree for neighbor calculation."""
eps = 0.8
min_samples = 10
D = pairwise_distances(X)
core_samples, labels = dbscan(D, metric="precomputed", eps=eps,
min_samples=min_samples)
# number of clusters, ignoring noise if present
n_clusters_1 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_1, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_2 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_2, n_clusters)
db = DBSCAN(p=2.0, eps=eps, min_samples=min_samples, algorithm='kd_tree')
labels = db.fit(X).labels_
n_clusters_3 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_3, n_clusters)
db = DBSCAN(p=1.0, eps=eps, min_samples=min_samples, algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_4 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_4, n_clusters)
db = DBSCAN(leaf_size=20, eps=eps, min_samples=min_samples,
algorithm='ball_tree')
labels = db.fit(X).labels_
n_clusters_5 = len(set(labels)) - int(-1 in labels)
assert_equal(n_clusters_5, n_clusters)
def test_input_validation():
"""DBSCAN.fit should accept a list of lists."""
X = [[1., 2.], [3., 4.]]
DBSCAN().fit(X) # must not raise exception
def test_dbscan_badargs():
"""Test bad argument values: these should all raise ValueErrors"""
assert_raises(ValueError,
dbscan,
X, eps=-1.0)
assert_raises(ValueError,
dbscan,
X, algorithm='blah')
assert_raises(ValueError,
dbscan,
X, metric='blah')
assert_raises(ValueError,
dbscan,
X, leaf_size=-1)
assert_raises(ValueError,
dbscan,
X, p=-1)
def test_pickle():
obj = DBSCAN()
s = pickle.dumps(obj)
assert_equal(type(pickle.loads(s)), obj.__class__)
| bsd-3-clause |
OCM-Lab-PUC/switch-chile | python_utility_scripts/create_transmission_csv.py | 1 | 5810 | # -*- coding: utf-8 -*-
# Copyright 2016 The Switch-Chile Authors. All rights reserved.
# Licensed under the Apache License, Version 2, which is in the LICENSE file.
# Operations, Control and Markets laboratory at Pontificia Universidad
# Católica de Chile.
import pandas, os, re, datetime, sys
from unidecode import unidecode
if sys.getdefaultencoding() != 'utf-8':
# Character encoding may raise errors if set in ascii or other simple
# encodings which do not support spanish characters.
reload(sys)
sys.setdefaultencoding('utf-8')
def limpiar(a):
    # Return a string cleaned of anomalous characters, spaces and commas
limpio = unidecode(a.replace(' ','_').replace('ó','o')).lower().replace(',','_')
while limpio[0] == '_':
limpio = limpio[1:]
while limpio[-1] == '_':
limpio = limpio[:-1]
return limpio
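# (illustrative example: limpiar(' Estación Central, 220 kV ') returns
#  'estacion_central__220_kv')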
def SepararLineaSIC(a):
    # Some names write '220 KV' instead of '220KV'; normalize them so they all match
a = a.replace('k','K').replace('v','V').replace(' KV','KV')
try:
        # Split on the hyphen and take the first element
se1 = limpiar(a.split('-')[0])
        # Remove the trailing voltage token (e.g. 220KV) from the string, then take the part after the hyphen
se2 = limpiar(a.replace(a.split(' ')[-1],'').split('-')[1])
return [se1,se2]
except:
        print('Unable to split', a)
return [limpiar(a),limpiar(a)]
def SepararLineaSIC2(a):
a = a.replace('k','K').replace('v','V').replace(' KV','KV')
try:
        # Split on the hyphen and take the first element
se1 = limpiar(a.split('-')[0])
        # Take the part after the hyphen and drop the trailing voltage token before cleaning
se2 = limpiar(' '.join(a.split('-')[1].split('KV')[0].split(' ')[:-1]))
return [se1,se2]
except:
        print('Unable to split', a)
return [limpiar(a),limpiar(a)]
def SepararLineaSING(a):
try:
a = a.split('kV ')[1]
        # Split on the hyphen and take the first element
se1 = limpiar(a.split('-')[0])
        # Take the part after the hyphen
se2 = limpiar(a.split('-')[1])
return [se1,se2]
except:
        print('Unable to split', a)
return [limpiar(a),limpiar(a)]
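# (illustrative example on a made-up line name:
#  SepararLineaSIC('Alto Jahuel - Chena 220 kV') -> ['alto_jahuel', 'chena'])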
###############################
# Get the SIC data            #
###############################
# Unit conversion workbook to open
transmision = pandas.read_excel('capacidad_instalada_de_transmision.xlsx', sheetname= 'SIC', parse_cols = 'E:K', skiprows=6)
transmision.columns = ['SE','Tramo','dsa','Tension (KV)', 'N','Longitud (km)','Capacidad (MVA)']
# Get the columns
#for i,j in enumerate(transmision.columns.values):
# print(limpiar(j),'=',i)
linea = 0
tramo = 1
tension = 3
numerocircuitos = 4
longitud = 5
capacidad = 6
# Build a data frame with the substations at both ends of each line
SE = pandas.DataFrame({'SE1' : [],'SE2' : [], 'SEalt1' : [],'SEalt2' : []})
for i in transmision.index:
    # Stop once there are no more rows to read
if pandas.isnull(transmision.ix[i,linea]):
break
subs = SepararLineaSIC2(transmision.ix[i,tramo])
subs2 = SepararLineaSIC(transmision.ix[i,linea])
#print(subs,subs2)
fila = pandas.DataFrame([[subs[0],subs[1], subs2[0], subs2[1]]], columns=['SE1','SE2','SEalt1','SEalt2'])
SE = SE.append(fila, ignore_index = True)
# Build the new matrix with the substations, voltage, etc.
neotransmision = pandas.concat([pandas.Series(['sic' for i in range(i)], name = 'Sistema'), SE.ix[:i,0], SE.ix[:i,1], SE.ix[:i,2], SE.ix[:i,3], transmision.ix[:i-1,3], transmision.iloc[:i,4], transmision.iloc[:i,5], transmision.iloc[:i,6]], names = None, axis = 1)
################################
# Get the SING data            #
################################
# Read the sheet, dropping the first two header lines (merged cells are not read properly...)
transmision = pandas.read_excel('capacidad_instalada_de_transmision.xlsx', sheetname= 'SING', parse_cols = 'E:J', skiprows=6,header = None)
transmision = transmision[2:].reset_index(drop = True)
linea = 0
tension = 1
numerocircuitos = 2
longitud = 3
capacidad = 5
# Build a data frame with the substations at both ends of each line
SE = pandas.DataFrame({'SE1' : [],'SE2' : [], 'SEalt1' : [],'SEalt2' : []})
for i in transmision.index:
    # Stop once there are no more rows to read
if pandas.isnull(transmision.ix[i,linea]):
break
subs = SepararLineaSING(transmision.ix[i,linea])
fila = pandas.DataFrame([[subs[0],subs[1],subs[0],subs[1]]], columns=['SE1','SE2','SEalt1','SEalt2'])
SE = SE.append(fila, ignore_index = True)
    # If no limit is reported, assign it the capacity
if transmision.ix[i,capacidad] == 'N/I' or pandas.isnull(transmision.ix[i,capacidad]):
transmision.ix[i,capacidad] = transmision.ix[i,4]
# Build the new matrix with the substations, voltage, etc.
neotransmision2 = pandas.concat([pandas.Series(['sing' for i in range(i)], name = 'Sistema'), SE.ix[:i,0], SE.ix[:i,1], SE.ix[:i,0], SE.ix[:i,1], transmision.ix[:i,tension], transmision.ix[:i,numerocircuitos], transmision.iloc[:i,longitud], transmision.iloc[:i,capacidad]], names = None, axis = 1)
neotransmision2 = neotransmision2[:-1]
# Rename the columns
neotransmision2.columns = ['Sistema','SE1','SE2','SEalt1','SEalt2','Tension (KV)', 'N','Longitud (km)','Capacidad (MVA)']
# Concatenate both systems
transmisionfinal = pandas.concat([neotransmision, neotransmision2])
# Convert the columns to int
transmisionfinal[['Tension (KV)', 'N']] = transmisionfinal[['Tension (KV)', 'N']].astype(int)
# Write out the data
transmisionfinal.to_csv('transmision.csv', index = None , float_format = '%.2f')
| apache-2.0 |
SvichkarevAnatoly/Course-Python-Bioinformatics | semester2/task8/exercise2.py | 1 | 3387 | import numpy
import random
import matplotlib.pyplot as plot
from sklearn.tree import DecisionTreeRegressor
from sklearn import tree
a = 1
b = 2
# Build a simple data set with y = a + b*x^2 + gaussian noise
nPoints = 1000
# x values for plotting
xPlot = [(float(i) / float(nPoints) - 0.5) for i in range(nPoints + 1)]
# x needs to be list of lists.
x = [[s] for s in xPlot]
# y (labels) is quadratic in x with gaussian noise added
# set seed
random.seed(1)
numpy.random.seed(1)
y = [a + b * s * s + numpy.random.normal(scale=0.1) for s in xPlot]
# take fixed test set 30% of sample
nSample = int(nPoints * 0.30)
idxTest = random.sample(range(nPoints), nSample)
idxTest.sort()
idxTrain = [idx for idx in range(nPoints) if not (idx in idxTest)]
# Define test and training attribute and label sets
xTrain = [x[r] for r in idxTrain]
xTest = [x[r] for r in idxTest]
yTrain = [y[r] for r in idxTrain]
yTest = [y[r] for r in idxTest]
# train a series of shallow trees, each fit to the residuals left by the previous ones
# collect the models in a list and check the error of the composite as the list grows
# maximum number of models to generate
numTreesMax = 30
# tree depth - typically at the high end
treeDepth = 5
# initialize a list to hold models
modelList = []
predList = []
eps = 0.3
# initialize residuals to be the labels y
residuals = list(yTrain)
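# The loop below is a shrunken (gradient boosting style) ensemble: tree m is fit
# to the current residuals r_m, which are then updated as
#   r_{m+1}[i] = r_m[i] - eps * T_m(x_i)
# and the cumulative test prediction is eps * sum_m T_m(x).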
for iTrees in range(numTreesMax):
modelList.append(DecisionTreeRegressor(max_depth=treeDepth))
modelList[-1].fit(xTrain, residuals)
# make prediction with latest model and add to list of predictions
latestInSamplePrediction = modelList[-1].predict(xTrain)
# use new predictions to update residuals
residuals = [residuals[i] - eps * latestInSamplePrediction[i] \
for i in range(len(residuals))]
latestOutSamplePrediction = modelList[-1].predict(xTest)
predList.append(list(latestOutSamplePrediction))
# build cumulative prediction from first "n" models
mse = []
allPredictions = []
for iModels in range(len(modelList)):
# add the first "iModels" of the predictions and multiply by eps
prediction = []
for iPred in range(len(xTest)):
prediction.append(
sum([predList[i][iPred] for i in range(iModels + 1)]) * eps)
allPredictions.append(prediction)
errors = [(yTest[i] - prediction[i]) for i in range(len(yTest))]
mse.append(sum([e * e for e in errors]) / len(yTest))
nModels = [i + 1 for i in range(len(modelList))]
# mse plot
plot.plot(nModels, mse)
plot.axis('tight')
plot.xlabel('Number of Models in Ensemble')
plot.ylabel('Mean Squared Error')
plot.ylim((0.0, max(mse)))
# plot.show()
plot.savefig("mseEx2.png")
plot.close()
print(min(mse))  # minimum test-set MSE over ensemble sizes
# predictions plot
plotList = [0, 14, 29]
lineType = [':', '-.', '--']
plot.figure()
for i in range(len(plotList)):
iPlot = plotList[i]
textLegend = 'Prediction with ' + str(iPlot) + ' Trees'
plot.plot(xTest, allPredictions[iPlot],
label=textLegend, linestyle=lineType[i])
plot.plot(xTest, yTest, label='True y Value', alpha=0.25)
plot.legend(bbox_to_anchor=(1, 0.3))
plot.axis('tight')
plot.xlabel('x value')
plot.ylabel('Predictions')
# plot.show()
plot.savefig("predictionsEx2.png")
plot.close()
# save first 2 tree
with open("tree1Ex2.dot", 'w') as f1:
f1 = tree.export_graphviz(modelList[0], out_file=f1)
with open("tree2Ex2.dot", 'w') as f2:
f2 = tree.export_graphviz(modelList[1], out_file=f2)
| gpl-2.0 |
miyyer/qb | qanta/hyperparam.py | 2 | 1848 | import copy
import json
import yaml
from sklearn.model_selection import ParameterGrid
def expand_config(base_file, hyper_file, output_file):
"""
This is useful for taking the qanta.yaml config, a set of values to try for different hyper parameters, and
generating a configuration representing each value in the parameter sweep
"""
with open(base_file) as f:
base_conf = yaml.load(f)
with open(hyper_file) as f:
hyper_conf = yaml.load(f)
all_base_guessers = base_conf["guessers"]
final_guessers = {}
for guesser, params in hyper_conf["parameters"].items():
base_guesser_conf = all_base_guessers[guesser]
if len(base_guesser_conf) != 1:
raise ValueError(
"More than one configuration for parameter tuning base is invalid"
)
base_guesser_conf = base_guesser_conf[0]
parameter_set = set(base_guesser_conf.keys()) | set(params.keys())
param_grid = {}
for p in parameter_set:
if p in params:
param_grid[p] = params[p]
else:
param_grid[p] = [base_guesser_conf[p]]
parameter_list = list(ParameterGrid(param_grid))
final_guessers[guesser] = parameter_list
final_conf = copy.deepcopy(base_conf)
for g in final_conf["guessers"]:
if g in final_guessers:
final_conf["guessers"][g] = copy.deepcopy(final_guessers[g])
# There is a bug in yaml.dump that doesn't handle outputting nested dicts/arrays correctly. I didn't want to debug
# So instead output to json then convert that to yaml
with open("/tmp/qanta-tmp.json", "w") as f:
json.dump(final_conf, f)
with open("/tmp/qanta-tmp.json") as f:
conf = json.load(f)
with open(output_file, "w") as f:
yaml.dump(conf, f)
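# Minimal usage sketch (illustrative -- the YAML file names are hypothetical):
#
#   expand_config(base_file="qanta.yaml",
#                 hyper_file="hyper-params.yaml",
#                 output_file="qanta-expanded.yaml")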
| mit |
chrisjdavie/shares | machine_learning/sklearn_dataset_format.py | 1 | 1160 | '''
Created on 2 Sep 2014
@author: chris
'''
'''File format of the fetched dataset bunch:
    - data:         list of length n_samples, containing the unicode documents
    - target:       list of length n_samples, containing an int reference to the target class
    - target_names: class names the target ints refer to
    - filenames:    names of the files storing the data (and probably the target too)
'''
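# A dataset with the same layout can be mocked up by hand as a plain mapping
# (illustrative sketch, not part of scikit-learn):
#
#   my_bunch = {'data': [u'first document text', u'second document text'],
#               'target': [0, 1],
#               'target_names': ['class_a', 'class_b'],
#               'filenames': ['doc_0.txt', 'doc_1.txt']}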
def main():
''' taken from the tutorials, I'm having a look at how they store datasets'''
from sklearn.datasets import fetch_20newsgroups
# import numpy as np
categories = ['alt.atheism', 'soc.religion.christian', 'comp.graphics', 'sci.med']
twenty_train = fetch_20newsgroups(subset='train',
categories=categories,
shuffle=True,
random_state=42)
print dir(twenty_train)
print twenty_train.keys()
# print twenty_train.data[0]
print twenty_train.target[0]
print len(twenty_train.filenames)
print twenty_train.filenames[0]
print twenty_train.target_names
if __name__ == '__main__':
main() | mit |
rexshihaoren/scikit-learn | sklearn/feature_extraction/tests/test_feature_hasher.py | 258 | 2861 | from __future__ import unicode_literals
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from nose.tools import assert_raises, assert_true
from numpy.testing import assert_array_equal, assert_equal
def test_feature_hasher_dicts():
h = FeatureHasher(n_features=16)
assert_equal("dict", h.input_type)
raw_X = [{"dada": 42, "tzara": 37}, {"gaga": 17}]
X1 = FeatureHasher(n_features=16).transform(raw_X)
gen = (iter(d.items()) for d in raw_X)
X2 = FeatureHasher(n_features=16, input_type="pair").transform(gen)
assert_array_equal(X1.toarray(), X2.toarray())
def test_feature_hasher_strings():
# mix byte and Unicode strings; note that "foo" is a duplicate in row 0
raw_X = [["foo", "bar", "baz", "foo".encode("ascii")],
["bar".encode("ascii"), "baz", "quux"]]
for lg_n_features in (7, 9, 11, 16, 22):
n_features = 2 ** lg_n_features
it = (x for x in raw_X) # iterable
h = FeatureHasher(n_features, non_negative=True, input_type="string")
X = h.transform(it)
assert_equal(X.shape[0], len(raw_X))
assert_equal(X.shape[1], n_features)
assert_true(np.all(X.data > 0))
assert_equal(X[0].sum(), 4)
assert_equal(X[1].sum(), 3)
assert_equal(X.nnz, 6)
def test_feature_hasher_pairs():
raw_X = (iter(d.items()) for d in [{"foo": 1, "bar": 2},
{"baz": 3, "quux": 4, "foo": -1}])
h = FeatureHasher(n_features=16, input_type="pair")
x1, x2 = h.transform(raw_X).toarray()
x1_nz = sorted(np.abs(x1[x1 != 0]))
x2_nz = sorted(np.abs(x2[x2 != 0]))
assert_equal([1, 2], x1_nz)
assert_equal([1, 3, 4], x2_nz)
def test_hash_empty_input():
n_features = 16
raw_X = [[], (), iter(range(0))]
h = FeatureHasher(n_features=n_features, input_type="string")
X = h.transform(raw_X)
assert_array_equal(X.A, np.zeros((len(raw_X), n_features)))
def test_hasher_invalid_input():
assert_raises(ValueError, FeatureHasher, input_type="gobbledygook")
assert_raises(ValueError, FeatureHasher, n_features=-1)
assert_raises(ValueError, FeatureHasher, n_features=0)
assert_raises(TypeError, FeatureHasher, n_features='ham')
h = FeatureHasher(n_features=np.uint16(2 ** 6))
assert_raises(ValueError, h.transform, [])
assert_raises(Exception, h.transform, [[5.5]])
assert_raises(Exception, h.transform, [[None]])
def test_hasher_set_params():
# Test delayed input validation in fit (useful for grid search).
hasher = FeatureHasher()
hasher.set_params(n_features=np.inf)
assert_raises(TypeError, hasher.fit)
def test_hasher_zeros():
# Assert that no zeros are materialized in the output.
X = FeatureHasher().transform([{'foo': 0}])
assert_equal(X.data.shape, (0,))
| bsd-3-clause |
aleju/imgaug | imgaug/augmentables/heatmaps.py | 2 | 25136 | """Classes to represent heatmaps, i.e. float arrays of ``[0.0, 1.0]``."""
from __future__ import print_function, division, absolute_import
import numpy as np
import six.moves as sm
from .. import imgaug as ia
from .base import IAugmentable
class HeatmapsOnImage(IAugmentable):
"""Object representing heatmaps on a single image.
Parameters
----------
arr : (H,W) ndarray or (H,W,C) ndarray
Array representing the heatmap(s) on a single image.
Multiple heatmaps may be provided, in which case ``C`` is expected to
denote the heatmap index.
The array must be of dtype ``float32``.
shape : tuple of int
Shape of the image on which the heatmap(s) is/are placed.
**Not** the shape of the heatmap(s) array, unless it is identical
to the image shape (note the likely difference between the arrays
in the number of channels).
This is expected to be ``(H, W)`` or ``(H, W, C)`` with ``C`` usually
being ``3``.
If there is no corresponding image, use ``(H_arr, W_arr)`` instead,
where ``H_arr`` is the height of the heatmap(s) array
(analogous ``W_arr``).
min_value : float, optional
Minimum value for the heatmaps that `arr` represents. This will
usually be ``0.0``.
max_value : float, optional
Maximum value for the heatmaps that `arr` represents. This will
usually be ``1.0``.
"""
def __init__(self, arr, shape, min_value=0.0, max_value=1.0):
"""Construct a new HeatmapsOnImage object."""
assert ia.is_np_array(arr), (
"Expected numpy array as heatmap input array, "
"got type %s" % (type(arr),))
# TODO maybe allow 0-sized heatmaps? in that case the min() and max()
# must be adjusted
assert arr.shape[0] > 0 and arr.shape[1] > 0, (
"Expected numpy array as heatmap with height and width greater "
"than 0, got shape %s." % (arr.shape,))
assert arr.dtype.name in ["float32"], (
"Heatmap input array expected to be of dtype float32, "
"got dtype %s." % (arr.dtype,))
assert arr.ndim in [2, 3], (
"Heatmap input array must be 2d or 3d, got shape %s." % (
arr.shape,))
assert len(shape) in [2, 3], (
"Argument 'shape' in HeatmapsOnImage expected to be 2d or 3d, "
"got shape %s." % (shape,))
assert min_value < max_value, (
"Expected min_value to be lower than max_value, "
"got %.4f and %.4f" % (min_value, max_value))
eps = np.finfo(arr.dtype).eps
components = arr.flat[0:50]
beyond_min = np.min(components) < min_value - eps
beyond_max = np.max(components) > max_value + eps
if beyond_min or beyond_max:
ia.warn(
"Value range of heatmap was chosen to be (%.8f, %.8f), but "
"found actual min/max of (%.8f, %.8f). Array will be "
"clipped to chosen value range." % (
min_value, max_value, np.min(arr), np.max(arr)))
arr = np.clip(arr, min_value, max_value)
if arr.ndim == 2:
arr = arr[..., np.newaxis]
self.arr_was_2d = True
else:
self.arr_was_2d = False
min_is_zero = 0.0 - eps < min_value < 0.0 + eps
max_is_one = 1.0 - eps < max_value < 1.0 + eps
if min_is_zero and max_is_one:
self.arr_0to1 = arr
else:
self.arr_0to1 = (arr - min_value) / (max_value - min_value)
self.shape = shape
self.min_value = min_value
self.max_value = max_value
def get_arr(self):
"""Get the heatmap's array in value range provided to ``__init__()``.
The :class:`HeatmapsOnImage` object saves heatmaps internally in the
value range ``[0.0, 1.0]``. This function converts the internal
representation to ``[min, max]``, where ``min`` and ``max`` are
provided to :func:`HeatmapsOnImage.__init__` upon instantiation of
the object.
Returns
-------
(H,W) ndarray or (H,W,C) ndarray
Heatmap array of dtype ``float32``.
"""
if self.arr_was_2d and self.arr_0to1.shape[2] == 1:
arr = self.arr_0to1[:, :, 0]
else:
arr = self.arr_0to1
eps = np.finfo(np.float32).eps
min_is_zero = 0.0 - eps < self.min_value < 0.0 + eps
max_is_one = 1.0 - eps < self.max_value < 1.0 + eps
if min_is_zero and max_is_one:
return np.copy(arr)
diff = self.max_value - self.min_value
return self.min_value + diff * arr
# TODO
# def find_global_maxima(self):
# raise NotImplementedError()
def draw(self, size=None, cmap="jet"):
"""Render the heatmaps as RGB images.
Parameters
----------
size : None or float or iterable of int or iterable of float, optional
Size of the rendered RGB image as ``(height, width)``.
See :func:`~imgaug.imgaug.imresize_single_image` for details.
If set to ``None``, no resizing is performed and the size of the
heatmaps array is used.
cmap : str or None, optional
Name of the ``matplotlib`` color map to use when convert the
heatmaps to RGB images.
If set to ``None``, no color map will be used and the heatmaps
will be converted to simple intensity maps.
Returns
-------
list of (H,W,3) ndarray
Rendered heatmaps as ``uint8`` arrays.
Always a **list** containing one RGB image per heatmap array
channel.
"""
heatmaps_uint8 = self.to_uint8()
heatmaps_drawn = []
for c in sm.xrange(heatmaps_uint8.shape[2]):
# We use c:c+1 here to get a (H,W,1) array. Otherwise imresize
# would have to re-attach an axis.
heatmap_c = heatmaps_uint8[..., c:c+1]
if size is not None:
heatmap_c_rs = ia.imresize_single_image(
heatmap_c, size, interpolation="nearest")
else:
heatmap_c_rs = heatmap_c
heatmap_c_rs = np.squeeze(heatmap_c_rs).astype(np.float32) / 255.0
if cmap is not None:
# import only when necessary (faster startup; optional
# dependency; less fragile -- see issue #225)
import matplotlib.pyplot as plt
cmap_func = plt.get_cmap(cmap)
heatmap_cmapped = cmap_func(heatmap_c_rs)
heatmap_cmapped = np.delete(heatmap_cmapped, 3, 2)
else:
heatmap_cmapped = np.tile(
heatmap_c_rs[..., np.newaxis], (1, 1, 3))
heatmap_cmapped = np.clip(
heatmap_cmapped * 255, 0, 255).astype(np.uint8)
heatmaps_drawn.append(heatmap_cmapped)
return heatmaps_drawn
def draw_on_image(self, image, alpha=0.75, cmap="jet", resize="heatmaps"):
"""Draw the heatmaps as overlays over an image.
Parameters
----------
image : (H,W,3) ndarray
Image onto which to draw the heatmaps.
Expected to be of dtype ``uint8``.
alpha : float, optional
Alpha/opacity value to use for the mixing of image and heatmaps.
Larger values mean that the heatmaps will be more visible and the
image less visible.
cmap : str or None, optional
Name of the ``matplotlib`` color map to use.
See :func:`HeatmapsOnImage.draw` for details.
resize : {'heatmaps', 'image'}, optional
In case of size differences between the image and heatmaps,
either the image or the heatmaps can be resized. This parameter
controls which of the two will be resized to the other's size.
Returns
-------
list of (H,W,3) ndarray
Rendered overlays as ``uint8`` arrays.
Always a **list** containing one RGB image per heatmap array
channel.
"""
# assert RGB image
assert image.ndim == 3, (
"Expected to draw on three-dimensional image, "
"got %d dimensions with shape %s instead." % (
image.ndim, image.shape))
assert image.shape[2] == 3, (
"Expected RGB image, got %d channels instead." % (image.shape[2],))
assert image.dtype.name == "uint8", (
"Expected uint8 image, got dtype %s." % (image.dtype.name,))
assert 0 - 1e-8 <= alpha <= 1.0 + 1e-8, (
"Expected 'alpha' to be in the interval [0.0, 1.0], got %.4f" % (
alpha))
assert resize in ["heatmaps", "image"], (
"Expected resize to be \"heatmaps\" or \"image\", "
"got %s instead." % (resize,))
if resize == "image":
image = ia.imresize_single_image(
image, self.arr_0to1.shape[0:2], interpolation="cubic")
heatmaps_drawn = self.draw(
size=image.shape[0:2] if resize == "heatmaps" else None,
cmap=cmap)
# TODO use blend_alpha here
mix = [
np.clip(
(1-alpha) * image + alpha * heatmap_i,
0, 255
).astype(np.uint8)
for heatmap_i
in heatmaps_drawn]
return mix
def invert(self):
"""Invert each component in the heatmap.
This shifts low values towards high values and vice versa.
This changes each value to::
v' = max - (v - min)
where ``v`` is the value at a spatial location, ``min`` is the
minimum value in the heatmap and ``max`` is the maximum value.
As the heatmap uses internally a ``0.0`` to ``1.0`` representation,
this simply becomes ``v' = 1.0 - v``.
This function can be useful e.g. when working with depth maps, where
algorithms might have an easier time representing the furthest away
points with zeros, requiring an inverted depth map.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Inverted heatmap.
"""
arr_inv = HeatmapsOnImage.from_0to1(
1 - self.arr_0to1,
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
arr_inv.arr_was_2d = self.arr_was_2d
return arr_inv
def pad(self, top=0, right=0, bottom=0, left=0, mode="constant", cval=0.0):
"""Pad the heatmaps at their top/right/bottom/left side.
Parameters
----------
top : int, optional
Amount of pixels to add at the top side of the heatmaps.
Must be ``0`` or greater.
right : int, optional
Amount of pixels to add at the right side of the heatmaps.
Must be ``0`` or greater.
bottom : int, optional
Amount of pixels to add at the bottom side of the heatmaps.
Must be ``0`` or greater.
left : int, optional
Amount of pixels to add at the left side of the heatmaps.
Must be ``0`` or greater.
mode : string, optional
Padding mode to use. See :func:`~imgaug.imgaug.pad` for details.
cval : number, optional
Value to use for padding `mode` is ``constant``.
See :func:`~imgaug.imgaug.pad` for details.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Padded heatmaps of height ``H'=H+top+bottom`` and
width ``W'=W+left+right``.
"""
from ..augmenters import size as iasize
arr_0to1_padded = iasize.pad(
self.arr_0to1,
top=top,
right=right,
bottom=bottom,
left=left,
mode=mode,
cval=cval)
# TODO change to deepcopy()
return HeatmapsOnImage.from_0to1(
arr_0to1_padded,
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
def pad_to_aspect_ratio(self, aspect_ratio, mode="constant", cval=0.0,
return_pad_amounts=False):
"""Pad the heatmaps until they match a target aspect ratio.
Depending on which dimension is smaller (height or width), only the
corresponding sides (left/right or top/bottom) will be padded. In
each case, both of the sides will be padded equally.
Parameters
----------
aspect_ratio : float
Target aspect ratio, given as width/height. E.g. ``2.0`` denotes
the image having twice as much width as height.
mode : str, optional
Padding mode to use.
See :func:`~imgaug.imgaug.pad` for details.
cval : number, optional
Value to use for padding if `mode` is ``constant``.
See :func:`~imgaug.imgaug.pad` for details.
return_pad_amounts : bool, optional
If ``False``, then only the padded instance will be returned.
If ``True``, a tuple with two entries will be returned, where
the first entry is the padded instance and the second entry are
the amounts by which each array side was padded. These amounts are
again a tuple of the form ``(top, right, bottom, left)``, with
each value being an integer.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Padded heatmaps as :class:`HeatmapsOnImage` instance.
tuple of int
Amounts by which the instance's array was padded on each side,
given as a tuple ``(top, right, bottom, left)``.
This tuple is only returned if `return_pad_amounts` was set to
``True``.
"""
from ..augmenters import size as iasize
arr_0to1_padded, pad_amounts = iasize.pad_to_aspect_ratio(
self.arr_0to1,
aspect_ratio=aspect_ratio,
mode=mode,
cval=cval,
return_pad_amounts=True)
# TODO change to deepcopy()
heatmaps = HeatmapsOnImage.from_0to1(
arr_0to1_padded,
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
if return_pad_amounts:
return heatmaps, pad_amounts
return heatmaps
def avg_pool(self, block_size):
"""Average-pool the heatmap(s) array using a given block/kernel size.
Parameters
----------
block_size : int or tuple of int
Size of each block of values to pool, aka kernel size.
See :func:`~imgaug.imgaug.pool` for details.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Heatmaps after average pooling.
"""
arr_0to1_reduced = ia.avg_pool(self.arr_0to1, block_size, pad_cval=0.0)
return HeatmapsOnImage.from_0to1(
arr_0to1_reduced,
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
def max_pool(self, block_size):
"""Max-pool the heatmap(s) array using a given block/kernel size.
Parameters
----------
block_size : int or tuple of int
Size of each block of values to pool, aka kernel size.
See :func:`~imgaug.imgaug.pool` for details.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Heatmaps after max-pooling.
"""
arr_0to1_reduced = ia.max_pool(self.arr_0to1, block_size)
return HeatmapsOnImage.from_0to1(
arr_0to1_reduced,
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
@ia.deprecated(alt_func="HeatmapsOnImage.resize()",
comment="resize() has the exactly same interface.")
def scale(self, *args, **kwargs):
"""Resize the heatmap(s) array given a target size and interpolation."""
return self.resize(*args, **kwargs)
def resize(self, sizes, interpolation="cubic"):
"""Resize the heatmap(s) array given a target size and interpolation.
Parameters
----------
sizes : float or iterable of int or iterable of float
New size of the array in ``(height, width)``.
See :func:`~imgaug.imgaug.imresize_single_image` for details.
interpolation : None or str or int, optional
The interpolation to use during resize.
See :func:`~imgaug.imgaug.imresize_single_image` for details.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Resized heatmaps object.
"""
arr_0to1_resized = ia.imresize_single_image(
self.arr_0to1, sizes, interpolation=interpolation)
# cubic interpolation can lead to values outside of [0.0, 1.0],
# see https://github.com/opencv/opencv/issues/7195
# TODO area interpolation too?
arr_0to1_resized = np.clip(arr_0to1_resized, 0.0, 1.0)
return HeatmapsOnImage.from_0to1(
arr_0to1_resized,
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
def to_uint8(self):
"""Convert this heatmaps object to an ``uint8`` array.
Returns
-------
(H,W,C) ndarray
Heatmap as an ``uint8`` array, i.e. with the discrete value
range ``[0, 255]``.
"""
# TODO this always returns (H,W,C), even if input ndarray was
# originally (H,W). Does it make sense here to also return
# (H,W) if self.arr_was_2d?
arr_0to255 = np.clip(np.round(self.arr_0to1 * 255), 0, 255)
arr_uint8 = arr_0to255.astype(np.uint8)
return arr_uint8
@staticmethod
def from_uint8(arr_uint8, shape, min_value=0.0, max_value=1.0):
"""Create a ``float``-based heatmaps object from an ``uint8`` array.
Parameters
----------
arr_uint8 : (H,W) ndarray or (H,W,C) ndarray
Heatmap(s) array, where ``H`` is height, ``W`` is width
and ``C`` is the number of heatmap channels.
Expected dtype is ``uint8``.
shape : tuple of int
Shape of the image on which the heatmap(s) is/are placed.
**Not** the shape of the heatmap(s) array, unless it is identical
to the image shape (note the likely difference between the arrays
in the number of channels).
If there is not a corresponding image, use the shape of the
heatmaps array.
min_value : float, optional
Minimum value of the float heatmaps that the input array
represents. This will usually be 0.0. In most other cases it will
be close to the interval ``[0.0, 1.0]``.
Calling :func:`~imgaug.HeatmapsOnImage.get_arr`, will automatically
convert the interval ``[0.0, 1.0]`` float array to this
``[min, max]`` interval.
max_value : float, optional
Minimum value of the float heatmaps that the input array
represents. This will usually be 1.0.
See parameter `min_value` for details.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Heatmaps object.
"""
arr_0to1 = arr_uint8.astype(np.float32) / 255.0
return HeatmapsOnImage.from_0to1(
arr_0to1, shape,
min_value=min_value,
max_value=max_value)
@staticmethod
def from_0to1(arr_0to1, shape, min_value=0.0, max_value=1.0):
"""Create a heatmaps object from a ``[0.0, 1.0]`` float array.
Parameters
----------
arr_0to1 : (H,W) or (H,W,C) ndarray
Heatmap(s) array, where ``H`` is the height, ``W`` is the width
and ``C`` is the number of heatmap channels.
Expected dtype is ``float32``.
shape : tuple of ints
Shape of the image on which the heatmap(s) is/are placed.
**Not** the shape of the heatmap(s) array, unless it is identical
to the image shape (note the likely difference between the arrays
in the number of channels).
If there is not a corresponding image, use the shape of the
heatmaps array.
min_value : float, optional
Minimum value of the float heatmaps that the input array
represents. This will usually be 0.0. In most other cases it will
be close to the interval ``[0.0, 1.0]``.
Calling :func:`~imgaug.HeatmapsOnImage.get_arr`, will automatically
convert the interval ``[0.0, 1.0]`` float array to this
``[min, max]`` interval.
max_value : float, optional
            Maximum value of the float heatmaps that the input array
represents. This will usually be 1.0.
See parameter `min_value` for details.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Heatmaps object.
"""
heatmaps = HeatmapsOnImage(arr_0to1, shape,
min_value=0.0, max_value=1.0)
heatmaps.min_value = min_value
heatmaps.max_value = max_value
return heatmaps
# TODO change name to change_value_range()?
@classmethod
def change_normalization(cls, arr, source, target):
"""Change the value range of a heatmap array.
E.g. the value range may be changed from the interval ``[0.0, 1.0]``
to ``[-1.0, 1.0]``.
Parameters
----------
arr : ndarray
Heatmap array to modify.
source : tuple of float
Current value range of the input array, given as a
tuple ``(min, max)``, where both are ``float`` values.
target : tuple of float
Desired output value range of the array, given as a
tuple ``(min, max)``, where both are ``float`` values.
Returns
-------
ndarray
Input array, with value range projected to the desired target
value range.
"""
assert ia.is_np_array(arr), (
"Expected 'arr' to be an ndarray, got type %s." % (type(arr),))
def _validate_tuple(arg_name, arg_value):
assert isinstance(arg_value, tuple), (
"'%s' was not a HeatmapsOnImage instance, "
"expected type tuple then. Got type %s." % (
arg_name, type(arg_value),))
assert len(arg_value) == 2, (
"Expected tuple '%s' to contain exactly two entries, "
"got %d." % (arg_name, len(arg_value),))
assert arg_value[0] < arg_value[1], (
"Expected tuple '%s' to have two entries with "
"entry 1 < entry 2, got values %.4f and %.4f." % (
arg_name, arg_value[0], arg_value[1]))
if isinstance(source, HeatmapsOnImage):
source = (source.min_value, source.max_value)
else:
_validate_tuple("source", source)
if isinstance(target, HeatmapsOnImage):
target = (target.min_value, target.max_value)
else:
_validate_tuple("target", target)
        # Check whether source and target are the same (within a tiny
        # tolerance); if so, skip the computation and just copy the array instead.
# This is reasonable, as source and target will often both
# be (0.0, 1.0).
eps = np.finfo(arr.dtype).eps
mins_same = source[0] - 10*eps < target[0] < source[0] + 10*eps
maxs_same = source[1] - 10*eps < target[1] < source[1] + 10*eps
if mins_same and maxs_same:
return np.copy(arr)
min_source, max_source = source
min_target, max_target = target
diff_source = max_source - min_source
diff_target = max_target - min_target
arr_0to1 = (arr - min_source) / diff_source
arr_target = min_target + arr_0to1 * diff_target
return arr_target
# TODO make this a proper shallow-copy
def copy(self):
"""Create a shallow copy of the heatmaps object.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Shallow copy.
"""
return self.deepcopy()
def deepcopy(self):
"""Create a deep copy of the heatmaps object.
Returns
-------
imgaug.augmentables.heatmaps.HeatmapsOnImage
Deep copy.
"""
return HeatmapsOnImage(
self.get_arr(),
shape=self.shape,
min_value=self.min_value,
max_value=self.max_value)
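# Minimal usage sketch (illustrative -- the array values and image shape below
# are made up):
#
#   import numpy as np
#   arr = np.zeros((64, 64), dtype=np.float32)
#   arr[16:48, 16:48] = 1.0
#   heatmap = HeatmapsOnImage(arr, shape=(128, 128, 3))
#   heatmap_small = heatmap.resize((32, 32))
#   as_uint8 = heatmap_small.to_uint8()  # (32, 32, 1) uint8 array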
| mit |
kushalbhola/MyStuff | Practice/PythonApplication/env/Lib/site-packages/pandas/tests/util/test_move.py | 2 | 1457 | import pytest
from pandas.util._move import BadMove, move_into_mutable_buffer, stolenbuf
def test_cannot_create_instance_of_stolen_buffer():
# Stolen buffers need to be created through the smart constructor
# "move_into_mutable_buffer," which has a bunch of checks in it.
msg = "cannot create 'pandas.util._move.stolenbuf' instances"
with pytest.raises(TypeError, match=msg):
stolenbuf()
def test_more_than_one_ref():
# Test case for when we try to use "move_into_mutable_buffer"
# when the object being moved has other references.
b = b"testing"
with pytest.raises(BadMove, match="testing") as e:
def handle_success(type_, value, tb):
assert value.args[0] is b
return type(e).handle_success(e, type_, value, tb) # super
e.handle_success = handle_success
move_into_mutable_buffer(b)
def test_exactly_one_ref():
# Test case for when the object being moved has exactly one reference.
b = b"testing"
# We need to pass an expression on the stack to ensure that there are
# not extra references hanging around. We cannot rewrite this test as
# buf = b[:-3]
# as_stolen_buf = move_into_mutable_buffer(buf)
# because then we would have more than one reference to buf.
as_stolen_buf = move_into_mutable_buffer(b[:-3])
# Materialize as byte-array to show that it is mutable.
assert bytearray(as_stolen_buf) == b"test"
| apache-2.0 |
LaboratoireMecaniqueLille/crappy | crappy/blocks/grapher.py | 1 | 5641 | # coding: utf-8
import numpy as np
from .block import Block
from .._global import OptionalModule
try:
import matplotlib.pyplot as plt
from matplotlib.widgets import Button
except (ModuleNotFoundError, ImportError):
plt = OptionalModule("matplotlib")
Button = OptionalModule("matplotlib")
class Grapher(Block):
"""The grapher receive data from a block (via a :ref:`Link`) and plots it."""
def __init__(self,
*labels,
length=0,
freq=2,
maxpt=20000,
window_size=(8, 8),
window_pos=None,
interp=True,
backend="TkAgg"):
"""Sets the args and initializes the parent class.
Args:
*labels (:obj:`tuple`): Tuples of the columns labels of input data for
plotting. You can add as much as you want, depending on your
performances. The first value is the `x` label, the second is the `y`
label.
length (:obj:`int`, optional): If `0` the graph is static and displays
all data from the start of the assay. Else only displays the last
``length`` received chunks, and drops the previous ones.
freq (:obj:`float`, optional): The refresh rate of the graph. May cause
high memory consumption if set too high.
maxpt (:obj:`int`, optional): The maximum number of points displayed on
the graph. When reaching this limit, the block deletes one point out of
two but this is almost invisible to the user.
window_size (:obj:`tuple`, optional): The size of the graph, in inches.
window_pos (:obj:`tuple`, optional): The position of the graph in pixels.
The first value is for the `x` direction, the second for the `y`
direction. The origin is the top left corner. Works with multiple
screens.
interp (:obj:`bool`, optional): If :obj:`True`, the points of data will
be linked to the following by straight lines. Else, each value wil be
displayed as constant until the next update.
backend (:obj:`int`, optional): The :mod:`matplotlib` backend to use.
Example:
::
graph = Grapher(('t(s)', 'F(N)'), ('t(s)', 'def(%)'))
will plot a dynamic graph with two lines plot (`F=f(t)` and `def=f(t)`).
::
graph = Grapher(('def(%)', 'F(N)'), length=0)
will plot a static graph.
::
graph = Grapher(('t(s)', 'F(N)'), length=30)
will plot a dynamic graph displaying the last 30 chunks of data.
"""
Block.__init__(self)
self.niceness = 10
self.length = length
self.freq = freq
self.maxpt = maxpt
self.window_size = window_size
self.window_pos = window_pos
self.interp = interp
self.backend = backend
self.labels = labels
def prepare(self):
if self.backend:
plt.switch_backend(self.backend)
self.f = plt.figure(figsize=self.window_size)
self.ax = self.f.add_subplot(111)
self.lines = []
for _ in self.labels:
if self.interp:
self.lines.append(self.ax.plot([], [])[0])
else:
self.lines.append(self.ax.step([], [])[0])
# Keep only 1/factor points on each line
self.factor = [1 for _ in self.labels]
# Count to drop exactly 1/factor points, no more and no less
self.counter = [0 for _ in self.labels]
legend = [y for x, y in self.labels]
plt.legend(legend, bbox_to_anchor=(-0.03, 1.02, 1.06, .102), loc=3,
ncol=len(legend), mode="expand", borderaxespad=1)
plt.xlabel(self.labels[0][0])
plt.ylabel(self.labels[0][1])
plt.grid()
self.axclear = plt.axes([.8, .02, .15, .05])
self.bclear = Button(self.axclear, 'Clear')
self.bclear.on_clicked(self.clear)
if self.window_pos:
mng = plt.get_current_fig_manager()
mng.window.wm_geometry("+%s+%s" % self.window_pos)
plt.draw()
plt.pause(.001)
def clear(self, event=None):
for line in self.lines:
line.set_xdata([])
line.set_ydata([])
self.factor = [1 for _ in self.labels]
self.counter = [0 for _ in self.labels]
def loop(self):
# We need to recv data from all the links, but keep
# ALL of the data, even with the same label (so not get_all_last)
data = self.recv_all_delay()
for i, (lx, ly) in enumerate(self.labels):
x, y = 0, 0 # So that if we don't find it, we do nothing
for d in data:
if lx in d and ly in d: # Find the first input with both labels
dx = d[lx][self.factor[i]-self.counter[i]-1::self.factor[i]]
dy = d[ly][self.factor[i]-self.counter[i]-1::self.factor[i]]
self.counter[i] = (self.counter[i]+len(d[lx])) % self.factor[i]
x = np.hstack((self.lines[i].get_xdata(), dx))
y = np.hstack((self.lines[i].get_ydata(), dy))
break
if isinstance(x, int):
break
if self.length and len(x) >= self.length:
# Remove the beginning if the graph is dynamic
x = x[-self.length:]
y = y[-self.length:]
elif len(x) > self.maxpt:
        # Reduce the number of points if we have too many to display
print("[Grapher] Too many points on the graph {} ({}>{})".format(
i, len(x), self.maxpt))
x, y = x[::2], y[::2]
self.factor[i] *= 2
print("[Grapher] Resampling factor is now {}".format(self.factor[i]))
self.lines[i].set_xdata(x)
self.lines[i].set_ydata(y)
self.ax.relim() # Update the window
self.ax.autoscale_view(True, True, True)
self.f.canvas.draw() # Update the graph
self.f.canvas.flush_events()
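  # Illustrative note, not part of the original source: the slicing above keeps one
  # point out of every self.factor[i], with self.counter[i] carrying the phase across
  # chunks. For example, with factor=4 and counter=1 a 10-sample chunk is sliced as
  # d[lx][4-1-1::4] == d[lx][2::4] (keeping indices 2 and 6), and the counter becomes
  # (1 + 10) % 4 == 3, so the next chunk starts at index 4-3-1 == 0 and the
  # every-4th-sample pattern continues seamlessly across chunk boundaries.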
def finish(self):
plt.close("all")
| gpl-2.0 |
maropu/spark | python/pyspark/pandas/namespace.py | 2 | 104198 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Wrappers around spark that correspond to common pandas functions.
"""
from typing import ( # noqa: F401 (SPARK-34943)
Any,
Callable,
Dict,
List,
Optional,
Sized,
Tuple,
Type,
Union,
cast,
no_type_check,
)
from collections import OrderedDict
from collections.abc import Iterable
from datetime import tzinfo
from distutils.version import LooseVersion
from functools import reduce
from io import BytesIO
import json
import numpy as np
import pandas as pd
from pandas.api.types import is_datetime64_dtype, is_datetime64tz_dtype, is_list_like
from pandas.tseries.offsets import DateOffset
import pyarrow as pa
import pyarrow.parquet as pq
from pyspark import sql as spark
from pyspark.sql import functions as F
from pyspark.sql.functions import pandas_udf
from pyspark.sql.types import (
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
BooleanType,
TimestampType,
DecimalType,
StringType,
DateType,
StructType,
DataType,
)
from pyspark import pandas as ps # noqa: F401
from pyspark.pandas.base import IndexOpsMixin
from pyspark.pandas.utils import (
align_diff_frames,
default_session,
is_name_like_tuple,
name_like_string,
same_anchor,
scol_for,
validate_axis,
)
from pyspark.pandas.frame import DataFrame, _reduce_spark_multi
from pyspark.pandas.internal import (
InternalFrame,
DEFAULT_SERIES_NAME,
HIDDEN_COLUMNS,
)
from pyspark.pandas.series import Series, first_series
from pyspark.pandas.spark.utils import as_nullable_spark_type, force_decimal_precision_scale
from pyspark.pandas.typedef.typehints import Dtype
from pyspark.pandas.indexes import Index, DatetimeIndex
__all__ = [
"from_pandas",
"range",
"read_csv",
"read_delta",
"read_table",
"read_spark_io",
"read_parquet",
"read_clipboard",
"read_excel",
"read_html",
"to_datetime",
"date_range",
"get_dummies",
"concat",
"melt",
"isna",
"isnull",
"notna",
"notnull",
"read_sql_table",
"read_sql_query",
"read_sql",
"read_json",
"merge",
"to_numeric",
"broadcast",
"read_orc",
]
def from_pandas(pobj: Union[pd.DataFrame, pd.Series, pd.Index]) -> Union[Series, DataFrame, Index]:
"""Create a pandas-on-Spark DataFrame, Series or Index from a pandas DataFrame, Series or Index.
This is similar to Spark's `SparkSession.createDataFrame()` with pandas DataFrame,
but this also works with pandas Series and picks the index.
Parameters
----------
pobj : pandas.DataFrame or pandas.Series
pandas DataFrame or Series to read.
Returns
-------
Series or DataFrame
If a pandas Series is passed in, this function returns a pandas-on-Spark Series.
If a pandas DataFrame is passed in, this function returns a pandas-on-Spark DataFrame.
"""
if isinstance(pobj, pd.Series):
return Series(pobj)
elif isinstance(pobj, pd.DataFrame):
return DataFrame(pobj)
elif isinstance(pobj, pd.Index):
return DataFrame(pd.DataFrame(index=pobj)).index
else:
raise TypeError("Unknown data type: {}".format(type(pobj).__name__))
_range = range # built-in range
def range(
start: int, end: Optional[int] = None, step: int = 1, num_partitions: Optional[int] = None
) -> DataFrame:
"""
Create a DataFrame with some range of numbers.
The resulting DataFrame has a single int64 column named `id`, containing elements in a range
from ``start`` to ``end`` (exclusive) with step value ``step``. If only the first parameter
(i.e. start) is specified, we treat it as the end value with the start value being 0.
This is similar to the range function in SparkSession and is used primarily for testing.
Parameters
----------
start : int
the start value (inclusive)
end : int, optional
the end value (exclusive)
step : int, optional, default 1
the incremental step
num_partitions : int, optional
the number of partitions of the DataFrame
Returns
-------
DataFrame
Examples
--------
When the first parameter is specified, we generate a range of values up till that number.
>>> ps.range(5)
id
0 0
1 1
2 2
3 3
4 4
When start, end, and step are specified:
>>> ps.range(start = 100, end = 200, step = 20)
id
0 100
1 120
2 140
3 160
4 180
"""
sdf = default_session().range(start=start, end=end, step=step, numPartitions=num_partitions)
return DataFrame(sdf)
def read_csv(
path: str,
sep: str = ",",
header: Union[str, int, None] = "infer",
names: Optional[Union[str, List[str]]] = None,
index_col: Optional[Union[str, List[str]]] = None,
usecols: Optional[Union[List[int], List[str], Callable[[str], bool]]] = None,
squeeze: bool = False,
mangle_dupe_cols: bool = True,
dtype: Optional[Union[str, Dtype, Dict[str, Union[str, Dtype]]]] = None,
nrows: Optional[int] = None,
parse_dates: bool = False,
quotechar: Optional[str] = None,
escapechar: Optional[str] = None,
comment: Optional[str] = None,
**options: Any
) -> Union[DataFrame, Series]:
"""Read CSV (comma-separated) file into DataFrame or Series.
Parameters
----------
path : str
The path string storing the CSV file to be read.
sep : str, default ‘,’
Delimiter to use. Must be a single character.
header : int, default ‘infer’
        Row number to use as the column names, and the start of the data.
Default behavior is to infer the column names: if no names are passed
the behavior is identical to `header=0` and column names are inferred from
the first line of the file, if column names are passed explicitly then
the behavior is identical to `header=None`. Explicitly pass `header=0` to be
able to replace existing names
names : str or array-like, optional
List of column names to use. If file contains no header row, then you should
explicitly pass `header=None`. Duplicates in this list will cause an error to be issued.
If a string is given, it should be a DDL-formatted string in Spark SQL, which is
preferred to avoid schema inference for better performance.
index_col: str or list of str, optional, default: None
Index column of table in Spark.
usecols : list-like or callable, optional
Return a subset of the columns. If list-like, all elements must either be
positional (i.e. integer indices into the document columns) or strings that
correspond to column names provided either by the user in names or inferred
from the document header row(s).
If callable, the callable function will be evaluated against the column names,
returning names where the callable function evaluates to `True`.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X0', 'X1', ... 'XN', rather
than 'X' ... 'X'. Passing in False will cause data to be overwritten if
there are duplicate names in the columns.
Currently only `True` is allowed.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {‘a’: np.float64, ‘b’: np.int32} Use str or object
together with suitable na_values settings to preserve and not interpret dtype.
nrows : int, default None
Number of rows to read from the CSV file.
parse_dates : boolean or list of ints or names or list of lists or dict, default `False`.
Currently only `False` is allowed.
quotechar : str (length 1), optional
The character used to denote the start and end of a quoted item. Quoted items can include
the delimiter and it will be ignored.
escapechar : str (length 1), default None
One-character string used to escape delimiter
comment: str, optional
        A single character; lines starting with this character are not parsed.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame or Series
See Also
--------
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
Examples
--------
>>> ps.read_csv('data.csv') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if mangle_dupe_cols is not True:
raise ValueError("mangle_dupe_cols can only be `True`: %s" % mangle_dupe_cols)
if parse_dates is not False:
raise ValueError("parse_dates can only be `False`: %s" % parse_dates)
if usecols is not None and not callable(usecols):
usecols = list(usecols) # type: ignore
if usecols is None or callable(usecols) or len(usecols) > 0:
reader = default_session().read
reader.option("inferSchema", True)
reader.option("sep", sep)
if header == "infer":
header = 0 if names is None else None
if header == 0:
reader.option("header", True)
elif header is None:
reader.option("header", False)
else:
raise ValueError("Unknown header argument {}".format(header))
if quotechar is not None:
reader.option("quote", quotechar)
if escapechar is not None:
reader.option("escape", escapechar)
if comment is not None:
if not isinstance(comment, str) or len(comment) != 1:
raise ValueError("Only length-1 comment characters supported")
reader.option("comment", comment)
reader.options(**options)
if isinstance(names, str):
sdf = reader.schema(names).csv(path)
column_labels = OrderedDict((col, col) for col in sdf.columns) # type: Dict[Any, str]
else:
sdf = reader.csv(path)
if is_list_like(names):
names = list(names)
if len(set(names)) != len(names):
raise ValueError("Found non-unique column index")
if len(names) != len(sdf.columns):
raise ValueError(
"The number of names [%s] does not match the number "
"of columns [%d]. Try names by a Spark SQL DDL-formatted "
"string." % (len(sdf.schema), len(names))
)
column_labels = OrderedDict(zip(names, sdf.columns))
elif header is None:
column_labels = OrderedDict(enumerate(sdf.columns))
else:
column_labels = OrderedDict((col, col) for col in sdf.columns)
if usecols is not None:
if callable(usecols):
column_labels = OrderedDict(
(label, col) for label, col in column_labels.items() if usecols(label)
)
missing = [] # type: List[Union[int, str]]
elif all(isinstance(col, int) for col in usecols):
usecols_ints = cast(List[int], usecols)
new_column_labels = OrderedDict(
(label, col)
for i, (label, col) in enumerate(column_labels.items())
if i in usecols_ints
)
missing = [
col
for col in usecols_ints
if (
col >= len(column_labels)
or list(column_labels)[col] not in new_column_labels
)
]
column_labels = new_column_labels
elif all(isinstance(col, str) for col in usecols):
new_column_labels = OrderedDict(
(label, col) for label, col in column_labels.items() if label in usecols
)
missing = [col for col in usecols if col not in new_column_labels]
column_labels = new_column_labels
else:
raise ValueError(
"'usecols' must either be list-like of all strings, "
"all unicode, all integers or a callable."
)
if len(missing) > 0:
raise ValueError(
"Usecols do not match columns, columns expected but not " "found: %s" % missing
)
if len(column_labels) > 0:
sdf = sdf.select([scol_for(sdf, col) for col in column_labels.values()])
else:
sdf = default_session().createDataFrame([], schema=StructType())
else:
sdf = default_session().createDataFrame([], schema=StructType())
column_labels = OrderedDict()
if nrows is not None:
sdf = sdf.limit(nrows)
if index_col is not None:
if isinstance(index_col, (str, int)):
index_col = [index_col]
for col in index_col:
if col not in column_labels:
raise KeyError(col)
index_spark_column_names = [column_labels[col] for col in index_col]
index_names = [(col,) for col in index_col] # type: List[Tuple]
column_labels = OrderedDict(
(label, col) for label, col in column_labels.items() if label not in index_col
)
else:
index_spark_column_names = []
index_names = []
psdf = DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=[scol_for(sdf, col) for col in index_spark_column_names],
index_names=index_names,
column_labels=[
label if is_name_like_tuple(label) else (label,) for label in column_labels
],
data_spark_columns=[scol_for(sdf, col) for col in column_labels.values()],
)
) # type: DataFrame
if dtype is not None:
if isinstance(dtype, dict):
for col, tpe in dtype.items():
psdf[col] = psdf[col].astype(tpe)
else:
for col in psdf.columns:
psdf[col] = psdf[col].astype(dtype)
if squeeze and len(psdf.columns) == 1:
return first_series(psdf)
else:
return psdf
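# Illustrative usage sketches for read_csv, added as comments and not part of the
# original module; the file and column names are hypothetical. A DDL-formatted `names`
# string skips schema inference, and `usecols` may be given as labels, positions, or a
# callable:
#
#   ps.read_csv("data.csv", names="id INT, name STRING", index_col="id")
#   ps.read_csv("data.csv", usecols=[0, 2], nrows=100)
#   ps.read_csv("data.csv", usecols=lambda c: c.startswith("col"), dtype={"col1": "float64"})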
def read_json(
path: str, lines: bool = True, index_col: Optional[Union[str, List[str]]] = None, **options: Any
) -> DataFrame:
"""
Convert a JSON string to DataFrame.
Parameters
----------
path : string
File path
lines : bool, default True
Read the file as a json object per line. It should be always True for now.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
Examples
--------
>>> df = ps.DataFrame([['a', 'b'], ['c', 'd']],
... columns=['col 1', 'col 2'])
>>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1)
>>> ps.read_json(
... path=r'%s/read_json/foo.json' % path
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
>>> df.to_json(path=r'%s/read_json/foo.json' % path, num_files=1, lineSep='___')
>>> ps.read_json(
... path=r'%s/read_json/foo.json' % path, lineSep='___'
... ).sort_values(by="col 1")
col 1 col 2
0 a b
1 c d
You can preserve the index in the roundtrip as below.
>>> df.to_json(path=r'%s/read_json/bar.json' % path, num_files=1, index_col="index")
>>> ps.read_json(
... path=r'%s/read_json/bar.json' % path, index_col="index"
... ).sort_values(by="col 1") # doctest: +NORMALIZE_WHITESPACE
col 1 col 2
index
0 a b
1 c d
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if not lines:
raise NotImplementedError("lines=False is not implemented yet.")
return read_spark_io(path, format="json", index_col=index_col, **options)
def read_delta(
path: str,
version: Optional[str] = None,
timestamp: Optional[str] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> DataFrame:
"""
Read a Delta Lake table on some file system and return a DataFrame.
If the Delta Lake table is already stored in the catalog (aka the metastore), use 'read_table'.
Parameters
----------
path : string
Path to the Delta Lake table.
version : string, optional
Specifies the table version (based on Delta's internal transaction version) to read from,
using Delta's time travel feature. This sets Delta's 'versionAsOf' option.
timestamp : string, optional
Specifies the table version (based on timestamp) to read from,
using Delta's time travel feature. This must be a valid date or timestamp string in Spark,
and sets Delta's 'timestampAsOf' option.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options
Additional options that can be passed onto Delta.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_delta
read_table
read_spark_io
read_parquet
Examples
--------
>>> ps.range(1).to_delta('%s/read_delta/foo' % path) # doctest: +SKIP
>>> ps.read_delta('%s/read_delta/foo' % path) # doctest: +SKIP
id
0 0
>>> ps.range(10, 15, num_partitions=1).to_delta('%s/read_delta/foo' % path,
... mode='overwrite') # doctest: +SKIP
>>> ps.read_delta('%s/read_delta/foo' % path) # doctest: +SKIP
id
0 10
1 11
2 12
3 13
4 14
>>> ps.read_delta('%s/read_delta/foo' % path, version=0) # doctest: +SKIP
id
0 0
You can preserve the index in the roundtrip as below.
>>> ps.range(10, 15, num_partitions=1).to_delta(
... '%s/read_delta/bar' % path, index_col="index") # doctest: +SKIP
>>> ps.read_delta('%s/read_delta/bar' % path, index_col="index") # doctest: +SKIP
id
index
0 10
1 11
2 12
3 13
4 14
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if version is not None:
options["versionAsOf"] = version
if timestamp is not None:
options["timestampAsOf"] = timestamp
return read_spark_io(path, format="delta", index_col=index_col, **options)
def read_table(name: str, index_col: Optional[Union[str, List[str]]] = None) -> DataFrame:
"""
Read a Spark table and return a DataFrame.
Parameters
----------
name : string
Table name in Spark.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_table
read_delta
read_parquet
read_spark_io
Examples
--------
>>> ps.range(1).to_table('%s.my_table' % db)
>>> ps.read_table('%s.my_table' % db)
id
0 0
>>> ps.range(1).to_table('%s.my_table' % db, index_col="index")
>>> ps.read_table('%s.my_table' % db, index_col="index") # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
sdf = default_session().read.table(name)
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
def read_spark_io(
path: Optional[str] = None,
format: Optional[str] = None,
schema: Union[str, "StructType"] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> DataFrame:
"""Load a DataFrame from a Spark data source.
Parameters
----------
path : string, optional
Path to the data source.
format : string, optional
Specifies the output data source format. Some common ones are:
- 'delta'
- 'parquet'
- 'orc'
- 'json'
- 'csv'
schema : string or StructType, optional
Input schema. If none, Spark tries to infer the schema automatically.
The schema can either be a Spark StructType, or a DDL-formatted string like
`col0 INT, col1 DOUBLE`.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
See Also
--------
DataFrame.to_spark_io
DataFrame.read_table
DataFrame.read_delta
DataFrame.read_parquet
Examples
--------
>>> ps.range(1).to_spark_io('%s/read_spark_io/data.parquet' % path)
>>> ps.read_spark_io(
... '%s/read_spark_io/data.parquet' % path, format='parquet', schema='id long')
id
0 0
>>> ps.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.json' % path,
... format='json', lineSep='__')
>>> ps.read_spark_io(
... '%s/read_spark_io/data.json' % path, format='json', schema='id long', lineSep='__')
id
0 10
1 11
2 12
3 13
4 14
You can preserve the index in the roundtrip as below.
>>> ps.range(10, 15, num_partitions=1).to_spark_io('%s/read_spark_io/data.orc' % path,
... format='orc', index_col="index")
>>> ps.read_spark_io(
... path=r'%s/read_spark_io/data.orc' % path, format="orc", index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 10
1 11
2 12
3 13
4 14
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
sdf = default_session().read.load(path=path, format=format, schema=schema, **options)
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
def read_parquet(
path: str,
columns: Optional[List[str]] = None,
index_col: Optional[List[str]] = None,
pandas_metadata: bool = False,
**options: Any
) -> DataFrame:
"""Load a parquet object from the file path, returning a DataFrame.
Parameters
----------
path : string
File path
columns : list, default=None
If not None, only these columns will be read from the file.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
pandas_metadata : bool, default: False
If True, try to respect the metadata if the Parquet file is written from pandas.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame
See Also
--------
DataFrame.to_parquet
DataFrame.read_table
DataFrame.read_delta
DataFrame.read_spark_io
Examples
--------
>>> ps.range(1).to_parquet('%s/read_spark_io/data.parquet' % path)
>>> ps.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'])
id
0 0
You can preserve the index in the roundtrip as below.
>>> ps.range(1).to_parquet('%s/read_spark_io/data.parquet' % path, index_col="index")
>>> ps.read_parquet('%s/read_spark_io/data.parquet' % path, columns=['id'], index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
if columns is not None:
columns = list(columns)
index_names = None
if index_col is None and pandas_metadata:
# Try to read pandas metadata
@no_type_check
@pandas_udf("index_col array<string>, index_names array<string>")
def read_index_metadata(pser: pd.Series) -> pd.DataFrame:
binary = pser.iloc[0]
metadata = pq.ParquetFile(pa.BufferReader(binary)).metadata.metadata
if b"pandas" in metadata:
pandas_metadata = json.loads(metadata[b"pandas"].decode("utf8"))
if all(isinstance(col, str) for col in pandas_metadata["index_columns"]):
index_col = []
index_names = []
for col in pandas_metadata["index_columns"]:
index_col.append(col)
for column in pandas_metadata["columns"]:
if column["field_name"] == col:
index_names.append(column["name"])
break
else:
index_names.append(None)
return pd.DataFrame({"index_col": [index_col], "index_names": [index_names]})
return pd.DataFrame({"index_col": [None], "index_names": [None]})
index_col, index_names = (
default_session()
.read.format("binaryFile")
.load(path)
.limit(1)
.select(read_index_metadata("content").alias("index_metadata"))
.select("index_metadata.*")
.head()
)
psdf = read_spark_io(path=path, format="parquet", options=options, index_col=index_col)
if columns is not None:
new_columns = [c for c in columns if c in psdf.columns]
if len(new_columns) > 0:
psdf = psdf[new_columns]
else:
sdf = default_session().createDataFrame([], schema=StructType())
index_spark_columns, index_names = _get_index_map(sdf, index_col)
psdf = DataFrame(
InternalFrame(
spark_frame=sdf,
index_spark_columns=index_spark_columns,
index_names=index_names,
)
)
if index_names is not None:
psdf.index.names = index_names
return psdf
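# Note on the pandas_metadata path above; this comment is illustrative and not part of
# the original module. A Parquet file written by pandas carries a JSON blob under the
# b"pandas" key of its footer metadata, and its "index_columns" entry names the index
# columns. With pandas_metadata=True and no explicit index_col, read_parquet recovers
# that index automatically, e.g. with a hypothetical path:
#
#   ps.read_parquet("/data/pandas_written.parquet", pandas_metadata=True)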
def read_clipboard(sep: str = r"\s+", **kwargs: Any) -> DataFrame:
r"""
Read text from clipboard and pass to read_csv. See read_csv for the
full argument list
Parameters
----------
sep : str, default '\s+'
A string or regex delimiter. The default of '\s+' denotes
one or more whitespace characters.
See Also
--------
DataFrame.to_clipboard : Write text out to clipboard.
Returns
-------
parsed : DataFrame
"""
return cast(DataFrame, from_pandas(pd.read_clipboard(sep, **kwargs)))
def read_excel(
io: Union[str, Any],
sheet_name: Union[str, int, List[Union[str, int]], None] = 0,
header: Union[int, List[int]] = 0,
names: Optional[List] = None,
index_col: Optional[List[int]] = None,
usecols: Optional[Union[int, str, List[Union[int, str]], Callable[[str], bool]]] = None,
squeeze: bool = False,
dtype: Optional[Dict[str, Union[str, Dtype]]] = None,
engine: Optional[str] = None,
converters: Optional[Dict] = None,
true_values: Optional[Any] = None,
false_values: Optional[Any] = None,
skiprows: Optional[Union[int, List[int]]] = None,
nrows: Optional[int] = None,
na_values: Optional[Any] = None,
keep_default_na: bool = True,
verbose: bool = False,
parse_dates: Union[bool, List, Dict] = False,
date_parser: Optional[Callable] = None,
thousands: Optional[str] = None,
comment: Optional[str] = None,
skipfooter: int = 0,
convert_float: bool = True,
mangle_dupe_cols: bool = True,
**kwds: Any
) -> Union[DataFrame, Series, OrderedDict]:
"""
Read an Excel file into a pandas-on-Spark DataFrame or Series.
Support both `xls` and `xlsx` file extensions from a local filesystem or URL.
Support an option to read a single sheet or a list of sheets.
Parameters
----------
io : str, file descriptor, pathlib.Path, ExcelFile or xlrd.Book
The string could be a URL. The value URL must be available in Spark's DataFrameReader.
.. note::
If the underlying Spark is below 3.0, the parameter as a string is not supported.
You can use `ps.from_pandas(pd.read_excel(...))` as a workaround.
sheet_name : str, int, list, or None, default 0
Strings are used for sheet names. Integers are used in zero-indexed
sheet positions. Lists of strings/integers are used to request
multiple sheets. Specify None to get all sheets.
Available cases:
* Defaults to ``0``: 1st sheet as a `DataFrame`
* ``1``: 2nd sheet as a `DataFrame`
* ``"Sheet1"``: Load sheet with name "Sheet1"
* ``[0, 1, "Sheet5"]``: Load first, second and sheet named "Sheet5"
as a dict of `DataFrame`
* None: All sheets.
header : int, list of int, default 0
Row (0-indexed) to use for the column labels of the parsed
DataFrame. If a list of integers is passed those row positions will
be combined into a ``MultiIndex``. Use None if there is no header.
names : array-like, default None
List of column names to use. If file contains no header row,
then you should explicitly pass header=None.
index_col : int, list of int, default None
Column (0-indexed) to use as the row labels of the DataFrame.
Pass None if there is no such column. If a list is passed,
those columns will be combined into a ``MultiIndex``. If a
subset of data is selected with ``usecols``, index_col
is based on the subset.
usecols : int, str, list-like, or callable default None
Return a subset of the columns.
* If None, then parse all columns.
* If str, then indicates comma separated list of Excel column letters
and column ranges (e.g. "A:E" or "A,C,E:F"). Ranges are inclusive of
both sides.
* If list of int, then indicates list of column numbers to be parsed.
* If list of string, then indicates list of column names to be parsed.
* If callable, then evaluate each column name against it and parse the
column if the callable returns ``True``.
squeeze : bool, default False
If the parsed data only contains one column then return a Series.
dtype : Type name or dict of column -> type, default None
Data type for data or columns. E.g. {'a': np.float64, 'b': np.int32}
Use `object` to preserve data as stored in Excel and not interpret dtype.
If converters are specified, they will be applied INSTEAD
of dtype conversion.
engine : str, default None
If io is not a buffer or path, this must be set to identify io.
Acceptable values are None or xlrd.
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the Excel cell content, and return the transformed
content.
true_values : list, default None
Values to consider as True.
false_values : list, default None
Values to consider as False.
skiprows : list-like
Rows to skip at the beginning (0-indexed).
nrows : int, default None
Number of rows to parse.
na_values : scalar, str, list-like, or dict, default None
Additional strings to recognize as NA/NaN. If dict passed, specific
per-column NA values. By default the following values are interpreted
as NaN.
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to.
verbose : bool, default False
Indicate number of NA values placed in non-numeric columns.
parse_dates : bool, list-like, or dict, default False
The behavior is as follows:
* bool. If True -> try parsing the index.
* list of int or names. e.g. If [1, 2, 3] -> try parsing columns 1, 2, 3
each as a separate date column.
* list of lists. e.g. If [[1, 3]] -> combine columns 1 and 3 and parse as
a single date column.
        * dict, e.g. {'foo' : [1, 3]} -> parse columns 1, 3 as date and call
result 'foo'
If a column or index contains an unparseable date, the entire column or
index will be returned unaltered as an object data type. For non-standard
datetime parsing, use ``pd.to_datetime`` after ``pd.read_csv``
Note: A fast-path exists for iso8601-formatted dates.
date_parser : function, optional
Function to use for converting a sequence of string columns to an array of
datetime instances. The default uses ``dateutil.parser.parser`` to do the
conversion. pandas-on-Spark will try to call `date_parser` in three different ways,
advancing to the next if an exception occurs: 1) Pass one or more arrays
(as defined by `parse_dates`) as arguments; 2) concatenate (row-wise) the
string values from the columns defined by `parse_dates` into a single array
and pass that; and 3) call `date_parser` once for each row using one or
more strings (corresponding to the columns defined by `parse_dates`) as
arguments.
thousands : str, default None
Thousands separator for parsing string columns to numeric. Note that
this parameter is only necessary for columns stored as TEXT in Excel,
any numeric columns will automatically be parsed, regardless of display
format.
comment : str, default None
Comments out remainder of line. Pass a character or characters to this
argument to indicate comments in the input file. Any data between the
comment string and the end of the current line is ignored.
skipfooter : int, default 0
Rows at the end to skip (0-indexed).
convert_float : bool, default True
Convert integral floats to int (i.e., 1.0 --> 1). If False, all numeric
data will be read in as floats: Excel stores all numbers as floats
internally.
mangle_dupe_cols : bool, default True
Duplicate columns will be specified as 'X', 'X.1', ...'X.N', rather than
'X'...'X'. Passing in False will cause data to be overwritten if there
are duplicate names in the columns.
**kwds : optional
Optional keyword arguments can be passed to ``TextFileReader``.
Returns
-------
DataFrame or dict of DataFrames
DataFrame from the passed in Excel file. See notes in sheet_name
argument for more information on when a dict of DataFrames is returned.
See Also
--------
DataFrame.to_excel : Write DataFrame to an Excel file.
DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file.
read_csv : Read a comma-separated values (csv) file into DataFrame.
Examples
--------
The file can be read using the file name as string or an open file object:
>>> ps.read_excel('tmp.xlsx', index_col=0) # doctest: +SKIP
Name Value
0 string1 1
1 string2 2
2 #Comment 3
>>> ps.read_excel(open('tmp.xlsx', 'rb'),
... sheet_name='Sheet3') # doctest: +SKIP
Unnamed: 0 Name Value
0 0 string1 1
1 1 string2 2
2 2 #Comment 3
Index and header can be specified via the `index_col` and `header` arguments
>>> ps.read_excel('tmp.xlsx', index_col=None, header=None) # doctest: +SKIP
0 1 2
0 NaN Name Value
1 0.0 string1 1
2 1.0 string2 2
3 2.0 #Comment 3
Column types are inferred but can be explicitly specified
>>> ps.read_excel('tmp.xlsx', index_col=0,
... dtype={'Name': str, 'Value': float}) # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 #Comment 3.0
True, False, and NA values, and thousands separators have defaults,
but can be explicitly specified, too. Supply the values you would like
as strings or lists of strings!
>>> ps.read_excel('tmp.xlsx', index_col=0,
... na_values=['string1', 'string2']) # doctest: +SKIP
Name Value
0 None 1
1 None 2
2 #Comment 3
Comment lines in the excel input file can be skipped using the `comment` kwarg
>>> ps.read_excel('tmp.xlsx', index_col=0, comment='#') # doctest: +SKIP
Name Value
0 string1 1.0
1 string2 2.0
2 None NaN
"""
def pd_read_excel(
io_or_bin: Any, sn: Union[str, int, List[Union[str, int]], None], sq: bool
) -> pd.DataFrame:
return pd.read_excel(
io=BytesIO(io_or_bin) if isinstance(io_or_bin, (bytes, bytearray)) else io_or_bin,
sheet_name=sn,
header=header,
names=names,
index_col=index_col,
usecols=usecols,
squeeze=sq,
dtype=dtype,
engine=engine,
converters=converters,
true_values=true_values,
false_values=false_values,
skiprows=skiprows,
nrows=nrows,
na_values=na_values,
keep_default_na=keep_default_na,
verbose=verbose,
parse_dates=parse_dates,
date_parser=date_parser,
thousands=thousands,
comment=comment,
skipfooter=skipfooter,
convert_float=convert_float,
mangle_dupe_cols=mangle_dupe_cols,
**kwds
)
if isinstance(io, str):
# 'binaryFile' format is available since Spark 3.0.0.
binaries = default_session().read.format("binaryFile").load(io).select("content").head(2)
io_or_bin = binaries[0][0]
single_file = len(binaries) == 1
else:
io_or_bin = io
single_file = True
pdf_or_psers = pd_read_excel(io_or_bin, sn=sheet_name, sq=squeeze)
if single_file:
if isinstance(pdf_or_psers, dict):
return OrderedDict(
[(sn, from_pandas(pdf_or_pser)) for sn, pdf_or_pser in pdf_or_psers.items()]
)
else:
return cast(Union[DataFrame, Series], from_pandas(pdf_or_psers))
else:
def read_excel_on_spark(
pdf_or_pser: Union[pd.DataFrame, pd.Series],
sn: Union[str, int, List[Union[str, int]], None],
) -> Union[DataFrame, Series]:
if isinstance(pdf_or_pser, pd.Series):
pdf = pdf_or_pser.to_frame()
else:
pdf = pdf_or_pser
psdf = from_pandas(pdf)
return_schema = force_decimal_precision_scale(
as_nullable_spark_type(psdf._internal.spark_frame.drop(*HIDDEN_COLUMNS).schema)
)
def output_func(pdf: pd.DataFrame) -> pd.DataFrame:
pdf = pd.concat(
[pd_read_excel(bin, sn=sn, sq=False) for bin in pdf[pdf.columns[0]]]
)
reset_index = pdf.reset_index()
for name, col in reset_index.iteritems():
dt = col.dtype
if is_datetime64_dtype(dt) or is_datetime64tz_dtype(dt):
continue
reset_index[name] = col.replace({np.nan: None})
pdf = reset_index
# Just positionally map the column names to given schema's.
return pdf.rename(columns=dict(zip(pdf.columns, return_schema.names)))
sdf = (
default_session()
.read.format("binaryFile")
.load(io)
.select("content")
.mapInPandas(lambda iterator: map(output_func, iterator), schema=return_schema)
)
psdf = DataFrame(psdf._internal.with_new_sdf(sdf))
if squeeze and len(psdf.columns) == 1:
return first_series(psdf)
else:
return psdf
if isinstance(pdf_or_psers, dict):
return OrderedDict(
[
(sn, read_excel_on_spark(pdf_or_pser, sn))
for sn, pdf_or_pser in pdf_or_psers.items()
]
)
else:
return read_excel_on_spark(pdf_or_psers, sheet_name)
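# Note on the distributed path above; this comment is illustrative and not part of the
# original module. When `io` is a string that matches more than one file through Spark's
# binaryFile source, only the first file is parsed on the driver to fix the schema, and
# every matched file is then parsed with pd_read_excel inside mapInPandas before the
# results are concatenated, so all files are expected to share that schema. A single
# match, or a non-string `io`, falls back to plain pandas plus from_pandas, e.g. with a
# hypothetical glob:
#
#   ps.read_excel("/data/monthly/*.xlsx", sheet_name=0)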
def read_html(
io: Union[str, Any],
match: str = ".+",
flavor: Optional[str] = None,
header: Optional[Union[int, List[int]]] = None,
index_col: Optional[Union[int, List[int]]] = None,
skiprows: Optional[Union[int, List[int], slice]] = None,
attrs: Optional[Dict[str, str]] = None,
parse_dates: bool = False,
thousands: str = ",",
encoding: Optional[str] = None,
decimal: str = ".",
converters: Optional[Dict] = None,
na_values: Optional[Any] = None,
keep_default_na: bool = True,
displayed_only: bool = True,
) -> List[DataFrame]:
r"""Read HTML tables into a ``list`` of ``DataFrame`` objects.
Parameters
----------
io : str or file-like
A URL, a file-like object, or a raw string containing HTML. Note that
lxml only accepts the http, ftp and file url protocols. If you have a
URL that starts with ``'https'`` you might try removing the ``'s'``.
match : str or compiled regular expression, optional
The set of tables containing text matching this regex or string will be
returned. Unless the HTML is extremely simple you will probably need to
pass a non-empty string here. Defaults to '.+' (match any non-empty
string). The default value will return all tables contained on a page.
This value is converted to a regular expression so that there is
consistent behavior between Beautiful Soup and lxml.
flavor : str or None, container of strings
The parsing engine to use. 'bs4' and 'html5lib' are synonymous with
each other, they are both there for backwards compatibility. The
default of ``None`` tries to use ``lxml`` to parse and if that fails it
falls back on ``bs4`` + ``html5lib``.
header : int or list-like or None, optional
The row (or list of rows for a :class:`~ps.MultiIndex`) to use to
make the columns headers.
index_col : int or list-like or None, optional
The column (or list of columns) to use to create the index.
skiprows : int or list-like or slice or None, optional
0-based. Number of rows to skip after parsing the column integer. If a
sequence of integers or a slice is given, will skip the rows indexed by
that sequence. Note that a single element sequence means 'skip the nth
row' whereas an integer means 'skip n rows'.
attrs : dict or None, optional
This is a dictionary of attributes that you can pass to use to identify
the table in the HTML. These are not checked for validity before being
passed to lxml or Beautiful Soup. However, these attributes must be
valid HTML table attributes to work correctly. For example, ::
attrs = {'id': 'table'}
is a valid attribute dictionary because the 'id' HTML tag attribute is
a valid HTML attribute for *any* HTML tag as per `this document
<http://www.w3.org/TR/html-markup/global-attributes.html>`__. ::
attrs = {'asdf': 'table'}
is *not* a valid attribute dictionary because 'asdf' is not a valid
HTML attribute even if it is a valid XML attribute. Valid HTML 4.01
table attributes can be found `here
<http://www.w3.org/TR/REC-html40/struct/tables.html#h-11.2>`__. A
working draft of the HTML 5 spec can be found `here
<http://www.w3.org/TR/html-markup/table.html>`__. It contains the
latest information on table attributes for the modern web.
parse_dates : bool, optional
See :func:`~ps.read_csv` for more details.
thousands : str, optional
Separator to use to parse thousands. Defaults to ``','``.
encoding : str or None, optional
        The encoding used to decode the web page. Defaults to ``None``. ``None``
preserves the previous encoding behavior, which depends on the
underlying parser library (e.g., the parser library will try to use
the encoding provided by the document).
decimal : str, default '.'
Character to recognize as decimal point (example: use ',' for European
data).
converters : dict, default None
Dict of functions for converting values in certain columns. Keys can
either be integers or column labels, values are functions that take one
input argument, the cell (not column) content, and return the
transformed content.
na_values : iterable, default None
Custom NA values
keep_default_na : bool, default True
If na_values are specified and keep_default_na is False the default NaN
values are overridden, otherwise they're appended to
displayed_only : bool, default True
Whether elements with "display: none" should be parsed
Returns
-------
dfs : list of DataFrames
See Also
--------
read_csv
DataFrame.to_html
"""
pdfs = pd.read_html(
io=io,
match=match,
flavor=flavor,
header=header,
index_col=index_col,
skiprows=skiprows,
attrs=attrs,
parse_dates=parse_dates,
thousands=thousands,
encoding=encoding,
decimal=decimal,
converters=converters,
na_values=na_values,
keep_default_na=keep_default_na,
displayed_only=displayed_only,
)
return cast(List[DataFrame], [from_pandas(pdf) for pdf in pdfs])
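# Illustrative comment, not part of the original module: read_html parses the page on
# the driver with pandas.read_html and converts each resulting table with from_pandas,
# so only the returned DataFrames are distributed, not the HTML parsing itself.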
# TODO: add `coerce_float` and 'parse_dates' parameters
def read_sql_table(
table_name: str,
con: str,
schema: Optional[str] = None,
index_col: Optional[Union[str, List[str]]] = None,
columns: Optional[Union[str, List[str]]] = None,
**options: Any
) -> DataFrame:
"""
Read SQL database table into a DataFrame.
Given a table name and a JDBC URI, returns a DataFrame.
Parameters
----------
table_name : str
Name of SQL table in database.
con : str
        A JDBC URI can be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
schema : str, default None
Name of SQL schema in database to query (if database flavor
supports this). Uses default schema if None (default).
index_col : str or list of str, optional, default: None
Column(s) to set as index(MultiIndex).
columns : list, default None
List of column names to select from SQL table.
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
A SQL table is returned as two-dimensional data structure with labeled
axes.
See Also
--------
read_sql_query : Read SQL query into a DataFrame.
read_sql : Read SQL query or database table into a DataFrame.
Examples
--------
>>> ps.read_sql_table('table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
reader = default_session().read
reader.option("dbtable", table_name)
reader.option("url", con)
if schema is not None:
reader.schema(schema)
reader.options(**options)
sdf = reader.format("jdbc").load()
index_spark_columns, index_names = _get_index_map(sdf, index_col)
psdf = DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
) # type: DataFrame
if columns is not None:
if isinstance(columns, str):
columns = [columns]
psdf = psdf[columns]
return psdf
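# Illustrative comment, not part of the original module: extra keyword arguments are
# forwarded to Spark's JDBC reader through reader.options(**options), so standard JDBC
# data source options such as "driver", "user", "password" or "fetchsize" can be passed
# (the values below are hypothetical):
#
#   ps.read_sql_table("table_name", "jdbc:postgresql:db_name",
#                     driver="org.postgresql.Driver", user="spark", fetchsize="1000")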
# TODO: add `coerce_float`, `params`, and 'parse_dates' parameters
def read_sql_query(
sql: str, con: str, index_col: Optional[Union[str, List[str]]] = None, **options: Any
) -> DataFrame:
"""Read SQL query into a DataFrame.
Returns a DataFrame corresponding to the result set of the query
string. Optionally provide an `index_col` parameter to use one of the
columns as the index, otherwise default index will be used.
    .. note:: Some databases might hit Spark issue SPARK-27596
Parameters
----------
sql : string SQL query
SQL query to be executed.
con : str
        A JDBC URI can be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex).
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql
Examples
--------
>>> ps.read_sql_query('SELECT * FROM table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
reader = default_session().read
reader.option("query", sql)
reader.option("url", con)
reader.options(**options)
sdf = reader.format("jdbc").load()
index_spark_columns, index_names = _get_index_map(sdf, index_col)
return DataFrame(
InternalFrame(
spark_frame=sdf, index_spark_columns=index_spark_columns, index_names=index_names
)
)
# TODO: add `coerce_float`, `params`, and 'parse_dates' parameters
def read_sql(
sql: str,
con: str,
index_col: Optional[Union[str, List[str]]] = None,
columns: Optional[Union[str, List[str]]] = None,
**options: Any
) -> DataFrame:
"""
Read SQL query or database table into a DataFrame.
This function is a convenience wrapper around ``read_sql_table`` and
``read_sql_query`` (for backward compatibility). It will delegate
to the specific function depending on the provided input. A SQL query
will be routed to ``read_sql_query``, while a database table name will
be routed to ``read_sql_table``. Note that the delegated function might
have more specific notes about their functionality not listed here.
    .. note:: Some databases might hit Spark issue SPARK-27596
Parameters
----------
sql : string
SQL query to be executed or a table name.
con : str
        A JDBC URI can be provided as a str.
.. note:: The URI must be JDBC URI instead of Python's database URI.
index_col : string or list of strings, optional, default: None
Column(s) to set as index(MultiIndex).
columns : list, default: None
List of column names to select from SQL table (only used when reading
a table).
options : dict
All other options passed directly into Spark's JDBC data source.
Returns
-------
DataFrame
See Also
--------
read_sql_table : Read SQL database table into a DataFrame.
read_sql_query : Read SQL query into a DataFrame.
Examples
--------
>>> ps.read_sql('table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
>>> ps.read_sql('SELECT * FROM table_name', 'jdbc:postgresql:db_name') # doctest: +SKIP
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
    stripped = sql.strip()
    if " " not in stripped:  # TODO: identify more precisely whether this is a table name or a query.
return read_sql_table(sql, con, index_col=index_col, columns=columns, **options)
else:
return read_sql_query(sql, con, index_col=index_col, **options)
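# Illustrative comment, not part of the original module: the dispatch above is purely
# syntactic. Any string containing a space is treated as a query and routed to
# read_sql_query (becoming Spark's "query" option), while a single token is treated as
# a table name for read_sql_table (becoming the "dbtable" option).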
@no_type_check
def to_datetime(
arg,
errors: str = "raise",
format: Optional[str] = None,
unit: Optional[str] = None,
infer_datetime_format: bool = False,
origin: str = "unix",
):
"""
Convert argument to datetime.
Parameters
----------
arg : integer, float, string, datetime, list, tuple, 1-d array, Series
or DataFrame/dict-like
errors : {'ignore', 'raise', 'coerce'}, default 'raise'
- If 'raise', then invalid parsing will raise an exception
- If 'coerce', then invalid parsing will be set as NaT
- If 'ignore', then invalid parsing will return the input
format : string, default None
strftime to parse time, eg "%d/%m/%Y", note that "%f" will parse
all the way up to nanoseconds.
unit : string, default None
unit of the arg (D,s,ms,us,ns) denote the unit, which is an
integer or float number. This will be based off the origin.
Example, with unit='ms' and origin='unix' (the default), this
would calculate the number of milliseconds to the unix epoch start.
infer_datetime_format : boolean, default False
If True and no `format` is given, attempt to infer the format of the
datetime strings, and if it can be inferred, switch to a faster
method of parsing them. In some cases this can increase the parsing
speed by ~5-10x.
origin : scalar, default 'unix'
Define the reference date. The numeric values would be parsed as number
of units (defined by `unit`) since this reference date.
- If 'unix' (or POSIX) time; origin is set to 1970-01-01.
- If 'julian', unit must be 'D', and origin is set to beginning of
Julian Calendar. Julian day number 0 is assigned to the day starting
at noon on January 1, 4713 BC.
- If Timestamp convertible, origin is set to Timestamp identified by
origin.
Returns
-------
ret : datetime if parsing succeeded.
Return type depends on input:
- list-like: DatetimeIndex
- Series: Series of datetime64 dtype
- scalar: Timestamp
In case when it is not possible to return designated types (e.g. when
any element of input is before Timestamp.min or after Timestamp.max)
return will have datetime.datetime type (or corresponding
array/Series).
Examples
--------
Assembling a datetime from multiple columns of a DataFrame. The keys can be
common abbreviations like ['year', 'month', 'day', 'minute', 'second',
'ms', 'us', 'ns']) or plurals of the same
>>> df = ps.DataFrame({'year': [2015, 2016],
... 'month': [2, 3],
... 'day': [4, 5]})
>>> ps.to_datetime(df)
0 2015-02-04
1 2016-03-05
dtype: datetime64[ns]
If a date does not meet the `timestamp limitations
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html
#timeseries-timestamp-limits>`_, passing errors='ignore'
will return the original input instead of raising any exception.
Passing errors='coerce' will force an out-of-bounds date to NaT,
in addition to forcing non-dates (or non-parseable dates) to NaT.
>>> ps.to_datetime('13000101', format='%Y%m%d', errors='ignore')
datetime.datetime(1300, 1, 1, 0, 0)
>>> ps.to_datetime('13000101', format='%Y%m%d', errors='coerce')
NaT
    Passing infer_datetime_format=True can often speed up parsing
    when the format is not exactly ISO8601 but is still regular.
>>> s = ps.Series(['3/11/2000', '3/12/2000', '3/13/2000'] * 1000)
>>> s.head()
0 3/11/2000
1 3/12/2000
2 3/13/2000
3 3/11/2000
4 3/12/2000
dtype: object
>>> import timeit
>>> timeit.timeit(
... lambda: repr(ps.to_datetime(s, infer_datetime_format=True)),
... number = 1) # doctest: +SKIP
0.35832712500000063
>>> timeit.timeit(
... lambda: repr(ps.to_datetime(s, infer_datetime_format=False)),
... number = 1) # doctest: +SKIP
0.8895321660000004
Using a unix epoch time
>>> ps.to_datetime(1490195805, unit='s')
Timestamp('2017-03-22 15:16:45')
>>> ps.to_datetime(1490195805433502912, unit='ns')
Timestamp('2017-03-22 15:16:45.433502912')
Using a non-unix epoch origin
>>> ps.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
DatetimeIndex(['1960-01-02', '1960-01-03', '1960-01-04'], dtype='datetime64[ns]', freq=None)
"""
def pandas_to_datetime(pser_or_pdf: Union[pd.DataFrame, pd.Series]) -> Series[np.datetime64]:
if isinstance(pser_or_pdf, pd.DataFrame):
pser_or_pdf = pser_or_pdf[["year", "month", "day"]]
return pd.to_datetime(
pser_or_pdf,
errors=errors,
format=format,
unit=unit,
infer_datetime_format=infer_datetime_format,
origin=origin,
)
if isinstance(arg, Series):
return arg.pandas_on_spark.transform_batch(pandas_to_datetime)
if isinstance(arg, DataFrame):
psdf = arg[["year", "month", "day"]]
return psdf.pandas_on_spark.transform_batch(pandas_to_datetime)
return pd.to_datetime(
arg,
errors=errors,
format=format,
unit=unit,
infer_datetime_format=infer_datetime_format,
origin=origin,
)
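# Illustrative comment, not part of the original module: a pandas-on-Spark Series is
# converted batch-wise via transform_batch, a DataFrame contributes only its 'year',
# 'month' and 'day' columns, and any other input is handed straight to pd.to_datetime:
#
#   ps.to_datetime(ps.Series(['3/11/2000', '3/12/2000']))
#   ps.to_datetime(ps.DataFrame({'year': [2015], 'month': [2], 'day': [4]}))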
def date_range(
start: Union[str, Any] = None,
end: Union[str, Any] = None,
periods: Optional[int] = None,
freq: Optional[Union[str, DateOffset]] = None,
tz: Optional[Union[str, tzinfo]] = None,
normalize: bool = False,
name: Optional[str] = None,
closed: Optional[str] = None,
**kwargs: Any
) -> DatetimeIndex:
"""
Return a fixed frequency DatetimeIndex.
Parameters
----------
start : str or datetime-like, optional
Left bound for generating dates.
end : str or datetime-like, optional
Right bound for generating dates.
periods : int, optional
Number of periods to generate.
freq : str or DateOffset, default 'D'
Frequency strings can have multiples, e.g. '5H'.
tz : str or tzinfo, optional
Time zone name for returning localized DatetimeIndex, for example
'Asia/Hong_Kong'. By default, the resulting DatetimeIndex is
timezone-naive.
normalize : bool, default False
Normalize start/end dates to midnight before generating date range.
name : str, default None
Name of the resulting DatetimeIndex.
closed : {None, 'left', 'right'}, optional
Make the interval closed with respect to the given frequency to
the 'left', 'right', or both sides (None, the default).
**kwargs
For compatibility. Has no effect on the result.
Returns
-------
rng : DatetimeIndex
See Also
--------
DatetimeIndex : An immutable container for datetimes.
Notes
-----
Of the four parameters ``start``, ``end``, ``periods``, and ``freq``,
exactly three must be specified. If ``freq`` is omitted, the resulting
``DatetimeIndex`` will have ``periods`` linearly spaced elements between
``start`` and ``end`` (closed on both sides).
To learn more about the frequency strings, please see `this link
<https://pandas.pydata.org/pandas-docs/stable/user_guide/timeseries.html#offset-aliases>`__.
Examples
--------
**Specifying the values**
The next four examples generate the same `DatetimeIndex`, but vary
the combination of `start`, `end` and `periods`.
Specify `start` and `end`, with the default daily frequency.
>>> ps.date_range(start='1/1/2018', end='1/08/2018') # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq=None)
Specify `start` and `periods`, the number of periods (days).
>>> ps.date_range(start='1/1/2018', periods=8) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-01', '2018-01-02', '2018-01-03', '2018-01-04',
'2018-01-05', '2018-01-06', '2018-01-07', '2018-01-08'],
dtype='datetime64[ns]', freq=None)
Specify `end` and `periods`, the number of periods (days).
>>> ps.date_range(end='1/1/2018', periods=8) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-12-25', '2017-12-26', '2017-12-27', '2017-12-28',
'2017-12-29', '2017-12-30', '2017-12-31', '2018-01-01'],
dtype='datetime64[ns]', freq=None)
Specify `start`, `end`, and `periods`; the frequency is generated
automatically (linearly spaced).
>>> ps.date_range(
... start='2018-04-24', end='2018-04-27', periods=3
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-04-24 00:00:00', '2018-04-25 12:00:00',
'2018-04-27 00:00:00'],
dtype='datetime64[ns]', freq=None)
**Other Parameters**
Changed the `freq` (frequency) to ``'M'`` (month end frequency).
>>> ps.date_range(start='1/1/2018', periods=5, freq='M') # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-31', '2018-02-28', '2018-03-31', '2018-04-30',
'2018-05-31'],
dtype='datetime64[ns]', freq=None)
Multiples are allowed
>>> ps.date_range(start='1/1/2018', periods=5, freq='3M') # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq=None)
`freq` can also be specified as an Offset object.
>>> ps.date_range(
... start='1/1/2018', periods=5, freq=pd.offsets.MonthEnd(3)
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2018-01-31', '2018-04-30', '2018-07-31', '2018-10-31',
'2019-01-31'],
dtype='datetime64[ns]', freq=None)
`closed` controls whether to include `start` and `end` that are on the
boundary. The default includes boundary points on either end.
>>> ps.date_range(
... start='2017-01-01', end='2017-01-04', closed=None
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03', '2017-01-04'],
dtype='datetime64[ns]', freq=None)
Use ``closed='left'`` to exclude `end` if it falls on the boundary.
>>> ps.date_range(
... start='2017-01-01', end='2017-01-04', closed='left'
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-01-01', '2017-01-02', '2017-01-03'], dtype='datetime64[ns]', freq=None)
Use ``closed='right'`` to exclude `start` if it falls on the boundary.
>>> ps.date_range(
... start='2017-01-01', end='2017-01-04', closed='right'
... ) # doctest: +NORMALIZE_WHITESPACE
DatetimeIndex(['2017-01-02', '2017-01-03', '2017-01-04'], dtype='datetime64[ns]', freq=None)
"""
assert freq not in ["N", "ns"], "nanoseconds is not supported"
assert tz is None, "Localized DatetimeIndex is not supported"
return cast(
DatetimeIndex,
ps.from_pandas(
pd.date_range(
start=start,
end=end,
periods=periods,
freq=freq,
tz=tz,
normalize=normalize,
name=name,
closed=closed,
**kwargs
)
),
)
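# Note: a minimal sketch of what date_range above effectively does, assuming
# pandas is available on the driver -- the range is materialized with pandas
# and then distributed with ps.from_pandas:
#
#   >>> pidx = pd.date_range(start="2018-04-24", periods=3, freq="D")
#   >>> psidx = ps.from_pandas(pidx)  # pandas-on-Spark DatetimeIndex
#
# The ``freq`` metadata of the pandas index is not carried over, which is
# consistent with the ``freq=None`` shown in the doctests above.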
def get_dummies(
data: Union[DataFrame, Series],
prefix: Optional[Union[str, List[str], Dict[str, str]]] = None,
prefix_sep: str = "_",
dummy_na: bool = False,
columns: Optional[Union[Any, Tuple, List[Union[Any, Tuple]]]] = None,
sparse: bool = False,
drop_first: bool = False,
dtype: Optional[Union[str, Dtype]] = None,
) -> DataFrame:
"""
Convert categorical variable into dummy/indicator variables, also
known as one hot encoding.
Parameters
----------
data : array-like, Series, or DataFrame
prefix : string, list of strings, or dict of strings, default None
String to append DataFrame column names.
Pass a list with length equal to the number of columns
when calling get_dummies on a DataFrame. Alternatively, `prefix`
can be a dictionary mapping column names to prefixes.
prefix_sep : string, default '_'
If appending prefix, separator/delimiter to use. Or pass a
list or dictionary as with `prefix.`
dummy_na : bool, default False
Add a column to indicate NaNs, if False NaNs are ignored.
columns : list-like, default None
Column names in the DataFrame to be encoded.
If `columns` is None then all the columns with
`object` or `category` dtype will be converted.
sparse : bool, default False
        Whether the dummy-encoded columns should be backed by
a :class:`SparseArray` (True) or a regular NumPy array (False).
In pandas-on-Spark, this value must be "False".
drop_first : bool, default False
Whether to get k-1 dummies out of k categorical levels by removing the
first level.
dtype : dtype, default np.uint8
Data type for new columns. Only a single dtype is allowed.
Returns
-------
dummies : DataFrame
See Also
--------
Series.str.get_dummies
Examples
--------
>>> s = ps.Series(list('abca'))
>>> ps.get_dummies(s)
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
>>> df = ps.DataFrame({'A': ['a', 'b', 'a'], 'B': ['b', 'a', 'c'],
... 'C': [1, 2, 3]},
... columns=['A', 'B', 'C'])
>>> ps.get_dummies(df, prefix=['col1', 'col2'])
C col1_a col1_b col2_a col2_b col2_c
0 1 1 0 0 1 0
1 2 0 1 1 0 0
2 3 1 0 0 0 1
>>> ps.get_dummies(ps.Series(list('abcaa')))
a b c
0 1 0 0
1 0 1 0
2 0 0 1
3 1 0 0
4 1 0 0
>>> ps.get_dummies(ps.Series(list('abcaa')), drop_first=True)
b c
0 0 0
1 1 0
2 0 1
3 0 0
4 0 0
>>> ps.get_dummies(ps.Series(list('abc')), dtype=float)
a b c
0 1.0 0.0 0.0
1 0.0 1.0 0.0
2 0.0 0.0 1.0
"""
if sparse is not False:
raise NotImplementedError("get_dummies currently does not support sparse")
if columns is not None:
if not is_list_like(columns):
raise TypeError("Input must be a list-like for parameter `columns`")
if dtype is None:
dtype = "byte"
if isinstance(data, Series):
if prefix is not None:
prefix = [str(prefix)]
psdf = data.to_frame()
column_labels = psdf._internal.column_labels
remaining_columns = []
else:
if isinstance(prefix, str):
raise NotImplementedError(
"get_dummies currently does not support prefix as string types"
)
psdf = data.copy()
if columns is None:
column_labels = [
label
for label in psdf._internal.column_labels
if isinstance(
psdf._internal.spark_type_for(label), _get_dummies_default_accept_types
)
]
else:
if is_name_like_tuple(columns):
column_labels = [
label
for label in psdf._internal.column_labels
if label[: len(columns)] == columns
]
if len(column_labels) == 0:
raise KeyError(name_like_string(columns))
if prefix is None:
prefix = [
str(label[len(columns) :])
if len(label) > len(columns) + 1
else label[len(columns)]
if len(label) == len(columns) + 1
else ""
for label in column_labels
]
elif any(isinstance(col, tuple) for col in columns) and any(
not is_name_like_tuple(col) for col in columns
):
raise ValueError(
"Expected tuple, got {}".format(
type(set(col for col in columns if not is_name_like_tuple(col)).pop())
)
)
else:
column_labels = [
label
for key in columns
for label in psdf._internal.column_labels
if label == key or label[0] == key
]
if len(column_labels) == 0:
if columns is None:
return psdf
raise KeyError("{} not in index".format(columns))
if prefix is None:
prefix = [str(label) if len(label) > 1 else label[0] for label in column_labels]
column_labels_set = set(column_labels)
remaining_columns = [
(
psdf[label]
if psdf._internal.column_labels_level == 1
else psdf[label].rename(name_like_string(label))
)
for label in psdf._internal.column_labels
if label not in column_labels_set
]
if any(
not isinstance(psdf._internal.spark_type_for(label), _get_dummies_acceptable_types)
for label in column_labels
):
raise NotImplementedError(
"get_dummies currently only accept {} values".format(
", ".join(
[cast(Type[DataType], t).typeName() for t in _get_dummies_acceptable_types]
)
)
)
if prefix is not None and len(column_labels) != len(prefix):
raise ValueError(
"Length of 'prefix' ({}) did not match the length of "
"the columns being encoded ({}).".format(len(prefix), len(column_labels))
)
elif isinstance(prefix, dict):
prefix = [prefix[column_label[0]] for column_label in column_labels]
all_values = _reduce_spark_multi(
psdf._internal.spark_frame,
[F.collect_set(psdf._internal.spark_column_for(label)) for label in column_labels],
)
for i, label in enumerate(column_labels):
values = all_values[i]
if isinstance(values, np.ndarray):
values = values.tolist()
values = sorted(values)
if drop_first:
values = values[1:]
def column_name(value: str) -> str:
if prefix is None or cast(List[str], prefix)[i] == "":
return value
else:
return "{}{}{}".format(cast(List[str], prefix)[i], prefix_sep, value)
for value in values:
remaining_columns.append(
(psdf[label].notnull() & (psdf[label] == value))
.astype(dtype)
.rename(column_name(value))
)
if dummy_na:
remaining_columns.append(psdf[label].isnull().astype(dtype).rename(column_name(np.nan)))
return psdf[remaining_columns]
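# Note: a rough sketch of the encoding performed in get_dummies above for a
# single column; the column name and values are illustrative:
#
#   >>> psdf = ps.DataFrame({"A": ["a", "b", "a"]})
#   >>> a_col = (psdf["A"].notnull() & (psdf["A"] == "a")).astype("byte").rename("A_a")
#   >>> b_col = (psdf["A"].notnull() & (psdf["A"] == "b")).astype("byte").rename("A_b")
#
# The distinct values are collected with F.collect_set, one such boolean/cast
# column is built per value, and the first value is dropped when drop_first=True.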
# TODO: there are many parameters to implement and support. See pandas's pd.concat.
def concat(
objs: List[Union[DataFrame, Series]],
axis: Union[int, str] = 0,
join: str = "outer",
ignore_index: bool = False,
sort: bool = False,
) -> Union[Series, DataFrame]:
"""
Concatenate pandas-on-Spark objects along a particular axis with optional set logic
along the other axes.
Parameters
----------
objs : a sequence of Series or DataFrame
Any None objects will be dropped silently unless
they are all None in which case a ValueError will be raised
axis : {0/'index', 1/'columns'}, default 0
The axis to concatenate along.
join : {'inner', 'outer'}, default 'outer'
How to handle indexes on other axis (or axes).
ignore_index : bool, default False
If True, do not use the index values along the concatenation axis. The
resulting axis will be labeled 0, ..., n - 1. This is useful if you are
concatenating objects where the concatenation axis does not have
meaningful indexing information. Note the index values on the other
axes are still respected in the join.
sort : bool, default False
Sort non-concatenation axis if it is not already aligned.
Returns
-------
object, type of objs
When concatenating all ``Series`` along the index (axis=0), a
``Series`` is returned. When ``objs`` contains at least one
``DataFrame``, a ``DataFrame`` is returned. When concatenating along
the columns (axis=1), a ``DataFrame`` is returned.
See Also
--------
Series.append : Concatenate Series.
DataFrame.join : Join DataFrames using indexes.
DataFrame.merge : Merge DataFrames by indexes or columns.
Examples
--------
>>> from pyspark.pandas.config import set_option, reset_option
>>> set_option("compute.ops_on_diff_frames", True)
Combine two ``Series``.
>>> s1 = ps.Series(['a', 'b'])
>>> s2 = ps.Series(['c', 'd'])
>>> ps.concat([s1, s2])
0 a
1 b
0 c
1 d
dtype: object
Clear the existing index and reset it in the result
by setting the ``ignore_index`` option to ``True``.
>>> ps.concat([s1, s2], ignore_index=True)
0 a
1 b
2 c
3 d
dtype: object
Combine two ``DataFrame`` objects with identical columns.
>>> df1 = ps.DataFrame([['a', 1], ['b', 2]],
... columns=['letter', 'number'])
>>> df1
letter number
0 a 1
1 b 2
>>> df2 = ps.DataFrame([['c', 3], ['d', 4]],
... columns=['letter', 'number'])
>>> df2
letter number
0 c 3
1 d 4
>>> ps.concat([df1, df2])
letter number
0 a 1
1 b 2
0 c 3
1 d 4
Combine ``DataFrame`` and ``Series`` objects with different columns.
>>> ps.concat([df2, s1])
letter number 0
0 c 3.0 None
1 d 4.0 None
0 None NaN a
1 None NaN b
Combine ``DataFrame`` objects with overlapping columns
and return everything. Columns outside the intersection will
be filled with ``None`` values.
>>> df3 = ps.DataFrame([['c', 3, 'cat'], ['d', 4, 'dog']],
... columns=['letter', 'number', 'animal'])
>>> df3
letter number animal
0 c 3 cat
1 d 4 dog
>>> ps.concat([df1, df3])
letter number animal
0 a 1 None
1 b 2 None
0 c 3 cat
1 d 4 dog
Sort the columns.
>>> ps.concat([df1, df3], sort=True)
animal letter number
0 None a 1
1 None b 2
0 cat c 3
1 dog d 4
Combine ``DataFrame`` objects with overlapping columns
and return only those that are shared by passing ``inner`` to
the ``join`` keyword argument.
>>> ps.concat([df1, df3], join="inner")
letter number
0 a 1
1 b 2
0 c 3
1 d 4
>>> df4 = ps.DataFrame([['bird', 'polly'], ['monkey', 'george']],
... columns=['animal', 'name'])
Combine with column axis.
>>> ps.concat([df1, df4], axis=1)
letter number animal name
0 a 1 bird polly
1 b 2 monkey george
>>> reset_option("compute.ops_on_diff_frames")
"""
if isinstance(objs, (DataFrame, IndexOpsMixin)) or not isinstance(
objs, Iterable
): # TODO: support dict
raise TypeError(
"first argument must be an iterable of pandas-on-Spark "
"objects, you passed an object of type "
'"{name}"'.format(name=type(objs).__name__)
)
if len(cast(Sized, objs)) == 0:
raise ValueError("No objects to concatenate")
objs = list(filter(lambda obj: obj is not None, objs))
if len(objs) == 0:
raise ValueError("All objects passed were None")
for obj in objs:
if not isinstance(obj, (Series, DataFrame)):
            raise TypeError(
                "cannot concatenate object of type "
                "'{name}'; only ps.Series "
                "and ps.DataFrame are valid".format(name=type(obj).__name__)
            )
if join not in ["inner", "outer"]:
raise ValueError("Only can inner (intersect) or outer (union) join the other axis.")
axis = validate_axis(axis)
if axis == 1:
psdfs = [obj.to_frame() if isinstance(obj, Series) else obj for obj in objs]
level = min(psdf._internal.column_labels_level for psdf in psdfs)
psdfs = [
DataFrame._index_normalized_frame(level, psdf)
if psdf._internal.column_labels_level > level
else psdf
for psdf in psdfs
]
concat_psdf = psdfs[0]
column_labels = concat_psdf._internal.column_labels.copy()
psdfs_not_same_anchor = []
for psdf in psdfs[1:]:
duplicated = [label for label in psdf._internal.column_labels if label in column_labels]
if len(duplicated) > 0:
pretty_names = [name_like_string(label) for label in duplicated]
raise ValueError(
"Labels have to be unique; however, got duplicated labels %s." % pretty_names
)
column_labels.extend(psdf._internal.column_labels)
if same_anchor(concat_psdf, psdf):
concat_psdf = DataFrame(
concat_psdf._internal.with_new_columns(
[
concat_psdf._psser_for(label)
for label in concat_psdf._internal.column_labels
]
+ [psdf._psser_for(label) for label in psdf._internal.column_labels]
)
)
else:
psdfs_not_same_anchor.append(psdf)
if len(psdfs_not_same_anchor) > 0:
@no_type_check
def resolve_func(psdf, this_column_labels, that_column_labels):
raise AssertionError("This should not happen.")
for psdf in psdfs_not_same_anchor:
if join == "inner":
concat_psdf = align_diff_frames(
resolve_func,
concat_psdf,
psdf,
fillna=False,
how="inner",
)
elif join == "outer":
concat_psdf = align_diff_frames(
resolve_func,
concat_psdf,
psdf,
fillna=False,
how="full",
)
concat_psdf = concat_psdf[column_labels]
if ignore_index:
concat_psdf.columns = list(map(str, _range(len(concat_psdf.columns))))
if sort:
concat_psdf = concat_psdf.sort_index()
return concat_psdf
# Series, Series ...
# We should return Series if objects are all Series.
should_return_series = all(map(lambda obj: isinstance(obj, Series), objs))
# DataFrame, Series ... & Series, Series ...
# In this case, we should return DataFrame.
new_objs = []
num_series = 0
series_names = set()
for obj in objs:
if isinstance(obj, Series):
num_series += 1
series_names.add(obj.name)
obj = obj.to_frame(DEFAULT_SERIES_NAME)
new_objs.append(obj)
column_labels_levels = set(obj._internal.column_labels_level for obj in new_objs)
if len(column_labels_levels) != 1:
raise ValueError("MultiIndex columns should have the same levels")
# DataFrame, DataFrame, ...
# All Series are converted into DataFrame and then compute concat.
if not ignore_index:
indices_of_psdfs = [psdf.index for psdf in new_objs]
index_of_first_psdf = indices_of_psdfs[0]
for index_of_psdf in indices_of_psdfs:
if index_of_first_psdf.names != index_of_psdf.names:
raise ValueError(
"Index type and names should be same in the objects to concatenate. "
"You passed different indices "
"{index_of_first_psdf} and {index_of_psdf}".format(
index_of_first_psdf=index_of_first_psdf.names,
index_of_psdf=index_of_psdf.names,
)
)
column_labels_of_psdfs = [psdf._internal.column_labels for psdf in new_objs]
if ignore_index:
index_names_of_psdfs = [[] for _ in new_objs] # type: List
else:
index_names_of_psdfs = [psdf._internal.index_names for psdf in new_objs]
if all(name == index_names_of_psdfs[0] for name in index_names_of_psdfs) and all(
idx == column_labels_of_psdfs[0] for idx in column_labels_of_psdfs
):
# If all columns are in the same order and values, use it.
psdfs = new_objs
else:
if join == "inner":
interested_columns = set.intersection(*map(lambda x: set(x), column_labels_of_psdfs))
            # Keep the column order of the first DataFrame.
merged_columns = [
label for label in column_labels_of_psdfs[0] if label in interested_columns
]
            # With multi-index columns, pandas is flaky when `join="inner"` and `sort=False`,
            # so always sort to follow the `join="outer"` case behavior.
if (len(merged_columns) > 0 and len(merged_columns[0]) > 1) or sort:
# FIXME: better ordering
merged_columns = sorted(merged_columns, key=name_like_string)
psdfs = [psdf[merged_columns] for psdf in new_objs]
elif join == "outer":
merged_columns = []
for labels in column_labels_of_psdfs:
merged_columns.extend(label for label in labels if label not in merged_columns)
assert len(merged_columns) > 0
if LooseVersion(pd.__version__) < LooseVersion("0.24"):
# Always sort when multi-index columns, and if there are Series, never sort.
sort = len(merged_columns[0]) > 1 or (num_series == 0 and sort)
else:
                # Always sort when multi-index columns or there is more than one Series,
                # and if there is exactly one Series, never sort.
sort = len(merged_columns[0]) > 1 or num_series > 1 or (num_series != 1 and sort)
if sort:
# FIXME: better ordering
merged_columns = sorted(merged_columns, key=name_like_string)
psdfs = []
for psdf in new_objs:
columns_to_add = list(set(merged_columns) - set(psdf._internal.column_labels))
# TODO: NaN and None difference for missing values. pandas seems filling NaN.
sdf = psdf._internal.resolved_copy.spark_frame
for label in columns_to_add:
sdf = sdf.withColumn(name_like_string(label), F.lit(None))
data_columns = psdf._internal.data_spark_column_names + [
name_like_string(label) for label in columns_to_add
]
psdf = DataFrame(
psdf._internal.copy(
spark_frame=sdf,
index_spark_columns=[
scol_for(sdf, col) for col in psdf._internal.index_spark_column_names
],
column_labels=(psdf._internal.column_labels + columns_to_add),
data_spark_columns=[scol_for(sdf, col) for col in data_columns],
data_fields=(psdf._internal.data_fields + ([None] * len(columns_to_add))),
)
)
psdfs.append(psdf[merged_columns])
if ignore_index:
sdfs = [
psdf._internal.spark_frame.select(psdf._internal.data_spark_columns) for psdf in psdfs
]
else:
sdfs = [
psdf._internal.spark_frame.select(
psdf._internal.index_spark_columns + psdf._internal.data_spark_columns
)
for psdf in psdfs
]
concatenated = reduce(lambda x, y: x.union(y), sdfs)
if ignore_index:
index_spark_column_names = []
index_names = []
index_fields = []
else:
index_spark_column_names = psdfs[0]._internal.index_spark_column_names
index_names = psdfs[0]._internal.index_names
index_fields = psdfs[0]._internal.index_fields
result_psdf = DataFrame(
psdfs[0]._internal.copy(
spark_frame=concatenated,
index_spark_columns=[scol_for(concatenated, col) for col in index_spark_column_names],
index_names=index_names,
index_fields=index_fields,
data_spark_columns=[
scol_for(concatenated, col) for col in psdfs[0]._internal.data_spark_column_names
],
data_fields=None, # TODO: dtypes?
)
) # type: DataFrame
if should_return_series:
# If all input were Series, we should return Series.
if len(series_names) == 1:
name = series_names.pop()
else:
name = None
return first_series(result_psdf).rename(name)
else:
return result_psdf
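# Note: a simplified view of the axis=0 path above, assuming the objects already
# share the same column labels and index names -- the underlying Spark frames are
# selected down to index plus data columns and combined with a plain UNION:
#
#   >>> # roughly what `reduce(lambda x, y: x.union(y), sdfs)` does for two frames
#   >>> combined = sdfs[0].union(sdfs[1])
#
# Column alignment (inner/outer join of the labels) happens before this union step.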
def melt(
frame: DataFrame,
id_vars: Optional[Union[Any, Tuple, List[Union[Any, Tuple]]]] = None,
value_vars: Optional[Union[Any, Tuple, List[Union[Any, Tuple]]]] = None,
var_name: Optional[Union[str, List[str]]] = None,
value_name: str = "value",
) -> DataFrame:
return DataFrame.melt(frame, id_vars, value_vars, var_name, value_name)
melt.__doc__ = DataFrame.melt.__doc__
@no_type_check
def isna(obj):
"""
Detect missing values for an array-like object.
This function takes a scalar or array-like object and indicates
whether values are missing (``NaN`` in numeric arrays, ``None`` or ``NaN``
in object arrays).
Parameters
----------
obj : scalar or array-like
Object to check for null or missing values.
Returns
-------
bool or array-like of bool
For scalar input, returns a scalar boolean.
For array input, returns an array of boolean indicating whether each
corresponding element is missing.
See Also
--------
Series.isna : Detect missing values in a Series.
Series.isnull : Detect missing values in a Series.
DataFrame.isna : Detect missing values in a DataFrame.
DataFrame.isnull : Detect missing values in a DataFrame.
Index.isna : Detect missing values in an Index.
Index.isnull : Detect missing values in an Index.
Examples
--------
Scalar arguments (including strings) result in a scalar boolean.
>>> ps.isna('dog')
False
>>> ps.isna(np.nan)
True
ndarrays result in an ndarray of booleans.
>>> array = np.array([[1, np.nan, 3], [4, 5, np.nan]])
>>> array
array([[ 1., nan, 3.],
[ 4., 5., nan]])
>>> ps.isna(array)
array([[False, True, False],
[False, False, True]])
For Series and DataFrame, the same type is returned, containing booleans.
>>> df = ps.DataFrame({'a': ['ant', 'bee', 'cat'], 'b': ['dog', None, 'fly']})
>>> df
a b
0 ant dog
1 bee None
2 cat fly
>>> ps.isna(df)
a b
0 False False
1 False True
2 False False
>>> ps.isnull(df.b)
0 False
1 True
2 False
Name: b, dtype: bool
"""
# TODO: Add back:
# notnull : Boolean inverse of pandas.isnull.
# into the See Also in the docstring. It does not find the method in the latest numpydoc.
if isinstance(obj, (DataFrame, Series)):
return obj.isnull()
else:
return pd.isnull(obj)
isnull = isna
@no_type_check
def notna(obj):
"""
Detect existing (non-missing) values.
Return a boolean same-sized object indicating if the values are not NA.
Non-missing values get mapped to True. NA values, such as None or
:attr:`numpy.NaN`, get mapped to False values.
Returns
-------
bool or array-like of bool
Mask of bool values for each element that
indicates whether an element is not an NA value.
See Also
--------
isna : Detect missing values for an array-like object.
Series.notna : Boolean inverse of Series.isna.
DataFrame.notnull : Boolean inverse of DataFrame.isnull.
Index.notna : Boolean inverse of Index.isna.
Index.notnull : Boolean inverse of Index.isnull.
Examples
--------
Show which entries in a DataFrame are not NA.
>>> df = ps.DataFrame({'age': [5, 6, np.NaN],
... 'born': [pd.NaT, pd.Timestamp('1939-05-27'),
... pd.Timestamp('1940-04-25')],
... 'name': ['Alfred', 'Batman', ''],
... 'toy': [None, 'Batmobile', 'Joker']})
>>> df
age born name toy
0 5.0 NaT Alfred None
1 6.0 1939-05-27 Batman Batmobile
2 NaN 1940-04-25 Joker
>>> df.notnull()
age born name toy
0 True False True False
1 True True True True
2 False True True True
Show which entries in a Series are not NA.
>>> ser = ps.Series([5, 6, np.NaN])
>>> ser
0 5.0
1 6.0
2 NaN
dtype: float64
>>> ps.notna(ser)
0 True
1 True
2 False
dtype: bool
>>> ps.notna(ser.index)
True
"""
# TODO: Add back:
# Series.notnull :Boolean inverse of Series.isnull.
# DataFrame.notna :Boolean inverse of DataFrame.isna.
# into the See Also in the docstring. It does not find the method in the latest numpydoc.
if isinstance(obj, (DataFrame, Series)):
return obj.notna()
else:
return pd.notna(obj)
notnull = notna
def merge(
obj: DataFrame,
right: DataFrame,
how: str = "inner",
on: Union[Any, List[Any], Tuple, List[Tuple]] = None,
left_on: Union[Any, List[Any], Tuple, List[Tuple]] = None,
right_on: Union[Any, List[Any], Tuple, List[Tuple]] = None,
left_index: bool = False,
right_index: bool = False,
suffixes: Tuple[str, str] = ("_x", "_y"),
) -> "DataFrame":
"""
Merge DataFrame objects with a database-style join.
The index of the resulting DataFrame will be one of the following:
- 0...n if no index is used for merging
- Index of the left DataFrame if merged only on the index of the right DataFrame
- Index of the right DataFrame if merged only on the index of the left DataFrame
- All involved indices if merged using the indices of both DataFrames
e.g. if `left` with indices (a, x) and `right` with indices (b, x), the result will
be an index (x, a, b)
Parameters
----------
right: Object to merge with.
how: Type of merge to be performed.
{'left', 'right', 'outer', 'inner'}, default 'inner'
left: use only keys from left frame, similar to a SQL left outer join; preserve key
order.
right: use only keys from right frame, similar to a SQL right outer join; preserve key
order.
outer: use union of keys from both frames, similar to a SQL full outer join; sort keys
lexicographically.
inner: use intersection of keys from both frames, similar to a SQL inner join;
preserve the order of the left keys.
on: Column or index level names to join on. These must be found in both DataFrames. If on
is None and not merging on indexes then this defaults to the intersection of the
columns in both DataFrames.
left_on: Column or index level names to join on in the left DataFrame. Can also
be an array or list of arrays of the length of the left DataFrame.
These arrays are treated as if they are columns.
right_on: Column or index level names to join on in the right DataFrame. Can also
be an array or list of arrays of the length of the right DataFrame.
These arrays are treated as if they are columns.
left_index: Use the index from the left DataFrame as the join key(s). If it is a
MultiIndex, the number of keys in the other DataFrame (either the index or a number of
columns) must match the number of levels.
right_index: Use the index from the right DataFrame as the join key. Same caveats as
left_index.
suffixes: Suffix to apply to overlapping column names in the left and right side,
respectively.
Returns
-------
DataFrame
A DataFrame of the two merged objects.
Examples
--------
>>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value'])
>>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value'])
>>> df1
lkey value
0 foo 1
1 bar 2
2 baz 3
3 foo 5
>>> df2
rkey value
0 foo 5
1 bar 6
2 baz 7
3 foo 8
Merge df1 and df2 on the lkey and rkey columns. The value columns have
the default suffixes, _x and _y, appended.
>>> merged = ps.merge(df1, df2, left_on='lkey', right_on='rkey')
>>> merged.sort_values(by=['lkey', 'value_x', 'rkey', 'value_y']) # doctest: +ELLIPSIS
lkey value_x rkey value_y
...bar 2 bar 6
...baz 3 baz 7
...foo 1 foo 5
...foo 1 foo 8
...foo 5 foo 5
...foo 5 foo 8
>>> left_psdf = ps.DataFrame({'A': [1, 2]})
>>> right_psdf = ps.DataFrame({'B': ['x', 'y']}, index=[1, 2])
>>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True).sort_index()
A B
1 2 x
>>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True, how='left').sort_index()
A B
0 1 None
1 2 x
>>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True, how='right').sort_index()
A B
1 2.0 x
2 NaN y
>>> ps.merge(left_psdf, right_psdf, left_index=True, right_index=True, how='outer').sort_index()
A B
0 1.0 None
1 2.0 x
2 NaN y
Notes
-----
As described in #263, joining string columns currently returns None for missing values
instead of NaN.
"""
return obj.merge(
right,
how=how,
on=on,
left_on=left_on,
right_on=right_on,
left_index=left_index,
right_index=right_index,
suffixes=suffixes,
)
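# Note: the module-level merge above is a thin wrapper around DataFrame.merge;
# the following two calls are equivalent (DataFrames are illustrative):
#
#   >>> ps.merge(df1, df2, left_on="lkey", right_on="rkey")
#   >>> df1.merge(df2, left_on="lkey", right_on="rkey")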
@no_type_check
def to_numeric(arg):
"""
Convert argument to a numeric type.
Parameters
----------
arg : scalar, list, tuple, 1-d array, or Series
Returns
-------
ret : numeric if parsing succeeded.
See Also
--------
DataFrame.astype : Cast argument to a specified dtype.
to_datetime : Convert argument to datetime.
to_timedelta : Convert argument to timedelta.
numpy.ndarray.astype : Cast a numpy array to a specified type.
Examples
--------
>>> psser = ps.Series(['1.0', '2', '-3'])
>>> psser
0 1.0
1 2
2 -3
dtype: object
>>> ps.to_numeric(psser)
0 1.0
1 2.0
2 -3.0
dtype: float32
    If the given Series contains values that cannot be cast to float, they are cast to `np.nan`.
>>> psser = ps.Series(['apple', '1.0', '2', '-3'])
>>> psser
0 apple
1 1.0
2 2
3 -3
dtype: object
>>> ps.to_numeric(psser)
0 NaN
1 1.0
2 2.0
3 -3.0
dtype: float32
    Lists, tuples, 1-d arrays, and scalars are also supported.
>>> ps.to_numeric(['1.0', '2', '-3'])
array([ 1., 2., -3.])
>>> ps.to_numeric(('1.0', '2', '-3'))
array([ 1., 2., -3.])
>>> ps.to_numeric(np.array(['1.0', '2', '-3']))
array([ 1., 2., -3.])
>>> ps.to_numeric('1.0')
1.0
"""
if isinstance(arg, Series):
return arg._with_new_scol(arg.spark.column.cast("float"))
else:
return pd.to_numeric(arg)
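# Note: unlike pandas.to_numeric, the Series path above has no ``errors=``
# parameter; it simply casts the underlying Spark column to float, so values
# that cannot be parsed become null/NaN. A minimal equivalent sketch:
#
#   >>> psser = ps.Series(["1.0", "apple"])
#   >>> psser._with_new_scol(psser.spark.column.cast("float"))  # -> [1.0, NaN]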
def broadcast(obj: DataFrame) -> DataFrame:
"""
Marks a DataFrame as small enough for use in broadcast joins.
Parameters
----------
obj : DataFrame
Returns
-------
ret : DataFrame with broadcast hint.
See Also
--------
DataFrame.merge : Merge DataFrame objects with a database-style join.
DataFrame.join : Join columns of another DataFrame.
DataFrame.update : Modify in place using non-NA values from another DataFrame.
DataFrame.hint : Specifies some hint on the current DataFrame.
Examples
--------
>>> df1 = ps.DataFrame({'lkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [1, 2, 3, 5]},
... columns=['lkey', 'value']).set_index('lkey')
>>> df2 = ps.DataFrame({'rkey': ['foo', 'bar', 'baz', 'foo'],
... 'value': [5, 6, 7, 8]},
... columns=['rkey', 'value']).set_index('rkey')
>>> merged = df1.merge(ps.broadcast(df2), left_index=True, right_index=True)
>>> merged.spark.explain() # doctest: +ELLIPSIS
== Physical Plan ==
...
...BroadcastHashJoin...
...
"""
if not isinstance(obj, DataFrame):
raise TypeError("Invalid type : expected DataFrame got {}".format(type(obj).__name__))
return DataFrame(
obj._internal.with_new_sdf(F.broadcast(obj._internal.resolved_copy.spark_frame))
)
def read_orc(
path: str,
columns: Optional[List[str]] = None,
index_col: Optional[Union[str, List[str]]] = None,
**options: Any
) -> "DataFrame":
"""
Load an ORC object from the file path, returning a DataFrame.
Parameters
----------
path : str
The path string storing the ORC file to be read.
columns : list, default None
If not None, only these columns will be read from the file.
index_col : str or list of str, optional, default: None
Index column of table in Spark.
options : dict
All other options passed directly into Spark's data source.
Returns
-------
DataFrame
Examples
--------
>>> ps.range(1).to_orc('%s/read_spark_io/data.orc' % path)
>>> ps.read_orc('%s/read_spark_io/data.orc' % path, columns=['id'])
id
0 0
You can preserve the index in the roundtrip as below.
>>> ps.range(1).to_orc('%s/read_spark_io/data.orc' % path, index_col="index")
>>> ps.read_orc('%s/read_spark_io/data.orc' % path, columns=['id'], index_col="index")
... # doctest: +NORMALIZE_WHITESPACE
id
index
0 0
"""
if "options" in options and isinstance(options.get("options"), dict) and len(options) == 1:
options = options.get("options") # type: ignore
psdf = read_spark_io(path, format="orc", index_col=index_col, **options)
if columns is not None:
psdf_columns = psdf.columns
new_columns = list()
for column in list(columns):
if column in psdf_columns:
new_columns.append(column)
else:
raise ValueError("Unknown column name '{}'".format(column))
psdf = psdf[new_columns]
return psdf
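# Note: as the option-unwrapping branch above shows, both call styles are
# accepted (the path and option name are illustrative):
#
#   >>> ps.read_orc("/tmp/data.orc", options={"mergeSchema": "true"})
#   >>> ps.read_orc("/tmp/data.orc", mergeSchema="true")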
def _get_index_map(
sdf: spark.DataFrame, index_col: Optional[Union[str, List[str]]] = None
) -> Tuple[Optional[List[spark.Column]], Optional[List[Tuple]]]:
if index_col is not None:
if isinstance(index_col, str):
index_col = [index_col]
sdf_columns = set(sdf.columns)
for col in index_col:
if col not in sdf_columns:
raise KeyError(col)
index_spark_columns = [
scol_for(sdf, col) for col in index_col
] # type: Optional[List[spark.Column]]
index_names = [(col,) for col in index_col] # type: Optional[List[Tuple]]
else:
index_spark_columns = None
index_names = None
return index_spark_columns, index_names
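# Note: an illustrative example of what _get_index_map returns. For a Spark
# DataFrame with columns ["id", "value"] and index_col="id" it yields
# ([scol_for(sdf, "id")], [("id",)]); with index_col=None it yields (None, None).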
_get_dummies_default_accept_types = (DecimalType, StringType, DateType)
_get_dummies_acceptable_types = _get_dummies_default_accept_types + (
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
BooleanType,
TimestampType,
)
def _test() -> None:
import os
import doctest
import shutil
import sys
import tempfile
import uuid
from pyspark.sql import SparkSession
import pyspark.pandas.namespace
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.pandas.namespace.__dict__.copy()
globs["ps"] = pyspark.pandas
spark = (
SparkSession.builder.master("local[4]")
.appName("pyspark.pandas.namespace tests")
.getOrCreate()
)
db_name = "db%s" % str(uuid.uuid4()).replace("-", "")
spark.sql("CREATE DATABASE %s" % db_name)
globs["db"] = db_name
path = tempfile.mkdtemp()
globs["path"] = path
(failure_count, test_count) = doctest.testmod(
pyspark.pandas.namespace,
globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE,
)
shutil.rmtree(path, ignore_errors=True)
spark.sql("DROP DATABASE IF EXISTS %s CASCADE" % db_name)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
xi-studio/anime | newnet/show.py | 1 | 1080 | import numpy as np
import matplotlib.pyplot as plt
from scipy.sparse import csc_matrix
head = np.random.randint(low=0,high=10,size=20)
tail = np.random.randint(low=0,high=10,size=20)
row = np.arange(20)
data = np.ones(20)
a = csc_matrix((data, (row,head)),shape=(20,10)).toarray()
b = csc_matrix((data, (row,tail)),shape=(20,10)).toarray()
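# Note: `a` and `b` are 20x10 one-hot incidence matrices -- row i has a single 1
# in the column given by head[i] / tail[i] respectively. A rough dense-NumPy
# equivalent of the construction above:
#
#   a_dense = np.zeros((20, 10)); a_dense[row, head] = 1
#   b_dense = np.zeros((20, 10)); b_dense[row, tail] = 1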
def plotCM(cm,title,colorbarOn,givenAX):
ax = givenAX
idx = np.arange(10)
idy = np.arange(20)
plt.matshow(cm, fignum=False, cmap='Blues', vmin=0, vmax=5.0)
ax.set_xticks(range(10))
ax.set_xticklabels(idx)
plt.title(title,size=12)
for i in range(cm.shape[0]):
for j in range(cm.shape[1]):
ax.text(j,i,int(cm[i,j]),va='center', ha='center')
#fig1=plt.subplot(1, 3, 1)
#plotCM(a,"Head Index","off",fig1.axes)
fig2=plt.subplot(1, 1, 1)
w = np.random.randn(20,1)
plt.matshow(w, fignum=False, cmap='Blues', vmin=0, vmax=1.0)
for x in range(20):
fig2.axes.text(0,x,w[x,0],va='center', ha='center')
#fig3=plt.subplot(1, 3, 3)
#plotCM(b,"Tail Index","off",fig3.axes)
plt.show()
| mit |
aptiko/enhydris | enhydris/tests/test_models.py | 2 | 51867 | import datetime as dt
from io import StringIO
from unittest import mock
from django.contrib.auth.models import User
from django.contrib.gis.geos import MultiPolygon, Point, Polygon
from django.db import IntegrityError
from django.db.models.signals import post_save
from django.test import TestCase, override_settings
from django.utils import translation
import pandas as pd
from htimeseries import HTimeseries
from model_mommy import mommy
from parler.utils.context import switch_language
from enhydris import models
from enhydris.tests import TimeseriesDataMixin
class TestTimeseriesMixin:
@classmethod
def _create_test_timeseries(cls, data=""):
cls.station = mommy.make(
models.Station,
name="Celduin",
original_srid=2100,
geom=Point(x=21.06071, y=39.09518, srid=4326),
altitude=219,
)
cls.timeseries_group = mommy.make(
models.TimeseriesGroup,
name="Daily temperature",
gentity=cls.station,
unit_of_measurement__symbol="mm",
time_zone__code="IST",
time_zone__utc_offset=330,
variable__descr="Temperature",
precision=1,
remarks="This timeseries group rocks",
)
cls.timeseries = mommy.make(
models.Timeseries, timeseries_group=cls.timeseries_group, time_step="H"
)
cls.timeseries.set_data(StringIO(data))
class PersonTestCase(TestCase):
def test_create(self):
person = models.Person(last_name="Brown", first_name="Alice", initials="A.")
person.save()
self.assertEqual(models.Person.objects.first().last_name, "Brown")
def test_update(self):
mommy.make(models.Person)
person = models.Person.objects.first()
person.first_name = "Bob"
person.save()
self.assertEqual(models.Person.objects.first().first_name, "Bob")
def test_delete(self):
mommy.make(models.Person)
person = models.Person.objects.first()
person.delete()
self.assertEqual(models.Person.objects.count(), 0)
def test_str(self):
person = mommy.make(
models.Person, last_name="Brown", first_name="Alice", initials="A."
)
self.assertEqual(str(person), "Brown A.")
def test_ordering_string(self):
mommy.make(models.Person, last_name="Brown", first_name="Alice", initials="A.")
person = models.Person.objects.first()
self.assertEqual(person.ordering_string, "Brown Alice")
class OrganizationTestCase(TestCase):
def test_create(self):
organization = models.Organization(name="Crooks Intl", acronym="Crooks")
organization.save()
self.assertEqual(models.Organization.objects.first().name, "Crooks Intl")
def test_update(self):
mommy.make(models.Organization)
organization = models.Organization.objects.first()
organization.acronym = "Crooks"
organization.save()
self.assertEqual(models.Organization.objects.first().acronym, "Crooks")
def test_delete(self):
mommy.make(models.Organization)
organization = models.Organization.objects.first()
organization.delete()
self.assertEqual(models.Organization.objects.count(), 0)
def test_str(self):
org = mommy.make(models.Organization, name="Crooks Intl", acronym="Crooks")
self.assertEqual(str(org), "Crooks")
def test_ordering_string(self):
mommy.make(models.Organization, name="Crooks Intl", acronym="Crooks")
organization = models.Organization.objects.first()
self.assertEqual(organization.ordering_string, "Crooks Intl")
class VariableTestCase(TestCase):
def test_create(self):
gact = models.Variable(descr="Temperature")
gact.save()
self.assertEqual(models.Variable.objects.first().descr, "Temperature")
def test_update(self):
mommy.make(models.Variable, descr="Irrelevant")
gact = models.Variable.objects.first()
gact.descr = "Temperature"
gact.save()
self.assertEqual(models.Variable.objects.first().descr, "Temperature")
def test_delete(self):
mommy.make(models.Variable, descr="Temperature")
gact = models.Variable.objects.first()
gact.delete()
self.assertEqual(models.Variable.objects.count(), 0)
def test_str(self):
gact = self._create_variable("Temperature", "Θερμοκρασία")
self.assertEqual(str(gact), "Temperature")
with switch_language(gact, "el"):
self.assertEqual(str(gact), "Θερμοκρασία")
def test_manager_includes_objects_with_missing_translations(self):
variable = mommy.make(models.Variable, descr="hello")
self.assertEqual(str(variable), "hello")
with switch_language(variable, "el"):
models.Variable.objects.get(id=variable.id) # Shouldn't raise anything
def test_sort(self):
self._create_variable("Temperature", "Θερμοκρασία")
self._create_variable("Humidity", "Υγρασία")
self.assertEqual(
[v.descr for v in models.Variable.objects.all()],
["Humidity", "Temperature"],
)
with translation.override("el"):
self.assertEqual(
[v.descr for v in models.Variable.objects.all()],
["Θερμοκρασία", "Υγρασία"],
)
def _create_variable(self, english_name, greek_name):
mommy.make(models.Variable, descr=english_name)
variable = models.Variable.objects.get(translations__descr=english_name)
variable.translations.create(language_code="el", descr=greek_name)
return variable
@override_settings(
ENHYDRIS_USERS_CAN_ADD_CONTENT=True,
LANGUAGE_CODE="en",
LANGUAGES={("en", "English"), ("el", "Ελληνικά")},
)
def test_translation_bug(self):
# Normally Variable.__str__() should return a simple "return self.descr".
# However, there's a tricky bug somewhere, most probably in django-parler, but I
# can't nail it. Sometimes, when there's no registered translation in the
# active language, self.descr is None (when it should fall back to the fallback
# language). It occurs when trying to visit /admin/enhydris/station/add/, the
# active language is Greek, and one of the variables has an English translation
# but not a Greek translation. We work around it by changing Variable.__str__()
# to return whatever translation exists.
#
# Unfortunately, PARLER_LANGUAGES seems to not be overridable in tests;
# therefore, if you change Variable.__str__() to a simple "return self.descr",
# the only way to make this test fail is by manually specifying this in the
# settings:
# PARLER_LANGUAGES={
# SITE_ID: [{"code": "en"}, {"code": "el"}],
# "default": {"fallbacks": ["en"], "hide_untranslated": True},
# }
User.objects.create_user(
username="alice", password="topsecret", is_active=True, is_staff=True
)
self.client.login(username="alice", password="topsecret")
mommy.make(models.Variable, descr="pH")
response = self.client.get(
"/admin/enhydris/station/add/", HTTP_ACCEPT_LANGUAGE="el"
)
self.assertEqual(response.status_code, 200)
class GentityFileTestCase(TestCase):
def test_create(self):
station = mommy.make(models.Station)
gentity_file = models.GentityFile(gentity=station, descr="North view")
gentity_file.save()
self.assertEqual(models.GentityFile.objects.first().descr, "North view")
def test_update(self):
mommy.make(models.GentityFile)
gentity_file = models.GentityFile.objects.first()
gentity_file.descr = "North view"
gentity_file.save()
self.assertEqual(models.GentityFile.objects.first().descr, "North view")
def test_delete(self):
mommy.make(models.GentityFile)
gentity_file = models.GentityFile.objects.first()
gentity_file.delete()
self.assertEqual(models.GentityFile.objects.count(), 0)
def test_str(self):
gentity_file = mommy.make(models.GentityFile, descr="North view")
self.assertEqual(str(gentity_file), "North view")
def test_related_station(self):
station = mommy.make(models.Station)
gentity_file = mommy.make(models.GentityFile, gentity=station)
self.assertEqual(gentity_file.related_station, station)
def test_related_station_is_empty_when_gentity_is_not_station(self):
garea = mommy.make(models.Garea)
gentity_file = mommy.make(models.GentityFile, gentity=garea)
self.assertIsNone(gentity_file.related_station)
class GentityImageTestCase(TestCase):
def test_str_desc(self):
image = mommy.make(models.GentityImage, descr="hello")
self.assertEqual(str(image), "hello")
def test_str_date(self):
image = mommy.make(models.GentityImage, descr="", date=dt.datetime(2021, 1, 22))
self.assertEqual(str(image), "2021-01-22")
def test_str_id(self):
image = mommy.make(models.GentityImage, descr="", date=None, id=85)
self.assertEqual(str(image), "85")
class GentityEventTestCase(TestCase):
def test_create(self):
station = mommy.make(models.Station)
type = mommy.make(models.EventType)
gentity_event = models.GentityEvent(
gentity=station,
type=type,
date=dt.datetime.now(),
user="Alice",
report="Station exploded",
)
gentity_event.save()
self.assertEqual(models.GentityEvent.objects.first().report, "Station exploded")
def test_update(self):
mommy.make(models.GentityEvent)
gentity_event = models.GentityEvent.objects.first()
gentity_event.report = "Station exploded"
gentity_event.save()
self.assertEqual(models.GentityEvent.objects.first().report, "Station exploded")
def test_delete(self):
mommy.make(models.GentityEvent)
gentity_event = models.GentityEvent.objects.first()
gentity_event.delete()
self.assertEqual(models.GentityEvent.objects.count(), 0)
def test_str(self):
gentity_event = mommy.make(
models.GentityEvent, date="2018-11-14", type__descr="Explosion"
)
self.assertEqual(str(gentity_event), "2018-11-14 Explosion")
def test_related_station(self):
station = mommy.make(models.Station)
gentity_event = mommy.make(models.GentityEvent, gentity=station)
self.assertEqual(gentity_event.related_station, station)
def test_related_station_is_empty_when_gentity_is_not_station(self):
garea = mommy.make(models.Garea)
gentity_event = mommy.make(models.GentityEvent, gentity=garea)
self.assertIsNone(gentity_event.related_station)
class GareaTestCase(TestCase):
def test_create(self):
category = mommy.make(models.GareaCategory)
garea = models.Garea(
name="Esgalduin",
category=category,
geom=MultiPolygon(Polygon(((30, 20), (45, 40), (10, 40), (30, 20)))),
)
garea.save()
self.assertEqual(models.Garea.objects.first().name, "Esgalduin")
def test_update(self):
mommy.make(models.Garea)
garea = models.Garea.objects.first()
garea.name = "Esgalduin"
garea.save()
self.assertEqual(models.Garea.objects.first().name, "Esgalduin")
def test_delete(self):
mommy.make(models.Garea)
garea = models.Garea.objects.first()
garea.delete()
self.assertEqual(models.Garea.objects.count(), 0)
def test_str(self):
garea = mommy.make(models.Garea, name="Esgalduin")
self.assertEqual(str(garea), "Esgalduin")
class StationTestCase(TestCase):
def test_create(self):
person = mommy.make(models.Person)
station = models.Station(
owner=person,
name="Hobbiton",
geom=Point(x=21.06071, y=39.09518, srid=4326),
)
station.save()
self.assertEqual(models.Station.objects.first().name, "Hobbiton")
def test_update(self):
mommy.make(models.Station)
station = models.Station.objects.first()
station.name = "Hobbiton"
station.save()
self.assertEqual(models.Station.objects.first().name, "Hobbiton")
def test_delete(self):
mommy.make(models.Station)
station = models.Station.objects.first()
station.delete()
self.assertEqual(models.Station.objects.count(), 0)
def test_str(self):
station = mommy.make(models.Station, name="Hobbiton")
self.assertEqual(str(station), "Hobbiton")
class StationOriginalCoordinatesTestCase(TestCase):
def setUp(self):
mommy.make(
models.Station,
name="Komboti",
geom=Point(x=21.06071, y=39.09518, srid=4326),
original_srid=2100,
)
self.station = models.Station.objects.get(name="Komboti")
def test_original_abscissa(self):
self.assertAlmostEqual(self.station.original_abscissa(), 245648.96, places=1)
def test_original_ordinate(self):
self.assertAlmostEqual(self.station.original_ordinate(), 4331165.20, places=1)
class StationOriginalCoordinatesWithNullSridTestCase(TestCase):
def setUp(self):
mommy.make(
models.Station,
name="Komboti",
geom=Point(x=21.06071, y=39.09518, srid=4326),
original_srid=None,
)
self.station = models.Station.objects.get(name="Komboti")
def test_original_abscissa(self):
self.assertAlmostEqual(self.station.original_abscissa(), 21.06071)
def test_original_ordinate(self):
self.assertAlmostEqual(self.station.original_ordinate(), 39.09518)
class StationLastUpdateTestCase(TestCase):
def setUp(self):
self.station = mommy.make(models.Station)
self.time_zone = mommy.make(models.TimeZone, code="EET", utc_offset=120)
self.timeseries_group = mommy.make(
models.TimeseriesGroup,
gentity=self.station,
time_zone=self.time_zone,
variable__descr="irrelevant",
precision=2,
)
def _create_timeseries(
self,
ye=None,
mo=None,
da=None,
ho=None,
mi=None,
type=models.Timeseries.INITIAL,
):
if ye:
end_date_utc = dt.datetime(ye, mo, da, ho, mi, tzinfo=dt.timezone.utc)
else:
end_date_utc = None
timeseries = mommy.make(
models.Timeseries, timeseries_group=self.timeseries_group, type=type
)
if end_date_utc:
timeseries.timeseriesrecord_set.create(
timestamp=end_date_utc, value=0, flags=""
)
def test_last_update_naive_when_all_timeseries_have_end_date(self):
self._create_timeseries(2019, 7, 24, 11, 26, type=models.Timeseries.INITIAL)
self._create_timeseries(2019, 7, 23, 5, 10, type=models.Timeseries.CHECKED)
self.assertEqual(
self.station.last_update_naive, dt.datetime(2019, 7, 24, 13, 26)
)
def test_last_update_naive_when_one_timeseries_has_no_data(self):
self._create_timeseries(2019, 7, 24, 11, 26, type=models.Timeseries.INITIAL)
self._create_timeseries(type=models.Timeseries.CHECKED)
self.assertEqual(
self.station.last_update_naive, dt.datetime(2019, 7, 24, 13, 26)
)
    def test_last_update_naive_when_all_timeseries_have_no_data(self):
self._create_timeseries(type=models.Timeseries.INITIAL)
self._create_timeseries(type=models.Timeseries.CHECKED)
self.assertIsNone(self.station.last_update_naive)
def test_last_update_naive_when_no_timeseries(self):
self.assertIsNone(self.station.last_update_naive)
def test_last_update(self):
self._create_timeseries(2019, 7, 24, 11, 26, type=models.Timeseries.INITIAL)
tzinfo = dt.timezone(dt.timedelta(hours=2), "EET")
self.assertEqual(
self.station.last_update, dt.datetime(2019, 7, 24, 13, 26, tzinfo=tzinfo)
)
def test_last_update_cache_when_atleast_one_timeseries_has_end_date(self):
        # Since it's not possible to cache `None` values, make sure to create
# at least one timeseries with an end date value.
self._create_timeseries(2019, 7, 24, 11, 26, type=models.Timeseries.INITIAL)
# Make sure to fetch the `end_date` value of the timeseries
timeseries = models.Timeseries.objects.filter(
timeseries_group__gentity_id=self.station.id
)
for t in timeseries:
t.end_date
with self.assertNumQueries(1):
self.station.last_update
station = models.Station.objects.get(id=self.station.id)
with self.assertNumQueries(0):
station.last_update
    def test_last_update_naive_cache_when_atleast_one_timeseries_has_end_date(self):
        # Since it's not possible to cache `None` values, make sure to create
# at least one timeseries with an end date value.
self._create_timeseries(2019, 7, 24, 11, 26, type=models.Timeseries.INITIAL)
# Make sure to fetch the `end_date` value of the timeseries
timeseries = models.Timeseries.objects.filter(
timeseries_group__gentity_id=self.station.id
)
for t in timeseries:
t.end_date
with self.assertNumQueries(1):
self.station.last_update_naive
station = models.Station.objects.get(id=self.station.id)
with self.assertNumQueries(0):
station.last_update_naive
class UnitOfMeasurementTestCase(TestCase):
def test_str(self):
unit = mommy.make(models.UnitOfMeasurement, symbol="mm")
self.assertEqual(str(unit), "mm")
def test_str_when_symbol_is_empty(self):
unit = mommy.make(models.UnitOfMeasurement, symbol="")
self.assertEqual(str(unit), str(unit.id))
class TimeZoneTestCase(TestCase):
def test_create(self):
time_zone = models.TimeZone(code="EET", utc_offset=120)
time_zone.save()
self.assertEqual(models.TimeZone.objects.first().code, "EET")
def test_update(self):
mommy.make(models.TimeZone)
time_zone = models.TimeZone.objects.first()
time_zone.code = "EET"
time_zone.save()
self.assertEqual(models.TimeZone.objects.first().code, "EET")
def test_delete(self):
mommy.make(models.TimeZone)
time_zone = models.TimeZone.objects.first()
time_zone.delete()
self.assertEqual(models.TimeZone.objects.count(), 0)
def test_str(self):
time_zone = mommy.make(models.TimeZone, code="EET", utc_offset=120)
self.assertEqual(str(time_zone), "EET (UTC+0200)")
def test_as_tzinfo(self):
time_zone = mommy.make(models.TimeZone, code="EET", utc_offset=120)
self.assertEqual(time_zone.as_tzinfo, dt.timezone(dt.timedelta(hours=2), "EET"))
class TimeseriesGroupGetNameTestCase(TestCase):
def setUp(self):
self.timeseries_group = mommy.make(
models.TimeseriesGroup, variable__descr="Temperature", name=""
)
def test_get_name_when_name_is_blank(self):
self.assertEqual(self.timeseries_group.get_name(), "Temperature")
def test_get_name_when_name_is_not_blank(self):
self.timeseries_group.name = "Temperature from sensor 1"
self.assertEqual(self.timeseries_group.get_name(), "Temperature from sensor 1")
def test_get_name_when_translations_are_inactive(self):
with translation.override(None):
self.timeseries_group.variable._current_language = None
self.assertEqual(
self.timeseries_group.get_name(),
f"Timeseries group {self.timeseries_group.id}",
)
class TimeseriesGroupDefaultTimeseriesTestCase(TestCase):
def setUp(self):
self.timeseries_group = mommy.make(
models.TimeseriesGroup, variable__descr="Temperature", name=""
)
self.initial_timeseries = self._make_timeseries(models.Timeseries.INITIAL)
self.checked_timeseries = self._make_timeseries(models.Timeseries.CHECKED)
self.regularized_timeseries = self._make_timeseries(
models.Timeseries.REGULARIZED
)
def _make_timeseries(self, type):
return mommy.make(
models.Timeseries, timeseries_group=self.timeseries_group, type=type
)
def test_returns_regularized(self):
self.assertEqual(
self.timeseries_group.default_timeseries, self.regularized_timeseries
)
def test_returns_checked(self):
self.regularized_timeseries.delete()
self.assertEqual(
self.timeseries_group.default_timeseries, self.checked_timeseries
)
def test_returns_initial(self):
self.regularized_timeseries.delete()
self.checked_timeseries.delete()
self.assertEqual(
self.timeseries_group.default_timeseries, self.initial_timeseries
)
def test_returns_none(self):
self.regularized_timeseries.delete()
self.checked_timeseries.delete()
self.initial_timeseries.delete()
self.assertIsNone(self.timeseries_group.default_timeseries)
def test_caching(self):
with self.assertNumQueries(1):
self.timeseries_group.default_timeseries
with self.assertNumQueries(0):
self.timeseries_group.default_timeseries
def test_num_queries(self):
with self.assertNumQueries(2):
# The following should cause two queries.
group = models.TimeseriesGroup.objects.prefetch_related(
"timeseries_set"
).first()
# The following should cause no queries since the time series have
# been prefetched.
group.default_timeseries
class TimeseriesGroupStartAndEndDateTestCase(TestCase, TimeseriesDataMixin):
def setUp(self):
self.create_timeseries()
def test_start_date(self):
self.assertEqual(
self.timeseries_group.start_date,
dt.datetime(2017, 11, 23, 17, 23, tzinfo=self.time_zone.as_tzinfo),
)
def test_start_date_cache(self):
# Make sure to retrieve the `default_timeseries` first.
self.timeseries_group.default_timeseries
with self.assertNumQueries(1):
self.timeseries_group.start_date
timeseries_group = models.TimeseriesGroup.objects.get(
id=self.timeseries_group.id
)
with self.assertNumQueries(0):
timeseries_group.start_date
def test_end_date(self):
self.assertEqual(
self.timeseries_group.end_date,
dt.datetime(2018, 11, 25, 1, 0, tzinfo=self.time_zone.as_tzinfo),
)
def test_end_date_cache(self):
# Make sure to retrieve the `default_timeseries` first.
self.timeseries_group.default_timeseries
with self.assertNumQueries(1):
self.timeseries_group.end_date
timeseries_group = models.TimeseriesGroup.objects.get(
id=self.timeseries_group.id
)
with self.assertNumQueries(0):
timeseries_group.end_date
def test_start_date_when_timeseries_is_empty(self):
self.timeseries.set_data(StringIO(""))
self.assertIsNone(self.timeseries_group.start_date)
def test_end_date_when_timeseries_is_empty(self):
self.timeseries.set_data(StringIO(""))
self.assertIsNone(self.timeseries_group.end_date)
def test_start_date_when_timeseries_does_not_exist(self):
self.timeseries.delete()
self.assertIsNone(self.timeseries_group.start_date)
def test_end_date_when_timeseries_does_not_exist(self):
self.timeseries.delete()
self.assertIsNone(self.timeseries_group.end_date)
def test_start_date_naive(self):
self.assertEqual(
self.timeseries_group.start_date_naive, dt.datetime(2017, 11, 23, 17, 23)
)
def test_start_date_naive_cache(self):
# Make sure to have access to the `start_date` first
self.timeseries_group.start_date
timeseries_group = models.TimeseriesGroup.objects.get(
id=self.timeseries_group.id
)
with self.assertNumQueries(0):
timeseries_group.start_date_naive
def test_end_date_naive(self):
self.assertEqual(
self.timeseries_group.end_date_naive, dt.datetime(2018, 11, 25, 1, 0)
)
def test_end_date_naive_cache(self):
# Make sure to have access to the `end_date` first
self.timeseries_group.end_date
timeseries_group = models.TimeseriesGroup.objects.get(
id=self.timeseries_group.id
)
with self.assertNumQueries(0):
timeseries_group.end_date_naive
def test_start_date_naive_when_timeseries_is_empty(self):
self.timeseries.set_data(StringIO(""))
self.assertIsNone(self.timeseries_group.start_date_naive)
def test_end_date_naive_when_timeseries_is_empty(self):
self.timeseries.set_data(StringIO(""))
self.assertIsNone(self.timeseries_group.end_date_naive)
class TimeseriesTestCase(TestCase):
def test_create(self):
timeseries_group = mommy.make(models.TimeseriesGroup)
timeseries = models.Timeseries(
type=models.Timeseries.AGGREGATED, timeseries_group=timeseries_group
)
timeseries.save()
self.assertEqual(
models.Timeseries.objects.first().type, models.Timeseries.AGGREGATED
)
def test_update(self):
mommy.make(models.Timeseries, type=models.Timeseries.INITIAL)
timeseries = models.Timeseries.objects.first()
timeseries.type = models.Timeseries.AGGREGATED
timeseries.save()
self.assertEqual(
models.Timeseries.objects.first().type, models.Timeseries.AGGREGATED
)
def test_delete(self):
mommy.make(models.Timeseries)
timeseries = models.Timeseries.objects.first()
timeseries.delete()
self.assertEqual(models.Timeseries.objects.count(), 0)
def test_str_initial(self):
self._test_str(type=models.Timeseries.INITIAL, result="Initial")
def test_str_checked(self):
self._test_str(type=models.Timeseries.CHECKED, result="Checked")
def test_str_regularized(self):
self._test_str(type=models.Timeseries.REGULARIZED, result="Regularized")
def test_str_aggregated(self):
self._test_str(type=models.Timeseries.AGGREGATED, result="Aggregated (H)")
def _make_timeseries(self, timeseries_group, type):
return mommy.make(
models.Timeseries,
timeseries_group=timeseries_group,
type=type,
time_step="H",
)
def _test_str(self, type, result):
timeseries_group = mommy.make(models.TimeseriesGroup, name="Temperature")
timeseries = self._make_timeseries(timeseries_group, type)
self.assertEqual(str(timeseries), result)
def test_only_one_initial_per_group(self):
timeseries_group = mommy.make(models.TimeseriesGroup, name="Temperature")
self._make_timeseries(timeseries_group, models.Timeseries.INITIAL)
with self.assertRaises(IntegrityError):
models.Timeseries(
timeseries_group=timeseries_group,
type=models.Timeseries.INITIAL,
time_step="D",
).save()
def test_only_one_checked_per_group(self):
timeseries_group = mommy.make(models.TimeseriesGroup, name="Temperature")
self._make_timeseries(timeseries_group, models.Timeseries.CHECKED)
with self.assertRaises(IntegrityError):
models.Timeseries(
timeseries_group=timeseries_group,
type=models.Timeseries.CHECKED,
time_step="D",
).save()
def test_only_one_regularized_per_group(self):
timeseries_group = mommy.make(models.TimeseriesGroup, name="Temperature")
self._make_timeseries(timeseries_group, models.Timeseries.REGULARIZED)
with self.assertRaises(IntegrityError):
models.Timeseries(
timeseries_group=timeseries_group,
type=models.Timeseries.REGULARIZED,
time_step="D",
).save()
def test_uniqueness(self):
timeseries_group = mommy.make(models.TimeseriesGroup, name="Temperature")
self._make_timeseries(timeseries_group, models.Timeseries.AGGREGATED)
with self.assertRaises(IntegrityError):
models.Timeseries(
timeseries_group=timeseries_group,
type=models.Timeseries.AGGREGATED,
time_step="H",
).save()
def test_many_aggregated_per_group(self):
timeseries_group = mommy.make(models.TimeseriesGroup, name="Temperature")
self._make_timeseries(timeseries_group, models.Timeseries.AGGREGATED)
models.Timeseries(
timeseries_group=timeseries_group,
type=models.Timeseries.AGGREGATED,
time_step="D",
).save()
def make_timeseries(*, start_date, end_date, **kwargs):
"""Make a test timeseries, setting start_date and end_date.
This is essentially the same as mommy.make(models.Timeseries, **kwargs), except
that it also creates two records with the specified dates.
"""
result = mommy.make(models.Timeseries, **kwargs)
result.timeseriesrecord_set.create(timestamp=start_date, value=0, flags="")
result.timeseriesrecord_set.create(timestamp=end_date, value=0, flags="")
return result
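# Illustrative usage of the helper above (field values are arbitrary):
#
#   ts = make_timeseries(
#       timeseries_group__time_zone__utc_offset=120,
#       start_date=dt.datetime(2019, 1, 1, tzinfo=dt.timezone.utc),
#       end_date=dt.datetime(2019, 1, 2, tzinfo=dt.timezone.utc),
#   )
#   assert ts.timeseriesrecord_set.count() == 2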
class TimeseriesDatesTestCase(TestCase):
def setUp(self):
self.timeseries = make_timeseries(
timeseries_group__time_zone__utc_offset=120,
timeseries_group__precision=2,
start_date=dt.datetime(2018, 11, 15, 16, 0, tzinfo=dt.timezone.utc),
end_date=dt.datetime(2018, 11, 17, 23, 0, tzinfo=dt.timezone.utc),
)
def test_start_date(self):
self.assertEqual(
self.timeseries.start_date,
dt.datetime(
2018,
11,
15,
18,
0,
tzinfo=self.timeseries.timeseries_group.time_zone.as_tzinfo,
),
)
def test_start_date_tzinfo(self):
self.assertEqual(
self.timeseries.start_date.tzinfo,
self.timeseries.timeseries_group.time_zone.as_tzinfo,
)
def test_start_date_cache(self):
with self.assertNumQueries(1):
self.timeseries.start_date
timeseries = models.Timeseries.objects.get(id=self.timeseries.id)
with self.assertNumQueries(0):
timeseries.start_date
def test_end_date(self):
self.assertEqual(
self.timeseries.end_date,
dt.datetime(
2018,
11,
18,
1,
0,
tzinfo=self.timeseries.timeseries_group.time_zone.as_tzinfo,
),
)
def test_end_date_tzinfo(self):
self.assertEqual(
self.timeseries.end_date.tzinfo,
self.timeseries.timeseries_group.time_zone.as_tzinfo,
)
def test_end_date_cache(self):
with self.assertNumQueries(1):
self.timeseries.end_date
timeseries = models.Timeseries.objects.get(id=self.timeseries.id)
with self.assertNumQueries(0):
timeseries.end_date
def test_start_date_naive(self):
self.assertEqual(
self.timeseries.start_date_naive, dt.datetime(2018, 11, 15, 18, 0)
)
def test_start_date_naive_cache(self):
with self.assertNumQueries(1):
self.timeseries.start_date_naive
timeseries = models.Timeseries.objects.get(id=self.timeseries.id)
with self.assertNumQueries(0):
timeseries.start_date_naive
def test_end_date_naive(self):
self.assertEqual(
self.timeseries.end_date_naive, dt.datetime(2018, 11, 18, 1, 0)
)
def test_end_date_naive_cache(self):
with self.assertNumQueries(1):
self.timeseries.end_date_naive
timeseries = models.Timeseries.objects.get(id=self.timeseries.id)
with self.assertNumQueries(0):
timeseries.end_date_naive
class DataTestCase(TestCase, TestTimeseriesMixin):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._create_test_timeseries("2017-11-23 17:23,1,\n2018-11-25 01:00,2,\n")
cls.expected_result = pd.DataFrame(
data={"value": [1.0, 2.0], "flags": ["", ""]},
columns=["value", "flags"],
index=[dt.datetime(2017, 11, 23, 17, 23), dt.datetime(2018, 11, 25, 1, 0)],
)
cls.expected_result.index.name = "date"
class TimeseriesGetDataTestCase(DataTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.data = cls.timeseries.get_data()
def test_abscissa(self):
self.assertAlmostEqual(self.data.location["abscissa"], 245648.96, places=2)
def test_ordinate(self):
self.assertAlmostEqual(self.data.location["ordinate"], 4331165.20, places=2)
def test_srid(self):
self.assertAlmostEqual(self.data.location["srid"], 2100)
def test_altitude(self):
self.assertAlmostEqual(self.data.location["altitude"], 219)
def test_time_step(self):
self.assertEqual(self.data.time_step, "H")
def test_unit(self):
self.assertEqual(self.data.unit, "mm")
def test_title(self):
self.assertEqual(self.data.title, "Daily temperature")
def test_timezone(self):
self.assertEqual(self.data.timezone, "IST (UTC+0530)")
def test_negative_timezone(self):
self.timeseries.timeseries_group.time_zone.code = "NST"
self.timeseries.timeseries_group.time_zone.utc_offset = -210
data = self.timeseries.get_data()
self.assertEqual(data.timezone, "NST (UTC-0330)")
def test_variable(self):
self.assertEqual(self.data.variable, "Temperature")
def test_precision(self):
self.assertEqual(self.data.precision, 1)
def test_comment(self):
self.assertEqual(self.data.comment, "Celduin\n\nThis timeseries group rocks")
def test_location_is_none(self):
self.timeseries.timeseries_group.gentity.geom = None
data = self.timeseries.get_data()
self.assertIsNone(data.location)
def test_data(self):
pd.testing.assert_frame_equal(self.data.data, self.expected_result)
class TimeseriesGetDataWithNullTestCase(TestCase, TestTimeseriesMixin):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls._create_test_timeseries("2017-11-23 17:23,,\n2018-11-25 01:00,2,\n")
cls.expected_result = pd.DataFrame(
data={"value": [float("NaN"), 2.0], "flags": ["", ""]},
columns=["value", "flags"],
index=[dt.datetime(2017, 11, 23, 17, 23), dt.datetime(2018, 11, 25, 1, 0)],
)
cls.expected_result.index.name = "date"
cls.data = cls.timeseries.get_data()
def test_data(self):
pd.testing.assert_frame_equal(self.data.data, self.expected_result)
class TimeseriesGetDataWithStartAndEndDateTestCase(DataTestCase):
def _check(self, start_index=None, end_index=None):
"""Check self.htimeseries.data against the initial timeseries sliced from
start_index to end_index.
"""
full_result = pd.DataFrame(
data={"value": [1.0, 2.0], "flags": ["", ""]},
columns=["value", "flags"],
index=[dt.datetime(2017, 11, 23, 17, 23), dt.datetime(2018, 11, 25, 1, 0)],
)
full_result.index.name = "date"
expected_result = full_result.iloc[start_index:end_index]
pd.testing.assert_frame_equal(self.ahtimeseries.data, expected_result)
def test_with_start_date_just_before_start_of_timeseries(self):
tzinfo = self.timeseries.timeseries_group.time_zone.as_tzinfo
start_date = dt.datetime(2017, 11, 23, 17, 22, tzinfo=tzinfo)
self.ahtimeseries = self.timeseries.get_data(start_date=start_date)
self._check()
def test_with_start_date_on_start_of_timeseries(self):
tzinfo = self.timeseries.timeseries_group.time_zone.as_tzinfo
start_date = dt.datetime(2017, 11, 23, 17, 23, tzinfo=tzinfo)
self.ahtimeseries = self.timeseries.get_data(start_date=start_date)
self._check()
def test_with_start_date_just_after_start_of_timeseries(self):
tzinfo = self.timeseries.timeseries_group.time_zone.as_tzinfo
start_date = dt.datetime(2017, 11, 23, 17, 24, tzinfo=tzinfo)
self.ahtimeseries = self.timeseries.get_data(start_date=start_date)
self._check(start_index=1)
def test_with_end_date_just_after_end_of_timeseries(self):
tzinfo = self.timeseries.timeseries_group.time_zone.as_tzinfo
end_date = dt.datetime(2018, 11, 25, 1, 1, tzinfo=tzinfo)
self.ahtimeseries = self.timeseries.get_data(end_date=end_date)
self._check()
def test_with_end_date_on_end_of_timeseries(self):
tzinfo = self.timeseries.timeseries_group.time_zone.as_tzinfo
end_date = dt.datetime(2018, 11, 25, 1, 0, tzinfo=tzinfo)
self.ahtimeseries = self.timeseries.get_data(end_date=end_date)
self._check()
def test_with_end_date_just_before_end_of_timeseries(self):
tzinfo = self.timeseries.timeseries_group.time_zone.as_tzinfo
end_date = dt.datetime(2018, 11, 25, 0, 59, tzinfo=tzinfo)
self.ahtimeseries = self.timeseries.get_data(end_date=end_date)
self._check(end_index=1)
@override_settings(
CACHES={"default": {"BACKEND": "django.core.cache.backends.locmem.LocMemCache"}}
)
class TimeseriesGetDataCacheTestCase(DataTestCase):
def test_cache(self):
# Make sure we've accessed gpoint already, otherwise it screws up the number of
# queries later
self.timeseries.timeseries_group.gentity.gpoint.altitude
self._get_data_and_check_num_queries(1)
self._get_data_and_check_num_queries(0)
# Check cache invalidation
self.timeseries.save()
self._get_data_and_check_num_queries(1)
def _get_data_and_check_num_queries(self, num_queries):
with self.assertNumQueries(num_queries):
data = self.timeseries.get_data()
pd.testing.assert_frame_equal(data.data, self.expected_result)
class TimeseriesSetDataTestCase(TestCase, TestTimeseriesMixin):
def setUp(self):
self._create_test_timeseries()
def test_call_with_file_object(self):
self.returned_length = self.timeseries.set_data(
StringIO("2017-11-23 17:23,1,\n" "2018-11-25 01:00,2,\n")
)
self._check_results()
def test_call_with_dataframe(self):
self.returned_length = self.timeseries.set_data(self._get_dataframe())
self._check_results()
def test_call_with_htimeseries(self):
self.returned_length = self.timeseries.set_data(
HTimeseries(self._get_dataframe())
)
self._check_results()
def _get_dataframe(self):
result = pd.DataFrame(
data={"value": [1.0, 2.0], "flags": ["", ""]},
columns=["value", "flags"],
index=[dt.datetime(2017, 11, 23, 17, 23), dt.datetime(2018, 11, 25, 1, 0)],
)
result.index.name = "date"
return result
def _check_results(self):
self.assertEqual(self.returned_length, 2)
self.assertEqual(
list(self.timeseries.timeseriesrecord_set.values()),
[
{
"timeseries_id": self.timeseries.id,
"timestamp": dt.datetime(
2017,
11,
23,
17,
23,
tzinfo=models.TimeZone(code="IST", utc_offset=330).as_tzinfo,
),
"value": 1.0,
"flags": "",
},
{
"timeseries_id": self.timeseries.id,
"timestamp": dt.datetime(
2018,
11,
25,
1,
0,
tzinfo=models.TimeZone(code="IST", utc_offset=330).as_tzinfo,
),
"value": 2.0,
"flags": "",
},
],
)
class TimeseriesAppendDataTestCase(TestCase, TestTimeseriesMixin):
def setUp(self):
self._create_test_timeseries("2016-01-01 00:00,42,\n")
def test_call_with_file_object(self):
returned_length = self.timeseries.append_data(
StringIO("2017-11-23 17:23,1,\n" "2018-11-25 01:00,2,\n")
)
self.assertEqual(returned_length, 2)
self._assert_wrote_data()
def test_call_with_dataframe(self):
returned_length = self.timeseries.append_data(self._get_dataframe())
self.assertEqual(returned_length, 2)
self._assert_wrote_data()
def test_call_with_htimeseries(self):
returned_length = self.timeseries.append_data(
HTimeseries(self._get_dataframe())
)
self.assertEqual(returned_length, 2)
self._assert_wrote_data()
def _get_dataframe(self):
result = pd.DataFrame(
data={"value": [1.0, 2.0], "flags": ["", ""]},
columns=["value", "flags"],
index=[dt.datetime(2017, 11, 23, 17, 23), dt.datetime(2018, 11, 25, 1, 0)],
)
result.index.name = "date"
return result
def _assert_wrote_data(self):
expected_result = pd.DataFrame(
data={"value": [42.0, 1.0, 2.0], "flags": ["", "", ""]},
columns=["value", "flags"],
index=[
dt.datetime(2016, 1, 1, 0, 0),
dt.datetime(2017, 11, 23, 17, 23),
dt.datetime(2018, 11, 25, 1, 0),
],
)
expected_result.index.name = "date"
pd.testing.assert_frame_equal(self.timeseries.get_data().data, expected_result)
class TimeseriesAppendDataToEmptyTimeseriesTestCase(TestCase, TestTimeseriesMixin):
def setUp(self):
self._create_test_timeseries()
def test_call_with_dataframe(self):
returned_length = self.timeseries.append_data(self._get_dataframe())
self.assertEqual(returned_length, 2)
self._assert_wrote_data()
def _get_dataframe(self):
result = pd.DataFrame(
data={"value": [1.0, 2.0], "flags": ["", ""]},
columns=["value", "flags"],
index=[dt.datetime(2017, 11, 23, 17, 23), dt.datetime(2018, 11, 25, 1, 0)],
)
result.index.name = "date"
return result
def _assert_wrote_data(self):
pd.testing.assert_frame_equal(
self.timeseries.get_data().data, self._get_dataframe()
)
class TimeseriesAppendErrorTestCase(TestCase, TestTimeseriesMixin):
def test_does_not_update_if_data_to_append_are_not_later(self):
self._create_test_timeseries("2018-01-01 00:00,42,\n")
with self.assertRaises(IntegrityError):
self.timeseries.append_data(
StringIO("2017-11-23 17:23,1,\n2018-11-25 01:00,2,\n")
)
class TimeseriesGetLastRecordAsStringTestCase(TestCase, TestTimeseriesMixin):
def test_when_record_exists(self):
self._create_test_timeseries("2017-11-23 17:23,1,\n2018-11-25 01:00,2,\n")
self.assertEqual(
self.timeseries.get_last_record_as_string(), "2018-11-25 01:00,2.0,"
)
def test_when_record_does_not_exist(self):
self._create_test_timeseries()
self.assertEqual(self.timeseries.get_last_record_as_string(), "")
class TimeseriesExecutesTriggersUponAddingRecordsTestCase(DataTestCase):
def setUp(self):
self.trigger = mock.MagicMock()
post_save.connect(self.trigger, sender="enhydris.Timeseries")
def tearDown(self):
post_save.disconnect(self.trigger, sender="enhydris.Timeseries")
def test_calls_trigger_upon_setting_data(self):
self.timeseries.set_data(StringIO("2020-10-26 09:34,18,\n"))
self.trigger.assert_called()
def test_calls_trigger_upon_appending_data(self):
self.timeseries.append_data(StringIO("2020-10-26 09:34,18,\n"))
self.trigger.assert_called()
class TimestepTestCase(TestCase):
def setUp(self):
self.timeseries = mommy.make(models.Timeseries)
def set_time_step(self, time_step):
self.timeseries.time_step = time_step
self.timeseries.save()
def test_min(self):
self.set_time_step("27min")
self.assertEqual(models.Timeseries.objects.first().time_step, "27min")
def test_hour(self):
self.set_time_step("3H")
self.assertEqual(models.Timeseries.objects.first().time_step, "3H")
def test_day(self):
self.set_time_step("3D")
self.assertEqual(models.Timeseries.objects.first().time_step, "3D")
def test_month(self):
self.set_time_step("3M")
self.assertEqual(models.Timeseries.objects.first().time_step, "3M")
def test_3Y(self):
self.set_time_step("3Y")
self.assertEqual(models.Timeseries.objects.first().time_step, "3Y")
def test_Y(self):
self.set_time_step("Y")
self.assertEqual(models.Timeseries.objects.first().time_step, "Y")
def test_garbage(self):
with self.assertRaisesRegex(ValueError, '"hello" is not a valid time step'):
self.set_time_step("hello")
def test_wrong_number(self):
with self.assertRaisesRegex(ValueError, '"FM" is not a valid time step'):
self.set_time_step("FM")
def test_wrong_unit(self):
with self.assertRaisesRegex(ValueError, '"3B" is not a valid time step'):
self.set_time_step("3B")
class TimeseriesRecordTestCase(TestCase, TestTimeseriesMixin):
def test_str(self):
self._create_test_timeseries("2017-11-23 17:23,3.14159,\n")
record = models.TimeseriesRecord.objects.first()
self.assertAlmostEqual(record.value, 3.14159)
self.assertEqual(str(record), "2017-11-23 17:23,3.1,")
def test_str_when_no_value(self):
self._create_test_timeseries("2017-11-23 17:23,,\n")
record = models.TimeseriesRecord.objects.first()
record.save()
self.assertEqual(str(record), "2017-11-23 17:23,,")
class TimeseriesRecordBulkInsertTestCase(TestCase, TestTimeseriesMixin):
@classmethod
def setUpTestData(cls):
cls._create_test_timeseries()
ahtimeseries = HTimeseries(
StringIO("2020-09-08 20:00,15.7,,\n2020-09-08 21:00,,\n")
)
models.TimeseriesRecord.bulk_insert(cls.timeseries, ahtimeseries)
cls.timeseries_records = models.TimeseriesRecord.objects.all()
def test_first_value(self):
self.assertAlmostEqual(self.timeseries_records[0].value, 15.7)
def test_empty_value(self):
self.assertIsNone(self.timeseries_records[1].value)
class TimeseriesDatesCacheInvalidationTestCase(TestCase):
def setUp(self):
self.station = mommy.make(models.Station, name="Celduin")
self.timeseries_group = mommy.make(models.TimeseriesGroup, gentity=self.station)
self.timeseries = mommy.make(
models.Timeseries,
timeseries_group=self.timeseries_group,
type=models.Timeseries.INITIAL,
)
def test_station_last_update_cache_invalidation(self):
with self.assertNumQueries(2):
self.station.last_update
# Check cache invalidation
self.timeseries.save()
with self.assertNumQueries(2):
self.station.last_update
def test_station_last_update_naive_cache_invalidation(self):
with self.assertNumQueries(2):
self.station.last_update_naive
# Check cache invalidation
self.timeseries.save()
with self.assertNumQueries(2):
self.station.last_update_naive
def test_timeseries_group_start_date_cache_invalidation(self):
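        # Make sure to retrieve the `default_timeseries` first.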
self.timeseries_group.default_timeseries
with self.assertNumQueries(1):
self.timeseries_group.start_date
# Check cache invalidation
self.timeseries.save()
self.timeseries_group.default_timeseries
with self.assertNumQueries(1):
self.timeseries_group.start_date
def test_timeseries_group_start_date_naive_cache_invalidation(self):
# Make sure to retrieve the `default_timeseries` first.
self.timeseries_group.default_timeseries
with self.assertNumQueries(1):
self.timeseries_group.start_date_naive
# Check cache invalidation
self.timeseries.save()
self.timeseries_group.default_timeseries
with self.assertNumQueries(1):
self.timeseries_group.start_date_naive
def test_timeseries_group_end_date_cache_invalidation(self):
# Make sure to retrieve the `default_timeseries` first.
self.timeseries_group.default_timeseries
with self.assertNumQueries(1):
self.timeseries_group.end_date
# Check cache invalidation
self.timeseries.save()
self.timeseries_group.default_timeseries
with self.assertNumQueries(1):
self.timeseries_group.end_date
def test_timeseries_group_end_date_naive_cache_invalidation(self):
# Make sure to retrieve the `default_timeseries` first.
self.timeseries_group.default_timeseries
with self.assertNumQueries(1):
self.timeseries_group.end_date_naive
# Check cache invalidation
self.timeseries.save()
self.timeseries_group.default_timeseries
with self.assertNumQueries(1):
self.timeseries_group.end_date_naive
def test_timeseries_start_date_cache_invalidation(self):
with self.assertNumQueries(1):
self.timeseries.start_date
# Check cache invalidation
self.timeseries.save()
with self.assertNumQueries(1):
self.timeseries.start_date
def test_timeseries_start_date_naive_cache_invalidation(self):
with self.assertNumQueries(1):
self.timeseries.start_date_naive
# Check cache invalidation
self.timeseries.save()
with self.assertNumQueries(1):
self.timeseries.start_date_naive
def test_timeseries_end_date_cache_invalidation(self):
with self.assertNumQueries(1):
self.timeseries.end_date
# Check cache invalidation
self.timeseries.save()
with self.assertNumQueries(1):
self.timeseries.end_date
def test_timeseries_end_date_naive_cache_invalidation(self):
with self.assertNumQueries(1):
self.timeseries.end_date_naive
# Check cache invalidation
self.timeseries.save()
with self.assertNumQueries(1):
self.timeseries.end_date_naive
| agpl-3.0 |
cemonatk/tools | DSBSCModulation.py | 2 | 1699 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__date__ = '10.10.2016'
__author__ = 'cemonatk'
from numpy import cos,pi,sin,arange
from matplotlib.pyplot import plot,show,title,subplot,suptitle,xlabel,imshow
from matplotlib.image import imread
from Tkinter import *
def DSBSCModulate(fm,fc,aralik):
suptitle('Analog-Lab')
t = arange(1,aralik)
tc = cos(2*pi*fc*t)
ms = sin(2*pi*fm*t)
DSB = tc*ms
subplot(2,2,1)
plot(tc)
title('Carrier Wave')
subplot(2,2,2)
plot(ms)
	title('Message Signal')
subplot(2,1,2)
plot(DSB)
	title('DSB-SC Modulation')
show()
def AmModulation():
pass
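# AmModulation above is an empty stub in the original file. The sketch below is a
# hedged assumption, not the author's implementation: it shows one conventional way
# to build a standard AM signal, (1 + m*cos(2*pi*fm*t)) * cos(2*pi*fc*t), reusing
# the plotting style of DSBSCModulate. The name AmModulationSketch and the
# modulation index argument m are hypothetical additions.
def AmModulationSketch(fm, fc, aralik, m=0.5):
	t = arange(1, aralik)
	carrier = cos(2 * pi * fc * t)
	message = cos(2 * pi * fm * t)
	am = (1 + m * message) * carrier
	subplot(2, 1, 1)
	plot(message)
	title('Message Signal')
	subplot(2, 1, 2)
	plot(am)
	title('Conventional AM (m=%.2f)' % m)
	show()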
def Show():
img = imread('picture.jpg')
imshow(img)
	title('You really need to stop using MATLAB right away...')
show()
def StrToFloat():
	carrier = 1.0 / float(carrierfreq.get())
	message = 1.0 / float(messagefreq.get())
	# Use a separate local name here: rebinding `aralik` would shadow the global
	# Tkinter StringVar and raise UnboundLocalError before .get() could run.
	try:
		span = int(aralik.get())
	except ValueError:
		span = 360
	DSBSCModulate(message, carrier, span)
kok = Tk()
kok.title("Analog-Lab")
carrierfreq = StringVar()
e = Entry(kok, textvariable=carrierfreq)
e.pack()
carrierfreq.set("Carrier Freq")
messagefreq = StringVar()
e = Entry(kok, textvariable=messagefreq)
e.pack()
messagefreq.set("message freq")
aralik = StringVar()
e = Entry(kok, textvariable=aralik)
e.pack()
aralik.set("Aralık(360 varsayılan)")
Button(kok, text='Draw DSB-SC', height=2, width=35, command=StrToFloat).pack()
Button(kok, text='Show Hidden Surprise', height=2, width=35, command=Show).pack()
Button(kok, text='Exit', command=kok.destroy).pack()
kok.mainloop()
| mit |
neale/CS-program | 434-MachineLearning/final_project/linearClassifier/sklearn/metrics/tests/test_ranking.py | 32 | 41905 | from __future__ import division, print_function
import numpy as np
from itertools import product
import warnings
from scipy.sparse import csr_matrix
from sklearn import datasets
from sklearn import svm
from sklearn import ensemble
from sklearn.datasets import make_multilabel_classification
from sklearn.random_projection import sparse_random_matrix
from sklearn.utils.validation import check_array, check_consistent_length
from sklearn.utils.validation import check_random_state
from sklearn.utils.testing import assert_raises, clean_warning_registry
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_warns
from sklearn.metrics import auc
from sklearn.metrics import average_precision_score
from sklearn.metrics import coverage_error
from sklearn.metrics import label_ranking_average_precision_score
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import label_ranking_loss
from sklearn.metrics import roc_auc_score
from sklearn.metrics import roc_curve
from sklearn.exceptions import UndefinedMetricWarning
###############################################################################
# Utilities for testing
def make_prediction(dataset=None, binary=False):
"""Make some classification predictions on a toy dataset using a SVC
If binary is True restrict to a binary classification problem instead of a
multiclass classification problem
"""
if dataset is None:
# import some data to play with
dataset = datasets.load_iris()
X = dataset.data
y = dataset.target
if binary:
# restrict to a binary classification task
X, y = X[y < 2], y[y < 2]
n_samples, n_features = X.shape
p = np.arange(n_samples)
rng = check_random_state(37)
rng.shuffle(p)
X, y = X[p], y[p]
half = int(n_samples / 2)
# add noisy features to make the problem harder and avoid perfect results
rng = np.random.RandomState(0)
X = np.c_[X, rng.randn(n_samples, 200 * n_features)]
# run classifier, get class probabilities and label predictions
clf = svm.SVC(kernel='linear', probability=True, random_state=0)
probas_pred = clf.fit(X[:half], y[:half]).predict_proba(X[half:])
if binary:
# only interested in probabilities of the positive case
# XXX: do we really want a special API for the binary case?
probas_pred = probas_pred[:, 1]
y_pred = clf.predict(X[half:])
y_true = y[half:]
return y_true, y_pred, probas_pred
###############################################################################
# Tests
def _auc(y_true, y_score):
"""Alternative implementation to check for correctness of
`roc_auc_score`."""
pos_label = np.unique(y_true)[1]
# Count the number of times positive samples are correctly ranked above
# negative samples.
pos = y_score[y_true == pos_label]
neg = y_score[y_true != pos_label]
diff_matrix = pos.reshape(1, -1) - neg.reshape(-1, 1)
n_correct = np.sum(diff_matrix > 0)
return n_correct / float(len(pos) * len(neg))
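# Illustrative sketch of the pairwise interpretation used by _auc above: with two
# positives scored 0.35 and 0.8 and two negatives scored 0.1 and 0.4, three of the
# four (positive, negative) pairs are ordered correctly, so the AUC is 3/4 = 0.75.
# The helper name _demo_auc_pairwise_interpretation is a hypothetical addition and
# is not collected by the test runner.
def _demo_auc_pairwise_interpretation():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    assert_almost_equal(_auc(y_true, y_score), 0.75)
    assert_almost_equal(roc_auc_score(y_true, y_score), 0.75)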
def _average_precision(y_true, y_score):
"""Alternative implementation to check for correctness of
`average_precision_score`."""
pos_label = np.unique(y_true)[1]
n_pos = np.sum(y_true == pos_label)
order = np.argsort(y_score)[::-1]
y_score = y_score[order]
y_true = y_true[order]
score = 0
for i in range(len(y_score)):
if y_true[i] == pos_label:
# Compute precision up to document i
            # i.e., the percentage of relevant documents up to document i.
prec = 0
for j in range(0, i + 1):
if y_true[j] == pos_label:
prec += 1.0
prec /= (i + 1.0)
score += prec
return score / n_pos
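# Illustrative sketch of the computation in _average_precision above: sorting the
# scores [0.1, 0.4, 0.35, 0.8] in decreasing order gives 0.8 (pos), 0.4 (neg),
# 0.35 (pos), 0.1 (neg); the precisions at the two relevant positions are 1/1 and
# 2/3, so the average precision is (1 + 2/3) / 2, about 0.833. The helper name
# below is a hypothetical addition and is not collected by the test runner.
def _demo_average_precision_interpretation():
    y_true = np.array([0, 0, 1, 1])
    y_score = np.array([0.1, 0.4, 0.35, 0.8])
    assert_almost_equal(_average_precision(y_true, y_score), (1.0 + 2.0 / 3.0) / 2)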
def test_roc_curve():
# Test Area under Receiver Operating Characteristic (ROC) curve
y_true, _, probas_pred = make_prediction(binary=True)
expected_auc = _auc(y_true, probas_pred)
for drop in [True, False]:
fpr, tpr, thresholds = roc_curve(y_true, probas_pred,
drop_intermediate=drop)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, expected_auc, decimal=2)
assert_almost_equal(roc_auc, roc_auc_score(y_true, probas_pred))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_end_points():
    # Make sure that roc_curve returns a curve that starts at 0 and ends at
    # 1, even in corner cases
rng = np.random.RandomState(0)
y_true = np.array([0] * 50 + [1] * 50)
y_pred = rng.randint(3, size=100)
fpr, tpr, thr = roc_curve(y_true, y_pred, drop_intermediate=True)
assert_equal(fpr[0], 0)
assert_equal(fpr[-1], 1)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thr.shape)
def test_roc_returns_consistency():
# Test whether the returned threshold matches up with tpr
# make small toy dataset
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred)
# use the given thresholds to determine the tpr
tpr_correct = []
for t in thresholds:
tp = np.sum((probas_pred >= t) & y_true)
p = np.sum(y_true)
tpr_correct.append(1.0 * tp / p)
# compare tpr and tpr_correct to see if the thresholds' order was correct
assert_array_almost_equal(tpr, tpr_correct, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_nonrepeating_thresholds():
# Test to ensure that we don't return spurious repeating thresholds.
# Duplicated thresholds can arise due to machine precision issues.
dataset = datasets.load_digits()
X = dataset['data']
y = dataset['target']
# This random forest classifier can only return probabilities
# significant to two decimal places
clf = ensemble.RandomForestClassifier(n_estimators=100, random_state=0)
# How well can the classifier predict whether a digit is less than 5?
# This task contributes floating point roundoff errors to the probabilities
train, test = slice(None, None, 2), slice(1, None, 2)
probas_pred = clf.fit(X[train], y[train]).predict_proba(X[test])
y_score = probas_pred[:, :5].sum(axis=1) # roundoff errors begin here
y_true = [yy < 5 for yy in y[test]]
# Check for repeating values in the thresholds
fpr, tpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=False)
assert_equal(thresholds.size, np.unique(np.round(thresholds, 2)).size)
def test_roc_curve_multi():
# roc_curve not applicable for multi-class problems
y_true, _, probas_pred = make_prediction(binary=False)
assert_raises(ValueError, roc_curve, y_true, probas_pred)
def test_roc_curve_confidence():
# roc_curve for confidence scores
y_true, _, probas_pred = make_prediction(binary=True)
fpr, tpr, thresholds = roc_curve(y_true, probas_pred - 0.5)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.90, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_hard():
# roc_curve for hard decisions
y_true, pred, probas_pred = make_prediction(binary=True)
# always predict one
trivial_pred = np.ones(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# always predict zero
trivial_pred = np.zeros(y_true.shape)
fpr, tpr, thresholds = roc_curve(y_true, trivial_pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.50, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# hard decisions
fpr, tpr, thresholds = roc_curve(y_true, pred)
roc_auc = auc(fpr, tpr)
assert_array_almost_equal(roc_auc, 0.78, decimal=2)
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_one_label():
y_true = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
y_pred = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# assert there are warnings
w = UndefinedMetricWarning
fpr, tpr, thresholds = assert_warns(w, roc_curve, y_true, y_pred)
# all true labels, all fpr should be nan
assert_array_equal(fpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
# assert there are warnings
fpr, tpr, thresholds = assert_warns(w, roc_curve,
[1 - x for x in y_true],
y_pred)
# all negative labels, all tpr should be nan
assert_array_equal(tpr,
np.nan * np.ones(len(thresholds)))
assert_equal(fpr.shape, tpr.shape)
assert_equal(fpr.shape, thresholds.shape)
def test_roc_curve_toydata():
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [0, 1]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1, 1])
assert_array_almost_equal(fpr, [0, 0, 1])
assert_almost_equal(roc_auc, 0.)
y_true = [1, 0]
y_score = [1, 1]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, 0.5)
y_true = [1, 0]
y_score = [1, 0]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [1, 1])
assert_almost_equal(roc_auc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
tpr, fpr, _ = roc_curve(y_true, y_score)
roc_auc = roc_auc_score(y_true, y_score)
assert_array_almost_equal(tpr, [0, 1])
assert_array_almost_equal(fpr, [0, 1])
assert_almost_equal(roc_auc, .5)
y_true = [0, 0]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no positive sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [0., 0.5, 1.])
assert_array_almost_equal(fpr, [np.nan, np.nan, np.nan])
y_true = [1, 1]
y_score = [0.25, 0.75]
# assert UndefinedMetricWarning because of no negative sample in y_true
tpr, fpr, _ = assert_warns(UndefinedMetricWarning, roc_curve, y_true, y_score)
assert_raises(ValueError, roc_auc_score, y_true, y_score)
assert_array_almost_equal(tpr, [np.nan, np.nan])
assert_array_almost_equal(fpr, [0.5, 1.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 1.)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(ValueError, roc_auc_score, y_true, y_score, average="macro")
assert_raises(ValueError, roc_auc_score, y_true, y_score,
average="weighted")
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0.5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0.5)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), 0)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), 0)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(roc_auc_score(y_true, y_score, average="macro"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="weighted"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="samples"), .5)
assert_almost_equal(roc_auc_score(y_true, y_score, average="micro"), .5)
def test_roc_curve_drop_intermediate():
# Test that drop_intermediate drops the correct thresholds
y_true = [0, 0, 0, 0, 1, 1]
y_score = [0., 0.2, 0.5, 0.6, 0.7, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds, [1., 0.7, 0.])
# Test dropping thresholds with repeating scores
y_true = [0, 0, 0, 0, 0, 0, 0,
1, 1, 1, 1, 1, 1]
y_score = [0., 0.1, 0.6, 0.6, 0.7, 0.8, 0.9,
0.6, 0.7, 0.8, 0.9, 0.9, 1.0]
tpr, fpr, thresholds = roc_curve(y_true, y_score, drop_intermediate=True)
assert_array_almost_equal(thresholds,
[1.0, 0.9, 0.7, 0.6, 0.])
def test_auc():
# Test Area Under Curve (AUC) computation
x = [0, 1]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0]
y = [0, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [1, 0, 0]
y = [0, 1, 1]
assert_array_almost_equal(auc(x, y), 0.5)
x = [0, 1]
y = [1, 1]
assert_array_almost_equal(auc(x, y), 1)
x = [0, 0.5, 1]
y = [0, 0.5, 1]
assert_array_almost_equal(auc(x, y), 0.5)
def test_auc_duplicate_values():
# Test Area Under Curve (AUC) computation with duplicate values
# auc() was previously sorting the x and y arrays according to the indices
# from numpy.argsort(x), which was reordering the tied 0's in this example
# and resulting in an incorrect area computation. This test detects the
# error.
x = [-2.0, 0.0, 0.0, 0.0, 1.0]
y1 = [2.0, 0.0, 0.5, 1.0, 1.0]
y2 = [2.0, 1.0, 0.0, 0.5, 1.0]
y3 = [2.0, 1.0, 0.5, 0.0, 1.0]
for y in (y1, y2, y3):
assert_array_almost_equal(auc(x, y, reorder=True), 3.0)
def test_auc_errors():
# Incompatible shapes
assert_raises(ValueError, auc, [0.0, 0.5, 1.0], [0.1, 0.2])
# Too few x values
assert_raises(ValueError, auc, [0.0], [0.1])
# x is not in order
assert_raises(ValueError, auc, [1.0, 0.0, 0.5], [0.0, 0.0, 0.0])
def test_auc_score_non_binary_class():
# Test that roc_auc_score function returns an error when trying
# to compute AUC for non-binary class values.
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
clean_warning_registry()
with warnings.catch_warnings(record=True):
rng = check_random_state(404)
y_pred = rng.rand(10)
# y_true contains only one class value
y_true = np.zeros(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
y_true = -np.ones(10, dtype="int")
assert_raise_message(ValueError, "ROC AUC score is not defined",
roc_auc_score, y_true, y_pred)
# y_true contains three different class values
y_true = rng.randint(0, 3, size=10)
assert_raise_message(ValueError, "multiclass format is not supported",
roc_auc_score, y_true, y_pred)
def test_precision_recall_curve():
y_true, _, probas_pred = make_prediction(binary=True)
_test_precision_recall_curve(y_true, probas_pred)
# Use {-1, 1} for labels; make sure original labels aren't modified
y_true[np.where(y_true == 0)] = -1
y_true_copy = y_true.copy()
_test_precision_recall_curve(y_true, probas_pred)
assert_array_equal(y_true_copy, y_true)
labels = [1, 0, 0, 1]
predict_probas = [1, 2, 3, 4]
p, r, t = precision_recall_curve(labels, predict_probas)
assert_array_almost_equal(p, np.array([0.5, 0.33333333, 0.5, 1., 1.]))
assert_array_almost_equal(r, np.array([1., 0.5, 0.5, 0.5, 0.]))
assert_array_almost_equal(t, np.array([1, 2, 3, 4]))
assert_equal(p.size, r.size)
assert_equal(p.size, t.size + 1)
def test_precision_recall_curve_pos_label():
y_true, _, probas_pred = make_prediction(binary=False)
pos_label = 2
p, r, thresholds = precision_recall_curve(y_true,
probas_pred[:, pos_label],
pos_label=pos_label)
p2, r2, thresholds2 = precision_recall_curve(y_true == pos_label,
probas_pred[:, pos_label])
assert_array_almost_equal(p, p2)
assert_array_almost_equal(r, r2)
assert_array_almost_equal(thresholds, thresholds2)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def _test_precision_recall_curve(y_true, probas_pred):
    # Test Precision-Recall and area under the PR curve
p, r, thresholds = precision_recall_curve(y_true, probas_pred)
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.85, 2)
assert_array_almost_equal(precision_recall_auc,
average_precision_score(y_true, probas_pred))
assert_almost_equal(_average_precision(y_true, probas_pred),
precision_recall_auc, 1)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
# Smoke test in the case of proba having only one value
p, r, thresholds = precision_recall_curve(y_true,
np.zeros_like(probas_pred))
precision_recall_auc = auc(r, p)
assert_array_almost_equal(precision_recall_auc, 0.75, 3)
assert_equal(p.size, r.size)
assert_equal(p.size, thresholds.size + 1)
def test_precision_recall_curve_errors():
# Contains non-binary labels
assert_raises(ValueError, precision_recall_curve,
[0, 1, 2], [[0.0], [1.0], [1.0]])
def test_precision_recall_curve_toydata():
with np.errstate(all="raise"):
# Binary classification
y_true = [0, 1]
y_score = [0, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [0, 1]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 0., 1.])
assert_array_almost_equal(r, [1., 0., 0.])
assert_almost_equal(auc_prc, 0.25)
y_true = [1, 0]
y_score = [1, 1]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1., 0])
assert_almost_equal(auc_prc, .75)
y_true = [1, 0]
y_score = [1, 0]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [1, 1])
assert_array_almost_equal(r, [1, 0])
assert_almost_equal(auc_prc, 1.)
y_true = [1, 0]
y_score = [0.5, 0.5]
p, r, _ = precision_recall_curve(y_true, y_score)
auc_prc = average_precision_score(y_true, y_score)
assert_array_almost_equal(p, [0.5, 1])
assert_array_almost_equal(r, [1, 0.])
assert_almost_equal(auc_prc, .75)
y_true = [0, 0]
y_score = [0.25, 0.75]
assert_raises(Exception, precision_recall_curve, y_true, y_score)
assert_raises(Exception, average_precision_score, y_true, y_score)
y_true = [1, 1]
y_score = [0.25, 0.75]
p, r, _ = precision_recall_curve(y_true, y_score)
assert_almost_equal(average_precision_score(y_true, y_score), 1.)
assert_array_almost_equal(p, [1., 1., 1.])
assert_array_almost_equal(r, [1, 0.5, 0.])
# Multi-label classification task
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [0, 1]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 1.)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 1.)
y_true = np.array([[0, 1], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_raises(Exception, average_precision_score, y_true, y_score,
average="macro")
assert_raises(Exception, average_precision_score, y_true, y_score,
average="weighted")
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.625)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.625)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0, 1], [1, 0]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.25)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.25)
y_true = np.array([[1, 0], [0, 1]])
y_score = np.array([[0.5, 0.5], [0.5, 0.5]])
assert_almost_equal(average_precision_score(y_true, y_score,
average="macro"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="weighted"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="samples"), 0.75)
assert_almost_equal(average_precision_score(y_true, y_score,
average="micro"), 0.75)
def test_score_scale_invariance():
# Test that average_precision_score and roc_auc_score are invariant by
# the scaling or shifting of probabilities
y_true, _, probas_pred = make_prediction(binary=True)
roc_auc = roc_auc_score(y_true, probas_pred)
roc_auc_scaled = roc_auc_score(y_true, 100 * probas_pred)
roc_auc_shifted = roc_auc_score(y_true, probas_pred - 10)
assert_equal(roc_auc, roc_auc_scaled)
assert_equal(roc_auc, roc_auc_shifted)
pr_auc = average_precision_score(y_true, probas_pred)
pr_auc_scaled = average_precision_score(y_true, 100 * probas_pred)
pr_auc_shifted = average_precision_score(y_true, probas_pred - 10)
assert_equal(pr_auc, pr_auc_scaled)
assert_equal(pr_auc, pr_auc_shifted)
def check_lrap_toy(lrap_score):
# Check on several small example that it works
assert_almost_equal(lrap_score([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1]], [[0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 1) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.75, 0.5, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.75, 0.5, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 1 / 3)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.5, 0.75, 0.25]]),
(1 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 1 / 2)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.5, 0.75, 0.25]]),
(1 / 2 + 2 / 3) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 1)
# Tie handling
assert_almost_equal(lrap_score([[1, 0]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1]], [[0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[1, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 0.5)
assert_almost_equal(lrap_score([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1 / 3)
assert_almost_equal(lrap_score([[1, 0, 1]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.25, 0.5, 0.5]]),
(2 / 3 + 1 / 2) / 2)
assert_almost_equal(lrap_score([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(lrap_score([[1, 1, 0]], [[0.5, 0.5, 0.5]]), 2 / 3)
assert_almost_equal(lrap_score([[1, 1, 1, 0]], [[0.5, 0.5, 0.5, 0.5]]),
3 / 4)
def check_zero_or_all_relevant_labels(lrap_score):
random_state = check_random_state(0)
for n_labels in range(2, 5):
y_score = random_state.uniform(size=(1, n_labels))
y_score_ties = np.zeros_like(y_score)
# No relevant labels
y_true = np.zeros((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Only relevant labels
y_true = np.ones((1, n_labels))
assert_equal(lrap_score(y_true, y_score), 1.)
assert_equal(lrap_score(y_true, y_score_ties), 1.)
# Degenerate case: only one label
assert_almost_equal(lrap_score([[1], [0], [1], [0]],
[[0.5], [0.5], [0.5], [0.5]]), 1.)
def check_lrap_error_raised(lrap_score):
# Raise value error if not appropriate format
assert_raises(ValueError, lrap_score,
[0, 1, 0], [0.25, 0.3, 0.2])
assert_raises(ValueError, lrap_score, [0, 1, 2],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
assert_raises(ValueError, lrap_score, [(0), (1), (2)],
[[0.25, 0.75, 0.0], [0.7, 0.3, 0.0], [0.8, 0.2, 0.0]])
# Check that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, lrap_score, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, lrap_score, [[0, 1], [0, 1]], [[0], [1]])
def check_lrap_only_ties(lrap_score):
# Check tie handling in score
# Basic check with only ties and increasing label space
for n_labels in range(2, 10):
y_score = np.ones((1, n_labels))
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
# Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
n_relevant / n_labels)
def check_lrap_without_tie_and_increasing_score(lrap_score):
    # Check that label ranking average precision works for various label space
    # sizes: basic check with an increasing label space size and a decreasing score
for n_labels in range(2, 10):
y_score = n_labels - (np.arange(n_labels).reshape((1, n_labels)) + 1)
# First and last
y_true = np.zeros((1, n_labels))
y_true[0, 0] = 1
y_true[0, -1] = 1
assert_almost_equal(lrap_score(y_true, y_score),
(2 / n_labels + 1) / 2)
        # Check for a growing number of consecutive relevant labels
for n_relevant in range(1, n_labels):
            # Check for a bunch of positions
for pos in range(n_labels - n_relevant):
y_true = np.zeros((1, n_labels))
y_true[0, pos:pos + n_relevant] = 1
assert_almost_equal(lrap_score(y_true, y_score),
sum((r + 1) / ((pos + r + 1) * n_relevant)
for r in range(n_relevant)))
def _my_lrap(y_true, y_score):
"""Simple implementation of label ranking average precision"""
check_consistent_length(y_true, y_score)
y_true = check_array(y_true)
y_score = check_array(y_score)
n_samples, n_labels = y_true.shape
score = np.empty((n_samples, ))
for i in range(n_samples):
        # The best rank corresponds to 1. Ranks higher than 1 are worse.
        # The best inverse ranking corresponds to n_labels.
unique_rank, inv_rank = np.unique(y_score[i], return_inverse=True)
n_ranks = unique_rank.size
rank = n_ranks - inv_rank
        # Ranks need to be corrected to take ties into account:
        # e.g. a rank-1 tie (ex aequo) means that both labels are rank 2.
corr_rank = np.bincount(rank, minlength=n_ranks + 1).cumsum()
rank = corr_rank[rank]
relevant = y_true[i].nonzero()[0]
if relevant.size == 0 or relevant.size == n_labels:
score[i] = 1
continue
score[i] = 0.
for label in relevant:
# Let's count the number of relevant label with better rank
# (smaller rank).
n_ranked_above = sum(rank[r] <= rank[label] for r in relevant)
# Weight by the rank of the actual label
score[i] += n_ranked_above / rank[label]
score[i] /= relevant.size
return score.mean()
def check_alternative_lrap_implementation(lrap_score, n_classes=5,
n_samples=20, random_state=0):
_, y_true = make_multilabel_classification(n_features=1,
allow_unlabeled=False,
random_state=random_state,
n_classes=n_classes,
n_samples=n_samples)
# Score with ties
y_score = sparse_random_matrix(n_components=y_true.shape[0],
n_features=y_true.shape[1],
random_state=random_state)
if hasattr(y_score, "toarray"):
y_score = y_score.toarray()
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
# Uniform score
random_state = check_random_state(random_state)
y_score = random_state.uniform(size=(n_samples, n_classes))
score_lrap = label_ranking_average_precision_score(y_true, y_score)
score_my_lrap = _my_lrap(y_true, y_score)
assert_almost_equal(score_lrap, score_my_lrap)
def test_label_ranking_avp():
for fn in [label_ranking_average_precision_score, _my_lrap]:
yield check_lrap_toy, fn
yield check_lrap_without_tie_and_increasing_score, fn
yield check_lrap_only_ties, fn
yield check_zero_or_all_relevant_labels, fn
yield check_lrap_error_raised, label_ranking_average_precision_score
for n_samples, n_classes, random_state in product((1, 2, 8, 20),
(2, 5, 10),
range(1)):
yield (check_alternative_lrap_implementation,
label_ranking_average_precision_score,
n_classes, n_samples, random_state)
def test_coverage_error():
# Toy case
assert_almost_equal(coverage_error([[0, 1]], [[0.25, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.75]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.75]]), 1)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.75]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.75]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.75, 0.5, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.75, 0.5, 0.25]]), 1)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.75, 0.5, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.75, 0.5, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.5, 0.75, 0.25]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.5, 0.75, 0.25]]), 1)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.5, 0.75, 0.25]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.5, 0.75, 0.25]]), 2)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.5, 0.75, 0.25]]), 3)
    # Non-trivial case
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(1 + 3) / 2.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
assert_almost_equal(coverage_error([[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(1 + 3 + 3) / 3.)
def test_coverage_tie_handling():
assert_almost_equal(coverage_error([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[1, 0]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 1]], [[0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 0, 0]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(coverage_error([[0, 0, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 0]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 2)
assert_almost_equal(coverage_error([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 3)
assert_almost_equal(coverage_error([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 3)
def test_label_ranking_loss():
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.25, 0.75]]), 0)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.75, 0.25]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.75]]),
0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.75]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.75]]),
2 / 2)
# Undefined metrics - the ranking doesn't matter
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.75, 0.25]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 1]], [[0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.5, 0.75, 0.25]]),
0)
assert_almost_equal(label_ranking_loss([[0, 0, 0]], [[0.25, 0.5, 0.5]]),
0)
assert_almost_equal(label_ranking_loss([[1, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
    # Non-trivial case
assert_almost_equal(label_ranking_loss([[0, 1, 0], [1, 1, 0]],
[[0.1, 10., -3], [0, 1, 3]]),
(0 + 2 / 2) / 2.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [0, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
assert_almost_equal(label_ranking_loss(
[[0, 1, 0], [1, 1, 0], [0, 1, 1]],
[[0.1, 10, -3], [3, 1, 3], [0, 2, 0]]),
(0 + 2 / 2 + 1 / 2) / 3.)
# Sparse csr matrices
assert_almost_equal(label_ranking_loss(
csr_matrix(np.array([[0, 1, 0], [1, 1, 0]])),
[[0.1, 10, -3], [3, 1, 3]]),
(0 + 2 / 2) / 2.)
def test_ranking_appropriate_input_shape():
# Check that y_true.shape != y_score.shape raise the proper exception
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [0, 1])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0, 1], [0, 1]], [[0], [1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss,
[[0], [1]], [[0, 1], [0, 1]])
assert_raises(ValueError, label_ranking_loss, [[0, 1], [0, 1]], [[0], [1]])
def test_ranking_loss_ties_handling():
# Tie handling
assert_almost_equal(label_ranking_loss([[1, 0]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 1]], [[0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[0, 0, 1]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 0]], [[0.25, 0.5, 0.5]]),
1 / 2)
assert_almost_equal(label_ranking_loss([[0, 1, 1]], [[0.25, 0.5, 0.5]]), 0)
assert_almost_equal(label_ranking_loss([[1, 0, 0]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 0, 1]], [[0.25, 0.5, 0.5]]), 1)
assert_almost_equal(label_ranking_loss([[1, 1, 0]], [[0.25, 0.5, 0.5]]), 1)
| unlicense |
goodfeli/pylearn2 | pylearn2/scripts/datasets/browse_small_norb.py | 44 | 6901 | #!/usr/bin/env python
import sys
import argparse
import pickle
import warnings
import exceptions
import numpy
try:
from matplotlib import pyplot
except ImportError as import_error:
warnings.warn("Can't use this script without matplotlib.")
pyplot = None
from pylearn2.datasets import norb
warnings.warn("This script is deprecated. Please use ./browse_norb.py "
"instead. It is kept around as a tester for deprecated class "
"datasets.norb.SmallNORB",
exceptions.DeprecationWarning)
def main():
def parse_args():
parser = argparse.ArgumentParser(
description="Browser for SmallNORB dataset.")
parser.add_argument('--which_set',
default='train',
help="'train', 'test', or the path to a .pkl file")
parser.add_argument('--zca',
default=None,
help=("if --which_set points to a .pkl "
"file storing a ZCA-preprocessed "
"NORB dataset, you can optionally "
"enter the preprocessor's .pkl "
"file path here to undo the "
"ZCA'ing for visualization "
"purposes."))
return parser.parse_args()
def get_data(args):
if args.which_set in ('train', 'test'):
dataset = norb.SmallNORB(args.which_set, True)
else:
with open(args.which_set) as norb_file:
dataset = pickle.load(norb_file)
if len(dataset.y.shape) < 2 or dataset.y.shape[1] == 1:
print("This viewer does not support NORB datasets that "
"only have classification labels.")
sys.exit(1)
if args.zca is not None:
with open(args.zca) as zca_file:
zca = pickle.load(zca_file)
dataset.X = zca.inverse(dataset.X)
num_examples = dataset.X.shape[0]
topo_shape = ((num_examples, ) +
tuple(dataset.view_converter.shape))
assert topo_shape[-1] == 1
topo_shape = topo_shape[:-1]
values = dataset.X.reshape(topo_shape)
labels = numpy.array(dataset.y, 'int')
return values, labels, dataset.which_set
args = parse_args()
values, labels, which_set = get_data(args)
# For programming convenience, internally remap the instance labels to be
# 0-4, and the azimuth labels to be 0-17. The user will still only see the
# original, unmodified label values.
instance_index = norb.SmallNORB.label_type_to_index['instance']
def remap_instances(which_set, labels):
if which_set == 'train':
new_to_old_instance = [4, 6, 7, 8, 9]
elif which_set == 'test':
new_to_old_instance = [0, 1, 2, 3, 5]
num_instances = len(new_to_old_instance)
old_to_new_instance = numpy.ndarray(10, 'int')
old_to_new_instance.fill(-1)
old_to_new_instance[new_to_old_instance] = numpy.arange(num_instances)
instance_slice = numpy.index_exp[:, instance_index]
old_instances = labels[instance_slice]
new_instances = old_to_new_instance[old_instances]
labels[instance_slice] = new_instances
azimuth_index = norb.SmallNORB.label_type_to_index['azimuth']
azimuth_slice = numpy.index_exp[:, azimuth_index]
labels[azimuth_slice] = labels[azimuth_slice] / 2
return new_to_old_instance
new_to_old_instance = remap_instances(which_set, labels)
def get_new_azimuth_degrees(scalar_label):
return 20 * scalar_label
# Maps a label vector to the corresponding index in <values>
num_labels_by_type = numpy.array(norb.SmallNORB.num_labels_by_type, 'int')
num_labels_by_type[instance_index] = len(new_to_old_instance)
label_to_index = numpy.ndarray(num_labels_by_type, 'int')
label_to_index.fill(-1)
for i, label in enumerate(labels):
label_to_index[tuple(label)] = i
assert not numpy.any(label_to_index == -1) # all elements have been set
figure, axes = pyplot.subplots(1, 2, squeeze=True)
figure.canvas.set_window_title('Small NORB dataset (%sing set)' %
which_set)
# shift subplots down to make more room for the text
figure.subplots_adjust(bottom=0.05)
num_label_types = len(norb.SmallNORB.num_labels_by_type)
current_labels = numpy.zeros(num_label_types, 'int')
current_label_type = [0, ]
label_text = figure.suptitle("title text",
x=0.1,
horizontalalignment="left")
def redraw(redraw_text, redraw_images):
if redraw_text:
cl = current_labels
lines = [
'category: %s' % norb.SmallNORB.get_category(cl[0]),
'instance: %d' % new_to_old_instance[cl[1]],
'elevation: %d' % norb.SmallNORB.get_elevation_degrees(cl[2]),
'azimuth: %d' % get_new_azimuth_degrees(cl[3]),
'lighting: %d' % cl[4]]
lt = current_label_type[0]
lines[lt] = '==> ' + lines[lt]
text = ('Up/down arrows choose label, left/right arrows change it'
'\n\n' +
'\n'.join(lines))
label_text.set_text(text)
if redraw_images:
index = label_to_index[tuple(current_labels)]
image_pair = values[index, :, :, :]
for i in range(2):
axes[i].imshow(image_pair[i, :, :], cmap='gray')
figure.canvas.draw()
def on_key_press(event):
def add_mod(arg, step, size):
return (arg + size + step) % size
def incr_label_type(step):
current_label_type[0] = add_mod(current_label_type[0],
step,
num_label_types)
def incr_label(step):
lt = current_label_type[0]
num_labels = num_labels_by_type[lt]
current_labels[lt] = add_mod(current_labels[lt], step, num_labels)
if event.key == 'up':
incr_label_type(-1)
redraw(True, False)
elif event.key == 'down':
incr_label_type(1)
redraw(True, False)
elif event.key == 'left':
incr_label(-1)
redraw(True, True)
elif event.key == 'right':
incr_label(1)
redraw(True, True)
elif event.key == 'q':
sys.exit(0)
figure.canvas.mpl_connect('key_press_event', on_key_press)
redraw(True, True)
pyplot.show()
if __name__ == '__main__':
main()
| bsd-3-clause |
Ginkgo-Biloba/Misc-Python | numpy/SciPyInt.py | 1 | 3425 | # coding=utf-8
import numpy as np
from scipy import integrate as intgrt
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from math import sqrt
# Compute the volume of a hemisphere
def ballVolume():
def halfBall(x, y):
return sqrt(1 - x**2 - y**2)
def halfCircle(x):
return sqrt(1 - x**2)
(vol, error) = intgrt.dblquad(halfBall, -1, 1, lambda x: -halfCircle(x), lambda x: halfCircle(x))
print ("vol =", vol)
# Integrate a system of ordinary differential equations
# Compute the trajectory of the Lorenz attractor
def LorenzAttactor():
# Given the position vector w and the three parameters sigma, rho, beta, compute the velocity vector dx, dy, dz
def lorenz(w, t, sigma, rho, beta):
(x, y, z) = w.tolist()
return (sigma * (y - x), x * (rho - z), x * y - beta * z)
t = np.arange(0, 20, 0.01) # create the time points
# Solve the lorenz system with odeint, using two different initial values
track1 = intgrt.odeint(lorenz, (0.0, 1.0, 0.0), t, args=(10.0, 28.0, 2.7))
track2 = intgrt.odeint(lorenz, (0.0, 1.01, 0.0), t, args=(10.0, 28.0, 2.7))
# Plot the two trajectories
fig = plt.figure()
ax = Axes3D(fig)
ax.plot(track1[:, 0], track1[:, 1], track1[:, 2], label="$y=1.0$")
ax.plot(track2[:, 0], track2[:, 1], track2[:, 2], label="$y=1.01$")
plt.legend(loc="best")
plt.show()
# Mass-spring-damper system
# Mx'' + bx' + kx = F
def msd(xu, t, M, k, b, F):
(x, u) = xu.tolist()
dx = u
du = (F - k * x - b * u) / M
return (dx, du)
def msdDemo():
# The slider starts at displacement x = -1.0 with zero initial velocity; the external control force is held constant at 1.0
initxu = (-1.0, 0.0)
(M, k, b, F) = (1.0, 0.5, 0.2, 1.0)
t = np.arange(0, 40, 0.02)
rst = intgrt.odeint(msd, initxu, t, args=(M, k, b, F))
(fig, (ax1, ax2)) = plt.subplots(2, 1)
ax1.plot(t, rst[:, 0], label=u"位移 x")
ax2.plot(t, rst[:, 1], label=u"速度 u")
ax1.legend(); ax2.legend()
plt.show()
# Mass-spring-damper system
class MassSpringDamper(object):
def __init__(self, M, k, b, F):
(self.M, self.k, self.b, self.F) = (M, k, b, F)
# Derivative function
def dee(self, t, xu):
(x, u) = xu.tolist()
dx = u
du = (self.F - self.k * x - self.b * u) / self.M
return [dx, du] # a list (not a tuple) is required as the return value
# Use a PID controller
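# Note added for clarity (not in the original source): update() below
# implements the standard discrete PID law,
#   output = kp*err + ki*sum(err*dt) + kd*(err - lastErr)/dt,
# where self.x accumulates the integral term sum(err*dt).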
class PID(object):
def __init__(self, kp, ki, kd, dt):
(self.kp, self.ki, self.kd, self.dt) = (kp, ki, kd, dt)
self.lastErr = None
self.x = 0.0
def update(self, err):
p = self.kp * err
i = self.ki * self.x
if self.lastErr is None:
d = 0.0
else:
d = self.kd * (err - self.lastErr) / self.dt
self.x += err * self.dt
self.lastErr = err
return p + i + d
# Control the external force F so that the slider settles at displacement 2.0 more quickly
def msdPID(kp, ki, kd, dt):
stm = MassSpringDamper(M=1.0, k=0.5, b=0.2, F=1.0)
initxu = (-1.0, 0.0)
pid = PID(kp, ki, kd, dt)
r = intgrt.ode(stm.dee)
r.set_integrator("vode", method="bdf")
r.set_initial_value(initxu, 0)
t = list(); rst = list(); FArr = list()
while (r.successful() and (r.t + dt < 3)):
r.integrate(r.t + dt)
t.append(r.t)
rst.append(r.y)
err = 2.0 - r.y[0]
F = pid.update(err)
stm.F = F
FArr.append(F)
rst = np.array(rst)
t = np.array(t)
FArr = np.array(FArr)
(fig, (ax1, ax2, ax3)) = plt.subplots(3, 1)
ax1.plot(t, rst[:, 0], label=u"位移 x")
ax2.plot(t, rst[:, 1], label=u"速度 u")
ax3.plot(t, FArr, label=u"控制力 F")
ax1.legend(); ax2.legend(); ax3.legend()
plt.show()
if (__name__ == "__main__"):
# ballVolume()
LorenzAttactor()
# msdDemo()
# msdPID(19.29, 1.41, 6.25, 0.02) # best set of parameters found
| gpl-3.0 |
phillynch7/sportsref | sportsref/nba/seasons.py | 1 | 9902 | from __future__ import print_function
from __future__ import division
from future import standard_library
standard_library.install_aliases()
from builtins import range, zip
from past.utils import old_div
import urllib.parse
import future
import future.utils
import numpy as np
import pandas as pd
from pyquery import PyQuery as pq
import sportsref
class Season(future.utils.with_metaclass(sportsref.decorators.Cached, object)):
"""Object representing a given NBA season."""
def __init__(self, year):
"""Initializes a Season object for an NBA season.
:year: The year of the season we want.
"""
self.yr = int(year)
def __eq__(self, other):
return (self.yr == other.yr)
def __hash__(self):
return hash(self.yr)
def __repr__(self):
return 'Season({})'.format(self.yr)
def _subpage_url(self, page):
return (sportsref.nba.BASE_URL +
'/leagues/NBA_{}_{}.html'.format(self.yr, page))
@sportsref.decorators.memoize
def get_main_doc(self):
"""Returns PyQuery object for the main season URL.
:returns: PyQuery object.
"""
url = (sportsref.nba.BASE_URL +
'/leagues/NBA_{}.html'.format(self.yr))
return pq(sportsref.utils.get_html(url))
@sportsref.decorators.memoize
def get_sub_doc(self, subpage):
"""Returns PyQuery object for a given subpage URL.
:subpage: The subpage of the season, e.g. 'per_game'.
:returns: PyQuery object.
"""
html = sportsref.utils.get_html(self._subpage_url(subpage))
return pq(html)
@sportsref.decorators.memoize
def get_team_ids(self):
"""Returns a list of the team IDs for the given year.
:returns: List of team IDs.
"""
df = self.team_stats_per_game()
if not df.empty:
return df.index.tolist()
else:
print('ERROR: no teams found')
return []
@sportsref.decorators.memoize
def team_ids_to_names(self):
"""Mapping from 3-letter team IDs to full team names.
:returns: Dictionary with team IDs as keys and full team strings as
values.
"""
doc = self.get_main_doc()
table = doc('table#team-stats-per_game')
flattened = sportsref.utils.parse_table(table, flatten=True)
unflattened = sportsref.utils.parse_table(table, flatten=False)
team_ids = flattened['team_id']
team_names = unflattened['team_name']
if len(team_names) != len(team_ids):
raise Exception("team names and team IDs don't align")
return dict(zip(team_ids, team_names))
@sportsref.decorators.memoize
def team_names_to_ids(self):
"""Mapping from full team names to 3-letter team IDs.
:returns: Dictionary with team names as keys and team IDs as values.
"""
d = self.team_ids_to_names()
return {v: k for k, v in d.items()}
@sportsref.decorators.memoize
@sportsref.decorators.kind_rpb(include_type=True)
def schedule(self, kind='R'):
"""Returns a list of BoxScore IDs for every game in the season.
Only needs to handle 'R' or 'P' options because decorator handles 'B'.
:param kind: 'R' for regular season, 'P' for playoffs, 'B' for both.
Defaults to 'R'.
:returns: DataFrame of schedule information.
:rtype: pd.DataFrame
"""
kind = kind.upper()[0]
dfs = []
# get games from each month
for month in ('october', 'november', 'december', 'january', 'february',
'march', 'april', 'may', 'june'):
try:
doc = self.get_sub_doc('games-{}'.format(month))
except ValueError:
continue
table = doc('table#schedule')
df = sportsref.utils.parse_table(table)
dfs.append(df)
df = pd.concat(dfs).reset_index(drop=True)
# figure out how many regular season games
try:
sportsref.utils.get_html('{}/playoffs/NBA_{}.html'.format(
sportsref.nba.BASE_URL, self.yr)
)
is_past_season = True
except ValueError:
is_past_season = False
if is_past_season:
team_per_game = self.team_stats_per_game()
n_reg_games = int(team_per_game.g.sum() // 2)
else:
n_reg_games = len(df)
# subset appropriately based on `kind`
if kind == 'P':
return df.iloc[n_reg_games:]
else:
return df.iloc[:n_reg_games]
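# Illustrative usage sketch (not part of the original module; the year below
# is an arbitrary assumption):
#   season = Season(2016)
#   playoff_schedule = season.schedule(kind='P') # playoff games only
#   full_schedule = season.schedule(kind='B') # regular season plus playoffs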
def finals_winner(self):
"""Returns the team ID for the winner of that year's NBA Finals.
:returns: 3-letter team ID for champ.
"""
raise NotImplementedError('nba.Season.finals_winner')
def finals_loser(self):
"""Returns the team ID for the loser of that year's NBA Finals.
:returns: 3-letter team ID for runner-up.
"""
raise NotImplementedError('nba.Season.finals_loser')
def standings(self):
"""Returns a DataFrame containing standings information."""
doc = self.get_sub_doc('standings')
east_table = doc('table#divs_standings_E')
east_df = pd.DataFrame(sportsref.utils.parse_table(east_table))
east_df.sort_values('wins', ascending=False, inplace=True)
east_df['seed'] = range(1, len(east_df) + 1)
east_df['conference'] = 'E'
west_table = doc('table#divs_standings_W')
west_df = sportsref.utils.parse_table(west_table)
west_df.sort_values('wins', ascending=False, inplace=True)
west_df['seed'] = range(1, len(west_df) + 1)
west_df['conference'] = 'W'
full_df = pd.concat([east_df, west_df], axis=0).reset_index(drop=True)
full_df['team_id'] = full_df.team_id.str.extract(r'(\w+)\W*\(\d+\)', expand=False)
full_df['gb'] = [gb if isinstance(gb, int) or isinstance(gb, float) else 0
for gb in full_df['gb']]
full_df = full_df.drop('has_class_full_table', axis=1)
expanded_table = doc('table#expanded_standings')
expanded_df = sportsref.utils.parse_table(expanded_table)
full_df = pd.merge(full_df, expanded_df, on='team_id')
return full_df
@sportsref.decorators.memoize
def _get_team_stats_table(self, selector):
"""Helper function for stats tables on season pages. Returns a
DataFrame."""
doc = self.get_main_doc()
table = doc(selector)
df = sportsref.utils.parse_table(table)
df.set_index('team_id', inplace=True)
return df
def team_stats_per_game(self):
"""Returns a Pandas DataFrame of each team's basic per-game stats for
the season."""
return self._get_team_stats_table('table#team-stats-per_game')
def opp_stats_per_game(self):
"""Returns a Pandas DataFrame of each team's opponent's basic per-game
stats for the season."""
return self._get_team_stats_table('table#opponent-stats-per_game')
def team_stats_totals(self):
"""Returns a Pandas DataFrame of each team's basic stat totals for the
season."""
return self._get_team_stats_table('table#team-stats-base')
def opp_stats_totals(self):
"""Returns a Pandas DataFrame of each team's opponent's basic stat
totals for the season."""
return self._get_team_stats_table('table#opponent-stats-base')
def misc_stats(self):
"""Returns a Pandas DataFrame of miscellaneous stats about each team's
season."""
return self._get_team_stats_table('table#misc_stats')
def team_stats_shooting(self):
"""Returns a Pandas DataFrame of each team's shooting stats for the
season."""
return self._get_team_stats_table('table#team_shooting')
def opp_stats_shooting(self):
"""Returns a Pandas DataFrame of each team's opponent's shooting stats
for the season."""
return self._get_team_stats_table('table#opponent_shooting')
@sportsref.decorators.memoize
def _get_player_stats_table(self, identifier):
"""Helper function for player season stats.
:identifier: string identifying the type of stat, e.g. 'per_game'.
:returns: A DataFrame of stats.
"""
doc = self.get_sub_doc(identifier)
table = doc('table#{}_stats'.format(identifier))
df = sportsref.utils.parse_table(table)
return df
def player_stats_per_game(self):
"""Returns a DataFrame of per-game player stats for a season."""
return self._get_player_stats_table('per_game')
def player_stats_totals(self):
"""Returns a DataFrame of player stat totals for a season."""
return self._get_player_stats_table('totals')
def player_stats_per36(self):
"""Returns a DataFrame of player per-36 min stats for a season."""
return self._get_player_stats_table('per_minute')
def player_stats_per100(self):
"""Returns a DataFrame of player per-100 poss stats for a season."""
return self._get_player_stats_table('per_poss')
def player_stats_advanced(self):
"""Returns a DataFrame of player per-100 poss stats for a season."""
return self._get_player_stats_table('advanced')
def mvp_voting(self):
"""Returns a DataFrame containing information about MVP voting."""
raise NotImplementedError('nba.Season.mvp_voting')
def roy_voting(self):
"""Returns a DataFrame containing information about ROY voting."""
url = '{}/awards/awards_{}.html'.format(sportsref.nba.BASE_URL, self.yr)
doc = pq(sportsref.utils.get_html(url))
table = doc('table#roy')
df = sportsref.utils.parse_table(table)
return df
| gpl-3.0 |
PatrickChrist/scikit-learn | benchmarks/bench_plot_ward.py | 290 | 1260 | """
Benchmark scikit-learn's Ward implementation compared to SciPy's
"""
import time
import numpy as np
from scipy.cluster import hierarchy
import pylab as pl
from sklearn.cluster import AgglomerativeClustering
ward = AgglomerativeClustering(n_clusters=3, linkage='ward')
n_samples = np.logspace(.5, 3, 9)
n_features = np.logspace(1, 3.5, 7)
N_samples, N_features = np.meshgrid(n_samples,
n_features)
scikits_time = np.zeros(N_samples.shape)
scipy_time = np.zeros(N_samples.shape)
for i, n in enumerate(n_samples):
for j, p in enumerate(n_features):
X = np.random.normal(size=(n, p))
t0 = time.time()
ward.fit(X)
scikits_time[j, i] = time.time() - t0
t0 = time.time()
hierarchy.ward(X)
scipy_time[j, i] = time.time() - t0
ratio = scikits_time / scipy_time
pl.figure("scikit-learn Ward's method benchmark results")
pl.imshow(np.log(ratio), aspect='auto', origin="lower")
pl.colorbar()
pl.contour(ratio, levels=[1, ], colors='k')
pl.yticks(range(len(n_features)), n_features.astype(np.int))
pl.ylabel('N features')
pl.xticks(range(len(n_samples)), n_samples.astype(np.int))
pl.xlabel('N samples')
pl.title("Scikit's time, in units of scipy time (log)")
pl.show()
| bsd-3-clause |
zzcclp/spark | python/pyspark/pandas/tests/plot/test_frame_plot_matplotlib.py | 14 | 18666 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import base64
from distutils.version import LooseVersion
from io import BytesIO
import unittest
import pandas as pd
import numpy as np
from pyspark import pandas as ps
from pyspark.pandas.config import set_option, reset_option
from pyspark.testing.pandasutils import (
have_matplotlib,
matplotlib_requirement_message,
PandasOnSparkTestCase,
TestUtils,
)
if have_matplotlib:
import matplotlib
from matplotlib import pyplot as plt
matplotlib.use("agg")
@unittest.skipIf(not have_matplotlib, matplotlib_requirement_message)
class DataFramePlotMatplotlibTest(PandasOnSparkTestCase, TestUtils):
sample_ratio_default = None
@classmethod
def setUpClass(cls):
super().setUpClass()
if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
pd.set_option("plotting.backend", "matplotlib")
set_option("plotting.backend", "matplotlib")
set_option("plotting.max_rows", 2000)
set_option("plotting.sample_ratio", None)
@classmethod
def tearDownClass(cls):
if LooseVersion(pd.__version__) >= LooseVersion("0.25"):
pd.reset_option("plotting.backend")
reset_option("plotting.backend")
reset_option("plotting.max_rows")
reset_option("plotting.sample_ratio")
super().tearDownClass()
@property
def pdf1(self):
return pd.DataFrame(
{"a": [1, 2, 3, 4, 5, 6, 7, 8, 9, 15, 50], "b": [2, 3, 4, 5, 7, 9, 10, 15, 34, 45, 49]},
index=[0, 1, 3, 5, 6, 8, 9, 9, 9, 10, 10],
)
@property
def psdf1(self):
return ps.from_pandas(self.pdf1)
@staticmethod
def plot_to_base64(ax):
bytes_data = BytesIO()
ax.figure.savefig(bytes_data, format="png")
bytes_data.seek(0)
b64_data = base64.b64encode(bytes_data.read())
plt.close(ax.figure)
return b64_data
def test_line_plot(self):
def check_line_plot(pdf, psdf):
ax1 = pdf.plot(kind="line", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="line", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax3 = pdf.plot.line(colormap="Paired")
bin3 = self.plot_to_base64(ax3)
ax4 = psdf.plot.line(colormap="Paired")
bin4 = self.plot_to_base64(ax4)
self.assertEqual(bin3, bin4)
pdf1 = self.pdf1
psdf1 = self.psdf1
check_line_plot(pdf1, psdf1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf1.columns = columns
psdf1.columns = columns
check_line_plot(pdf1, psdf1)
def test_area_plot(self):
def check_area_plot(pdf, psdf):
ax1 = pdf.plot(kind="area", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="area", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax3 = pdf.plot.area(colormap="Paired")
bin3 = self.plot_to_base64(ax3)
ax4 = psdf.plot.area(colormap="Paired")
bin4 = self.plot_to_base64(ax4)
self.assertEqual(bin3, bin4)
pdf = self.pdf1
psdf = self.psdf1
check_area_plot(pdf, psdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf.columns = columns
psdf.columns = columns
check_area_plot(pdf, psdf)
def test_area_plot_stacked_false(self):
def check_area_plot_stacked_false(pdf, psdf):
ax1 = pdf.plot.area(stacked=False)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.area(stacked=False)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
# test if frame area plot is correct when stacked=False because default is True
pdf = pd.DataFrame(
{
"sales": [3, 2, 3, 9, 10, 6],
"signups": [5, 5, 6, 12, 14, 13],
"visits": [20, 42, 28, 62, 81, 50],
},
index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="M"),
)
psdf = ps.from_pandas(pdf)
check_area_plot_stacked_false(pdf, psdf)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "sales"), ("x", "signups"), ("y", "visits")])
pdf.columns = columns
psdf.columns = columns
check_area_plot_stacked_false(pdf, psdf)
def test_area_plot_y(self):
def check_area_plot_y(pdf, psdf, y):
ax1 = pdf.plot.area(y=y)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.area(y=y)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
# test if frame area plot is correct when y is specified
pdf = pd.DataFrame(
{
"sales": [3, 2, 3, 9, 10, 6],
"signups": [5, 5, 6, 12, 14, 13],
"visits": [20, 42, 28, 62, 81, 50],
},
index=pd.date_range(start="2018/01/01", end="2018/07/01", freq="M"),
)
psdf = ps.from_pandas(pdf)
check_area_plot_y(pdf, psdf, y="sales")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "sales"), ("x", "signups"), ("y", "visits")])
pdf.columns = columns
psdf.columns = columns
check_area_plot_y(pdf, psdf, y=("x", "sales"))
def test_barh_plot_with_x_y(self):
def check_barh_plot_with_x_y(pdf, psdf, x, y):
ax1 = pdf.plot(kind="barh", x=x, y=y, colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="barh", x=x, y=y, colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax3 = pdf.plot.barh(x=x, y=y, colormap="Paired")
bin3 = self.plot_to_base64(ax3)
ax4 = psdf.plot.barh(x=x, y=y, colormap="Paired")
bin4 = self.plot_to_base64(ax4)
self.assertEqual(bin3, bin4)
# this is testing plot with specified x and y
pdf1 = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
psdf1 = ps.from_pandas(pdf1)
check_barh_plot_with_x_y(pdf1, psdf1, x="lab", y="val")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "lab"), ("y", "val")])
pdf1.columns = columns
psdf1.columns = columns
check_barh_plot_with_x_y(pdf1, psdf1, x=("x", "lab"), y=("y", "val"))
def test_barh_plot(self):
def check_barh_plot(pdf, psdf):
ax1 = pdf.plot(kind="barh", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="barh", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax3 = pdf.plot.barh(colormap="Paired")
bin3 = self.plot_to_base64(ax3)
ax4 = psdf.plot.barh(colormap="Paired")
bin4 = self.plot_to_base64(ax4)
self.assertEqual(bin3, bin4)
# this is testing when x or y is not assigned
pdf1 = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
psdf1 = ps.from_pandas(pdf1)
check_barh_plot(pdf1, psdf1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "lab"), ("y", "val")])
pdf1.columns = columns
psdf1.columns = columns
check_barh_plot(pdf1, psdf1)
def test_bar_plot(self):
def check_bar_plot(pdf, psdf):
ax1 = pdf.plot(kind="bar", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="bar", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax3 = pdf.plot.bar(colormap="Paired")
bin3 = self.plot_to_base64(ax3)
ax4 = psdf.plot.bar(colormap="Paired")
bin4 = self.plot_to_base64(ax4)
self.assertEqual(bin3, bin4)
pdf1 = self.pdf1
psdf1 = self.psdf1
check_bar_plot(pdf1, psdf1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "lab"), ("y", "val")])
pdf1.columns = columns
psdf1.columns = columns
check_bar_plot(pdf1, psdf1)
def test_bar_with_x_y(self):
# this is testing plot with specified x and y
pdf = pd.DataFrame({"lab": ["A", "B", "C"], "val": [10, 30, 20]})
psdf = ps.from_pandas(pdf)
ax1 = pdf.plot(kind="bar", x="lab", y="val", colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="bar", x="lab", y="val", colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax3 = pdf.plot.bar(x="lab", y="val", colormap="Paired")
bin3 = self.plot_to_base64(ax3)
ax4 = psdf.plot.bar(x="lab", y="val", colormap="Paired")
bin4 = self.plot_to_base64(ax4)
self.assertEqual(bin3, bin4)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "lab"), ("y", "val")])
pdf.columns = columns
psdf.columns = columns
ax5 = pdf.plot(kind="bar", x=("x", "lab"), y=("y", "val"), colormap="Paired")
bin5 = self.plot_to_base64(ax5)
ax6 = psdf.plot(kind="bar", x=("x", "lab"), y=("y", "val"), colormap="Paired")
bin6 = self.plot_to_base64(ax6)
self.assertEqual(bin5, bin6)
ax7 = pdf.plot.bar(x=("x", "lab"), y=("y", "val"), colormap="Paired")
bin7 = self.plot_to_base64(ax7)
ax8 = psdf.plot.bar(x=("x", "lab"), y=("y", "val"), colormap="Paired")
bin8 = self.plot_to_base64(ax8)
self.assertEqual(bin7, bin8)
def test_pie_plot(self):
def check_pie_plot(pdf, psdf, y):
ax1 = pdf.plot.pie(y=y, figsize=(5, 5), colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.pie(y=y, figsize=(5, 5), colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf.plot(kind="pie", y=y, figsize=(5, 5), colormap="Paired")
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="pie", y=y, figsize=(5, 5), colormap="Paired")
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax11, ax12 = pdf.plot.pie(figsize=(5, 5), subplots=True, colormap="Paired")
bin11 = self.plot_to_base64(ax11)
bin12 = self.plot_to_base64(ax12)
self.assertEqual(bin11, bin12)
ax21, ax22 = psdf.plot.pie(figsize=(5, 5), subplots=True, colormap="Paired")
bin21 = self.plot_to_base64(ax21)
bin22 = self.plot_to_base64(ax22)
self.assertEqual(bin21, bin22)
ax11, ax12 = pdf.plot(kind="pie", figsize=(5, 5), subplots=True, colormap="Paired")
bin11 = self.plot_to_base64(ax11)
bin12 = self.plot_to_base64(ax12)
self.assertEqual(bin11, bin12)
ax21, ax22 = psdf.plot(kind="pie", figsize=(5, 5), subplots=True, colormap="Paired")
bin21 = self.plot_to_base64(ax21)
bin22 = self.plot_to_base64(ax22)
self.assertEqual(bin21, bin22)
pdf1 = pd.DataFrame(
{"mass": [0.330, 4.87, 5.97], "radius": [2439.7, 6051.8, 6378.1]},
index=["Mercury", "Venus", "Earth"],
)
psdf1 = ps.from_pandas(pdf1)
check_pie_plot(pdf1, psdf1, y="mass")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "mass"), ("y", "radius")])
pdf1.columns = columns
psdf1.columns = columns
check_pie_plot(pdf1, psdf1, y=("x", "mass"))
def test_pie_plot_error_message(self):
# this is to test if error is correctly raising when y is not specified
# and subplots is not set to True
pdf = pd.DataFrame(
{"mass": [0.330, 4.87, 5.97], "radius": [2439.7, 6051.8, 6378.1]},
index=["Mercury", "Venus", "Earth"],
)
psdf = ps.from_pandas(pdf)
with self.assertRaises(ValueError) as context:
psdf.plot.pie(figsize=(5, 5), colormap="Paired")
error_message = "pie requires either y column or 'subplots=True'"
self.assertTrue(error_message in str(context.exception))
def test_scatter_plot(self):
def check_scatter_plot(pdf, psdf, x, y, c):
ax1 = pdf.plot.scatter(x=x, y=y)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.scatter(x=x, y=y)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf.plot(kind="scatter", x=x, y=y)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="scatter", x=x, y=y)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
# check when keyword c is given as name of a column
ax1 = pdf.plot.scatter(x=x, y=y, c=c, s=50)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.scatter(x=x, y=y, c=c, s=50)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
# Use pandas scatter plot example
pdf1 = pd.DataFrame(np.random.rand(50, 4), columns=["a", "b", "c", "d"])
psdf1 = ps.from_pandas(pdf1)
check_scatter_plot(pdf1, psdf1, x="a", y="b", c="c")
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("x", "b"), ("y", "c"), ("z", "d")])
pdf1.columns = columns
psdf1.columns = columns
check_scatter_plot(pdf1, psdf1, x=("x", "a"), y=("x", "b"), c=("y", "c"))
def test_hist_plot(self):
def check_hist_plot(pdf, psdf):
_, ax1 = plt.subplots(1, 1)
ax1 = pdf.plot.hist()
bin1 = self.plot_to_base64(ax1)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf.plot.hist()
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf.plot.hist(bins=15)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.hist(bins=15)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf.plot(kind="hist", bins=15)
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot(kind="hist", bins=15)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
ax1 = pdf.plot.hist(bins=3, bottom=[2, 1, 3])
bin1 = self.plot_to_base64(ax1)
ax2 = psdf.plot.hist(bins=3, bottom=[2, 1, 3])
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
non_numeric_pdf = self.pdf1.copy()
non_numeric_pdf.c = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"]
non_numeric_psdf = ps.from_pandas(non_numeric_pdf)
ax1 = non_numeric_pdf.plot.hist(
x=non_numeric_pdf.columns[0], y=non_numeric_pdf.columns[1], bins=3
)
bin1 = self.plot_to_base64(ax1)
ax2 = non_numeric_psdf.plot.hist(
x=non_numeric_pdf.columns[0], y=non_numeric_pdf.columns[1], bins=3
)
bin2 = self.plot_to_base64(ax2)
self.assertEqual(bin1, bin2)
pdf1 = self.pdf1
psdf1 = self.psdf1
check_hist_plot(pdf1, psdf1)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf1.columns = columns
psdf1.columns = columns
check_hist_plot(pdf1, psdf1)
def test_kde_plot(self):
def moving_average(a, n=10):
ret = np.cumsum(a, dtype=float)
ret[n:] = ret[n:] - ret[:-n]
return ret[n - 1 :] / n
def check_kde_plot(pdf, psdf, *args, **kwargs):
_, ax1 = plt.subplots(1, 1)
ax1 = pdf.plot.kde(*args, **kwargs)
_, ax2 = plt.subplots(1, 1)
ax2 = psdf.plot.kde(*args, **kwargs)
try:
for i, (line1, line2) in enumerate(zip(ax1.get_lines(), ax2.get_lines())):
expected = line1.get_xydata().ravel()
actual = line2.get_xydata().ravel()
# TODO: Due to implementation difference, the output is different comparing
# to pandas'. We should identify the root cause of difference, and reduce
# the diff.
# Note: Data is from 1 to 50. So, it smooths them by moving average and compares
# both.
self.assertTrue(
np.allclose(moving_average(actual), moving_average(expected), rtol=3.0)
)
finally:
ax1.cla()
ax2.cla()
pdf1 = self.pdf1
psdf1 = self.psdf1
check_kde_plot(pdf1, psdf1, bw_method=0.3)
check_kde_plot(pdf1, psdf1, ind=[1, 2, 3], bw_method=3.0)
# multi-index columns
columns = pd.MultiIndex.from_tuples([("x", "a"), ("y", "b")])
pdf1.columns = columns
psdf1.columns = columns
check_kde_plot(pdf1, psdf1, bw_method=0.3)
check_kde_plot(pdf1, psdf1, ind=[1, 2, 3], bw_method=3.0)
if __name__ == "__main__":
from pyspark.pandas.tests.plot.test_frame_plot_matplotlib import * # noqa: F401
try:
import xmlrunner # type: ignore[import]
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
| apache-2.0 |
wazeerzulfikar/scikit-learn | sklearn/externals/joblib/__init__.py | 54 | 5087 | """Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
========================= ================================================
**User documentation:** http://pythonhosted.org/joblib
**Download packages:** http://pypi.python.org/pypi/joblib#downloads
**Source code:** http://github.com/joblib/joblib
**Report issues:** http://github.com/joblib/joblib/issues
========================= ================================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
over, for instance when prototyping computationally heavy jobs (as in
scientific development), but hand-crafted solutions to alleviate this
issue are error-prone and often lead to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application's status or a computational job, e.g.
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
progressively acquire better logging mechanisms to help track what
has been run, and capture I/O easily. In addition, Joblib will
provide a few I/O primitives, to easily define logging and
display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.11'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
from .parallel import register_parallel_backend
from .parallel import parallel_backend
from .parallel import effective_n_jobs
__all__ = ['Memory', 'MemorizedResult', 'PrintTime', 'Logger', 'hash', 'dump',
'load', 'Parallel', 'delayed', 'cpu_count', 'effective_n_jobs',
'register_parallel_backend', 'parallel_backend']
| bsd-3-clause |
zoranzhao/NoSSim | NoS_Vgraph/core_util_plot.py | 1 | 5568 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
from mpl_toolkits.axes_grid.parasite_axes import SubplotHost
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
def plot(srv_app, srv_lwip, cli_app, cli_lwip):
#srv_app = {0:[],1:[],2:[]}
#srv_lwip = {0:[],1:[],2:[]}
#cli_app = {0:[],1:[],2:[]}
#cli_lwip = {0:[],1:[],2:[]}
O2lwip=cli_lwip[2]
O2comp=cli_app[2]
O1lwip=cli_lwip[1]
O1comp=cli_app[1]
O0lwip=cli_lwip[0]
O0comp=cli_app[0]
colorsred = ['brown', 'red', 'tomato', 'lightsalmon']
colorsgreen = ['darkgreen', 'seagreen', 'limegreen', 'springgreen']
colorsblue =['navy', 'blue', 'steelblue', 'lightsteelblue']
hatches = ['//', '++', 'xxx', 'oo','\\\\\\', 'OO', '..' , '---', "**"]
label_size=15
font_size=15
#client
N = 3
width = 0.25 # the width of the bars
xtra_space = 0.02
ind = np.arange(N) + 2 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind1 = np.arange(N) + 2 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind2 = np.arange(N) + 2+(N+1) - (width*3+xtra_space*2)/2 # the x locations for the groups
ind3 = np.arange(N) + 2+N+1+N+1 - (width*3+xtra_space*2)/2 # the x locations for the groups
ind = np.append(ind1, ind2)
ind = np.append(ind, ind3)
#ind = np.append(ind, ind4)
#ind = np.append(ind, ind5)
fig, ax = plt.subplots(2)
a1 = ax[0].bar(ind, O2comp, width, color=[0,0.5,1])
a2 = ax[0].bar(ind, O2lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,0.5,1], bottom=O2comp)
b1 = ax[0].bar(ind+ width + xtra_space, O1comp, width, color=[0,1,0.5])
b2 = ax[0].bar(ind+ width + xtra_space, O1lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,1,0.5], bottom=O1comp)
c1 = ax[0].bar(ind+ 2*(width + xtra_space), O0comp, width, color=[1,0.5,0])
c2 = ax[0].bar(ind+ 2*(width + xtra_space), O0lwip, width, fill=False, hatch=hatches[0], edgecolor=[1,0.5,0], bottom=O0comp)
OLevel = ["O-0", "O-1", "O-2", "O-3"]
channels = ["b@11Mbps", "g@9Mbps", "g@54Mbps"]
duration_type = [" - lwIP", " - App."]
legend_size=16
plt.figlegend(
(
a1, a2,
b1, b2,
c1, c2
),
(
OLevel[2]+duration_type[1], OLevel[2]+duration_type[0],
OLevel[1]+duration_type[1], OLevel[1]+duration_type[0],
OLevel[0]+duration_type[1], OLevel[0]+duration_type[0]
),
scatterpoints=1,
loc='upper center',
ncol=3,
prop={'size':legend_size})
xticks = [ 2, 2.9, 3, 4, 6, 6.9, 7, 8, 10, 10.9, 11, 12]
xticks_minor = [ 1, 5, 9, 13 ]#longer
xlbls = [channels[0], '6-Cli.', channels[1], channels[2],
channels[0], '4-Cli.', channels[1], channels[2],
channels[0], '2-Cli.', channels[1], channels[2]]
ax[0].set_xticks( xticks )
ax[0].set_xticks( xticks_minor, minor=True )
ax[0].set_xticklabels( xlbls )
ax[0].set_xlim( 1, 13 )
ax[0].grid( 'off', axis='x' )
ax[0].grid( 'off', axis='x', which='minor' )
# vertical alignment of xtick labels
va = [ 0, -.1, 0, 0, 0, -.1, 0, 0, 0, -.1, 0, 0]
for t, y in zip( ax[0].get_xticklabels( ), va ):
t.set_y( y )
ax[0].tick_params( axis='x', which='minor', direction='out', length=40 , top='off')
#ax.tick_params( axis='x', which='major', direction='out', length=10 )
ax[0].tick_params( axis='x', which='major', bottom='off', top='off' )
vals = ax[0].get_yticks()
ax[0].set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
#server
O2lwip=srv_lwip[2]
O2comp=srv_app[2]
O1lwip=srv_lwip[1]
O1comp=srv_app[1]
O0lwip=srv_lwip[0]
O0comp=srv_app[0]
a1 = ax[1].bar(ind, O2comp, width, color=[0,0.5,1])
a2 = ax[1].bar(ind, O2lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,0.5,1], bottom=O2comp)
b1 = ax[1].bar(ind+ width + xtra_space, O1comp, width, color=[0,1,0.5])
b2 = ax[1].bar(ind+ width + xtra_space, O1lwip, width, fill=False, hatch=hatches[0], edgecolor=[0,1,0.5], bottom=O1comp)
c1 = ax[1].bar(ind+ 2*(width + xtra_space), O0comp, width, color=[1,0.5,0])
c2 = ax[1].bar(ind+ 2*(width + xtra_space), O0lwip, width, fill=False, hatch=hatches[0], edgecolor=[1,0.5,0], bottom=O0comp)
channels = ["b@11Mbps", "g@9Mbps", "g@54Mbps"]
duration_type = [" - Communication", " - Computation"]
xticks = [ 2, 2.9, 3, 4, 6, 6.9, 7, 8, 10, 10.9, 11, 12]
xticks_minor = [ 1, 5, 9, 13 ]#longer
xlbls = [channels[0], '6-Cli.', channels[1], channels[2],
channels[0], '4-Cli.', channels[1], channels[2],
channels[0], '2-Cli.', channels[1], channels[2]]
ax[1].set_xticks( xticks )
ax[1].set_xticks( xticks_minor, minor=True )
ax[1].set_xticklabels( xlbls )
ax[1].set_xlim( 1, 13 )
ax[1].grid( 'off', axis='x' )
ax[1].grid( 'off', axis='x', which='minor' )
va = [ 0, -.1, 0, 0, 0, -.1, 0, 0, 0, -.1, 0, 0]
for t, y in zip( ax[1].get_xticklabels( ), va ):
t.set_y( y )
ax[1].tick_params( axis='x', which='minor', direction='out', length=40 , top='off')
ax[1].tick_params( axis='x', which='major', bottom='off', top='off' )
vals = ax[1].get_yticks()
ax[1].set_yticklabels(['{:3.0f}%'.format(x*100) for x in vals])
# add some text for labels, title and axes ticks
ax[0].set_ylabel('Core Utilization', fontsize=label_size)
ax[0].set_xlabel('Client', fontsize=label_size)
ax[1].set_ylabel('Core Utilization', fontsize=label_size)
ax[1].set_xlabel('Server', fontsize=label_size)
ax[0].tick_params(axis='y', labelsize=font_size)
ax[1].tick_params(axis='y', labelsize=font_size)
ax[0].tick_params(axis='x', labelsize=font_size)
ax[1].tick_params(axis='x', labelsize=font_size)
plt.show()
| bsd-3-clause |
adammenges/statsmodels | statsmodels/examples/tsa/arma_plots.py | 33 | 2516 | '''Plot acf and pacf for some ARMA(1,1)
'''
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.tsa.arima_process as tsp
from statsmodels.sandbox.tsa.fftarma import ArmaFft as FftArmaProcess
import statsmodels.tsa.stattools as tss
from statsmodels.graphics.tsaplots import plotacf
np.set_printoptions(precision=2)
arcoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
macoefs = [0.9, 0., -0.5] #[0.9, 0.5, 0.1, 0., -0.5]
nsample = 1000
nburnin = 1000
sig = 1
fig = plt.figure(figsize=(8, 13))
fig.suptitle('ARMA: Autocorrelation (left) and Partial Autocorrelation (right)')
subplotcount = 1
nrows = 4
for arcoef in arcoefs[:-1]:
for macoef in macoefs[:-1]:
ar = np.r_[1., -arcoef]
ma = np.r_[1., macoef]
#y = tsp.arma_generate_sample(ar,ma,nsample, sig, burnin)
#armaprocess = FftArmaProcess(ar, ma, nsample) #TODO: make n optional
#armaprocess.plot4()
armaprocess = tsp.ArmaProcess(ar, ma)
acf = armaprocess.acf(20)[:20]
pacf = armaprocess.pacf(20)[:20]
ax = fig.add_subplot(nrows, 2, subplotcount)
plotacf(acf, ax=ax)
## ax.set_title('Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
## size='xx-small')
ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
transform=ax.transAxes,
horizontalalignment='left', #'right',
size='xx-small')
ax.set_xlim(-1,20)
subplotcount +=1
ax = fig.add_subplot(nrows, 2, subplotcount)
plotacf(pacf, ax=ax)
## ax.set_title('Partial Autocorrelation \nar=%s, ma=%rs' % (ar, ma),
## size='xx-small')
ax.text(0.7, 0.6, 'ar =%s \nma=%s' % (ar, ma),
transform=ax.transAxes,
horizontalalignment='left', #'right',
size='xx-small')
ax.set_xlim(-1,20)
subplotcount +=1
axs = fig.axes
### turn off the 2nd column y tick labels
##for ax in axs[1::2]:#[:,1].flat:
## for label in ax.get_yticklabels(): label.set_visible(False)
# turn off all but the bottom xtick labels
for ax in axs[:-2]:#[:-1,:].flat:
for label in ax.get_xticklabels(): label.set_visible(False)
# use a MaxNLocator on the first column y axis if you have a bunch of
# rows to avoid bunching; example below uses at most 3 ticks
import matplotlib.ticker as mticker
for ax in axs: #[::2]:#[:,1].flat:
ax.yaxis.set_major_locator( mticker.MaxNLocator(3 ))
plt.show()
| bsd-3-clause |
rgllm/uminho | 04/CN/TP3/src/src/parser/PsoTools.py | 1 | 4783 | import itertools
import json
import matplotlib.pyplot as plt
from matplotlib import style
import os
style.use('ggplot')
import numpy as np
from pprint import pprint
from os.path import basename
xrange=range
class PsoTools(object):
def __init__(self):
pass
# Convert a raw data file to a JSON file
def rawToJson(self, inputFilePath, outputFilePath):
inFile = open(inputFilePath, mode='r')
outFile = open(outputFilePath, mode='w')
meta_data = dict.fromkeys(['nb_customers', 'nb_depots',
'vehicle_cap', 'vehicle_cost', 'cost_type'])
cust_dict = dict.fromkeys(['x', 'y', 'demand'])
dep_dict = dict.fromkeys(['x', 'y', 'capacity'])
customers = {}
depots = {}
# Number of customers and available depots
nb_customers = int(inFile.readline())
nb_depots = int(inFile.readline())
meta_data['nb_customers'] = nb_customers
meta_data['nb_depots'] = nb_depots
inFile.readline() # Empty line
# Depot coordinates
for i, line in enumerate(inFile):
if i < nb_depots:
x = float(line.split()[0])
y = float(line.split()[1])
depots['d'+str(i)] = {}
depots['d'+str(i)]['x'] = x
depots['d'+str(i)]['y'] = y
else:
i=i-1
break
# Customer coordinates and vehicle capacity
for i, line in enumerate(inFile):
if i < nb_customers:
x = float(line.split()[0])
y = float(line.split()[1])
customers['c'+str(i)] = {}
customers['c'+str(i)]['x'] = x
customers['c'+str(i)]['y'] = y
else:
break
# Vehicle and depot capacities
for i, line in enumerate(inFile):
if i == 0:
vehicle_cap = float(line)
meta_data['vehicle_cap'] = vehicle_cap
elif i == 1:
pass
elif i < nb_depots+2:
depot_cap = float(line)
depots['d'+str(i-2)]['capacity'] = depot_cap
else:
break
# Customer demands
for i, line in enumerate(inFile):
if i < nb_customers:
demand = float(line)
customers['c'+str(i)]['demand'] = demand
else:
break
# Depot opening costs
for i, line in enumerate(inFile):
if i < nb_depots:
openning_cost = float(line)
depots['d'+str(i)]['opening_cost'] = openning_cost
elif i == nb_depots:
pass
elif i == nb_depots+1:
vehicle_cost = float(line)
meta_data['vehicle_cost'] = vehicle_cost
elif i == nb_depots+2:
pass
elif i == nb_depots+3:
cost_type = float(line)
meta_data['cost_type'] = cost_type
else:
break
final_output = {}
final_output['customers'] = customers
final_output['depots'] = depots
final_output['meta_data'] = meta_data
json.dump(final_output, outFile, indent=4)
inFile.close()
outFile.close()
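# Illustrative usage sketch (not part of the original class; the file names
# below are arbitrary assumptions):
#   pso = PsoTools()
#   pso.rawToJson('instance.dat', 'instance.json')
#   pso.plotAll('instance.json')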
# Plot the customers on the map
def plotCustomers(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_customers = data['meta_data']['nb_customers']
coords_cust = np.zeros(shape=(nb_customers,2))
for i in xrange(nb_customers):
x = data['customers']['c{0}'.format(i)]['x']
y = data['customers']['c{0}'.format(i)]['y']
coords_cust[i] = [x,y]
plt.scatter(coords_cust[:,0], coords_cust[:,1], marker='P', s=10, linewidth=5)
plt.show()
# Plot the depots on the map
def plotDepots(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_depots = data['meta_data']['nb_depots']
coords_depot = np.zeros(shape=(nb_depots,2))
for i in xrange(nb_depots):
x = data['depots']['d{0}'.format(i)]['x']
y = data['depots']['d{0}'.format(i)]['y']
coords_depot[i] = [x,y]
plt.scatter(coords_depot[:,0], coords_depot[:,1], marker='P', s=10, linewidth=5)
plt.show()
# Plot both depots and customers on the map
def plotAll(self, jsonInputFile):
if os.path.isfile(jsonInputFile):
with open(jsonInputFile) as data_file:
data = json.load(data_file)
nb_customers = data['meta_data']['nb_customers']
nb_depots = data['meta_data']['nb_depots']
coords_cust = np.zeros(shape=(nb_customers,2))
coords_depot = np.zeros(shape=(nb_depots,2))
for i in xrange(nb_customers):
x = data['customers']['c{0}'.format(i)]['x']
y = data['customers']['c{0}'.format(i)]['y']
coords_cust[i] = [x,y]
for i in xrange(nb_depots):
x = data['depots']['d{0}'.format(i)]['x']
y = data['depots']['d{0}'.format(i)]['y']
coords_depot[i] = [x,y]
filename = str(basename(os.path.splitext(jsonInputFile)[0]) + '.pdf')
plt.scatter(coords_cust[:,0], coords_cust[:,1], marker='s', s=10, linewidth=5)
plt.scatter(coords_depot[:,0], coords_depot[:,1], marker='8', s=10, linewidth=5)
plt.savefig(filename, format='pdf')
#~ plt.show()
| mit |
jat255/seaborn | seaborn/timeseries.py | 4 | 15212 | """Timeseries plotting functions."""
from __future__ import division
import numpy as np
import pandas as pd
from scipy import stats, interpolate
import matplotlib as mpl
import matplotlib.pyplot as plt
from .external.six import string_types
from . import utils
from . import algorithms as algo
from .palettes import color_palette
def tsplot(data, time=None, unit=None, condition=None, value=None,
err_style="ci_band", ci=68, interpolate=True, color=None,
estimator=np.mean, n_boot=5000, err_palette=None, err_kws=None,
legend=True, ax=None, **kwargs):
"""Plot one or more timeseries with flexible representation of uncertainty.
This function is intended to be used with data where observations are
nested within sampling units that were measured at multiple timepoints.
It can take data specified either as a long-form (tidy) DataFrame or as an
ndarray with dimensions (unit, time) The interpretation of some of the
other parameters changes depending on the type of object passed as data.
Parameters
----------
data : DataFrame or ndarray
Data for the plot. Should either be a "long form" dataframe or an
array with dimensions (unit, time, condition). In both cases, the
condition field/dimension is optional. The type of this argument
determines the interpretation of the next few parameters. When
using a DataFrame, the index has to be sequential.
time : string or series-like
Either the name of the field corresponding to time in the data
DataFrame or x values for a plot when data is an array. If a Series,
the name will be used to label the x axis.
unit : string
Field in the data DataFrame identifying the sampling unit (e.g.
subject, neuron, etc.). The error representation will collapse over
units at each time/condition observation. This has no role when data
is an array.
value : string
Either the name of the field corresponding to the data values in
the data DataFrame (i.e. the y coordinate) or a string that forms
the y axis label when data is an array.
condition : string or Series-like
Either the name of the field identifying the condition an observation
falls under in the data DataFrame, or a sequence of names with a length
equal to the size of the third dimension of data. There will be a
separate trace plotted for each condition. If condition is a Series
with a name attribute, the name will form the title for the plot
legend (unless legend is set to False).
err_style : string or list of strings or None
Names of ways to plot uncertainty across units from set of
{ci_band, ci_bars, boot_traces, boot_kde, unit_traces, unit_points}.
Can use one or more than one method.
ci : float or list of floats in [0, 100]
Confidence interval size(s). If a list, it will stack the error
plots for each confidence interval. Only relevant for error styles
with "ci" in the name.
interpolate : boolean
Whether to do a linear interpolation between each timepoint when
plotting. The value of this parameter also determines the marker
used for the main plot traces, unless marker is specified as a keyword
argument.
color : seaborn palette or matplotlib color name or dictionary
Palette or color for the main plots and error representation (unless
plotting by unit, which can be separately controlled with err_palette).
If a dictionary, should map condition name to color spec.
estimator : callable
Function to determine central tendency and to pass to bootstrap
must take an ``axis`` argument.
n_boot : int
Number of bootstrap iterations.
err_palette : seaborn palette
Palette name or list of colors used when plotting data for each unit.
err_kws : dict, optional
Keyword argument dictionary passed through to matplotlib function
generating the error plot.
legend : bool, optional
If ``True`` and there is a ``condition`` variable, add a legend to
the plot.
ax : axis object, optional
Plot in given axis; if None creates a new figure
kwargs :
Other keyword arguments are passed to main plot() call
Returns
-------
ax : matplotlib axis
axis with plot data
Examples
--------
Plot a trace with translucent confidence bands:
.. plot::
:context: close-figs
>>> import numpy as np; np.random.seed(22)
>>> import seaborn as sns; sns.set(color_codes=True)
>>> x = np.linspace(0, 15, 31)
>>> data = np.sin(x) + np.random.rand(10, 31) + np.random.randn(10, 1)
>>> ax = sns.tsplot(data=data)
Plot a long-form dataframe with several conditions:
.. plot::
:context: close-figs
>>> gammas = sns.load_dataset("gammas")
>>> ax = sns.tsplot(time="timepoint", value="BOLD signal",
... unit="subject", condition="ROI",
... data=gammas)
Use error bars at the positions of the observations:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="ci_bars", color="g")
Don't interpolate between the observations:
.. plot::
:context: close-figs
>>> import matplotlib.pyplot as plt
>>> ax = sns.tsplot(data=data, err_style="ci_bars", interpolate=False)
Show multiple confidence bands:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, ci=[68, 95], color="m")
Use a different estimator:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, estimator=np.median)
Show each bootstrap resample:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="boot_traces", n_boot=500)
Show the trace from each sampling unit:
.. plot::
:context: close-figs
>>> ax = sns.tsplot(data=data, err_style="unit_traces")
"""
# Sort out default values for the parameters
if ax is None:
ax = plt.gca()
if err_kws is None:
err_kws = {}
# Handle different types of input data
if isinstance(data, pd.DataFrame):
xlabel = time
ylabel = value
# Condition is optional
if condition is None:
condition = pd.Series(np.ones(len(data)))
legend = False
legend_name = None
n_cond = 1
else:
legend = True and legend
legend_name = condition
n_cond = len(data[condition].unique())
else:
data = np.asarray(data)
# Data can be a timecourse from a single unit or
# several observations in one condition
if data.ndim == 1:
data = data[np.newaxis, :, np.newaxis]
elif data.ndim == 2:
data = data[:, :, np.newaxis]
n_unit, n_time, n_cond = data.shape
# Units are experimental observations. Maybe subjects, or neurons
if unit is None:
units = np.arange(n_unit)
unit = "unit"
units = np.repeat(units, n_time * n_cond)
ylabel = None
# Time forms the xaxis of the plot
if time is None:
times = np.arange(n_time)
else:
times = np.asarray(time)
xlabel = None
if hasattr(time, "name"):
xlabel = time.name
time = "time"
times = np.tile(np.repeat(times, n_cond), n_unit)
# Conditions split the timeseries plots
if condition is None:
conds = range(n_cond)
legend = False
if isinstance(color, dict):
err = "Must have condition names if using color dict."
raise ValueError(err)
else:
conds = np.asarray(condition)
legend = True and legend
if hasattr(condition, "name"):
legend_name = condition.name
else:
legend_name = None
condition = "cond"
conds = np.tile(conds, n_unit * n_time)
# Value forms the y value in the plot
if value is None:
ylabel = None
else:
ylabel = value
value = "value"
# Convert to long-form DataFrame
data = pd.DataFrame(dict(value=data.ravel(),
time=times,
unit=units,
cond=conds))
# Set up the err_style and ci arguments for the loop below
if isinstance(err_style, string_types):
err_style = [err_style]
elif err_style is None:
err_style = []
if not hasattr(ci, "__iter__"):
ci = [ci]
# Set up the color palette
if color is None:
current_palette = utils.get_color_cycle()
if len(current_palette) < n_cond:
colors = color_palette("husl", n_cond)
else:
colors = color_palette(n_colors=n_cond)
elif isinstance(color, dict):
colors = [color[c] for c in data[condition].unique()]
else:
try:
colors = color_palette(color, n_cond)
except ValueError:
color = mpl.colors.colorConverter.to_rgb(color)
colors = [color] * n_cond
# Do a groupby with condition and plot each trace
for c, (cond, df_c) in enumerate(data.groupby(condition, sort=False)):
df_c = df_c.pivot(unit, time, value)
x = df_c.columns.values.astype(np.float)
# Bootstrap the data for confidence intervals
boot_data = algo.bootstrap(df_c.values, n_boot=n_boot,
axis=0, func=estimator)
cis = [utils.ci(boot_data, v, axis=0) for v in ci]
central_data = estimator(df_c.values, axis=0)
# Get the color for this condition
color = colors[c]
# Use subroutines to plot the uncertainty
for style in err_style:
# Allow for null style (only plot central tendency)
if style is None:
continue
# Grab the function from the global environment
try:
plot_func = globals()["_plot_%s" % style]
except KeyError:
raise ValueError("%s is not a valid err_style" % style)
# Possibly set up to plot each observation in a different color
if err_palette is not None and "unit" in style:
orig_color = color
color = color_palette(err_palette, len(df_c.values))
# Pass all parameters to the error plotter as keyword args
plot_kwargs = dict(ax=ax, x=x, data=df_c.values,
boot_data=boot_data,
central_data=central_data,
color=color, err_kws=err_kws)
# Plot the error representation, possibly for multiple cis
for ci_i in cis:
plot_kwargs["ci"] = ci_i
plot_func(**plot_kwargs)
if err_palette is not None and "unit" in style:
color = orig_color
# Plot the central trace
kwargs.setdefault("marker", "" if interpolate else "o")
ls = kwargs.pop("ls", "-" if interpolate else "")
kwargs.setdefault("linestyle", ls)
label = cond if legend else "_nolegend_"
ax.plot(x, central_data, color=color, label=label, **kwargs)
# Pad the sides of the plot only when not interpolating
ax.set_xlim(x.min(), x.max())
x_diff = x[1] - x[0]
if not interpolate:
ax.set_xlim(x.min() - x_diff, x.max() + x_diff)
# Add the plot labels
if xlabel is not None:
ax.set_xlabel(xlabel)
if ylabel is not None:
ax.set_ylabel(ylabel)
if legend:
ax.legend(loc=0, title=legend_name)
return ax
# Subroutines for tsplot errorbar plotting
# ----------------------------------------
def _plot_ci_band(ax, x, ci, color, err_kws, **kwargs):
"""Plot translucent error bands around the central tendancy."""
low, high = ci
if "alpha" not in err_kws:
err_kws["alpha"] = 0.2
ax.fill_between(x, low, high, facecolor=color, **err_kws)
def _plot_ci_bars(ax, x, central_data, ci, color, err_kws, **kwargs):
"""Plot error bars at each data point."""
for x_i, y_i, (low, high) in zip(x, central_data, ci.T):
ax.plot([x_i, x_i], [low, high], color=color,
solid_capstyle="round", **err_kws)
def _plot_boot_traces(ax, x, boot_data, color, err_kws, **kwargs):
"""Plot 250 traces from bootstrap."""
err_kws.setdefault("alpha", 0.25)
err_kws.setdefault("linewidth", 0.25)
if "lw" in err_kws:
err_kws["linewidth"] = err_kws.pop("lw")
ax.plot(x, boot_data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_traces(ax, x, data, ci, color, err_kws, **kwargs):
"""Plot a trace for each observation in the original data."""
if isinstance(color, list):
if "alpha" not in err_kws:
err_kws["alpha"] = .5
for i, obs in enumerate(data):
ax.plot(x, obs, color=color[i], label="_nolegend_", **err_kws)
else:
if "alpha" not in err_kws:
err_kws["alpha"] = .2
ax.plot(x, data.T, color=color, label="_nolegend_", **err_kws)
def _plot_unit_points(ax, x, data, color, err_kws, **kwargs):
"""Plot each original data point discretely."""
if isinstance(color, list):
for i, obs in enumerate(data):
ax.plot(x, obs, "o", color=color[i], alpha=0.8, markersize=4,
label="_nolegend_", **err_kws)
else:
ax.plot(x, data.T, "o", color=color, alpha=0.5, markersize=4,
label="_nolegend_", **err_kws)
def _plot_boot_kde(ax, x, boot_data, color, **kwargs):
"""Plot the kernal density estimate of the bootstrap distribution."""
kwargs.pop("data")
_ts_kde(ax, x, boot_data, color, **kwargs)
def _plot_unit_kde(ax, x, data, color, **kwargs):
"""Plot the kernal density estimate over the sample."""
_ts_kde(ax, x, data, color, **kwargs)
def _ts_kde(ax, x, data, color, **kwargs):
"""Upsample over time and plot a KDE of the bootstrap distribution."""
kde_data = []
y_min, y_max = data.min(), data.max()
y_vals = np.linspace(y_min, y_max, 100)
upsampler = interpolate.interp1d(x, data)
data_upsample = upsampler(np.linspace(x.min(), x.max(), 100))
for pt_data in data_upsample.T:
pt_kde = stats.kde.gaussian_kde(pt_data)
kde_data.append(pt_kde(y_vals))
kde_data = np.transpose(kde_data)
rgb = mpl.colors.ColorConverter().to_rgb(color)
img = np.zeros((kde_data.shape[0], kde_data.shape[1], 4))
img[:, :, :3] = rgb
kde_data /= kde_data.max(axis=0)
kde_data[kde_data > 1] = 1
img[:, :, 3] = kde_data
ax.imshow(img, interpolation="spline16", zorder=2,
extent=(x.min(), x.max(), y_min, y_max),
aspect="auto", origin="lower")
| bsd-3-clause |
cbyn/bitpredict | app/run_charts_extended.py | 2 | 5244 | import pandas as pd
import pymongo
from bokeh.plotting import cursession, figure, output_server, push
from bokeh.models.formatters import DatetimeTickFormatter, PrintfTickFormatter
from bokeh.io import vplot
from bokeh import embed
from json import load
from urllib2 import urlopen
import time
client = pymongo.MongoClient()
db = client['bitmicro']
collection = db['btc_predictions']
def get_data():
cursor = collection.find().limit(3*60*60).sort('_id', pymongo.DESCENDING)
data = pd.DataFrame(list(cursor))
data = data.set_index('_id')
data = data.sort_index(ascending=True)
timestamps = pd.to_datetime(data.index, unit='s').to_series()
prices = data.price
predictions = data.prediction*10000
returns = (data.position*data.change).cumsum()*10000
return timestamps, prices, predictions, returns
timestamps, prices, predictions, returns = get_data()
output_server('bitpredict_extended')
background = '#f2f2f2'
ylabel_standoff = 0
xformatter = DatetimeTickFormatter(formats=dict(hours=["%H:%M"]))
yformatter = PrintfTickFormatter(format="%8.1f")
p1 = figure(title=None,
plot_width=750,
plot_height=300,
x_axis_type='datetime',
min_border_top=10,
min_border_bottom=33,
background_fill=background,
tools='',
toolbar_location=None)
p1.line(x=timestamps,
y=prices,
name='prices',
color='#4271ae',
line_width=1,
legend='Bitcoin Bid/Ask Midpoint',
line_cap='round',
line_join='round')
p1.legend.orientation = 'top_left'
p1.legend.border_line_color = background
p1.outline_line_color = None
p1.xgrid.grid_line_color = 'white'
p1.ygrid.grid_line_color = 'white'
p1.axis.axis_line_color = None
p1.axis.major_tick_line_color = None
p1.axis.minor_tick_line_color = None
p1.yaxis.axis_label = 'Price'
p1.yaxis.axis_label_standoff = ylabel_standoff
p1.xaxis.formatter = xformatter
p1.yaxis.formatter = PrintfTickFormatter(format='%8.2f')
p1.yaxis.major_label_text_font = 'courier'
p1.xaxis.major_label_text_font = 'courier'
p2 = figure(title=None,
plot_width=750,
plot_height=295,
x_axis_type='datetime',
min_border_top=5,
min_border_bottom=33,
background_fill=background,
tools='',
toolbar_location=None)
p2.line(x=timestamps,
y=predictions,
name='predictions',
color='#c82829',
line_width=1,
legend='30 Second Prediction',
line_cap='round',
line_join='round')
p2.legend.orientation = 'top_left'
p2.legend.border_line_color = background
p2.outline_line_color = None
p2.xgrid.grid_line_color = 'white'
p2.ygrid.grid_line_color = 'white'
p2.axis.axis_line_color = None
p2.axis.major_tick_line_color = None
p2.axis.minor_tick_line_color = None
p2.yaxis.axis_label = 'Basis Points'
p2.yaxis.axis_label_standoff = ylabel_standoff
p2.xaxis.formatter = xformatter
p2.yaxis.formatter = yformatter
p2.yaxis.major_label_text_font = 'courier'
p2.xaxis.major_label_text_font = 'courier'
p2.x_range = p1.x_range
p3 = figure(title=None,
plot_width=750,
plot_height=320,
x_axis_type='datetime',
min_border_top=5,
min_border_bottom=10,
background_fill=background,
x_axis_label='Greenwich Mean Time',
tools='',
toolbar_location=None)
p3.line(x=timestamps,
y=returns,
name='returns',
color='#8959a8',
line_width=1,
legend='Cumulative Return',
line_cap='round',
line_join='round')
p3.legend.orientation = 'top_left'
p3.legend.border_line_color = background
p3.outline_line_color = None
p3.xgrid.grid_line_color = 'white'
p3.ygrid.grid_line_color = 'white'
p3.axis.axis_line_color = None
p3.axis.major_tick_line_color = None
p3.axis.minor_tick_line_color = None
p3.yaxis.axis_label = 'Basis Points'
p3.yaxis.axis_label_standoff = ylabel_standoff
p3.xaxis.formatter = xformatter
p3.yaxis.formatter = yformatter
p3.xaxis.axis_label_standoff = 12
p3.yaxis.major_label_text_font = 'courier'
p3.xaxis.major_label_text_font = 'courier'
p3.x_range = p1.x_range
vp = vplot(p1, p2, p3)
push()
ip = load(urlopen('http://jsonip.com'))['ip']
ssn = cursession()
ssn.publish()
tag = embed.autoload_server(vp, ssn, public=True).replace('localhost', ip)
html = """
{%% extends "layout.html" %%}
{%% block bokeh %%}
%s
{%% endblock %%}
""" % tag
with open('templates/extended.html', 'w+') as f:
f.write(html)
renderer = p1.select(dict(name='prices'))
ds_prices = renderer[0].data_source
renderer = p2.select(dict(name='predictions'))
ds_predictions = renderer[0].data_source
renderer = p3.select(dict(name='returns'))
ds_returns = renderer[0].data_source
while True:
timestamps, prices, predictions, returns = get_data()
ds_prices.data['x'] = timestamps
ds_predictions.data['x'] = timestamps
ds_returns.data['x'] = timestamps
ds_prices.data['y'] = prices
ds_predictions.data['y'] = predictions
ds_returns.data['y'] = returns
ssn.store_objects(ds_prices)
ssn.store_objects(ds_predictions)
ssn.store_objects(ds_returns)
time.sleep(60)
| mit |
JensWehner/votca-scripts | xtp/xtp_kmc_plottrajectory.py | 2 | 3563 | #!/usr/bin/env python
import matplotlib as mpl
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d import proj3d
import numpy as np
import matplotlib.pyplot as plt
import csv
import re
import sys
from __tools__ import MyParser
parser=MyParser(description="Tool to visualize kmc trajectory .csv files" )
parser.add_argument("-t","--trajectory",type=str,nargs="+",required=True,help="Files to visualize .csv format")
parser.add_argument("--steps",type=int,default=-1,help="Maximum number of steps to read in. default:-1")
args=parser.parse_args()
#parser.add_argument('-p',"--plot", action='store_const', const=1, default=0,help="Calculate exciton coupling in classical limit")
if type(args.trajectory)==str:
args.trajectory=[args.trajectory]
class carrierstorage(object):
numberofobjects= 0
def __init__(self):
carrierstorage.numberofobjects+=1
self.id=carrierstorage.numberofobjects
self.traj=[]
def append(self,posvec):
self.traj.append(posvec)
def array(self):
return np.array(self.traj)
def info(self):
print "Carrier No",self.id
print self.array().shape
listofcarriers=[]
for filename in args.trajectory:
locallistofcarriers=[]
with open(filename,"r") as f:
reader = csv.reader(f, dialect="excel-tab")
conversion=1
start=2
for i,row in enumerate(reader):
#print i
if args.steps>0 and i>args.steps:
break
if i==0:
commentlinelength=len(row)
if "carrier" in ''.join(row):
noofcharges=''.join(row).count("carrier")/3
else:
noofcharges=len(row)/3
print "Found {} carriers in file {}".format(noofcharges,filename)
if noofcharges==0:
break
for i in range(noofcharges):
newcarrier=carrierstorage()
listofcarriers.append(newcarrier)
locallistofcarriers.append(newcarrier)
continue
if i==1:
#print row
if len(row)!=commentlinelength:
print "header and trajectory do not have same number of columns. Ignoring steps colum"
start=1
nprow=np.array(row,dtype=float)
firstcoord=nprow[start:start+3]
if np.sqrt(np.sum(firstcoord**2))<0.0001:
print "Units is probably meter instead of nm. Old trajectory format"
conversion=1E9
else:
print "Units is probably nm."
if i>0:
nprow=np.array(row,dtype=float)
for j,carrier in enumerate(locallistofcarriers):
s=start+j*3
carrier.append(conversion*nprow[s:s+3])
print "Found {} carriers in total".format(len(listofcarriers))
if len(listofcarriers)==0:
print "No carriers found"
sys.exit()
fig = plt.figure(1)
ax = fig.gca(projection='3d')
ax.set_xlabel('x [nm]')
ax.set_ylabel('y [nm]')
ax.set_zlabel('z [nm]')
for i in listofcarriers:
posarray=i.array()
#print posarray
ax.plot(posarray[:,0], posarray[:,1], posarray[:,2])
ax.scatter(posarray[0,0], posarray[0,1], posarray[0,2],s=200,marker="+",c="black")
ax.scatter(posarray[-1,0], posarray[-1,1], posarray[-1,2],s=400,marker="x",c="black")
max_range = np.array([posarray[:,0].max()-posarray[:,0].min(), posarray[:,1].max()-posarray[:,1].min(), posarray[:,2].max()-posarray[:,2].min()]).max()
Xb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][0].flatten() + 0.5*(posarray[:,0].max()+posarray[:,0].min())
Yb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][1].flatten() + 0.5*(posarray[:,1].max()+posarray[:,1].min())
Zb = 0.5*max_range*np.mgrid[-1:2:2,-1:2:2,-1:2:2][2].flatten() + 0.5*(posarray[:,2].max()+posarray[:,2].min())
for xb, yb, zb in zip(Xb, Yb, Zb):
ax.plot([xb], [yb], [zb], 'w')
plt.grid()
plt.show()
| apache-2.0 |
richardhsu/naarad | src/naarad/graphing/matplotlib_naarad.py | 4 | 9100 | # coding=utf-8
"""
Copyright 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy
import os
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import logging
import naarad.naarad_constants as CONSTANTS
logger = logging.getLogger('naarad.graphing.matplotlib')
def convert_to_mdate(date_str):
mdate = mdates.epoch2num(int(date_str) / 1000)
return mdate
# MPL-WA-07
# matplotlib does not rotate colors correctly when using multiple y axes. This method fills in that gap.
def get_current_color(index):
return CONSTANTS.COLOR_PALETTE[index % len(CONSTANTS.COLOR_PALETTE)]
def get_graph_metadata(plots):
height = 0
width = 0
title = ''
for plot in plots:
if plot.graph_height > height:
height = plot.graph_height
if plot.graph_width > width:
width = plot.graph_width
if title == '':
title = plot.graph_title
elif title != plot.graph_title:
title = title + ',' + plot.graph_title
return height / 80, width / 80, title
def curate_plot_list(plots):
delete_nodes = []
for plot in plots:
if os.path.exists(plot.input_csv):
if not os.path.getsize(plot.input_csv):
logger.warning("%s file is empty. No plot corresponding to this file will be generated", plot.input_csv)
delete_nodes.append(plot)
else:
logger.warning("%s file does not exist. No plot corresponding to this file will be generated", plot.input_csv)
delete_nodes.append(plot)
for node in delete_nodes:
plots.remove(node)
return plots
def highlight_region(plt, start_x, end_x):
"""
Highlight a region on the chart between the specified start and end x-co-ordinates.
    param pyplot plt: matplotlib pyplot which contains the charts to be highlighted
param string start_x : epoch time millis
param string end_x : epoch time millis
"""
start_x = convert_to_mdate(start_x)
end_x = convert_to_mdate(end_x)
plt.axvspan(start_x, end_x, color=CONSTANTS.HIGHLIGHT_COLOR, alpha=CONSTANTS.HIGHLIGHT_ALPHA)
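# Example call (assuming epoch-millisecond strings, per the docstring above):
#   highlight_region(plt, '1423500000000', '1423500600000')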
def graph_data(list_of_plots, output_directory, resource_path, output_filename):
plots = curate_plot_list(list_of_plots)
plot_count = len(plots)
if plot_count == 0:
return False, None
graph_height, graph_width, graph_title = get_graph_metadata(list_of_plots)
current_plot_count = 0
fig, axis = plt.subplots()
fig.set_size_inches(graph_width, graph_height)
if plot_count < 2:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET)
else:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET,
right=CONSTANTS.SUBPLOT_RIGHT_OFFSET - CONSTANTS.Y_AXIS_OFFSET * (plot_count - 2))
current_axis = axis
for plot in plots:
current_plot_count += 1
logger.info('Processing: ' + plot.input_csv + ' [ ' + output_filename + ' ]')
timestamp, yval = numpy.loadtxt(plot.input_csv, unpack=True, delimiter=',', converters={0: convert_to_mdate})
maximum_yvalue = numpy.amax(yval) * (1.0 + CONSTANTS.ZOOM_FACTOR * current_plot_count)
minimum_yvalue = numpy.amin(yval) * (1.0 - CONSTANTS.ZOOM_FACTOR * current_plot_count)
if current_plot_count == 0:
current_axis.yaxis.set_ticks_position('left')
if current_plot_count > 1:
current_axis = axis.twinx()
current_axis.yaxis.grid(False)
# Set right y-axis for additional plots
current_axis.yaxis.set_ticks_position('right')
# Offset the right y axis to avoid overlap
current_axis.spines['right'].set_position(('axes', 1 + CONSTANTS.Y_AXIS_OFFSET * (current_plot_count - 2)))
current_axis.spines['right'].set_smart_bounds(False)
current_axis.spines['right'].set_color(get_current_color(current_plot_count))
current_axis.set_frame_on(True)
current_axis.patch.set_visible(False)
current_axis.set_ylabel(plot.y_label, color=get_current_color(current_plot_count), fontsize=CONSTANTS.Y_LABEL_FONTSIZE)
current_axis.set_ylim([minimum_yvalue, maximum_yvalue])
if plot.graph_type == 'line':
current_axis.plot_date(x=timestamp, y=yval, linestyle='-', marker=None, color=get_current_color(current_plot_count))
else:
current_axis.plot_date(x=timestamp, y=yval, marker='.', color=get_current_color(current_plot_count))
y_ticks = current_axis.get_yticklabels()
for y_tick in y_ticks:
y_tick.set_color(get_current_color(current_plot_count))
y_tick.set_fontsize(CONSTANTS.Y_TICKS_FONTSIZE)
for x_tick in current_axis.get_xticklabels():
x_tick.set_fontsize(CONSTANTS.X_TICKS_FONTSIZE)
if plot.highlight_regions is not None:
for region in plot.highlight_regions:
highlight_region(plt, str(region.start_timestamp), str(region.end_timestamp))
axis.yaxis.grid(True)
axis.xaxis.grid(True)
axis.set_title(graph_title)
axis.set_xlabel('Time')
x_date_format = mdates.DateFormatter(CONSTANTS.X_TICKS_DATEFORMAT)
axis.xaxis.set_major_formatter(x_date_format)
plot_file_name = os.path.join(output_directory, output_filename + ".png")
fig.savefig(plot_file_name)
plt.close()
# Create html fragment to be used for creation of the report
with open(os.path.join(output_directory, output_filename + '.div'), 'w') as div_file:
div_file.write('<a name="' + os.path.basename(plot_file_name).replace(".png", "").replace(".diff", "") + '"></a><div class="col-md-12"><img src="' +
resource_path + '/' + os.path.basename(plot_file_name) + '" id="' + os.path.basename(plot_file_name) +
'" width="100%" height="auto"/></div><div class="col-md-12"><p align="center"><strong>' + os.path.basename(plot_file_name) +
'</strong></p></div><hr />')
return True, os.path.join(output_directory, output_filename + '.div')
def graph_data_on_the_same_graph(list_of_plots, output_directory, resource_path, output_filename):
"""
graph_data_on_the_same_graph: put a list of plots on the same graph: currently it supports CDF
"""
maximum_yvalue = -float('inf')
minimum_yvalue = float('inf')
plots = curate_plot_list(list_of_plots)
plot_count = len(plots)
if plot_count == 0:
return False, None
graph_height, graph_width, graph_title = get_graph_metadata(plots)
current_plot_count = 0
fig, axis = plt.subplots()
fig.set_size_inches(graph_width, graph_height)
if plot_count < 2:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET)
else:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET,
right=CONSTANTS.SUBPLOT_RIGHT_OFFSET - CONSTANTS.Y_AXIS_OFFSET * (plot_count - 2))
# Generate each plot on the graph
for plot in plots:
current_plot_count += 1
logger.info('Processing: ' + plot.input_csv + ' [ ' + output_filename + ' ]')
xval, yval = numpy.loadtxt(plot.input_csv, unpack=True, delimiter=',')
axis.plot(xval, yval, linestyle='-', marker=None, color=get_current_color(current_plot_count), label=plot.plot_label)
axis.legend()
maximum_yvalue = max(maximum_yvalue, numpy.amax(yval) * (1.0 + CONSTANTS.ZOOM_FACTOR * current_plot_count))
minimum_yvalue = min(minimum_yvalue, numpy.amin(yval) * (1.0 - CONSTANTS.ZOOM_FACTOR * current_plot_count))
# Set properties of the plots
axis.yaxis.set_ticks_position('left')
axis.set_xlabel(plots[0].x_label)
axis.set_ylabel(plots[0].y_label, fontsize=CONSTANTS.Y_LABEL_FONTSIZE)
axis.set_ylim([minimum_yvalue, maximum_yvalue])
axis.yaxis.grid(True)
axis.xaxis.grid(True)
axis.set_title(graph_title)
plot_file_name = os.path.join(output_directory, output_filename + ".png")
fig.savefig(plot_file_name)
plt.close()
# Create html fragment to be used for creation of the report
with open(os.path.join(output_directory, output_filename + '.div'), 'w') as div_file:
div_file.write('<a name="' + os.path.basename(plot_file_name).replace(".png", "").replace(".diff", "") + '"></a><div class="col-md-12"><img src="' +
resource_path + '/' + os.path.basename(plot_file_name) + '" id="' + os.path.basename(plot_file_name) +
'" width="100%" height="auto"/></div><div class="col-md-12"><p align=center>' + os.path.basename(plot_file_name) + '<br/></p></div>')
return True, os.path.join(output_directory, output_filename + '.div')
| apache-2.0 |
ContextLab/quail | quail/analysis/lagcrp.py | 1 | 4765 | import numpy as np
import pandas as pd
from .recmat import recall_matrix
from scipy.spatial.distance import cdist
from ..helpers import check_nan
def lagcrp_helper(egg, match='exact', distance='euclidean',
ts=None, features=None):
"""
Computes probabilities for each transition distance (probability that a word
recalled will be a given distance--in presentation order--from the previous
recalled word).
Parameters
----------
egg : quail.Egg
Data to analyze
match : str (exact, best or smooth)
Matching approach to compute recall matrix. If exact, the presented and
recalled items must be identical (default). If best, the recalled item
that is most similar to the presented items will be selected. If smooth,
a weighted average of all presented items will be used, where the
weights are derived from the similarity between the recalled item and
each presented item.
distance : str
The distance function used to compare presented and recalled items.
Applies only to 'best' and 'smooth' matching approaches. Can be any
        distance function supported by scipy.spatial.distance.cdist.
Returns
----------
prec : numpy array
        each float is the probability of transition distance (distances indexed by
        position, from -(n-1) to (n-1), excluding zero)
"""
def lagcrp(rec, lstlen):
"""Computes lag-crp for a given recall list"""
def check_pair(a, b):
if (a>0 and b>0) and (a!=b):
return True
else:
return False
def compute_actual(rec, lstlen):
arr=pd.Series(data=np.zeros((lstlen)*2),
index=list(range(-lstlen,0))+list(range(1,lstlen+1)))
recalled=[]
for trial in range(0,len(rec)-1):
a=rec[trial]
b=rec[trial+1]
if check_pair(a, b) and (a not in recalled) and (b not in recalled):
arr[b-a]+=1
recalled.append(a)
return arr
def compute_possible(rec, lstlen):
arr=pd.Series(data=np.zeros((lstlen)*2),
index=list(range(-lstlen,0))+list(range(1,lstlen+1)))
recalled=[]
for trial in rec:
if np.isnan(trial):
pass
else:
lbound=int(1-trial)
ubound=int(lstlen-trial)
chances=list(range(lbound,0))+list(range(1,ubound+1))
for each in recalled:
if each-trial in chances:
chances.remove(each-trial)
arr[chances]+=1
recalled.append(trial)
return arr
actual = compute_actual(rec, lstlen)
possible = compute_possible(rec, lstlen)
crp = [0.0 if j == 0 else i / j for i, j in zip(actual, possible)]
crp.insert(int(len(crp) / 2), np.nan)
return crp
def nlagcrp(distmat, ts=None):
def lagcrp_model(s):
idx = list(range(0, -s, -1))
return np.array([list(range(i, i+s)) for i in idx])
# remove nan columns
distmat = distmat[:,~np.all(np.isnan(distmat), axis=0)].T
model = lagcrp_model(distmat.shape[1])
lagcrp = np.zeros(ts * 2)
for rdx in range(len(distmat)-1):
item = distmat[rdx, :]
next_item = distmat[rdx+1, :]
if not np.isnan(item).any() and not np.isnan(next_item).any():
outer = np.outer(item, next_item)
lagcrp += np.array(list(map(lambda lag: np.mean(outer[model==lag]), range(-ts, ts))))
lagcrp /= ts
lagcrp = list(lagcrp)
lagcrp.insert(int(len(lagcrp) / 2), np.nan)
return np.array(lagcrp)
def _format(p, r):
p = np.matrix([np.array(i) for i in p])
if p.shape[0]==1:
p=p.T
r = map(lambda x: [np.nan]*p.shape[1] if check_nan(x) else x, r)
r = np.matrix([np.array(i) for i in r])
if r.shape[0]==1:
r=r.T
return p, r
opts = dict(match=match, distance=distance, features=features)
    if match == 'exact':
opts.update({'features' : 'item'})
recmat = recall_matrix(egg, **opts)
if not ts:
ts = egg.pres.shape[1]
if match in ['exact', 'best']:
lagcrp = [lagcrp(lst, egg.list_length) for lst in recmat]
    elif match == 'smooth':
lagcrp = np.atleast_2d(np.mean([nlagcrp(r, ts=ts) for r in recmat], 0))
else:
raise ValueError('Match must be set to exact, best or smooth.')
return np.nanmean(lagcrp, axis=0)
| mit |
sbhal/be-fruitful | pythonProject/qlearning_tf.py | 1 | 5122 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Thu Jun 8 19:14:33 2017
@author: sbhal
"""
import numpy as np
import pandas as pd
import random
import tensorflow as tf
class qlearningTF:
def __init__(self, m_criteria, initialWeights=None):
        if initialWeights is None:
self.weights = np.full(m_criteria, 3) #assign dtype
else:
self.weights = initialWeights
self.weightBins = 3 #.3 .7. .5
self.e = 0.5
self.lr = .8
self.y = .95
self.m_criteria = m_criteria
self.actionStatesCount = 3 #+-0
# initialize Q table
self.currState = "33"
self.Qrows = pow(self.weightBins,self.m_criteria)
self.Qcols = self.m_criteria* self.actionStatesCount
# These lines establish the feed-forward part of the network used to choose actions
self.inputs1 = tf.placeholder(shape=[1, self.Qrows], dtype=tf.float32)
#self.W = tf.Variable(tf.random_uniform([self.Qrows, self.Qcols], 0, 0.01))
self.W = tf.Variable(tf.random_uniform([self.Qrows, self.Qcols], 0, 0.00))
self.Qout = tf.matmul(self.inputs1, self.W)
self.predict = tf.argmax(self.Qout, 1)
# Below we obtain the loss by taking the sum of squares difference between the target and prediction Q values.
self.nextQ = tf.placeholder(shape=[1, self.Qcols], dtype=tf.float32)
loss = tf.reduce_sum(tf.square(self.nextQ - self.Qout))
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
self.updateModel = trainer.minimize(loss)
self.sess = tf.Session()
self.sess.run(tf.initialize_all_variables())
def learn(self, s, a, reward, s1): #curState ----action----> finalState (+reward)
allQ = self.sess.run(self.Qout, feed_dict={self.inputs1: np.identity(self.Qrows)[s:s + 1]})
value2 = np.max(self.sess.run(self.Qout,feed_dict={self.inputs1:np.identity(self.Qrows)[s1:s1+1]}))
allQ[0, a] = reward + self.y * value2
_, W1 = self.sess.run([self.updateModel, self.W], feed_dict={self.inputs1: np.identity(self.Qrows)[s:s + 1], self.nextQ: allQ})
# print(self.sess.run(self.W), " weight updated @ state", self.currState)
self.currState = self.state_num_to_string(s1)
def currToFinalState (self, a, c):
c_num = list(map(int, c))
if a[2] == "+":
c_num[int(a[1])] = min(7, c_num[int(a[1])]+2)
else:
c_num[int(a[1])] = max(3, c_num[int(a[1])] - 2)
return "".join(map(str,c_num))
def update(self, action, latency):
reward = 0 if latency==0 else 1/latency
finalState = self.currToFinalState(action, self.currState)
s = self.state_string_to_num(self.currState)
s1 = self.state_string_to_num(finalState)
a = self.action_string_to_num(action)
self.learn (s, a, reward, s1)
def choose_action(self, currState):
#verify if currState has correct format
s = self.state_string_to_num(currState)
if np.random.rand(1) < self.e:
# print("Random action Chosen")
return self.action_num_to_string(random.randrange(0, self.Qcols))
else:
a = np.argmax(self.sess.run(self.Qout,feed_dict={self.inputs1:np.identity(self.Qrows)[s:s+1]}))
return self.action_num_to_string(a)
def state_string_to_num(self, s):
dict = {'3': 0,
'5': 1,
'7': 2}
sum =0
for i, c in enumerate(reversed(s)):
sum += pow(self.weightBins,i) * dict[c]
return sum
def state_num_to_string(self, num):
dict = {'0':'3',
'1':'5',
'2':'7'}
mynum = num
strr = ""
string = ""
for i in reversed(range(0,self.m_criteria)):
strr += str(mynum // pow(self.weightBins, i))
mynum = mynum % pow(self.weightBins, i)
for i,c in enumerate(strr):
string += dict[strr[i]]
return string
def action_num_to_string(self, num):
dict = {0: "+",
1: "-",
2: "0"}
quotient = num // self.weightBins
remainder = num % self.weightBins
return "w"+ str(quotient) + dict[remainder]
def action_string_to_num(self, s):
dict = { "+": 0,
"-": 1,
"0": 2}
return (int(s[1]) * self.weightBins) + dict[s[2]]
if __name__ == "__main__":
myIns = qlearningTF(m_criteria=2)
print (myIns.state_string_to_num("33"))
print(myIns.state_string_to_num("53"))
print(myIns.state_string_to_num("77"))
print(myIns.action_num_to_string(0))
print(myIns.action_num_to_string(4))
print(myIns.state_num_to_string(0))
print(myIns.state_num_to_string(3))
print(myIns.state_num_to_string(8))
print("From here:")
action = myIns.choose_action("33")
print("Action given is", action)
myIns.update(action, 300)
print("new")
action = myIns.choose_action("77")
myIns.update(action, 300)
print(myIns.choose_action("33"))
| mit |
kyleabeauchamp/EnsemblePaper | code/model_building/evaluate_BW_entropy.py | 1 | 1791 | import pandas as pd
import numpy as np
from fitensemble import bayesian_weighting, belt
import experiment_loader
import ALA3
prior = "BW"
ff = "amber96"
stride = 1000
regularization_strength = 10.0
thin = 400
factor = 50
steps = 1000000
predictions_framewise, measurements, uncertainties = experiment_loader.load(ff, stride=stride)
phi, psi, ass_raw0, state_ind0 = experiment_loader.load_rama(ff, stride)
num_states = len(phi)
assignments = np.arange(num_states)
prior_pops = np.ones(num_states)
predictions = pd.DataFrame(bayesian_weighting.framewise_to_statewise(predictions_framewise, assignments), columns=predictions_framewise.columns)
model = bayesian_weighting.MaxentBayesianWeighting(predictions.values, measurements.values, uncertainties.values, assignments, regularization_strength)
model.sample(steps * factor, thin=thin * factor)
model2 = belt.MaxEntBELT(predictions.values, measurements.values, uncertainties.values, regularization_strength)
model2.sample(steps, thin=thin)
pi = model.mcmc.trace("matrix_populations")[:, 0]
num_samples = len(pi)
data = np.zeros((num_samples, num_samples))
for i, p in enumerate(model.iterate_populations()):
print(i)
for j, p2 in enumerate(model2.iterate_populations()):
data[i, j] = p.dot(np.log(p / p2))
p_bw = model.accumulate_populations()
p_BELT = model2.accumulate_populations()
chi2 = []
prior = []
H_terms = []
for j, p2 in enumerate(model2.iterate_populations()):
mu = predictions.T.dot(p2)
chi2.append(0.5 * (((mu - measurements) / uncertainties) ** 2).sum())
prior.append(regularization_strength * -1.0 * p2.dot(np.log(p2)))
H = -np.diag(p2[:-1] ** -1.) - p[-1] ** -1.
H_terms.append(0.5 * np.linalg.slogdet(H)[1])
R = pd.DataFrame({"chi2":chi2, "prior":prior, "H":H_terms})
| gpl-3.0 |
weidel-p/nest-simulator | pynest/examples/brette_gerstner_fig_3d.py | 12 | 3030 | # -*- coding: utf-8 -*-
#
# brette_gerstner_fig_3d.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""Testing the adapting exponential integrate and fire model in NEST (Brette and Gerstner Fig 3D)
----------------------------------------------------------------------------------------------------
This example tests the adaptive exponential integrate-and-fire model (AdEx) according to
Brette and Gerstner [1]_ and reproduces Figure 3D of the paper.
Note that Brette and Gerstner give the value for `b` in `nA`.
To be consistent with the other parameters in the equations, `b` must be
converted to `pA` (pico Ampere).
References
~~~~~~~~~~~
.. [1] Brette R and Gerstner W (2005). Adaptive exponential integrate-and-fire model as an effective
description of neuronal activity J. Neurophysiology. https://doi.org/10.1152/jn.00686.2005
"""
import nest
import nest.voltage_trace
import matplotlib.pyplot as plt
nest.ResetKernel()
###############################################################################
# First we make sure that the resolution of the simulation is 0.1 ms. This is
# important, since the slope of the action potential is very steep.
res = 0.1
nest.SetKernelStatus({"resolution": res})
neuron = nest.Create("aeif_cond_exp")
###############################################################################
# Set the parameters of the neuron according to the paper.
neuron.set(V_peak=20., E_L=-60.0, a=80.0, b=80.5, tau_w=720.0)
###############################################################################
# Create and configure the stimulus which is a step current.
dc = nest.Create("dc_generator")
dc.set(amplitude=-800.0, start=0.0, stop=400.0)
###############################################################################
# We connect the DC generators.
nest.Connect(dc, neuron, 'all_to_all')
###############################################################################
# And add a ``voltmeter`` to sample the membrane potentials from the neuron
# at intervals of 0.1 ms.
voltmeter = nest.Create("voltmeter", params={'interval': 0.1})
nest.Connect(voltmeter, neuron)
###############################################################################
# Finally, we simulate for 1000 ms and plot a voltage trace to produce the
# figure.
nest.Simulate(1000.0)
nest.voltage_trace.from_device(voltmeter)
plt.axis([0, 1000, -85, 0])
nest.voltage_trace.show()
| gpl-2.0 |
ODM2/YODA-Tools | yodatools/converter/Abstract/iOutputs.py | 2 | 2345 | from odm2api.models import Base, TimeSeriesResultValues
from sqlalchemy.exc import IntegrityError, ProgrammingError
import sqlalchemy.ext.declarative.api as api
import pandas as pd
from sqlalchemy import func
class iOutputs:
def __init__(self):
pass
def parseObjects(self, session):
data = {}
schema = TimeSeriesResultValues.__table_args__['schema']
for t in self.get_table_names():
tmplist = []
try:
if t.__tablename__.lower() == "timeseriesresultvalues":
# TODO: Test if this works for database connections to mssql and mysql
if 'postgresql' in session.bind.name:
sql = """SELECT * FROM {}.timeseriesresultvalues""".format(schema)
elif 'mssql' in session.bind.name:
sql = """SELECT * FROM {}.TimeSeriesResultValues""".format(schema)
else:
sql = """SELECT * FROM TimeSeriesResultValues"""
tbl = pd.read_sql(sql, session.connection().connection.connection)
tmplist = tbl
else:
try:
for obj in session.query(t).all():
# session.expunge(o)
tmplist.append(obj)
except ProgrammingError as e:
print(e.message)
except IntegrityError as e:
print(e)
session.rollback()
if len(tmplist) > 0:
data[t.__tablename__] = tmplist
return data
def get_table_names(self):
tables = []
import inspect
import sys
# get a list of all of the classes in the module
clsmembers = inspect.getmembers(sys.modules["odm2api.models"],
lambda member: inspect.isclass(member) and member.__module__ == "odm2api.models")
for name, Tbl in clsmembers:
if isinstance(Tbl, api.DeclarativeMeta):
# check to see if the schema is already set correctly
tables.append(Tbl)
return tables
def save(self, session, path):
raise NotImplementedError()
def accept(self):
raise NotImplementedError()
| bsd-3-clause |
yugangzhang/chxanalys | chxanalys/chx_compress.py | 1 | 37856 | import os,shutil
from glob import iglob
import matplotlib.pyplot as plt
from chxanalys.chx_libs import (np, roi, time, datetime, os, getpass, db,
get_images,LogNorm, RUN_GUI)
from chxanalys.chx_generic_functions import (create_time_slice,get_detector, get_fields, get_sid_filenames,
load_data)
import struct
from tqdm import tqdm
from contextlib import closing
from multiprocessing import Pool
import dill
import sys
import gc
import pickle as pkl
from eiger_io.pims_reader import EigerImages
def run_dill_encoded(what):
fun, args = dill.loads(what)
return fun(*args)
def apply_async(pool, fun, args, callback=None):
return pool.apply_async( run_dill_encoded, (dill.dumps((fun, args)),), callback= callback)
def map_async(pool, fun, args ):
return pool.map_async(run_dill_encoded, (dill.dumps((fun, args)),))
def pass_FD(FD,n):
#FD.rdframe(n)
FD.seekimg(n)
def go_through_FD(FD):
for i in range(FD.beg, FD.end):
pass_FD(FD,i)
def compress_eigerdata( images, mask, md, filename=None, force_compress=False,
bad_pixel_threshold=1e15, bad_pixel_low_threshold=0,
hot_pixel_threshold=2**30, nobytes=4,bins=1, bad_frame_list=None,
para_compress= False, num_sub=100, dtypes='uid',reverse =True,
num_max_para_process=500, with_pickle=False, direct_load_data=False, data_path=None):
end= len(images)//bins
if filename is None:
filename= '/XF11ID/analysis/Compressed_Data' +'/uid_%s.cmp'%md['uid']
if dtypes!= 'uid':
para_compress= False
else:
if para_compress:
images='foo'
#para_compress= True
#print( dtypes )
if force_compress:
print ("Create a new compress file with filename as :%s."%filename)
if para_compress:
print( 'Using a multiprocess to compress the data.')
return para_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,
num_sub=num_sub, dtypes=dtypes, reverse=reverse,
num_max_para_process=num_max_para_process, with_pickle= with_pickle,
direct_load_data= direct_load_data,data_path=data_path)
else:
return init_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path )
else:
if not os.path.exists( filename ):
print ("Create a new compress file with filename as :%s."%filename)
if para_compress:
print( 'Using a multiprocess to compress the data.')
return para_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes= nobytes, bins=bins,
num_sub=num_sub, dtypes=dtypes, reverse=reverse,
num_max_para_process=num_max_para_process,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path)
else:
return init_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold, nobytes= nobytes, bins=bins,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path )
else:
print ("Using already created compressed file with filename as :%s."%filename)
beg=0
return read_compressed_eigerdata( mask, filename, beg, end,
bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold ,bad_frame_list=bad_frame_list,with_pickle= with_pickle, direct_load_data= direct_load_data,data_path=data_path )
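# A minimal usage sketch (illustrative; `images`, `mask` and `md` are assumed to be
# prepared upstream, e.g. via load_data and the run metadata):
#
#   mask, avg_img, imgsum, bad_frame_list = compress_eigerdata(
#       images, mask, md, filename=None, force_compress=False,
#       para_compress=True, num_sub=100, nobytes=4)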
def read_compressed_eigerdata( mask, filename, beg, end,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0,bad_frame_list=None,with_pickle= False,
direct_load_data=False,data_path=None):
'''
    Read already compressed Eiger data
Return
mask
avg_img
imsum
bad_frame_list
'''
#should use try and except instead of with_pickle in the future!
CAL = False
if not with_pickle:
CAL = True
else:
try:
mask, avg_img, imgsum, bad_frame_list_ = pkl.load( open(filename + '.pkl', 'rb' ) )
except:
CAL = True
if CAL:
FD = Multifile( filename, beg, end)
imgsum = np.zeros( FD.end- FD.beg, dtype= np.float )
avg_img = np.zeros( [FD.md['ncols'], FD.md['nrows'] ] , dtype= np.float )
imgsum, bad_frame_list_ = get_each_frame_intensityc( FD, sampling = 1,
bad_pixel_threshold=bad_pixel_threshold, bad_pixel_low_threshold=bad_pixel_low_threshold,
hot_pixel_threshold=hot_pixel_threshold, plot_ = False,
bad_frame_list=bad_frame_list)
avg_img = get_avg_imgc( FD, beg=None,end=None,sampling = 1, plot_ = False,bad_frame_list=bad_frame_list_ )
FD.FID.close()
return mask, avg_img, imgsum, bad_frame_list_
def para_compress_eigerdata( images, mask, md, filename, num_sub=100,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='uid',reverse =True,
num_max_para_process=500, cpu_core_number=72, with_pickle=True,
direct_load_data=False, data_path=None):
if dtypes=='uid':
uid= md['uid'] #images
if not direct_load_data:
detector = get_detector( db[uid ] )
images_ = load_data( uid, detector, reverse= reverse )
else:
images_ = EigerImages(data_path, md)
N= len(images_)
else:
N = len(images)
N = int( np.ceil( N/ bins ) )
Nf = int( np.ceil( N/ num_sub ) )
if Nf > cpu_core_number:
print("The process number is larger than %s (XF11ID server core number)"%cpu_core_number)
num_sub_old = num_sub
num_sub = int( np.ceil(N/cpu_core_number))
Nf = int( np.ceil( N/ num_sub ) )
print ("The sub compressed file number was changed from %s to %s"%( num_sub_old, num_sub ))
create_compress_header( md, filename +'-header', nobytes, bins )
#print( 'done for header here')
results = para_segment_compress_eigerdata( images=images, mask=mask, md=md,filename=filename,
num_sub=num_sub, bad_pixel_threshold=bad_pixel_threshold, hot_pixel_threshold=hot_pixel_threshold,
bad_pixel_low_threshold=bad_pixel_low_threshold,nobytes=nobytes, bins=bins, dtypes=dtypes,
num_max_para_process=num_max_para_process,
direct_load_data=direct_load_data, data_path=data_path)
res_ = np.array( [ results[k].get() for k in list(sorted(results.keys())) ] )
imgsum = np.zeros( N )
bad_frame_list = np.zeros( N, dtype=bool )
good_count = 1
for i in range( Nf ):
mask_, avg_img_, imgsum_, bad_frame_list_ = res_[i]
imgsum[i*num_sub: (i+1)*num_sub] = imgsum_
bad_frame_list[i*num_sub: (i+1)*num_sub] = bad_frame_list_
if i==0:
mask = mask_
avg_img = np.zeros_like( avg_img_ )
else:
mask *= mask_
if not np.sum( np.isnan( avg_img_)):
avg_img += avg_img_
good_count += 1
bad_frame_list = np.where( bad_frame_list )[0]
avg_img /= good_count
if len(bad_frame_list):
        print ('Bad frames are: %s' %bad_frame_list)
else:
print ('No bad frames are involved.')
    print( 'Combining the separated compressed files together...')
combine_compressed( filename, Nf, del_old=True)
del results
del res_
if with_pickle:
pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) )
return mask, avg_img, imgsum, bad_frame_list
def combine_compressed( filename, Nf, del_old=True):
old_files = np.concatenate( np.array([ [filename +'-header'],
[filename + '_temp-%i.tmp'%i for i in range(Nf) ]]))
combine_binary_files(filename, old_files, del_old )
def combine_binary_files(filename, old_files, del_old = False):
'''Combine binary files together'''
fn_ = open(filename, 'wb')
for ftemp in old_files:
shutil.copyfileobj( open(ftemp, 'rb'), fn_)
if del_old:
os.remove( ftemp )
fn_.close()
def para_segment_compress_eigerdata( images, mask, md, filename, num_sub=100,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0, nobytes=4, bins=1, dtypes='images',reverse =True,
num_max_para_process=50,direct_load_data=False, data_path=None):
'''
parallelly compressed eiger data without header, this function is for parallel compress
'''
if dtypes=='uid':
uid= md['uid'] #images
if not direct_load_data:
detector = get_detector( db[uid ] )
images_ = load_data( uid, detector, reverse= reverse )
else:
images_ = EigerImages(data_path, md)
N= len(images_)
else:
N = len(images)
#N = int( np.ceil( N/ bins ) )
num_sub *= bins
if N%num_sub:
Nf = N// num_sub +1
        print('The average image intensity would be slightly incorrect, about 1% error.')
        print( 'Please give a num_sub that makes the remainder of Num_images/num_sub = 0 to get a correct avg_image')
else:
Nf = N//num_sub
print( 'It will create %i temporary files for parallel compression.'%Nf)
if Nf> num_max_para_process:
N_runs = np.int( np.ceil( Nf/float(num_max_para_process)))
print('The parallel run number: %s is larger than num_max_para_process: %s'%(Nf, num_max_para_process ))
else:
N_runs= 1
result = {}
#print( mask_filename )# + '*'* 10 + 'here' )
for nr in range( N_runs ):
if (nr+1)*num_max_para_process > Nf:
inputs= range( num_max_para_process*nr, Nf )
else:
inputs= range( num_max_para_process*nr, num_max_para_process*(nr + 1 ) )
fns = [ filename + '_temp-%i.tmp'%i for i in inputs]
#print( nr, inputs, )
pool = Pool(processes= len(inputs) ) #, maxtasksperchild=1000 )
#print( inputs )
for i in inputs:
if i*num_sub <= N:
result[i] = pool.apply_async( segment_compress_eigerdata, [
images, mask, md, filename + '_temp-%i.tmp'%i,bad_pixel_threshold, hot_pixel_threshold, bad_pixel_low_threshold, nobytes, bins, i*num_sub, (i+1)*num_sub, dtypes, reverse,direct_load_data, data_path ] )
pool.close()
pool.join()
pool.terminate()
return result
def segment_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0, nobytes=4, bins=1,
N1=None, N2=None, dtypes='images',reverse =True,direct_load_data=False, data_path=None ):
'''
    Create compressed Eiger data without a header; this function is used for parallel compression.
    For parallel compression, do not pass any non-scalar parameters.
'''
if dtypes=='uid':
uid= md['uid'] #images
if not direct_load_data:
detector = get_detector( db[uid ] )
images = load_data( uid, detector, reverse= reverse )[N1:N2]
else:
images = EigerImages(data_path, md)[N1:N2]
Nimg_ = len( images)
M,N = images[0].shape
avg_img = np.zeros( [M,N], dtype= np.float )
Nopix = float( avg_img.size )
n=0
good_count = 0
#frac = 0.0
if nobytes==2:
dtype= np.int16
elif nobytes==4:
dtype= np.int32
elif nobytes==8:
dtype=np.float64
else:
print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]")
dtype= np.int32
#Nimg = Nimg_//bins
Nimg = int( np.ceil( Nimg_ / bins ) )
time_edge = np.array(create_time_slice( N= Nimg_,
slice_num= Nimg, slice_width= bins ))
#print( time_edge, Nimg_, Nimg, bins, N1, N2 )
imgsum = np.zeros( Nimg )
if bins!=1:
#print('The frames will be binned by %s'%bins)
dtype=np.float64
fp = open( filename,'wb' )
for n in range(Nimg):
t1,t2 = time_edge[n]
if bins!=1:
img = np.array( np.average( images[t1:t2], axis=0 ) , dtype= dtype)
else:
img = np.array( images[t1], dtype=dtype)
mask &= img < hot_pixel_threshold
p = np.where( (np.ravel(img)>0) * np.ravel(mask) )[0] #don't use masked data
v = np.ravel( np.array( img, dtype= dtype )) [p]
dlen = len(p)
imgsum[n] = v.sum()
if (dlen==0) or (imgsum[n] > bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold):
dlen = 0
fp.write( struct.pack( '@I', dlen ))
else:
np.ravel( avg_img )[p] += v
good_count +=1
fp.write( struct.pack( '@I', dlen ))
fp.write( struct.pack( '@{}i'.format( dlen), *p))
if bins==1:
fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v))
else:
fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v)) #n +=1
del p,v, img
fp.flush()
fp.close()
avg_img /= good_count
bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold)
sys.stdout.write('#')
sys.stdout.flush()
#del images, mask, avg_img, imgsum, bad_frame_list
#print( 'Should release memory here')
return mask, avg_img, imgsum, bad_frame_list
def create_compress_header( md, filename, nobytes=4, bins=1 ):
'''
Create the head for a compressed eiger data, this function is for parallel compress
'''
fp = open( filename,'wb' )
#Make Header 1024 bytes
#md = images.md
if bins!=1:
nobytes=8
Header = struct.pack('@16s8d7I916x',b'Version-COMP0001',
md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'],
md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'],
nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0],
0, md['pixel_mask'].shape[1],
0, md['pixel_mask'].shape[0]
)
fp.write( Header)
fp.close()
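# The 1024-byte header written above can be read back with the matching unpack
# formats (this is what the Multifile reader below does); a sketch:
#
#   with open(filename, 'rb') as f:
#       br = f.read(1024)
#   magic = struct.unpack('@16s', br[:16])
#   meta = struct.unpack('@8d7I916x', br[16:])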
def init_compress_eigerdata( images, mask, md, filename,
bad_pixel_threshold=1e15, hot_pixel_threshold=2**30,
bad_pixel_low_threshold=0,nobytes=4, bins=1, with_pickle=True,
direct_load_data=False, data_path=None):
'''
Compress the eiger data
Create a new mask by remove hot_pixel
Do image average
Do each image sum
Find badframe_list for where image sum above bad_pixel_threshold
Generate a compressed data with filename
if bins!=1, will bin the images with bin number as bins
Header contains 1024 bytes ['Magic value', 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
bytes per pixel (either 2 or 4 (Default)),
Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End ]
Return
mask
avg_img
        imgsum
bad_frame_list
'''
fp = open( filename,'wb' )
#Make Header 1024 bytes
#md = images.md
if bins!=1:
nobytes=8
Header = struct.pack('@16s8d7I916x',b'Version-COMP0001',
md['beam_center_x'],md['beam_center_y'], md['count_time'], md['detector_distance'],
md['frame_time'],md['incident_wavelength'], md['x_pixel_size'],md['y_pixel_size'],
nobytes, md['pixel_mask'].shape[1], md['pixel_mask'].shape[0],
0, md['pixel_mask'].shape[1],
0, md['pixel_mask'].shape[0]
)
fp.write( Header)
Nimg_ = len( images)
avg_img = np.zeros_like( images[0], dtype= np.float )
Nopix = float( avg_img.size )
n=0
good_count = 0
frac = 0.0
if nobytes==2:
dtype= np.int16
elif nobytes==4:
dtype= np.int32
elif nobytes==8:
dtype=np.float64
else:
print ( "Wrong type of nobytes, only support 2 [np.int16] or 4 [np.int32]")
dtype= np.int32
Nimg = Nimg_//bins
time_edge = np.array(create_time_slice( N= Nimg_,
slice_num= Nimg, slice_width= bins ))
imgsum = np.zeros( Nimg )
if bins!=1:
print('The frames will be binned by %s'%bins)
for n in tqdm( range(Nimg) ):
t1,t2 = time_edge[n]
img = np.average( images[t1:t2], axis=0 )
mask &= img < hot_pixel_threshold
p = np.where( (np.ravel(img)>0) & np.ravel(mask) )[0] #don't use masked data
v = np.ravel( np.array( img, dtype= dtype )) [p]
dlen = len(p)
imgsum[n] = v.sum()
if (imgsum[n] >bad_pixel_threshold) or (imgsum[n] <=bad_pixel_low_threshold):
#if imgsum[n] >=bad_pixel_threshold :
dlen = 0
fp.write( struct.pack( '@I', dlen ))
else:
np.ravel(avg_img )[p] += v
good_count +=1
frac += dlen/Nopix
#s_fmt ='@I{}i{}{}'.format( dlen,dlen,'ih'[nobytes==2])
fp.write( struct.pack( '@I', dlen ))
fp.write( struct.pack( '@{}i'.format( dlen), *p))
if bins==1:
fp.write( struct.pack( '@{}{}'.format( dlen,'ih'[nobytes==2]), *v))
else:
fp.write( struct.pack( '@{}{}'.format( dlen,'dd'[nobytes==2] ), *v))
#n +=1
fp.close()
frac /=good_count
print( "The fraction of pixel occupied by photon is %6.3f%% "%(100*frac) )
avg_img /= good_count
bad_frame_list = np.where( (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) )[0]
#bad_frame_list1 = np.where( np.array(imgsum) > bad_pixel_threshold )[0]
#bad_frame_list2 = np.where( np.array(imgsum) < bad_pixel_low_threshold )[0]
#bad_frame_list = np.unique( np.concatenate( [bad_frame_list1, bad_frame_list2]) )
if len(bad_frame_list):
        print ('Bad frames are: %s' %bad_frame_list)
else:
print ('No bad frames are involved.')
if with_pickle:
pkl.dump( [mask, avg_img, imgsum, bad_frame_list], open(filename + '.pkl', 'wb' ) )
return mask, avg_img, imgsum, bad_frame_list
""" Description:
This is code that Mark wrote to open the multifile format
in compressed mode, translated to python.
This seems to work for DALSA, FCCD and EIGER in compressed mode.
It should be included in the respective detector.i files
Currently, this refers to the compression mode being '6'
Each file is image descriptor files chunked together as follows:
Header (1024 bytes)
|--------------IMG N begin--------------|
| Dlen
|---------------------------------------|
| Pixel positions (dlen*4 bytes |
| (0 based indexing in file) |
|---------------------------------------|
| Pixel data(dlen*bytes bytes) |
| (bytes is found in header |
| at position 116) |
|--------------IMG N end----------------|
|--------------IMG N+1 begin------------|
|----------------etc.....---------------|
Header contains 1024 bytes version name, 'beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
bytes per pixel (either 2 or 4 (Default)),
Nrows, Ncols, Rows_Begin, Rows_End, Cols_Begin, Cols_End,
"""
class Multifile:
'''The class representing the multifile.
The recno is in 1 based numbering scheme (first record is 1)
This is efficient for reading in increasing order.
Note: reading same image twice in a row is like reading an earlier
    numbered image and means the program starts from the beginning again.
'''
def __init__(self,filename,beg,end):
'''Multifile initialization. Open the file.
Here I use the read routine which returns byte objects
(everything is an object in python). I use struct.unpack
to convert the byte object to other data type (int object
etc)
NOTE: At each record n, the file cursor points to record n+1
'''
self.FID = open(filename,"rb")
# self.FID.seek(0,os.SEEK_SET)
self.filename = filename
#br: bytes read
br = self.FID.read(1024)
self.beg=beg
self.end=end
ms_keys = ['beam_center_x', 'beam_center_y', 'count_time', 'detector_distance',
'frame_time', 'incident_wavelength', 'x_pixel_size', 'y_pixel_size',
'bytes',
'nrows', 'ncols', 'rows_begin', 'rows_end', 'cols_begin', 'cols_end'
]
magic = struct.unpack('@16s', br[:16])
md_temp = struct.unpack('@8d7I916x', br[16:])
self.md = dict(zip(ms_keys, md_temp))
self.imgread=0
self.recno = 0
# some initialization stuff
self.byts = self.md['bytes']
if (self.byts==2):
self.valtype = np.uint16
elif (self.byts == 4):
self.valtype = np.uint32
elif (self.byts == 8):
self.valtype = np.float64
#now convert pieces of these bytes to our data
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
# now read first image
#print "Opened file. Bytes per data is {0img.shape = (self.rows,self.cols)}".format(self.byts)
def _readHeader(self):
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
def _readImageRaw(self):
p= np.fromfile(self.FID, dtype = np.int32,count= self.dlen)
v= np.fromfile(self.FID, dtype = self.valtype,count= self.dlen)
self.imgread=1
return(p,v)
def _readImage(self):
(p,v)=self._readImageRaw()
img = np.zeros( ( self.md['ncols'], self.md['nrows'] ) )
np.put( np.ravel(img), p, v )
return(img)
def seekimg(self,n=None):
'''Position file to read the nth image.
        Seeks are performed record by record since the per-image data length (dlen) varies.
'''
# the logic involving finding the cursor position
if (n is None):
n = self.recno
if (n < self.beg or n > self.end):
raise IndexError('Error, record out of range')
#print (n, self.recno, self.FID.tell() )
if ((n == self.recno) and (self.imgread==0)):
pass # do nothing
else:
if (n <= self.recno): #ensure cursor less than search pos
self.FID.seek(1024,os.SEEK_SET)
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
self.recno = 0
self.imgread=0
if n == 0:
return
#have to iterate on seeking since dlen varies
#remember for rec recno, cursor is always at recno+1
if(self.imgread==0 ): #move to next header if need to
self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR)
for i in range(self.recno+1,n):
#the less seeks performed the faster
#print (i)
self.dlen =np.fromfile(self.FID,dtype=np.int32,count=1)[0]
#print 's',self.dlen
self.FID.seek(self.dlen*(4+self.byts),os.SEEK_CUR)
# we are now at recno in file, read the header and data
#self._clearImage()
self._readHeader()
self.imgread=0
self.recno = n
def rdframe(self,n):
if self.seekimg(n)!=-1:
return(self._readImage())
def rdrawframe(self,n):
if self.seekimg(n)!=-1:
return(self._readImageRaw())
def pass_FD(FD,n):
#FD.rdframe(n)
FD.seekimg(n)
class Multifile_Bins( object ):
'''
Bin a compressed file with bins number
See Multifile for details for Multifile_class
'''
def __init__(self, FD, bins=100):
'''
FD: the handler of a compressed Eiger frames
bins: bins number
'''
self.FD=FD
if (FD.end - FD.beg)%bins:
            print ('Please give a bins number such that the number of frames (FD.end - FD.beg) is divisible by bins')
else:
self.bins = bins
self.md = FD.md
#self.beg = FD.beg
self.beg = 0
Nimg = (FD.end - FD.beg)
slice_num = Nimg//bins
self.end = slice_num
self.time_edge = np.array(create_time_slice( N= Nimg,
slice_num= slice_num, slice_width= bins )) + FD.beg
self.get_bin_frame()
def get_bin_frame(self):
FD= self.FD
self.frames = np.zeros( [ FD.md['ncols'],FD.md['nrows'], len(self.time_edge)] )
for n in tqdm( range(len(self.time_edge))):
#print (n)
t1,t2 = self.time_edge[n]
#print( t1, t2)
self.frames[:,:,n] = get_avg_imgc( FD, beg=t1,end=t2, sampling = 1,
plot_ = False, show_progress = False )
def rdframe(self,n):
return self.frames[:,:,n]
def rdrawframe(self,n):
x_= np.ravel( self.rdframe(n) )
p= np.where( x_ ) [0]
v = np.array( x_[ p ])
return ( np.array(p, dtype=np.int32), v)
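# Usage sketch: rebin an already opened compressed series by 100 frames
# (requires (FD.end - FD.beg) to be divisible by bins, as checked above):
#
#   FD_bin = Multifile_Bins(FD, bins=100)
#   binned_img = FD_bin.rdframe(0)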
def get_avg_imgc( FD, beg=None,end=None, sampling = 100, plot_ = False, bad_frame_list=None,
show_progress=True, *argv,**kwargs):
    '''Get the average image from a data series by reading every `sampling`-th frame to save time'''
#avg_img = np.average(data_series[:: sampling], axis=0)
if beg is None:
beg = FD.beg
if end is None:
end = FD.end
avg_img = FD.rdframe(beg)
n=1
flag=True
if show_progress:
#print( sampling-1 + beg , end, sampling )
if bad_frame_list is None:
bad_frame_list =[]
fra_num = int( (end - beg )/sampling ) - len( bad_frame_list )
for i in tqdm(range( sampling-1 + beg , end, sampling ), desc= 'Averaging %s images'% fra_num):
if bad_frame_list is not None:
if i in bad_frame_list:
flag= False
else:
flag=True
#print(i, flag)
if flag:
(p,v) = FD.rdrawframe(i)
if len(p)>0:
np.ravel(avg_img )[p] += v
n += 1
else:
for i in range( sampling-1 + beg , end, sampling ):
if bad_frame_list is not None:
if i in bad_frame_list:
flag= False
else:
flag=True
if flag:
(p,v) = FD.rdrawframe(i)
if len(p)>0:
np.ravel(avg_img )[p] += v
n += 1
avg_img /= n
if plot_:
if RUN_GUI:
fig = Figure()
ax = fig.add_subplot(111)
else:
fig, ax = plt.subplots()
uid = 'uid'
if 'uid' in kwargs.keys():
uid = kwargs['uid']
im = ax.imshow(avg_img , cmap='viridis',origin='lower',
norm= LogNorm(vmin=0.001, vmax=1e2))
#ax.set_title("Masked Averaged Image")
ax.set_title('uid= %s--Masked-Averaged-Image-'%uid)
fig.colorbar(im)
        if kwargs.get('save', False):  # assumes an optional 'save' flag passed via kwargs
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = kwargs['path']
if 'uid' in kwargs:
uid = kwargs['uid']
else:
uid = 'uid'
#fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png'
fp = path + "uid=%s--avg-img-"%uid + '.png'
plt.savefig( fp, dpi=fig.dpi)
#plt.show()
return avg_img
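# A minimal calling sketch (added for illustration; `fd` is an assumed compressed-frame
# handler, and `uid`/`path`/`save` are the keyword names this function reads from **kwargs):
#   avg = get_avg_imgc(fd, beg=fd.beg, end=fd.end, sampling=100,
#                      plot_=False, bad_frame_list=None, show_progress=True)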
def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor = False):
"""Compute the mean intensity for each ROI in the compressed file (FD), support parallel computation
Parameters
----------
FD: Multifile class
compressed file
labeled_array : array
labeled array; 0 is background.
Each ROI is represented by a nonzero integer. It is not required that
the ROI labels are contiguous
index : int, list, optional
The ROI's to use. If None, this function will extract averages for all
ROIs
Returns
-------
mean_intensity : array
The mean intensity of each ROI for all `images`
Dimensions:
len(mean_intensity) == len(index)
len(mean_intensity[0]) == len(images)
index : list
The labels for each element of the `mean_intensity` list
"""
qind, pixelist = roi.extract_label_indices( labeled_array )
if labeled_array.shape != ( FD.md['ncols'],FD.md['nrows']):
raise ValueError(
" `image` shape (%d, %d) in FD is not equal to the labeled_array shape (%d, %d)" %( FD.md['ncols'],FD.md['nrows'], labeled_array.shape[0], labeled_array.shape[1]) )
# handle various input for `index`
if index is None:
index = list(np.unique(labeled_array))
index.remove(0)
else:
try:
len(index)
except TypeError:
index = [index]
index = np.array( index )
#print ('here')
good_ind = np.zeros( max(qind), dtype= np.int32 )
good_ind[ index -1 ] = np.arange( len(index) ) +1
w = np.where( good_ind[qind -1 ] )[0]
qind = good_ind[ qind[w] -1 ]
pixelist = pixelist[w]
# pre-allocate an array for performance
# might be able to use list comprehension to make this faster
mean_intensity = np.zeros( [ int( ( FD.end - FD.beg)/sampling ) , len(index)] )
#fra_pix = np.zeros_like( pixelist, dtype=np.float64)
timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 )
timg[pixelist] = np.arange( 1, len(pixelist) + 1 )
#maxqind = max(qind)
norm = np.bincount( qind )[1:]
n= 0
#for i in tqdm(range( FD.beg , FD.end )):
if not multi_cor:
for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get ROI intensity of each frame' ):
(p,v) = FD.rdrawframe(i)
w = np.where( timg[p] )[0]
pxlist = timg[ p[w] ] -1
mean_intensity[n] = np.bincount( qind[pxlist], weights = v[w], minlength = len(index)+1 )[1:]
n +=1
else:
ring_masks = [ np.array(labeled_array==i, dtype = np.int64) for i in np.unique( labeled_array )[1:] ]
inputs = range( len(ring_masks) )
go_through_FD(FD)
pool = Pool(processes= len(inputs) )
        print( 'Starting to assign the tasks...')
results = {}
for i in tqdm ( inputs ):
results[i] = apply_async( pool, _get_mean_intensity_one_q, ( FD, sampling, ring_masks[i] ) )
pool.close()
        print( 'Starting to run the tasks...')
res = [ results[k].get() for k in tqdm( list(sorted(results.keys())) ) ]
#return res
for i in inputs:
mean_intensity[:,i] = res[i]
        print( 'ROI mean_intensity calculation is DONE!')
del results
del res
mean_intensity /= norm
return mean_intensity, index
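# A minimal calling sketch (added for illustration): `labels` is an assumed 2-D ROI map of
# shape (ncols, nrows) whose nonzero integers label the ROIs.
#   mean_int, roi_index = mean_intensityc(fd, labels, sampling=1, index=None, multi_cor=False)
#   # mean_int has shape (n_frames // sampling, n_rois); roi_index lists the ROI labels used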
def _get_mean_intensity_one_q( FD, sampling, labels ):
mi = np.zeros( int( ( FD.end - FD.beg)/sampling ) )
n=0
qind, pixelist = roi.extract_label_indices( labels )
# iterate over the images to compute multi-tau correlation
fra_pix = np.zeros_like( pixelist, dtype=np.float64)
timg = np.zeros( FD.md['ncols'] * FD.md['nrows'] , dtype=np.int32 )
timg[pixelist] = np.arange( 1, len(pixelist) + 1 )
for i in range( FD.beg, FD.end, sampling ):
(p,v) = FD.rdrawframe(i)
w = np.where( timg[p] )[0]
pxlist = timg[ p[w] ] -1
mi[n] = np.bincount( qind[pxlist], weights = v[w], minlength = 2 )[1:]
n +=1
return mi
def get_each_frame_intensityc( FD, sampling = 1,
bad_pixel_threshold=1e10, bad_pixel_low_threshold=0,
hot_pixel_threshold=2**30,
plot_ = False, bad_frame_list=None, save=False, *argv,**kwargs):
'''Get the total intensity of each frame by sampling every N frames
Also get bad_frame_list by check whether above bad_pixel_threshold
    Usage:
    imgsum, bad_frame_list = get_each_frame_intensityc(good_series, sampling=1000,
                             bad_pixel_threshold=1e10, plot_=True)
'''
#print ( argv, kwargs )
#mask &= img < hot_pixel_threshold
imgsum = np.zeros( int( (FD.end - FD.beg )/ sampling ) )
n=0
for i in tqdm(range( FD.beg, FD.end, sampling ), desc= 'Get each frame intensity' ):
(p,v) = FD.rdrawframe(i)
if len(p)>0:
imgsum[n] = np.sum( v )
n += 1
if plot_:
uid = 'uid'
if 'uid' in kwargs.keys():
uid = kwargs['uid']
fig, ax = plt.subplots()
ax.plot( imgsum,'bo')
ax.set_title('uid= %s--imgsum'%uid)
ax.set_xlabel( 'Frame_bin_%s'%sampling )
ax.set_ylabel( 'Total_Intensity' )
if save:
#dt =datetime.now()
#CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute)
path = kwargs['path']
if 'uid' in kwargs:
uid = kwargs['uid']
else:
uid = 'uid'
#fp = path + "uid= %s--Waterfall-"%uid + CurTime + '.png'
fp = path + "uid=%s--imgsum-"%uid + '.png'
fig.savefig( fp, dpi=fig.dpi)
plt.show()
bad_frame_list_ = np.where( ( np.array(imgsum) > bad_pixel_threshold ) | ( np.array(imgsum) <= bad_pixel_low_threshold) )[0] + FD.beg
if bad_frame_list is not None:
bad_frame_list = np.unique( np.concatenate([bad_frame_list, bad_frame_list_]) )
else:
bad_frame_list = bad_frame_list_
if len(bad_frame_list):
print ('Bad frame list length is: %s' %len(bad_frame_list))
else:
print ('No bad frames are involved.')
return imgsum,bad_frame_list
| bsd-3-clause |
josauder/procedural_city_generation | UI.py | 2 | 3982 | import os
import sys
import procedural_city_generation
donemessage = "\n"+(150*"-")+"\n\t\t\t Done, waiting for command\n"+(150*"-")+"\n"
path = os.path.dirname(procedural_city_generation.__file__)
sys.path.append(path)
if not os.path.exists(path+"/temp/"):
os.system("mkdir "+path+"/temp")
if not os.path.exists(path+"/outputs/"):
os.system("mkdir "+path+"/outputs")
def setup_matplotlib():
"""
This function is used to set the matplotlib backend correctly.
    Returns
    -------
    None
"""
if sys.version[0] == "3":
import matplotlib
try:
matplotlib.use("Qt4Agg")
except:
print("PyQt4 is not installed - outputs will only be saved as images and not be visible at runtime")
print("However, it is strongly recommended that you install PyQt4 in order to use the GUI")
matplotlib.use("agg")
from procedural_city_generation.roadmap import main as roadmap_main
from procedural_city_generation.polygons import main as polygons_main
from procedural_city_generation.building_generation import main as building_generation_main
from procedural_city_generation.additional_stuff.Singleton import Singleton
def setRoadmapGUI(gui):
roadmap_main.gui = gui
Singleton("roadmap").kill()
def setPolygonsGUI(gui):
polygons_main.gui = gui
Singleton("polygons").kill()
def setBuilding_generationGUI(gui):
building_generation_main.gui = gui
Singleton("building_generation").kill()
def roadmap():
roadmap_main.main()
Singleton("roadmap").kill()
print(donemessage)
def polygons():
polygons_main.main(None)
Singleton("polygons").kill()
print(donemessage)
def building_generation():
building_generation_main.main()
Singleton("building_generation").kill()
print(donemessage)
def visualization():
os.system("blender --python "+path+"/visualization/blenderize.py")
from procedural_city_generation.additional_stuff.Singleton import Singleton
Singleton("visualization").kill()
def main(args):
"""
Welcome to procedural_city_generation, a module for procedurally generating a 3D model of a city in Blender with python.
A call to this module from the command line should follow this format::
python UI.py <submodule-name> <options>
    <submodule-name> is either "roadmap", "polygons", "building_generation", or "visualization".
<options> is either "run" or "configure"
If you want to configure a paremeter, go with
python UI.py <submodule-name> --configure <parameter-name> <new value>
"""
if len(args) == 1:
print(main.__doc__)
return 0
if "configure" in args[2]:
config_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
"procedural_city_generation/inputs/{0}.conf".format(args[1]))
if len(args) == 3:
os.system("nano {0}".format(config_file))
sys.exit(0)
elif args[3] and args[4]:
import json
with open(config_file, 'r') as f:
wb = json.loads(f.read())
i = 0
while True:
try:
old = wb[args[3+i]]['value']
wb[args[3+i]]['value'] = eval(args[4+i])
print("{0} was changed from {1} to {2}".format(args[3+i], old, args[4+i]))
i += 2
if len(args)-1 < i+4:
break
except:
print(i, len(args))
                print("Either {0} is not a configurable parameter for {1}, or the new value could not be parsed".format(args[3+i], args[1]))
return 0
with open(config_file, 'w') as f:
f.write(json.dumps(wb, indent=2))
return 0
elif "run" in args[2]:
setup_matplotlib()
eval(args[1])()
if __name__ == '__main__':
main(sys.argv)
| mpl-2.0 |
burakbayramli/dersblog | tser/tser_005_intro/common.py | 2 | 8217 | import pandas as pd
import numpy as np
import scipy.stats as st
DAYS_IN_YEAR=256.0
ROOT_DAYS_IN_YEAR=DAYS_IN_YEAR**.5
useroot=""
def cap_forecast(xrow, capmin,capmax):
"""
Cap forecasts.
"""
## Assumes we have a single column
x=xrow[0]
if x<capmin:
return capmin
elif x>capmax:
return capmax
return x
def cap_series(xseries, capmin=-20.0,capmax=20.0):
"""
Apply capping to each element of a time series
For a long only investor, replace -20.0 with 0.0
"""
return xseries.apply(cap_forecast, axis=1, args=(capmin, capmax))
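# A minimal, hypothetical usage sketch (added for illustration; not part of the original
# module): cap a small single-column DataFrame of forecasts into the default [-20, 20] band.
def _example_cap_series():
    raw = pd.DataFrame([[-35.0], [3.2], [27.5]])
    return cap_series(raw)  # capped values: -20.0, 3.2, 20.0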
def get_list_code():
ans=pd.read_csv("%sconfig.csv" % useroot)
return list(ans.Instrument)
def get_point_sizes():
ans=pd.read_csv("%sconfig.csv" % useroot)
psizes=dict([(x[1].Instrument, float(x[1].Pointsize)) for x in ans.iterrows()])
return psizes
def pd_readcsv(filename):
"""
Reads the pandas dataframe from a filename, given the index is correctly labelled
"""
ans=pd.read_csv(filename)
ans.index=pd.to_datetime(ans['DATETIME'])
del ans['DATETIME']
ans.index.name=None
return ans
def find_datediff(data_row):
"""
data differential for a single row
"""
if np.isnan(data_row.NEAR_MONTH) or np.isnan(data_row.TRADE_MONTH):
return np.nan
nearest_dt=pd.to_datetime(str(int(data_row.NEAR_MONTH)), format="%Y%m")
trade_dt=pd.to_datetime(str(int(data_row.TRADE_MONTH)), format="%Y%m")
distance = trade_dt - nearest_dt
distance_years=distance.days/365.25
    ## if the nearer contract is cheaper, the price will fall
price_diff=data_row.NEARER - data_row.TRADED
return price_diff/distance_years
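# Worked example (hypothetical row): NEAR_MONTH=201603, TRADE_MONTH=201606, NEARER=100.0,
# TRADED=99.5 -> the contracts are 92 days (~0.252 years) apart and price_diff=0.5, so the
# annualised data differential returned is roughly 0.5/0.252 ~= 1.99.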
def ewmac_forecast_scalar(Lfast, Lslow):
"""
Function to return the forecast scalar (table 49 of the book)
Only defined for certain values
"""
fsdict=dict(l2_8=10.6, l4_16=7.5, l8_32=5.3, l16_64=3.75, l32_128=2.65, l64_256=1.87)
lkey="l%d_%d" % (Lfast, Lslow)
if lkey in fsdict:
return fsdict[lkey]
else:
print ("Warning: No scalar defined for Lfast=%d, Lslow=%d, using default of 1.0" % (Lfast, Lslow))
return 1.0
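# Example: ewmac_forecast_scalar(16, 64) returns 3.75 from the lookup table above, while an
# undefined pair such as (10, 40) falls through to the default of 1.0 and prints a warning.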
def get_price_for_instrument(code):
filename="%sdata/%s_price.csv" % (useroot, code)
price=pd_readcsv(filename)
return price
def get_carry_data(code):
filename="%sdata/%s_carrydata.csv" % (useroot, code)
data=pd_readcsv(filename)
return data
def uniquets(df3):
"""
    Makes the index of df3 unique (keeps the first row for each duplicated timestamp)
"""
df3=df3.groupby(level=0).first()
return df3
def daily_resample(b, a):
"""
Returns b dataframe resampled to a dataframe index
"""
master_index=a.index
a_daily=a.resample('1D') ## Only want index, fill method is irrelevant
b=uniquets(b)
b_daily=b.reindex(a_daily.index, method="ffill", limit=1)
new_b=b_daily.reindex(master_index, method="ffill", limit=1)
return new_b
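# A minimal sketch (hypothetical names): daily_resample(positions, prices) forward-fills the
# `positions` frame onto the index of `prices`, going through a daily intermediate index and
# filling at most one step ahead at each stage (limit=1).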
def calculate_pandl(position_ts, price_ts, pointsize=1.0):
rs_positions_ts=daily_resample(position_ts, price_ts).ffill()
rets=price_ts - price_ts.shift(1)
local_rets=rs_positions_ts.shift(1)*rets*pointsize
return local_rets
def annualised_rets(total_rets):
mean_rets=total_rets.mean(skipna=True)
annualised_rets=mean_rets*DAYS_IN_YEAR
return annualised_rets
def annualised_vol(total_rets):
actual_total_daily_vol=total_rets.std(skipna=True)
actual_total_annual_vol=actual_total_daily_vol*ROOT_DAYS_IN_YEAR
return actual_total_annual_vol
def sharpe(total_rets):
sharpe=annualised_rets(total_rets)/annualised_vol(total_rets)
return sharpe
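# A minimal, hypothetical usage sketch (added for illustration; not part of the original
# module): annualised Sharpe ratio of a short artificial daily-return series, using
# DAYS_IN_YEAR=256 as defined at the top of this module.
def _example_sharpe():
    rets = arbitrary_timeseries([0.01, -0.005, 0.02, -0.01])
    return sharpe(rets)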
def stack_ts(tslist, start_date=pd.datetime(1970,1,1)):
"""
Take a list of time series, and stack them, generating a new time series
"""
tslist_values=[list(x.iloc[:,0].values) for x in tslist]
stack_values=sum(tslist_values, [])
stack_values=[x for x in stack_values if not np.isinf(x)]
    stacked=arbitrary_timeseries(stack_values, start_date)
return stacked
def slices_for_ts(data, freq="12M"):
"""
Return date indices for slicing up a data frame
"""
yridx=list(pd.date_range(start=data.index[0], end=data.index[-1], freq=freq))
yridx_stub=list(pd.date_range(start=yridx[-1], periods=2, freq=freq))[-1]
yridx=yridx+[yridx_stub]
return yridx
def break_up_ts(data, freq="12M"):
"""
Take a data frame and break it into chunks
returns a list of data frames
"""
yridx=slices_for_ts(data, freq)
brokenup=[]
for idx in range(len(yridx))[1:]:
brokenup.append(data[yridx[idx-1]:yridx[idx]])
return brokenup
def drawdown(x):
### Returns a ts of drawdowns for a time series x
## rolling max with infinite window
maxx=pd.rolling_max(x, 99999999, min_periods=1)
return (x - maxx)/maxx
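# Worked example: for the price path [100, 110, 99] the rolling maximum is [100, 110, 110],
# so drawdown() returns [0.0, 0.0, -0.1], i.e. the last point sits 10% below its high.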
class account_curve(pd.core.series.Series):
"""
Inherits from pandas time series to give useful information
Could be in % or GBP terms
Downsamples to daily before doing anything else
    Can be resampled to weekly or monthly frequency with new_freq()
"""
def new_freq(self, freq):
## Set up a new frequency.
## Note this will break certain things (eg Sharpe) so be careful
if freq=="Daily":
## we assume we're daily so do nothing
return self
if freq=="Weekly":
return self.cumsum().ffill().resample("W").diff()
if freq=="Monthly":
return self.cumsum().ffill().resample("M").diff()
def sharpe(self):
## assumes daily returns
return ROOT_DAYS_IN_YEAR*self.mean()/self.std()
def annstd(self):
return ROOT_DAYS_IN_YEAR*self.std()
def losses(self):
x=self.values
return [z for z in x if z<0]
def gains(self):
x=self.values
return [z for z in x if z>0]
def avg_loss(self):
return np.mean(self.losses())
def avg_gain(self):
return np.mean(self.gains())
def drawdown(self):
## in case need numerous stats
if "drawdownacc" not in dir(self):
setattr(self, "drawdownacc", drawdown(cum_perc(self)))
return self.drawdownacc
def avg_drawdown(self):
return self.perc_drawdown(50.0)
def perc_drawdown(self, q):
dd=self.drawdown()
return np.percentile(dd, q)
def worst_drawdown(self):
dd=self.drawdown()
return np.nanmin(dd.values)
def time_in_drawdown(self):
dd=self.drawdown()
dd=[z for z in dd if not np.isnan(z)]
in_dd=float(len([z for z in dd if z<0]))
return in_dd/float(len(dd))
def monthly_returns(self):
return self.resample("1M", how="sum")
def gaintolossratio(self):
return self.avg_gain()/-self.avg_loss()
def profitfactor(self):
return sum(self.gains())/-sum(self.losses())
def hitrate(self):
no_gains=float(len(self.gains()))
no_losses=float(len(self.losses()))
return no_gains/(no_losses+no_gains)
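# A minimal, hypothetical usage sketch (added for illustration; not part of the original
# module):
#   acc = account_curve(arbitrary_timeseries([0.01, -0.005, 0.02, -0.01]))
#   acc.sharpe(); acc.worst_drawdown(); acc.hitrate()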
def cum_perc(pd_timeseries):
"""
Cumulate percentage returns for a pandas time series
"""
cum_datalist=[1+x for x in pd_timeseries]
cum_datalist=pd.TimeSeries(cum_datalist, index=pd_timeseries.index)
return cum_datalist.cumprod()
def arbitrary_timeindex(Nperiods, index_start=pd.datetime(2000,1,1)):
"""
For nice plotting, convert a list of prices or returns into an arbitrary pandas time series
"""
ans=pd.bdate_range(start=index_start, periods=Nperiods)
return ans
def arbitrary_timeseries(datalist, index_start=pd.datetime(2000,1,1)):
"""
For nice plotting, convert a list of prices or returns into an arbitrary pandas time series
"""
ans=pd.TimeSeries(datalist, index=arbitrary_timeindex(len(datalist), index_start))
return ans
def remove_nans_from_list(xlist):
return [x for x in xlist if not np.isnan(x)]
def autocorr(x, t=1):
return np.corrcoef(np.array([x[0:len(x)-t], x[t:len(x)]]))[0,1]
| gpl-3.0 |
madjelan/scikit-learn | sklearn/decomposition/tests/test_dict_learning.py | 85 | 8565 | import numpy as np
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import TempMemmap
from sklearn.decomposition import DictionaryLearning
from sklearn.decomposition import MiniBatchDictionaryLearning
from sklearn.decomposition import SparseCoder
from sklearn.decomposition import dict_learning_online
from sklearn.decomposition import sparse_encode
rng_global = np.random.RandomState(0)
n_samples, n_features = 10, 8
X = rng_global.randn(n_samples, n_features)
def test_dict_learning_shapes():
n_components = 5
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_overcomplete():
n_components = 12
dico = DictionaryLearning(n_components, random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_reconstruction():
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
# used to test lars here too, but there's no guarantee the number of
# nonzero atoms is right.
def test_dict_learning_reconstruction_parallel():
# regression test that parallel reconstruction works with n_jobs=-1
n_components = 12
dico = DictionaryLearning(n_components, transform_algorithm='omp',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X).transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X)
dico.set_params(transform_algorithm='lasso_lars')
code = dico.transform(X)
assert_array_almost_equal(np.dot(code, dico.components_), X, decimal=2)
def test_dict_learning_lassocd_readonly_data():
n_components = 12
with TempMemmap(X) as X_read_only:
dico = DictionaryLearning(n_components, transform_algorithm='lasso_cd',
transform_alpha=0.001, random_state=0, n_jobs=-1)
code = dico.fit(X_read_only).transform(X_read_only)
assert_array_almost_equal(np.dot(code, dico.components_), X_read_only, decimal=2)
def test_dict_learning_nonzero_coefs():
n_components = 4
dico = DictionaryLearning(n_components, transform_algorithm='lars',
transform_n_nonzero_coefs=3, random_state=0)
code = dico.fit(X).transform(X[1])
assert_true(len(np.flatnonzero(code)) == 3)
dico.set_params(transform_algorithm='omp')
code = dico.transform(X[1])
assert_equal(len(np.flatnonzero(code)), 3)
def test_dict_learning_unknown_fit_algorithm():
n_components = 5
dico = DictionaryLearning(n_components, fit_algorithm='<unknown>')
assert_raises(ValueError, dico.fit, X)
def test_dict_learning_split():
n_components = 5
dico = DictionaryLearning(n_components, transform_algorithm='threshold',
random_state=0)
code = dico.fit(X).transform(X)
dico.split_sign = True
split_code = dico.transform(X)
assert_array_equal(split_code[:, :n_components] -
split_code[:, n_components:], code)
def test_dict_learning_online_shapes():
rng = np.random.RandomState(0)
n_components = 8
code, dictionary = dict_learning_online(X, n_components=n_components,
alpha=1, random_state=rng)
assert_equal(code.shape, (n_samples, n_components))
assert_equal(dictionary.shape, (n_components, n_features))
assert_equal(np.dot(code, dictionary).shape, X.shape)
def test_dict_learning_online_verbosity():
n_components = 5
# test verbosity
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=1,
random_state=0)
dico.fit(X)
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, verbose=2,
random_state=0)
dico.fit(X)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=1,
random_state=0)
dict_learning_online(X, n_components=n_components, alpha=1, verbose=2,
random_state=0)
finally:
sys.stdout = old_stdout
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_estimator_shapes():
n_components = 5
dico = MiniBatchDictionaryLearning(n_components, n_iter=20, random_state=0)
dico.fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_overcomplete():
n_components = 12
dico = MiniBatchDictionaryLearning(n_components, n_iter=20,
random_state=0).fit(X)
assert_true(dico.components_.shape == (n_components, n_features))
def test_dict_learning_online_initialization():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features)
dico = MiniBatchDictionaryLearning(n_components, n_iter=0,
dict_init=V, random_state=0).fit(X)
assert_array_equal(dico.components_, V)
def test_dict_learning_online_partial_fit():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
dict1 = MiniBatchDictionaryLearning(n_components, n_iter=10 * len(X),
batch_size=1,
alpha=1, shuffle=False, dict_init=V,
random_state=0).fit(X)
dict2 = MiniBatchDictionaryLearning(n_components, alpha=1,
n_iter=1, dict_init=V,
random_state=0)
for i in range(10):
for sample in X:
dict2.partial_fit(sample)
assert_true(not np.all(sparse_encode(X, dict1.components_, alpha=1) ==
0))
assert_array_almost_equal(dict1.components_, dict2.components_,
decimal=2)
def test_sparse_encode_shapes():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
for algo in ('lasso_lars', 'lasso_cd', 'lars', 'omp', 'threshold'):
code = sparse_encode(X, V, algorithm=algo)
assert_equal(code.shape, (n_samples, n_components))
def test_sparse_encode_error():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = sparse_encode(X, V, alpha=0.001)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1)
def test_sparse_encode_error_default_sparsity():
rng = np.random.RandomState(0)
X = rng.randn(100, 64)
D = rng.randn(2, 64)
code = ignore_warnings(sparse_encode)(X, D, algorithm='omp',
n_nonzero_coefs=None)
assert_equal(code.shape, (100, 2))
def test_unknown_method():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
assert_raises(ValueError, sparse_encode, X, V, algorithm="<unknown>")
def test_sparse_coder_estimator():
n_components = 12
rng = np.random.RandomState(0)
V = rng.randn(n_components, n_features) # random init
V /= np.sum(V ** 2, axis=1)[:, np.newaxis]
code = SparseCoder(dictionary=V, transform_algorithm='lasso_lars',
transform_alpha=0.001).transform(X)
assert_true(not np.all(code == 0))
assert_less(np.sqrt(np.sum((np.dot(code, V) - X) ** 2)), 0.1) | bsd-3-clause |
liyu1990/sklearn | examples/linear_model/plot_lasso_and_elasticnet.py | 73 | 2074 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples // 2], y[:n_samples // 2]
X_test, y_test = X[n_samples // 2:], y[n_samples // 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, color='lightgreen', linewidth=2,
label='Elastic net coefficients')
plt.plot(lasso.coef_, color='gold', linewidth=2,
label='Lasso coefficients')
plt.plot(coef, '--', color='navy', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
jiansenzheng/oanda_trading | oanda_trading/forex_trading_general_171005.py | 1 | 27162 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 06 20:00:30 2016
@author: Jiansen
"""
import requests
import threading
import copy
import logging
import os
#import urllib3
import json
from scipy import stats
#from decimal import Decimal, getcontext, ROUND_HALF_DOWN
#from event00 import TickEvent,TickEvent2
#import time
import oandapy
import httplib
import pandas as pd
import math
import numpy as np
import pywt
import time
from settings import STREAM_DOMAIN, API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID
from trading_events import Event,TickEvent2,LiqEvent,OrderEvent,CloseEvent
from trading_global_functions import *
from trading_log import log_dict
import Queue
#for writing data
import datetime
from bson.objectid import ObjectId
import pymongo as pm
from pymongo import MongoClient
import statsmodels.tsa.stattools as ts
#requests.adapters.DEFAULT_RETRIES = 5
from warningOps import warning
from seriesADF import getADF
corpid= ''
secret=''
warn = warning(corpid,secret)
#------the only line we need to change is about the instruments----#
pairs = "EUR_USD"
#-----------------------------------------------------------------------#
client = MongoClient('localhost',27017)
db = client.test_database
#---------------Initialize the parameters and database connections-------#
if pairs == "EUR_USD":
try:
from param_EUR_USD import MA_dict, threshold_dict,sltp_dict
except ImportError:
raise ValueError("cannot find parameters for {0}!".format(pairs))
collection = db.tick_test
index_collect = db.index_EUR_USD
elif pairs == "USD_CNH":
try:
from param_USD_CNH import MA_dict, threshold_dict,sltp_dict
except ImportError:
raise ValueError("cannot find parameters for {0}!".format(pairs))
collection = db.tick_USD_CNH
index_collect = db.index_USD_CNH
elif pairs == "AUD_USD":
try:
from param_AUD_USD import MA_dict, threshold_dict,sltp_dict
except ImportError:
raise ValueError("cannot find parameters for {0}!".format(pairs))
collection = db.tick_AUD_USD
index_collect = db.index_AUD_USD
else:
raise ValueError('Invalid <pairs>, CANNOT FIND THE INSTRUMENTS!')
#-----------------------------------------------------------------------#
#--------------------------Liquidity Index------------------------------#
#-----------------------------------------------------------------------#
class LiqForex(object):
def __init__(
self, domain, access_token,
account_id, instruments,ct, gran, dd, events_queue
):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.instruments = instruments
self.ct = ct
self.gran=gran
self.dd= dd
self.events_queue = events_queue
def getLiq(self):
try:
requests.packages.urllib3.disable_warnings()
s = requests.Session()
#s.keep_alive = False
url = "https://" + self.domain + "/v1/candles"
headers = {'Authorization' : 'Bearer ' + self.access_token}
params = {'instrument':self.instruments, 'accountId' : self.account_id,
'count':self.ct,'candleFormat':'midpoint','granularity':self.gran}
req = requests.Request('GET', url, headers=headers, params=params)
pre = req.prepare()
logging.info( pre)
resp = s.send(pre, stream=False, verify=False)
try:
msg=json.loads(resp.text)
except Exception as e:
logging.warning( "Caught exception when converting message into json\n" + str(e))
return
if msg.has_key("candles"):
time0=msg.get("candles")[-1]["time"]
lis = ohlcv_lis(msg.get("candles"))
liqS = pd.Series()
for i in range(0, len(lis)- (self.dd+1) ,1):
s2 = liq15min(lis[i:i+self.dd])
liqS = np.append(liqS,s2)
liq=liqS[-1]
                logging.info( "liq={0}".format(liq))
tev = LiqEvent(self.instruments,time0,liq)
self.events_queue.put(tev,False)
post_metric = get_indicator(self.instruments,None,None,self.gran,liq,None,None)
index_collect.insert_one(post_metric)
except Exception as e:
s.close()
content0 = "Caught exception when connecting to history\n" + str(e)
logging.warning(content0)
#warn.tradingWarning(content0)
def activeLiq(self,period):
while True:
self.getLiq()
time.sleep(period)
#--------------------------------------------------------------------#
class StreamingForexPrices(object):
def __init__(
self, domain, access_token,
account_id, instruments,ct, gran, dd, events_queue
):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.instruments = instruments
self.ct = ct
self.gran=gran
self.dd= dd
self.events_queue = events_queue
def connect_to_stream(self):
try:
requests.packages.urllib3.disable_warnings()
s = requests.Session() # socket
url = "https://" + self.domain + "/v1/prices"
headers = {'Authorization' : 'Bearer ' + self.access_token}
params = {'instruments' : self.instruments, 'accountId' : self.account_id}
time.sleep(0.8) # sleep some seconds
req = requests.Request('GET', url, headers=headers, params=params)
pre = req.prepare()
resp = s.send(pre, stream=True, verify=False)
return resp
except Exception as e:
#global s
s.close()
content0 = "Caught exception when connecting to stream\n" + str(e)
logging.warning(content0)
#warn.tradingWarning(content0)
def stream_to_queue_old(self,collection):
response = self.connect_to_stream()
if response.status_code != 200:
return
try:
for line in response.iter_lines(1):
if line:
try:
msg = json.loads(line)
except Exception as e:
content0 = "Caught exception when converting message into json\n" + str(e)
logging.warning(content0)
return
if msg.has_key("instrument") or msg.has_key("tick"):
logging.info(msg)
instrument = msg["tick"]["instrument"]
time0 = msg["tick"]["time"]
bid = msg["tick"]["bid"]
ask = msg["tick"]["ask"]
tev = TickEvent2(instrument, time0, bid, ask)
self.events_queue.put(tev,False)
post= getDoc(msg)
collection.insert_one(post)
except Exception as e:
logging.warning('Caught ChunkedEncodingError in stream_to_queue_old()!'+str(time.ctime()))
return
#--------------
#------
# new strategy
class LiqMAStrategy(object):
    """
    SMA-crossover strategy that switches between a trend-following mode and a
    mean-reversion mode, gated by a liquidity index and an ADF test on recent prices.
    """
def __init__(
self, access_token, account_id, pairs, units, events, stopLoss1, takeProfit1,stopLoss2, takeProfit2,
short_window1, long_window1,short_window2, long_window2, idxU, lam, thres1, thres2,thres3, thres4, adf_thres
):
self.access_token = access_token
self.account_id = account_id
self.pairs = pairs
self.units = units
self.stopLoss1 = stopLoss1
self.takeProfit1 = takeProfit1
self.stopLoss2 = stopLoss2
self.takeProfit2 = takeProfit2
self.pairs_dict = self.create_pairs_dict()
self.events = events
self.short_window1 = short_window1
self.long_window1 = long_window1
self.short_window2 = short_window2
self.long_window2 = long_window2
self.idxU = idxU
self.lam = lam
self.priceLis1 = pd.Series() #for trends
self.priceLis2 = pd.Series() #for reversion
self.thres1 = thres1
self.thres2 = thres2
self.thres3 = thres3
self.thres4 = thres4
self.adf_thres = adf_thres
#---intermediates---#
self.SL_TP = {"trends":[self.stopLoss1,self.takeProfit1],
"reversion":[self.stopLoss2,self.takeProfit2]}
self.s_l_window = {"trends":[self.short_window1,self.long_window1],
"reversion":[self.short_window2,self.long_window2]}
self.thres_tre_rev = {"trends":[self.thres1, self.thres2],
"reversion":[self.thres3,self.thres4]}
def create_pairs_dict(self):
attr_dict = {
"ticks": 0,
"tick0": 0,
"priceLS":0.0,
"invested": False,
"short_sma": None,
"long_sma": None,
"longShort": None,
"short_slope":None,
"long_slope":None, # False denotes sell, while True denotes buy
"check": False,
"orlis":[0,0,0,0],
"stra": 0,
"fixed": False
}
#pairs_dict = {}
pairs_dict = copy.deepcopy(attr_dict)
return pairs_dict
def check_order(self,check):
if check== True:
oanda0 = oandapy.API(environment="practice", access_token=self.access_token)
try:
responseTrades = oanda0.get_trades(self.account_id,instrument=self.pairs)
except Exception as e:
logging.warning('Caught exception in get_trades() of check_order()!\n'+str(time.ctime()))
return
if responseTrades.get("trades")==[]:
pd = self.pairs_dict
pd["orlis"].pop(0)
logging.info(" orlis: "+str(pd["orlis"]))
pd["orlis"].append(0)
logging.info(" orlis: "+str(pd["orlis"]))
if pd["orlis"][0:4]==[1,1,0,0]:
logging.warning( "Stop Loss Order Executed!")
#warn.tradingWarning(" Stop Loss Order Executed!")
pd["invested"]= False
pd["fixed"] = False #position closed, the stra type is free
pd["check"] = False
else:
pass
else:
pd = self.pairs_dict
#pd["orlis"][0] = copy.copy(pd["orlis"][1])
pd["orlis"].pop(0)
pd["orlis"].append(1)
logging.info("not empty- orlis: "+str(pd["orlis"]))
pd["invested"]= True
pd["fixed"] = True #position closed, the stra type is free
pd["check"] = True
else:
pass
def compute_slope(self,price_lis,window_length,k):
        '''
        Compute the slope ratio for a short time series.
        Arguments:
            price_lis {np.ndarray} -- the filtered price series used to compute the slope
                for both the short (SMA) and the long (LMA) window; typically newPriceLis
            window_length {int} -- length of the short window (e.g. self.short_window2)
            k {float} -- weight given to the short-window slope in the average, default 0.5
        Returns:
            float -- the weighted slope ratio
        '''
amp = lambda lis: (lis-lis[0])*10000.0
pShort = amp(price_lis[-window_length:])
pLong = amp(price_lis)
#compute the slope ratio
aveSlope = k*getSlope(pShort)+ (1-k)*getSlope(pLong)
return aveSlope
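    # Added note: with k=0.5 the value returned above is the plain average of the
    # short-window slope and the full-window slope, both measured on prices rescaled
    # by 1e4 through the `amp` lambda.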
def set_invested_check_fixed(self,pair_dict,invested_bool,check_bool,fixed_bool):
pair_dict["invested"] = invested_bool
pair_dict["check"] = check_bool
pair_dict["fixed"] = fixed_bool
time.sleep(0.0)
def get_sl_tp(self,TreRev):
return self.SL_TP[TreRev]
def insert_metric(self,collection,pair_dict):
'''
default collection: index_USD_CNH
'''
short_window,long_window = self.s_l_window[pair_dict["stra"]]
post_metric = get_indicator(self.pairs,short_window,long_window,
None,None,pair_dict["short_slope"],pair_dict["long_slope"])
collection.insert_one(post_metric)
#----------------#
def buy_send_order(self,pd,side,price0,price1,TreRev):
logging.info("price02={0}".format(price0))
self.set_invested_check_fixed(pd,True,True,True)
fixSL, fixeTP = self.get_sl_tp(TreRev)
sl_b, tp_b= round(price0 - fixSL,5),round(price1 + fixeTP,5)
order = OrderEvent(self.pairs, self.units, "market", side, sl_b, tp_b,"Trends")
self.events.put(order)
pd["longShort"] = True
pd["tick0"]= pd["ticks"]
pd["priceLS"]= price0
def sell_send_order(self,pd,side,price0,price1,TreRev):
logging.info("price01={0}".format(price1))
self.set_invested_check_fixed(pd,True,True,True)
fixSL, fixeTP = self.get_sl_tp(TreRev)
sl_s,tp_s = round(price1 + fixSL,5),round(price0 - fixeTP,5)
order = OrderEvent(self.pairs, self.units, "market", side, sl_s, tp_s,"Trends")
self.events.put(order)
pd["longShort"] = False
pd["tick0"]= pd["ticks"]
pd["priceLS"]= price1
def logging_invested(self,priceLis,pd,sign):
TreRev = pd["stra"]
logging.info(TreRev+" position!")
#??? TODO 23:38 Oct 5, 2017
short_window = self.s_l_window[TreRev][0]
newPriceLis = get_new_price_lis(priceLis, pd, short_window)
basePrice=pd["priceLS"]+sign*self.lam*np.std(priceLis)*np.sqrt(pd["ticks"]-pd["tick0"])
logging.info( "basePrice="+str(basePrice))
logging.info( "short_sma"+str(pd["short_sma"]))
logging.info( "long_sma"+str(pd["long_sma"]))
aveSlope = self.compute_slope(newPriceLis,short_window, 0.5)
logging.info( "aveSlope="+str(aveSlope))
return aveSlope
def put_close_order(self,pairs,num):
'''
pairs,num = self.pairs,0
'''
order_closed = CloseEvent(pairs,num)
self.events.put(order_closed)
#--------------------------------------#
def open_trends_buy(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][0]
return (pd["short_sma"] > pd["long_sma"] and aveSlope > thres)
def open_trends_sell(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][0]
return (pd["short_sma"] < pd["long_sma"] and aveSlope < -thres)
def open_reversion_buy(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][0]
return (pd["short_sma"] < pd["long_sma"] and aveSlope< -thres)
def open_reversion_sell(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][0]
return (pd["short_sma"] > pd["long_sma"] and aveSlope> thres)
#-----------------------------------------------#
def close_trends_buy(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][1]
return (pd["longShort"] and aveSlope < thres)
def close_trends_sell(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][1]
return (not pd["longShort"] and aveSlope > -thres)
def close_reversion_buy(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][1]
return (pd["short_sma"] > pd["long_sma"]*(1+thres/100.0) and pd["longShort"])
def close_reversion_sell(self,pd,aveSlope):
thres = self.thres_tre_rev[pd["stra"]][1]
return (pd["short_sma"] < pd["long_sma"]*(1-thres/100.0) and not pd["longShort"])
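    # Added summary of the rules above (thresholds come from threshold_dict):
    #   trends:    open long  when short SMA > long SMA and slope >  thres1;
    #              close long when the slope falls back below thres2 (mirror rules for shorts).
    #   reversion: open long  when short SMA < long SMA and slope < -thres3;
    #              close long when short SMA exceeds long SMA * (1 + thres4/100).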
#--------------------------------------#
def calculate_signals(self, event):
#if True:
global liqIndex
global newPriceLis
if event.type == 'TICK':
price = (event.bid+event.ask)/2.000
self.priceLis1 = np.append(self.priceLis1,price)
self.priceLis2 = np.append(self.priceLis2,price)
if len(self.priceLis1)>max([self.long_window1,self.long_window2]):
self.priceLis1=self.priceLis1[-self.long_window1:]
self.priceLis2=self.priceLis2[-self.long_window2:]
else:
pass
#liqIndex= event.liq
logging.info("liqIndex= "+str(liqIndex)+"\n")
logging.info("price= "+str(price))
pd = self.pairs_dict
logging.info("check"+str(pd["check"]))
self.check_order(pd["check"]) #check whether the SLTP order is triggered..
# Only start the strategy when we have created an accurate short window
logging.info("INVESTED= "+str(pd["invested"]))
if not pd["invested"]:
#global price0
if pd["ticks"]>max([self.long_window1, self.long_window2])+1 and liqIndex > self.idxU:
if not pd["fixed"]:
critAdf = getADF(collection).priceADF(200,1)
if critAdf > self.adf_thres:
pd["stra"] = "reversion"
newPriceLis = get_new_price_lis(self.priceLis2, pd, self.short_window2)
aveSlope = self.compute_slope(newPriceLis,self.short_window2, 0.5)
logging.info( "REVERSION+aveSlope="+str(aveSlope))
self.insert_metric(index_collect,pd)
else:
pd["stra"] = "trends"
newPriceLis = get_new_price_lis(self.priceLis1, pd, self.short_window1)
aveSlope = self.compute_slope(newPriceLis,self.short_window1, 0.5)
logging.info("TRENDS+aveSlope="+str(aveSlope))
self.insert_metric(index_collect,pd)
else:
raise ValueError("pd[fixed] should be False!")
price0, price1 = event.bid, event.ask
if pd["stra"] =="trends":
if self.open_trends_buy(pd,aveSlope):
side = "buy"
self.buy_send_order(pd,side,price0,price1,pd["stra"])
elif self.open_trends_sell(pd,aveSlope):
side = "sell"
self.sell_send_order(pd,side,price0,price1,pd["stra"])
else:
pd["fixed"] = False
elif pd["stra"] =="reversion":
if self.open_reversion_sell(pd,aveSlope):
side = "sell"
self.sell_send_order(pd,side,price0,price1,pd["stra"])
elif self.open_reversion_buy(pd,aveSlope):
side = "buy"
self.buy_send_order(pd,side,price0,price1,pd["stra"])
else:
pd["fixed"] = False
else:
pass
else:
pass
elif pd["invested"]:
sign= 1 if pd["longShort"] == True else -1
if pd["stra"] =="trends":
aveSlope = self.logging_invested(self.priceLis1,pd,sign)
self.insert_metric(index_collect,pd)
if self.close_trends_sell(pd,aveSlope):
#side = "sell"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
self.put_close_order(self.pairs,0)
elif self.close_trends_buy(pd,aveSlope):
#side = "buy"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
self.put_close_order(self.pairs,0)
else: #not closing positions, just keep the pd["fixed"] as True.
pd["fixed"] = True #should we add pd["invested"]
elif pd["stra"] =="reversion":
aveSlope=self.logging_invested(self.priceLis2,pd,sign)
self.insert_metric(index_collect,pd)
if self.close_reversion_sell(pd,aveSlope):
#side = "sell"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
self.put_close_order(self.pairs,0)
elif self.close_reversion_buy(pd,aveSlope):
#side = "buy"
self.set_invested_check_fixed(pd,False,False,False)
#warn.tradingWarning(" check->False Executed!")
self.put_close_order(self.pairs,0)
else:
pd["fixed"] = True #should we add pd["invested"]
else:
pass
pd["ticks"] += 1
logging.info("current Tick "+str(pd["ticks"])+"\n"+str(time.ctime()))
#--------------------------------------------------------------------#
class Execution(object):
def __init__(self, domain, access_token, account_id):
self.domain = domain
self.access_token = access_token
self.account_id = account_id
self.conn = self.obtain_connection()
def obtain_connection(self):
return httplib.HTTPSConnection(self.domain)
def execute_order(self, event):
oanda0 = oandapy.API(environment="practice", access_token=self.access_token)
try:
responseX = oanda0.create_order(self.account_id,
instrument=event.instrument,
units= event.units,
side= event.side,
type= event.order_type,
stopLoss = event.stopLoss,
takeProfit = event.takeProfit
)
except Exception as e:
content0 = "Caught OnadaError when sending the orders\n" + str(e)
logging.warning(content0)
return
logging.info( "Execute Order ! \n {0}".format(responseX))
content0 = str(event.stra)+"Execute Order ! "+" "+str(event.side)+" "+ str(event.units)+" units of "+str(event.instrument)
#warn.tradingWarning(content0)
logging.info(content0)
def close_order(self, event):
oanda0 = oandapy.API(environment="practice", access_token=self.access_token)
response1= oanda0.get_trades(self.account_id,instrument=event.instrument)
order_lis= response1["trades"]
if order_lis !=[]:
for order in order_lis: #close all trades
responseX = oanda0.close_trade(self.account_id,trade_id= order['id'])
logging.info( "Close Order ! \n {0}".format(responseX))
content0 = "Close Order !" + "profit: "+str(responseX['profit'])+" CLOSE "+str(responseX['instrument'])
content0 = content0 + " "+str(responseX['side'])+" at "+ str(responseX['price'])
#warn.tradingWarning(content0)
else:
logging.warning("No trade to be closed! :{0}".format(time.ctime()))
#--------------------------------------------------------------------#
def trade(events, strategy,execution,heartbeat):
    """
    Poll the events queue and dispatch each event to the strategy (TICK/LIQ) or to the
    execution handler (ORDER/CLOSE), sleeping `heartbeat` seconds between polls.
    """
global liqIndex
while True:
try:
event = events.get(False)
except Queue.Empty:
pass
else:
if event is not None:
if event.type =='LIQ':
liqIndex= event.liq
#print "current index ="+str(liqIndex)
elif event.type == 'TICK':
strategy.calculate_signals(event)
logging.info( "Tick!")
elif event.type == 'ORDER':
logging.info( "Executing order!")
execution.execute_order(event)
elif event.type == "CLOSE":
logging.info( "Close trading!")
execution.close_order(event)
time.sleep(heartbeat)
#--------------------------------------------------------------------#
if __name__ == "__main__":
logPath,logName = log_dict[pairs]["path"],log_dict[pairs]["name"]
logging.basicConfig(filename= os.path.join(logPath,logName),
format='%(levelname)s:%(message)s',level=logging.DEBUG)
global liqIndex
liqIndex=0
ct = 20
gran ='M15'
time_dict = {
"S5": 5,
"S10": 10,
"S15": 15,
"S30": 30,
"M1": 60,
"M2": 120 }
dd = 11
lam= 0.1 #0.5 basePrice tuning
units = 100 #100
#----------Parameters----------------
short_window1= MA_dict['short_window1']
long_window1 = MA_dict['long_window1']
short_window2= MA_dict['short_window2']
long_window2 = MA_dict['long_window2']
idxu = threshold_dict['idxu']
thres1= threshold_dict['thres1']
thres2= threshold_dict['thres2']
thres3 = threshold_dict['thres3']
thres4= threshold_dict['thres4']
adf_thres = threshold_dict['adf_thres']
sl1 = sltp_dict['sl1'] #10
tp1 = sltp_dict['tp1'] #10
sl2 = sltp_dict['sl2'] #10
tp2 = sltp_dict['tp2'] #10
#--------------------------------------
heartbeat= 0.2
period= 600
print 'initial'
print('MA:\n sw1 {0} lw1 {1} sw2 {2} lw2 {3}'.format(short_window1, long_window1, short_window2, long_window2))
print('parameters:\n thres1 {0} thres2 {1} thres3 {2} thres4 {3}'.format(thres1,thres2,thres3,thres4))
print('sltp_parameters:\n {0} {1} {2} {3}'.format(sl1,tp1,sl2,tp2))
events = Queue.Queue()
# initial the threads
prices = StreamingForexPrices(STREAM_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID, pairs, ct, gran, dd, events)
liquidity = LiqForex(API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID, pairs, ct, gran, dd, events)
execution = Execution(API_DOMAIN, ACCESS_TOKEN, ACCOUNT_ID)
#strategy = MovingAverageCrossStrategy(pairs, units, events, sl, tp, short_window,long_window)
strategy = LiqMAStrategy(ACCESS_TOKEN, ACCOUNT_ID, pairs, units, events, sl1, tp1, sl2, tp2, short_window1,long_window1,
short_window2,long_window2,idxu,lam,thres1,thres2,thres3,thres4,adf_thres)
# construct the thread
price_thread = threading.Thread(target=prices.stream_to_queue_old, args=[collection])
liq_thread = threading.Thread(target= liquidity.activeLiq, args=[period])
trade_thread = threading.Thread(target=trade, args=(events, strategy,execution,heartbeat))
print "Full?:",events.full()
trade_thread.start()
price_thread.start()
liq_thread.start()
| gpl-3.0 |
mhbashari/machine-learning-snippets | Basic/01-linear_regression_tensorflow.py | 1 | 2015 | import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from numpy.core.multiarray import ndarray
__author__ = "mhbashari"
class LinearRegression:
def __init__(self, train_X: ndarray, train_Y: ndarray, learning_rate=0.001, training_epochs=100):
self.train_X = train_X
self.train_Y = train_Y
self.learning_rate = learning_rate
self.training_epochs = training_epochs
def fit(self):
x = tf.placeholder("float")
y = tf.placeholder("float")
a = tf.Variable(1.0, name="weight")
b = tf.Variable(1.0, name="bias")
pred = tf.multiply(x, a) + b
cost = tf.reduce_mean(tf.abs(pred - y))
optimizer = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(cost)
init = tf.initialize_all_variables()
with tf.Session() as sess:
sess.run(init)
for epoch in range(self.training_epochs):
for i, out in zip(self.train_X, self.train_Y):
sess.run(optimizer, feed_dict={x: i, y: out})
                epoch_cost = sess.run(cost, feed_dict={x: self.train_X, y: self.train_Y})
                print("Epoch:", '%04d' % (epoch + 1), "cost=", epoch_cost, "a=", sess.run(a), "b=", sess.run(b))
print("Optimization Finished!")
training_cost = sess.run(cost, feed_dict={x: self.train_X, y: self.train_Y})
print("Training cost=", training_cost, "a=", sess.run(a), "b=", sess.run(b), '\n')
return sess.run(a), sess.run(b)
def visualize(a, b, train_X: ndarray, train_Y: ndarray):
plt.plot(train_X, train_Y, 'ro', label='Original data')
plt.plot(train_X, train_Y)
plt.plot(train_X, a * train_X + b, label='Fitted line')
plt.scatter(train_X, train_Y)
plt.legend()
plt.show()
def data_maker(num=80):
X = np.arange(0, num, dtype=np.float32)
Y = np.float32(np.ceil(5 * (np.sin(X) + X / 5)))
return X, Y
if __name__ == "__main__":
data = data_maker(5)
regression = LinearRegression(*data_maker())
visualize(*(regression.fit() + data_maker()))
| mit |
nmayorov/scikit-learn | sklearn/linear_model/logistic.py | 9 | 67760 |
"""
Logistic Regression
"""
# Author: Gael Varoquaux <gael.varoquaux@normalesup.org>
# Fabian Pedregosa <f@bianp.net>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Lars Buitinck
# Simon Wu <s8wu@uwaterloo.ca>
import numbers
import warnings
import numpy as np
from scipy import optimize, sparse
from .base import LinearClassifierMixin, SparseCoefMixin, BaseEstimator
from .sag import sag_solver
from ..feature_selection.from_model import _LearntSelectorMixin
from ..preprocessing import LabelEncoder, LabelBinarizer
from ..svm.base import _fit_liblinear
from ..utils import check_array, check_consistent_length, compute_class_weight
from ..utils import check_random_state
from ..utils.extmath import (logsumexp, log_logistic, safe_sparse_dot,
softmax, squared_norm)
from ..utils.extmath import row_norms
from ..utils.optimize import newton_cg
from ..utils.validation import check_X_y
from ..exceptions import DataConversionWarning
from ..exceptions import NotFittedError
from ..utils.fixes import expit
from ..utils.multiclass import check_classification_targets
from ..externals.joblib import Parallel, delayed
from ..model_selection import check_cv
from ..externals import six
from ..metrics import SCORERS
# .. some helper functions for logistic_regression_path ..
def _intercept_dot(w, X, y):
"""Computes y * np.dot(X, w).
It takes into consideration if the intercept should be fit or not.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
"""
c = 0.
if w.size == X.shape[1] + 1:
c = w[-1]
w = w[:-1]
z = safe_sparse_dot(X, w) + c
return w, c, y * z
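# Illustrative example (added commentary, not from the library's documentation): with
# w = [1., 2.], X = [[1., 1.], [2., 3.]] and y = [1., -1.], the raw scores are z = [3., 8.]
# and _intercept_dot returns (array([1., 2.]), 0.0, array([3., -8.])).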
def _logistic_loss_and_grad(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss and gradient.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
"""
_, n_features = X.shape
grad = np.empty_like(w)
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if grad.shape[0] > n_features:
grad[-1] = z0.sum()
return out, grad
def _logistic_loss(w, X, y, alpha, sample_weight=None):
"""Computes the logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
out : float
Logistic loss.
"""
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
# Logistic loss is the negative of the log of the logistic function.
out = -np.sum(sample_weight * log_logistic(yz)) + .5 * alpha * np.dot(w, w)
return out
def _logistic_grad_hess(w, X, y, alpha, sample_weight=None):
"""Computes the gradient and the Hessian, in the case of a logistic loss.
Parameters
----------
w : ndarray, shape (n_features,) or (n_features + 1,)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : ndarray, shape (n_samples,)
Array of labels.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
grad : ndarray, shape (n_features,) or (n_features + 1,)
Logistic gradient.
Hs : callable
Function that takes the gradient as a parameter and returns the
matrix product of the Hessian and gradient.
"""
n_samples, n_features = X.shape
grad = np.empty_like(w)
fit_intercept = grad.shape[0] > n_features
w, c, yz = _intercept_dot(w, X, y)
if sample_weight is None:
sample_weight = np.ones(y.shape[0])
z = expit(yz)
z0 = sample_weight * (z - 1) * y
grad[:n_features] = safe_sparse_dot(X.T, z0) + alpha * w
# Case where we fit the intercept.
if fit_intercept:
grad[-1] = z0.sum()
# The mat-vec product of the Hessian
d = sample_weight * z * (1 - z)
if sparse.issparse(X):
dX = safe_sparse_dot(sparse.dia_matrix((d, 0),
shape=(n_samples, n_samples)), X)
else:
# Precompute as much as possible
dX = d[:, np.newaxis] * X
if fit_intercept:
# Calculate the double derivative with respect to intercept
# In the case of sparse matrices this returns a matrix object.
dd_intercept = np.squeeze(np.array(dX.sum(axis=0)))
def Hs(s):
ret = np.empty_like(s)
ret[:n_features] = X.T.dot(dX.dot(s[:n_features]))
ret[:n_features] += alpha * s[:n_features]
# For the fit intercept case.
if fit_intercept:
ret[:n_features] += s[-1] * dd_intercept
ret[-1] = dd_intercept.dot(s[:n_features])
ret[-1] += d.sum() * s[-1]
return ret
return grad, Hs
def _multinomial_loss(w, X, Y, alpha, sample_weight):
"""Computes multinomial loss and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
loss : float
Multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities.
w : ndarray, shape (n_classes, n_features)
Reshaped param vector excluding intercept terms.
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
w = w.reshape(n_classes, -1)
sample_weight = sample_weight[:, np.newaxis]
if fit_intercept:
intercept = w[:, -1]
w = w[:, :-1]
else:
intercept = 0
p = safe_sparse_dot(X, w.T)
p += intercept
p -= logsumexp(p, axis=1)[:, np.newaxis]
loss = -(sample_weight * Y * p).sum()
loss += 0.5 * alpha * squared_norm(w)
p = np.exp(p, p)
return loss, p, w
def _multinomial_loss_grad(w, X, Y, alpha, sample_weight):
"""Computes the multinomial loss, gradient and class probabilities.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
loss : float
Multinomial loss.
grad : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
p : ndarray, shape (n_samples, n_classes)
Estimated class probabilities
Reference
---------
Bishop, C. M. (2006). Pattern recognition and machine learning.
Springer. (Chapter 4.3.4)
"""
n_classes = Y.shape[1]
n_features = X.shape[1]
fit_intercept = (w.size == n_classes * (n_features + 1))
grad = np.zeros((n_classes, n_features + bool(fit_intercept)))
loss, p, w = _multinomial_loss(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
diff = sample_weight * (p - Y)
grad[:, :n_features] = safe_sparse_dot(diff.T, X)
grad[:, :n_features] += alpha * w
if fit_intercept:
grad[:, -1] = diff.sum(axis=0)
return loss, grad.ravel(), p
def _multinomial_grad_hess(w, X, Y, alpha, sample_weight):
"""
Computes the gradient and the Hessian, in the case of a multinomial loss.
Parameters
----------
w : ndarray, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Coefficient vector.
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
Y : ndarray, shape (n_samples, n_classes)
Transformed labels according to the output of LabelBinarizer.
alpha : float
Regularization parameter. alpha is equal to 1 / C.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
Returns
-------
grad : array, shape (n_classes * n_features,) or
(n_classes * (n_features + 1),)
Ravelled gradient of the multinomial loss.
hessp : callable
Function that takes in a vector input of shape (n_classes * n_features)
or (n_classes * (n_features + 1)) and returns matrix-vector product
with hessian.
References
----------
Barak A. Pearlmutter (1993). Fast Exact Multiplication by the Hessian.
http://www.bcl.hamilton.ie/~barak/papers/nc-hessian.pdf
"""
n_features = X.shape[1]
n_classes = Y.shape[1]
fit_intercept = w.size == (n_classes * (n_features + 1))
# `loss` is unused. Refactoring to avoid computing it does not
# significantly speed up the computation and decreases readability
loss, grad, p = _multinomial_loss_grad(w, X, Y, alpha, sample_weight)
sample_weight = sample_weight[:, np.newaxis]
# Hessian-vector product derived by applying the R-operator on the gradient
# of the multinomial loss function.
def hessp(v):
v = v.reshape(n_classes, -1)
if fit_intercept:
inter_terms = v[:, -1]
v = v[:, :-1]
else:
inter_terms = 0
# r_yhat holds the result of applying the R-operator on the multinomial
# estimator.
r_yhat = safe_sparse_dot(X, v.T)
r_yhat += inter_terms
r_yhat += (-p * r_yhat).sum(axis=1)[:, np.newaxis]
r_yhat *= p
r_yhat *= sample_weight
hessProd = np.zeros((n_classes, n_features + bool(fit_intercept)))
hessProd[:, :n_features] = safe_sparse_dot(r_yhat.T, X)
hessProd[:, :n_features] += v * alpha
if fit_intercept:
hessProd[:, -1] = r_yhat.sum(axis=0)
return hessProd.ravel()
return grad, hessp
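# --- Hedged illustration (not part of the original scikit-learn module) ----
# The helper above returns a Hessian-vector product callable instead of the
# full Hessian, which is all that newton-cg needs. A minimal sketch of the
# idea on a simple quadratic f(w) = 0.5 * w.T A w, whose exact
# Hessian-vector product is A @ v; a finite difference of the gradient
# should agree with it. Names ending in _demo are assumptions of this example.
if __name__ == "__main__":  # pragma: no cover
    import numpy as np

    _rng = np.random.RandomState(1)
    A_demo = _rng.randn(4, 4)
    A_demo = A_demo.T.dot(A_demo)      # symmetric positive semi-definite

    def _grad_demo(w):
        return A_demo.dot(w)           # gradient of 0.5 * w.T A w

    def _hessp_demo(v):
        return A_demo.dot(v)           # exact Hessian-vector product

    w_demo, v_demo, eps = _rng.randn(4), _rng.randn(4), 1e-6
    fd = (_grad_demo(w_demo + eps * v_demo) - _grad_demo(w_demo)) / eps
    print("max |hessp - finite difference|:",
          np.abs(_hessp_demo(v_demo) - fd).max())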
def _check_solver_option(solver, multi_class, penalty, dual):
if solver not in ['liblinear', 'newton-cg', 'lbfgs', 'sag']:
raise ValueError("Logistic Regression supports only liblinear,"
" newton-cg, lbfgs and sag solvers, got %s" % solver)
if multi_class not in ['multinomial', 'ovr']:
raise ValueError("multi_class should be either multinomial or "
"ovr, got %s" % multi_class)
if multi_class == 'multinomial' and solver == 'liblinear':
raise ValueError("Solver %s does not support "
"a multinomial backend." % solver)
if solver != 'liblinear':
if penalty != 'l2':
raise ValueError("Solver %s supports only l2 penalties, "
"got %s penalty." % (solver, penalty))
if dual:
raise ValueError("Solver %s supports only "
"dual=False, got dual=%s" % (solver, dual))
def logistic_regression_path(X, y, pos_class=None, Cs=10, fit_intercept=True,
max_iter=100, tol=1e-4, verbose=0,
solver='lbfgs', coef=None, copy=False,
class_weight=None, dual=False, penalty='l2',
intercept_scaling=1., multi_class='ovr',
random_state=None, check_input=True,
max_squared_sum=None, sample_weight=None):
"""Compute a Logistic Regression model for a list of regularization
parameters.
This is an implementation that uses the result of the previous model
to speed up computations along the set of solutions, making it faster
than sequentially calling LogisticRegression for the different parameters.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data.
y : array-like, shape (n_samples,)
Input data, target values.
Cs : int | array-like, shape (n_cs,)
List of values for the regularization parameter or integer specifying
the number of regularization parameters that should be used. In this
case, the parameters will be chosen in a logarithmic scale between
1e-4 and 1e4.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
fit_intercept : bool
Whether to fit an intercept for the model. In this case the shape of
the returned array is (n_cs, n_features + 1).
max_iter : int
Maximum number of iterations for the solver.
tol : float
Stopping criterion. For the newton-cg and lbfgs solvers, the iteration
will stop when ``max{|g_i | i = 1, ..., n} <= tol``
where ``g_i`` is the i-th component of the gradient.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Numerical solver to use.
coef : array-like, shape (n_features,), default None
Initialization value for coefficients of logistic regression.
Useless for liblinear solver.
copy : bool, default False
Whether or not to produce a copy of the data. A copy is not required
anymore. This parameter is deprecated and will be removed in 0.19.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The 'newton-cg',
'sag' and 'lbfgs' solvers support only l2 penalties.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solvers.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
check_input : bool, default True
If False, the input arrays X and y will not be checked.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
n_iter : array, shape (n_cs,)
Actual number of iteration for each Cs.
Notes
-----
You might get slightly different results with the solver liblinear than
with the others since this uses LIBLINEAR which penalizes the intercept.
"""
if copy:
warnings.warn("A copy is not required anymore. The 'copy' parameter "
"is deprecated and will be removed in 0.19.",
DeprecationWarning)
if isinstance(Cs, numbers.Integral):
Cs = np.logspace(-4, 4, Cs)
_check_solver_option(solver, multi_class, penalty, dual)
# Preprocessing.
if check_input or copy:
X = check_array(X, accept_sparse='csr', dtype=np.float64)
y = check_array(y, ensure_2d=False, copy=copy, dtype=None)
check_consistent_length(X, y)
_, n_features = X.shape
classes = np.unique(y)
random_state = check_random_state(random_state)
if pos_class is None and multi_class != 'multinomial':
if (classes.size > 2):
raise ValueError('To fit OvR, use the pos_class argument')
# np.unique(y) gives labels in sorted order.
pos_class = classes[1]
# If sample weights exist, convert them to array (support for lists)
# and check length
# Otherwise set them to 1 for all examples
if sample_weight is not None:
sample_weight = np.array(sample_weight, dtype=np.float64, order='C')
check_consistent_length(y, sample_weight)
else:
sample_weight = np.ones(X.shape[0])
# If class_weights is a dict (provided by the user), the weights
# are assigned to the original labels. If it is "balanced", then
# the class_weights are assigned after masking the labels with a OvR.
le = LabelEncoder()
if isinstance(class_weight, dict) or multi_class == 'multinomial':
if solver == "liblinear":
if classes.size == 2:
# Reconstruct the weights with keys 1 and -1
temp = {1: class_weight[pos_class],
-1: class_weight[classes[0]]}
class_weight = temp.copy()
else:
raise ValueError("In LogisticRegressionCV the liblinear "
"solver cannot handle multiclass with "
"class_weight of type dict. Use the lbfgs, "
"newton-cg or sag solvers or set "
"class_weight='balanced'")
else:
class_weight_ = compute_class_weight(class_weight, classes, y)
sample_weight *= class_weight_[le.fit_transform(y)]
# For doing a ovr, we need to mask the labels first. for the
# multinomial case this is not necessary.
if multi_class == 'ovr':
w0 = np.zeros(n_features + int(fit_intercept))
mask_classes = np.array([-1, 1])
mask = (y == pos_class)
y_bin = np.ones(y.shape, dtype=np.float64)
y_bin[~mask] = -1.
# for compute_class_weight
# 'auto' is deprecated and will be removed in 0.19
if class_weight in ("auto", "balanced"):
class_weight_ = compute_class_weight(class_weight, mask_classes,
y_bin)
sample_weight *= class_weight_[le.fit_transform(y_bin)]
else:
if solver != 'sag':
lbin = LabelBinarizer()
Y_multi = lbin.fit_transform(y)
if Y_multi.shape[1] == 1:
Y_multi = np.hstack([1 - Y_multi, Y_multi])
else:
# SAG multinomial solver needs LabelEncoder, not LabelBinarizer
le = LabelEncoder()
Y_multi = le.fit_transform(y)
w0 = np.zeros((classes.size, n_features + int(fit_intercept)),
order='F')
if coef is not None:
# it must work both giving the bias term and not
if multi_class == 'ovr':
if coef.size not in (n_features, w0.size):
raise ValueError(
'Initialization coef is of shape %d, expected shape '
'%d or %d' % (coef.size, n_features, w0.size))
w0[:coef.size] = coef
else:
# For binary problems coef.shape[0] should be 1, otherwise it
# should be classes.size.
n_classes = classes.size
if n_classes == 2:
n_classes = 1
if (coef.shape[0] != n_classes or
coef.shape[1] not in (n_features, n_features + 1)):
raise ValueError(
'Initialization coef is of shape (%d, %d), expected '
'shape (%d, %d) or (%d, %d)' % (
coef.shape[0], coef.shape[1], classes.size,
n_features, classes.size, n_features + 1))
w0[:, :coef.shape[1]] = coef
if multi_class == 'multinomial':
# fmin_l_bfgs_b and newton-cg accepts only ravelled parameters.
if solver in ['lbfgs', 'newton-cg']:
w0 = w0.ravel()
target = Y_multi
if solver == 'lbfgs':
func = lambda x, *args: _multinomial_loss_grad(x, *args)[0:2]
elif solver == 'newton-cg':
func = lambda x, *args: _multinomial_loss(x, *args)[0]
grad = lambda x, *args: _multinomial_loss_grad(x, *args)[1]
hess = _multinomial_grad_hess
warm_start_sag = {'coef': w0.T}
else:
target = y_bin
if solver == 'lbfgs':
func = _logistic_loss_and_grad
elif solver == 'newton-cg':
func = _logistic_loss
grad = lambda x, *args: _logistic_loss_and_grad(x, *args)[1]
hess = _logistic_grad_hess
warm_start_sag = {'coef': np.expand_dims(w0, axis=1)}
coefs = list()
n_iter = np.zeros(len(Cs), dtype=np.int32)
for i, C in enumerate(Cs):
if solver == 'lbfgs':
try:
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol, maxiter=max_iter)
except TypeError:
# old scipy doesn't have maxiter
w0, loss, info = optimize.fmin_l_bfgs_b(
func, w0, fprime=None,
args=(X, target, 1. / C, sample_weight),
iprint=(verbose > 0) - 1, pgtol=tol)
if info["warnflag"] == 1 and verbose > 0:
warnings.warn("lbfgs failed to converge. Increase the number "
"of iterations.")
try:
n_iter_i = info['nit'] - 1
except:
n_iter_i = info['funcalls'] - 1
elif solver == 'newton-cg':
args = (X, target, 1. / C, sample_weight)
w0, n_iter_i = newton_cg(hess, func, grad, w0, args=args,
maxiter=max_iter, tol=tol)
elif solver == 'liblinear':
coef_, intercept_, n_iter_i, = _fit_liblinear(
X, target, C, fit_intercept, intercept_scaling, class_weight,
penalty, dual, verbose, max_iter, tol, random_state,
sample_weight=sample_weight)
if fit_intercept:
w0 = np.concatenate([coef_.ravel(), intercept_])
else:
w0 = coef_.ravel()
elif solver == 'sag':
if multi_class == 'multinomial':
target = target.astype(np.float64)
loss = 'multinomial'
else:
loss = 'log'
w0, n_iter_i, warm_start_sag = sag_solver(
X, target, sample_weight, loss, 1. / C, max_iter, tol,
verbose, random_state, False, max_squared_sum, warm_start_sag)
else:
raise ValueError("solver must be one of {'liblinear', 'lbfgs', "
"'newton-cg', 'sag'}, got '%s' instead" % solver)
if multi_class == 'multinomial':
multi_w0 = np.reshape(w0, (classes.size, -1))
if classes.size == 2:
multi_w0 = multi_w0[1][np.newaxis, :]
coefs.append(multi_w0)
else:
coefs.append(w0.copy())
n_iter[i] = n_iter_i
return coefs, np.array(Cs), n_iter
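# --- Hedged illustration (not part of the original scikit-learn module) ----
# logistic_regression_path reuses the previous solution as the starting point
# for the next C. A rough public-API approximation of that warm-starting
# idea, assuming a scikit-learn version where LogisticRegression supports
# warm_start with the lbfgs solver; *_demo names are assumptions of this
# example only.
if __name__ == "__main__":  # pragma: no cover
    import numpy as np
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegression

    X_demo, y_demo = make_classification(n_samples=200, n_features=10,
                                         random_state=0)
    clf_demo = LogisticRegression(solver='lbfgs', warm_start=True,
                                  max_iter=200)
    coefs_demo = []
    for C_demo in np.logspace(-4, 4, 10):
        clf_demo.set_params(C=C_demo)
        clf_demo.fit(X_demo, y_demo)     # starts from the previous coef_
        coefs_demo.append(clf_demo.coef_.ravel().copy())
    print("collected %d coefficient vectors along the path" % len(coefs_demo))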
# helper function for LogisticCV
def _log_reg_scoring_path(X, y, train, test, pos_class=None, Cs=10,
scoring=None, fit_intercept=False,
max_iter=100, tol=1e-4, class_weight=None,
verbose=0, solver='lbfgs', penalty='l2',
dual=False, intercept_scaling=1.,
multi_class='ovr', random_state=None,
max_squared_sum=None, sample_weight=None):
"""Computes scores across logistic_regression_path
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training data.
y : array-like, shape (n_samples,) or (n_samples, n_targets)
Target labels.
train : list of indices
The indices of the train set.
test : list of indices
The indices of the test set.
pos_class : int, None
The class with respect to which we perform a one-vs-all fit.
If None, then it is assumed that the given problem is binary.
Cs : list of floats | int
Each of the values in Cs describes the inverse of
regularization strength. If Cs is an int, then a grid of Cs
values are chosen in a logarithmic scale between 1e-4 and 1e4.
If not provided, then a fixed set of values for Cs are used.
scoring : callable
For a list of scoring functions that can be used, look at
:mod:`sklearn.metrics`. The default scoring option used is
accuracy_score.
fit_intercept : bool
If False, then the bias term is set to zero. Else the last
term of each coef_ gives us the intercept.
max_iter : int
Maximum number of iterations for the solver.
tol : float
Tolerance for stopping criteria.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
verbose : int
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
solver : {'lbfgs', 'newton-cg', 'liblinear', 'sag'}
Decides which solver to use.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
intercept_scaling : float, default 1.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'lbfgs' and
'newton-cg' solver.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
max_squared_sum : float, default None
Maximum squared sum of X over samples. Used only in SAG solver.
If None, it will be computed, going through all the samples.
The value should be precomputed to speed up cross validation.
sample_weight : array-like, shape(n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
coefs : ndarray, shape (n_cs, n_features) or (n_cs, n_features + 1)
List of coefficients for the Logistic Regression model. If
fit_intercept is set to True then the second dimension will be
n_features + 1, where the last item represents the intercept.
Cs : ndarray
Grid of Cs used for cross-validation.
scores : ndarray, shape (n_cs,)
Scores obtained for each Cs.
n_iter : array, shape(n_cs,)
Actual number of iteration for each Cs.
"""
_check_solver_option(solver, multi_class, penalty, dual)
X_train = X[train]
X_test = X[test]
y_train = y[train]
y_test = y[test]
if sample_weight is not None:
sample_weight = sample_weight[train]
coefs, Cs, n_iter = logistic_regression_path(
X_train, y_train, Cs=Cs, fit_intercept=fit_intercept,
solver=solver, max_iter=max_iter, class_weight=class_weight,
pos_class=pos_class, multi_class=multi_class,
tol=tol, verbose=verbose, dual=dual, penalty=penalty,
intercept_scaling=intercept_scaling, random_state=random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
log_reg = LogisticRegression(fit_intercept=fit_intercept)
# The score method of Logistic Regression has a classes_ attribute.
if multi_class == 'ovr':
log_reg.classes_ = np.array([-1, 1])
elif multi_class == 'multinomial':
log_reg.classes_ = np.unique(y_train)
else:
raise ValueError("multi_class should be either multinomial or ovr, "
"got %d" % multi_class)
if pos_class is not None:
mask = (y_test == pos_class)
y_test = np.ones(y_test.shape, dtype=np.float64)
y_test[~mask] = -1.
# To deal with object dtypes, we need to convert into an array of floats.
y_test = check_array(y_test, dtype=np.float64, ensure_2d=False)
scores = list()
if isinstance(scoring, six.string_types):
scoring = SCORERS[scoring]
for w in coefs:
if multi_class == 'ovr':
w = w[np.newaxis, :]
if fit_intercept:
log_reg.coef_ = w[:, :-1]
log_reg.intercept_ = w[:, -1]
else:
log_reg.coef_ = w
log_reg.intercept_ = 0.
if scoring is None:
scores.append(log_reg.score(X_test, y_test))
else:
scores.append(scoring(log_reg, X_test, y_test))
return coefs, Cs, np.array(scores), n_iter
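# --- Hedged illustration (not part of the original scikit-learn module) ----
# _log_reg_scoring_path is the per-fold worker behind LogisticRegressionCV;
# the `scoring` argument it threads through follows the usual scorer
# interface. A public-API sketch of the same idea (names ending in _demo are
# assumptions of this example only):
if __name__ == "__main__":  # pragma: no cover
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegressionCV

    X_demo, y_demo = make_classification(n_samples=200, n_features=10,
                                         random_state=0)
    auc_cv_demo = LogisticRegressionCV(Cs=5, cv=3, scoring='roc_auc',
                                       solver='lbfgs').fit(X_demo, y_demo)
    # scores_ maps each class to a (n_folds, n_Cs) grid of fold scores
    _grid = next(iter(auc_cv_demo.scores_.values()))
    print("mean ROC-AUC per C:", _grid.mean(axis=0))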
class LogisticRegression(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Logistic Regression (aka logit, MaxEnt) classifier.
In the multiclass case, the training algorithm uses the one-vs-rest (OvR)
scheme if the 'multi_class' option is set to 'ovr' and uses the cross-
entropy loss, if the 'multi_class' option is set to 'multinomial'.
(Currently the 'multinomial' option is supported only by the 'lbfgs',
'sag' and 'newton-cg' solvers.)
This class implements regularized logistic regression using the
'liblinear' library, 'newton-cg', 'sag' and 'lbfgs' solvers. It can handle
both dense and sparse input. Use C-ordered arrays or CSR matrices
containing 64-bit floats for optimal performance; any other input format
will be converted (and copied).
The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization
with primal formulation. The 'liblinear' solver supports both L1 and L2
regularization, with a dual formulation only for the L2 penalty.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
penalty : str, 'l1' or 'l2', default: 'l2'
Used to specify the norm used in the penalization. The newton-cg, sag
and lbfgs solvers support only l2 penalties.
dual : bool, default: False
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
C : float, default: 1.0
Inverse of regularization strength; must be a positive float.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
intercept_scaling : float, default: 1
Useful only if solver is liblinear.
when self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : dict or 'balanced', default: None
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
*class_weight='balanced'* instead of deprecated
*class_weight='auto'*.
max_iter : int, default: 100
Useful only for the newton-cg, sag and lbfgs solvers.
Maximum number of iterations taken for the solvers to converge.
random_state : int seed, RandomState instance, default: None
The seed of the pseudo random number generator to use when
shuffling the data. Used only in solvers 'sag' and 'liblinear'.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}, default: 'liblinear'
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, default: 1e-4
Tolerance for stopping criteria.
multi_class : str, {'ovr', 'multinomial'}, default: 'ovr'
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
'sag' and 'lbfgs' solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
verbose : int, default: 0
For the liblinear and lbfgs solvers set verbose to any positive
number for verbosity.
warm_start : bool, default: False
When set to True, reuse the solution of the previous call to fit as
initialization, otherwise, just erase the previous solution.
Useless for liblinear solver.
.. versionadded:: 0.17
*warm_start* to support *lbfgs*, *newton-cg*, *sag* solvers.
n_jobs : int, default: 1
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
Attributes
----------
coef_ : array, shape (n_classes, n_features)
Coefficient of the features in the decision function.
intercept_ : array, shape (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
If `fit_intercept` is set to False, the intercept is set to zero.
n_iter_ : array, shape (n_classes,) or (1, )
Actual number of iterations for all classes. If binary or multinomial,
it returns only 1 element. For liblinear solver, only the maximum
number of iteration across all classes is given.
See also
--------
SGDClassifier : incrementally trained logistic regression (when given
the parameter ``loss="log"``).
sklearn.svm.LinearSVC : learns SVM models using the same algorithm.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon,
to have slightly different results for the same input data. If
that happens, try with a smaller tol parameter.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
LIBLINEAR -- A Library for Large Linear Classification
http://www.csie.ntu.edu.tw/~cjlin/liblinear/
Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent
methods for logistic regression and maximum entropy models.
Machine Learning 85(1-2):41-75.
http://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf
"""
def __init__(self, penalty='l2', dual=False, tol=1e-4, C=1.0,
fit_intercept=True, intercept_scaling=1, class_weight=None,
random_state=None, solver='liblinear', max_iter=100,
multi_class='ovr', verbose=0, warm_start=False, n_jobs=1):
self.penalty = penalty
self.dual = dual
self.tol = tol
self.C = C
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.random_state = random_state
self.solver = solver
self.max_iter = max_iter
self.multi_class = multi_class
self.verbose = verbose
self.warm_start = warm_start
self.n_jobs = n_jobs
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
.. versionadded:: 0.17
*sample_weight* support to LogisticRegression.
Returns
-------
self : object
Returns self.
"""
if not isinstance(self.C, numbers.Number) or self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
n_samples, n_features = X.shape
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if self.solver == 'liblinear':
self.coef_, self.intercept_, n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state,
sample_weight=sample_weight)
self.n_iter_ = np.array([n_iter_])
return self
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
n_classes = len(self.classes_)
classes_ = self.classes_
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % classes_[0])
if len(self.classes_) == 2:
n_classes = 1
classes_ = classes_[1:]
if self.warm_start:
warm_start_coef = getattr(self, 'coef_', None)
else:
warm_start_coef = None
if warm_start_coef is not None and self.fit_intercept:
warm_start_coef = np.append(warm_start_coef,
self.intercept_[:, np.newaxis],
axis=1)
self.coef_ = list()
self.intercept_ = np.zeros(n_classes)
# Hack so that we iterate only once for the multinomial case.
if self.multi_class == 'multinomial':
classes_ = [None]
warm_start_coef = [warm_start_coef]
if warm_start_coef is None:
warm_start_coef = [None] * n_classes
path_func = delayed(logistic_regression_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, pos_class=class_, Cs=[self.C],
fit_intercept=self.fit_intercept, tol=self.tol,
verbose=self.verbose, solver=self.solver, copy=False,
multi_class=self.multi_class, max_iter=self.max_iter,
class_weight=self.class_weight, check_input=False,
random_state=self.random_state, coef=warm_start_coef_,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
for (class_, warm_start_coef_) in zip(classes_, warm_start_coef))
fold_coefs_, _, n_iter_ = zip(*fold_coefs_)
self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0]
if self.multi_class == 'multinomial':
self.coef_ = fold_coefs_[0][0]
else:
self.coef_ = np.asarray(fold_coefs_)
self.coef_ = self.coef_.reshape(n_classes, n_features +
int(self.fit_intercept))
if self.fit_intercept:
self.intercept_ = self.coef_[:, -1]
self.coef_ = self.coef_[:, :-1]
return self
def predict_proba(self, X):
"""Probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
For a multi_class problem, if multi_class is set to be "multinomial"
the softmax function is used to find the predicted probability of
each class.
Else use a one-vs-rest approach, i.e. calculate the probability
of each class assuming it to be positive using the logistic function,
and normalize these values across all the classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the probability of the sample for each class in the model,
where classes are ordered as they are in ``self.classes_``.
"""
if not hasattr(self, "coef_"):
raise NotFittedError("Call fit before prediction")
calculate_ovr = self.coef_.shape[0] == 1 or self.multi_class == "ovr"
if calculate_ovr:
return super(LogisticRegression, self)._predict_proba_lr(X)
else:
return softmax(self.decision_function(X), copy=False)
def predict_log_proba(self, X):
"""Log of probability estimates.
The returned estimates for all classes are ordered by the
label of classes.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Returns
-------
T : array-like, shape = [n_samples, n_classes]
Returns the log-probability of the sample for each class in the
model, where classes are ordered as they are in ``self.classes_``.
"""
return np.log(self.predict_proba(X))
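# --- Hedged illustration (not part of the original scikit-learn module) ----
# Minimal usage sketch of the estimator defined above, assuming a
# scikit-learn version contemporary with this module (multi_class still a
# constructor argument, 'multinomial' handled by lbfgs/newton-cg/sag).
if __name__ == "__main__":  # pragma: no cover
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression

    _iris = load_iris()
    X_demo, y_demo = _iris.data, _iris.target
    ovr_demo = LogisticRegression(multi_class='ovr',
                                  solver='liblinear').fit(X_demo, y_demo)
    softmax_demo = LogisticRegression(multi_class='multinomial',
                                      solver='lbfgs',
                                      max_iter=500).fit(X_demo, y_demo)
    # rows of predict_proba sum to one in both cases, but they are
    # normalized one-vs-rest scores for 'ovr' and a softmax for 'multinomial'
    print(ovr_demo.predict_proba(X_demo[:2]))
    print(softmax_demo.predict_proba(X_demo[:2]))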
class LogisticRegressionCV(LogisticRegression, BaseEstimator,
LinearClassifierMixin, _LearntSelectorMixin):
"""Logistic Regression CV (aka logit, MaxEnt) classifier.
This class implements logistic regression using liblinear, newton-cg, sag
or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2
regularization with primal formulation. The liblinear solver supports both
L1 and L2 regularization, with a dual formulation only for the L2 penalty.
For the grid of Cs values (that are set by default to be ten values in
a logarithmic scale between 1e-4 and 1e4), the best hyperparameter is
selected by the cross-validator StratifiedKFold, but it can be changed
using the cv parameter. In the case of newton-cg and lbfgs solvers,
we warm start along the path i.e guess the initial coefficients of the
present fit to be the coefficients got after convergence in the previous
fit, so it is supposed to be faster for high-dimensional dense data.
For a multiclass problem, the hyperparameters for each class are computed
using the best scores got by doing a one-vs-rest in parallel across all
folds and classes. Hence this is not the true multinomial loss.
Read more in the :ref:`User Guide <logistic_regression>`.
Parameters
----------
Cs : list of floats | int
Each of the values in Cs describes the inverse of regularization
strength. If Cs is an int, then a grid of Cs values are chosen
in a logarithmic scale between 1e-4 and 1e4.
Like in support vector machines, smaller values specify stronger
regularization.
fit_intercept : bool, default: True
Specifies if a constant (a.k.a. bias or intercept) should be
added to the decision function.
class_weight : dict or 'balanced', optional
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
.. versionadded:: 0.17
class_weight == 'balanced'
cv : integer or cross-validation generator
The default cross-validation generator used is Stratified K-Folds.
If an integer is provided, then it is the number of folds used.
See the module :mod:`sklearn.model_selection` module for the
list of possible cross-validation objects.
penalty : str, 'l1' or 'l2'
Used to specify the norm used in the penalization. The newton-cg and
lbfgs solvers support only l2 penalties.
dual : bool
Dual or primal formulation. Dual formulation is only implemented for
l2 penalty with liblinear solver. Prefer dual=False when
n_samples > n_features.
scoring : callable
Scoring function to use as cross-validation criteria. For a list of
scoring functions that can be used, look at :mod:`sklearn.metrics`.
The default scoring option used is accuracy_score.
solver : {'newton-cg', 'lbfgs', 'liblinear', 'sag'}
Algorithm to use in the optimization problem.
- For small datasets, 'liblinear' is a good choice, whereas 'sag' is
faster for large ones.
- For multiclass problems, only 'newton-cg', 'sag' and 'lbfgs' handle
multinomial loss; 'liblinear' is limited to one-versus-rest
schemes.
- 'newton-cg', 'lbfgs' and 'sag' only handle L2 penalty.
- 'liblinear' might be slower in LogisticRegressionCV because it does
not handle warm-starting.
Note that 'sag' fast convergence is only guaranteed on features with
approximately the same scale. You can preprocess the data with a
scaler from sklearn.preprocessing.
.. versionadded:: 0.17
Stochastic Average Gradient descent solver.
tol : float, optional
Tolerance for stopping criteria.
max_iter : int, optional
Maximum number of iterations of the optimization algorithm.
n_jobs : int, optional
Number of CPU cores used during the cross-validation loop. If given
a value of -1, all cores are used.
verbose : int
For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any
positive number for verbosity.
refit : bool
If set to True, the scores are averaged across all folds, and the
coefs and the C that corresponds to the best score is taken, and a
final refit is done using these parameters.
Otherwise the coefs, intercepts and C that correspond to the
best scores across folds are averaged.
multi_class : str, {'ovr', 'multinomial'}
Multiclass option can be either 'ovr' or 'multinomial'. If the option
chosen is 'ovr', then a binary problem is fit for each label. Else
the loss minimised is the multinomial loss fit across
the entire probability distribution. Works only for the 'newton-cg',
'sag' and 'lbfgs' solver.
.. versionadded:: 0.18
Stochastic Average Gradient descent solver for 'multinomial' case.
intercept_scaling : float, default 1.
Useful only if solver is liblinear.
This parameter is useful only when the solver 'liblinear' is used
and self.fit_intercept is set to True. In this case, x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data.
Attributes
----------
coef_ : array, shape (1, n_features) or (n_classes, n_features)
Coefficient of the features in the decision function.
`coef_` is of shape (1, n_features) when the given problem
is binary.
`coef_` is readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape (1,) or (n_classes,)
Intercept (a.k.a. bias) added to the decision function.
It is available only when parameter intercept is set to True
and is of shape(1,) when the problem is binary.
Cs_ : array
Array of C i.e. inverse of regularization parameter values used
for cross-validation.
coefs_paths_ : array, shape ``(n_folds, len(Cs_), n_features)`` or \
``(n_folds, len(Cs_), n_features + 1)``
dict with classes as the keys, and the path of coefficients obtained
during cross-validating across each fold and then across each Cs
after doing an OvR for the corresponding class as values.
If the 'multi_class' option is set to 'multinomial', then
the coefs_paths are the coefficients corresponding to each class.
Each dict value has shape ``(n_folds, len(Cs_), n_features)`` or
``(n_folds, len(Cs_), n_features + 1)`` depending on whether the
intercept is fit or not.
scores_ : dict
dict with classes as the keys, and the values as the
grid of scores obtained during cross-validating each fold, after doing
an OvR for the corresponding class. If the 'multi_class' option
given is 'multinomial' then the same scores are repeated across
all classes, since this is the multinomial class.
Each dict value has shape (n_folds, len(Cs))
C_ : array, shape (n_classes,) or (n_classes - 1,)
Array of C that maps to the best scores across every class. If refit is
set to False, then for each class, the best C is the average of the
C's that correspond to the best scores for each fold.
n_iter_ : array, shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs)
Actual number of iterations for all classes, folds and Cs.
In the binary or multinomial cases, the first dimension is equal to 1.
See also
--------
LogisticRegression
"""
def __init__(self, Cs=10, fit_intercept=True, cv=None, dual=False,
penalty='l2', scoring=None, solver='lbfgs', tol=1e-4,
max_iter=100, class_weight=None, n_jobs=1, verbose=0,
refit=True, intercept_scaling=1., multi_class='ovr',
random_state=None):
self.Cs = Cs
self.fit_intercept = fit_intercept
self.cv = cv
self.dual = dual
self.penalty = penalty
self.scoring = scoring
self.tol = tol
self.max_iter = max_iter
self.class_weight = class_weight
self.n_jobs = n_jobs
self.verbose = verbose
self.solver = solver
self.refit = refit
self.intercept_scaling = intercept_scaling
self.multi_class = multi_class
self.random_state = random_state
def fit(self, X, y, sample_weight=None):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples,)
Target vector relative to X.
sample_weight : array-like, shape (n_samples,) optional
Array of weights that are assigned to individual samples.
If not provided, then each sample is given unit weight.
Returns
-------
self : object
Returns self.
"""
_check_solver_option(self.solver, self.multi_class, self.penalty,
self.dual)
if not isinstance(self.max_iter, numbers.Number) or self.max_iter < 0:
raise ValueError("Maximum number of iteration must be positive;"
" got (max_iter=%r)" % self.max_iter)
if not isinstance(self.tol, numbers.Number) or self.tol < 0:
raise ValueError("Tolerance for stopping criteria must be "
"positive; got (tol=%r)" % self.tol)
X, y = check_X_y(X, y, accept_sparse='csr', dtype=np.float64,
order="C")
if self.solver == 'sag':
max_squared_sum = row_norms(X, squared=True).max()
else:
max_squared_sum = None
check_classification_targets(y)
if y.ndim == 2 and y.shape[1] == 1:
warnings.warn(
"A column-vector y was passed when a 1d array was"
" expected. Please change the shape of y to "
"(n_samples, ), for example using ravel().",
DataConversionWarning)
y = np.ravel(y)
check_consistent_length(X, y)
# init cross-validation generator
cv = check_cv(self.cv, y, classifier=True)
folds = list(cv.split(X, y))
self._enc = LabelEncoder()
self._enc.fit(y)
labels = self.classes_ = np.unique(y)
n_classes = len(labels)
if n_classes < 2:
raise ValueError("This solver needs samples of at least 2 classes"
" in the data, but the data contains only one"
" class: %r" % self.classes_[0])
if n_classes == 2:
# OvR in case of binary problems is as good as fitting
# the higher label
n_classes = 1
labels = labels[1:]
# We need this hack to iterate only once over labels, in the case of
# multi_class = multinomial, without changing the value of the labels.
iter_labels = labels
if self.multi_class == 'multinomial':
iter_labels = [None]
if self.class_weight and not(isinstance(self.class_weight, dict) or
self.class_weight in
['balanced', 'auto']):
# 'auto' is deprecated and will be removed in 0.19
raise ValueError("class_weight provided should be a "
"dict or 'balanced'")
# compute the class weights for the entire dataset y
if self.class_weight in ("auto", "balanced"):
classes = np.unique(y)
class_weight = compute_class_weight(self.class_weight, classes, y)
class_weight = dict(zip(classes, class_weight))
else:
class_weight = self.class_weight
path_func = delayed(_log_reg_scoring_path)
# The SAG solver releases the GIL so it's more efficient to use
# threads for this solver.
backend = 'threading' if self.solver == 'sag' else 'multiprocessing'
fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose,
backend=backend)(
path_func(X, y, train, test, pos_class=label, Cs=self.Cs,
fit_intercept=self.fit_intercept, penalty=self.penalty,
dual=self.dual, solver=self.solver, tol=self.tol,
max_iter=self.max_iter, verbose=self.verbose,
class_weight=class_weight, scoring=self.scoring,
multi_class=self.multi_class,
intercept_scaling=self.intercept_scaling,
random_state=self.random_state,
max_squared_sum=max_squared_sum,
sample_weight=sample_weight
)
for label in iter_labels
for train, test in folds)
if self.multi_class == 'multinomial':
multi_coefs_paths, Cs, multi_scores, n_iter_ = zip(*fold_coefs_)
multi_coefs_paths = np.asarray(multi_coefs_paths)
multi_scores = np.asarray(multi_scores)
# This is just to maintain API similarity between the ovr and
# multinomial option.
# Coefs_paths is now n_folds X len(Cs) X n_classes X n_features
# we need it to be n_classes X len(Cs) X n_folds X n_features
# to be similar to "ovr".
coefs_paths = np.rollaxis(multi_coefs_paths, 2, 0)
# Multinomial has a true score across all labels. Hence the
# shape is n_folds X len(Cs). We need to repeat this score
# across all labels for API similarity.
scores = np.tile(multi_scores, (n_classes, 1, 1))
self.Cs_ = Cs[0]
self.n_iter_ = np.reshape(n_iter_, (1, len(folds),
len(self.Cs_)))
else:
coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_)
self.Cs_ = Cs[0]
coefs_paths = np.reshape(coefs_paths, (n_classes, len(folds),
len(self.Cs_), -1))
self.n_iter_ = np.reshape(n_iter_, (n_classes, len(folds),
len(self.Cs_)))
self.coefs_paths_ = dict(zip(labels, coefs_paths))
scores = np.reshape(scores, (n_classes, len(folds), -1))
self.scores_ = dict(zip(labels, scores))
self.C_ = list()
self.coef_ = np.empty((n_classes, X.shape[1]))
self.intercept_ = np.zeros(n_classes)
# hack to iterate only once for multinomial case.
if self.multi_class == 'multinomial':
scores = multi_scores
coefs_paths = multi_coefs_paths
for index, label in enumerate(iter_labels):
if self.multi_class == 'ovr':
scores = self.scores_[label]
coefs_paths = self.coefs_paths_[label]
if self.refit:
best_index = scores.sum(axis=0).argmax()
C_ = self.Cs_[best_index]
self.C_.append(C_)
if self.multi_class == 'multinomial':
coef_init = np.mean(coefs_paths[:, best_index, :, :],
axis=0)
else:
coef_init = np.mean(coefs_paths[:, best_index, :], axis=0)
w, _, _ = logistic_regression_path(
X, y, pos_class=label, Cs=[C_], solver=self.solver,
fit_intercept=self.fit_intercept, coef=coef_init,
max_iter=self.max_iter, tol=self.tol,
penalty=self.penalty, copy=False,
class_weight=class_weight,
multi_class=self.multi_class,
verbose=max(0, self.verbose - 1),
random_state=self.random_state,
check_input=False, max_squared_sum=max_squared_sum,
sample_weight=sample_weight)
w = w[0]
else:
# Take the best scores across every fold and the average of all
# coefficients corresponding to the best scores.
best_indices = np.argmax(scores, axis=1)
w = np.mean([coefs_paths[i][best_indices[i]]
for i in range(len(folds))], axis=0)
self.C_.append(np.mean(self.Cs_[best_indices]))
if self.multi_class == 'multinomial':
self.C_ = np.tile(self.C_, n_classes)
self.coef_ = w[:, :X.shape[1]]
if self.fit_intercept:
self.intercept_ = w[:, -1]
else:
self.coef_[index] = w[: X.shape[1]]
if self.fit_intercept:
self.intercept_[index] = w[-1]
self.C_ = np.asarray(self.C_)
return self
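# --- Hedged illustration (not part of the original scikit-learn module) ----
# Minimal usage sketch of LogisticRegressionCV as defined above; *_demo
# names are assumptions of this example only.
if __name__ == "__main__":  # pragma: no cover
    from sklearn.datasets import make_classification
    from sklearn.linear_model import LogisticRegressionCV

    X_demo, y_demo = make_classification(n_samples=300, n_features=20,
                                         random_state=0)
    cv_demo = LogisticRegressionCV(Cs=10, cv=5,
                                   solver='lbfgs').fit(X_demo, y_demo)
    print("grid of Cs searched:", cv_demo.Cs_)
    print("C selected per class:", cv_demo.C_)
    print("fitted coefficients shape:", cv_demo.coef_.shape)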
| bsd-3-clause |
pizzathief/numpy | doc/example.py | 51 | 3578 | """This is the docstring for the example.py module. Module names should
have short, all-lowercase names. The module name may have underscores if
this improves readability.
Every module should have a docstring at the very top of the file. The
module's docstring may extend over multiple lines. If your docstring does
extend over multiple lines, the closing three quotation marks must be on
a line by itself, preferably preceded by a blank line.
"""
from __future__ import division, absolute_import, print_function
import os # standard library imports first
# Do NOT import using *, e.g. from numpy import *
#
# Import the module using
#
# import numpy
#
# instead or import individual functions as needed, e.g
#
# from numpy import array, zeros
#
# If you prefer the use of abbreviated module names, we suggest the
# convention used by NumPy itself::
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
# These abbreviated names are not to be used in docstrings; users must
# be able to paste and execute docstrings after importing only the
# numpy module itself, unabbreviated.
from my_module import my_func, other_func
def foo(var1, var2, long_var_name='hi'):
r"""A one-line summary that does not use variable names or the
function name.
Several sentences providing an extended description. Refer to
variables using back-ticks, e.g. `var`.
Parameters
----------
var1 : array_like
Array_like means all those objects -- lists, nested lists, etc. --
that can be converted to an array. We can also refer to
variables like `var1`.
var2 : int
The type above can either refer to an actual Python type
(e.g. ``int``), or describe the type of the variable in more
detail, e.g. ``(N,) ndarray`` or ``array_like``.
long_var_name : {'hi', 'ho'}, optional
Choices in brackets, default first when optional.
Returns
-------
type
Explanation of anonymous return value of type ``type``.
describe : type
Explanation of return value named `describe`.
out : type
Explanation of `out`.
Other Parameters
----------------
only_seldom_used_keywords : type
Explanation
common_parameters_listed_above : type
Explanation
Raises
------
BadException
Because you shouldn't have done that.
See Also
--------
otherfunc : relationship (optional)
newfunc : Relationship (optional), which could be fairly long, in which
case the line wraps here.
thirdfunc, fourthfunc, fifthfunc
Notes
-----
Notes about the implementation algorithm (if needed).
This can have multiple paragraphs.
You may include some math:
.. math:: X(e^{j\omega}) = \sum_{n=-\infty}^{\infty} x(n) e^{-j\omega n}
And even use a greek symbol like :math:`\omega` inline.
References
----------
Cite the relevant literature, e.g. [1]_. You may also cite these
references in the notes section above.
.. [1] O. McNoleg, "The integration of GIS, remote sensing,
expert systems and adaptive co-kriging for environmental habitat
modelling of the Highland Haggis using object-oriented, fuzzy-logic
and neural-network techniques," Computers & Geosciences, vol. 22,
pp. 585-588, 1996.
Examples
--------
These are written in doctest format, and should illustrate how to
use the function.
>>> a = [1, 2, 3]
>>> print([x + 3 for x in a])
[4, 5, 6]
>>> print("a\n\nb")
a
b
"""
pass
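# --- Hedged illustration (not part of the original example file) -----------
# A second, deliberately short function written to the same docstring
# standard, showing the minimal sections (summary, Parameters, Returns,
# Examples) that most functions need. The function itself is only a toy.
def scale(x, factor=1.0):
    """Multiply an array by a constant factor.

    Parameters
    ----------
    x : array_like
        Input values.
    factor : float, optional
        Multiplicative factor, default 1.0.

    Returns
    -------
    ndarray
        `x` converted to a float array and multiplied by `factor`.

    Examples
    --------
    >>> float(scale([1.0, 2.0], factor=3.0)[1])
    6.0
    """
    return np.asarray(x, dtype=float) * factor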
| bsd-3-clause |
boland1992/seissuite_iran | build/lib/ambient/ant/pstomo.py | 2 | 62674 | """
Definition of classes handling dispersion curves and
velocity maps (obtained by inverting dispersion curves)
"""
import pserrors, psutils
import itertools as it
import numpy as np
from scipy.optimize import curve_fit
from scipy.interpolate import interp1d
import os
import glob
import pickle
import matplotlib.pyplot as plt
from matplotlib.colors import LinearSegmentedColormap
from matplotlib import gridspec
from matplotlib.colors import ColorConverter
import shutil
from inspect import getargspec
# todo: discard measurements if too different from trimester velocities (see BB15B-SPB)
# ====================================================
# parsing configuration file to import some parameters
# ====================================================
from psconfig import (
SIGNAL_WINDOW_VMIN, SIGNAL_WINDOW_VMAX, SIGNAL2NOISE_TRAIL, NOISE_WINDOW_SIZE,
MINSPECTSNR, MINSPECTSNR_NOSDEV, MAXSDEV, MINNBTRIMESTER, MAXPERIOD_FACTOR,
LONSTEP, LATSTEP, CORRELATION_LENGTH, ALPHA, BETA, LAMBDA,
FTAN_ALPHA, FTAN_VELOCITIES_STEP, PERIOD_RESAMPLE)
# ========================
# Constants and parameters
# ========================
EPS = 1.0E-6
# custom color map for seismic anomalies
# --------------------------------------
c = ColorConverter()
colors = ['black', 'red', 'gold', 'white',
'white', 'aquamarine', 'blue', 'magenta']
values = [-1.0, -0.35, -0.1, -0.025,
0.025, 0.1, 0.35, 1.0]
#colors = ['black', 'red', 'gold', 'lemonchiffon', 'white',
# 'palegreen', 'aquamarine', 'blue', 'magenta']
#values = [-1.0, -0.7, -0.3, -0.1, 0.0,
# 0.1, 0.3, 0.7, 1.0]
rgblist = [c.to_rgb(s) for s in colors]
reds, greens, blues = zip(*rgblist)
cdict = {}
for x, r, g, b in zip(values, reds, greens, blues):
v = (x - min(values)) / (max(values) - min(values))
cdict.setdefault('red', []).append((v, r, r))
cdict.setdefault('green', []).append((v, g, g))
cdict.setdefault('blue', []).append((v, b, b))
CMAP_SEISMIC = LinearSegmentedColormap('customseismic', cdict)
# custom color map for spatial resolution
# ---------------------------------------
colors = ['black', 'red', 'yellow', 'green', 'white']
values = [0, 0.25, 0.5, 0.75, 1.0]
#colors = ['magenta', 'blue', 'aquamarine', 'palegreen', 'white',
# 'lemonchiffon', 'gold', 'red', 'darkred']
#values = [-1.0, -0.7, -0.3, -0.1,
# 0.1, 0.3, 0.7, 1.0]
rgblist = [c.to_rgb(s) for s in colors]
reds, greens, blues = zip(*rgblist)
cdict = {}
for x, r, g, b in zip(values, reds, greens, blues):
v = (x - min(values)) / (max(values) - min(values))
cdict.setdefault('red', []).append((v, r, r))
cdict.setdefault('green', []).append((v, g, g))
cdict.setdefault('blue', []).append((v, b, b))
CMAP_RESOLUTION = LinearSegmentedColormap('customresolution', cdict)
CMAP_RESOLUTION.set_bad(color='0.85')
# custom color map for path density
# ---------------------------------------
colors = ['white', 'cyan', 'green', 'yellow', 'red', 'black']
values = [0, 0.05, 0.1, 0.25, 0.5, 1.0]
rgblist = [c.to_rgb(s) for s in colors]
reds, greens, blues = zip(*rgblist)
cdict = {}
for x, r, g, b in zip(values, reds, greens, blues):
v = (x - min(values)) / (max(values) - min(values))
cdict.setdefault('red', []).append((v, r, r))
cdict.setdefault('green', []).append((v, g, g))
cdict.setdefault('blue', []).append((v, b, b))
CMAP_DENSITY = LinearSegmentedColormap('customdensity', cdict)
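# --- Hedged illustration (not part of the original module) -----------------
# A quick, purely visual sanity check of the three custom colormaps built
# above; it just displays random data clipped to each map's value range.
if __name__ == "__main__":  # pragma: no cover
    _demo_data = np.random.RandomState(0).uniform(-1.0, 1.0, size=(50, 50))
    _fig, _axes = plt.subplots(1, 3, figsize=(12, 4))
    for _ax, _cmap, _lims in zip(_axes,
                                 (CMAP_SEISMIC, CMAP_RESOLUTION, CMAP_DENSITY),
                                 ((-1.0, 1.0), (0.0, 1.0), (0.0, 1.0))):
        _im = _ax.imshow(np.clip(_demo_data, *_lims), cmap=_cmap,
                         vmin=_lims[0], vmax=_lims[1])
        _fig.colorbar(_im, ax=_ax)
        _ax.set_title(_cmap.name)
    plt.show()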
class DispersionCurve:
"""
Class holding a dispersion curve, i.e., velocity
as a function of period
"""
def __init__(self, periods, v, station1, station2,
minspectSNR=MINSPECTSNR,
minspectSNR_nosdev=MINSPECTSNR_NOSDEV,
maxsdev=MAXSDEV,
minnbtrimester=MINNBTRIMESTER,
maxperiodfactor=MAXPERIOD_FACTOR,
nom2inst_periods=None):
"""
Initializes the dispersion curve between the pair *station1*-*station2*
using the given velocities (array *v*) at the given *periods*.
Selection parameters (used to select velocities that will participate
in the tomographic inversion) are given in *minspectSNR*,
*minspectSNR_nosdev*, *maxsdev*, *minnbtrimester* and *maxperiodfactor*.
Periods can be nominal (i.e., center of Gaussian filters of FTAN) or
instantaneous (dphi/dt). If periods are instantaneous, then a list
of tuples [(nominal period, instantaneous period), ...] should be
provided in *nom2inst_periods*
@type periods: iterable
@type v: iterable
@type station1: L{psstation.Station}
@type station2: L{psstation.Station}
"""
# periods and associated velocities
self.periods = np.array(periods)
self.v = np.array(v)
# SNRs along periods
self._SNRs = None
# trimester velocities and SNRs
self.v_trimesters = {}
self._SNRs_trimesters = {}
# stations
self.station1 = station1
self.station2 = station2
# selection parameters
self.minspectSNR = minspectSNR
self.minspectSNR_nosdev = minspectSNR_nosdev
self.maxsdev = maxsdev
self.minnbtrimester = minnbtrimester
self.maxperiodfactor = maxperiodfactor
# list of (nominal period, instantaneous period)
self.nom2inst_periods = nom2inst_periods
def __repr__(self):
return 'Dispersion curve between stations {}-{}'.format(self.station1.name,
self.station2.name)
def get_period_index(self, period):
"""
Gets index of *period*, or raises an error if period
is not found
"""
iperiod = np.abs(self.periods - period).argmin()
if np.abs(self.periods[iperiod] - period) > EPS:
raise Exception('Cannot find period in dispersion curve')
return iperiod
def update_parameters(self, minspectSNR=None, minspectSNR_nosdev=None,
maxsdev=None, minnbtrimester=None, maxperiodfactor=None):
"""
Updating one or more filtering parameter(s)
"""
if not minspectSNR is None:
self.minspectSNR = minspectSNR
if not minspectSNR_nosdev is None:
self.minspectSNR_nosdev = minspectSNR_nosdev
if not maxsdev is None:
self.maxsdev = maxsdev
if not minnbtrimester is None:
self.minnbtrimester = minnbtrimester
if not maxperiodfactor is None:
self.maxperiodfactor = maxperiodfactor
def dist(self):
"""
Interstation spacing (km)
"""
return self.station1.dist(self.station2)
def add_trimester(self, trimester_start, curve_trimester):
"""
Adding a trimester dispersion curve.
@type trimester_start: int
@type curve_trimester: L{DispersionCurve}
"""
if trimester_start in self.v_trimesters:
raise Exception('Trimester already added')
if np.any(curve_trimester.periods != self.periods):
raise Exception("Wrong periods for trimester curve")
# adding velocity and SNR arrays of trimester
self.v_trimesters[trimester_start] = curve_trimester.v
self._SNRs_trimesters[trimester_start] = curve_trimester._SNRs
def add_SNRs(self, xc, filter_alpha=FTAN_ALPHA, months=None,
vmin=SIGNAL_WINDOW_VMIN,
vmax=SIGNAL_WINDOW_VMAX,
signal2noise_trail=SIGNAL2NOISE_TRAIL,
noise_window_size=NOISE_WINDOW_SIZE):
"""
Adding spectral SNRs at each period of the dispersion curve.
The SNRs are calculated from the cross-correlation data
bandpassed with narrow Gaussian filters (similar to the filter
used in the FTAN) centered at self.periods, and width controlled
by *filter_alpha*. (See psutils.bandpass_gaussian().)
Parameters *vmin*, *vmax*, *signal2noise_trail*, *noise_window_size*
control the location of the signal window and the noise window
(see function xc.SNR()).
@type xc: L{CrossCorrelation}
"""
centerperiods_and_alpha = zip(self.periods, [filter_alpha] * len(self.periods))
SNRs = xc.SNR(centerperiods_and_alpha=centerperiods_and_alpha,
months=months, vmin=vmin, vmax=vmax,
signal2noise_trail=signal2noise_trail,
noise_window_size=noise_window_size)
if self.nom2inst_periods:
# if a list of (nominal period, inst period) is provided
# we use it to re-interpolate SNRs
inst_period_func = interp1d(*zip(*self.nom2inst_periods))
SNRs = np.interp(x=self.periods,
xp=inst_period_func(self.periods),
fp=SNRs,
left=np.nan,
right=np.nan)
self._SNRs = SNRs
def get_SNRs(self, **kwargs):
if self._SNRs is None:
self.add_SNRs(**kwargs)
return self._SNRs
def filtered_sdevs(self):
"""
Standard dev of velocity at each period, calculated
across trimester velocity curves. On periods at which
std dev cannot be calculated, NaNs are returned.
Selection criteria:
- SNR of trimester velocity >= minspectSNR
- nb of trimester velocities >= minnbtrimester
@rtype: L{numpy.ndarray}
"""
# list of arrays of trimester velocities
trimester_vels = self.filtered_trimester_vels()
sdevs = []
for v_across_trimesters in zip(*trimester_vels):
# filtering out nans from trimester velocities
v_across_trimesters = [v for v in v_across_trimesters if not np.isnan(v)]
if len(v_across_trimesters) >= self.minnbtrimester:
sdev = np.std(v_across_trimesters)
else:
# not enough trimester velocities to estimate std dev
sdev = np.nan
sdevs.append(sdev)
return np.array(sdevs) if sdevs else np.ones_like(self.periods) * np.nan
def filtered_vels_sdevs(self):
"""
Returns array of velocities and array of associated
standard deviations. Velocities not passing selection
criteria are replaced with NaNs. Where standard
deviation cannot be estimated, NaNs are returned.
Selection criteria:
1) period <= distance * *maxperiodfactor*
2) for velocities having a standard deviation associated:
- standard deviation <= *maxsdev*
- SNR >= *minspectSNR*
3) for velocities NOT having a standard deviation associated:
- SNR >= *minspectSNR_nosdev*
          (SNRs equal to NaN are replaced with 0)
@rtype: L{numpy.ndarray}, L{numpy.ndarray}
"""
if self._SNRs is None:
raise Exception("Spectral SNRs not defined")
# estimating std devs, WHERE POSSIBLE (returning NaNs where not possible)
sdevs = self.filtered_sdevs()
has_sdev = ~np.isnan(sdevs) # where are std devs defined?
# Selection criteria:
# 1) period <= distance * *maxperiodfactor*
cutoffperiod = self.maxperiodfactor * self.dist()
mask = self.periods <= cutoffperiod
# 2) for velocities having a standard deviation associated:
# - standard deviation <= *maxsdev*
# - SNR >= *minspectSNR*
mask[has_sdev] &= (sdevs[has_sdev] <= self.maxsdev) & \
(np.nan_to_num(self._SNRs[has_sdev]) >= self.minspectSNR)
# 3) for velocities NOT having a standard deviation associated:
# - SNR >= *minspectSNR_nosdev*
mask[~has_sdev] &= \
np.nan_to_num(self._SNRs[~has_sdev]) >= self.minspectSNR_nosdev
# replacing velocities not passing the selection criteria with NaNs
return np.where(mask, self.v, np.nan), sdevs
def filtered_vel_sdev_SNR(self, period):
"""
Returns a velocity, its std deviation and SNR at a given period,
or nan if the velocity does not satisfy the criteria, or
raises an exception if the period is not found.
@type period: float
@rtype: (float, float, float)
"""
iperiod = self.get_period_index(period)
vels, sdevs = self.filtered_vels_sdevs()
return vels[iperiod], sdevs[iperiod], self._SNRs[iperiod]
def filtered_trimester_vels(self):
"""
Returns list of arrays of trimester velocities, or nan.
Selection criteria:
- SNR of trimester velocity defined and >= minspectSNR
- period <= pair distance * *maxperiodfactor*
@rtype: list of L{numpy.ndarray}
"""
# filtering criterion: periods <= distance * maxperiodfactor
dist = self.station1.dist(self.station2)
periodmask = self.periods <= self.maxperiodfactor * dist
varrays = []
for trimester_start, vels in self.v_trimesters.items():
SNRs = self._SNRs_trimesters.get(trimester_start)
if SNRs is None:
raise Exception("Spectral SNRs not defined")
# filtering criterion: SNR >= minspectSNR
mask = periodmask & (np.nan_to_num(SNRs) >= self.minspectSNR)
varrays.append(np.where(mask, vels, np.nan))
return varrays
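# Hedged usage sketch (hypothetical helper, not part of the original module):
# it only illustrates the intended call order on a DispersionCurve, assuming
# *curve* is a DispersionCurve and *xc* the corresponding CrossCorrelation.
def _example_filter_dispersion_curve(curve, xc, period=10.0):
    """Illustration only: spectral SNRs must be attached before filtering."""
    curve.add_SNRs(xc)  # SNRs from narrowly bandpassed cross-correlation
    vels, sdevs = curve.filtered_vels_sdevs()  # NaN where criteria are not met
    # velocity, std dev and SNR at a single period (raises if period not found)
    return curve.filtered_vel_sdev_SNR(period)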
class Grid:
"""
Class holding a 2D regular rectangular spatial grid
"""
def __init__(self, xmin, xstep, nx, ymin, ystep, ny):
"""
Min coords, step size and nb of points of grid
"""
self.xmin = xmin
self.xstep = xstep
self.nx = int(nx)
self.ymin = ymin
self.ystep = ystep
self.ny = int(ny)
def __repr__(self):
s = '<2D grid: x = {}...{} by {}, y = {}...{} by {}>'
return s.format(self.xmin, self.get_xmax(), self.xstep,
self.ymin, self.get_ymax(), self.ystep)
def __eq__(self, other):
"""
@type other: Grid
"""
try:
samegrids = (self.xmin == other.xmin and
self.xstep == other.xstep and
self.nx == other.nx and
self.ymin == other.ymin and
self.ystep == other.ystep and
self.ny == other.ny)
return samegrids
except:
return False
def __ne__(self, other):
return not self.__eq__(other)
def get_xmax(self):
return self.xmin + (self.nx - 1) * self.xstep
def get_ymax(self):
return self.ymin + (self.ny - 1) * self.ystep
def bbox(self):
"""
Bounding box: (xmin, xmax, ymin, ymax)
@rtype: (float, float, float, float)
"""
return self.xmin, self.get_xmax(), self.ymin, self.get_ymax()
def n_nodes(self):
"""
Nb of nodes on grid
"""
return self.nx * self.ny
def ix_iy(self, index_):
"""
Indexes along x and y-axis of node nb *index_*
"""
ix = np.int_(np.array(index_) / self.ny)
iy = np.mod(np.array(index_), self.ny)
return ix, iy
def xy(self, index_):
"""
Coords of node nb *index_*
"""
index_ = np.array(index_)
if np.any((index_ < 0) | (index_ > self.n_nodes() - 1)):
raise Exception('Index out of bounds')
ix, iy = self.ix_iy(index_)
return self._x(ix), self._y(iy)
def xy_nodes(self):
"""
Returns coords of all nodes of grid
"""
return self.xy(np.arange(0, self.n_nodes()))
def xarray(self):
return np.linspace(self.xmin, self.get_xmax(), num=self.nx, endpoint=True)
def yarray(self):
return np.linspace(self.ymin, self.get_ymax(), num=self.ny, endpoint=True)
def index_(self, ix, iy):
"""
Index of node (ix, iy) in grid:
- 0 : ix=0, iy=0
- 1 : ix=0, iy=1
- ...
- ny: ix=1, iy=0
- ...
- nx*ny-1: ix=nx-1, iy=ny-1
"""
ix = np.array(ix)
iy = np.array(iy)
if np.any((ix < 0) | (ix > self.nx - 1)):
raise Exception('ix out of bounds')
if np.any((iy < 0) | (iy > self.ny - 1)):
raise Exception('iy out of bounds')
return ix * self.ny + iy
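    # Worked example (illustration only): with nx=3 and ny=2, node (ix=2, iy=1)
    # has index 2*2 + 1 = 5, and ix_iy(5) recovers (ix, iy) = (2, 1).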
def indexes_delaunay_triangle(self, x, y):
"""
Indexes of the grid's nodes defining the
Delaunay triangle around point (x, y)
"""
# x and y indexes of bottom left neighbour
ix = self._xindex_left_neighbour(x)
iy = self._yindex_bottom_neighbour(y)
        # if (x, y) lies on the right/top edge of the grid, shift the neighbour
        # indexes one cell back so that ix+1, iy+1 remain valid
        ix = np.where(ix == self.nx - 1, ix - 1, ix)
        iy = np.where(iy == self.ny - 1, iy - 1, iy)
xratio = (x - self._x(ix)) / self.xstep
yratio = (y - self._y(iy)) / self.ystep
# returning indexes of vertices of bottom right triangle
# or upper left triangle depending on location
index1 = self.index_(ix, iy)
index2 = np.where(xratio >= yratio, self.index_(ix+1, iy), self.index_(ix, iy+1))
index3 = self.index_(ix+1, iy+1)
return index1, index2, index3
def geodetic_dist(self, index1, index2):
"""
Geodetic distance between nodes nb *index1* and *index2*,
        whose coordinates (x, y) are treated as (lon, lat)
"""
        lon1, lat1 = self.xy(index1)
        lon2, lat2 = self.xy(index2)
        return psutils.dist(lons1=lon1, lats1=lat1, lons2=lon2, lats2=lat2)
def to_2D_array(self, a):
"""
Converts a sequence-like *a* to a 2D array b[ix, iy]
such that i is the index of node (ix, iy)
"""
b = np.zeros((self.nx, self.ny))
ix, iy = self.ix_iy(range(self.n_nodes()))
b[ix, iy] = np.array(a).flatten()
return b
def _x(self, ix):
"""
Returns the abscissa of node nb *ix* on x-axis
(ix = 0 ... nx-1)
"""
ix = np.array(ix)
if np.any((ix < 0) | (ix > self.nx - 1)):
raise Exception('ix out of bounds')
return self.xmin + ix * self.xstep
def _y(self, iy):
"""
Returns the ordinate of node nb *iy* on y-axis
"""
iy = np.array(iy)
if np.any((iy < 0) | (iy > self.ny - 1)):
raise Exception('iy out of bounds')
return self.ymin + iy * self.ystep
def _xindex_left_neighbour(self, x):
"""
Returns the index (along x-axis) of the grid nodes
closest to (and on the left of) *x*
(Index of 1st node = 0, index of last node = nx - 1)
@rtype: Number
"""
x = np.array(x)
# checking bounds
out_of_bounds = (x < self.xmin) | (x > self.get_xmax())
if np.any(out_of_bounds):
s = 'some x {} are out of bounds [{} - {}]'
raise Exception(s.format(x[out_of_bounds], self.xmin, self.get_xmax()))
# index of closest left node
return np.int_((x - self.xmin) / self.xstep)
def _yindex_bottom_neighbour(self, y):
"""
Same as above method, along y axis
@rtype: Number
"""
y = np.array(y)
# checking bounds
out_of_bounds = (y < self.ymin) | (y > self.get_ymax())
if np.any(out_of_bounds):
s = 'some y {} are out of bounds [{} - {}]'
raise Exception(s.format(y[out_of_bounds], self.ymin, self.get_ymax()))
# index of closest bottom node
return np.int_((y - self.ymin) / self.ystep)
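# Minimal sketch (illustration only, not part of the original module): build a
# small 1-degree grid and exercise the node-indexing helpers defined above.
def _example_grid_usage():
    grid = Grid(xmin=0.0, xstep=1.0, nx=11, ymin=-5.0, ystep=1.0, ny=11)
    lons, lats = grid.xy_nodes()        # coords of all nx*ny nodes
    first = grid.index_(ix=0, iy=0)     # node numbering: index = ix*ny + iy
    lons2d = grid.to_2D_array(lons)     # flat node array -> (nx, ny) array
    return first, lons2d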
class VelocityMap:
"""
Class taking care of the inversion of velocities between
pairs of stations, to produce a velocity map at a given
period. The inversion procedure of Barmin et al. (2001)
is applied.
Attributes:
- period : period (s) of the velocity map
- disp_curves : disp curves whose period's velocity is not nan
- paths : list of geodesic paths associated with pairs of stations
of dispersion curves
- v0 : reference velocity (inverse of mean slowness, i.e.,
slowness implied by all observed travel-times)
- dobs : vector of observed data (differences observed-reference travel time)
- Cinv : inverse of covariance matrix of the data
- G : forward matrix, such that d = G.m
(m = parameter vector = (v0-v)/v at grid nodes)
- density : array of path densities at grid nodes
- Q : regularization matrix
- Ginv : inversion operator, (Gt.C^-1.G + Q)^-1.Gt
- mopt : vector of best-fitting parameters, Ginv.C^-1.dobs
= best-fitting (v0-v)/v at grid nodes
- R : resolution matrix, (Gt.C^-1.G + Q)^-1.Gt.C^-1.G = Ginv.C^-1.G
- Rradius : array of radii of the cones that best-fit each line of the
resolution matrix
Note that vectors (d, m) and matrixes (Cinv, G, Q, Ginv, R) are NOT
numpy arrays, but numpy matrixes (vectors being n x 1 matrixes). This
means that the product operation (*) on such objects is NOT the
element-by-element product, but the real matrix product.
"""
def __init__(self, dispersion_curves, period, skippairs=(),
resolution_fit='cone', min_resolution_height=0.1,
showplot=False, verbose=True, **kwargs):
"""
Initializes the velocity map at period = *period*, from
the observed velocities in *dispersion_curves*:
- sets up the data vector, forward matrix and regularization matrix
- performs the tomographic inversion to estimate the best-fitting
parameters and the resolution matrix
- estimates the characteristic spatial resolution by fitting a cone
to each line of the resolution matrix
Specify pairs to be skipped (if any), as a list of pairs of stations names,
e.g., skippairs = [('APOB', 'SPB'), ('ITAB', 'BAMB')].
This option is useful to perform a 2-pass tomographic inversion,
wherein pairs with a too large difference observed/predicted travel-
time are excluded from the second pass.
Select the type of function you want to fit to each resolution map
with *resolution_fit*:
- 'cone' to fit a cone, and report the cone's radius as characteristic
resolution at each grid node in self.Rradius
- 'gaussian' to fit a gaussian function, exp(-r/2.sigma^2), and report
2.sigma as characteristic resolution at each grid node in self.Rradius
Note that all resolutions in self.Rradius having a best-fitting
cone height < *min_resolution_height* * max height will be
discarded and set to nan.
Append optional argument (**kwargs) to override default values:
- minspectSNR : min spectral SNR to retain velocity
(default MINSPECTSNR)
- minspectSNR_nosdev: min spectral SNR to retain velocities without standard
deviation (default MINSPECTSNR_NOSDEV)
- minnbtrimester : min nb of trimester velocities to estimate standard
deviation of velocity
- maxsdev : max standard deviation to retain velocity (default MAXSDEV)
- lonstep : longitude step of grid (default LONSTEP)
- latstep : latitude step of grid (default LATSTEP)
- correlation_length: correlation length of the smoothing kernel:
S(r,r') = exp[-|r-r'|**2 / (2 * correlation_length**2)]
(default value CORRELATION_LENGTH)
- alpha : strength of the spatial smoothing term in the penalty
function (default ALPHA)
- beta : strength of the weighted norm penalization term in the
penalty function (default BETA)
- lambda_ : parameter in the damping factor of the norm penalization
term, such that the norm is weighted by:
exp(- lambda_*path_density)
With a value of 0.15, penalization becomes strong when
path density < ~20
With a value of 0.30, penalization becomes strong when
path density < ~10
(default LAMBDA)
@type dispersion_curves: list of L{DispersionCurve}
@type skippairs: list of (str, str)
"""
self.period = period
# reading inversion parameters
minspectSNR = kwargs.get('minspectSNR', MINSPECTSNR)
minspectSNR_nosdev = kwargs.get('minspectSNR_nosdev', MINSPECTSNR_NOSDEV)
minnbtrimester = kwargs.get('minnbtrimester', MINNBTRIMESTER)
maxsdev = kwargs.get('maxsdev', MAXSDEV)
lonstep = kwargs.get('lonstep', LONSTEP)
latstep = kwargs.get('latstep', LATSTEP)
correlation_length = kwargs.get('correlation_length', CORRELATION_LENGTH)
alpha = kwargs.get('alpha', ALPHA)
beta = kwargs.get('beta', BETA)
lambda_ = kwargs.get('lambda_', LAMBDA)
if verbose:
print "Velocities selection criteria:"
print "- rejecting velocities if SNR < {}".format(minspectSNR)
s = "- rejecting velocities without std dev if SNR < {}"
print s.format(minspectSNR_nosdev)
s = "- estimating standard dev of velocities with more than {} trimesters"
print s.format(minnbtrimester)
print "- rejecting velocities with standard dev > {} km/s".format(maxsdev)
print "\nTomographic inversion parameters:"
print "- {} x {} deg grid".format(lonstep, latstep)
s = "- correlation length of the smoothing kernel: {} km"
print s.format(correlation_length)
print "- strength of the spatial smoothing term: {}".format(alpha)
print "- strength of the norm penalization term: {}".format(beta)
print "- weighting norm by exp(- {} * path_density)".format(lambda_)
print
# skipping pairs
if skippairs:
skippairs = [set(pair) for pair in skippairs]
dispersion_curves = [c for c in dispersion_curves
if not {c.station1.name, c.station2.name} in skippairs]
# updating parameters of dispersion curves
for c in dispersion_curves:
c.update_parameters(minspectSNR=minspectSNR,
minspectSNR_nosdev=minspectSNR_nosdev,
minnbtrimester=minnbtrimester,
maxsdev=maxsdev)
# valid dispersion curves (velocity != nan at period) and
# associated interstation distances
self.disp_curves = [c for c in dispersion_curves
if not np.isnan(c.filtered_vel_sdev_SNR(self.period)[0])]
if not self.disp_curves:
s = "No valid velocity at selected period ({} sec)"
raise pserrors.CannotPerformTomoInversion(s.format(period))
dists = np.array([c.dist() for c in self.disp_curves])
# getting (non nan) velocities and std devs at period
vels, sigmav, _ = zip(*[c.filtered_vel_sdev_SNR(self.period)
for c in self.disp_curves])
vels = np.array(vels)
sigmav = np.array(sigmav)
sigmav_isnan = np.isnan(sigmav)
if np.all(sigmav_isnan):
s = "No valid std deviation at selected period ({} sec)"
raise pserrors.CannotPerformTomoInversion(s.format(period))
# If the resolution in the velocities space is dv,
# it means that a velocity v is actually anything between
# v-dv/2 and v+dv/2, so the standard deviation cannot be
# less than the standard dev of a uniform distribution of
# width dv, which is dv / sqrt(12). Note that:
#
# dv = max(dv_FTAN, dt_xc * v^2/dist),
#
# with dv_FTAN the intrinsic velocity discretization step
# of the FTAN, and dt_xc the sampling interval of the
# cross-correlation.
dv = np.maximum(FTAN_VELOCITIES_STEP, PERIOD_RESAMPLE * vels**2 / dists)
minsigmav = dv / np.sqrt(12)
sigmav[~sigmav_isnan] = np.maximum(sigmav[~sigmav_isnan],
minsigmav[~sigmav_isnan])
# where std dev cannot be estimated (std dev = nan),
# assigning 3 times the mean std dev of the period
# following Bensen et al. (2008)
sigmav[sigmav_isnan] = 3 * sigmav[~sigmav_isnan].mean()
# ======================================================
# setting up reference velocity and data vector
# = vector of differences observed-reference travel time
# ======================================================
if verbose:
print 'Setting up reference velocity (v0) and data vector (dobs)'
# reference velocity = inverse of mean slowness
# mean slowness = slowness implied by observed travel-times
# = sum(observed travel-times) / sum(intersation distances)
s = (dists / vels).sum() / dists.sum()
self.v0 = 1.0 / s
# data vector
self.dobs = np.matrix(dists / vels - dists / self.v0).T
# inverse of covariance matrix of the data
if verbose:
print 'Setting up covariance matrix (C)'
sigmad = sigmav * dists / vels**2
self.Cinv = np.matrix(np.zeros((len(sigmav), len(sigmav))))
np.fill_diagonal(self.Cinv, 1.0 / sigmad**2)
# spatial grid for tomographic inversion (slightly enlarged to be
# sure that no path will fall outside)
lons1, lats1 = zip(*[c.station1.coord for c in self.disp_curves])
lons2, lats2 = zip(*[c.station2.coord for c in self.disp_curves])
tol = 0.5
lonmin = np.floor(min(lons1 + lons2) - tol)
nlon = np.ceil((max(lons1 + lons2) + tol - lonmin) / lonstep) + 1
latmin = np.floor(min(lats1 + lats2) - tol)
nlat = np.ceil((max(lats1 + lats2) + tol - latmin) / latstep) + 1
self.grid = Grid(lonmin, lonstep, nlon, latmin, latstep, nlat)
# geodesic paths associated with pairs of stations of dispersion curves
if verbose:
print 'Calculating interstation paths'
self.paths = []
for curve, dist in zip(self.disp_curves, dists):
# interpoint distance <= 1 km, and nb of points >= 100
npts = max(np.ceil(dist) + 1, 100)
path = psutils.geodesic(curve.station1.coord, curve.station2.coord, npts)
self.paths.append(path)
# ================================================
# setting up forward matrix G, such that d = G.m
#
# G[i,j] = integral{w_j(r) / v0 ds} over path nb i
# (w_j(r) = weight of node nb j on location r)
# ================================================
G = np.zeros((len(self.paths), self.grid.n_nodes()))
if verbose:
print 'Setting up {} x {} forward matrix (G)'.format(*G.shape)
for ipath, path in enumerate(self.paths):
# for each point M along the path (1) we determine the Delaunay
# triangle ABC that encloses M, (2) we locally define a cartesian
# system on the plane ABC, (3) we locate M' (the projection of M
# on the plane ABC) and (4) we attribute weights to A, B, C
# corresponding to the three-point linear interpolation of A, B,
# C at point M'.
lon_M, lat_M = path[:, 0], path[:, 1]
xyzM = psutils.geo2cartesian(lon_M, lat_M)
# indexes, geographic coordinates and cartesian coordinates
# (on unit sphere) of grid nodes of Delaunay triangle ABC
# enclosing M
iA, iB, iC = self.grid.indexes_delaunay_triangle(lon_M, lat_M)
lonlatA, lonlatB, lonlatC = [self.grid.xy(index_) for index_ in (iA, iB, iC)]
xyzA, xyzB, xyzC = [psutils.geo2cartesian(lon, lat)
for lon, lat in (lonlatA, lonlatB, lonlatC)]
# projection of M on the plane ABC
xyzMp = psutils.projection(xyzM, xyzA, xyzB, xyzC)
# weights of nodes A, B, C in linear interpolation =
# barycentric coordinates of M' in triangle ABC
wA, wB, wC = psutils.barycentric_coords(xyzMp, xyzA, xyzB, xyzC)
# attributing weights to grid nodes along path:
# w[j, :] = w_j(r) = weights of node j along path
nM = path.shape[0]
w = np.zeros((self.grid.n_nodes(), nM))
w[iA, range(nM)] = wA
w[iB, range(nM)] = wB
w[iC, range(nM)] = wC
# ds = array of infinitesimal distances along path
ds = psutils.dist(lons1=lon_M[:-1], lats1=lat_M[:-1],
lons2=lon_M[1:], lats2=lat_M[1:])
# integrating w_j(r) / v0 along path using trapeze formula
G[ipath, :] = np.sum(0.5 * (w[:, :-1] + w[:, 1:]) / self.v0 * ds, axis=-1)
self.G = np.matrix(G)
# path densities around grid's nodes
if verbose:
print "Calculating path densities"
self.density = self.path_density()
# =====================================================================
# setting up regularization matrix Q = Ft.F + Ht.H
#
# F[i,j] = alpha * | 1 if i = j
# | -S(ri,rj) / sum{S(ri,rj')} over j' != i] if i!= j
#
# H[i,j] = beta * | exp[-lambda * path_density(ri)] if i = j
# | 0 if i!= j
#
# with S(.,.) the smoothing kernel and ri the locations grid nodes
# =====================================================================
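        # Equivalently, the best-fitting model mopt (computed below) minimizes
        # the penalty function of Barmin et al. (2001):
        #
        #   (G.m - dobs)t.C^-1.(G.m - dobs) + ||F.m||^2 + ||H.m||^2,
        #
        # whose least-squares solution is mopt = (Gt.C^-1.G + Q)^-1.Gt.C^-1.dobs,
        # with Q = Ft.F + Ht.H (see the inversion operator Ginv set up below).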
# setting up distance matrix:
# dists[i,j] = distance between nodes nb i and j
dists = np.zeros((self.grid.n_nodes(), self.grid.n_nodes()))
if verbose:
print "Setting up {} x {} regularization matrix (Q)".format(*dists.shape)
# indices of the upper right triangle of distance matrix
# = (array of index #1, array of index #2)
i_upper, j_upper = np.triu_indices_from(dists)
lons_i, lats_i = self.grid.xy(i_upper)
lons_j, lats_j = self.grid.xy(j_upper)
# distance matrix (upper triangle)
dists[i_upper, j_upper] = psutils.dist(lons1=lons_i, lats1=lats_i,
lons2=lons_j, lats2=lats_j)
# symmetrizing distance matrix (works because diagonal elts = 0)
dists += dists.T
# setting up smoothing kernel:
# S[i,j] = K * exp[-|ri-rj|**2 / (2 * CORRELATION_LENGTH**2)]
S = np.exp(- dists**2 / (2 * correlation_length**2))
S /= S.sum(axis=-1) - np.diag(S) # normalization of non-diagonal terms
# setting up spatial regularization matrix F
F = np.matrix(-S)
F[np.diag_indices_from(F)] = 1
F *= alpha
# setting up regularization matrix Q
# ... Ft.F part
Q = F.T * F
# ... Ht.H part
for i, path_density in enumerate(self.density):
Q[i, i] += beta**2 * np.exp(-2 * lambda_ * path_density)
self.Q = Q
# ===========================================================
# setting up inversion operator Ginv = (Gt.C^-1.G + Q)^-1.Gt,
# estimating model and setting up resolution matrix R =
# Ginv.C^-1.G
# ===========================================================
# inversion operator
if verbose:
print "Setting up inversion operator (Ginv)"
self.Ginv = (self.G.T * self.Cinv * self.G + self.Q).I * self.G.T
# vector of best-fitting parameters
if verbose:
print "Estimating best-fitting parameters (mopt)"
self.mopt = self.Ginv * self.Cinv * self.dobs
# resolution matrix
if verbose:
print "Setting up {0} x {0} resolution matrix (R)".format(self.G.shape[1])
self.R = self.Ginv * self.Cinv * self.G
# ===========================================================
# Estimating spatial resolution at each node of the grid,
# Rradius.
#
# The i-th row of the resolution matrix, R[i,:], contains the
        # resolution map associated with the i-th grid node, that is,
# the estimated model we would get if there were only a point
# velocity anomaly at node nb i. So a cone centered on node
# nb i is fitted to the resolution map, and its radius gives
# an indication of the spatial resolution at node nb i (i.e.,
# the minimum distance at which two point anomalies can be
# resolved)
# ===========================================================
if verbose:
print "Estimation spatial resolution (Rradius)"
self.Rradius = np.zeros(self.grid.n_nodes())
heights = np.zeros(self.grid.n_nodes())
for i, Ri in enumerate(np.array(self.R)):
lon0, lat0 = self.grid.xy(i)
# best-fitting cone at point (lon0, lat0)
# Function returning the height of cone of radius *r0*
# and peak *z0*, at a point located *r* km away from
# the cone's center
if resolution_fit.lower().strip() == 'cone':
def cone_height(r, z0, r0):
"""
Cone
"""
return np.where(r < r0, z0 * (1 - r / r0), 0.0)
elif resolution_fit.lower().strip() == 'gaussian':
def cone_height(r, z0, r0):
"""
Gaussian function
"""
sigma = r0 / 2.0
return z0 * np.exp(- r**2 / (2 * sigma**2))
else:
s = "Unknown function to fit resolution: '{}'"
raise Exception(s.format(resolution_fit))
# distances between nodes and cone's center (lon0, lat0)
lonnodes, latnodes = self.grid.xy_nodes()
n = self.grid.n_nodes()
rdata = psutils.dist(lons1=lonnodes, lats1=latnodes,
lons2=n*[lon0], lats2=n*[lat0])
# best possible resolution *rmin* = 2 * inter-node distance
# -> estimating *rmin* along the meridian crossing the cone's
# center (conservative choice as it yields the largest
# possible value)
d2rad = np.pi / 180.0
rmin = 2 * d2rad * 6371.0 * max(self.grid.xstep * np.cos(lat0 * d2rad),
self.grid.ystep)
# fitting the above function to observed heights along nodes,
# in array abs(Ri)
popt, _ = curve_fit(f=cone_height, xdata=rdata, ydata=np.abs(Ri),
p0=[1, 2*rmin], maxfev=10000)
z0, r0 = popt
            # resolution cannot be better than *rmin*
r0 = max(rmin, r0)
# appending spatial resolution to array
self.Rradius[i] = r0
heights[i] = z0
self.Rradius[heights < heights.max() * min_resolution_height] = np.nan
if showplot:
            # plotting maps of velocity perturbation,
# path density and resolution
_ = self.plot()
def __repr__(self):
"""
E.g., "<Velocity map at period = 10 s>"
"""
return '<Velocity map at period = {} s>'.format(self.period)
def path_density(self, window=(LONSTEP, LATSTEP)):
"""
Returns the path density, that is, on each node of the
grid, the number of paths that cross the rectangular
cell of size (window[0], window[1]) centered on
the node.
"""
# initializing path density
density = np.zeros(self.grid.n_nodes())
# coordinates of grid nodes and associated windows
lons_nodes, lats_nodes = self.grid.xy_nodes()
lons_min = np.expand_dims(lons_nodes - window[0] / 2.0, axis=-1)
lons_max = np.expand_dims(lons_nodes + window[0] / 2.0, axis=-1)
lats_min = np.expand_dims(lats_nodes - window[1] / 2.0, axis=-1)
lats_max = np.expand_dims(lats_nodes + window[1] / 2.0, axis=-1)
for path in self.paths:
lons_path, lats_path = path[:, 0], path[:, 1]
# are points of paths in windows?
# 1st dim = grid nodes; 2nd dim = points along path
points_in_windows = (lons_path >= lons_min) & (lons_path <= lons_max) & \
(lats_path >= lats_min) & (lats_path <= lats_max)
density += np.any(points_in_windows, axis=-1)
return density
def traveltime_residuals(self, relative=False):
"""
Returns the [relative] differences between predicted-observed
travel times at each pair of stations:
differences = predicted - observed travel-time,
= dpred - dobs,
with dpred = G.mopt
relative differences = (predicted - observed) / observed travel-time
= (dpred - dobs) / (dobs + ref travel-time)
@rtype: L{ndarray}
"""
# flattening differences as 1D array
diffs = np.array(self.G * self.mopt - self.dobs).flatten()
if not relative:
return diffs
else:
ttref = np.array([c.dist() / self.v0 for c in self.disp_curves])
ttobs = np.array(self.dobs).flatten() + ttref # observed travel-times
return diffs / ttobs
def velocity_residuals(self, relative=False):
"""
Returns the [relative] differences between observed-predicted
velocities (implied by travel times) at each pair of stations:
differences = observed - predicted velocity,
= observed - predicted (dist / travel time),
@rtype: L{matrix}
"""
dists = np.array([c.dist() for c in self.disp_curves])
ttref = np.array([c.dist() / self.v0 for c in self.disp_curves])
ttobs = np.array(self.dobs).flatten() + ttref # observed travel-times
ttpred = np.array(self.G * self.mopt).flatten() + ttref # predicted tt
vobs = dists / ttobs # observed velocities
vpred = dists / ttpred # predicted velocities
if not relative:
return vobs - vpred
else:
return (vobs - vpred) / vobs
def checkerboard_func(self, vmid, vmin, vmax, squaresize, shape='cos'):
"""
Returns a checkerboard function, f(lons, lats), whose background
value is *vmid*, and alternating min/max values are *vmin* and
*vmax*. The centers of the anomalies are separated by *squaresize*
(in km), and their shape is either 'gaussian' or 'cos'.
@rtype: function
"""
# converting square size from km to degrees
d2rad = np.pi / 180.0
midlat = 0.5 * (self.grid.ymin + self.grid.get_ymax())
latwidth = squaresize / 6371.0 / d2rad
lonwidth = squaresize / (6371.0 * np.cos(midlat * d2rad)) / d2rad
# Basis function defining an anomaly of
# unit height centered at (*lon0*, *lat0*).
if shape.lower().strip() == 'gaussian':
def basis_func(lons, lats, lon0, lat0):
"""
                Gaussian anomaly, with sigma parameter such that 3 sigma
is the distance between the center and the border of
the square, that is, half the distance between 2
centers.
"""
n = len(lons)
r = psutils.dist(lons1=lons, lats1=lats, lons2=n*[lon0], lats2=n*[lat0])
sigma = squaresize / 6.0
return np.exp(- r**2 / (2 * sigma**2))
elif shape.lower().strip() == 'cos':
def basis_func(lons, lats, lon0, lat0):
"""
                Cosine anomaly
"""
x = (lons - lon0) / lonwidth
y = (lats - lat0) / latwidth
outside_square = (np.abs(x) >= 0.5) | (np.abs(y) >= 0.5)
return np.where(outside_square, 0.0, np.cos(np.pi*x) * np.cos(np.pi*y))
else:
raise Exception("Unknown shape anomaly: " + shape)
# coordinates of the center of the anomalies
startlon = self.grid.xmin + lonwidth / 2.0
stoplon = self.grid.get_xmax() + lonwidth
centerlons = list(np.arange(startlon, stoplon, lonwidth))
startlat = self.grid.ymin + latwidth / 2.0
stoplat = self.grid.get_ymax() + latwidth
centerlats = list(np.arange(startlat, stoplat, latwidth))
centerlonlats = list(it.product(centerlons, centerlats))
# factors by which multiply the basis function associated
# with each center (to alternate lows and highs)
polarities = [(centerlons.index(lon) + centerlats.index(lat)) % 2
for lon, lat in centerlonlats]
factors = np.where(np.array(polarities) == 1, vmax - vmid, vmin - vmid)
def func(lons, lats):
"""
            Checkerboard function: sum of the basis functions along
the centers defined above, times the high/low factor,
plus background velocity.
"""
lowhighs = [f * basis_func(lons, lats, lon0, lat0) for f, (lon0, lat0)
in zip(factors, centerlonlats)]
return vmid + sum(lowhighs)
return func
def checkerboard_test(self, vmid, vmin, vmax, squaresize, **kwargs):
"""
Generates synthetic data (travel time perturbations),
dsynth, from a checkerboard model of velocities, and
performs a tomographic inversion on them:
m = (Gt.C^-1.G + Q)^-1.Gt.C^-1.dsynth
= Ginv.C^-1.dsynth
Returns the vector of best-fitting parameters, m.
@rtype: L{matrix}
"""
# checkerboard function
f_checkerboard = self.checkerboard_func(vmid, vmin, vmax, squaresize, **kwargs)
# setting up vector of synthetic data
dsynth = np.zeros_like(self.dobs)
for d, path, curve in zip(dsynth, self.paths, self.disp_curves):
# array of infinitesimal distances along path
lons, lats = path[:, 0], path[:, 1]
ds = psutils.dist(lons1=lons[:-1], lats1=lats[:-1],
lons2=lons[1:], lats2=lats[1:])
# velocities along path
v = f_checkerboard(lons, lats)
# travel time = integral[ds / v]
t = np.sum(ds * 0.5 * (1.0 / v[:-1] + 1.0 / v[1:]))
# synthetic data = travel time - ref travel time
d[...] = t - curve.dist() / vmid
# inverting synthetic data
m = self.Ginv * self.Cinv * dsynth
return m
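    # Note on checkerboard_test: the returned m uses the same parameterization
    # as self.mopt, so the reconstructed velocity field is v = vmid / (1 + m),
    # which is the conversion applied in plot_checkerboard below.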
def plot(self, xsize=20, title=None, showplot=True, outfile=None, **kwargs):
"""
Plots velocity perturbation, path density
and spatial resolution, and returns the figure.
Additional keyword args in *kwargs* are sent to
self.plot_velocity(), self.plot_pathdensity()
and self.plot_resolution(), when applicable
@rtype: L{matplotlib.figure.Figure}
"""
# bounding box
bbox = self.grid.bbox()
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
figsize = (xsize, aspectratio * xsize / 3.0 + 2)
fig = plt.figure(figsize=figsize)
# layout
gs = gridspec.GridSpec(1, 3, wspace=0.0, hspace=0.0)
# plotting velocity perturbation
ax = fig.add_subplot(gs[0, 0])
subkwargs = {'ax': ax, 'plot_title': False}
# sending additional arguments (when applicable)
subkwargs.update({k: kwargs[k] for k in getargspec(self.plot_velocity).args
if k in kwargs})
self.plot_velocity(**subkwargs)
# plotting path density
ax = fig.add_subplot(gs[0, 1])
subkwargs = {'ax': ax, 'plot_title': False, 'stationlabel': True}
# sending additional arguments (when applicable)
subkwargs.update({k: kwargs[k] for k in getargspec(self.plot_pathdensity).args
if k in kwargs})
self.plot_pathdensity(**subkwargs)
# plotting spatial resolution
ax = fig.add_subplot(gs[0, 2])
subkwargs = {'ax': ax, 'plot_title': False}
# sending additional arguments (when applicable)
subkwargs.update({k: kwargs[k] for k in getargspec(self.plot_resolution).args
if k in kwargs})
self.plot_resolution(**subkwargs)
# fig title
if not title:
# default title if not given
title = u'Period = {} s, {} paths'
title = title.format(self.period, len(self.paths))
fig.suptitle(title, fontsize=16)
gs.tight_layout(fig, rect=[0, 0, 1, 0.95])
# saving figure
if outfile:
if os.path.exists(outfile):
# backup
shutil.copyfile(outfile, outfile + '~')
fig.set_size_inches(figsize)
fig.savefig(outfile, dpi=300)
# showing figure
if showplot:
fig.show()
return fig
def network_plot(self, ax=None, xsize=10, plotdensity=True, plotpaths=True,
stationlabel=False, plot_title=True, showgrid=False,
highlight_residuals_gt=None):
"""
        Plots the network of stations using basemap rather than shapefiles.
        Optionally also plots the interstation paths (*plotpaths*) and the
        grid nodes (*showgrid*).
"""
# bounding box
bbox = self.grid.bbox()
# creating figure if not given as input
fig = None
if not ax:
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
            # xsize has no effect if axes are given as input
fig = plt.figure(figsize=(xsize, aspectratio * xsize), tight_layout=True)
ax = fig.add_subplot(111)
# plotting coasts and tectonic provinces
psutils.basemap(ax=ax, labels=False, fill=not plotdensity, bbox=bbox)
if plotpaths:
# residuals observed/predicted travel-times
res = self.traveltime_residuals() if highlight_residuals_gt else []
# plotting paths
for i, path in enumerate(self.paths):
x, y = zip(*path)
linestyle = {'color': 'grey', 'lw': 0.5}
if highlight_residuals_gt and abs(float(res[i])) > highlight_residuals_gt:
# highlighting line as the travel-time error is > threshold
linestyle = {'color': 'black', 'lw': 1.5}
ax.plot(x, y, '-', **linestyle)
if showgrid:
# plotting grid
x, y = self.grid.xy_nodes()
ax.plot(x, y, '+')
# plotting stations
self._plot_stations(ax, stationlabel=stationlabel)
# formatting axes
ax.set_xlim(bbox[:2])
ax.set_ylim(bbox[2:])
if plot_title:
ax.set_title(u'Period = {} s, {} paths'.format(self.period, len(self.paths)))
if fig:
fig.show()
def plot_pathdensity(self, ax=None, xsize=10, plotdensity=True, plotpaths=True,
stationlabel=False, plot_title=True, showgrid=False,
highlight_residuals_gt=None):
"""
Plots path density and/or interstation paths.
Paths for which the residual observed/predicted travel-time
is greater than *highlight_residuals_gt* (if defined) are
highlighted as bold lines.
"""
# bounding box
bbox = self.grid.bbox()
# creating figure if not given as input
fig = None
if not ax:
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
            # xsize has no effect if axes are given as input
fig = plt.figure(figsize=(xsize, aspectratio * xsize), tight_layout=True)
ax = fig.add_subplot(111)
# plotting coasts and tectonic provinces
psutils.basemap(ax=ax, labels=False, fill=not plotdensity, bbox=bbox)
if plotdensity:
# plotting path density
d = self.grid.to_2D_array(self.density)
extent = (self.grid.xmin, self.grid.get_xmax(),
self.grid.ymin, self.grid.get_ymax())
m = ax.imshow(d.transpose(),
origin='bottom',
extent=extent,
interpolation='bicubic',
cmap=CMAP_DENSITY,
vmin=0)
c = plt.colorbar(m, ax=ax, orientation='horizontal', pad=0.1)
c.set_label('Path density')
if plotpaths:
# residuals observed/predicted travel-times
res = self.traveltime_residuals() if highlight_residuals_gt else []
# plotting paths
for i, path in enumerate(self.paths):
x, y = zip(*path)
linestyle = {'color': 'grey', 'lw': 0.5}
if highlight_residuals_gt and abs(float(res[i])) > highlight_residuals_gt:
# highlighting line as the travel-time error is > threshold
linestyle = {'color': 'black', 'lw': 1.5}
ax.plot(x, y, '-', **linestyle)
if showgrid:
# plotting grid
x, y = self.grid.xy_nodes()
ax.plot(x, y, '+')
# plotting stations
self._plot_stations(ax, stationlabel=stationlabel)
# formatting axes
ax.set_xlim(bbox[:2])
ax.set_ylim(bbox[2:])
if plot_title:
ax.set_title(u'Period = {} s, {} paths'.format(self.period, len(self.paths)))
if fig:
fig.show()
def plot_velocity(self, ax=None, xsize=10, perturbation=False, plot_title=True,
vscale=None):
"""
Plots velocity or perturbation relative to mean velocity
(which is not necessarily the reference velocity)
"""
# bounding box
bbox = self.grid.bbox()
# creating figure if not given as input
fig = None
if not ax:
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
            # xsize has no effect if axes are given as input
fig = plt.figure(figsize=(xsize, aspectratio * xsize))
ax = fig.add_subplot(111)
# plotting coasts and tectonic provinces
psutils.basemap(ax=ax, labels=False, fill=False, bbox=bbox)
# plotting stations
self._plot_stations(ax, stationlabel=False)
# velocities on grid: m = (v0 - v) / v, so v = v0 / (1 + m)
v = self.grid.to_2D_array(self.v0 / (1 + self.mopt))
vmean = v.mean()
if perturbation:
# plotting % perturbation relative to mean velocity
v = 100 * (v - vmean) / vmean
if not vscale and perturbation:
            # symmetric scale
maxdv = np.abs(v).max()
vscale = (-maxdv, maxdv)
elif not vscale and not perturbation:
# scale centered on mean velocity
maxdv = np.abs(v - vmean).max()
vscale = (vmean - maxdv, vmean + maxdv)
extent = (self.grid.xmin, self.grid.get_xmax(),
self.grid.ymin, self.grid.get_ymax())
m = ax.imshow(v.transpose(), origin='bottom', extent=extent,
interpolation='bicubic', cmap=CMAP_SEISMIC,
vmin=vscale[0], vmax=vscale[1])
c = plt.colorbar(m, ax=ax, orientation='horizontal', pad=0.1)
c.set_label('Velocity perturbation (%)' if perturbation else 'Velocity (km/s)')
# formatting axes
ax.set_xlim(bbox[:2])
ax.set_ylim(bbox[2:])
if plot_title:
ax.set_title(u'Period = {} s, {} paths'.format(self.period, len(self.paths)))
if fig:
fig.show()
def plot_resolution(self, ax=None, xsize=10, plot_title=True):
"""
Plots resolution map
"""
# bounding box
bbox = self.grid.bbox()
# creating figure if not given as input
fig = None
if not ax:
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
            # xsize has no effect if axes are given as input
fig = plt.figure(figsize=(xsize, aspectratio * xsize), tight_layout=True)
ax = fig.add_subplot(111)
# plotting coasts and tectonic provinces
psutils.basemap(ax=ax, labels=False, fill=False, bbox=bbox)
# plotting stations
self._plot_stations(ax, stationlabel=False)
# plotting spatial resolution
r = self.grid.to_2D_array(self.Rradius)
extent = (self.grid.xmin, self.grid.get_xmax(),
self.grid.ymin, self.grid.get_ymax())
m = ax.imshow(r.transpose(), origin='bottom', extent=extent,
interpolation='bicubic',
cmap=CMAP_RESOLUTION)
c = plt.colorbar(m, ax=ax, orientation='horizontal', pad=0.1)
c.set_label('Spatial resolution (km)')
# formatting axes
ax.set_xlim(bbox[:2])
ax.set_ylim(bbox[2:])
if plot_title:
ax.set_title(u'Period = {} s, {} paths'.format(self.period, len(self.paths)))
if fig:
fig.show()
def plot_checkerboard(self, vmid, vmin, vmax, squaresize, axes=None, xsize=10,
**kwargs):
"""
        Plots checkerboard model and reconstructed checkerboard
"""
# checkerboard test
m = self.checkerboard_test(vmid, vmin, vmax, squaresize, **kwargs)
v = self.grid.to_2D_array(vmid / (1 + m))
dv = 100 * (v - vmid) / vmid
# bounding box
bbox = self.grid.bbox()
# creating figure if not given as input
fig = None
if not axes:
aspectratio = (bbox[3] - bbox[2]) / (bbox[1] - bbox[0])
            # xsize has no effect if axes are given as input
fig = plt.figure(figsize=(xsize, aspectratio * xsize), tight_layout=True)
axes = [fig.add_subplot(121), fig.add_subplot(122)]
ims = []
# checkerboard model
checkerboard_func = self.checkerboard_func(vmid, vmin, vmax, squaresize, **kwargs)
lons, lats = self.grid.xy_nodes()
a = self.grid.to_2D_array(checkerboard_func(lons, lats))
extent = (self.grid.xmin, self.grid.get_xmax(),
self.grid.ymin, self.grid.get_ymax())
im = axes[0].imshow(a.transpose(),
origin='bottom', extent=extent,
interpolation='bicubic',
vmin=vmin, vmax=vmax,
cmap=CMAP_SEISMIC)
ims.append(im)
# reconstructed checkerboard
extent = (self.grid.xmin, self.grid.get_xmax(),
self.grid.ymin, self.grid.get_ymax())
im = axes[1].imshow(dv.transpose(),
origin='bottom', extent=extent,
interpolation='bicubic',
vmin=-np.abs(dv).max(),
vmax=np.abs(dv).max(),
cmap=CMAP_SEISMIC)
ims.append(im)
for ax, im in zip(axes, ims):
# coasts and tectonic provinces
psutils.basemap(ax=ax, labels=False, fill=False, bbox=bbox)
# stations
self._plot_stations(ax, stationlabel=False)
# color bar
c = plt.colorbar(im, ax=ax, orientation='horizontal', pad=0.1)
c.set_label('km/s' if ax is axes[0] else '% perturbation')
# limits
ax.set_xlim(bbox[:2])
ax.set_ylim(bbox[2:])
if fig:
fig.show()
def _plot_stations(self, ax, stationlabel):
"""
Plots stations on map
"""
# plotting stations
xylabels = [c.station1.coord + (c.station1.name,) for c in self.disp_curves] + \
[c.station2.coord + (c.station2.name,) for c in self.disp_curves]
xlist, ylist, labels = zip(*list(set(xylabels)))
ax.plot(xlist, ylist, '^', color='k', ms=10, mfc='w', mew=1)
if not stationlabel:
return
# stations label
for x, y, label in zip(xlist, ylist, labels):
ax.text(x, y, label, ha='center', va='bottom', fontsize=10, weight='bold')
def pathdensity_colormap(dmax):
"""
Builds a colormap for path density (d) varying from
0 to *dmax*:
- white for d = 0
- blue to green for 1 <= d <= 5
- green to red for 5 <= d <= 10
- red to black for 10 <= d <= dmax
"""
dmax = max(dmax, 11)
x1 = 1.0 / dmax
x2 = 5.0 / dmax
x3 = 10.0 / dmax
cdict = {'red': ((0, 1, 1), (x1, 0, 0), (x2, 0, 0), (x3, 1, 1), (1, 0, 0)),
'green': ((0, 1, 1), (x1, 0, 0), (x2, 1, 1), (x3, 0, 0), (1, 0, 0)),
'blue': ((0, 1, 1), (x1, 1, 1), (x2, 0, 0), (x3, 0, 0), (1, 0, 0))}
return LinearSegmentedColormap('tmp', cdict)
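# Hedged usage sketch (hypothetical, for illustration only): invert the observed
# velocities at one period from a list of DispersionCurve objects *curves*, then
# inspect the misfit and run a checkerboard resolution test.
def _example_tomographic_inversion(curves, period=10.0):
    vmap = VelocityMap(dispersion_curves=curves, period=period,
                       showplot=False, verbose=False)
    resid = vmap.traveltime_residuals(relative=True)  # relative misfit per path
    m = vmap.checkerboard_test(vmid=3.0, vmin=2.8, vmax=3.2, squaresize=200)
    return vmap, resid, m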
if __name__ == '__main__':
    # importing dir of FTAN results
from psconfig import FTAN_DIR
# loading dispersion curves
flist = sorted(glob.glob(os.path.join(FTAN_DIR, 'FTAN*.pickle*')))
print 'Select file containing dispersion curves:'
print '\n'.join('{} - {}'.format(i, os.path.basename(f)) for i, f in enumerate(flist))
pickle_file = flist[int(raw_input('\n'))]
f = open(pickle_file, 'rb')
curves = pickle.load(f)
f.close()
print "Dispersion curves stored in variable 'curves'" | gpl-3.0 |
Lawrence-Liu/scikit-learn | examples/mixture/plot_gmm.py | 248 | 2817 | """
=================================
Gaussian Mixture Model Ellipsoids
=================================
Plot the confidence ellipsoids of a mixture of two Gaussians with EM
and variational Dirichlet process.
Both models have access to five components with which to fit the
data. Note that the EM model will necessarily use all five components
while the DP model will effectively only use as many as are needed for
a good fit. This is a property of the Dirichlet Process prior. Here we
can see that the EM model splits some components arbitrarily, because it
is trying to fit too many components, while the Dirichlet Process model
adapts its number of states automatically.
This example doesn't show it, as we're in a low-dimensional space, but
another advantage of the Dirichlet process model is that it can fit
full covariance matrices effectively even when there are fewer examples
per cluster than there are dimensions in the data, due to
regularization properties of the inference algorithm.
"""
import itertools
import numpy as np
from scipy import linalg
import matplotlib.pyplot as plt
import matplotlib as mpl
from sklearn import mixture
# Number of samples per component
n_samples = 500
# Generate random sample, two components
np.random.seed(0)
C = np.array([[0., -0.1], [1.7, .4]])
X = np.r_[np.dot(np.random.randn(n_samples, 2), C),
.7 * np.random.randn(n_samples, 2) + np.array([-6, 3])]
# Fit a mixture of Gaussians with EM using five components
gmm = mixture.GMM(n_components=5, covariance_type='full')
gmm.fit(X)
# Fit a Dirichlet process mixture of Gaussians using five components
dpgmm = mixture.DPGMM(n_components=5, covariance_type='full')
dpgmm.fit(X)
color_iter = itertools.cycle(['r', 'g', 'b', 'c', 'm'])
for i, (clf, title) in enumerate([(gmm, 'GMM'),
(dpgmm, 'Dirichlet Process GMM')]):
splot = plt.subplot(2, 1, 1 + i)
Y_ = clf.predict(X)
for i, (mean, covar, color) in enumerate(zip(
clf.means_, clf._get_covars(), color_iter)):
v, w = linalg.eigh(covar)
u = w[0] / linalg.norm(w[0])
# as the DP will not use every component it has access to
# unless it needs it, we shouldn't plot the redundant
# components.
if not np.any(Y_ == i):
continue
plt.scatter(X[Y_ == i, 0], X[Y_ == i, 1], .8, color=color)
# Plot an ellipse to show the Gaussian component
angle = np.arctan(u[1] / u[0])
angle = 180 * angle / np.pi # convert to degrees
ell = mpl.patches.Ellipse(mean, v[0], v[1], 180 + angle, color=color)
ell.set_clip_box(splot.bbox)
ell.set_alpha(0.5)
splot.add_artist(ell)
plt.xlim(-10, 10)
plt.ylim(-3, 6)
plt.xticks(())
plt.yticks(())
plt.title(title)
plt.show()
| bsd-3-clause |
giorgiop/scikit-learn | sklearn/model_selection/_split.py | 7 | 61646 | """
The :mod:`sklearn.model_selection._split` module includes classes and
functions to split the data based on a preset strategy.
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>,
# Olivier Grisel <olivier.grisel@ensta.org>
# Raghav R V <rvraghav93@gmail.com>
# License: BSD 3 clause
from __future__ import print_function
from __future__ import division
import warnings
from itertools import chain, combinations
from collections import Iterable
from math import ceil, floor
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.misc import comb
from ..utils import indexable, check_random_state, safe_indexing
from ..utils.validation import _num_samples, column_or_1d
from ..utils.multiclass import type_of_target
from ..externals.six import with_metaclass
from ..externals.six.moves import zip
from ..utils.fixes import bincount
from ..utils.fixes import signature
from ..utils.random import choice
from ..base import _pprint
from ..gaussian_process.kernels import Kernel as GPKernel
__all__ = ['BaseCrossValidator',
'KFold',
'GroupKFold',
'LeaveOneGroupOut',
'LeaveOneOut',
'LeavePGroupsOut',
'LeavePOut',
'ShuffleSplit',
'GroupShuffleSplit',
'StratifiedKFold',
'StratifiedShuffleSplit',
'PredefinedSplit',
'train_test_split',
'check_cv']
class BaseCrossValidator(with_metaclass(ABCMeta)):
"""Base class for all cross-validators
Implementations must define `_iter_test_masks` or `_iter_test_indices`.
"""
def __init__(self):
# We need this for the build_repr to work properly in py2.7
# see #6304
pass
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, of length n_samples
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
indices = np.arange(_num_samples(X))
for test_index in self._iter_test_masks(X, y, groups):
train_index = indices[np.logical_not(test_index)]
test_index = indices[test_index]
yield train_index, test_index
# Since subclasses must implement either _iter_test_masks or
# _iter_test_indices, neither can be abstract.
def _iter_test_masks(self, X=None, y=None, groups=None):
"""Generates boolean masks corresponding to test sets.
By default, delegates to _iter_test_indices(X, y, groups)
"""
for test_index in self._iter_test_indices(X, y, groups):
test_mask = np.zeros(_num_samples(X), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def _iter_test_indices(self, X=None, y=None, groups=None):
"""Generates integer indices corresponding to test sets."""
raise NotImplementedError
@abstractmethod
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator"""
def __repr__(self):
return _build_repr(self)
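# Minimal sketch (illustration only, not part of scikit-learn): a concrete
# cross-validator only needs `_iter_test_indices` (or `_iter_test_masks`) and
# `get_n_splits`; the `split` method is inherited from BaseCrossValidator.
class _EveryOtherSampleSplit(BaseCrossValidator):
    """Hypothetical splitter: even-indexed samples, then odd-indexed samples."""
    def _iter_test_indices(self, X, y=None, groups=None):
        n_samples = _num_samples(X)
        yield np.arange(0, n_samples, 2)  # even indices as first test set
        yield np.arange(1, n_samples, 2)  # odd indices as second test set
    def get_n_splits(self, X=None, y=None, groups=None):
        return 2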
class LeaveOneOut(BaseCrossValidator):
"""Leave-One-Out cross-validator
Provides train/test indices to split data in train/test sets. Each
sample is used once as a test set (singleton) while the remaining
samples form the training set.
Note: ``LeaveOneOut()`` is equivalent to ``KFold(n_splits=n)`` and
``LeavePOut(p=1)`` where ``n`` is the number of samples.
Due to the high number of test sets (which is the same as the
number of samples) this cross-validation method can be very costly.
For large datasets one should favor :class:`KFold`, :class:`ShuffleSplit`
or :class:`StratifiedKFold`.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneOut
>>> X = np.array([[1, 2], [3, 4]])
>>> y = np.array([1, 2])
>>> loo = LeaveOneOut()
>>> loo.get_n_splits(X)
2
>>> print(loo)
LeaveOneOut()
>>> for train_index, test_index in loo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [1] TEST: [0]
[[3 4]] [[1 2]] [2] [1]
TRAIN: [0] TEST: [1]
[[1 2]] [[3 4]] [1] [2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit, domain-specific
stratification of the dataset.
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def _iter_test_indices(self, X, y=None, groups=None):
return range(_num_samples(X))
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return _num_samples(X)
class LeavePOut(BaseCrossValidator):
"""Leave-P-Out cross-validator
Provides train/test indices to split data in train/test sets. This results
in testing on all distinct samples of size p, while the remaining n - p
samples form the training set in each iteration.
Note: ``LeavePOut(p)`` is NOT equivalent to
``KFold(n_splits=n_samples // p)`` which creates non-overlapping test sets.
Due to the high number of iterations which grows combinatorically with the
number of samples this cross-validation method can be very costly. For
large datasets one should favor :class:`KFold`, :class:`StratifiedKFold`
or :class:`ShuffleSplit`.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
p : int
Size of the test sets.
Examples
--------
>>> from sklearn.model_selection import LeavePOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> lpo = LeavePOut(2)
>>> lpo.get_n_splits(X)
6
>>> print(lpo)
LeavePOut(p=2)
>>> for train_index, test_index in lpo.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [1 2] TEST: [0 3]
TRAIN: [0 3] TEST: [1 2]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 1] TEST: [2 3]
"""
def __init__(self, p):
self.p = p
def _iter_test_indices(self, X, y=None, groups=None):
for combination in combinations(range(_num_samples(X)), self.p):
yield np.array(combination)
def get_n_splits(self, X, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
"""
if X is None:
raise ValueError("The X parameter should not be None")
return int(comb(_num_samples(X), self.p, exact=True))
class _BaseKFold(with_metaclass(ABCMeta, BaseCrossValidator)):
"""Base class for KFold, GroupKFold, and StratifiedKFold"""
@abstractmethod
def __init__(self, n_splits, shuffle, random_state):
if not isinstance(n_splits, numbers.Integral):
raise ValueError('The number of folds must be of Integral type. '
'%s of type %s was passed.'
% (n_splits, type(n_splits)))
n_splits = int(n_splits)
if n_splits <= 1:
raise ValueError(
"k-fold cross-validation requires at least one"
" train/test split by setting n_splits=2 or more,"
" got n_splits={0}.".format(n_splits))
if not isinstance(shuffle, bool):
raise TypeError("shuffle must be True or False;"
" got {0}".format(shuffle))
self.n_splits = n_splits
self.shuffle = shuffle
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
if self.n_splits > n_samples:
raise ValueError(
("Cannot have number of splits n_splits={0} greater"
" than the number of samples: {1}.").format(self.n_splits,
n_samples))
for train, test in super(_BaseKFold, self).split(X, y, groups):
yield train, test
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
class KFold(_BaseKFold):
"""K-Folds cross-validator
Provides train/test indices to split data in train/test sets. Split
dataset into k consecutive folds (without shuffling by default).
    Each fold is then used once as a validation set while the k - 1 remaining
folds form the training set.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle the data before splitting into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import KFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> kf = KFold(n_splits=2)
>>> kf.get_n_splits(X)
2
>>> print(kf) # doctest: +NORMALIZE_WHITESPACE
KFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in kf.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [2 3] TEST: [0 1]
TRAIN: [0 1] TEST: [2 3]
Notes
-----
The first ``n_samples % n_splits`` folds have size
``n_samples // n_splits + 1``, other folds have size
``n_samples // n_splits``, where ``n_samples`` is the number of samples.
See also
--------
StratifiedKFold
Takes group information into account to avoid building folds with
imbalanced class distributions (for binary or multiclass
classification tasks).
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_splits=3, shuffle=False,
random_state=None):
super(KFold, self).__init__(n_splits, shuffle, random_state)
def _iter_test_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
indices = np.arange(n_samples)
if self.shuffle:
check_random_state(self.random_state).shuffle(indices)
n_splits = self.n_splits
fold_sizes = (n_samples // n_splits) * np.ones(n_splits, dtype=np.int)
fold_sizes[:n_samples % n_splits] += 1
current = 0
for fold_size in fold_sizes:
start, stop = current, current + fold_size
yield indices[start:stop]
current = stop
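    # Worked example (illustration only): with n_samples=10 and n_splits=3,
    # fold_sizes = [4, 3, 3], so the successive test sets are indices
    # [0:4], [4:7] and [7:10] (before any shuffling).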
class GroupKFold(_BaseKFold):
"""K-fold iterator variant with non-overlapping groups.
The same group will not appear in two different folds (the number of
distinct groups has to be at least equal to the number of folds).
The folds are approximately balanced in the sense that the number of
distinct groups is approximately the same in each fold.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
Examples
--------
>>> from sklearn.model_selection import GroupKFold
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 3, 4])
>>> groups = np.array([0, 0, 2, 2])
>>> group_kfold = GroupKFold(n_splits=2)
>>> group_kfold.get_n_splits(X, y, groups)
2
>>> print(group_kfold)
GroupKFold(n_splits=2)
>>> for train_index, test_index in group_kfold.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
...
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [3 4]
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [3 4] [1 2]
See also
--------
LeaveOneGroupOut
For splitting the data according to explicit domain-specific
stratification of the dataset.
"""
def __init__(self, n_splits=3):
super(GroupKFold, self).__init__(n_splits, shuffle=False,
random_state=None)
def _iter_test_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
unique_groups, groups = np.unique(groups, return_inverse=True)
n_groups = len(unique_groups)
if self.n_splits > n_groups:
raise ValueError("Cannot have number of splits n_splits=%d greater"
" than the number of groups: %d."
% (self.n_splits, n_groups))
# Weight groups by their number of occurrences
n_samples_per_group = np.bincount(groups)
# Distribute the most frequent groups first
indices = np.argsort(n_samples_per_group)[::-1]
n_samples_per_group = n_samples_per_group[indices]
# Total weight of each fold
n_samples_per_fold = np.zeros(self.n_splits)
# Mapping from group index to fold index
group_to_fold = np.zeros(len(unique_groups))
# Distribute samples by adding the largest weight to the lightest fold
for group_index, weight in enumerate(n_samples_per_group):
lightest_fold = np.argmin(n_samples_per_fold)
n_samples_per_fold[lightest_fold] += weight
group_to_fold[indices[group_index]] = lightest_fold
indices = group_to_fold[groups]
for f in range(self.n_splits):
yield np.where(indices == f)[0]
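# Illustrative sketch (not part of the original module) of the greedy
# assignment in ``_iter_test_indices`` above: with group sizes
# {A: 4, B: 2, C: 2, D: 1} and n_splits=2, groups are visited largest-first
# and each one is dropped into the currently lightest fold: A -> fold 0
# (weight 4), the two 2-sample groups both -> fold 1 (weight 4), and
# D -> fold 0 (weight 5). The resulting test folds are {A, D} and {B, C},
# i.e. 5 and 4 samples.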
class StratifiedKFold(_BaseKFold):
"""Stratified K-Folds cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a variation of KFold that returns
stratified folds. The folds are made by preserving the percentage of
samples for each class.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of folds. Must be at least 2.
shuffle : boolean, optional
Whether to shuffle each stratification of the data before splitting
into batches.
random_state : None, int or RandomState
When shuffle=True, pseudo-random number generator state used for
shuffling. If None, use default numpy RNG for shuffling.
Examples
--------
>>> from sklearn.model_selection import StratifiedKFold
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> skf = StratifiedKFold(n_splits=2)
>>> skf.get_n_splits(X, y)
2
>>> print(skf) # doctest: +NORMALIZE_WHITESPACE
StratifiedKFold(n_splits=2, random_state=None, shuffle=False)
>>> for train_index, test_index in skf.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 3] TEST: [0 2]
TRAIN: [0 2] TEST: [1 3]
Notes
-----
    All the folds have size ``trunc(n_samples / n_splits)``; the last one has
    the complementary size.
"""
def __init__(self, n_splits=3, shuffle=False, random_state=None):
super(StratifiedKFold, self).__init__(n_splits, shuffle, random_state)
def _make_test_folds(self, X, y=None, groups=None):
if self.shuffle:
rng = check_random_state(self.random_state)
else:
rng = self.random_state
y = np.asarray(y)
n_samples = y.shape[0]
unique_y, y_inversed = np.unique(y, return_inverse=True)
y_counts = bincount(y_inversed)
min_groups = np.min(y_counts)
if np.all(self.n_splits > y_counts):
raise ValueError("All the n_groups for individual classes"
" are less than n_splits=%d."
% (self.n_splits))
if self.n_splits > min_groups:
warnings.warn(("The least populated class in y has only %d"
" members, which is too few. The minimum"
" number of groups for any class cannot"
" be less than n_splits=%d."
% (min_groups, self.n_splits)), Warning)
# pre-assign each sample to a test fold index using individual KFold
# splitting strategies for each class so as to respect the balance of
# classes
# NOTE: Passing the data corresponding to ith class say X[y==class_i]
# will break when the data is not 100% stratifiable for all classes.
        # So we pass np.zeros(max(c, n_splits)) as data to the KFold
per_cls_cvs = [
KFold(self.n_splits, shuffle=self.shuffle,
random_state=rng).split(np.zeros(max(count, self.n_splits)))
for count in y_counts]
test_folds = np.zeros(n_samples, dtype=np.int)
for test_fold_indices, per_cls_splits in enumerate(zip(*per_cls_cvs)):
for cls, (_, test_split) in zip(unique_y, per_cls_splits):
cls_test_folds = test_folds[y == cls]
# the test split can be too big because we used
# KFold(...).split(X[:max(c, n_splits)]) when data is not 100%
# stratifiable for all the classes
# (we use a warning instead of raising an exception)
# If this is the case, let's trim it:
test_split = test_split[test_split < len(cls_test_folds)]
cls_test_folds[test_split] = test_fold_indices
test_folds[y == cls] = cls_test_folds
return test_folds
def _iter_test_masks(self, X, y=None, groups=None):
test_folds = self._make_test_folds(X, y)
for i in range(self.n_splits):
yield test_folds == i
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super(StratifiedKFold, self).split(X, y, groups)
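# Illustrative sketch (not part of the original module): the per-class KFold
# strategy in ``_make_test_folds`` above means that for y = [0, 0, 1, 1] and
# n_splits=2 each class contributes one sample to each test fold, so the test
# folds are [0, 2] and [1, 3] (one sample of each class per fold).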
class TimeSeriesSplit(_BaseKFold):
"""Time Series cross-validator
Provides train/test indices to split time series data samples
that are observed at fixed time intervals, in train/test sets.
    In each split, the test indices must be higher than in the previous split,
    so shuffling within the cross-validator is inappropriate.
This cross-validation object is a variation of :class:`KFold`.
In the kth split, it returns first k folds as train set and the
(k+1)th fold as test set.
Note that unlike standard cross-validation methods, successive
training sets are supersets of those that come before them.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int, default=3
Number of splits. Must be at least 1.
Examples
--------
>>> from sklearn.model_selection import TimeSeriesSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([1, 2, 3, 4])
>>> tscv = TimeSeriesSplit(n_splits=3)
>>> print(tscv) # doctest: +NORMALIZE_WHITESPACE
TimeSeriesSplit(n_splits=3)
>>> for train_index, test_index in tscv.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [0] TEST: [1]
TRAIN: [0 1] TEST: [2]
TRAIN: [0 1 2] TEST: [3]
Notes
-----
The training set has size ``i * n_samples // (n_splits + 1)
+ n_samples % (n_splits + 1)`` in the ``i``th split,
with a test set of size ``n_samples//(n_splits + 1)``,
where ``n_samples`` is the number of samples.
"""
def __init__(self, n_splits=3):
super(TimeSeriesSplit, self).__init__(n_splits,
shuffle=False,
random_state=None)
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
n_samples = _num_samples(X)
n_splits = self.n_splits
n_folds = n_splits + 1
if n_folds > n_samples:
raise ValueError(
("Cannot have number of folds ={0} greater"
" than the number of samples: {1}.").format(n_folds,
n_samples))
indices = np.arange(n_samples)
test_size = (n_samples // n_folds)
test_starts = range(test_size + n_samples % n_folds,
n_samples, test_size)
for test_start in test_starts:
yield (indices[:test_start],
indices[test_start:test_start + test_size])
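# Illustrative sketch (not part of the original module): for n_samples=6 and
# n_splits=3 the code above uses n_folds=4, test_size = 6 // 4 = 1 and
# test_starts = range(1 + 6 % 4, 6, 1) = [3, 4, 5], so the splits are
# ([0 1 2], [3]), ([0 1 2 3], [4]) and ([0 1 2 3 4], [5]).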
class LeaveOneGroupOut(BaseCrossValidator):
"""Leave One Group Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import LeaveOneGroupOut
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> groups = np.array([1, 1, 2, 2])
>>> lol = LeaveOneGroupOut()
>>> lol.get_n_splits(X, y, groups)
2
>>> print(lol)
LeaveOneGroupOut()
>>> for train_index, test_index in lol.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2 3] TEST: [0 1]
[[5 6]
[7 8]] [[1 2]
[3 4]] [1 2] [1 2]
TRAIN: [0 1] TEST: [2 3]
[[1 2]
[3 4]] [[5 6]
[7 8]] [1 2] [1 2]
"""
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
# We make a copy of groups to avoid side-effects during iteration
groups = np.array(groups, copy=True)
unique_groups = np.unique(groups)
for i in unique_groups:
yield groups == i
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
return len(np.unique(groups))
class LeavePGroupsOut(BaseCrossValidator):
"""Leave P Group(s) Out cross-validator
Provides train/test indices to split data according to a third-party
provided group. This group information can be used to encode arbitrary
domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and LeaveOneGroupOut is that
the former builds the test sets with all the samples assigned to
``p`` different values of the groups while the latter uses samples
all assigned the same groups.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_groups : int
Number of groups (``p``) to leave out in the test split.
Examples
--------
>>> from sklearn.model_selection import LeavePGroupsOut
>>> X = np.array([[1, 2], [3, 4], [5, 6]])
>>> y = np.array([1, 2, 1])
>>> groups = np.array([1, 2, 3])
>>> lpl = LeavePGroupsOut(n_groups=2)
>>> lpl.get_n_splits(X, y, groups)
3
>>> print(lpl)
LeavePGroupsOut(n_groups=2)
>>> for train_index, test_index in lpl.split(X, y, groups):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
... print(X_train, X_test, y_train, y_test)
TRAIN: [2] TEST: [0 1]
[[5 6]] [[1 2]
[3 4]] [1] [1 2]
TRAIN: [1] TEST: [0 2]
[[3 4]] [[1 2]
[5 6]] [2] [1 1]
TRAIN: [0] TEST: [1 2]
[[1 2]] [[3 4]
[5 6]] [1] [2 1]
See also
--------
GroupKFold: K-fold iterator variant with non-overlapping groups.
"""
def __init__(self, n_groups):
self.n_groups = n_groups
def _iter_test_masks(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
groups = np.array(groups, copy=True)
unique_groups = np.unique(groups)
combi = combinations(range(len(unique_groups)), self.n_groups)
for indices in combi:
test_index = np.zeros(_num_samples(X), dtype=np.bool)
for l in unique_groups[np.array(indices)]:
test_index[groups == l] = True
yield test_index
def get_n_splits(self, X, y, groups):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
if groups is None:
raise ValueError("The groups parameter should not be None")
return int(comb(len(np.unique(groups)), self.n_groups, exact=True))
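# Illustrative sketch (not part of the original module): the number of splits
# is a binomial coefficient, e.g. with 5 distinct groups and n_groups=2 the
# ``comb`` call above yields C(5, 2) = 10 train/test splits.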
class BaseShuffleSplit(with_metaclass(ABCMeta)):
"""Base class for ShuffleSplit and StratifiedShuffleSplit"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
_validate_shuffle_split_init(test_size, train_size)
self.n_splits = n_splits
self.test_size = test_size
self.train_size = train_size
self.random_state = random_state
def split(self, X, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
X, y, groups = indexable(X, y, groups)
for train, test in self._iter_indices(X, y, groups):
yield train, test
@abstractmethod
def _iter_indices(self, X, y=None, groups=None):
"""Generate (train, test) indices"""
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return self.n_splits
def __repr__(self):
return _build_repr(self)
class ShuffleSplit(BaseShuffleSplit):
"""Random permutation cross-validator
Yields indices to split data into training and test sets.
Note: contrary to other cross-validation strategies, random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float, int, or None, default 0.1
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import ShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [5, 6], [7, 8]])
>>> y = np.array([1, 2, 1, 2])
>>> rs = ShuffleSplit(n_splits=3, test_size=.25, random_state=0)
>>> rs.get_n_splits(X)
3
>>> print(rs)
ShuffleSplit(n_splits=3, random_state=0, test_size=0.25, train_size=None)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1 0] TEST: [2]
TRAIN: [2 1 3] TEST: [0]
TRAIN: [0 2 1] TEST: [3]
>>> rs = ShuffleSplit(n_splits=3, train_size=0.5, test_size=.25,
... random_state=0)
>>> for train_index, test_index in rs.split(X):
... print("TRAIN:", train_index, "TEST:", test_index)
... # doctest: +ELLIPSIS
TRAIN: [3 1] TEST: [2]
TRAIN: [2 1] TEST: [0]
TRAIN: [0 2] TEST: [3]
"""
def _iter_indices(self, X, y=None, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
rng = check_random_state(self.random_state)
for i in range(self.n_splits):
# random partition
permutation = rng.permutation(n_samples)
ind_test = permutation[:n_test]
ind_train = permutation[n_test:(n_test + n_train)]
yield ind_train, ind_test
class GroupShuffleSplit(ShuffleSplit):
'''Shuffle-Group(s)-Out cross-validation iterator
Provides randomized train/test indices to split data according to a
third-party provided group. This group information can be used to encode
arbitrary domain specific stratifications of the samples as integers.
For instance the groups could be the year of collection of the samples
and thus allow for cross-validation against time-based splits.
The difference between LeavePGroupsOut and GroupShuffleSplit is that
the former generates splits using all subsets of size ``p`` unique groups,
whereas GroupShuffleSplit generates a user-determined number of random
test splits, each with a user-determined fraction of unique groups.
For example, a less computationally intensive alternative to
    ``LeavePGroupsOut(n_groups=10)`` would be
``GroupShuffleSplit(test_size=10, n_splits=100)``.
Note: The parameters ``test_size`` and ``train_size`` refer to groups, and
not to samples, as in ShuffleSplit.
Parameters
----------
n_splits : int (default 5)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.2), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the test split. If
int, represents the absolute number of test groups. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the groups to include in the train split. If
int, represents the absolute number of train groups. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
'''
def __init__(self, n_splits=5, test_size=0.2, train_size=None,
random_state=None):
super(GroupShuffleSplit, self).__init__(
n_splits=n_splits,
test_size=test_size,
train_size=train_size,
random_state=random_state)
def _iter_indices(self, X, y, groups):
if groups is None:
raise ValueError("The groups parameter should not be None")
classes, group_indices = np.unique(groups, return_inverse=True)
for group_train, group_test in super(
GroupShuffleSplit, self)._iter_indices(X=classes):
# these are the indices of classes in the partition
# invert them into data indices
train = np.flatnonzero(np.in1d(group_indices, group_train))
test = np.flatnonzero(np.in1d(group_indices, group_test))
yield train, test
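# Illustrative usage sketch (not part of the original module): test_size and
# train_size count *groups*, so with 4 distinct groups and test_size=0.5 each
# iteration holds out all samples of 2 randomly chosen groups.
# >>> import numpy as np
# >>> from sklearn.model_selection import GroupShuffleSplit
# >>> X = np.arange(8).reshape(8, 1)
# >>> groups = np.array([1, 1, 2, 2, 3, 3, 4, 4])
# >>> gss = GroupShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
# >>> all(len(np.unique(groups[test])) == 2
# ...     for _, test in gss.split(X, groups=groups))
# True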
def _approximate_mode(class_counts, n_draws, rng):
"""Computes approximate mode of multivariate hypergeometric.
This is an approximation to the mode of the multivariate
hypergeometric given by class_counts and n_draws.
It shouldn't be off by more than one.
    It is the most likely outcome of drawing n_draws samples
    from the population given by class_counts.
Parameters
----------
class_counts : ndarray of int
Population per class.
n_draws : int
Number of draws (samples to draw) from the overall population.
rng : random state
Used to break ties.
Returns
-------
sampled_classes : ndarray of int
Number of samples drawn from each class.
np.sum(sampled_classes) == n_draws
Examples
--------
>>> from sklearn.model_selection._split import _approximate_mode
>>> _approximate_mode(class_counts=np.array([4, 2]), n_draws=3, rng=0)
array([2, 1])
>>> _approximate_mode(class_counts=np.array([5, 2]), n_draws=4, rng=0)
array([3, 1])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=0)
array([0, 1, 1, 0])
>>> _approximate_mode(class_counts=np.array([2, 2, 2, 1]),
... n_draws=2, rng=42)
array([1, 1, 0, 0])
"""
# this computes a bad approximation to the mode of the
# multivariate hypergeometric given by class_counts and n_draws
continuous = n_draws * class_counts / class_counts.sum()
# floored means we don't overshoot n_samples, but probably undershoot
floored = np.floor(continuous)
# we add samples according to how much "left over" probability
# they had, until we arrive at n_samples
need_to_add = int(n_draws - floored.sum())
if need_to_add > 0:
remainder = continuous - floored
values = np.sort(np.unique(remainder))[::-1]
# add according to remainder, but break ties
# randomly to avoid biases
for value in values:
inds, = np.where(remainder == value)
# if we need_to_add less than what's in inds
# we draw randomly from them.
# if we need to add more, we add them all and
# go to the next value
add_now = min(len(inds), need_to_add)
inds = choice(inds, size=add_now, replace=False, random_state=rng)
floored[inds] += 1
need_to_add -= add_now
if need_to_add == 0:
break
return floored.astype(np.int)
class StratifiedShuffleSplit(BaseShuffleSplit):
"""Stratified ShuffleSplit cross-validator
Provides train/test indices to split data in train/test sets.
This cross-validation object is a merge of StratifiedKFold and
ShuffleSplit, which returns stratified randomized folds. The folds
are made by preserving the percentage of samples for each class.
Note: like the ShuffleSplit strategy, stratified random splits
do not guarantee that all folds will be different, although this is
still very likely for sizeable datasets.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
n_splits : int (default 10)
Number of re-shuffling & splitting iterations.
test_size : float (default 0.1), int, or None
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
Examples
--------
>>> from sklearn.model_selection import StratifiedShuffleSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> sss = StratifiedShuffleSplit(n_splits=3, test_size=0.5, random_state=0)
>>> sss.get_n_splits(X, y)
3
>>> print(sss) # doctest: +ELLIPSIS
StratifiedShuffleSplit(n_splits=3, random_state=0, ...)
>>> for train_index, test_index in sss.split(X, y):
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2] TEST: [3 0]
TRAIN: [0 2] TEST: [1 3]
TRAIN: [0 2] TEST: [3 1]
"""
def __init__(self, n_splits=10, test_size=0.1, train_size=None,
random_state=None):
super(StratifiedShuffleSplit, self).__init__(
n_splits, test_size, train_size, random_state)
def _iter_indices(self, X, y, groups=None):
n_samples = _num_samples(X)
n_train, n_test = _validate_shuffle_split(n_samples, self.test_size,
self.train_size)
classes, y_indices = np.unique(y, return_inverse=True)
n_classes = classes.shape[0]
class_counts = bincount(y_indices)
if np.min(class_counts) < 2:
raise ValueError("The least populated class in y has only 1"
" member, which is too few. The minimum"
" number of groups for any class cannot"
" be less than 2.")
if n_train < n_classes:
raise ValueError('The train_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_train, n_classes))
if n_test < n_classes:
raise ValueError('The test_size = %d should be greater or '
'equal to the number of classes = %d' %
(n_test, n_classes))
rng = check_random_state(self.random_state)
for _ in range(self.n_splits):
# if there are ties in the class-counts, we want
# to make sure to break them anew in each iteration
n_i = _approximate_mode(class_counts, n_train, rng)
class_counts_remaining = class_counts - n_i
t_i = _approximate_mode(class_counts_remaining, n_test, rng)
train = []
test = []
for i, class_i in enumerate(classes):
permutation = rng.permutation(class_counts[i])
perm_indices_class_i = np.where((y == class_i))[0][permutation]
train.extend(perm_indices_class_i[:n_i[i]])
test.extend(perm_indices_class_i[n_i[i]:n_i[i] + t_i[i]])
train = rng.permutation(train)
test = rng.permutation(test)
yield train, test
def split(self, X, y, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
y : array-like, shape (n_samples,)
The target variable for supervised learning problems.
groups : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
return super(StratifiedShuffleSplit, self).split(X, y, groups)
def _validate_shuffle_split_init(test_size, train_size):
"""Validation helper to check the test_size and train_size at init
    NOTE: This does not take into account the number of samples, which is
    known only at split time.
"""
if test_size is None and train_size is None:
raise ValueError('test_size and train_size can not both be None')
if test_size is not None:
if np.asarray(test_size).dtype.kind == 'f':
if test_size >= 1.:
raise ValueError(
'test_size=%f should be smaller '
'than 1.0 or be an integer' % test_size)
elif np.asarray(test_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for test_size: %r" % test_size)
if train_size is not None:
if np.asarray(train_size).dtype.kind == 'f':
if train_size >= 1.:
raise ValueError("train_size=%f should be smaller "
"than 1.0 or be an integer" % train_size)
elif (np.asarray(test_size).dtype.kind == 'f' and
(train_size + test_size) > 1.):
raise ValueError('The sum of test_size and train_size = %f, '
'should be smaller than 1.0. Reduce '
'test_size and/or train_size.' %
(train_size + test_size))
elif np.asarray(train_size).dtype.kind != 'i':
# int values are checked during split based on the input
raise ValueError("Invalid value for train_size: %r" % train_size)
def _validate_shuffle_split(n_samples, test_size, train_size):
"""
    Validation helper to check if the train/test sizes are meaningful with
    respect to the size of the data (n_samples)
"""
if (test_size is not None and np.asarray(test_size).dtype.kind == 'i' and
test_size >= n_samples):
raise ValueError('test_size=%d should be smaller than the number of '
'samples %d' % (test_size, n_samples))
if (train_size is not None and np.asarray(train_size).dtype.kind == 'i' and
train_size >= n_samples):
raise ValueError("train_size=%d should be smaller than the number of"
" samples %d" % (train_size, n_samples))
if np.asarray(test_size).dtype.kind == 'f':
n_test = ceil(test_size * n_samples)
elif np.asarray(test_size).dtype.kind == 'i':
n_test = float(test_size)
if train_size is None:
n_train = n_samples - n_test
elif np.asarray(train_size).dtype.kind == 'f':
n_train = floor(train_size * n_samples)
else:
n_train = float(train_size)
if test_size is None:
n_test = n_samples - n_train
if n_train + n_test > n_samples:
raise ValueError('The sum of train_size and test_size = %d, '
'should be smaller than the number of '
'samples %d. Reduce test_size and/or '
'train_size.' % (n_train + n_test, n_samples))
return int(n_train), int(n_test)
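# Illustrative sketch (not part of the original module): for n_samples=10,
# test_size=0.25 and train_size=None the helper above computes
# n_test = ceil(0.25 * 10) = 3 and n_train = 10 - 3 = 7, returning (7, 3).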
class PredefinedSplit(BaseCrossValidator):
"""Predefined split cross-validator
Splits the data into training/test set folds according to a predefined
scheme. Each sample can be assigned to at most one test set fold, as
specified by the user through the ``test_fold`` parameter.
Read more in the :ref:`User Guide <cross_validation>`.
Examples
--------
>>> from sklearn.model_selection import PredefinedSplit
>>> X = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
>>> y = np.array([0, 0, 1, 1])
>>> test_fold = [0, 1, -1, 1]
>>> ps = PredefinedSplit(test_fold)
>>> ps.get_n_splits()
2
>>> print(ps) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
PredefinedSplit(test_fold=array([ 0, 1, -1, 1]))
>>> for train_index, test_index in ps.split():
... print("TRAIN:", train_index, "TEST:", test_index)
... X_train, X_test = X[train_index], X[test_index]
... y_train, y_test = y[train_index], y[test_index]
TRAIN: [1 2 3] TEST: [0]
TRAIN: [0 2] TEST: [1 3]
"""
def __init__(self, test_fold):
self.test_fold = np.array(test_fold, dtype=np.int)
self.test_fold = column_or_1d(self.test_fold)
self.unique_folds = np.unique(self.test_fold)
self.unique_folds = self.unique_folds[self.unique_folds != -1]
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
ind = np.arange(len(self.test_fold))
for test_index in self._iter_test_masks():
train_index = ind[np.logical_not(test_index)]
test_index = ind[test_index]
yield train_index, test_index
def _iter_test_masks(self):
"""Generates boolean masks corresponding to test sets."""
for f in self.unique_folds:
test_index = np.where(self.test_fold == f)[0]
test_mask = np.zeros(len(self.test_fold), dtype=np.bool)
test_mask[test_index] = True
yield test_mask
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.unique_folds)
class _CVIterableWrapper(BaseCrossValidator):
"""Wrapper class for old style cv objects and iterables."""
def __init__(self, cv):
self.cv = cv
def get_n_splits(self, X=None, y=None, groups=None):
"""Returns the number of splitting iterations in the cross-validator
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
n_splits : int
Returns the number of splitting iterations in the cross-validator.
"""
return len(self.cv) # Both iterables and old-cv objects support len
def split(self, X=None, y=None, groups=None):
"""Generate indices to split data into training and test set.
Parameters
----------
X : object
Always ignored, exists for compatibility.
y : object
Always ignored, exists for compatibility.
groups : object
Always ignored, exists for compatibility.
Returns
-------
train : ndarray
The training set indices for that split.
test : ndarray
The testing set indices for that split.
"""
for train, test in self.cv:
yield train, test
def check_cv(cv=3, y=None, classifier=False):
"""Input checker utility for building a cross-validator
Parameters
----------
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- An object to be used as a cross-validation generator.
- An iterable yielding train/test splits.
For integer/None inputs, if classifier is True and ``y`` is either
binary or multiclass, :class:`StratifiedKFold` is used. In all other
cases, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
y : array-like, optional
The target variable for supervised learning problems.
classifier : boolean, optional, default False
Whether the task is a classification task, in which case
stratified KFold will be used.
Returns
-------
checked_cv : a cross-validator instance.
The return value is a cross-validator which generates the train/test
splits via the ``split`` method.
"""
if cv is None:
cv = 3
if isinstance(cv, numbers.Integral):
if (classifier and (y is not None) and
(type_of_target(y) in ('binary', 'multiclass'))):
return StratifiedKFold(cv)
else:
return KFold(cv)
if not hasattr(cv, 'split') or isinstance(cv, str):
if not isinstance(cv, Iterable) or isinstance(cv, str):
raise ValueError("Expected cv as an integer, cross-validation "
"object (from sklearn.model_selection) "
"or an iterable. Got %s." % cv)
return _CVIterableWrapper(cv)
return cv # New style cv objects are passed without any modification
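# Illustrative usage sketch (not part of the original module):
# >>> from sklearn.model_selection import check_cv
# >>> check_cv(5)                                   # KFold(n_splits=5)
# >>> check_cv(5, y=[0, 1, 0, 1], classifier=True)  # StratifiedKFold(n_splits=5)
# >>> check_cv([([0, 1], [2]), ([0, 2], [1])])      # wrapped iterable of splits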
def train_test_split(*arrays, **options):
"""Split arrays or matrices into random train and test subsets
Quick utility that wraps input validation and
``next(ShuffleSplit().split(X, y))`` and application to input data
into a single call for splitting (and optionally subsampling) data in a
    one-liner.
Read more in the :ref:`User Guide <cross_validation>`.
Parameters
----------
*arrays : sequence of indexables with same length / shape[0]
Allowed inputs are lists, numpy arrays, scipy-sparse
matrices or pandas dataframes.
test_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the test split. If
int, represents the absolute number of test samples. If None,
the value is automatically set to the complement of the train size.
If train size is also None, test size is set to 0.25.
train_size : float, int, or None (default is None)
If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset to include in the train split. If
int, represents the absolute number of train samples. If None,
the value is automatically set to the complement of the test size.
random_state : int or RandomState
Pseudo-random number generator state used for random sampling.
stratify : array-like or None (default is None)
If not None, data is split in a stratified fashion, using this as
        the class labels.
Returns
-------
splitting : list, length=2 * len(arrays)
List containing train-test split of inputs.
.. versionadded:: 0.16
If the input is sparse, the output will be a
``scipy.sparse.csr_matrix``. Else, output type is the same as the
input type.
Examples
--------
>>> import numpy as np
>>> from sklearn.model_selection import train_test_split
>>> X, y = np.arange(10).reshape((5, 2)), range(5)
>>> X
array([[0, 1],
[2, 3],
[4, 5],
[6, 7],
[8, 9]])
>>> list(y)
[0, 1, 2, 3, 4]
>>> X_train, X_test, y_train, y_test = train_test_split(
... X, y, test_size=0.33, random_state=42)
...
>>> X_train
array([[4, 5],
[0, 1],
[6, 7]])
>>> y_train
[2, 0, 3]
>>> X_test
array([[2, 3],
[8, 9]])
>>> y_test
[1, 4]
"""
n_arrays = len(arrays)
if n_arrays == 0:
raise ValueError("At least one array required as input")
test_size = options.pop('test_size', None)
train_size = options.pop('train_size', None)
random_state = options.pop('random_state', None)
stratify = options.pop('stratify', None)
if options:
raise TypeError("Invalid parameters passed: %s" % str(options))
if test_size is None and train_size is None:
test_size = 0.25
arrays = indexable(*arrays)
if stratify is not None:
CVClass = StratifiedShuffleSplit
else:
CVClass = ShuffleSplit
cv = CVClass(test_size=test_size,
train_size=train_size,
random_state=random_state)
train, test = next(cv.split(X=arrays[0], y=stratify))
return list(chain.from_iterable((safe_indexing(a, train),
safe_indexing(a, test)) for a in arrays))
train_test_split.__test__ = False # to avoid a pb with nosetests
def _build_repr(self):
# XXX This is copied from BaseEstimator's get_params
cls = self.__class__
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
# Ignore varargs, kw and default values and pop self
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
if init is object.__init__:
args = []
else:
args = sorted([p.name for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD])
class_name = self.__class__.__name__
params = dict()
for key in args:
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
params[key] = value
return '%s(%s)' % (class_name, _pprint(params, offset=len(class_name)))
| bsd-3-clause |
rs2/pandas | pandas/core/indexers.py | 1 | 14164 | """
Low-dependency indexing utilities.
"""
import warnings
import numpy as np
from pandas._typing import Any, AnyArrayLike
from pandas.core.dtypes.common import (
is_array_like,
is_bool_dtype,
is_extension_array_dtype,
is_integer,
is_integer_dtype,
is_list_like,
)
from pandas.core.dtypes.generic import ABCIndexClass, ABCSeries
# -----------------------------------------------------------
# Indexer Identification
def is_valid_positional_slice(slc: slice) -> bool:
"""
Check if a slice object can be interpreted as a positional indexer.
Parameters
----------
slc : slice
Returns
-------
bool
Notes
-----
A valid positional slice may also be interpreted as a label-based slice
depending on the index being sliced.
"""
def is_int_or_none(val):
return val is None or is_integer(val)
return (
is_int_or_none(slc.start)
and is_int_or_none(slc.stop)
and is_int_or_none(slc.step)
)
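# Illustrative sketch (not part of the original module):
# is_valid_positional_slice(slice(0, 3)) and is_valid_positional_slice(slice(None))
# are True, while is_valid_positional_slice(slice("a", "c")) is False because the
# bounds are labels rather than integers.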
def is_list_like_indexer(key) -> bool:
"""
Check if we have a list-like indexer that is *not* a NamedTuple.
Parameters
----------
key : object
Returns
-------
bool
"""
# allow a list_like, but exclude NamedTuples which can be indexers
return is_list_like(key) and not (isinstance(key, tuple) and type(key) is not tuple)
def is_scalar_indexer(indexer, ndim: int) -> bool:
"""
Return True if we are all scalar indexers.
Parameters
----------
indexer : object
ndim : int
Number of dimensions in the object being indexed.
Returns
-------
bool
"""
if isinstance(indexer, tuple):
if len(indexer) == ndim:
return all(
is_integer(x) or (isinstance(x, np.ndarray) and x.ndim == len(x) == 1)
for x in indexer
)
return False
def is_empty_indexer(indexer, arr_value: np.ndarray) -> bool:
"""
Check if we have an empty indexer.
Parameters
----------
indexer : object
arr_value : np.ndarray
Returns
-------
bool
"""
if is_list_like(indexer) and not len(indexer):
return True
if arr_value.ndim == 1:
if not isinstance(indexer, tuple):
indexer = tuple([indexer])
return any(isinstance(idx, np.ndarray) and len(idx) == 0 for idx in indexer)
return False
# -----------------------------------------------------------
# Indexer Validation
def check_setitem_lengths(indexer, value, values) -> bool:
"""
Validate that value and indexer are the same length.
    A special case is allowed when the indexer is a boolean array
    and the number of true values equals the length of ``value``. In
    this case, no exception is raised.
Parameters
----------
indexer : sequence
Key for the setitem.
value : array-like
Value for the setitem.
values : array-like
Values being set into.
Returns
-------
bool
Whether this is an empty listlike setting which is a no-op.
Raises
------
ValueError
When the indexer is an ndarray or list and the lengths don't match.
"""
no_op = False
if isinstance(indexer, (np.ndarray, list)):
# We can ignore other listlikes because they are either
# a) not necessarily 1-D indexers, e.g. tuple
# b) boolean indexers e.g. BoolArray
if is_list_like(value):
if len(indexer) != len(value):
# boolean with truth values == len of the value is ok too
if not (
isinstance(indexer, np.ndarray)
and indexer.dtype == np.bool_
and len(indexer[indexer]) == len(value)
):
raise ValueError(
"cannot set using a list-like indexer "
"with a different length than the value"
)
if not len(indexer):
no_op = True
elif isinstance(indexer, slice):
if is_list_like(value):
if len(value) != length_of_indexer(indexer, values):
raise ValueError(
"cannot set using a slice indexer with a "
"different length than the value"
)
if not len(value):
no_op = True
return no_op
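# Illustrative sketch (not part of the original module): a boolean ndarray
# indexer with two True entries may be paired with a length-2 value even
# though len(indexer) == 4, since only the True positions are written to.
# >>> import numpy as np
# >>> check_setitem_lengths(np.array([True, False, True, False]),
# ...                       [10, 20], np.arange(4))
# False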
def validate_indices(indices: np.ndarray, n: int) -> None:
"""
Perform bounds-checking for an indexer.
-1 is allowed for indicating missing values.
Parameters
----------
indices : ndarray
n : int
Length of the array being indexed.
Raises
------
ValueError
Examples
--------
>>> validate_indices([1, 2], 3)
# OK
>>> validate_indices([1, -2], 3)
ValueError
>>> validate_indices([1, 2, 3], 3)
IndexError
>>> validate_indices([-1, -1], 0)
# OK
>>> validate_indices([0, 1], 0)
IndexError
"""
if len(indices):
min_idx = indices.min()
if min_idx < -1:
msg = f"'indices' contains values less than allowed ({min_idx} < -1)"
raise ValueError(msg)
max_idx = indices.max()
if max_idx >= n:
raise IndexError("indices are out-of-bounds")
# -----------------------------------------------------------
# Indexer Conversion
def maybe_convert_indices(indices, n: int):
"""
Attempt to convert indices into valid, positive indices.
If we have negative indices, translate to positive here.
If we have indices that are out-of-bounds, raise an IndexError.
Parameters
----------
indices : array-like
Array of indices that we are to convert.
n : int
Number of elements in the array that we are indexing.
Returns
-------
array-like
An array-like of positive indices that correspond to the ones
that were passed in initially to this function.
Raises
------
IndexError
        One of the converted indices either exceeded the number of
        elements (specified by `n`), or was still negative.
"""
if isinstance(indices, list):
indices = np.array(indices)
if len(indices) == 0:
        # If `indices` is empty, np.array will return a float array,
        # which would cause indexing errors.
return np.empty(0, dtype=np.intp)
mask = indices < 0
if mask.any():
indices = indices.copy()
indices[mask] += n
mask = (indices >= n) | (indices < 0)
if mask.any():
raise IndexError("indices are out-of-bounds")
return indices
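# Illustrative sketch (not part of the original module): negative indices are
# translated relative to ``n``, so maybe_convert_indices([-1, 0], 3) returns
# array([2, 0]), while maybe_convert_indices([3, 0], 3) raises IndexError
# because 3 is out of bounds.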
# -----------------------------------------------------------
# Unsorted
def length_of_indexer(indexer, target=None) -> int:
"""
Return the expected length of target[indexer]
Returns
-------
int
"""
if target is not None and isinstance(indexer, slice):
target_len = len(target)
start = indexer.start
stop = indexer.stop
step = indexer.step
if start is None:
start = 0
elif start < 0:
start += target_len
if stop is None or stop > target_len:
stop = target_len
elif stop < 0:
stop += target_len
if step is None:
step = 1
elif step < 0:
start, stop = stop + 1, start + 1
step = -step
return (stop - start + step - 1) // step
elif isinstance(indexer, (ABCSeries, ABCIndexClass, np.ndarray, list)):
if isinstance(indexer, list):
indexer = np.array(indexer)
if indexer.dtype == bool:
# GH#25774
return indexer.sum()
return len(indexer)
elif not is_list_like_indexer(indexer):
return 1
raise AssertionError("cannot find the length of the indexer")
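# Illustrative sketch (not part of the original module): for a slice the
# expected length comes from the normalized start/stop/step, e.g.
# length_of_indexer(slice(1, 7, 2), np.arange(10)) == (7 - 1 + 2 - 1) // 2 == 3,
# while a boolean ndarray indexer simply counts its True values.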
def deprecate_ndim_indexing(result, stacklevel=3):
"""
Helper function to raise the deprecation warning for multi-dimensional
indexing on 1D Series/Index.
GH#27125 indexer like idx[:, None] expands dim, but we cannot do that
and keep an index, so we currently return ndarray, which is deprecated
(Deprecation GH#30588).
"""
if np.ndim(result) > 1:
warnings.warn(
"Support for multi-dimensional indexing (e.g. `obj[:, None]`) "
"is deprecated and will be removed in a future "
"version. Convert to a numpy array before indexing instead.",
FutureWarning,
stacklevel=stacklevel,
)
def unpack_1tuple(tup):
"""
If we have a length-1 tuple/list that contains a slice, unpack to just
the slice.
Notes
-----
The list case is deprecated.
"""
if len(tup) == 1 and isinstance(tup[0], slice):
# if we don't have a MultiIndex, we may still be able to handle
# a 1-tuple. see test_1tuple_without_multiindex
if isinstance(tup, list):
# GH#31299
warnings.warn(
"Indexing with a single-item list containing a "
"slice is deprecated and will raise in a future "
"version. Pass a tuple instead.",
FutureWarning,
stacklevel=3,
)
return tup[0]
return tup
# -----------------------------------------------------------
# Public indexer validation
def check_array_indexer(array: AnyArrayLike, indexer: Any) -> Any:
"""
Check if `indexer` is a valid array indexer for `array`.
For a boolean mask, `array` and `indexer` are checked to have the same
length. The dtype is validated, and if it is an integer or boolean
ExtensionArray, it is checked if there are missing values present, and
it is converted to the appropriate numpy array. Other dtypes will raise
an error.
Non-array indexers (integer, slice, Ellipsis, tuples, ..) are passed
through as is.
.. versionadded:: 1.0.0
Parameters
----------
array : array-like
The array that is being indexed (only used for the length).
indexer : array-like or list-like
The array-like that's used to index. List-like input that is not yet
a numpy array or an ExtensionArray is converted to one. Other input
types are passed through as is.
Returns
-------
numpy.ndarray
The validated indexer as a numpy array that can be used to index.
Raises
------
IndexError
When the lengths don't match.
ValueError
When `indexer` cannot be converted to a numpy ndarray to index
(e.g. presence of missing values).
See Also
--------
api.types.is_bool_dtype : Check if `key` is of boolean dtype.
Examples
--------
When checking a boolean mask, a boolean ndarray is returned when the
arguments are all valid.
>>> mask = pd.array([True, False])
>>> arr = pd.array([1, 2])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
An IndexError is raised when the lengths don't match.
>>> mask = pd.array([True, False, True])
>>> pd.api.indexers.check_array_indexer(arr, mask)
Traceback (most recent call last):
...
IndexError: Boolean index has wrong length: 3 instead of 2.
NA values in a boolean array are treated as False.
>>> mask = pd.array([True, pd.NA])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
A numpy boolean mask will get passed through (if the length is correct):
>>> mask = np.array([True, False])
>>> pd.api.indexers.check_array_indexer(arr, mask)
array([ True, False])
Similarly for integer indexers, an integer ndarray is returned when it is
a valid indexer, otherwise an error is (for integer indexers, a matching
length is not required):
>>> indexer = pd.array([0, 2], dtype="Int64")
>>> arr = pd.array([1, 2, 3])
>>> pd.api.indexers.check_array_indexer(arr, indexer)
array([0, 2])
>>> indexer = pd.array([0, pd.NA], dtype="Int64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
ValueError: Cannot index with an integer indexer containing NA values
For non-integer/boolean dtypes, an appropriate error is raised:
>>> indexer = np.array([0., 2.], dtype="float64")
>>> pd.api.indexers.check_array_indexer(arr, indexer)
Traceback (most recent call last):
...
IndexError: arrays used as indices must be of integer or boolean type
"""
from pandas.core.construction import array as pd_array
# whatever is not an array-like is returned as-is (possible valid array
# indexers that are not array-like: integer, slice, Ellipsis, None)
# In this context, tuples are not considered as array-like, as they have
# a specific meaning in indexing (multi-dimensional indexing)
    if is_list_like(indexer):
        if isinstance(indexer, tuple):
            return indexer
    else:
        return indexer
# convert list-likes to array
if not is_array_like(indexer):
indexer = pd_array(indexer)
if len(indexer) == 0:
# empty list is converted to float array by pd.array
indexer = np.array([], dtype=np.intp)
dtype = indexer.dtype
if is_bool_dtype(dtype):
if is_extension_array_dtype(dtype):
indexer = indexer.to_numpy(dtype=bool, na_value=False)
else:
indexer = np.asarray(indexer, dtype=bool)
# GH26658
if len(indexer) != len(array):
raise IndexError(
f"Boolean index has wrong length: "
f"{len(indexer)} instead of {len(array)}"
)
elif is_integer_dtype(dtype):
try:
indexer = np.asarray(indexer, dtype=np.intp)
except ValueError as err:
raise ValueError(
"Cannot index with an integer indexer containing NA values"
) from err
else:
raise IndexError("arrays used as indices must be of integer or boolean type")
return indexer
| bsd-3-clause |
jmmease/pandas | pandas/tests/plotting/test_frame.py | 3 | 119068 | # coding: utf-8
""" Test cases for DataFrame.plot """
import pytest
import string
import warnings
from datetime import datetime, date
import pandas as pd
from pandas import (Series, DataFrame, MultiIndex, PeriodIndex, date_range,
bdate_range)
from pandas.core.dtypes.api import is_list_like
from pandas.compat import range, lrange, lmap, lzip, u, zip, PY3
from pandas.io.formats.printing import pprint_thing
import pandas.util.testing as tm
import numpy as np
from numpy.random import rand, randn
import pandas.plotting as plotting
from pandas.tests.plotting.common import (TestPlotBase, _check_plot_works,
_skip_if_no_scipy_gaussian_kde,
_ok_for_gaussian_kde)
tm._skip_if_no_mpl()
class TestDataFramePlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame({"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(
size=20)})
@pytest.mark.slow
def test_plot(self):
df = self.tdf
_check_plot_works(df.plot, grid=False)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot,
subplots=True, use_index=False)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
        # mpl >= 1.5.2 (or slightly below) throws AttributeError
with pytest.raises((TypeError, AttributeError)):
df.plot.line(blarg=True)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, use_index=True)
_check_plot_works(df.plot, sort_columns=False)
_check_plot_works(df.plot, yticks=[1, 5, 10])
_check_plot_works(df.plot, xticks=[1, 5, 10])
_check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot, subplots=True, title='blah')
# We have to redo it here because _check_plot_works does two plots,
# once without an ax kwarg and once with an ax kwarg and the new sharex
# behaviour does not remove the visibility of the latter axis (as ax is
# present). see: https://github.com/pandas-dev/pandas/issues/9737
axes = df.plot(subplots=True, title='blah')
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
# axes[0].figure.savefig("test.png")
for ax in axes[:2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
self._check_visible([ax.xaxis.get_label()], visible=False)
for ax in [axes[2]]:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible([ax.xaxis.get_label()])
self._check_ticks_props(ax, xrot=0)
_check_plot_works(df.plot, title='blah')
tuples = lzip(string.ascii_letters[:10], range(10))
df = DataFrame(np.random.rand(10, 3),
index=MultiIndex.from_tuples(tuples))
_check_plot_works(df.plot, use_index=True)
# unicode
index = MultiIndex.from_tuples([(u('\u03b1'), 0),
(u('\u03b1'), 1),
(u('\u03b2'), 2),
(u('\u03b2'), 3),
(u('\u03b3'), 4),
(u('\u03b3'), 5),
(u('\u03b4'), 6),
(u('\u03b4'), 7)], names=['i0', 'i1'])
columns = MultiIndex.from_tuples([('bar', u('\u0394')),
('bar', u('\u0395'))], names=['c0',
'c1'])
df = DataFrame(np.random.randint(0, 10, (8, 2)),
columns=columns,
index=index)
_check_plot_works(df.plot, title=u('\u03A3'))
# GH 6951
# Test with single column
df = DataFrame({'x': np.random.rand(10)})
axes = _check_plot_works(df.plot.bar, subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
axes = _check_plot_works(df.plot.bar, subplots=True, layout=(-1, 1))
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
# When ax is supplied and required number of axes is 1,
# passed ax should be used:
fig, ax = self.plt.subplots()
axes = df.plot.bar(subplots=True, ax=ax)
assert len(axes) == 1
if self.mpl_ge_1_5_0:
result = ax.axes
else:
result = ax.get_axes() # deprecated
assert result is axes[0]
# GH 15516
def test_mpl2_color_cycle_str(self):
# test CN mpl 2.0 color cycle
if self.mpl_ge_2_0_0:
colors = ['C' + str(x) for x in range(10)]
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
for c in colors:
_check_plot_works(df.plot, color=c)
else:
pytest.skip("not supported in matplotlib < 2.0.0")
def test_color_single_series_list(self):
# GH 3486
df = DataFrame({"A": [1, 2, 3]})
_check_plot_works(df.plot, color=['red'])
def test_rgb_tuple_color(self):
# GH 16695
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
_check_plot_works(df.plot, x='x', y='y', color=(1, 0, 0))
_check_plot_works(df.plot, x='x', y='y', color=(1, 0, 0, 0.5))
def test_color_empty_string(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(color='')
def test_color_and_style_arguments(self):
df = DataFrame({'x': [1, 2], 'y': [3, 4]})
# passing both 'color' and 'style' arguments should be allowed
# if there is no color symbol in the style strings:
ax = df.plot(color=['red', 'black'], style=['-', '--'])
# check that the linestyles are correctly set:
linestyle = [line.get_linestyle() for line in ax.lines]
assert linestyle == ['-', '--']
# check that the colors are correctly set:
color = [line.get_color() for line in ax.lines]
assert color == ['red', 'black']
# passing both 'color' and 'style' arguments should not be allowed
# if there is a color symbol in the style strings:
with pytest.raises(ValueError):
df.plot(color=['red', 'black'], style=['k-', 'r--'])
def test_nonnumeric_exclude(self):
df = DataFrame({'A': ["x", "y", "z"], 'B': [1, 2, 3]})
ax = df.plot()
assert len(ax.get_lines()) == 1 # B was plotted
@pytest.mark.slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=['a', 'b', 'c'])
ax = df.plot(x='a', y='b')
self._check_text_labels(ax.xaxis.get_label(), 'a')
@pytest.mark.slow
def test_donot_overwrite_index_name(self):
# GH 8494
df = DataFrame(randn(2, 2), columns=['a', 'b'])
df.index.name = 'NAME'
df.plot(y='b', label='LABEL')
assert df.index.name == 'NAME'
@pytest.mark.slow
def test_plot_xy(self):
# columns.inferred_type == 'string'
df = self.tdf
self._check_data(df.plot(x=0, y=1), df.set_index('A')['B'].plot())
self._check_data(df.plot(x=0), df.set_index('A').plot())
self._check_data(df.plot(y=0), df.B.plot())
self._check_data(df.plot(x='A', y='B'), df.set_index('A').B.plot())
self._check_data(df.plot(x='A'), df.set_index('A').plot())
self._check_data(df.plot(y='B'), df.B.plot())
# columns.inferred_type == 'integer'
df.columns = lrange(1, len(df.columns) + 1)
self._check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
self._check_data(df.plot(x=1), df.set_index(1).plot())
self._check_data(df.plot(y=1), df[1].plot())
# figsize and title
ax = df.plot(x=1, y=2, title='Test', figsize=(16, 8))
self._check_text_labels(ax.title, 'Test')
self._check_axes_shape(ax, axes_num=1, layout=(1, 1),
figsize=(16., 8.))
# columns.inferred_type == 'mixed'
# TODO add MultiIndex test
@pytest.mark.slow
def test_logscales(self):
df = DataFrame({'a': np.arange(100)}, index=np.arange(100))
ax = df.plot(logy=True)
self._check_ax_scales(ax, yaxis='log')
ax = df.plot(logx=True)
self._check_ax_scales(ax, xaxis='log')
ax = df.plot(loglog=True)
self._check_ax_scales(ax, xaxis='log', yaxis='log')
@pytest.mark.slow
def test_xcompat(self):
import pandas as pd
df = self.tdf
ax = df.plot(x_compat=True)
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plotting.plot_params['xaxis.compat'] = True
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
pd.plotting.plot_params['x_compat'] = False
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
tm.close()
# useful if you're plotting a bunch together
with pd.plotting.plot_params.use('x_compat', True):
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
tm.close()
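        # outside the context manager the default (x_compat disabled)
        # applies again, so the x-data converts cleanly to a PeriodIndex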
ax = df.plot()
lines = ax.get_lines()
assert not isinstance(lines[0].get_xdata(), PeriodIndex)
assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_period_compat(self):
# GH 9012
# period-array conversions
df = DataFrame(
np.random.rand(21, 2),
index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
columns=['a', 'b'])
df.plot()
self.plt.axhline(y=0)
tm.close()
def test_unsorted_index(self):
df = DataFrame({'y': np.arange(100)}, index=np.arange(99, -1, -1),
dtype=np.int64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y, check_index_type=False)
tm.close()
df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)
ax = df.plot()
l = ax.get_lines()[0]
rs = l.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name='y')
tm.assert_series_equal(rs, df.y)
def test_unsorted_index_lims(self):
df = DataFrame({'y': [0., 1., 2., 3.]}, index=[1., 0., 3., 2.])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
df = DataFrame({'y': [0., 1., np.nan, 3., 4., 5., 6.]},
index=[1., 0., 3., 2., np.nan, 3., 2.])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
df = DataFrame({'y': [0., 1., 2., 3.], 'z': [91., 90., 93., 92.]})
ax = df.plot(x='z', y='y')
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
@pytest.mark.slow
def test_subplots(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
for kind in ['bar', 'barh', 'line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
assert axes.shape == (3, )
for ax, column in zip(axes, df.columns):
self._check_legend_labels(ax,
labels=[pprint_thing(column)])
for ax in axes[:-2]:
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
axes = df.plot(kind=kind, subplots=True, sharex=False)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
axes = df.plot(kind=kind, subplots=True, legend=False)
for ax in axes:
assert ax.get_legend() is None
@pytest.mark.slow
def test_subplots_timeseries(self):
idx = date_range(start='2014-07-01', freq='M', periods=10)
df = DataFrame(np.random.rand(10, 3), index=idx)
for kind in ['line', 'area']:
axes = df.plot(kind=kind, subplots=True, sharex=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes[:-2]:
# GH 7801
self._check_visible(ax.xaxis) # xaxis must be visible for grid
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
self._check_visible(ax.xaxis.get_label(), visible=False)
self._check_visible(ax.get_yticklabels())
self._check_visible(axes[-1].xaxis)
self._check_visible(axes[-1].get_xticklabels())
self._check_visible(axes[-1].get_xticklabels(minor=True))
self._check_visible(axes[-1].xaxis.get_label())
self._check_visible(axes[-1].get_yticklabels())
self._check_ticks_props(axes, xrot=0)
axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45,
fontsize=7)
for ax in axes:
self._check_visible(ax.xaxis)
self._check_visible(ax.get_xticklabels())
self._check_visible(ax.get_xticklabels(minor=True))
self._check_visible(ax.xaxis.get_label())
self._check_visible(ax.get_yticklabels())
self._check_ticks_props(ax, xlabelsize=7, xrot=45,
ylabelsize=7)
def test_subplots_timeseries_y_axis(self):
# GH16953
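        # every supported dtype (numeric, timedelta, datetime with and
        # without tz) must be plottable on the y-axis with its values
        # preserved; the text column must raise a TypeError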
data = {"numeric": np.array([1, 2, 5]),
"timedelta": [pd.Timedelta(-10, unit="s"),
pd.Timedelta(10, unit="m"),
pd.Timedelta(10, unit="h")],
"datetime_no_tz": [pd.to_datetime("2017-08-01 00:00:00"),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00")],
"datetime_all_tz": [pd.to_datetime("2017-08-01 00:00:00",
utc=True),
pd.to_datetime("2017-08-01 02:00:00",
utc=True),
pd.to_datetime("2017-08-02 00:00:00",
utc=True)],
"text": ["This", "should", "fail"]}
testdata = DataFrame(data)
ax_numeric = testdata.plot(y="numeric")
assert (ax_numeric.get_lines()[0].get_data()[1] ==
testdata["numeric"].values).all()
ax_timedelta = testdata.plot(y="timedelta")
assert (ax_timedelta.get_lines()[0].get_data()[1] ==
testdata["timedelta"].values).all()
ax_datetime_no_tz = testdata.plot(y="datetime_no_tz")
assert (ax_datetime_no_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_no_tz"].values).all()
ax_datetime_all_tz = testdata.plot(y="datetime_all_tz")
assert (ax_datetime_all_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_all_tz"].values).all()
with pytest.raises(TypeError):
testdata.plot(y="text")
    @pytest.mark.xfail(reason='not supported for period, categorical, '
'datetime_mixed_tz')
def test_subplots_timeseries_y_axis_not_supported(self):
"""
This test will fail for:
        period:
            since period isn't yet implemented in ``select_dtypes``
            and because it will need a custom value converter +
            tick formatter (as was done for x-axis plots)
        categorical:
            because it will need a custom value converter +
            tick formatter (also doesn't work for the x-axis, as of now)
        datetime_mixed_tz:
            because of the way pandas handles ``Series`` of
            ``datetime`` objects with different timezones;
            generally, converting the ``datetime`` objects to a
            tz-aware form could help with this problem
"""
data = {"numeric": np.array([1, 2, 5]),
"period": [pd.Period('2017-08-01 00:00:00', freq='H'),
pd.Period('2017-08-01 02:00', freq='H'),
pd.Period('2017-08-02 00:00:00', freq='H')],
"categorical": pd.Categorical(["c", "b", "a"],
categories=["a", "b", "c"],
ordered=False),
"datetime_mixed_tz": [pd.to_datetime("2017-08-01 00:00:00",
utc=True),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00")]}
testdata = pd.DataFrame(data)
ax_period = testdata.plot(x="numeric", y="period")
assert (ax_period.get_lines()[0].get_data()[1] ==
testdata["period"].values).all()
ax_categorical = testdata.plot(x="numeric", y="categorical")
assert (ax_categorical.get_lines()[0].get_data()[1] ==
testdata["categorical"].values).all()
ax_datetime_mixed_tz = testdata.plot(x="numeric",
y="datetime_mixed_tz")
assert (ax_datetime_mixed_tz.get_lines()[0].get_data()[1] ==
testdata["datetime_mixed_tz"].values).all()
@pytest.mark.slow
def test_subplots_layout(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
assert axes.shape == (4, 1)
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(1, 1))
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(-1, -1))
# single column
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1, )
axes = df.plot(subplots=True, layout=(3, 3))
self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
assert axes.shape == (3, 3)
@pytest.mark.slow
def test_subplots_warnings(self):
# GH 9464
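        # escalate warnings to errors so that any warning emitted while
        # building the subplot layout fails the test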
warnings.simplefilter('error')
try:
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(np.random.randn(100, 4),
index=date_range('1/1/2000', periods=100))
df.plot(subplots=True, layout=(3, 2))
except Warning as w:
self.fail(w)
warnings.simplefilter('default')
@pytest.mark.slow
def test_subplots_multiple_axes(self):
# GH 5353, 6970, GH 7069
fig, axes = self.plt.subplots(2, 3)
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes[0], sharex=False,
sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3, )
assert returned[0].figure is fig
# draw on second row
returned = df.plot(subplots=True, ax=axes[1], sharex=False,
sharey=False)
self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
assert returned.shape == (3, )
assert returned[0].figure is fig
self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
tm.close()
with pytest.raises(ValueError):
fig, axes = self.plt.subplots(2, 3)
# pass different number of axes from required
df.plot(subplots=True, ax=axes)
# pass 2-dim axes and invalid layout
        # an invalid layout should not affect the input and return value
        # (the warning that is shown is tested in
        # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
fig, axes = self.plt.subplots(2, 2)
with warnings.catch_warnings():
df = DataFrame(np.random.rand(10, 4),
index=list(string.ascii_letters[:10]))
returned = df.plot(subplots=True, ax=axes, layout=(2, 1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
returned = df.plot(subplots=True, ax=axes, layout=(2, -1),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
returned = df.plot(subplots=True, ax=axes, layout=(-1, 2),
sharex=False, sharey=False)
self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
assert returned.shape == (4, )
# single column
fig, axes = self.plt.subplots(1, 1)
df = DataFrame(np.random.rand(10, 1),
index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1, )
def test_subplots_ts_share_axes(self):
# GH 3964
fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
df = DataFrame(
np.random.randn(10, 9),
index=date_range(start='2014-07-01', freq='M', periods=10))
for i, ax in enumerate(axes.ravel()):
df[i].plot(ax=ax, fontsize=5)
# Rows other than bottom should not be visible
for ax in axes[0:-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=False)
# Bottom row should be visible
for ax in axes[-1].ravel():
self._check_visible(ax.get_xticklabels(), visible=True)
# First column should be visible
for ax in axes[[0, 1, 2], [0]].ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
# Other columns should not be visible
for ax in axes[[0, 1, 2], [1]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes[[0, 1, 2], [2]].ravel():
self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {'A': [1., 2., 3., 4.], 'B': [4., 3., 2., 1.], 'C': [5, 1, 3, 4]}
df = DataFrame(d, index=date_range('2014 10 11', '2014 10 14'))
axes = df[['A', 'B']].plot(subplots=True)
df['C'].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
@pytest.mark.slow
def test_subplots_dup_columns(self):
# GH 10962
df = DataFrame(np.random.rand(5, 5), columns=list('aaaaa'))
axes = df.plot(subplots=True)
for ax in axes:
self._check_legend_labels(ax, labels=['a'])
assert len(ax.lines) == 1
tm.close()
axes = df.plot(subplots=True, secondary_y='a')
for ax in axes:
# (right) is only attached when subplots=False
self._check_legend_labels(ax, labels=['a'])
assert len(ax.lines) == 1
tm.close()
ax = df.plot(secondary_y='a')
self._check_legend_labels(ax, labels=['a (right)'] * 5)
assert len(ax.lines) == 0
assert len(ax.right_ax.lines) == 5
def test_negative_log(self):
df = - DataFrame(rand(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
with pytest.raises(ValueError):
df.plot.area(logy=True)
with pytest.raises(ValueError):
df.plot.area(loglog=True)
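    # Helper: the y-data of each stacked line must equal the cumulative
    # sum of the y-data of the corresponding unstacked lines.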
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines):
            base += nl.get_data()[1]  # get y coordinates
sy = sl.get_data()[1]
tm.assert_numpy_array_equal(base, sy)
def test_line_area_stacked(self):
with tm.RNGContext(42):
df = DataFrame(rand(6, 4), columns=['w', 'x', 'y', 'z'])
neg_df = -df
# each column has either positive or negative value
sep_df = DataFrame({'w': rand(6),
'x': rand(6),
'y': -rand(6),
'z': -rand(6)})
# each column has positive-negative mixed value
mixed_df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['w', 'x', 'y', 'z'])
for kind in ['line', 'area']:
ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines, ax2.lines)
ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
_check_plot_works(mixed_df.plot, stacked=False)
with pytest.raises(ValueError):
mixed_df.plot(stacked=True)
_check_plot_works(df.plot, kind=kind, logx=True, stacked=True)
def test_line_area_nan_df(self):
values1 = [1, 2, np.nan, 3]
values2 = [3, np.nan, 2, 1]
df = DataFrame({'a': values1, 'b': values2})
tdf = DataFrame({'a': values1,
'b': values2}, index=tm.makeDateIndex(k=4))
for d in [df, tdf]:
ax = _check_plot_works(d.plot)
masked1 = ax.lines[0].get_ydata()
masked2 = ax.lines[1].get_ydata()
            # remove nan for comparison purposes
exp = np.array([1, 2, 3], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)
exp = np.array([3, 2, 1], dtype=np.float64)
tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)
tm.assert_numpy_array_equal(
masked1.mask, np.array([False, False, True, False]))
tm.assert_numpy_array_equal(
masked2.mask, np.array([False, True, False, False]))
expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
expected2 = np.array([3, 0, 2, 1], dtype=np.float64)
ax = _check_plot_works(d.plot, stacked=True)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(),
expected1 + expected2)
ax = _check_plot_works(d.plot.area)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(),
expected1 + expected2)
ax = _check_plot_works(d.plot.area, stacked=False)
tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
def test_line_lim(self):
df = DataFrame(rand(6, 3), columns=['x', 'y', 'z'])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
ax = df.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
axes = df.plot(secondary_y=True, subplots=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes:
assert hasattr(ax, 'left_ax')
assert not hasattr(ax, 'right_ax')
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
def test_area_lim(self):
df = DataFrame(rand(6, 4), columns=['x', 'y', 'z', 'four'])
neg_df = -df
for stacked in [True, False]:
ax = _check_plot_works(df.plot.area, stacked=stacked)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
assert ymin == 0
ax = _check_plot_works(neg_df.plot.area, stacked=stacked)
ymin, ymax = ax.get_ylim()
assert ymax == 0
@pytest.mark.slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
default_colors = self._maybe_unpack_cycler(plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.bar()
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.bar(color=custom_colors)
self._check_colors(ax.patches[::5], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.bar(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.bar(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.bar(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
tm.close()
ax = df.plot(kind='bar', color='green')
self._check_colors(ax.patches[::5], facecolors=['green'] * 5)
tm.close()
def test_bar_user_colors(self):
df = pd.DataFrame({"A": range(4),
"B": range(1, 5),
"color": ['red', 'blue', 'blue', 'red']})
# This should *only* work when `y` is specified, else
# we use one color per column
ax = df.plot.bar(y='A', color=df['color'])
result = [p.get_facecolor() for p in ax.patches]
expected = [(1., 0., 0., 1.),
(0., 0., 1., 1.),
(0., 0., 1., 1.),
(1., 0., 0., 1.)]
assert result == expected
@pytest.mark.slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot.bar(linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# stacked
ax = df.plot.bar(stacked=True, linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# subplots
axes = df.plot.bar(linewidth=2, subplots=True)
self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
for ax in axes:
for r in ax.patches:
assert r.get_linewidth() == 2
@pytest.mark.slow
def test_bar_barwidth(self):
df = DataFrame(randn(5, 5))
width = 0.9
# regular
ax = df.plot.bar(width=width)
for r in ax.patches:
assert r.get_width() == width / len(df.columns)
# stacked
ax = df.plot.bar(stacked=True, width=width)
for r in ax.patches:
assert r.get_width() == width
# horizontal regular
ax = df.plot.barh(width=width)
for r in ax.patches:
assert r.get_height() == width / len(df.columns)
# horizontal stacked
ax = df.plot.barh(stacked=True, width=width)
for r in ax.patches:
assert r.get_height() == width
# subplots
axes = df.plot.bar(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_width() == width
# horizontal subplots
axes = df.plot.barh(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_height() == width
@pytest.mark.slow
def test_bar_barwidth_position(self):
df = DataFrame(randn(5, 5))
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9,
position=0.2)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9,
position=0.2)
@pytest.mark.slow
def test_bar_barwidth_position_int(self):
# GH 12979
df = DataFrame(randn(5, 5))
for w in [1, 1.]:
ax = df.plot.bar(stacked=True, width=w)
ticks = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
assert ax.get_xlim() == (-0.75, 4.75)
# check left-edge of bars
assert ax.patches[0].get_x() == -0.5
assert ax.patches[-1].get_x() == 3.5
self._check_bar_alignment(df, kind='bar', stacked=True, width=1)
self._check_bar_alignment(df, kind='barh', stacked=False, width=1)
self._check_bar_alignment(df, kind='barh', stacked=True, width=1)
self._check_bar_alignment(df, kind='bar', subplots=True, width=1)
self._check_bar_alignment(df, kind='barh', subplots=True, width=1)
@pytest.mark.slow
def test_bar_bottom_left(self):
df = DataFrame(rand(5, 5))
ax = df.plot.bar(stacked=False, bottom=1)
result = [p.get_y() for p in ax.patches]
assert result == [1] * 25
ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])
result = [p.get_y() for p in ax.patches[:5]]
assert result == [-1, -2, -3, -4, -5]
ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))
result = [p.get_x() for p in ax.patches]
assert result == [1] * 25
ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])
result = [p.get_x() for p in ax.patches[:5]]
assert result == [1, 2, 3, 4, 5]
axes = df.plot.bar(subplots=True, bottom=-1)
for ax in axes:
result = [p.get_y() for p in ax.patches]
assert result == [-1] * 5
axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))
for ax in axes:
result = [p.get_x() for p in ax.patches]
assert result == [1] * 5
@pytest.mark.slow
def test_bar_nan(self):
df = DataFrame({'A': [10, np.nan, 20],
'B': [5, 10, 20],
'C': [1, 2, 3]})
ax = df.plot.bar()
expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
result = [p.get_height() for p in ax.patches]
assert result == expected
ax = df.plot.bar(stacked=True)
result = [p.get_height() for p in ax.patches]
assert result == expected
result = [p.get_y() for p in ax.patches]
expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
assert result == expected
@pytest.mark.slow
def test_bar_categorical(self):
# GH 13019
df1 = pd.DataFrame(np.random.randn(6, 5),
index=pd.Index(list('ABCDEF')),
columns=pd.Index(list('abcde')))
# categorical index must behave the same
df2 = pd.DataFrame(np.random.randn(6, 5),
index=pd.CategoricalIndex(list('ABCDEF')),
columns=pd.CategoricalIndex(list('abcde')))
for df in [df1, df2]:
ax = df.plot.bar()
ticks = ax.xaxis.get_ticklocs()
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
assert ax.get_xlim() == (-0.5, 5.5)
# check left-edge of bars
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 5.15
ax = df.plot.bar(stacked=True)
tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
assert ax.get_xlim() == (-0.5, 5.5)
assert ax.patches[0].get_x() == -0.25
assert ax.patches[-1].get_x() == 4.75
@pytest.mark.slow
def test_plot_scatter(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
_check_plot_works(df.plot.scatter, x='x', y='y')
_check_plot_works(df.plot.scatter, x=1, y=2)
with pytest.raises(TypeError):
df.plot.scatter(x='x')
with pytest.raises(TypeError):
df.plot.scatter(y='y')
# GH 6951
axes = df.plot(x='x', y='y', kind='scatter', subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_plot_scatter_with_categorical_data(self):
# GH 16199
df = pd.DataFrame({'x': [1, 2, 3, 4],
'y': pd.Categorical(['a', 'b', 'a', 'c'])})
with pytest.raises(ValueError) as ve:
df.plot(x='x', y='y', kind='scatter')
ve.match('requires y column to be numeric')
with pytest.raises(ValueError) as ve:
df.plot(x='y', y='x', kind='scatter')
ve.match('requires x column to be numeric')
with pytest.raises(ValueError) as ve:
df.plot(x='y', y='y', kind='scatter')
ve.match('requires x column to be numeric')
@pytest.mark.slow
def test_plot_scatter_with_c(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['x', 'y', 'z', 'four'])
axes = [df.plot.scatter(x='x', y='y', c='z'),
df.plot.scatter(x=0, y=1, c=2)]
for ax in axes:
# default to Greys
assert ax.collections[0].cmap.name == 'Greys'
if self.mpl_ge_1_3_1:
# n.b. there appears to be no public method to get the colorbar
# label
assert ax.collections[0].colorbar._label == 'z'
cm = 'cubehelix'
ax = df.plot.scatter(x='x', y='y', c='z', colormap=cm)
assert ax.collections[0].cmap.name == cm
# verify turning off colorbar works
ax = df.plot.scatter(x='x', y='y', c='z', colorbar=False)
assert ax.collections[0].colorbar is None
# verify that we can still plot a solid color
ax = df.plot.scatter(x=0, y=1, c='red')
assert ax.collections[0].colorbar is None
self._check_colors(ax.collections, facecolors=['r'])
# Ensure that we can pass an np.array straight through to matplotlib,
# this functionality was accidentally removed previously.
# See https://github.com/pandas-dev/pandas/issues/8852 for bug report
#
# Exercise colormap path and non-colormap path as they are independent
#
df = DataFrame({'A': [1, 2], 'B': [3, 4]})
red_rgba = [1.0, 0.0, 0.0, 1.0]
green_rgba = [0.0, 1.0, 0.0, 1.0]
rgba_array = np.array([red_rgba, green_rgba])
ax = df.plot.scatter(x='A', y='B', c=rgba_array)
# expect the face colors of the points in the non-colormap path to be
# identical to the values we supplied, normally we'd be on shaky ground
# comparing floats for equality but here we expect them to be
# identical.
tm.assert_numpy_array_equal(ax.collections[0]
.get_facecolor(), rgba_array)
# we don't test the colors of the faces in this next plot because they
# are dependent on the spring colormap, which may change its colors
# later.
float_array = np.array([0.0, 1.0])
df.plot.scatter(x='A', y='B', c=float_array, cmap='spring')
def test_scatter_colors(self):
df = DataFrame({'a': [1, 2, 3], 'b': [1, 2, 3], 'c': [1, 2, 3]})
with pytest.raises(TypeError):
df.plot.scatter(x='a', y='b', c='c', color='green')
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
ax = df.plot.scatter(x='a', y='b', c='c')
tm.assert_numpy_array_equal(
ax.collections[0].get_facecolor()[0],
np.array(self.colorconverter.to_rgba(default_colors[0])))
ax = df.plot.scatter(x='a', y='b', color='white')
tm.assert_numpy_array_equal(ax.collections[0].get_facecolor()[0],
np.array([1, 1, 1, 1], dtype=np.float64))
@pytest.mark.slow
def test_plot_bar(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
_check_plot_works(df.plot.bar)
_check_plot_works(df.plot.bar, legend=False)
        # _check_plot_works adds an ax, so catch the warning; see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot.bar, subplots=True)
_check_plot_works(df.plot.bar, stacked=True)
df = DataFrame(randn(10, 15),
index=list(string.ascii_letters[:10]),
columns=lrange(15))
_check_plot_works(df.plot.bar)
df = DataFrame({'a': [0, 1], 'b': [1, 0]})
ax = _check_plot_works(df.plot.bar)
self._check_ticks_props(ax, xrot=90)
ax = df.plot.bar(rot=35, fontsize=10)
self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
ax = _check_plot_works(df.plot.barh)
self._check_ticks_props(ax, yrot=0)
ax = df.plot.barh(rot=55, fontsize=11)
self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
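    # Helper: plot ``df`` with the given bar options and check that the
    # axis limits leave a 0.25 margin around the outermost bar edges,
    # that the ticks sit on integer positions, and that the first tick
    # coincides with the bar center ('center') or its edge ('edge').
    # Returns the flattened axes.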
def _check_bar_alignment(self, df, kind='bar', stacked=False,
subplots=False, align='center', width=0.5,
position=0.5):
axes = df.plot(kind=kind, stacked=stacked, subplots=subplots,
align=align, width=width, position=position, grid=True)
axes = self._flatten_visible(axes)
for ax in axes:
if kind == 'bar':
axis = ax.xaxis
ax_min, ax_max = ax.get_xlim()
min_edge = min([p.get_x() for p in ax.patches])
max_edge = max([p.get_x() + p.get_width() for p in ax.patches])
elif kind == 'barh':
axis = ax.yaxis
ax_min, ax_max = ax.get_ylim()
min_edge = min([p.get_y() for p in ax.patches])
max_edge = max([p.get_y() + p.get_height() for p in ax.patches
])
else:
raise ValueError
# GH 7498
# compare margins between lim and bar edges
tm.assert_almost_equal(ax_min, min_edge - 0.25)
tm.assert_almost_equal(ax_max, max_edge + 0.25)
p = ax.patches[0]
if kind == 'bar' and (stacked is True or subplots is True):
edge = p.get_x()
center = edge + p.get_width() * position
elif kind == 'bar' and stacked is False:
center = p.get_x() + p.get_width() * len(df.columns) * position
edge = p.get_x()
elif kind == 'barh' and (stacked is True or subplots is True):
center = p.get_y() + p.get_height() * position
edge = p.get_y()
elif kind == 'barh' and stacked is False:
center = p.get_y() + p.get_height() * len(
df.columns) * position
edge = p.get_y()
else:
raise ValueError
            # check that the ticks are located at integer positions
assert (axis.get_ticklocs() == np.arange(len(df))).all()
if align == 'center':
                # check whether the bar is located at the center
tm.assert_almost_equal(axis.get_ticklocs()[0], center)
elif align == 'edge':
# Check whether the bar's edge starts from the tick
tm.assert_almost_equal(axis.get_ticklocs()[0], edge)
else:
raise ValueError
return axes
@pytest.mark.slow
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9)
@pytest.mark.slow
def test_bar_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9)
@pytest.mark.slow
def test_bar_subplots_center(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9)
self._check_bar_alignment(df, kind='barh', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9)
@pytest.mark.slow
def test_bar_align_single_column(self):
df = DataFrame(randn(5))
self._check_bar_alignment(df, kind='bar', stacked=False)
self._check_bar_alignment(df, kind='bar', stacked=True)
self._check_bar_alignment(df, kind='barh', stacked=False)
self._check_bar_alignment(df, kind='barh', stacked=True)
self._check_bar_alignment(df, kind='bar', subplots=True)
self._check_bar_alignment(df, kind='barh', subplots=True)
@pytest.mark.slow
def test_bar_edge(self):
df = DataFrame({'A': [3] * 5, 'B': lrange(5)}, index=lrange(5))
self._check_bar_alignment(df, kind='bar', stacked=True, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, align='edge')
self._check_bar_alignment(df, kind='bar', stacked=False, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, align='edge')
self._check_bar_alignment(df, kind='barh', stacked=False, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, align='edge')
self._check_bar_alignment(df, kind='bar', subplots=True, width=0.9,
align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, align='edge')
self._check_bar_alignment(df, kind='barh', subplots=True, width=0.9,
align='edge')
@pytest.mark.slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
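        # matplotlib versions newer than 1.2.1 include additional log
        # ticks (0.1 and 100) in the expected tick locations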
expected = np.array([1., 10.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 100))
# no subplots
df = DataFrame({'A': [3] * 5, 'B': lrange(1, 6)}, index=lrange(5))
ax = df.plot.bar(grid=True, log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_bar_log_subplots(self):
expected = np.array([1., 10., 100., 1000.])
if not self.mpl_le_1_2_1:
expected = np.hstack((.1, expected, 1e4))
ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
log=True, subplots=True)
tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_boxplot(self):
df = self.hist_df
series = df['height']
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
ax = _check_plot_works(df.plot.box)
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(),
np.arange(1, len(numeric_cols) + 1))
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
# different warning on py3
if not PY3:
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.box, subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, yaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_xticklabels(), [label])
assert len(ax.lines) == self.bp_n_objects
axes = series.plot.box(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = _check_plot_works(series.plot.box)
positions = np.array([1, 6, 7])
ax = df.plot.box(positions=positions)
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
self._check_text_labels(ax.get_xticklabels(), labels)
tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
@pytest.mark.slow
def test_boxplot_vertical(self):
df = self.hist_df
numeric_cols = df._get_numeric_data().columns
labels = [pprint_thing(c) for c in numeric_cols]
# if horizontal, yticklabels are rotated
ax = df.plot.box(rot=50, fontsize=8, vert=False)
self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
self._check_text_labels(ax.get_yticklabels(), labels)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
        # _check_plot_works adds an ax, so catch the warning; see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.box,
subplots=True, vert=False, logx=True)
self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
self._check_ax_scales(axes, xaxis='log')
for ax, label in zip(axes, labels):
self._check_text_labels(ax.get_yticklabels(), [label])
assert len(ax.lines) == self.bp_n_objects
positions = np.array([3, 2, 8])
ax = df.plot.box(positions=positions, vert=False)
self._check_text_labels(ax.get_yticklabels(), labels)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
@pytest.mark.slow
def test_boxplot_return_type(self):
df = DataFrame(randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=['one', 'two', 'three', 'four'])
with pytest.raises(ValueError):
df.plot.box(return_type='NOTATYPE')
result = df.plot.box(return_type='dict')
self._check_box_return_type(result, 'dict')
result = df.plot.box(return_type='axes')
self._check_box_return_type(result, 'axes')
result = df.plot.box() # default axes
self._check_box_return_type(result, 'axes')
result = df.plot.box(return_type='both')
self._check_box_return_type(result, 'both')
@pytest.mark.slow
def test_boxplot_subplots_return_type(self):
df = self.hist_df
# normal style: return_type=None
result = df.plot.box(subplots=True)
assert isinstance(result, Series)
self._check_box_return_type(result, None, expected_keys=[
'height', 'weight', 'category'])
for t in ['dict', 'axes', 'both']:
returned = df.plot.box(return_type=t, subplots=True)
self._check_box_return_type(
returned, t,
expected_keys=['height', 'weight', 'category'],
check_ax_title=False)
@pytest.mark.slow
def test_kde_df(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
if not self.mpl_ge_1_5_0:
pytest.skip("mpl is not supported")
df = DataFrame(randn(100, 4))
ax = _check_plot_works(df.plot, kind='kde')
expected = [pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
self._check_ticks_props(ax, xrot=0)
ax = df.plot(kind='kde', rot=20, fontsize=5)
self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot, kind='kde',
subplots=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
axes = df.plot(kind='kde', logy=True, subplots=True)
self._check_ax_scales(axes, yaxis='log')
@pytest.mark.slow
def test_kde_missing_vals(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
if not self.mpl_ge_1_5_0:
pytest.skip("mpl is not supported")
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
_check_plot_works(df.plot, kind='kde')
@pytest.mark.slow
def test_hist_df(self):
from matplotlib.patches import Rectangle
if self.mpl_le_1_2_1:
pytest.skip("not supported in matplotlib <= 1.2.x")
df = DataFrame(randn(100, 4))
series = df[0]
ax = _check_plot_works(df.plot.hist)
expected = [pprint_thing(c) for c in df.columns]
self._check_legend_labels(ax, labels=expected)
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.hist,
subplots=True, logy=True)
self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
self._check_ax_scales(axes, yaxis='log')
axes = series.plot.hist(rot=40)
self._check_ticks_props(axes, xrot=40, yrot=0)
tm.close()
ax = series.plot.hist(normed=True, cumulative=True, bins=4)
# height of last bin (index 5) must be 1.0
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-1].get_height(), 1.0)
tm.close()
ax = series.plot.hist(cumulative=True, bins=4)
rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
tm.assert_almost_equal(rects[-2].get_height(), 100.0)
tm.close()
# if horizontal, yticklabels are rotated
axes = df.plot.hist(rot=50, fontsize=8, orientation='horizontal')
self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
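    # Helper: compare the x/y positions and widths/heights of the given
    # bar patches against the expected arrays (dtype is not checked).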
def _check_box_coord(self, patches, expected_y=None, expected_h=None,
expected_x=None, expected_w=None):
result_y = np.array([p.get_y() for p in patches])
result_height = np.array([p.get_height() for p in patches])
result_x = np.array([p.get_x() for p in patches])
result_width = np.array([p.get_width() for p in patches])
        # dtype depends on the values above, so it is not checked
if expected_y is not None:
tm.assert_numpy_array_equal(result_y, expected_y,
check_dtype=False)
if expected_h is not None:
tm.assert_numpy_array_equal(result_height, expected_h,
check_dtype=False)
if expected_x is not None:
tm.assert_numpy_array_equal(result_x, expected_x,
check_dtype=False)
if expected_w is not None:
tm.assert_numpy_array_equal(result_width, expected_w,
check_dtype=False)
@pytest.mark.slow
def test_hist_df_coord(self):
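        # every column produces five bars (bins=5); check their base and
        # extent for vertical and, on matplotlib >= 1.3.1, horizontal
        # histograms, in plain, stacked and subplot form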
normal_df = DataFrame({'A': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([8, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, 4, 5]),
np.array([6, 7, 8, 9, 10]))},
columns=['A', 'B', 'C'])
nan_df = DataFrame({'A': np.repeat(np.array([np.nan, 1, 2, 3, 4, 5]),
np.array([3, 10, 9, 8, 7, 6])),
'B': np.repeat(np.array([1, np.nan, 2, 3, 4, 5]),
np.array([8, 3, 8, 8, 8, 8])),
'C': np.repeat(np.array([1, 2, 3, np.nan, 4, 5]),
np.array([6, 7, 8, 3, 9, 10]))},
columns=['A', 'B', 'C'])
for df in [normal_df, nan_df]:
ax = df.plot.hist(bins=5)
self._check_box_coord(ax.patches[:5],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True)
self._check_box_coord(ax.patches[:5],
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_y=np.array([10, 9, 8, 7, 6]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_y=np.array([18, 17, 16, 15, 14]),
expected_h=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True, subplots=True)
self._check_box_coord(axes[0].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches,
expected_y=np.array([0, 0, 0, 0, 0]),
expected_h=np.array([6, 7, 8, 9, 10]))
if self.mpl_ge_1_3_1:
# horizontal
ax = df.plot.hist(bins=5, orientation='horizontal')
self._check_box_coord(ax.patches[:5],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(ax.patches[10:],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
ax = df.plot.hist(bins=5, stacked=True,
orientation='horizontal')
self._check_box_coord(ax.patches[:5],
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(ax.patches[5:10],
expected_x=np.array([10, 9, 8, 7, 6]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(
ax.patches[10:],
expected_x=np.array([18, 17, 16, 15, 14]),
expected_w=np.array([6, 7, 8, 9, 10]))
axes = df.plot.hist(bins=5, stacked=True, subplots=True,
orientation='horizontal')
self._check_box_coord(axes[0].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([10, 9, 8, 7, 6]))
self._check_box_coord(axes[1].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([8, 8, 8, 8, 8]))
self._check_box_coord(axes[2].patches,
expected_x=np.array([0, 0, 0, 0, 0]),
expected_w=np.array([6, 7, 8, 9, 10]))
@pytest.mark.slow
def test_plot_int_columns(self):
df = DataFrame(randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
@pytest.mark.slow
def test_df_legend_labels(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
df2 = DataFrame(rand(3, 3), columns=['d', 'e', 'f'])
df3 = DataFrame(rand(3, 3), columns=['g', 'h', 'i'])
df4 = DataFrame(rand(3, 3), columns=['j', 'k', 'l'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=True)
self._check_legend_labels(ax, labels=df.columns)
ax = df2.plot(kind=kind, legend=False, ax=ax)
self._check_legend_labels(ax, labels=df.columns)
ax = df3.plot(kind=kind, legend=True, ax=ax)
self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
ax = df4.plot(kind=kind, legend='reverse', ax=ax)
expected = list(df.columns.union(df3.columns)) + list(reversed(
df4.columns))
self._check_legend_labels(ax, labels=expected)
# Secondary Y
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(kind='bar', legend=True, secondary_y='h', ax=ax)
self._check_legend_labels(
ax, labels=['a', 'b (right)', 'c', 'g', 'h (right)', 'i'])
# Time Series
ind = date_range('1/1/2014', periods=3)
df = DataFrame(randn(3, 3), columns=['a', 'b', 'c'], index=ind)
df2 = DataFrame(randn(3, 3), columns=['d', 'e', 'f'], index=ind)
df3 = DataFrame(randn(3, 3), columns=['g', 'h', 'i'], index=ind)
ax = df.plot(legend=True, secondary_y='b')
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df2.plot(legend=False, ax=ax)
self._check_legend_labels(ax, labels=['a', 'b (right)', 'c'])
ax = df3.plot(legend=True, ax=ax)
self._check_legend_labels(
ax, labels=['a', 'b (right)', 'c', 'g', 'h', 'i'])
# scatter
ax = df.plot.scatter(x='a', y='b', label='data1')
self._check_legend_labels(ax, labels=['data1'])
ax = df2.plot.scatter(x='d', y='e', legend=False, label='data2', ax=ax)
self._check_legend_labels(ax, labels=['data1'])
ax = df3.plot.scatter(x='g', y='h', label='data3', ax=ax)
self._check_legend_labels(ax, labels=['data1', 'data3'])
# ensure label args pass through and
# index name does not mutate
# column names don't mutate
df5 = df.set_index('a')
ax = df5.plot(y='b')
self._check_legend_labels(ax, labels=['b'])
ax = df5.plot(y='b', label='LABEL_b')
self._check_legend_labels(ax, labels=['LABEL_b'])
self._check_text_labels(ax.xaxis.get_label(), 'a')
ax = df5.plot(y='c', label='LABEL_c', ax=ax)
self._check_legend_labels(ax, labels=['LABEL_b', 'LABEL_c'])
assert df5.columns.tolist() == ['b', 'c']
def test_legend_name(self):
multi = DataFrame(randn(4, 4),
columns=[np.array(['a', 'a', 'b', 'b']),
np.array(['x', 'y', 'x', 'y'])])
multi.columns.names = ['group', 'individual']
ax = multi.plot()
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df = DataFrame(randn(5, 5))
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
df.columns.name = 'new'
ax = df.plot(legend=False, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'group,individual')
ax = df.plot(legend=True, ax=ax)
leg_title = ax.legend_.get_title()
self._check_text_labels(leg_title, 'new')
@pytest.mark.slow
def test_no_legend(self):
kinds = ['line', 'bar', 'barh', 'kde', 'area', 'hist']
df = DataFrame(rand(3, 3), columns=['a', 'b', 'c'])
for kind in kinds:
if not _ok_for_gaussian_kde(kind):
continue
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
@pytest.mark.slow
def test_style_by_column(self):
import matplotlib.pyplot as plt
fig = plt.gcf()
df = DataFrame(randn(100, 3))
for markers in [{0: '^',
1: '+',
2: 'o'}, {0: '^',
1: '+'}, ['^', '+', 'o'], ['^', '+']]:
fig.clf()
fig.add_subplot(111)
ax = df.plot(style=markers)
for i, l in enumerate(ax.get_lines()[:len(markers)]):
assert l.get_marker() == markers[i]
@pytest.mark.slow
def test_line_label_none(self):
s = Series([1, 2])
ax = s.plot()
assert ax.get_legend() is None
ax = s.plot(legend=True)
assert ax.get_legend().get_texts()[0].get_text() == 'None'
@pytest.mark.slow
@tm.capture_stdout
def test_line_colors(self):
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(randn(5, 5))
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax2 = df.plot(colors=custom_colors)
lines2 = ax2.get_lines()
for l1, l2 in zip(ax.get_lines(), lines2):
assert l1.get_color() == l2.get_color()
tm.close()
ax = df.plot(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
        # make color a list if plotting a one-column frame;
        # this handles cases like df.plot(color='DodgerBlue')
ax = df.loc[:, [0]].plot(color='DodgerBlue')
self._check_colors(ax.lines, linecolors=['DodgerBlue'])
ax = df.plot(color='red')
self._check_colors(ax.get_lines(), linecolors=['red'] * 5)
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
ax = df.plot(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
with pytest.raises(ValueError):
            # a color list containing shorthand hex values raises a ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
            # force the plot to be drawn
_check_plot_works(df.plot, color=custom_colors)
@pytest.mark.slow
def test_dont_modify_colors(self):
colors = ['r', 'g', 'b']
pd.DataFrame(np.random.rand(10, 2)).plot(color=colors)
assert len(colors) == 3
@pytest.mark.slow
def test_line_colors_and_styles_subplots(self):
# GH 9894
from matplotlib import cm
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(subplots=True)
for ax, c in zip(axes, list(default_colors)):
if self.mpl_ge_2_0_0:
c = [c]
self._check_colors(ax.get_lines(), linecolors=c)
tm.close()
# single color char
axes = df.plot(subplots=True, color='k')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(subplots=True, color='green')
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['green'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
axes = df.plot(color=list(custom_colors), subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# GH 10299
custom_colors = ['#FF0000', '#0000FF', '#FFFF00', '#000000', '#FFFFFF']
axes = df.plot(color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
with pytest.raises(ValueError):
            # a color list containing shorthand hex values raises a ValueError
custom_colors = ['#F00', '#00F', '#FF0', '#000', '#FFF']
            # force the plot to be drawn
            # _check_plot_works adds an ax, so catch the warning; see GH #13188
with tm.assert_produces_warning(UserWarning):
_check_plot_works(df.plot, color=custom_colors, subplots=True)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
        # make color a list if plotting a one-column frame;
        # this handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(color='DodgerBlue', subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@pytest.mark.slow
def test_area_colors(self):
from matplotlib import cm
from matplotlib.collections import PolyCollection
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.area(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=custom_colors)
handles, labels = ax.get_legend_handles_labels()
if self.mpl_ge_1_5_0:
self._check_colors(handles, facecolors=custom_colors)
else:
# legend is stored as Line2D, thus check linecolors
linehandles = [x for x in handles
if not isinstance(x, PolyCollection)]
self._check_colors(linehandles, linecolors=custom_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
ax = df.plot.area(colormap='jet')
jet_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
self._check_colors(poly, facecolors=jet_colors)
handles, labels = ax.get_legend_handles_labels()
if self.mpl_ge_1_5_0:
self._check_colors(handles, facecolors=jet_colors)
else:
linehandles = [x for x in handles
if not isinstance(x, PolyCollection)]
self._check_colors(linehandles, linecolors=jet_colors)
for h in handles:
assert h.get_alpha() is None
tm.close()
# When stacked=False, alpha is set to 0.5
ax = df.plot.area(colormap=cm.jet, stacked=False)
self._check_colors(ax.get_lines(), linecolors=jet_colors)
poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
self._check_colors(poly, facecolors=jet_with_alpha)
handles, labels = ax.get_legend_handles_labels()
if self.mpl_ge_1_5_0:
linecolors = jet_with_alpha
else:
# Line2D can't have alpha in its linecolor
linecolors = jet_colors
self._check_colors(handles[:len(jet_colors)], linecolors=linecolors)
for h in handles:
assert h.get_alpha() == 0.5
@pytest.mark.slow
def test_hist_colors(self):
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.hist()
self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
tm.close()
custom_colors = 'rgcby'
ax = df.plot.hist(color=custom_colors)
self._check_colors(ax.patches[::10], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.hist(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.hist(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, 5))
self._check_colors(ax.patches[::10], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.hist(color='DodgerBlue')
self._check_colors([ax.patches[0]], facecolors=['DodgerBlue'])
ax = df.plot(kind='hist', color='green')
self._check_colors(ax.patches[::10], facecolors=['green'] * 5)
tm.close()
@pytest.mark.slow
def test_kde_colors(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
if not self.mpl_ge_1_5_0:
pytest.skip("mpl is not supported")
from matplotlib import cm
custom_colors = 'rgcby'
df = DataFrame(rand(5, 5))
ax = df.plot.kde(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax = df.plot.kde(colormap='jet')
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot.kde(colormap=cm.jet)
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
@pytest.mark.slow
def test_kde_colors_and_styles_subplots(self):
tm._skip_if_no_scipy()
_skip_if_no_scipy_gaussian_kde()
if not self.mpl_ge_1_5_0:
pytest.skip("mpl is not supported")
from matplotlib import cm
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
axes = df.plot(kind='kde', subplots=True)
for ax, c in zip(axes, list(default_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
# single color char
axes = df.plot(kind='kde', color='k', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['k'])
tm.close()
# single color str
axes = df.plot(kind='kde', color='red', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['red'])
tm.close()
custom_colors = 'rgcby'
axes = df.plot(kind='kde', color=custom_colors, subplots=True)
for ax, c in zip(axes, list(custom_colors)):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
rgba_colors = lmap(cm.jet, np.linspace(0, 1, len(df)))
for cmap in ['jet', cm.jet]:
axes = df.plot(kind='kde', colormap=cmap, subplots=True)
for ax, c in zip(axes, rgba_colors):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
        # make color a list if plotting a one-column frame;
        # this handles cases like df.plot(color='DodgerBlue')
axes = df.loc[:, [0]].plot(kind='kde', color='DodgerBlue',
subplots=True)
self._check_colors(axes[0].lines, linecolors=['DodgerBlue'])
# single character style
axes = df.plot(kind='kde', style='r', subplots=True)
for ax in axes:
self._check_colors(ax.get_lines(), linecolors=['r'])
tm.close()
# list of styles
styles = list('rgcby')
axes = df.plot(kind='kde', style=styles, subplots=True)
for ax, c in zip(axes, styles):
self._check_colors(ax.get_lines(), linecolors=[c])
tm.close()
@pytest.mark.slow
def test_boxplot_colors(self):
def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c='k',
fliers_c=None):
# TODO: outside this func?
if fliers_c is None:
fliers_c = 'k' if self.mpl_ge_2_0_0 else 'b'
self._check_colors(bp['boxes'],
linecolors=[box_c] * len(bp['boxes']))
self._check_colors(bp['whiskers'],
linecolors=[whiskers_c] * len(bp['whiskers']))
self._check_colors(bp['medians'],
linecolors=[medians_c] * len(bp['medians']))
self._check_colors(bp['fliers'],
linecolors=[fliers_c] * len(bp['fliers']))
self._check_colors(bp['caps'],
linecolors=[caps_c] * len(bp['caps']))
default_colors = self._maybe_unpack_cycler(self.plt.rcParams)
df = DataFrame(randn(5, 5))
bp = df.plot.box(return_type='dict')
_check_colors(bp, default_colors[0], default_colors[0],
default_colors[2])
tm.close()
dict_colors = dict(boxes='#572923', whiskers='#982042',
medians='#804823', caps='#123456')
bp = df.plot.box(color=dict_colors, sym='r+', return_type='dict')
_check_colors(bp, dict_colors['boxes'], dict_colors['whiskers'],
dict_colors['medians'], dict_colors['caps'], 'r')
tm.close()
# partial colors
dict_colors = dict(whiskers='c', medians='m')
bp = df.plot.box(color=dict_colors, return_type='dict')
_check_colors(bp, default_colors[0], 'c', 'm')
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
bp = df.plot.box(colormap='jet', return_type='dict')
jet_colors = lmap(cm.jet, np.linspace(0, 1, 3))
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# Test colormap functionality
bp = df.plot.box(colormap=cm.jet, return_type='dict')
_check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
tm.close()
# string color is applied to all artists except fliers
bp = df.plot.box(color='DodgerBlue', return_type='dict')
_check_colors(bp, 'DodgerBlue', 'DodgerBlue', 'DodgerBlue',
'DodgerBlue')
# tuple is also applied to all artists except fliers
bp = df.plot.box(color=(0, 1, 0), sym='#123456', return_type='dict')
_check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0),
(0, 1, 0), '#123456')
with pytest.raises(ValueError):
            # A color dict containing an invalid key results in ValueError
df.plot.box(color=dict(boxes='red', xxxx='blue'))
def test_default_color_cycle(self):
import matplotlib.pyplot as plt
colors = list('rgbk')
if self.mpl_ge_1_5_0:
import cycler
plt.rcParams['axes.prop_cycle'] = cycler.cycler('color', colors)
else:
plt.rcParams['axes.color_cycle'] = colors
df = DataFrame(randn(5, 3))
ax = df.plot()
expected = self._maybe_unpack_cycler(plt.rcParams)[:3]
self._check_colors(ax.get_lines(), linecolors=expected)
def test_unordered_ts(self):
df = DataFrame(np.array([3.0, 2.0, 1.0]),
index=[date(2012, 10, 1),
date(2012, 9, 1),
date(2012, 8, 1)],
columns=['test'])
ax = df.plot()
xticks = ax.lines[0].get_xdata()
assert xticks[0] < xticks[1]
ydata = ax.lines[0].get_ydata()
tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
def test_kind_both_ways(self):
df = DataFrame({'x': [1, 2, 3]})
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
df.plot(kind=kind)
getattr(df.plot, kind)()
for kind in ['scatter', 'hexbin']:
df.plot('x', 'x', kind=kind)
getattr(df.plot, kind)('x', 'x')
def test_all_invalid_plot_data(self):
df = DataFrame(list('abcd'))
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
df.plot(kind=kind)
@pytest.mark.slow
def test_partially_invalid_plot_data(self):
with tm.RNGContext(42):
df = DataFrame(randn(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in plotting._core._common_kinds:
if not _ok_for_gaussian_kde(kind):
continue
with pytest.raises(TypeError):
df.plot(kind=kind)
with tm.RNGContext(42):
# area plot doesn't support positive/negative mixed data
kinds = ['area']
df = DataFrame(rand(10, 2), dtype=object)
df[np.random.rand(df.shape[0]) > 0.5] = 'a'
for kind in kinds:
with pytest.raises(TypeError):
df.plot(kind=kind)
def test_invalid_kind(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(kind='aasdf')
@pytest.mark.slow
def test_hexbin_basic(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', gridsize=10)
# TODO: need better way to test. This just does existence.
assert len(ax.collections) == 1
# GH 6951
axes = df.plot.hexbin(x='A', y='B', subplots=True)
        # hexbin should have 2 axes in the figure, 1 for plotting and another
        # for the colorbar
assert len(axes[0].figure.axes) == 2
# return value is single axes
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_hexbin_with_c(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', C='C')
assert len(ax.collections) == 1
ax = df.plot.hexbin(x='A', y='B', C='C', reduce_C_function=np.std)
assert len(ax.collections) == 1
@pytest.mark.slow
def test_hexbin_cmap(self):
df = self.hexbin_df
# Default to BuGn
ax = df.plot.hexbin(x='A', y='B')
assert ax.collections[0].cmap.name == 'BuGn'
cm = 'cubehelix'
ax = df.plot.hexbin(x='A', y='B', colormap=cm)
assert ax.collections[0].cmap.name == cm
@pytest.mark.slow
def test_no_color_bar(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', colorbar=None)
assert ax.collections[0].colorbar is None
@pytest.mark.slow
def test_allow_cmap(self):
df = self.hexbin_df
ax = df.plot.hexbin(x='A', y='B', cmap='YlGn')
assert ax.collections[0].cmap.name == 'YlGn'
with pytest.raises(TypeError):
df.plot.hexbin(x='A', y='B', cmap='YlGn', colormap='BuGn')
@pytest.mark.slow
def test_pie_df(self):
df = DataFrame(np.random.rand(5, 3), columns=['X', 'Y', 'Z'],
index=['a', 'b', 'c', 'd', 'e'])
with pytest.raises(ValueError):
df.plot.pie()
ax = _check_plot_works(df.plot.pie, y='Y')
self._check_text_labels(ax.texts, df.index)
ax = _check_plot_works(df.plot.pie, y=2)
self._check_text_labels(ax.texts, df.index)
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.pie,
subplots=True)
assert len(axes) == len(df.columns)
for ax in axes:
self._check_text_labels(ax.texts, df.index)
for ax, ylabel in zip(axes, df.columns):
assert ax.get_ylabel() == ylabel
labels = ['A', 'B', 'C', 'D', 'E']
color_args = ['r', 'g', 'b', 'c', 'm']
with tm.assert_produces_warning(UserWarning):
axes = _check_plot_works(df.plot.pie,
subplots=True, labels=labels,
colors=color_args)
assert len(axes) == len(df.columns)
for ax in axes:
self._check_text_labels(ax.texts, labels)
self._check_colors(ax.patches, facecolors=color_args)
def test_pie_df_nan(self):
df = DataFrame(np.random.rand(4, 4))
for i in range(4):
df.iloc[i, i] = np.nan
fig, axes = self.plt.subplots(ncols=4)
df.plot.pie(subplots=True, ax=axes, legend=True)
base_expected = ['0', '1', '2', '3']
for i, ax in enumerate(axes):
expected = list(base_expected) # force copy
expected[i] = ''
result = [x.get_text() for x in ax.texts]
assert result == expected
# legend labels
# NaN's not included in legend with subplots
# see https://github.com/pandas-dev/pandas/issues/8390
assert ([x.get_text() for x in ax.get_legend().get_texts()] ==
base_expected[:i] + base_expected[i + 1:])
@pytest.mark.slow
def test_errorbar_plot(self):
with warnings.catch_warnings():
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
df_err = DataFrame(d_err)
# check line plots
ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
self._check_has_errorbars(ax, xerr=0, yerr=2)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err['x'], kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err,
kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, yerr=df_err['x'],
xerr=df_err['x'],
kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# _check_plot_works adds an ax so catch warning. see GH #13188
axes = _check_plot_works(df.plot,
yerr=df_err, xerr=df_err,
subplots=True,
kind=kind)
self._check_has_errorbars(axes, xerr=1, yerr=1)
ax = _check_plot_works((df + 1).plot, yerr=df_err,
xerr=df_err, kind='bar', log=True)
self._check_has_errorbars(ax, xerr=2, yerr=2)
# yerr is raw error values
ax = _check_plot_works(df['y'].plot, yerr=np.ones(12) * 0.4)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is iterator
import itertools
ax = _check_plot_works(df.plot,
yerr=itertools.repeat(0.1, len(df)))
self._check_has_errorbars(ax, xerr=0, yerr=2)
# yerr is column name
for yerr in ['yerr', u('誤差')]:
s_df = df.copy()
s_df[yerr] = np.ones(12) * 0.2
ax = _check_plot_works(s_df.plot, yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(s_df.plot, y='y', x='x', yerr=yerr)
self._check_has_errorbars(ax, xerr=0, yerr=1)
with pytest.raises(ValueError):
df.plot(yerr=np.random.randn(11))
df_err = DataFrame({'x': ['zzz'] * 12, 'y': ['zzz'] * 12})
with pytest.raises((ValueError, TypeError)):
df.plot(yerr=df_err)
@pytest.mark.slow
def test_errorbar_with_integer_column_names(self):
# test with integer column names
df = DataFrame(np.random.randn(10, 2))
df_err = DataFrame(np.random.randn(10, 2))
ax = _check_plot_works(df.plot, yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, y=0, yerr=1)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@pytest.mark.slow
def test_errorbar_with_partial_columns(self):
df = DataFrame(np.random.randn(10, 3))
df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
kinds = ['line', 'bar']
for kind in kinds:
ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ix = date_range('1/1/2000', periods=10, freq='M')
df.set_index(ix, inplace=True)
df_err.set_index(ix, inplace=True)
ax = _check_plot_works(df.plot, yerr=df_err, kind='line')
self._check_has_errorbars(ax, xerr=0, yerr=2)
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
df = DataFrame(d)
d_err = {'x': np.ones(12) * 0.2, 'z': np.ones(12) * 0.4}
df_err = DataFrame(d_err)
for err in [d_err, df_err]:
ax = _check_plot_works(df.plot, yerr=err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
@pytest.mark.slow
def test_errorbar_timeseries(self):
with warnings.catch_warnings():
d = {'x': np.arange(12), 'y': np.arange(12, 0, -1)}
d_err = {'x': np.ones(12) * 0.2, 'y': np.ones(12) * 0.4}
# check time-series plots
ix = date_range('1/1/2000', '1/1/2001', freq='M')
tdf = DataFrame(d, index=ix)
tdf_err = DataFrame(d_err, index=ix)
kinds = ['line', 'bar', 'barh']
for kind in kinds:
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(tdf.plot, y='y', yerr=tdf_err['x'],
kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, y='y', yerr='x', kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
self._check_has_errorbars(ax, xerr=0, yerr=2)
# _check_plot_works adds an ax so catch warning. see GH #13188
axes = _check_plot_works(tdf.plot,
kind=kind, yerr=tdf_err,
subplots=True)
self._check_has_errorbars(axes, xerr=0, yerr=1)
def test_errorbar_asymmetrical(self):
np.random.seed(0)
err = np.random.rand(3, 2, 5)
        # each column is [0, 1, 2, 3, 4], [5, 6, 7, 8, 9], [10, 11, 12, 13, 14]
df = DataFrame(np.arange(15).reshape(3, 5)).T
data = df.values
ax = df.plot(yerr=err, xerr=err / 2)
if self.mpl_ge_2_0_0:
yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]
expected_0_0 = err[0, :, 0] * np.array([-1, 1])
tm.assert_almost_equal(yerr_0_0, expected_0_0)
else:
assert ax.lines[7].get_ydata()[0] == data[0, 1] - err[1, 0, 0]
assert ax.lines[8].get_ydata()[0] == data[0, 1] + err[1, 1, 0]
assert ax.lines[5].get_xdata()[0] == -err[1, 0, 0] / 2
assert ax.lines[6].get_xdata()[0] == err[1, 1, 0] / 2
with pytest.raises(ValueError):
df.plot(yerr=err.T)
tm.close()
def test_table(self):
df = DataFrame(np.random.rand(10, 3),
index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, table=True)
_check_plot_works(df.plot, table=df)
ax = df.plot()
assert len(ax.tables) == 0
plotting.table(ax, df.T)
assert len(ax.tables) == 1
def test_errorbar_scatter(self):
df = DataFrame(
np.random.randn(5, 2), index=range(5), columns=['x', 'y'])
df_err = DataFrame(np.random.randn(5, 2) / 5,
index=range(5), columns=['x', 'y'])
ax = _check_plot_works(df.plot.scatter, x='x', y='y')
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot.scatter, x='x', y='y', xerr=df_err,
yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
def _check_errorbar_color(containers, expected, has_err='has_xerr'):
lines = []
errs = [c.lines
for c in ax.containers if getattr(c, has_err, False)][0]
for el in errs:
if is_list_like(el):
lines.extend(el)
else:
lines.append(el)
err_lines = [x for x in lines if x in ax.collections]
self._check_colors(
err_lines, linecolors=np.array([expected] * len(err_lines)))
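        # Note: _check_errorbar_color reads ``ax`` from the enclosing scope at
        # call time (its ``containers`` argument is unused), so it must only be
        # called after ``ax`` has been assigned below.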
# GH 8081
df = DataFrame(
np.random.randn(10, 5), columns=['a', 'b', 'c', 'd', 'e'])
ax = df.plot.scatter(x='a', y='b', xerr='d', yerr='e', c='red')
self._check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, 'red', has_err='has_xerr')
_check_errorbar_color(ax.containers, 'red', has_err='has_yerr')
ax = df.plot.scatter(x='a', y='b', yerr='e', color='green')
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, 'green', has_err='has_yerr')
@pytest.mark.slow
def test_sharex_and_ax(self):
        # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
        # the axes in fig.get_axes() are sorted differently than pandas
        # expected them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
plt.close('all')
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[0], axes[2]]:
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(
ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[1], axes[3]]:
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
# without sharex, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@pytest.mark.slow
def test_sharey_and_ax(self):
        # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
        # the axes in fig.get_axes() are sorted differently than pandas
        # expected them, so make sure that only the right ones are removed
import matplotlib.pyplot as plt
gs, axes = _generate_4_axes_via_gridspec()
df = DataFrame({"a": [1, 2, 3, 4, 5, 6],
"b": [1, 2, 3, 4, 5, 6],
"c": [1, 2, 3, 4, 5, 6],
"d": [1, 2, 3, 4, 5, 6]})
def _check(axes):
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
for ax in [axes[0], axes[1]]:
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[2], axes[3]]:
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharey=True)
gs.tight_layout(plt.gcf())
_check(axes)
tm.close()
gs, axes = _generate_4_axes_via_gridspec()
        # without sharey, no labels should be touched!
for ax in axes:
df.plot(x="a", y="b", title="title", ax=ax)
gs.tight_layout(plt.gcf())
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
def test_memory_leak(self):
""" Check that every plot type gets properly collected. """
import weakref
import gc
results = {}
for kind in plotting._core._plot_klass.keys():
if not _ok_for_gaussian_kde(kind):
continue
args = {}
if kind in ['hexbin', 'scatter', 'pie']:
df = self.hexbin_df
args = {'x': 'A', 'y': 'B'}
elif kind == 'area':
df = self.tdf.abs()
else:
df = self.tdf
# Use a weakref so we can see if the object gets collected without
# also preventing it from being collected
results[kind] = weakref.proxy(df.plot(kind=kind, **args))
# have matplotlib delete all the figures
tm.close()
# force a garbage collection
gc.collect()
for key in results:
# check that every plot was collected
with pytest.raises(ReferenceError):
# need to actually access something to get an error
results[key].lines
@pytest.mark.slow
def test_df_subplots_patterns_minorticks(self):
# GH 10657
import matplotlib.pyplot as plt
df = DataFrame(np.random.randn(10, 2),
index=date_range('1/1/2000', periods=10),
columns=list('AB'))
# shared subplots
fig, axes = plt.subplots(2, 1, sharex=True)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
fig, axes = plt.subplots(2, 1)
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of 1st ax must be hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# not shared
fig, axes = plt.subplots(2, 1)
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@pytest.mark.slow
def test_df_gridspec_patterns(self):
# GH 10819
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
ts = Series(np.random.randn(10),
index=date_range('1/1/2000', periods=10))
df = DataFrame(np.random.randn(10, 2), index=ts.index,
columns=list('AB'))
def _get_vertical_grid():
gs = gridspec.GridSpec(3, 1)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :])
ax2 = fig.add_subplot(gs[2, :])
return ax1, ax2
def _get_horizontal_grid():
gs = gridspec.GridSpec(1, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:, :2])
ax2 = fig.add_subplot(gs[:, 2])
return ax1, ax2
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
ax1 = ts.plot(ax=ax1)
assert len(ax1.lines) == 1
ax2 = df.plot(ax=ax2)
assert len(ax2.lines) == 2
for ax in [ax1, ax2]:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots=True
for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
axes = df.plot(subplots=True, ax=[ax1, ax2])
assert len(ax1.lines) == 1
assert len(ax2.lines) == 1
for ax in axes:
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(
ax.get_xticklabels(minor=True), visible=True)
tm.close()
# vertical / subplots / sharex=True / sharey=True
ax1, ax2 = _get_vertical_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True,
sharey=True)
assert len(axes[0].lines) == 1
assert len(axes[1].lines) == 1
for ax in [ax1, ax2]:
# yaxis are visible because there is only one column
self._check_visible(ax.get_yticklabels(), visible=True)
# xaxis of axes0 (top) are hidden
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
tm.close()
# horizontal / subplots / sharex=True / sharey=True
ax1, ax2 = _get_horizontal_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True,
sharey=True)
assert len(axes[0].lines) == 1
assert len(axes[1].lines) == 1
self._check_visible(axes[0].get_yticklabels(), visible=True)
# yaxis of axes1 (right) are hidden
self._check_visible(axes[1].get_yticklabels(), visible=False)
for ax in [ax1, ax2]:
# xaxis are visible because there is only one column
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# boxed
def _get_boxed_grid():
gs = gridspec.GridSpec(3, 3)
fig = plt.figure()
ax1 = fig.add_subplot(gs[:2, :2])
ax2 = fig.add_subplot(gs[:2, 2])
ax3 = fig.add_subplot(gs[2, :2])
ax4 = fig.add_subplot(gs[2, 2])
return ax1, ax2, ax3, ax4
axes = _get_boxed_grid()
df = DataFrame(np.random.randn(10, 4),
index=ts.index, columns=list('ABCD'))
axes = df.plot(subplots=True, ax=axes)
for ax in axes:
assert len(ax.lines) == 1
            # axes are visible because these are not shared
self._check_visible(ax.get_yticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
# subplots / sharex=True / sharey=True
axes = _get_boxed_grid()
with tm.assert_produces_warning(UserWarning):
axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
for ax in axes:
assert len(ax.lines) == 1
for ax in [axes[0], axes[2]]: # left column
self._check_visible(ax.get_yticklabels(), visible=True)
for ax in [axes[1], axes[3]]: # right column
self._check_visible(ax.get_yticklabels(), visible=False)
for ax in [axes[0], axes[1]]: # top row
self._check_visible(ax.get_xticklabels(), visible=False)
self._check_visible(ax.get_xticklabels(minor=True), visible=False)
for ax in [axes[2], axes[3]]: # bottom row
self._check_visible(ax.get_xticklabels(), visible=True)
self._check_visible(ax.get_xticklabels(minor=True), visible=True)
tm.close()
@pytest.mark.slow
def test_df_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(
DataFrame({'a': [1, 2, 3], 'b': [2, 3, 4]}),
plotting._core._dataframe_kinds, kws={'x': 'a', 'y': 'b'})
def test_invalid_colormap(self):
df = DataFrame(randn(3, 2), columns=['A', 'B'])
with pytest.raises(ValueError):
df.plot(colormap='invalid_colormap')
def test_plain_axes(self):
        # supplied ax itself is a SubplotAxes, but the figure also contains
        # a plain Axes object (GH11556)
fig, ax = self.plt.subplots()
fig.add_axes([0.2, 0.2, 0.2, 0.2])
Series(rand(10)).plot(ax=ax)
        # supplied ax itself is a plain Axes, but because of the cmap keyword
        # a new ax is created for the colorbar -> also multiple axes (GH11520)
df = DataFrame({'a': randn(8), 'b': randn(8)})
fig = self.plt.figure()
ax = fig.add_axes((0, 0, 1, 1))
df.plot(kind='scatter', ax=ax, x='a', y='b', c='a', cmap='hsv')
# other examples
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid1 import make_axes_locatable
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=cax)
fig, ax = self.plt.subplots()
from mpl_toolkits.axes_grid.inset_locator import inset_axes
iax = inset_axes(ax, width="30%", height=1., loc=3)
Series(rand(10)).plot(ax=ax)
Series(rand(10)).plot(ax=iax)
def test_passed_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
colormap = mpl.colors.ListedColormap(color_tuples)
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap)
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
def test_rcParams_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
try: # mpl 1.5
with mpl.rc_context(
rc={'axes.prop_cycle': mpl.cycler("color", color_tuples)}):
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
except (AttributeError, KeyError): # mpl 1.4
with mpl.rc_context(rc={'axes.color_cycle': color_tuples}):
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
@pytest.mark.parametrize('method', ['line', 'barh', 'bar'])
def test_secondary_axis_font_size(self, method):
# GH: 12565
df = (pd.DataFrame(np.random.randn(15, 2),
columns=list('AB'))
.assign(C=lambda df: df.B.cumsum())
.assign(D=lambda df: df.C * 1.1))
fontsize = 20
sy = ['C', 'D']
kwargs = dict(secondary_y=sy, fontsize=fontsize,
mark_right=True)
ax = getattr(df.plot, method)(**kwargs)
self._check_ticks_props(axes=ax.right_ax,
ylabelsize=fontsize)
def _generate_4_axes_via_gridspec():
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.gridspec # noqa
gs = mpl.gridspec.GridSpec(2, 2)
ax_tl = plt.subplot(gs[0, 0])
ax_ll = plt.subplot(gs[1, 0])
ax_tr = plt.subplot(gs[0, 1])
ax_lr = plt.subplot(gs[1, 1])
return gs, [ax_tl, ax_ll, ax_tr, ax_lr]
| bsd-3-clause |
chrisburr/scikit-learn | sklearn/manifold/tests/test_spectral_embedding.py | 26 | 9488 | from nose.tools import assert_true
from nose.tools import assert_equal
from scipy.sparse import csr_matrix
from scipy.sparse import csc_matrix
from scipy.linalg import eigh
import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from nose.plugins.skip import SkipTest
from sklearn.manifold.spectral_embedding_ import SpectralEmbedding
from sklearn.manifold.spectral_embedding_ import _graph_is_connected
from sklearn.manifold.spectral_embedding_ import _graph_connected_component
from sklearn.manifold import spectral_embedding
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.metrics import normalized_mutual_info_score
from sklearn.cluster import KMeans
from sklearn.datasets.samples_generator import make_blobs
from sklearn.utils.graph import graph_laplacian
from sklearn.utils.extmath import _deterministic_vector_sign_flip
# non centered, sparse centers to check the
centers = np.array([
[0.0, 5.0, 0.0, 0.0, 0.0],
[0.0, 0.0, 4.0, 0.0, 0.0],
[1.0, 0.0, 0.0, 5.0, 1.0],
])
n_samples = 1000
n_clusters, n_features = centers.shape
S, true_labels = make_blobs(n_samples=n_samples, centers=centers,
cluster_std=1., random_state=42)
def _check_with_col_sign_flipping(A, B, tol=0.0):
    """Check that arrays A and B are equal, up to a possible sign flip of
    each column."""
sign = True
for column_idx in range(A.shape[1]):
sign = sign and ((((A[:, column_idx] -
B[:, column_idx]) ** 2).mean() <= tol ** 2) or
(((A[:, column_idx] +
B[:, column_idx]) ** 2).mean() <= tol ** 2))
if not sign:
return False
return True
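# A spectral embedding is only defined up to the sign of each eigenvector, so two
# runs may agree column-wise only after flipping signs; the helper above hides
# that ambiguity when embeddings are compared in the tests below.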
def test_spectral_embedding_two_components(seed=36):
# Test spectral embedding with two components
random_state = np.random.RandomState(seed)
n_sample = 100
affinity = np.zeros(shape=[n_sample * 2,
n_sample * 2])
# first component
affinity[0:n_sample,
0:n_sample] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# second component
affinity[n_sample::,
n_sample::] = np.abs(random_state.randn(n_sample, n_sample)) + 2
# Test of internal _graph_connected_component before connection
component = _graph_connected_component(affinity, 0)
assert_true(component[:n_sample].all())
assert_true(not component[n_sample:].any())
component = _graph_connected_component(affinity, -1)
assert_true(not component[:n_sample].any())
assert_true(component[n_sample:].all())
# connection
affinity[0, n_sample + 1] = 1
affinity[n_sample + 1, 0] = 1
affinity.flat[::2 * n_sample + 1] = 0
affinity = 0.5 * (affinity + affinity.T)
true_label = np.zeros(shape=2 * n_sample)
true_label[0:n_sample] = 1
se_precomp = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed))
embedded_coordinate = se_precomp.fit_transform(affinity)
# Some numpy versions are touchy with types
embedded_coordinate = \
se_precomp.fit_transform(affinity.astype(np.float32))
# thresholding on the first components using 0.
label_ = np.array(embedded_coordinate.ravel() < 0, dtype="float")
assert_equal(normalized_mutual_info_score(true_label, label_), 1.0)
def test_spectral_embedding_precomputed_affinity(seed=36):
# Test spectral embedding with precomputed kernel
gamma = 1.0
se_precomp = SpectralEmbedding(n_components=2, affinity="precomputed",
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_precomp = se_precomp.fit_transform(rbf_kernel(S, gamma=gamma))
embed_rbf = se_rbf.fit_transform(S)
assert_array_almost_equal(
se_precomp.affinity_matrix_, se_rbf.affinity_matrix_)
assert_true(_check_with_col_sign_flipping(embed_precomp, embed_rbf, 0.05))
def test_spectral_embedding_callable_affinity(seed=36):
# Test spectral embedding with callable affinity
gamma = 0.9
kern = rbf_kernel(S, gamma=gamma)
se_callable = SpectralEmbedding(n_components=2,
affinity=(
lambda x: rbf_kernel(x, gamma=gamma)),
gamma=gamma,
random_state=np.random.RandomState(seed))
se_rbf = SpectralEmbedding(n_components=2, affinity="rbf",
gamma=gamma,
random_state=np.random.RandomState(seed))
embed_rbf = se_rbf.fit_transform(S)
embed_callable = se_callable.fit_transform(S)
assert_array_almost_equal(
se_callable.affinity_matrix_, se_rbf.affinity_matrix_)
assert_array_almost_equal(kern, se_rbf.affinity_matrix_)
assert_true(
_check_with_col_sign_flipping(embed_rbf, embed_callable, 0.05))
def test_spectral_embedding_amg_solver(seed=36):
# Test spectral embedding with amg solver
try:
from pyamg import smoothed_aggregation_solver
except ImportError:
raise SkipTest("pyamg not available.")
se_amg = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="amg", n_neighbors=5,
random_state=np.random.RandomState(seed))
se_arpack = SpectralEmbedding(n_components=2, affinity="nearest_neighbors",
eigen_solver="arpack", n_neighbors=5,
random_state=np.random.RandomState(seed))
embed_amg = se_amg.fit_transform(S)
embed_arpack = se_arpack.fit_transform(S)
assert_true(_check_with_col_sign_flipping(embed_amg, embed_arpack, 0.05))
def test_pipeline_spectral_clustering(seed=36):
# Test using pipeline to do spectral clustering
random_state = np.random.RandomState(seed)
se_rbf = SpectralEmbedding(n_components=n_clusters,
affinity="rbf",
random_state=random_state)
se_knn = SpectralEmbedding(n_components=n_clusters,
affinity="nearest_neighbors",
n_neighbors=5,
random_state=random_state)
for se in [se_rbf, se_knn]:
km = KMeans(n_clusters=n_clusters, random_state=random_state)
km.fit(se.fit_transform(S))
assert_array_almost_equal(
normalized_mutual_info_score(
km.labels_,
true_labels), 1.0, 2)
def test_spectral_embedding_unknown_eigensolver(seed=36):
    # Test that SpectralEmbedding fails with an unknown eigensolver
se = SpectralEmbedding(n_components=1, affinity="precomputed",
random_state=np.random.RandomState(seed),
eigen_solver="<unknown>")
assert_raises(ValueError, se.fit, S)
def test_spectral_embedding_unknown_affinity(seed=36):
    # Test that SpectralEmbedding fails with an unknown affinity type
se = SpectralEmbedding(n_components=1, affinity="<unknown>",
random_state=np.random.RandomState(seed))
assert_raises(ValueError, se.fit, S)
def test_connectivity(seed=36):
# Test that graph connectivity test works as expected
graph = np.array([[1, 0, 0, 0, 0],
[0, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), False)
assert_equal(_graph_is_connected(csr_matrix(graph)), False)
assert_equal(_graph_is_connected(csc_matrix(graph)), False)
graph = np.array([[1, 1, 0, 0, 0],
[1, 1, 1, 0, 0],
[0, 1, 1, 1, 0],
[0, 0, 1, 1, 1],
[0, 0, 0, 1, 1]])
assert_equal(_graph_is_connected(graph), True)
assert_equal(_graph_is_connected(csr_matrix(graph)), True)
assert_equal(_graph_is_connected(csc_matrix(graph)), True)
def test_spectral_embedding_deterministic():
# Test that Spectral Embedding is deterministic
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
embedding_1 = spectral_embedding(sims)
embedding_2 = spectral_embedding(sims)
assert_array_almost_equal(embedding_1, embedding_2)
def test_spectral_embedding_unnormalized():
    # Test that spectral_embedding also handles the unnormalized Laplacian correctly
random_state = np.random.RandomState(36)
data = random_state.randn(10, 30)
sims = rbf_kernel(data)
n_components = 8
embedding_1 = spectral_embedding(sims,
norm_laplacian=False,
n_components=n_components,
drop_first=False)
# Verify using manual computation with dense eigh
laplacian, dd = graph_laplacian(sims, normed=False, return_diag=True)
_, diffusion_map = eigh(laplacian)
embedding_2 = diffusion_map.T[:n_components] * dd
embedding_2 = _deterministic_vector_sign_flip(embedding_2).T
assert_array_almost_equal(embedding_1, embedding_2)
| bsd-3-clause |
sodafree/backend | build/ipython/docs/examples/parallel/dagdeps.py | 6 | 3566 | """Example for generating an arbitrary DAG as a dependency map.
This demo uses networkx to generate the graph.
Authors
-------
* MinRK
"""
import networkx as nx
from random import randint, random
from IPython import parallel
def randomwait():
import time
from random import random
time.sleep(random())
return time.time()
def random_dag(nodes, edges):
"""Generate a random Directed Acyclic Graph (DAG) with a given number of nodes and edges."""
G = nx.DiGraph()
for i in range(nodes):
G.add_node(i)
while edges > 0:
a = randint(0,nodes-1)
b=a
while b==a:
b = randint(0,nodes-1)
G.add_edge(a,b)
if nx.is_directed_acyclic_graph(G):
edges -= 1
else:
# we closed a loop!
G.remove_edge(a,b)
return G
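# Illustrative usage (not part of the original example): random_dag always
# returns an acyclic graph with exactly the requested number of edges, e.g.
#     G = random_dag(5, 4)
#     assert nx.is_directed_acyclic_graph(G) and G.number_of_edges() == 4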
def add_children(G, parent, level, n=2):
"""Add children recursively to a binary tree."""
if level == 0:
return
for i in range(n):
child = parent+str(i)
G.add_node(child)
G.add_edge(parent,child)
add_children(G, child, level-1, n)
def make_bintree(levels):
"""Make a symmetrical binary tree with @levels"""
G = nx.DiGraph()
root = '0'
G.add_node(root)
add_children(G, root, levels, 2)
return G
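# For example (illustrative), make_bintree(3) yields a tree with root '0',
# 2**3 = 8 leaves and 2**4 - 1 = 15 nodes in total.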
def submit_jobs(view, G, jobs):
"""Submit jobs via client where G describes the time dependencies."""
results = {}
for node in nx.topological_sort(G):
with view.temp_flags(after=[ results[n] for n in G.predecessors(node) ]):
results[node] = view.apply(jobs[node])
return results
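# The ``after=`` flag used above makes the load-balanced scheduler hold each task
# until the results of all of its predecessors have completed; this is exactly
# the property that validate_tree() below asserts on.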
def validate_tree(G, results):
"""Validate that jobs executed after their dependencies."""
for node in G:
started = results[node].metadata.started
for parent in G.predecessors(node):
finished = results[parent].metadata.completed
assert started > finished, "%s should have happened after %s"%(node, parent)
def main(nodes, edges):
"""Generate a random graph, submit jobs, then validate that the
dependency order was enforced.
    Finally, plot the graph, with start time on the x-axis and
    runtime on the y-axis (just for spread). All arrows must
point at least slightly to the right if the graph is valid.
"""
from matplotlib import pyplot as plt
from matplotlib.dates import date2num
from matplotlib.cm import gist_rainbow
print("building DAG")
G = random_dag(nodes, edges)
jobs = {}
pos = {}
colors = {}
for node in G:
jobs[node] = randomwait
client = parallel.Client()
view = client.load_balanced_view()
print("submitting %i tasks with %i dependencies"%(nodes,edges))
results = submit_jobs(view, G, jobs)
print("waiting for results")
view.wait()
print("done")
for node in G:
md = results[node].metadata
start = date2num(md.started)
runtime = date2num(md.completed) - start
pos[node] = (start, runtime)
colors[node] = md.engine_id
validate_tree(G, results)
    nx.draw(G, pos, nodelist=list(colors.keys()), node_color=list(colors.values()),
            cmap=gist_rainbow, with_labels=False)
x,y = zip(*pos.values())
xmin,ymin = map(min, (x,y))
xmax,ymax = map(max, (x,y))
xscale = xmax-xmin
yscale = ymax-ymin
plt.xlim(xmin-xscale*.1,xmax+xscale*.1)
plt.ylim(ymin-yscale*.1,ymax+yscale*.1)
return G,results
if __name__ == '__main__':
from matplotlib import pyplot as plt
# main(5,10)
main(32,96)
plt.show()
| bsd-3-clause |
asoliveira/NumShip | scripts/plot/r-velo-r-zz-plt.py | 1 | 3085 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#Is it dimensionless?
adi = False
#Should the figures be saved (True|False)?
save = True
#If saving, which file format is desired?
formato = 'jpg'
#If saving, in which directory should they be saved?
dircg = 'fig-sen'
#If saving, what is the file name?
nome = 'r-velo-r-zz'
#Which title to put on the plots?
titulo = ''#'Curva de ZigZag'
titulo2=''
#Which color for the plots?
pc = 'k'
r1c = 'b'
r2c = 'y'
r3c = 'r'
#Line style
ps = '-'
r1s = '-'
r2s = '-'
r3s = '-'
import os
import scipy as sp
import matplotlib.pyplot as plt
from libplot import *
acelhis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/velo.dat')
acelhis2 = sp.genfromtxt('../entrada/r/saida1.1/CurvaZigZag/velo.dat')
acelhis3 = sp.genfromtxt('../entrada/r/saida1.2/CurvaZigZag/velo.dat')
acelhis4 = sp.genfromtxt('../entrada/r/saida1.3/CurvaZigZag/velo.dat')
lemehis = sp.genfromtxt('../entrada/padrao/CurvaZigZag/leme.dat')
lemehis2 = sp.genfromtxt('../entrada/r/saida1.1/CurvaZigZag/leme.dat')
lemehis3 = sp.genfromtxt('../entrada/r/saida1.2/CurvaZigZag/leme.dat')
lemehis4 = sp.genfromtxt('../entrada/r/saida1.3/CurvaZigZag/leme.dat')
axl = [0, 1000, -0.7, 0.7]
axl2 = [0, 1000, -25, 25]#rudder axis limits
#Plotting the turning curve
if adi:
ylabel = r'$t\prime$'
xacellabel = r'$ r\prime$'
else:
ylabel = r'$\dot \psi \quad graus/s$'
xacellabel = r'$t \quad segundos$'
plt.subplot2grid((1,4),(0,0), colspan=3)
#Standard (baseline) case
plt.plot(acelhis[:, 0], acelhis[:, 6] * (180/sp.pi), color = pc, linestyle = ps,
linewidth = 2, label=ur'padrão')
plt.plot(acelhis2[:, 0], acelhis2[:, 6] * (180/sp.pi), color = r1c,linestyle = r1s,
linewidth = 2, label=ur'1.1--$r$')
plt.plot(acelhis3[:, 0], acelhis3[:, 6] * (180/sp.pi), color = r2c, linestyle = r2s,
linewidth = 2, label=ur'1.2--$r$')
plt.plot(acelhis4[:, 0], acelhis4[:, 6] * (180/sp.pi), color = r3c, linestyle = r3s,
linewidth = 2, label=ur'1.3--$r$')
plt.title(titulo)
plt.legend(bbox_to_anchor=(1.1, 1), loc=2, borderaxespad=0.)
plt.ylabel(ylabel)
plt.xlabel(xacellabel)
plt.axis(axl)
plt.grid(True)
plt.twinx()
plt.plot(lemehis[:, 0], lemehis[:, 1] * (180/sp.pi), color = pc, linestyle = "--",
linewidth = 1, label=ur'leme--padrão')
plt.plot(lemehis2[:, 0], lemehis2[:, 1] * (180/sp.pi), color = r1c, linestyle = "--",
linewidth = 1, label=ur'leme--1.1$r$')
plt.plot(lemehis3[:, 0], lemehis3[:, 1] * (180/sp.pi), color = r2c, linestyle = "--",
linewidth = 1, label=ur'leme--1.2$r$')
plt.plot(lemehis4[:, 0], lemehis4[:, 1] * (180/sp.pi), color = r3c, linestyle = "--",
linewidth = 1, label=ur'leme--1.3$r$')
plt.title(titulo2)
plt.legend(bbox_to_anchor=(1.1, 0), loc=3, borderaxespad=0.)
plt.ylabel(r"$\delta_R$")
plt.axis(axl2)
plt.grid(False)
if save:
if not os.path.exists(dircg):
os.makedirs(dircg)
if os.path.exists(dircg + '/' + nome + '.' + formato):
os.remove(dircg + '/' + nome + '.' + formato)
plt.savefig(dircg + '/' + nome + '.' + formato , format=formato)
else:
plt.show()
| gpl-3.0 |
openego/data_processing | preprocessing/python_scripts/renpass_gis/gitgeojson.py | 2 | 1489 | #!/bin/python
# -*- coding: utf-8 -*-
"""
Database dump CSV file with lon/lat is converted to a git-compatible GeoJSON
format.
Attributes
----------
infile : str
File path to database dump.
outfile : str
File path to geojson file.
Notes
-----
Dump has to match geojson keys title, description, lon and lat.
"""
__copyright__ = "ZNES"
__license__ = "GNU Affero General Public License Version 3 (AGPL-3.0)"
__url__ = "https://github.com/openego/data_processing/blob/master/LICENSE"
__author__ = "s3pp"
import pandas as pd
import json
from os.path import expanduser
infile = '~/open_eGo/scenario/modelpowerplants.csv'
outfile = expanduser("")
df = pd.read_csv(infile, sep=";")
features = []
for ix, row in df.iterrows():
# properties section
properties = {"title": row['title'],
"description": row['description'],
"marker-size": "medium",
"marker-symbol": "triangle",
"stroke": "#555555"}
# geometry section
geometry = {"type": "Point",
"coordinates": [row['lon'], row['lat']]}
# concat to feature
feature = {"type": "Feature",
"geometry": geometry,
"properties": properties}
features.append(feature)
feature_collection = {"type": "FeatureCollection",
"features": features}
with open(outfile, 'w') as out:
json.dump(feature_collection, out)
| agpl-3.0 |
ogvalt/saturn | spiking_som.py | 1 | 18544 |
from brian2 import *
from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
import matplotlib.pyplot as plt
from dataset import ArtificialDataSet
class ReceptiveField:
    # Parameter used in the standard deviation definition
gamma = 1.5
def __init__(self, bank_size=10, I_min=0.0, I_max=1.0):
self.bank_size = bank_size
self.field_mu = np.array([(I_min + ((2 * i - 2) / 2) * ((I_max - I_min) / (bank_size - 1)))
for i in range(1, bank_size + 1)])
self.field_sigma = (1.0 / self.gamma) * (I_max - I_min)
def float_to_membrane_potential(self, input_vector):
try:
input_vector = input_vector.reshape((input_vector.shape[0], 1))
except Exception as exc:
print("Exception: {0}\nObject shape: {1}".format(repr(exc), input_vector.shape))
exit(1)
temp = np.exp(-((input_vector - self.field_mu) ** 2) / (2 * self.field_sigma * self.field_sigma)) / \
(np.sqrt(2 * np.pi) * self.field_sigma)
temp += np.exp(-((input_vector - 1 - self.field_mu) ** 2) / (2 * self.field_sigma * self.field_sigma)) / \
(np.sqrt(2 * np.pi) * self.field_sigma)
temp += np.exp(-((input_vector + 1 - self.field_mu) ** 2) / (2 * self.field_sigma * self.field_sigma)) / \
(np.sqrt(2 * np.pi) * self.field_sigma)
return temp
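# Minimal usage sketch (not part of the original script): encoding one scalar
# into a bank of membrane potentials.
#
#     rf = ReceptiveField(bank_size=10, I_min=0.05, I_max=0.95)
#     pot = rf.float_to_membrane_potential(np.array([0.3]))
#     # pot has shape (1, 10): one Gaussian response per centre in field_mu,
#     # with the input wrapped at the interval borders by the two extra terms.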
if __name__ == "__main__":
prefs.codegen.target = 'numpy'
np.random.seed(1)
seed(1)
np.set_printoptions(suppress=True)
bank_size = 10
diff_method = 'euler'
# inputs = np.random.rand(3)
# inputs = np.array([0.332, 0.167, 0.946])
# inputs = np.array([0.013, 0.3401, 0.2196])
# inputs = np.array([0.829, 0.7452, 0.6728])
# print(inputs)
# N = inputs.shape[0] * bank_size
N = 20
rf = ReceptiveField(bank_size=bank_size, I_min=0.05, I_max=0.95)
# potential_input = rf.float_to_membrane_potential(inputs)
# potential_input = potential_input.flatten()
# TABLE 1
# (A) Neuronal parameters, used in (1) and (4)
time_step = 0.01;
tau_m = 10.0 * ms;
tau_m_inh = 5 * ms;
tau_m_som = 3 * ms
theta_reset_u = -0.5;
theta_reset_inh = -0.0;
theta_reset_som = 0.0
theta_u = 0.5;
theta_u_inh = 0.01;
theta_som = 0.8
# (B) Synaptic parameters, used in (2) and (3) for different synapse types
# temporal layer to som layer (u to v)
tau_r_afferent = 0.2 * ms;
tau_f_afferent = 1.0 * ms
# temporal layer (u to inh exc, u to inh inh, inh to u)
tau_r_exc = 0.4 * ms;
tau_f_exc = 2.0 * ms;
tau_r_inh = 0.2 * ms;
tau_f_inh = 1.0 * ms
tau_r_inh2u = 1.0 * ms;
tau_f_inh2u = 5.0 * ms
# som layer
tau_r_lateral = 0.1 * ms;
tau_f_lateral = 0.5 * ms
# (C) Maximum magnitudes of synaptic connection strength
w_syn_temporal_to_som_max = 2.2;
w_syn_u2inh_exc_max = 1.0;
w_syn_u2inh_inh_max = 1.0;
w_syn_inh2u_max = 100.0
w_syn_som_to_som_max = 1.0
# (D) Neighbourhood parameters, used in (6) and (7), for layer v (som)
a = 3.0;
b = 3.0;
X = 3.0;
X_ = 3.0
# (E) Learning parameter, used in (5)
    # A_plus - max synaptic strengthening, A_minus - max synaptic weakening; tau_plus, tau_minus - STDP time constants
A_plus = 0.0016;
A_minus = 0.0055;
tau_plus = 11;
tau_minus = 10
# used in (7)
T = 10.0;
power_n = 2.0
# used in (6)
pi = np.pi
# size of the self-organizing map
map_size = 10
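    # With N = 20 temporal neurons and a 10 x 10 map there are
    # N * map_size**2 = 2000 afferent synapses, which matches the size of the
    # random weight vector drawn for temporal_to_som_synapse further down.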
temporal_layer_neuron_equ = '''
dtime/dt = 1 / ms : 1
# inhibition connection to u layer
ds_inh2u/dt = (-s_inh2u)/tau_r_inh2u: 1
dw_inh2u/dt = (s_inh2u - w_inh2u)/tau_f_inh2u: 1
# membrane potential of u layer
dv/dt = (-v + I_ext - w_inh2u) / tau_m: 1
I_ext : 1
'''
inhibition_neuron_equ = '''
dtime/dt = 1 / ms : 1
# inhibition connection
# s_inh - internal variable
# w_inh - output potential
ds_inh/dt = (-s_inh)/tau_r_inh: 1
dw_inh/dt = (s_inh - w_inh)/tau_f_inh: 1
# excitation connection
# s_exc - internal variable
# w_exc - output potential
ds_exc/dt = (-s_exc)/tau_r_exc: 1
dw_exc/dt = (s_exc - w_exc)/tau_f_exc: 1
# diff equation membrane potential of inhibition neuron
dv/dt = (-v + w_exc - w_inh) / tau_m_inh: 1
'''
som_layer_neuron_equ = '''
dglobal_time/dt = 1 / ms : 1
dtime/dt = 1 / ms : 1
# Afferent connection (from temporal layer to som layer)
ds_afferent/dt = (-s_afferent)/tau_r_afferent: 1
dw_afferent/dt = (s_afferent - w_afferent)/tau_f_afferent: 1
# lateral connection
ds_lateral/dt = (-s_lateral)/tau_r_lateral: 1
dw_lateral/dt = (s_lateral - w_lateral)/tau_f_lateral: 1
# membrane potential of u layer
dv/dt = (-v + w_lateral + w_afferent) / tau_m_som: 1
'''
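    # All three models above use the same double-exponential synapse: each trace
    # s_* is incremented by incoming spikes (in the on_pre handlers below) and
    # decays with its rise constant tau_r, while w_* relaxes towards s_* with the
    # fall constant tau_f; the resulting w_* terms feed the membrane equations.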
temporal_layer = NeuronGroup(N, temporal_layer_neuron_equ, threshold='v>theta_u', method=diff_method,
reset='''v = theta_reset_u; time = 0''')
# temporal_layer.I_ext = potential_input
# inhibition neuron
inhibition_neuron = NeuronGroup(1, inhibition_neuron_equ, threshold='v>theta_u_inh', method=diff_method,
reset='''v = theta_reset_inh; time = 0''')
# self-organizing layer
som_layer = NeuronGroup(map_size * map_size, som_layer_neuron_equ, threshold='v>theta_som', method=diff_method,
reset='''v = theta_reset_som; time = 0''')
# v to inh neuron, excitation connection
u2inh_excitation = Synapses(temporal_layer, target=inhibition_neuron, method=diff_method,
on_pre='''
s_exc += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) ** time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_u2inh_exc_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) ** time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_u2inh_exc_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
u2inh_excitation.connect(i=np.arange(N), j=0)
u2inh_excitation.w_syn = 'rand() * w_syn_u2inh_exc_max'
# v to inh neuron, inhibition connection
u2inh_inhibition = Synapses(temporal_layer, target=inhibition_neuron, method=diff_method,
on_pre='''
s_inh += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) * time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_u2inh_inh_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) * time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_u2inh_inh_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
u2inh_inhibition.connect(i=np.arange(N), j=0)
u2inh_inhibition.w_syn = 'rand() * w_syn_u2inh_inh_max'
# inh neuron to v, inhibition connection
inh2u_inhibition = Synapses(inhibition_neuron, target=temporal_layer, method=diff_method,
on_pre='''
s_inh2u += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) * time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_inh2u_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) * time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_inh2u_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
inh2u_inhibition.connect(i=0, j=np.arange(N))
# inh2u_inhibition.w_syn = 'rand() * w_syn_inh2u_max'
inh2u_inhibition.w_syn = 0.5 * w_syn_inh2u_max
# som lateral connection
som_synapse = Synapses(som_layer, target=som_layer, method=diff_method,
on_pre='''
radius = X - (X - X_)/(1+(2**0.5 - 1)*((global_time/T)**(2 * power_n)))
y_pre = floor(i / map_size)
x_pre = i - y_pre * map_size
y_post = floor(j/map_size)
x_post = j - y_post * map_size
dist = (x_post - x_pre)**2 + (y_post - y_pre)**2
G1 = (1 + a) * exp(- dist/(radius**2)) / (2 * pi * radius**2)
G2 = a * exp(- dist/(b * radius)**2) / (2 * pi * (b * radius)**2)
w_syn = clip(G1 + G2, 0, w_syn_som_to_som_max)
s_lateral += w_syn
''',
on_post='''
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
''')
som_synapse.connect(condition='i!=j')
# som afferent connection
temporal_to_som_synapse = Synapses(temporal_layer, target=som_layer, method=diff_method,
on_pre='''
s_afferent += w_syn
A_pre = (- w_syn) * A_minus * (1 - 1/tau_minus) ** time_post
w_syn = clip(w_syn + plasticity * A_pre, 0, w_syn_temporal_to_som_max)
''',
on_post='''
A_post = exp(-w_syn) * A_plus * (1 - 1/tau_plus) * time_pre
w_syn = clip(w_syn + plasticity * A_post, 0, w_syn_temporal_to_som_max)
''',
model='''
w_syn : 1 # synaptic weight / synapse efficacy
plasticity : boolean (shared)
''')
temporal_to_som_synapse.connect()
temporal_to_som_synapse.w_syn = np.random.randint(low=40000, high=60000, size=N*map_size*map_size) \
* w_syn_temporal_to_som_max / 100000.0
# Visualization
som_spike_mon = SpikeMonitor(som_layer)
u_spike_mon = SpikeMonitor(temporal_layer)
# u_state_mon_v = StateMonitor(temporal_layer, 'v', record=True)
# u_state_mon_time = StateMonitor(temporal_layer, 'time', record=True)
# u_state_mon_w = StateMonitor(temporal_layer, 'w_inh2u', record=True)
inh_spike_mon = SpikeMonitor(inhibition_neuron)
# inh_state_mon = StateMonitor(inhibition_neuron, 'v', record=True)
# w_exc_neu_state = StateMonitor(inhibition_neuron, 'w_exc', record=True)
# w_inh_neu_state = StateMonitor(inhibition_neuron, 'w_inh', record=True)
#
# w_syn_u2inh_exc = StateMonitor(u2inh_excitation, 'w_syn', record=True)
defaultclock.dt = time_step * ms
step = 2
plasticity_state = False
u2inh_excitation.plasticity = plasticity_state
u2inh_inhibition.plasticity = plasticity_state
inh2u_inhibition.plasticity = plasticity_state
temporal_to_som_synapse.plasticity = True # plasticity_state
# simulation_time = 200
# run(simulation_time * ms, report='text')
# weight visualization
# simulation
simulation_time = 50
attempts = 5
dataset = ArtificialDataSet(500, int(N/10))
dataset = dataset.generate_set()
np.savetxt('dataset.txt', dataset, delimiter=';')
plt.scatter(dataset[:, 0], dataset[:, 1], s=5)
plt.show()
net_model = Network(collect())
net_model.store()
for vector in dataset:
for it in range(attempts):
net_model.restore()
print("Input vector: {0}, attempt: {1}".format(vector, it))
potential_input = rf.float_to_membrane_potential(vector)
potential_input = potential_input.flatten()
temporal_layer.I_ext = potential_input
net_model.run(simulation_time * ms, report='text')
net_model.store()
# visual
app = QtGui.QApplication([])
win = pg.GraphicsWindow(title="som")
win.resize(1000, 600)
win.setWindowTitle('brain')
# Enable antialiasing for prettier plots
pg.setConfigOptions(antialias=True)
p1 = win.addPlot(title="Region Selection")
p1.plot(u_spike_mon.t / ms, u_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p1.showGrid(x=True, y=True)
lr = pg.LinearRegionItem([0, simulation_time])
lr.setZValue(0)
p1.addItem(lr)
p2 = win.addPlot(title="Zoom on selected region")
p2.plot(u_spike_mon.t / ms, u_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p2.showGrid(x=True, y=True)
def updatePlot():
p2.setXRange(*lr.getRegion(), padding=0)
def updateRegion():
lr.setRegion(p2.getViewBox().viewRange()[0])
lr.sigRegionChanged.connect(updatePlot)
p2.sigXRangeChanged.connect(updateRegion)
updatePlot()
win.nextRow()
p3 = win.addPlot(title="Region Selection")
p3.plot(som_spike_mon.t / ms, som_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p3.showGrid(x=True, y=True)
lr1 = pg.LinearRegionItem([0, 10])
lr1.setZValue(0)
p3.addItem(lr1)
p4 = win.addPlot(title="Zoom on selected region")
p4.plot(som_spike_mon.t / ms, som_spike_mon.i[:], pen=None, symbol='o',
symbolPen=None, symbolSize=5, symbolBrush=(255, 255, 255, 255))
p4.showGrid(x=True, y=True)
def updatePlot2():
p4.setXRange(*lr1.getRegion(), padding=0)
def updateRegion2():
lr1.setRegion(p4.getViewBox().viewRange()[0])
lr1.sigRegionChanged.connect(updatePlot2)
p4.sigXRangeChanged.connect(updateRegion2)
updatePlot2()
u2som_syn_shape = temporal_to_som_synapse.w_syn[:].shape
picture = temporal_to_som_synapse.w_syn[:].reshape(N, int(u2som_syn_shape[0] / N))
np.savetxt('weights.txt', picture, delimiter=';')
win2 = QtGui.QMainWindow()
win2.resize(800, 800)
imv = pg.ImageView()
win2.setCentralWidget(imv)
win2.show()
win2.setWindowTitle("SOM weights")
imv.setImage(picture)
# subplot(421)
# # subplot(111)
# title("Temporal layer spikes")
# plot(u_spike_mon.t / ms, u_spike_mon.i, '.k')
# xlabel('Time (ms)')
# ylabel('Neuron index')
# grid(True)
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-1, N + 1, 1))
#
# # show()
#
# subplot(422)
# title("Inhibition neuron spikes")
# plot(inh_spike_mon.t / ms, inh_spike_mon.i, '.k')
# xlabel('Time (ms)')
# ylabel('Neuron index')
# grid(True)
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-1, 1, 1))
#
# subplot(423)
# title("u membrane potential")
# for item in u_state_mon_v:
# plot(u_state_mon_v.t / ms, item.v)
# # plot(u_state_mon_v.t / ms, u_state_mon_v[0].v)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(424)
# title("Inhibition neuron membrane potential")
# plot(inh_state_mon.t / ms, inh_state_mon[0].v)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(425)
# title("Excitation/inhibition interaction")
# plot(w_exc_neu_state.t / ms, w_exc_neu_state[0].w_exc, w_exc_neu_state.t / ms, w_inh_neu_state[0].w_inh,
# w_exc_neu_state.t / ms, w_exc_neu_state[0].w_exc - w_inh_neu_state[0].w_inh)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(426)
# title("Inhibition to u potential")
# plot(u_state_mon_w.t / ms, u_state_mon_w[0].w_inh2u)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# subplot(427)
# title("Synaptic Weight")
# for item in w_syn_u2inh_exc:
# plot(w_syn_u2inh_exc.t / ms, item.w_syn)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-0.1, 1.1, 0.1))
#
# subplot(428)
# title("Synaptic time pre spike")
# for item in u_state_mon_time:
# plot(w_syn_u2inh_exc.t / ms, item.time)
# xlabel('Time (ms)')
# ylabel('Potential')
# xticks(np.arange(0.0, simulation_time + step, step))
#
# show()
#
# # subplot(111)
# title("Som layer spikes")
# plot(som_spike_mon.t / ms, som_spike_mon.i, '.k')
# xlabel('Time (ms)')
# ylabel('Neuron index')
# grid(True)
# xticks(np.arange(0.0, simulation_time + step, step))
# yticks(np.arange(-1, map_size * map_size + 1, 1))
#
# show()
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| mit |
louisLouL/pair_trading | capstone_env/lib/python3.6/site-packages/matplotlib/tests/test_backend_pdf.py | 2 | 6994 | # -*- encoding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import six
import io
import os
import tempfile
import pytest
import numpy as np
from matplotlib import checkdep_usetex, cm, rcParams
from matplotlib.backends.backend_pdf import PdfPages
from matplotlib import pyplot as plt
from matplotlib.testing.determinism import (_determinism_source_date_epoch,
_determinism_check)
from matplotlib.testing.decorators import image_comparison
from matplotlib import dviread
from matplotlib.testing.compare import compare_images
import matplotlib as mpl
needs_usetex = pytest.mark.xfail(
not checkdep_usetex(True),
reason="This test needs a TeX installation")
@image_comparison(baseline_images=['pdf_use14corefonts'],
extensions=['pdf'])
def test_use14corefonts():
rcParams['pdf.use14corefonts'] = True
rcParams['font.family'] = 'sans-serif'
rcParams['font.size'] = 8
rcParams['font.sans-serif'] = ['Helvetica']
rcParams['pdf.compression'] = 0
text = '''A three-line text positioned just above a blue line
and containing some French characters and the euro symbol:
"Merci pépé pour les 10 €"'''
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_title('Test PDF backend with option use14corefonts=True')
ax.text(0.5, 0.5, text, horizontalalignment='center',
verticalalignment='bottom',
fontsize=14)
ax.axhline(0.5, linewidth=0.5)
def test_type42():
rcParams['pdf.fonttype'] = 42
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
fig.savefig(io.BytesIO())
def test_multipage_pagecount():
with PdfPages(io.BytesIO()) as pdf:
assert pdf.get_pagecount() == 0
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
fig.savefig(pdf, format="pdf")
assert pdf.get_pagecount() == 1
pdf.savefig()
assert pdf.get_pagecount() == 2
def test_multipage_keep_empty():
from matplotlib.backends.backend_pdf import PdfPages
from tempfile import NamedTemporaryFile
# test empty pdf files
# test that an empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
assert os.path.exists(filename)
os.remove(filename)
    # test that an empty pdf deletes itself afterwards with keep_empty=False
with PdfPages(filename, keep_empty=False) as pdf:
pass
assert not os.path.exists(filename)
# test pdf files with content, they should never be deleted
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot([1, 2, 3])
# test that a non-empty pdf is left behind with keep_empty=True (default)
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
# test that a non-empty pdf is left behind with keep_empty=False
with NamedTemporaryFile(delete=False) as tmp:
with PdfPages(tmp, keep_empty=False) as pdf:
filename = pdf._file.fh.name
pdf.savefig()
assert os.path.exists(filename)
os.remove(filename)
def test_composite_image():
# Test that figures can be saved with and without combining multiple images
# (on a single set of axes) into a single composite image.
X, Y = np.meshgrid(np.arange(-5, 5, 1), np.arange(-5, 5, 1))
Z = np.sin(Y ** 2)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.set_xlim(0, 3)
ax.imshow(Z, extent=[0, 1, 0, 1])
ax.imshow(Z[::-1], extent=[2, 3, 0, 1])
plt.rcParams['image.composite_image'] = True
with PdfPages(io.BytesIO()) as pdf:
fig.savefig(pdf, format="pdf")
assert len(pdf._file._images) == 1
plt.rcParams['image.composite_image'] = False
with PdfPages(io.BytesIO()) as pdf:
fig.savefig(pdf, format="pdf")
assert len(pdf._file._images) == 2
def test_source_date_epoch():
"""Test SOURCE_DATE_EPOCH support for PDF output"""
_determinism_source_date_epoch("pdf", b"/CreationDate (D:20000101000000Z)")
def test_determinism_plain():
"""Test for reproducible PDF output: simple figure"""
_determinism_check('', format="pdf")
def test_determinism_images():
"""Test for reproducible PDF output: figure with different images"""
_determinism_check('i', format="pdf")
def test_determinism_hatches():
"""Test for reproducible PDF output: figure with different hatches"""
_determinism_check('h', format="pdf")
def test_determinism_markers():
"""Test for reproducible PDF output: figure with different markers"""
_determinism_check('m', format="pdf")
def test_determinism_all():
"""Test for reproducible PDF output"""
_determinism_check(format="pdf")
@image_comparison(baseline_images=['hatching_legend'],
extensions=['pdf'])
def test_hatching_legend():
"""Test for correct hatching on patches in legend"""
fig = plt.figure(figsize=(1, 2))
a = plt.Rectangle([0, 0], 0, 0, facecolor="green", hatch="XXXX")
b = plt.Rectangle([0, 0], 0, 0, facecolor="blue", hatch="XXXX")
fig.legend([a, b, a, b], ["", "", "", ""])
@image_comparison(baseline_images=['grayscale_alpha'],
extensions=['pdf'])
def test_grayscale_alpha():
"""Masking images with NaN did not work for grayscale images"""
x, y = np.ogrid[-2:2:.1, -2:2:.1]
dd = np.exp(-(x**2 + y**2))
dd[dd < .1] = np.nan
fig, ax = plt.subplots()
ax.imshow(dd, interpolation='none', cmap='gray_r')
ax.set_xticks([])
ax.set_yticks([])
# This test tends to hit a TeX cache lock on AppVeyor.
@pytest.mark.flaky(reruns=3)
@needs_usetex
def test_missing_psfont(monkeypatch):
"""An error is raised if a TeX font lacks a Type-1 equivalent"""
def psfont(*args, **kwargs):
return dviread.PsFont(texname='texfont', psname='Some Font',
effects=None, encoding=None, filename=None)
monkeypatch.setattr(dviread.PsfontsMap, '__getitem__', psfont)
rcParams['text.usetex'] = True
fig, ax = plt.subplots()
ax.text(0.5, 0.5, 'hello')
with tempfile.TemporaryFile() as tmpfile, pytest.raises(ValueError):
fig.savefig(tmpfile, format='pdf')
@pytest.mark.style('default')
def test_pdf_savefig_when_color_is_none(tmpdir):
fig, ax = plt.subplots()
plt.axis('off')
ax.plot(np.sin(np.linspace(-5, 5, 100)), 'v', c='none')
actual_image = tmpdir.join('figure.pdf')
expected_image = tmpdir.join('figure.eps')
fig.savefig(str(actual_image), format='pdf')
fig.savefig(str(expected_image), format='eps')
result = compare_images(str(actual_image), str(expected_image), 0)
assert result is None
| mit |
miguelzuma/montepython_zuma | montepython/analyze.py | 1 | 95056 | """
.. module:: analyze
:synopsis: Extract data from chains and produce plots
.. moduleauthor:: Karim Benabed <benabed@iap.fr>
.. moduleauthor:: Benjamin Audren <benjamin.audren@epfl.ch>
Collection of functions needed to analyze the Markov chains.
This module defines as well a class :class:`Information`, that stores useful
quantities, and shortens the argument passing between the functions.
.. note::
Some of the methods used in this module are directly adapted from the
`CosmoPmc <http://www.cosmopmc.info>`_ code from Kilbinger et. al.
"""
import os
import math
import numpy as np
from itertools import count
# The root plotting module, to change options like font sizes, etc...
import matplotlib
# The following line suppresses the need for an X server
matplotlib.use("Agg")
# Module for handling display
import matplotlib.pyplot as plt
# Module to handle warnings from matplotlib
import warnings
import importlib
import io_mp
from itertools import ifilterfalse
from itertools import ifilter
import scipy.ndimage
# Defined to remove the burnin for all the points that were produced before the
# first time where -log-likelihood <= min-minus-log-likelihood+LOG_LKL_CUTOFF
LOG_LKL_CUTOFF = 3
NUM_COLORS = 6
def analyze(command_line):
"""
Main function, does the entire analysis.
It calls in turn all the other routines from this module. To limit the
    arguments of each function to a reasonable size, a :class:`Information`
instance is used. This instance is initialized in this function, then
appended by the other routines.
"""
# Check if the scipy module has the interpolate method correctly
# installed (should be the case on every linux distribution with
# standard numpy)
try:
from scipy.interpolate import interp1d
Information.has_interpolate_module = True
except ImportError:
Information.has_interpolate_module = False
warnings.warn(
'No cubic interpolation done (no interpolate method found ' +
'in scipy), only linear')
# Determine how many different folders are asked through the 'info'
# command, and create as many Information instances
files = separate_files(command_line.files)
# Create an instance of the Information class for each subgroup found in
# the previous function. They will each hold all relevant information, and
# be used as a compact way of exchanging information between functions
information_instances = []
for item in files:
info = Information(command_line)
information_instances.append(info)
# Prepare the files, according to the case, load the log.param, and
# prepare the output (plots folder, .covmat, .info and .log files).
# After this step, info.files will contain all chains.
status = prepare(item, info)
# If the preparation step generated new files (for instance,
# translating from NS or CH to Markov Chains) this routine should stop
# now.
if not status:
return
# Compute the mean, maximum of likelihood, 1-sigma variance for this
# main folder. This will create the info.chain object, which contains
# all the points computed stacked in one big array.
convergence(info)
# check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
try:
# command_line.update is defined when called by the mcmc loop
command_line.update
except:
# in case it was not defined (i.e. when analyze() is called directly by user), set it to False
command_line.update = 0
# compute covariance matrix, excepted when we are in update mode and convergence is too bad or too good
if command_line.update and (np.amax(info.R) > 3. or np.amax(info.R) < 0.4):
print '--> Not computing covariance matrix'
else:
try:
if command_line.want_covmat:
print '--> Computing covariance matrix'
info.covar = compute_covariance_matrix(info)
# Writing it out in name_of_folder.covmat
io_mp.write_covariance_matrix(
info.covar, info.backup_names, info.cov_path)
except:
print '--> Computing covariance matrix failed'
pass
# Store an array, sorted_indices, containing the list of indices
# corresponding to the line with the highest likelihood as the first
# element, and then as decreasing likelihood
info.sorted_indices = info.chain[:, 1].argsort(0)
# Writing the best-fit model in name_of_folder.bestfit
bestfit_line = [elem*info.scales[i, i] for i, elem in
enumerate(info.chain[info.sorted_indices[0], 2:])]
io_mp.write_bestfit_file(bestfit_line, info.backup_names,
info.best_fit_path)
if not command_line.minimal:
# Computing 1,2 and 3-sigma errors, and plot. This will create the
# triangle and 1d plot by default.
compute_posterior(information_instances)
print '--> Writing .info and .tex files'
for info in information_instances:
info.write_information_files()
# when called by MCMC in update mode, return R values so that they can be written for information in the chains
if command_line.update:
return info.R
def prepare(files, info):
"""
Scan the whole input folder, and include all chains in it.
Since you can decide to analyze some file(s), or a complete folder, this
    function first needs to distinguish between the two cases.
.. warning::
If someday you change the way the chains are named, remember to change
here too, because this routine assumes the chains have a double
underscore in their names.
.. note::
Only files ending with .txt will be selected, to keep compatibility
with CosmoMC format
.. note::
New in version 2.0.0: if you ask to analyze a Nested Sampling
sub-folder (i.e. something that ends in `NS` with capital letters), the
analyze module will translate the output from Nested Sampling to
standard chains for Monte Python, and stops. You can then run the
`-- info` flag on the whole folder. **This procedure is not necessary
if the run was complete, but only if the Nested Sampling run was killed
before completion**.
Parameters
----------
files : list
list of potentially only one element, containing the files to analyze.
This can be only one file, or the encompassing folder, files
info : Information instance
Used to store the result
"""
# First test if the folder is a Nested Sampling or CosmoHammer folder. If
# so, call the module's own routine through the clean conversion function,
# which will translate the output of this other sampling into MCMC chains
# that can then be analyzed.
modules = ['nested_sampling', 'cosmo_hammer']
tags = ['NS', 'CH']
for module_name, tag in zip(modules, tags):
action_done = clean_conversion(module_name, tag, files[0])
if action_done:
return False
# If the input command was an entire folder, then grab everything in it.
# Too small files (below 600 octets) and subfolders are automatically
# removed.
folder, files, basename = recover_folder_and_files(files)
info.files = files
info.folder = folder
info.basename = basename
# Check if the log.param file exists
parameter_file_path = os.path.join(folder, 'log.param')
if os.path.isfile(parameter_file_path):
if os.path.getsize(parameter_file_path) == 0:
raise io_mp.AnalyzeError(
"The log param file %s " % os.path.join(folder, 'log.param') +
"seems empty")
else:
raise io_mp.AnalyzeError(
"The log param file %s " % os.path.join(folder, 'log.param') +
"is missing in the analyzed folder?")
# If the folder has no subdirectory, then go for a simple infoname,
# otherwise, call it with the last name
basename = (os.path.basename(folder) if os.path.basename(folder) != '.'
else os.path.basename(os.path.abspath(
os.path.join(folder, '..'))))
info.v_info_path = os.path.join(folder, basename+'.v_info')
info.h_info_path = os.path.join(folder, basename+'.h_info')
info.tex_path = os.path.join(folder, basename+'.tex')
info.cov_path = os.path.join(folder, basename+'.covmat')
info.log_path = os.path.join(folder, basename+'.log')
info.best_fit_path = os.path.join(folder, basename+'.bestfit')
info.param_path = parameter_file_path
return True
def convergence(info):
"""
Compute convergence for the desired chains, using Gelman-Rubin diagnostic
Chains have been stored in the info instance of :class:`Information`. Note
that the G-R diagnostic can be computed for a single chain, albeit it will
most probably give absurd results. To do so, it separates the chain into
three subchains.
"""
# Recovering parameter names and scales, creating tex names,
extract_parameter_names(info)
# Now that the number of parameters is known, the array containing bounds
# can be initialised
info.bounds = np.zeros((len(info.ref_names), len(info.levels), 2))
# Circle through all files to find the global maximum of likelihood
#print '--> Finding global maximum of likelihood'
find_maximum_of_likelihood(info)
# Restarting the circling through files, this time removing the burnin,
# given the maximum of likelihood previously found and the global variable
# LOG_LKL_CUTOFF. spam now contains all the accepted points that were
# explored once the chain moved within min_minus_lkl - LOG_LKL_CUTOFF.
# If the user asks for a keep_fraction <1, this is also the place where
# a fraction (1-keep_fraction) is removed at the beginning of each chain.
#print '--> Removing burn-in'
spam = remove_bad_points(info)
info.remap_parameters(spam)
# Now that the list spam contains all the different chains removed of
# their respective burn-in, proceed to the convergence computation
# 2D arrays for mean and var, one column will contain the total (over
# all chains) mean (resp. variance), and each other column the
# respective chain mean (resp. chain variance). R only contains the
# values for each parameter. Therefore, mean and var will have len(spam)+1
# as a first dimension
mean = np.zeros((len(spam)+1, info.number_parameters))
var = np.zeros((len(spam)+1, info.number_parameters))
R = np.zeros(info.number_parameters)
# Store the total number of points, and the total in each chain
total = np.zeros(len(spam)+1)
for j in xrange(len(spam)):
total[j+1] = spam[j][:, 0].sum()
total[0] = total[1:].sum()
# Compute mean and variance for each chain
print '--> Computing mean values'
compute_mean(mean, spam, total)
print '--> Computing variance'
compute_variance(var, mean, spam, total)
    print '--> Computing convergence criterion (Gelman-Rubin)'
# Gelman Rubin Diagnostic:
# Computes a quantity linked to the ratio of the mean of the variances of
# the different chains (within), and the variance of the means (between)
# Note: This is not strictly speaking the Gelman Rubin test, defined for
# same-length MC chains. Our quantity is defined without the square root,
# which should not change much the result: a small sqrt(R) will still be a
# small R. The same convention is used in CosmoMC, except for the weighted
# average: we decided to do the average taking into account that longer
# chains should count more
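    # In formulas, writing N_j for the weighted length of chain j and N for the
    # total, the quantity computed below for each parameter i is (a comment-only
    # sketch of the loop that follows):
    #     W_i = sum_j N_j * var_{j,i} / N
    #     B_i = sum_j N_j * (mean_{j,i} - mean_{tot,i})**2 / (N - 1)
    #     R_i = B_i / W_i
    # so a small R_i means that the spread of the chain means is small compared
    # to the typical within-chain variance.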
within = 0
between = 0
for i in xrange(np.shape(mean)[1]):
for j in xrange(len(spam)):
within += total[j+1]*var[j+1, i]
between += total[j+1]*(mean[j+1, i]-mean[0, i])**2
within /= total[0]
between /= (total[0]-1)
R[i] = between/within
if i == 0:
print ' -> R-1 is %.6f' % R[i], '\tfor ', info.ref_names[i]
else:
print ' %.6f' % R[i], '\tfor ', info.ref_names[i]
# Log finally the total number of steps, and absolute loglikelihood
with open(info.log_path, 'a') as log:
log.write("--> Total number of steps: %d\n" % (
info.steps))
log.write("--> Total number of accepted steps: %d\n" % (
info.accepted_steps))
log.write("--> Minimum of -logLike : %.2f" % (
info.min_minus_lkl))
# Store the remaining members in the info instance, for further writing to
# files, storing only the mean and total of all the chains taken together
info.mean = mean[0]
info.R = R
info.total = total[0]
# Create the main chain, which consists in all elements of spam
# put together. This will serve for the plotting.
info.chain = np.vstack(spam)
def compute_posterior(information_instances):
"""
    computes the marginalized posterior distributions, and optionally plots
them
Parameters
----------
information_instances : list
        list of information objects, initialised on the given folders, or list
        of files, given in input. For each of these instances, plot the 1d and 2d
        posterior distributions, depending on the flags stored in the instances,
        coming from command line arguments or read from a file.
"""
# For convenience, store as `conf` the first element of the list
# information_instances, since it will be called often to check for
# configuration parameters
conf = information_instances[0]
# Pre configuration of the output, note that changes to the font size
# will occur later on as well, to obtain a nice scaling.
matplotlib.rc('text', usetex=True)
matplotlib.rc('font', size=11)
matplotlib.rc('xtick', labelsize='8')
matplotlib.rc('ytick', labelsize='8')
# Recover max and min values for each instance, defining the a priori place
# of ticks (in case of a comparison, this should change)
for info in information_instances:
info.define_ticks()
# If plots/ folder in output folder does not exist, create it
if os.path.isdir(os.path.join(info.folder, 'plots')) is False:
os.mkdir(os.path.join(info.folder, 'plots'))
# Determine the total number of parameters to plot, based on the list
# without duplicates of the plotted parameters of all information instances
plotted_parameters = []
# For printing not in latex
ref_names = []
for info in information_instances:
for index, name in enumerate(info.plotted_parameters):
if name not in plotted_parameters:
plotted_parameters.append(name)
ref_names.append(info.ref_names[index])
if len(plotted_parameters) == 0:
raise io_mp.AnalyzeError(
"You provided no parameters to analyze, probably by selecting"
" wrong parameters names in the '--extra' file.")
# Find the appropriate number of columns and lines for the 1d posterior
# plot
if conf.num_columns_1d == None:
num_columns = int(round(math.sqrt(len(plotted_parameters))))
else:
num_columns = conf.num_columns_1d
num_lines = int(math.ceil(len(plotted_parameters)*1.0/num_columns))
# For special needs, you can impose here a different number of columns and lines in the 1d plot
# Here is a commented example:
# if (len(plotted_parameters) == 10):
# num_columns = 5
# num_lines = 2
# Create the figures
# which will be 3*3 inches per subplot, quickly growing!
if conf.plot:
fig1d = plt.figure(num=1, figsize=(
3*num_columns,
3*num_lines), dpi=80)
if conf.plot_2d:
fig2d = plt.figure(num=2, figsize=(
3*len(plotted_parameters),
3*len(plotted_parameters)), dpi=80)
# Create the name of the files, concatenating the basenames with
# underscores.
file_name = "_".join(
[info.basename for info in information_instances])
# Loop over all the plotted parameters
# There will be two indices at all time, the one running over the plotted
# parameters, `index`, and the one corresponding to the actual column in
# the actual file, `native_index`. For instance, if you try to plot only
# two columns of a several columns file, index will vary from 0 to 1, but
# the corresponding native indices might be anything.
# Obviously, since plotted parameters contain potentially names not
# contained in some files (in case of a comparison), native index might be
# undefined.
# Defined the legends object, which will store the plot style, to display
# at the level of the figure
legends = [None for _ in range(len(information_instances))]
if not conf.legendnames:
legend_names = [info.basename.replace('_', ' ')
for info in information_instances]
else:
legend_names = conf.legendnames
print '-----------------------------------------------'
for index, name in enumerate(plotted_parameters):
# Adding the subplots to the respective figures, this will correspond
# to the diagonal on the triangle plot.
if conf.plot_2d:
ax2d = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
index*(len(plotted_parameters)+1)+1,
yticks=[])
if conf.plot:
ax1d = fig1d.add_subplot(
num_lines, num_columns, index+1, yticks=[])
# check for each instance if the name is part of the list of plotted
# parameters, and if yes, store the native_index. If not, store a flag
# to ignore any further plotting or computing issues concerning this
# particular instance.
for info in information_instances:
try:
info.native_index = info.ref_names.index(name)
info.ignore_param = False
standard_name = info.backup_names[info.native_index]
except ValueError:
info.ignore_param = True
# The limits might have been enforced by the user
if name in conf.force_limits.iterkeys():
x_span = conf.force_limits[name][1]-conf.force_limits[name][0]
            tick_min = conf.force_limits[name][0] + 0.1*x_span
            tick_max = conf.force_limits[name][1] - 0.1*x_span
ticks = np.linspace(tick_min,
tick_max,
info.ticknumber)
for info in information_instances:
if not info.ignore_param:
info.x_range[info.native_index] = conf.force_limits[name]
info.ticks[info.native_index] = ticks
# otherwise, find them automatically
else:
adjust_ticks(name, information_instances)
print ' -> Computing histograms for ', name
for info in information_instances:
if not info.ignore_param:
# 1D posterior normalised to P_max=1 (first step)
#
# simply the histogram from the chains, with few bins
#
info.hist, info.bin_edges = np.histogram(
info.chain[:, info.native_index+2], bins=info.bins,
weights=info.chain[:, 0], normed=False, density=False)
info.hist = info.hist/info.hist.max()
info.bincenters = 0.5*(info.bin_edges[1:]+info.bin_edges[:-1])
# 1D posterior normalised to P_max=1 (second step)
#
# returns a histogram still normalised to one, but with a ten times finer sampling;
# >> first, tries a method with spline interpolation between bin centers and extrapolation at the edges
# >> if it fails, a simpler and more robust method of linear interpolation between bin centers is used
# >> if the interpolation module is not installed, this step keeps the same posterior
#
info.interp_hist, info.interp_grid = cubic_interpolation(
info, info.hist, info.bincenters)
# minimum credible interval (method by Jan Haman). Fails for
# multimodal histograms
bounds = minimum_credible_intervals(info)
info.bounds[info.native_index] = bounds
# plotting
for info in information_instances:
if not info.ignore_param:
# 1D posterior normalised to P_max=1 (third step, used only for plotting)
#
# apply gaussian smoothing
#
# factor by which the grid has been made thinner (10 means 10 times more bins)
interpolation_factor = float(len(info.interp_grid))/float(len(info.bincenters))
# factor for gaussian smoothing
sigma = interpolation_factor*info.gaussian_smoothing
# smooth
smoothed_interp_hist = scipy.ndimage.filters.gaussian_filter(info.interp_hist,sigma)
# re-normalised
smoothed_interp_hist = smoothed_interp_hist/smoothed_interp_hist.max()
if conf.plot_2d:
##################################################
# plot 1D posterior in diagonal of triangle plot #
##################################################
plot = ax2d.plot(
info.interp_grid,
smoothed_interp_hist,
linewidth=info.line_width, ls='-',
color = info.MP_color_cycle[info.id][1],
# the [1] picks up the color of the 68% contours
# with [0] you would get that of the 95% contours
alpha = info.alphas[info.id])
legends[info.id] = plot[0]
ax2d.set_xticks(info.ticks[info.native_index])
if conf.legend_style == 'top':
ax2d.set_title(
'%s=$%.{0}g^{{+%.{0}g}}_{{%.{0}g}}$'.format(
info.decimal) % (
info.tex_names[info.native_index],
info.mean[info.native_index],
info.bounds[info.native_index, 0, -1],
info.bounds[info.native_index, 0, 0]),
fontsize=info.fontsize)
ax2d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
elif conf.legend_style == 'sides':
# Except for the last 1d plot (bottom line), don't
# print ticks
if index == len(plotted_parameters)-1:
ax2d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
ax2d.tick_params('x',direction='inout')
ax2d.set_xlabel(
info.tex_names[info.native_index],
fontsize=info.fontsize)
else:
ax2d.set_xticklabels([])
ax2d.axis([info.x_range[info.native_index][0],
info.x_range[info.native_index][1],
0, 1.05])
if conf.plot:
if conf.short_title_1d:
ax1d.set_title(
'%s'.format(info.decimal) % (
info.tex_names[info.native_index]),
fontsize=info.fontsize)
else:
# Note the use of double curly brackets {{ }} to produce
# the desired LaTeX output. This is necessary because the
# format function would otherwise understand single
# brackets as fields.
ax1d.set_title(
'%s=$%.{0}g^{{+%.{0}g}}_{{%.{0}g}}$'.format(
info.decimal) % (
info.tex_names[info.native_index],
info.mean[info.native_index],
info.bounds[info.native_index, 0, -1],
info.bounds[info.native_index, 0, 0]),
fontsize=info.fontsize)
ax1d.set_xticks(info.ticks[info.native_index])
ax1d.set_xticklabels(
['%.{0}g'.format(info.decimal) % s
for s in info.ticks[info.native_index]],
fontsize=info.ticksize)
ax1d.axis([info.x_range[info.native_index][0],
info.x_range[info.native_index][1],
0, 1.05])
# Execute some customisation scripts for the 1d plots
if (info.custom1d != []):
for elem in info.custom1d:
execfile('plot_files/'+elem)
##################################################
# plot 1D posterior in 1D plot #
##################################################
ax1d.plot(
info.interp_grid,
# gaussian filtered 1d posterior:
smoothed_interp_hist,
# raw 1d posterior:
#info.interp_hist,
lw=info.line_width, ls='-',
color = info.MP_color_cycle[info.id][1],
# the [1] picks up the color of the 68% contours
# with [0] you would get that of the 95% contours
alpha = info.alphas[info.id])
# uncomment if you want to see the raw points from the histogram
                # (to check whether the interpolation and smoothing generated artefacts)
#ax1d.plot(
# info.bincenters,
# info.hist,
# 'ro')
if conf.mean_likelihood:
for info in information_instances:
if not info.ignore_param:
try:
# 1D mean likelihood normalised to P_max=1 (first step)
#
                    # simply the histogram from the chains, weighted by multiplicity*likelihood
#
lkl_mean, _ = np.histogram(
info.chain[:, info.native_index+2],
bins=info.bin_edges,
normed=False,
weights=np.exp(
conf.min_minus_lkl-info.chain[:, 1])*info.chain[:, 0])
lkl_mean /= lkl_mean.max()
# 1D mean likelihood normalised to P_max=1 (second step)
#
# returns a histogram still normalised to one, but with a ten times finer sampling;
# >> first, tries a method with spline interpolation between bin centers and extrapolation at the edges
# >> if it fails, a simpler and more robust method of linear interpolation between bin centers is used
# >> if the interpolation module is not installed, this step keeps the same posterior
#
interp_lkl_mean, interp_grid = cubic_interpolation(
info, lkl_mean, info.bincenters)
# 1D mean likelihood normalised to P_max=1 (third step, used only for plotting)
#
# apply gaussian smoothing
#
# smooth
smoothed_interp_lkl_mean = scipy.ndimage.filters.gaussian_filter(interp_lkl_mean,sigma)
# re-normalised
smoothed_interp_lkl_mean = smoothed_interp_lkl_mean/smoothed_interp_lkl_mean.max()
# Execute some customisation scripts for the 1d plots
if (info.custom1d != []):
for elem in info.custom1d:
execfile('plot_files/'+elem)
########################################################
# plot 1D mean likelihood in diagonal of triangle plot #
########################################################
if conf.plot_2d:
# raw mean likelihoods:
                        #ax2d.plot(info.bincenters, lkl_mean,
# ls='--', lw=conf.line_width,
# color = info.MP_color_cycle[info.id][1],
# alpha = info.alphas[info.id])
# smoothed and interpolated mean likelihoods:
ax2d.plot(interp_grid, smoothed_interp_lkl_mean,
ls='--', lw=conf.line_width,
color = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id])
########################################################
# plot 1D mean likelihood in 1D plot #
########################################################
if conf.plot:
# raw mean likelihoods:
#ax1d.plot(info.bincenters, lkl_mean,
# ls='--', lw=conf.line_width,
# color = info.MP_color_cycle[info.id][1],
# alpha = info.alphas[info.id])
# smoothed and interpolated mean likelihoods:
ax1d.plot(interp_grid, smoothed_interp_lkl_mean,
ls='--', lw=conf.line_width,
color = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id])
except:
print 'could not find likelihood contour for ',
print info.ref_parameters[info.native_index]
if conf.subplot is True:
if conf.plot_2d:
extent2d = ax2d.get_window_extent().transformed(
fig2d.dpi_scale_trans.inverted())
fig2d.savefig(os.path.join(
conf.folder, 'plots', file_name+'.'+conf.extension),
bbox_inches=extent2d.expanded(1.1, 1.4))
if conf.plot:
extent1d = ax1d.get_window_extent().transformed(
fig1d.dpi_scale_trans.inverted())
fig1d.savefig(os.path.join(
conf.folder, 'plots', file_name+'.'+conf.extension),
bbox_inches=extent1d.expanded(1.1, 1.4))
# Store the function in a file
for info in information_instances:
if not info.ignore_param:
hist_file_name = os.path.join(
info.folder, 'plots',
info.basename+'_%s.hist' % (
standard_name))
write_histogram(hist_file_name,
info.interp_grid, info.interp_hist)
# Now do the rest of the triangle plot
if conf.plot_2d:
for second_index in xrange(index):
second_name = plotted_parameters[second_index]
for info in information_instances:
if not info.ignore_param:
try:
info.native_second_index = info.ref_names.index(
plotted_parameters[second_index])
info.has_second_param = True
second_standard_name = info.backup_names[
info.native_second_index]
except ValueError:
info.has_second_param = False
else:
info.has_second_param = False
ax2dsub = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
(index)*len(plotted_parameters)+second_index+1)
for info in information_instances:
if info.has_second_param:
ax2dsub.axis([info.x_range[info.native_second_index][0],
info.x_range[info.native_second_index][1],
info.x_range[info.native_index][0],
info.x_range[info.native_index][1]])
# 2D likelihood (first step)
#
# simply the histogram from the chains, with few bins only
#
info.n, info.xedges, info.yedges = np.histogram2d(
info.chain[:, info.native_index+2],
info.chain[:, info.native_second_index+2],
weights=info.chain[:, 0],
bins=(info.bins, info.bins),
normed=False)
info.extent = [
info.x_range[info.native_second_index][0],
info.x_range[info.native_second_index][1],
info.x_range[info.native_index][0],
info.x_range[info.native_index][1]]
info.x_centers = 0.5*(info.xedges[1:]+info.xedges[:-1])
info.y_centers = 0.5*(info.yedges[1:]+info.yedges[:-1])
# 2D likelihood (second step)
#
# like for 1D, interpolate to get a finer grid
# TODO: we should not only interpolate between bin centers, but also extrapolate between side bin centers and bin edges
#
interp_y_centers = scipy.ndimage.zoom(info.y_centers,info.interpolation_smoothing, mode='reflect')
interp_x_centers = scipy.ndimage.zoom(info.x_centers,info.interpolation_smoothing, mode='reflect')
interp_likelihood = scipy.ndimage.zoom(info.n,info.interpolation_smoothing, mode='reflect')
# 2D likelihood (third step)
#
# gaussian smoothing
#
sigma = info.interpolation_smoothing*info.gaussian_smoothing
interp_smoothed_likelihood = scipy.ndimage.filters.gaussian_filter(interp_likelihood,[sigma,sigma], mode='reflect')
# Execute some customisation scripts for the 2d contour plots
if (info.custom2d != []):
for elem in info.custom2d:
execfile('plot_files/'+elem)
# plotting contours, using the ctr_level method (from Karim
# Benabed). Note that only the 1 and 2 sigma contours are
# displayed (due to the line with info.levels[:2])
try:
###########################
# plot 2D filled contours #
###########################
if not info.contours_only:
contours = ax2dsub.contourf(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent,
levels=ctr_level(
interp_smoothed_likelihood,
info.levels[:2]),
zorder=4,
colors = info.MP_color_cycle[info.id],
alpha=info.alphas[info.id])
# now add a thin darker line
# around the 95% contour
ax2dsub.contour(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent,
levels=ctr_level(
interp_smoothed_likelihood,
info.levels[1:2]),
zorder=4,
colors = info.MP_color_cycle[info.id][1],
alpha = info.alphas[info.id],
linewidths=1)
###########################
# plot 2D contours #
###########################
if info.contours_only:
contours = ax2dsub.contour(
interp_y_centers,
interp_x_centers,
interp_smoothed_likelihood,
extent=info.extent, levels=ctr_level(
interp_smoothed_likelihood,
info.levels[:2]),
zorder=4,
colors = info.MP_color_cycle[info.id],
alpha = info.alphas[info.id],
linewidths=info.line_width)
except Warning:
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot" % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]))
except ValueError as e:
if str(e) == "Contour levels must be increasing":
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot. \n " % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]) +
'The error is: "Contour levels must be increasing"' +
" but " + str(ctr_level(info.n, info.levels[:2])) +
" were found. This may happen when most" +
" points fall in the same bin.")
else:
warnings.warn(
"The routine could not find the contour of the " +
"'%s-%s' 2d-plot" % (
info.plotted_parameters[info.native_index],
info.plotted_parameters[info.native_second_index]))
ax2dsub.set_xticks(info.ticks[info.native_second_index])
ax2dsub.set_yticks(info.ticks[info.native_index])
ax2dsub.tick_params('both',direction='inout',top=True,bottom=True,left=True,right=True)
if index == len(plotted_parameters)-1:
ax2dsub.set_xticklabels(
['%.{0}g'.format(info.decimal) % s for s in
info.ticks[info.native_second_index]],
fontsize=info.ticksize)
if conf.legend_style == 'sides':
ax2dsub.set_xlabel(
info.tex_names[info.native_second_index],
fontsize=info.fontsize)
else:
ax2dsub.set_xticklabels([''])
ax2dsub.set_yticks(info.ticks[info.native_index])
if second_index == 0:
ax2dsub.set_yticklabels(
['%.{0}g'.format(info.decimal) % s for s in
info.ticks[info.native_index]],
fontsize=info.ticksize)
else:
ax2dsub.set_yticklabels([''])
if conf.legend_style == 'sides':
if second_index == 0:
ax2dsub.set_ylabel(
info.tex_names[info.native_index],
fontsize=info.fontsize)
if conf.subplot is True:
# Store the individual 2d plots.
if conf.plot_2d:
area = ax2dsub.get_window_extent().transformed(
fig2d.dpi_scale_trans.inverted())
# Pad the saved area by 10% in the x-direction and 20% in
# the y-direction
fig2d.savefig(os.path.join(
conf.folder, 'plots',
file_name+'_2d_%s-%s.%s' % (
standard_name, second_standard_name,
conf.extension)),
bbox_inches=area.expanded(1.4, 1.4))
# store the coordinates of the points for further
# plotting.
store_contour_coordinates(
conf, standard_name, second_standard_name, contours)
for info in information_instances:
if not info.ignore_param and info.has_second_param:
info.hist_file_name = os.path.join(
info.folder, 'plots',
'{0}_2d_{1}-{2}.hist'.format(
info.basename,
standard_name,
second_standard_name))
write_histogram_2d(
info.hist_file_name, info.x_centers, info.y_centers,
info.extent, info.n)
print '-----------------------------------------------'
if conf.plot:
print '--> Saving figures to .{0} files'.format(info.extension)
plot_name = '-vs-'.join([os.path.split(elem.folder)[-1]
for elem in information_instances])
if conf.plot_2d:
# Legend of triangle plot
if ((conf.plot_legend_2d == None) and (len(legends) > 1)) or (conf.plot_legend_2d == True):
# Create a virtual subplot in the top right corner,
# just to be able to anchor the legend nicely
ax2d = fig2d.add_subplot(
len(plotted_parameters),
len(plotted_parameters),
len(plotted_parameters),
)
ax2d.axis('off')
try:
ax2d.legend(legends, legend_names,
loc='upper right',
borderaxespad=0.,
fontsize=info.legendsize)
except TypeError:
ax2d.legend(legends, legend_names,
loc='upper right',
borderaxespad=0.,
prop={'fontsize': info.legendsize})
fig2d.subplots_adjust(wspace=0, hspace=0)
fig2d.savefig(
os.path.join(
conf.folder, 'plots', '{0}_triangle.{1}'.format(
plot_name, info.extension)),
bbox_inches='tight')
# Legend of 1D plot
if conf.plot:
if ((conf.plot_legend_1d == None) and (len(legends) > 1)) or (conf.plot_legend_1d == True):
            # no space left: add legend to the right
if len(plotted_parameters)<num_columns*num_lines:
fig1d.legend(legends, legend_names,
loc= ((num_columns-0.9)/num_columns,0.1/num_columns),
fontsize=info.legendsize)
# space left in lower right part: add legend there
else:
fig1d.legend(legends, legend_names,
loc= 'center right',
bbox_to_anchor = (1.2,0.5),
fontsize=info.legendsize)
fig1d.tight_layout()
fig1d.savefig(
os.path.join(
conf.folder, 'plots', '{0}_1d.{1}'.format(
plot_name, info.extension)),
bbox_inches='tight')
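# Illustrative sketch (hypothetical helper, not used by the pipeline above): the 1d
# posteriors plotted in compute_posterior are obtained in three steps -- a coarse
# weighted histogram, a re-sampling on a roughly ten times finer grid, and a
# gaussian smoothing. The toy function below reproduces that recipe with the
# modules already imported in this file, using scipy.ndimage.zoom as a simplified
# stand-in for the log-space polynomial fit performed by cubic_interpolation().
def _posterior_smoothing_sketch(sample, weights, bins=20, gaussian_smoothing=0.5):
    """Toy histogram -> interpolation -> smoothing chain, normalised to max 1."""
    hist, bin_edges = np.histogram(sample, bins=bins, weights=weights)
    hist = hist / float(hist.max())
    bincenters = 0.5 * (bin_edges[1:] + bin_edges[:-1])
    # ten times more points, mirroring the interpolation factor used above
    interp_hist = scipy.ndimage.zoom(hist, 10, mode='reflect')
    interp_grid = np.linspace(bincenters[0], bincenters[-1], len(interp_hist))
    # sigma is rescaled by the interpolation factor, as in compute_posterior
    smoothed = scipy.ndimage.filters.gaussian_filter(
        interp_hist, 10 * gaussian_smoothing)
    return interp_grid, smoothed / smoothed.max()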
def ctr_level(histogram2d, lvl, infinite=False):
"""
Extract the contours for the 2d plots (Karim Benabed)
"""
hist = histogram2d.flatten()*1.
hist.sort()
cum_hist = np.cumsum(hist[::-1])
cum_hist /= cum_hist[-1]
alvl = np.searchsorted(cum_hist, lvl)[::-1]
clist = [0]+[hist[-i] for i in alvl]+[hist.max()]
if not infinite:
return clist[1:]
return clist
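# Hypothetical usage of ctr_level (illustrative only, mirroring the calls made in
# compute_posterior and in the read_histogram_2d docstring below):
#     levels = ctr_level(hist2d, [0.68, 0.95])
#     ax.contourf(y_centers, x_centers, hist2d, levels=levels)
# where hist2d, x_centers, y_centers and ax are placeholders for the caller's
# objects; the returned list contains the histogram heights enclosing 95% and 68%
# of the probability mass (in increasing order), followed by the maximum.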
def minimum_credible_intervals(info):
"""
Extract minimum credible intervals (method from Jan Haman) FIXME
"""
histogram = info.hist
bincenters = info.bincenters
levels = info.levels
bounds = np.zeros((len(levels), 2))
j = 0
delta = bincenters[1]-bincenters[0]
left_edge = max(histogram[0] - 0.5*(histogram[1]-histogram[0]), 0.)
right_edge = max(histogram[-1] + 0.5*(histogram[-1]-histogram[-2]), 0.)
failed = False
for level in levels:
norm = float(
(np.sum(histogram)-0.5*(histogram[0]+histogram[-1]))*delta)
norm += 0.25*(left_edge+histogram[0])*delta
norm += 0.25*(right_edge+histogram[-1])*delta
water_level_up = np.max(histogram)*1.0
water_level_down = np.min(histogram)*1.0
top = 0.
iterations = 0
while (abs((top/norm)-level) > 0.0001) and not failed:
top = 0.
water_level = (water_level_up + water_level_down)/2.
#ontop = [elem for elem in histogram if elem > water_level]
indices = [i for i in range(len(histogram))
if histogram[i] > water_level]
# check for multimodal posteriors
if ((indices[-1]-indices[0]+1) != len(indices)):
warnings.warn(
"could not derive minimum credible intervals " +
"for this multimodal posterior")
warnings.warn(
"please try running longer chains or reducing " +
"the number of bins with --bins BINS (default: 20)")
failed = True
break
top = (np.sum(histogram[indices]) -
0.5*(histogram[indices[0]]+histogram[indices[-1]]))*(delta)
# left
if indices[0] > 0:
top += (0.5*(water_level+histogram[indices[0]]) *
delta*(histogram[indices[0]]-water_level) /
(histogram[indices[0]]-histogram[indices[0]-1]))
else:
if (left_edge > water_level):
top += 0.25*(left_edge+histogram[indices[0]])*delta
else:
top += (0.25*(water_level + histogram[indices[0]]) *
delta*(histogram[indices[0]]-water_level) /
(histogram[indices[0]]-left_edge))
# right
if indices[-1] < (len(histogram)-1):
top += (0.5*(water_level + histogram[indices[-1]]) *
delta*(histogram[indices[-1]]-water_level) /
(histogram[indices[-1]]-histogram[indices[-1]+1]))
else:
if (right_edge > water_level):
top += 0.25*(right_edge+histogram[indices[-1]])*delta
else:
top += (0.25*(water_level + histogram[indices[-1]]) *
delta * (histogram[indices[-1]]-water_level) /
(histogram[indices[-1]]-right_edge))
if top/norm >= level:
water_level_down = water_level
else:
water_level_up = water_level
# safeguard, just in case
iterations += 1
if (iterations > 1000):
warnings.warn(
"the loop to check for sigma deviations was " +
"taking too long to converge")
failed = True
break
# min
if failed:
bounds[j][0] = np.nan
elif indices[0] > 0:
bounds[j][0] = bincenters[indices[0]] - delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-histogram[indices[0]-1])
else:
if (left_edge > water_level):
bounds[j][0] = bincenters[0]-0.5*delta
else:
bounds[j][0] = bincenters[indices[0]] - 0.5*delta*(histogram[indices[0]]-water_level)/(histogram[indices[0]]-left_edge)
# max
if failed:
bounds[j][1] = np.nan
elif indices[-1] < (len(histogram)-1):
bounds[j][1] = bincenters[indices[-1]] + delta*(histogram[indices[-1]]-water_level)/(histogram[indices[-1]]-histogram[indices[-1]+1])
else:
if (right_edge > water_level):
bounds[j][1] = bincenters[-1]+0.5*delta
else:
bounds[j][1] = bincenters[indices[-1]] + \
0.5*delta*(histogram[indices[-1]]-water_level) / \
(histogram[indices[-1]]-right_edge)
j += 1
for elem in bounds:
for j in (0, 1):
elem[j] -= info.mean[info.native_index]
return bounds
def write_h(info_file, indices, name, string, quantity, modifiers=None):
"""
Write one horizontal line of output
"""
info_file.write('\n '+name+'\t: ')
for i in indices:
info_file.write(string % quantity[i]+'\t')
def cubic_interpolation(info, hist, bincenters):
"""
    Small routine to accommodate the absence of the interpolate module
"""
    # we start from a try because if anything goes wrong, we want to return the raw histogram rather than nothing
try:
# test that all elements are strictly positive, otherwise we could not take the log, and we must switch to the robust method
for i,elem in enumerate(hist):
if elem == 0.:
hist[i] = 1.e-99
            elif elem < 0:
                print hist[i]
                raise Exception()
# One of our methods (using polyfit) does assume that the input histogram has a maximum value of 1.
        # If in a future version this is not guaranteed anymore, we should renormalise it here.
# This is important for computing weights and thresholds.
# The threshold below which the likelihood will be
        # approximated as zero is hard-coded here (could become an
        # input parameter, but that would not clearly be useful).
threshold = 1.e-3
# prepare the interpolation on log(Like):
ln_hist = np.log(hist)
# define a finer grid on a wider range (assuming that the following method is fine both for inter- and extra-polation)
left = max(info.boundaries[info.native_index][0],bincenters[0]-2.5*(bincenters[1]-bincenters[0]))
right = min(info.boundaries[info.native_index][1],bincenters[-1]+2.5*(bincenters[-1]-bincenters[-2]))
interp_grid = np.linspace(left, right, (len(bincenters)+4)*10+1)
######################################
# polynomial fit method (default): #
        ######################################
if info.posterior_smoothing >= 2:
            # the points in the histogram with a very low likelihood (i.e. hist[i] << 1, since hist is normalised to a maximum of one)
# have a lot of Poisson noise and are unreliable. However, if we do nothing, they may dominate the outcome of the fitted polynomial.
# Hence we can:
# 1) give them less weight (weight = sqrt(hist) seems to work well)
# 2) cut them at some threshold value and base the fit only on higher points
# 3) both
            # the one working best seems to be 2). We also wrote 1) below, but commented it out.
# method 1):
#f = np.poly1d(np.polyfit(bincenters,ln_hist,info.posterior_smoothing,w=np.sqrt(hist)))
#interp_hist = f(interp_grid)
# method 2):
            # find index values such that hist is negligible everywhere except between hist[sub_indices[0]] and hist[sub_indices[-1]]
sub_indices = [i for i,elem in enumerate(hist) if elem > threshold]
# The interpolation is done precisely in this range: hist[sub_indices[0]] < x < hist[sub_indices[-1]]
g = np.poly1d(np.polyfit(bincenters[sub_indices],ln_hist[sub_indices],info.posterior_smoothing)) #,w=np.sqrt(hist[sub_indices])))
            # The extrapolation is done in a range including one more bin on each side, except when the boundary is hit
extrapolation_range_left = [info.boundaries[info.native_index][0] if sub_indices[0] == 0 else bincenters[sub_indices[0]-1]]
extrapolation_range_right = [info.boundaries[info.native_index][1] if sub_indices[-1] == len(hist)-1 else bincenters[sub_indices[-1]+1]]
            # outside of this range, log(L) is brutally set to a negligible value, log(1.e-10)
interp_hist = [g(elem) if (elem > extrapolation_range_left and elem < extrapolation_range_right) else np.log(1.e-10) for elem in interp_grid]
elif info.posterior_smoothing<0:
raise io_mp.AnalyzeError(
"You passed --posterior-smoothing %d, this value is not understood"%info.posterior_smoothing)
############################################################
# other methods: #
# - linear inter/extra-polation if posterior_smoothing = 0 #
        # - cubic inter/extra-polation if posterior_smoothing = 1 #
############################################################
else:
# try first inter/extra-polation
try:
# prepare to interpolate and extrapolate:
if info.posterior_smoothing == 0:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='linear', fill_value='extrapolate')
else:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='cubic', fill_value='extrapolate')
interp_hist = f(interp_grid)
            # failure probably caused by old scipy not having the fill_value='extrapolate' argument. Then, only interpolate.
except:
# define a finer grid but not a wider one
left = max(info.boundaries[info.native_index][0],bincenters[0])
right = min(info.boundaries[info.native_index][1],bincenters[-1])
interp_grid = np.linspace(left, right, len(bincenters)*10+1)
# prepare to interpolate only:
if info.posterior_smoothing == 0:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='linear')
else:
f = scipy.interpolate.interp1d(bincenters, ln_hist, kind='cubic')
interp_hist = f(interp_grid)
        # final steps used by all methods
# go back from ln_Like to Like
interp_hist = np.exp(interp_hist)
# re-normalise the interpolated curve
interp_hist = interp_hist / interp_hist.max()
return interp_hist, interp_grid
except:
# we will end up here if anything went wrong before
# do nothing (raw histogram)
warnings.warn(
"The 1D posterior could not be processed normally, probably" +
"due to incomplete or obsolete numpy and/or scipy versions." +
"So the raw histograms will be plotted.")
return hist, bincenters
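# Comment-only sketch of the default smoothing strategy used above (illustrative,
# assuming a numpy-array histogram `hist` normalised to a maximum of 1, its
# `bincenters`, and a polynomial degree `degree` >= 2 playing the role of
# info.posterior_smoothing): fit a polynomial to log(P) on the bins carrying
# significant probability, evaluate it on a finer grid, and exponentiate back.
#     sub = [i for i, h in enumerate(hist) if h > 1.e-3]
#     g = np.poly1d(np.polyfit(bincenters[sub], np.log(hist[sub]), degree))
#     fine_grid = np.linspace(bincenters[sub[0]], bincenters[sub[-1]], 10 * len(hist))
#     smooth = np.exp(g(fine_grid))
#     smooth /= smooth.max()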
def write_histogram(hist_file_name, x_centers, hist):
"""
Store the posterior distribution to a file
"""
with open(hist_file_name, 'w') as hist_file:
hist_file.write("# 1d posterior distribution\n")
hist_file.write("\n# x_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in x_centers])+"\n")
hist_file.write("\n# Histogram\n")
hist_file.write(", ".join(
[str(elem) for elem in hist])+"\n")
print 'wrote ', hist_file_name
def read_histogram(histogram_path):
"""
Recover a stored 1d posterior
"""
with open(histogram_path, 'r') as hist_file:
for line in hist_file:
if line:
if line.find("# x_centers") != -1:
x_centers = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Histogram") != -1:
hist = [float(elem) for elem in
hist_file.next().split(",")]
x_centers = np.array(x_centers)
hist = np.array(hist)
return x_centers, hist
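# Hypothetical round trip (illustrative only): write_histogram and read_histogram
# are symmetric, so a stored 1d posterior can be re-plotted later without
# re-running the full analysis, e.g.
#     write_histogram('plots/run_param.hist', interp_grid, interp_hist)
#     x_centers, hist = read_histogram('plots/run_param.hist')
#     plt.plot(x_centers, hist)
# where 'plots/run_param.hist' stands for whatever path was used at writing time.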
def write_histogram_2d(hist_file_name, x_centers, y_centers, extent, hist):
"""
Store the histogram information to a file, to plot it later
"""
with open(hist_file_name, 'w') as hist_file:
hist_file.write("# Interpolated histogram\n")
hist_file.write("\n# x_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in x_centers])+"\n")
hist_file.write("\n# y_centers\n")
hist_file.write(", ".join(
[str(elem) for elem in y_centers])+"\n")
hist_file.write("\n# Extent\n")
hist_file.write(", ".join(
[str(elem) for elem in extent])+"\n")
hist_file.write("\n# Histogram\n")
for line in hist:
hist_file.write(", ".join(
[str(elem) for elem in line])+"\n")
def read_histogram_2d(histogram_path):
"""
Read the histogram information that was stored in a file.
To use it, call something like this:
.. code::
        x_centers, y_centers, extent, hist = read_histogram_2d(path)
fig, ax = plt.subplots()
ax.contourf(
y_centers, x_centers, hist, extent=extent,
levels=ctr_level(hist, [0.68, 0.95]),
            zorder=5, cmap=plt.cm.autumn_r)
plt.show()
"""
with open(histogram_path, 'r') as hist_file:
length = 0
for line in hist_file:
if line:
if line.find("# x_centers") != -1:
x_centers = [float(elem) for elem in
hist_file.next().split(",")]
length = len(x_centers)
elif line.find("# y_centers") != -1:
y_centers = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Extent") != -1:
extent = [float(elem) for elem in
hist_file.next().split(",")]
elif line.find("# Histogram") != -1:
hist = []
for index in range(length):
hist.append([float(elem) for elem in
hist_file.next().split(",")])
x_centers = np.array(x_centers)
y_centers = np.array(y_centers)
extent = np.array(extent)
hist = np.array(hist)
return x_centers, y_centers, extent, hist
def clean_conversion(module_name, tag, folder):
"""
Execute the methods "convert" from the different sampling algorithms
Returns True if something was made, False otherwise
"""
has_module = False
subfolder_name = tag+"_subfolder"
try:
module = importlib.import_module(module_name)
subfolder = getattr(module, subfolder_name)
has_module = True
except ImportError:
# The module is not installed, the conversion can not take place
pass
if has_module and os.path.isdir(folder):
# Remove any potential trailing slash
folder = os.path.join(
*[elem for elem in folder.split(os.path.sep) if elem])
if folder.split(os.path.sep)[-1] == subfolder:
try:
getattr(module, 'from_%s_output_to_chains' % tag)(folder)
except IOError:
raise io_mp.AnalyzeError(
"You asked to analyze a %s folder which " % tag +
"seems to come from an unfinished run, or to be empty " +
"or corrupt. Please make sure the run went smoothly " +
"enough.")
warnings.warn(
"The content of the %s subfolder has been " % tag +
"translated for Monte Python. Please run an "
"analysis of the entire folder now.")
return True
else:
return False
def separate_files(files):
"""
Separate the input files in folder
Given all input arguments to the command line files entry, separate them in
a list of lists, grouping them by folders. The number of identified folders
will determine the number of information instances to create
"""
final_list = []
temp = [files[0]]
folder = (os.path.dirname(files[0]) if os.path.isfile(files[0])
else files[0])
if len(files) > 1:
for elem in files[1:]:
new_folder = (os.path.dirname(elem) if os.path.isfile(elem)
else elem)
if new_folder == folder:
temp.append(elem)
else:
folder = new_folder
final_list.append(temp)
temp = [elem]
final_list.append(temp)
return final_list
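# Hypothetical example of the grouping performed by separate_files (assuming the
# listed chain files exist on disk, so that os.path.isfile recognises them):
#     separate_files(['chains/runA/2016-01-01_10__1.txt',
#                     'chains/runA/2016-01-01_10__2.txt',
#                     'chains/runB'])
# would return
#     [['chains/runA/2016-01-01_10__1.txt', 'chains/runA/2016-01-01_10__2.txt'],
#      ['chains/runB']]
# i.e. one sub-list per folder, each later wrapped in its own Information instance.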
def recover_folder_and_files(files):
"""
Distinguish the cases when analyze is called with files or folder
Note that this takes place chronologically after the function
`separate_files`"""
# The following list defines the substring that a chain should contain for
# the code to recognise it as a proper chain.
substrings = ['.txt', '__']
    # The following variable defines the substring that identifies error_log
    # files, which therefore must not be taken into account in the analysis.
substring_err = 'error_log'
limit = 10
# If the first element is a folder, grab all chain files inside
if os.path.isdir(files[0]):
folder = os.path.normpath(files[0])
files = [os.path.join(folder, elem) for elem in os.listdir(folder)
if not os.path.isdir(os.path.join(folder, elem))
and not os.path.getsize(os.path.join(folder, elem)) < limit
and (substring_err not in elem)
and all([x in elem for x in substrings])]
# Otherwise, extract the folder from the chain file-name.
else:
# If the name is completely wrong, say it
if not os.path.exists(files[0]):
raise io_mp.AnalyzeError(
"You provided a non-existant folder/file to analyze")
folder = os.path.relpath(
os.path.dirname(os.path.realpath(files[0])), os.path.curdir)
files = [os.path.join(folder, elem) for elem in os.listdir(folder)
if os.path.join(folder, elem) in np.copy(files)
and not os.path.isdir(os.path.join(folder, elem))
and not os.path.getsize(os.path.join(folder, elem)) < limit
and (substring_err not in elem)
and all([x in elem for x in substrings])]
basename = os.path.basename(folder)
return folder, files, basename
def extract_array(line):
"""
Return the array on the RHS of the line
>>> extract_array("toto = ['one', 'two']\n")
['one', 'two']
>>> extract_array('toto = ["one", 0.2]\n')
['one', 0.2]
"""
# Recover RHS of the equal sign, and remove surrounding spaces
rhs = line.split('=')[-1].strip()
# Remove array signs
rhs = rhs.strip(']').lstrip('[')
# Recover each element of the list
sequence = [e.strip().strip('"').strip("'") for e in rhs.split(',')]
for index, elem in enumerate(sequence):
try:
sequence[index] = int(elem)
except ValueError:
try:
sequence[index] = float(elem)
except ValueError:
pass
return sequence
def extract_dict(line):
"""
Return the key and value of the dictionary element contained in line
>>> extract_dict("something['toto'] = [0, 1, 2, -2, 'cosmo']")
'toto', [0, 1, 2, -2, 'cosmo']
"""
# recovering the array
sequence = extract_array(line)
# Recovering only the LHS
lhs = line.split('=')[0].strip()
# Recovering the name from the LHS
name = lhs.split('[')[-1].strip(']')
name = name.strip('"').strip("'")
return name, sequence
def extract_parameter_names(info):
"""
Reading the log.param, store in the Information instance the names
"""
backup_names = []
plotted_parameters = []
boundaries = []
ref_names = []
tex_names = []
scales = []
with open(info.param_path, 'r') as param:
for line in param:
if line.find('#') == -1:
if line.find('data.experiments') != -1:
info.experiments = extract_array(line)
if line.find('data.parameters') != -1:
name, array = extract_dict(line)
original = name
# Rename the names according the .extra file (opt)
if name in info.to_change.iterkeys():
name = info.to_change[name]
# If the name corresponds to a varying parameter (fourth
# entry in the initial array being non-zero, or a derived
                    # parameter (which could be designated as fixed, it does not make
# any difference)), then continue the process of analyzing.
if array[3] != 0 or array[5] == 'derived':
# The real name is always kept, to have still the class
# names in the covmat
backup_names.append(original)
# With the list "to_plot", we can potentially restrict
# the variables plotted. If it is empty, though, simply
# all parameters will be plotted.
if info.to_plot == []:
plotted_parameters.append(name)
else:
if name in info.to_plot:
plotted_parameters.append(name)
# Append to the boundaries array
boundaries.append([
None if elem == 'None' or (isinstance(elem, int)
and elem == -1)
else elem for elem in array[1:3]])
ref_names.append(name)
# Take care of the scales
scale = array[4]
rescale = 1.
if name in info.new_scales.iterkeys():
scale = info.new_scales[name]
rescale = info.new_scales[name]/array[4]
scales.append(rescale)
# Given the scale, decide for the pretty tex name
number = 1./scale
tex_names.append(
io_mp.get_tex_name(name, number=number))
scales = np.diag(scales)
info.ref_names = ref_names
info.tex_names = tex_names
info.boundaries = boundaries
info.backup_names = backup_names
info.scales = scales
# Beware, the following two numbers are different. The first is the total
# number of parameters stored in the chain, whereas the second is for
# plotting purpose only.
info.number_parameters = len(ref_names)
info.plotted_parameters = plotted_parameters
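# For reference, a hedged example of the log.param lines parsed above (the exact
# syntax is defined by Monte Python itself, not by this module): a varying
# cosmological parameter typically appears roughly as
#     data.parameters['omega_b'] = [2.2253, None, None, 0.028, 0.01, 'cosmo']
# where, following the indexing used in extract_parameter_names, array[1:3] are
# the prior boundaries, array[3] the proposal sigma (zero for a fixed parameter),
# array[4] the scale, and array[5] the role (e.g. 'cosmo', 'nuisance' or 'derived').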
def find_maximum_of_likelihood(info):
"""
Finding the global maximum of likelihood
min_minus_lkl will be appended with all the maximum likelihoods of files,
then will be replaced by its own maximum. This way, the global
maximum likelihood will be used as a reference, and not each chain's
maximum.
"""
min_minus_lkl = []
for chain_file in info.files:
# cheese will brutally contain everything (- log likelihood) in the
# file chain_file being scanned.
# This could potentially be faster with pandas, but is already quite
# fast
#
# This would read the chains including comment lines:
#cheese = (np.array([float(line.split()[1].strip())
# for line in open(chain_file, 'r')]))
#
# This reads the chains excluding comment lines:
with open(chain_file, 'r') as f:
cheese = (np.array([float(line.split()[1].strip())
for line in ifilterfalse(iscomment,f)]))
try:
min_minus_lkl.append(cheese[:].min())
except ValueError:
pass
# beware, it is the min because we are talking about
# '- log likelihood'
# Selecting only the true maximum.
try:
min_minus_lkl = min(min_minus_lkl)
except ValueError:
raise io_mp.AnalyzeError(
"No decently sized chain was found in the desired folder. " +
"Please wait to have more accepted point before trying " +
"to analyze it.")
info.min_minus_lkl = min_minus_lkl
def remove_bad_points(info):
"""
Create an array with all the points from the chains, after removing non-markovian, burn-in and fixed fraction
"""
# spam will brutally contain all the chains with sufficient number of
# points, after the burn-in was removed.
spam = list()
# Recover the longest file name, for pleasing display
max_name_length = max([len(e) for e in info.files])
# Total number of steps done:
steps = 0
accepted_steps = 0
# Open the log file
log = open(info.log_path, 'w')
for index, chain_file in enumerate(info.files):
# To improve presentation, and print only once the full path of the
# analyzed folder, we recover the length of the path name, and
# create an empty complementary string of this length
total_length = 18+max_name_length
empty_length = 18+len(os.path.dirname(chain_file))+1
basename = os.path.basename(chain_file)
if index == 0:
exec "print '--> Scanning file %-{0}s' % chain_file,".format(
max_name_length)
else:
exec "print '%{0}s%-{1}s' % ('', basename),".format(
empty_length, total_length-empty_length)
# cheese will brutally contain everything in the chain chain_file being
# scanned
#
# This would read the chains including comment lines:
#cheese = (np.array([[float(elem) for elem in line.split()]
# for line in open(chain_file, 'r')]))
#
# This read the chains excluding comment lines:
with open(chain_file, 'r') as f:
cheese = (np.array([[float(elem) for elem in line.split()]
for line in ifilterfalse(iscomment,f)]))
# If the file contains a broken line with a different number of
# elements, the previous array generation might fail, and will not have
# the correct shape. Hence the following command will fail. To avoid
# that, the error is caught.
try:
local_min_minus_lkl = cheese[:, 1].min()
except IndexError:
raise io_mp.AnalyzeError(
"Error while scanning %s." % chain_file +
" This file most probably contains "
"an incomplete line, rendering the analysis impossible. "
"I think that the following line(s) is(are) wrong:\n %s" % (
'\n '.join(
['-> %s' % line for line in
open(chain_file, 'r') if
len(line.split()) != len(info.backup_names)+2])))
line_count = float(sum(1 for line in open(chain_file, 'r')))
# Logging the information obtained until now.
number_of_steps = cheese[:, 0].sum()
log.write("%s\t " % os.path.basename(chain_file))
log.write(" Number of steps:%d\t" % number_of_steps)
log.write(" Steps accepted:%d\t" % line_count)
log.write(" acc = %.2g\t" % (float(line_count)/number_of_steps))
log.write("min(-loglike) = %.2f\n" % local_min_minus_lkl)
steps += number_of_steps
accepted_steps += line_count
# check if analyze() is called directly by the user, or by the mcmc loop during an updating phase
try:
# command_line.update is defined when called by the mcmc loop
info.update
except:
# in case it was not defined (i.e. when analyze() is called directly by user), set it to False
info.update = 0
        # Removing the non-markovian part, the burn-in, and a fixed fraction
        # (1 - keep_fraction) of the remaining points
start = 0
markovian=0
try:
# Read all comments in chains about times when proposal was updated
# The last of these comments gives the number of lines to be skipped in the files
if info.markovian and not info.update:
with open(chain_file, 'r') as f:
for line in ifilter(iscomment,f):
start = int(line.split()[2])
markovian = start
            # Remove burn-in, defined as all points until the likelihood
            # reaches min_minus_lkl+LOG_LKL_CUTOFF
while cheese[start, 1] > info.min_minus_lkl+LOG_LKL_CUTOFF:
start += 1
burnin = start-markovian
# Remove fixed fraction as requested by user (usually not useful if non-markovian is also removed)
if info.keep_fraction < 1:
start = start + int((1.-info.keep_fraction)*(line_count - start))
print ": Removed",
if info.markovian:
print "%d non-markovian points," % markovian,
print "%d points of burn-in," % burnin,
if info.keep_fraction < 1:
print "and first %.0f percent," % (100.*(1-info.keep_fraction)),
print "keep %d steps" % (line_count-start)
except IndexError:
print ': Removed everything: chain not converged'
# ham contains cheese without the burn-in, if there are any points
# left (more than 5)
if np.shape(cheese)[0] > start+5:
ham = np.copy(cheese[int(start)::])
# Deal with single file case
if len(info.files) == 1:
warnings.warn("Convergence computed for a single file")
bacon = np.copy(cheese[::3, :])
egg = np.copy(cheese[1::3, :])
sausage = np.copy(cheese[2::3, :])
spam.append(bacon)
spam.append(egg)
spam.append(sausage)
continue
# Adding resulting table to spam
spam.append(ham)
# Test the length of the list
if len(spam) == 0:
raise io_mp.AnalyzeError(
"No decently sized chain was found. " +
"Please wait a bit to analyze this folder")
# Applying now new rules for scales, if the name is contained in the
# referenced names
for name in info.new_scales.iterkeys():
try:
index = info.ref_names.index(name)
for i in xrange(len(spam)):
spam[i][:, index+2] *= 1./info.scales[index, index]
except ValueError:
# there is nothing to do if the name is not contained in ref_names
pass
info.steps = steps
info.accepted_steps = accepted_steps
return spam
def compute_mean(mean, spam, total):
"""
"""
for i in xrange(np.shape(mean)[1]):
for j in xrange(len(spam)):
submean = np.sum(spam[j][:, 0]*spam[j][:, i+2])
mean[j+1, i] = submean / total[j+1]
mean[0, i] += submean
mean[0, i] /= total[0]
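# Worked example (editor's illustration): a chain row [3., 12.4, 0.7, ...]
# encodes a point with multiplicity 3 and -log(likelihood) 12.4, so its
# parameter values (0.7, ...) enter the weighted means above with weight 3.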
def compute_variance(var, mean, spam, total):
"""
"""
for i in xrange(np.shape(var)[1]):
for j in xrange(len(spam)):
var[0, i] += np.sum(
spam[j][:, 0]*(spam[j][:, i+2]-mean[0, i])**2)
var[j+1, i] = np.sum(
spam[j][:, 0]*(spam[j][:, i+2]-mean[j+1, i])**2) / \
(total[j+1]-1)
var[0, i] /= (total[0]-1)
def compute_covariance_matrix(info):
"""
"""
covar = np.zeros((len(info.ref_names), len(info.ref_names)))
for i in xrange(len(info.ref_names)):
for j in xrange(i, len(info.ref_names)):
covar[i, j] = (
info.chain[:, 0]*(
(info.chain[:, i+2]-info.mean[i]) *
(info.chain[:, j+2]-info.mean[j]))).sum()
if i != j:
covar[j, i] = covar[i, j]
covar /= info.total
# Removing scale factors in order to store true parameter covariance
covar = np.dot(info.scales.T, np.dot(covar, info.scales))
return covar
def adjust_ticks(param, information_instances):
"""
"""
if len(information_instances) == 1:
return
# Recovering all x_range and ticks entries from the concerned information
# instances
x_ranges = []
ticks = []
for info in information_instances:
if not info.ignore_param:
x_ranges.append(info.x_range[info.native_index])
ticks.append(info.ticks[info.native_index])
# The new x_range and tick should min/max all the existing ones
new_x_range = np.array(
[min([e[0] for e in x_ranges]), max([e[1] for e in x_ranges])])
temp_ticks = np.array(
[min([e[0] for e in ticks]), max([e[-1] for e in ticks])])
new_ticks = np.linspace(temp_ticks[0],
temp_ticks[1],
info.ticknumber)
for info in information_instances:
if not info.ignore_param:
info.x_range[info.native_index] = new_x_range
info.ticks[info.native_index] = new_ticks
def store_contour_coordinates(info, name1, name2, contours):
"""docstring"""
file_name = os.path.join(
info.folder, 'plots', '{0}_2d_{1}-{2}.dat'.format(
info.basename, name1, name2))
with open(file_name, 'w') as plot_file:
plot_file.write(
'# contour for confidence level {0}\n'.format(
info.levels[1]))
for elem in contours.collections[0].get_paths():
points = elem.vertices
for k in range(np.shape(points)[0]):
plot_file.write("%.8g\t %.8g\n" % (
points[k, 0], points[k, 1]))
# stop to not include the inner contours
if k != 0:
if all(points[k] == points[0]):
plot_file.write("\n")
break
plot_file.write("\n\n")
plot_file.write(
'# contour for confidence level {0}\n'.format(
info.levels[0]))
for elem in contours.collections[1].get_paths():
points = elem.vertices
for k in range(np.shape(points)[0]):
plot_file.write("%.8g\t %.8g\n" % (
points[k, 0], points[k, 1]))
if k != 0:
if all(points[k] == points[0]):
plot_file.write("\n")
break
plot_file.write("\n\n")
def iscomment(s):
"""
Define what we call a comment in MontePython chain files
"""
return s.startswith('#')
class Information(object):
"""
Hold all information for analyzing runs
"""
# Counting the number of instances, to choose the color map
_ids = count(0)
# Flag checking the absence or presence of the interp1d function
has_interpolate_module = False
# Actual pairs of colors used by MP.
# For each pair, the first color is for the 95% contour,
# and the second for the 68% contour + the 1d probability.
# Note that, as with the other customisation options, you can specify new
# values for this in the extra plot_file.
MP_color = {
'Red':['#E37C80','#CE121F'],
'Blue':['#7A98F6','#1157EF'],
'Green':['#88B27A','#297C09'],
'Orange':['#F3BE82','#ED920F'],
'Grey':['#ABABAB','#737373'],
'Purple':['#B87294','#88004C']
}
# order used when several directories are analysed
MP_color_cycle = [
MP_color['Red'],
MP_color['Blue'],
MP_color['Green'],
MP_color['Orange'],
MP_color['Grey'],
MP_color['Purple']
]
# in the same order, list of transparency levels
alphas = [0.9, 0.9, 0.9, 0.9, 0.9, 0.9]
def __init__(self, command_line, other=None):
"""
The following initialization creates the three tables that can be
customized in an extra plot_file (see :mod:`parser_mp`).
Parameters
----------
command_line : Namespace
it contains the initialised command line arguments
"""
self.to_change = {}
"""
Dictionary whose keys are the old parameter names, and values are the
new ones. For instance :code:`{'beta_plus_lambda':'beta+lambda'}`
"""
self.to_plot = []
"""
Array of names of parameters to plot. If left empty, all will be
plotted.
.. warning::
If you changed a parameter name with :attr:`to_change`, you need to
give the new name to this array
"""
self.new_scales = {}
"""
Dictionary that redefines some scales. The keys will be the parameter
name, and the value its scale.
"""
# Assign a unique id to this instance
self.id = self._ids.next()
# Defining the sigma contours (1, 2 and 3-sigma)
self.levels = np.array([68.26, 95.4, 99.7])/100.
# Follows a bunch of initialisation to provide default members
self.ref_names, self.backup_names = [], []
self.scales, self.plotted_parameters = [], []
self.spam = []
# Store directly all information from the command_line object into this
# instance, except the protected members (begin and end with __)
for elem in dir(command_line):
if elem.find('__') == -1:
setattr(self, elem, getattr(command_line, elem))
# initialise the legend flags
self.plot_legend_1d = None
self.plot_legend_2d = None
# initialize the legend size to be the same as fontsize, but can be
# altered in the extra file
self.legendsize = self.fontsize
self.legendnames = []
# initialize the customisation script flags
self.custom1d = []
self.custom2d = []
        # initialise the dictionary enforcing limits
self.force_limits = {}
        # Read a potential file describing changes to be done for the parameter
        # names, and the number of parameters plotted (can be left empty, all
        # will then be plotted), but also the style of the plot. Note that this
        # overrides the command line options
if command_line.optional_plot_file:
plot_file_vars = {'info': self,'plt': plt}
execfile(command_line.optional_plot_file, plot_file_vars)
# check and store keep_fraction
if command_line.keep_fraction<=0 or command_line.keep_fraction>1:
raise io_mp.AnalyzeError("after --keep-fraction you should pass a float >0 and <=1")
self.keep_fraction = command_line.keep_fraction
def remap_parameters(self, spam):
"""
Perform substitutions of parameters for analyzing
.. note::
for arbitrary combinations of parameters, the prior will not
necessarily be flat.
"""
if hasattr(self, 'redefine'):
for key, value in self.redefine.iteritems():
# Check that the key was an original name
if key in self.backup_names:
print ' /|\ Transforming', key, 'into', value
# We recover the indices of the key
index_to_change = self.backup_names.index(key)+2
print('/_o_\ The new variable will be called ' +
self.ref_names[self.backup_names.index(key)])
# Recover all indices of all variables present in the
# remapping
variable_names = [elem for elem in self.backup_names if
value.find(elem) != -1]
indices = [self.backup_names.index(name)+2 for name in
variable_names]
# Now loop over all files in spam
for i in xrange(len(spam)):
# Assign variables to their values
for index, name in zip(indices, variable_names):
exec("%s = spam[i][:, %i]" % (name, index))
# Assign to the desired index the combination
exec("spam[i][:, %i] = %s" % (index_to_change, value))
def define_ticks(self):
"""
"""
self.max_values = self.chain[:, 2:].max(axis=0)
self.min_values = self.chain[:, 2:].min(axis=0)
self.span = (self.max_values-self.min_values)
# Define the place of ticks, given the number of ticks desired, stored
# in conf.ticknumber
self.ticks = np.array(
[np.linspace(self.min_values[i]+self.span[i]*0.1,
self.max_values[i]-self.span[i]*0.1,
self.ticknumber) for i in range(len(self.span))])
# Define the x range (ticks start not exactly at the range boundary to
# avoid display issues)
self.x_range = np.array((self.min_values, self.max_values)).T
# In case the exploration hit a boundary (as defined in the parameter
# file), at the level of precision defined by the number of bins, the
# ticks and x_range should be altered in order to display this
# meaningful number instead.
for i in range(np.shape(self.ticks)[0]):
x_range = self.x_range[i]
bounds = self.boundaries[i]
# Left boundary
if bounds[0] is not None:
if abs(x_range[0]-bounds[0]) < self.span[i]/self.bins:
self.ticks[i][0] = bounds[0]
self.x_range[i][0] = bounds[0]
# Right boundary
if bounds[-1] is not None:
if abs(x_range[-1]-bounds[-1]) < self.span[i]/self.bins:
self.ticks[i][-1] = bounds[-1]
self.x_range[i][-1] = bounds[-1]
def write_information_files(self):
# Store in info_names only the tex_names that were plotted, for this
# instance, and in indices the corresponding list of indices. It also
# removes the $ signs, for clarity
self.info_names = [
name for index, name in enumerate(self.tex_names) if
self.ref_names[index] in self.plotted_parameters]
self.indices = [self.tex_names.index(name) for name in self.info_names]
self.tex_names = [name for index, name in enumerate(self.tex_names) if
self.ref_names[index] in self.plotted_parameters]
self.info_names = [name.replace('$', '') for name in self.info_names]
# Define the bestfit array
self.bestfit = np.zeros(len(self.ref_names))
for i in xrange(len(self.ref_names)):
self.bestfit[i] = self.chain[self.sorted_indices[0], :][2+i]
# Write down to the .h_info file all necessary information
self.write_h_info()
self.write_v_info()
self.write_tex()
def write_h_info(self):
with open(self.h_info_path, 'w') as h_info:
h_info.write(' param names\t: ')
for name in self.info_names:
h_info.write("%-14s" % name)
write_h(h_info, self.indices, 'R-1 values', '% .6f', self.R)
write_h(h_info, self.indices, 'Best Fit ', '% .6e', self.bestfit)
write_h(h_info, self.indices, 'mean ', '% .6e', self.mean)
write_h(h_info, self.indices, 'sigma ', '% .6e',
(self.bounds[:, 0, 1]-self.bounds[:, 0, 0])/2.)
h_info.write('\n')
write_h(h_info, self.indices, '1-sigma - ', '% .6e',
self.bounds[:, 0, 0])
write_h(h_info, self.indices, '1-sigma + ', '% .6e',
self.bounds[:, 0, 1])
write_h(h_info, self.indices, '2-sigma - ', '% .6e',
self.bounds[:, 1, 0])
write_h(h_info, self.indices, '2-sigma + ', '% .6e',
self.bounds[:, 1, 1])
write_h(h_info, self.indices, '3-sigma - ', '% .6e',
self.bounds[:, 2, 0])
write_h(h_info, self.indices, '3-sigma + ', '% .6e',
self.bounds[:, 2, 1])
# bounds
h_info.write('\n')
write_h(h_info, self.indices, '1-sigma > ', '% .6e',
self.mean+self.bounds[:, 0, 0])
write_h(h_info, self.indices, '1-sigma < ', '% .6e',
self.mean+self.bounds[:, 0, 1])
write_h(h_info, self.indices, '2-sigma > ', '% .6e',
self.mean+self.bounds[:, 1, 0])
write_h(h_info, self.indices, '2-sigma < ', '% .6e',
self.mean+self.bounds[:, 1, 1])
write_h(h_info, self.indices, '3-sigma > ', '% .6e',
self.mean+self.bounds[:, 2, 0])
write_h(h_info, self.indices, '3-sigma < ', '% .6e',
self.mean+self.bounds[:, 2, 1])
def write_v_info(self):
"""Write vertical info file"""
with open(self.v_info_path, 'w') as v_info:
v_info.write('%-15s\t: %-11s' % ('param names', 'R-1'))
v_info.write(' '.join(['%-11s' % elem for elem in [
'Best fit', 'mean', 'sigma', '1-sigma -', '1-sigma +',
'2-sigma -', '2-sigma +', '1-sigma >', '1-sigma <',
'2-sigma >', '2-sigma <']]))
for index, name in zip(self.indices, self.info_names):
v_info.write('\n%-15s\t: % .4e' % (name, self.R[index]))
v_info.write(' '.join(['% .4e' % elem for elem in [
self.bestfit[index], self.mean[index],
(self.bounds[index, 0, 1]-self.bounds[index, 0, 0])/2.,
self.bounds[index, 0, 0], self.bounds[index, 0, 1],
self.bounds[index, 1, 0], self.bounds[index, 1, 1],
self.mean[index]+self.bounds[index, 0, 0],
self.mean[index]+self.bounds[index, 0, 1],
self.mean[index]+self.bounds[index, 1, 0],
self.mean[index]+self.bounds[index, 1, 1]]]))
def write_tex(self):
"""Write a tex table containing the main results """
with open(self.tex_path, 'w') as tex:
tex.write("\\begin{tabular}{|l|c|c|c|c|} \n \\hline \n")
tex.write("Param & best-fit & mean$\pm\sigma$ ")
tex.write("& 95\% lower & 95\% upper \\\\ \\hline \n")
for index, name in zip(self.indices, self.tex_names):
tex.write("%s &" % name)
tex.write("$%.4g$ & $%.4g_{%.2g}^{+%.2g}$ " % (
self.bestfit[index], self.mean[index],
self.bounds[index, 0, 0], self.bounds[index, 0, 1]))
tex.write("& $%.4g$ & $%.4g$ \\\\ \n" % (
self.mean[index]+self.bounds[index, 1, 0],
self.mean[index]+self.bounds[index, 1, 1]))
tex.write("\\hline \n \\end{tabular} \\\\ \n")
tex.write("$-\ln{\cal L}_\mathrm{min} =%.6g$, " % (
self.min_minus_lkl))
tex.write("minimum $\chi^2=%.4g$ \\\\ \n" % (
self.min_minus_lkl*2.))
| mit |
budnyjj/bsuir_magistracy | disciplines/OTOS/lab_1/lab.py | 1 | 1813 | #!/usr/bin/env python
import functools
import math
import random
import numpy as np
import matplotlib.pyplot as plt
plt.rc('text', usetex=True)
plt.rc('font', family='serif')
# 1D model
def model(x):
a = 2.7; d = 0.1; y_0 = 2
sigma = 0.001
result = y_0 - 0.04 * (x - a) - d * (x - a)**2
return result + random.gauss(0, sigma)
def search_asymmetric(model, start_x, num_iter=100):
next_x = cur_x = start_x
vals_x = [cur_x]
for k in range(num_iter):
alpha = (k + 1) ** (-1/3)
factor = (k + 1) ** (-2/3)
next_x = cur_x + factor * (model(cur_x + alpha) - model(cur_x))
cur_x = next_x
vals_x.append(cur_x)
return vals_x
def search_symmetric(model, start_x, num_iter=100):
next_x = cur_x = start_x
vals_x = [cur_x]
for k in range(num_iter):
alpha = (k + 1) ** (-1/3)
factor = (k + 1) ** (-2/3)
next_x = cur_x + factor * (model(cur_x + alpha) - model(cur_x - alpha))
cur_x = next_x
vals_x.append(cur_x)
return vals_x
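# Editor's note (illustrative interpretation): both routines above estimate
# the gradient of the noisy model from finite differences (one-sided in
# search_asymmetric(), two-sided in search_symmetric()) and follow it uphill
# with step sizes k**(-2/3) and probe widths k**(-1/3) that shrink with the
# iteration, in the spirit of Kiefer-Wolfowitz stochastic approximation.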
NUM_ITER = 1000
MIN_X = 1; MAX_X = 10; NUM_X = 100
VALS_X = np.linspace(MIN_X, MAX_X, NUM_X)
model_vec = np.vectorize(model)
plt.plot(VALS_X, model_vec(VALS_X),
color='r', linestyle=' ',
marker='.', markersize=5,
label='model')
search_asymmetric_x = search_asymmetric(model, MAX_X, NUM_ITER)
plt.plot(search_asymmetric_x, model_vec(search_asymmetric_x),
color='g', marker='x', markersize=5,
label='asymmetric')
search_symmetric_x = search_symmetric(model, MAX_X, NUM_ITER)
plt.plot(search_symmetric_x, model_vec(search_symmetric_x),
color='b', marker='x', markersize=5,
label='symmetric')
plt.xlabel('$ x $')
plt.ylabel('$ y $')
plt.grid(True)
# plt.legend(loc=2)
plt.savefig('plot.png', dpi=200)
| gpl-3.0 |
alvarofierroclavero/scikit-learn | sklearn/kernel_ridge.py | 155 | 6545 | """Module :mod:`sklearn.kernel_ridge` implements kernel ridge regression."""
# Authors: Mathieu Blondel <mathieu@mblondel.org>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD 3 clause
import numpy as np
from .base import BaseEstimator, RegressorMixin
from .metrics.pairwise import pairwise_kernels
from .linear_model.ridge import _solve_cholesky_kernel
from .utils import check_X_y
from .utils.validation import check_is_fitted
class KernelRidge(BaseEstimator, RegressorMixin):
"""Kernel ridge regression.
Kernel ridge regression (KRR) combines ridge regression (linear least
squares with l2-norm regularization) with the kernel trick. It thus
learns a linear function in the space induced by the respective kernel and
the data. For non-linear kernels, this corresponds to a non-linear
function in the original space.
The form of the model learned by KRR is identical to support vector
regression (SVR). However, different loss functions are used: KRR uses
squared error loss while support vector regression uses epsilon-insensitive
loss, both combined with l2 regularization. In contrast to SVR, fitting a
KRR model can be done in closed-form and is typically faster for
medium-sized datasets. On the other hand, the learned model is non-sparse
and thus slower than SVR, which learns a sparse model for epsilon > 0, at
prediction-time.
This estimator has built-in support for multi-variate regression
(i.e., when y is a 2d-array of shape [n_samples, n_targets]).
Read more in the :ref:`User Guide <kernel_ridge>`.
Parameters
----------
alpha : {float, array-like}, shape = [n_targets]
Small positive values of alpha improve the conditioning of the problem
and reduce the variance of the estimates. Alpha corresponds to
``(2*C)^-1`` in other linear models such as LogisticRegression or
LinearSVC. If an array is passed, penalties are assumed to be specific
to the targets. Hence they must correspond in number.
kernel : string or callable, default="linear"
Kernel mapping used internally. A callable should accept two arguments
and the keyword arguments passed to this object as kernel_params, and
should return a floating point number.
gamma : float, default=None
Gamma parameter for the RBF, polynomial, exponential chi2 and
sigmoid kernels. Interpretation of the default value is left to
the kernel; see the documentation for sklearn.metrics.pairwise.
Ignored by other kernels.
degree : float, default=3
Degree of the polynomial kernel. Ignored by other kernels.
coef0 : float, default=1
Zero coefficient for polynomial and sigmoid kernels.
Ignored by other kernels.
kernel_params : mapping of string to any, optional
Additional parameters (keyword arguments) for kernel function passed
as callable object.
Attributes
----------
dual_coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
X_fit_ : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data, which is also required for prediction
References
----------
* Kevin P. Murphy
"Machine Learning: A Probabilistic Perspective", The MIT Press
chapter 14.4.3, pp. 492-493
See also
--------
Ridge
Linear ridge regression.
SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> from sklearn.kernel_ridge import KernelRidge
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples)
>>> X = rng.randn(n_samples, n_features)
>>> clf = KernelRidge(alpha=1.0)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE
KernelRidge(alpha=1.0, coef0=1, degree=3, gamma=None, kernel='linear',
kernel_params=None)
"""
def __init__(self, alpha=1, kernel="linear", gamma=None, degree=3, coef0=1,
kernel_params=None):
self.alpha = alpha
self.kernel = kernel
self.gamma = gamma
self.degree = degree
self.coef0 = coef0
self.kernel_params = kernel_params
def _get_kernel(self, X, Y=None):
if callable(self.kernel):
params = self.kernel_params or {}
else:
params = {"gamma": self.gamma,
"degree": self.degree,
"coef0": self.coef0}
return pairwise_kernels(X, Y, metric=self.kernel,
filter_params=True, **params)
@property
def _pairwise(self):
return self.kernel == "precomputed"
def fit(self, X, y=None, sample_weight=None):
"""Fit Kernel Ridge regression model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data
y : array-like, shape = [n_samples] or [n_samples, n_targets]
Target values
sample_weight : float or numpy array of shape [n_samples]
Individual weights for each sample, ignored if None is passed.
Returns
-------
self : returns an instance of self.
"""
# Convert data
X, y = check_X_y(X, y, accept_sparse=("csr", "csc"), multi_output=True,
y_numeric=True)
K = self._get_kernel(X)
alpha = np.atleast_1d(self.alpha)
ravel = False
if len(y.shape) == 1:
y = y.reshape(-1, 1)
ravel = True
copy = self.kernel == "precomputed"
self.dual_coef_ = _solve_cholesky_kernel(K, y, alpha,
sample_weight,
copy)
if ravel:
self.dual_coef_ = self.dual_coef_.ravel()
self.X_fit_ = X
return self
def predict(self, X):
"""Predict using the the kernel ridge model
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : array, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ["X_fit_", "dual_coef_"])
K = self._get_kernel(X, self.X_fit_)
return np.dot(K, self.dual_coef_)
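# Illustrative sketch (editor's addition, not part of scikit-learn): with a
# precomputed kernel, the fitted dual coefficients solve the closed-form
# system (K + alpha * I) dual_coef = y mentioned in the class docstring.
# The data below are random toy values used only for demonstration.
if __name__ == "__main__":
    rng = np.random.RandomState(0)
    X_demo = rng.randn(10, 4)
    y_demo = rng.randn(10)
    K_demo = np.dot(X_demo, X_demo.T)  # linear kernel, precomputed
    model = KernelRidge(alpha=0.5, kernel="precomputed").fit(K_demo, y_demo)
    manual = np.linalg.solve(K_demo + 0.5 * np.eye(10), y_demo)
    print(np.allclose(model.dual_coef_, manual))  # expected: True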
| bsd-3-clause |
VUEG/bdes_to | src/03_post_processing/similarity.py | 1 | 18175 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
""" Functions and utilities comparing raster and vector similarities.
Module can be used alone or as part of Snakemake workflow.
"""
import logging
import rasterio
import geopandas as gpd
import pandas as pd
import numpy as np
import numpy.ma as ma
from importlib.machinery import SourceFileLoader
from scipy.spatial.distance import jaccard
from scipy.stats import kendalltau
from timeit import default_timer as timer
utils = SourceFileLoader("lib.utils", "src/00_lib/utils.py").load_module()
def compute_jaccard(x, y, x_min=0.0, x_max=1.0, y_min=0.0, y_max=1.0,
warn_uneven=True, limit_tolerance=4, disable_checks=False):
"""Calculate the Jaccard index (Jaccard similarity coefficient).
The Jaccard coefficient measures similarity between sample sets, and is
defined as the size of the intersection divided by the size of the union of
the sample sets. The Jaccard coefficient can be calculated for a subset of
rasters provided by using the threshold argument.
Min and max values must be provided for both RasterLayer objects x
and y. Method can be used with RasterLayers of any value range, but
the defaults [0.0, 1.0] are geared towards comparing Zonation rank priority
rasters. Limits provided are inclusive.
:param x ndarray object.
:param y ndarray object.
:param x_min Numeric minimum threshold value for x to be used
(default 0.0).
:param x_max Numeric maximum threshold value for x to be used
(default 1.0).
:param y_min Numeric minimum threshold value for y to be used
(default 0.0).
:param y_max Numeric maximum threshold value for y to be used
(default 1.0).
:param warn_uneven Boolean indicating whether a warning is raised if the
compared raster coverages are very (>20x) uneven.
    :param limit_tolerance integer that defines to which precision x and
                           y limits are rounded. This helps e.g. with values
                           that are close to 0 but not quite 0 (default: 4, i.e.
round(x, 4)).
:param disable_checks boolean indicating if the input limit values are
checked against the actual raster values in x and y.
:return numeric value in [0, 1].
"""
if not disable_checks:
assert x_min >= np.round(np.min(x), limit_tolerance), "Min threshold smaller than computed min of x"
assert x_max <= np.round(np.max(x), limit_tolerance), "Max threshold greater than computed max of x"
        assert x_min < x_max, "Min threshold for x is larger than the max threshold"
assert y_min >= np.round(np.min(y), limit_tolerance), "Min threshold smaller than computed min of y"
assert y_max <= np.round(np.max(y), limit_tolerance), "Max threshold greater than computed max of y"
        assert y_min < y_max, "Min threshold for y is larger than the max threshold"
# Get the values according to the limits provided
x_bin = (x >= x_min) & (x <= x_max)
y_bin = (y >= y_min) & (y <= y_max)
if warn_uneven:
x_size = np.sum(x_bin)
y_size = np.sum(y_bin)
# Sort from smaller to larger
sizes = np.sort([x_size, y_size])
if sizes[1] / sizes[0] > 20:
print("WARNING: The extents of raster values above the "
"threshhold differ more than 20-fold: Jaccard coefficient " +
"may not be informative.")
# Compute the Jaccard-Needham dissimilarity between two boolean 1-D arrays
# and subtract from 1 to get the Jaccard index
return 1 - jaccard(x_bin.flatten(), y_bin.flatten())
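# Illustrative sketch (editor's addition, not part of the original module):
# a tiny, self-contained usage example of compute_jaccard() on two toy
# arrays. The values and thresholds below are invented for demonstration.
def _example_compute_jaccard():
    """Return the Jaccard index of the upper halves of two toy arrays."""
    x = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
    y = np.array([0.0, 0.5, 0.25, 0.75, 1.0])
    # Cells >= 0.5 are selected in both arrays; the selections share 2 of the
    # 4 selected cells, so the expected coefficient is 2 / 4 = 0.5.
    return compute_jaccard(x, y, x_min=0.5, x_max=1.0, y_min=0.5, y_max=1.0)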
def cross_correlation(input_rasters, verbose=False, logger=None):
""" Calculate Kendall tau rank correlation between all the inpur rasters.
Input rasters are read in as masked arrays and all cells that are NoData
are discarded. This way, only the values of informative cells are passed
on to scipy.stats.kendalltau() which makes things faster. The assumption is
that all rasters exactly match on which cells have values. An intersection
of both rasters' masks is used to define informative cells.
:param input_rasters list of input raster paths.
:param verbose: Boolean indicating how much information is printed out.
:param logger: logger object to be used.
:return Pandas Dataframe with rank correlation information.
"""
# 1. Setup --------------------------------------------------------------
all_start = timer()
if not logger:
logging.basicConfig()
llogger = logging.getLogger('cross_correlation')
llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
else:
llogger = logger
# Check the inputs
assert len(input_rasters) > 1, "More than one input rasters are needed"
# 2. Calculations --------------------------------------------------------
llogger.info(" [** COMPUTING KENDALL TAU RANK CORRELATIONS **]")
all_correlations = pd.DataFrame({"feature1": [], "feature2": [],
"tau": [], "pvalue": []})
n_rasters = len(input_rasters)
# Generate counter information for all the computations. The results
# matrix is always diagonally symmetrical.
n_computations = int((n_rasters * n_rasters - n_rasters) / 2)
no_computation = 1
for i in range(0, n_rasters):
raster1 = rasterio.open(input_rasters[i])
raster1_src = raster1.read(1, masked=True)
for j in range(i+1, n_rasters):
raster2 = rasterio.open(input_rasters[j])
raster2_src = raster2.read(1, masked=True)
# Compute the intersection of the masks of both rasters and use
# that as a value mask.
value_mask = raster1_src.mask & raster2_src.mask
# Then set the mask of both raster to the intersection mask
raster1_src.mask = value_mask
raster2_src.mask = value_mask
# Inlude only cells with actual values
raster1_values = ma.compressed(raster1_src)
raster2_values = ma.compressed(raster2_src)
prefix = utils.get_iteration_prefix(no_computation,
n_computations)
llogger.info(("{} Calculating correlation ".format(prefix) +
"between {} ".format(input_rasters[i]) +
"and {}".format(input_rasters[j])))
# Compute Kendall's tau rank correlation
tau, pvalue = kendalltau(raster1_values, raster2_values)
llogger.debug("Tau: {0} (p-value: {1})".format(tau, pvalue))
correlations = pd.DataFrame({"feature1": [input_rasters[i]],
"feature2": [input_rasters[j]],
"tau": [tau],
"pvalue": [pvalue]})
all_correlations = pd.concat([all_correlations, correlations])
no_computation += 1
all_correlations.index = np.arange(0, len(all_correlations.index), 1)
all_end = timer()
all_elapsed = round(all_end - all_start, 2)
llogger.info(" [TIME] All processing took {} sec".format(all_elapsed))
return all_correlations
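# Usage sketch (editor's addition): a call along the lines of
#     cross_correlation(["rank_a.tif", "rank_b.tif"], verbose=True)
# returns a pandas DataFrame with one row per raster pair and the columns
# "feature1", "feature2", "tau" and "pvalue". The file names above are
# hypothetical.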
def cross_jaccard(input_rasters, thresholds, verbose=False, logger=None):
""" Calculate Jaccard coefficients between all the inpur rasters.
This is a utility function that is intented to be used to compare
fractions of the landscape.
:param input_rasters list of input raster paths.
    :param thresholds vector of numeric tuples (x_min, x_max, y_min, y_max)
                      defining the threshold values to be used.
:param verbose: Boolean indicating how much information is printed out.
:param logger: logger object to be used.
:param ... additional arguments passed on to jaccard().
:return Pandas Dataframe with Jaccard coefficients between all rasters.
"""
# 1. Setup --------------------------------------------------------------
all_start = timer()
if not logger:
logging.basicConfig()
llogger = logging.getLogger('cross_jaccard')
llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
else:
llogger = logger
# Check the inputs
assert len(input_rasters) > 1, "More than one input rasters are needed"
assert len(thresholds) >= 1, "At least one tuple of thresholds is needed"
# 2. Calculations --------------------------------------------------------
llogger.info(" [** COMPUTING JACCARD INDICES **]")
all_jaccards = pd.DataFrame({"feature1": [], "feature2": [],
"threshold": [], "coef": []})
n_rasters = len(input_rasters)
# Generate counter information for all the computations. The results
# matrix is always diagonally symmetrical.
n_computations = int((n_rasters * n_rasters - n_rasters) / 2 * len(thresholds))
no_computation = 1
for threshold in thresholds:
if len(threshold) != 4:
llogger.error("Threshold tuple needs 4 values")
            continue
for i in range(0, n_rasters):
x_min, x_max, y_min, y_max = threshold
raster1 = rasterio.open(input_rasters[i])
# To calculate the Jaccard index we are dealing with binary data
# only. Avoid using masked arrays and replace NoData values with
# zeros.
raster1_nodata = raster1.nodata
raster1_src = raster1.read(1)
np.place(raster1_src, np.isclose(raster1_src, raster1_nodata), 0.0)
for j in range(i+1, n_rasters):
raster2 = rasterio.open(input_rasters[j])
raster2_nodata = raster2.nodata
raster2_src = raster2.read(1)
np.place(raster2_src, np.isclose(raster2_src, raster2_nodata),
0.0)
prefix = utils.get_iteration_prefix(no_computation,
n_computations)
llogger.info(("{} Calculating Jaccard ".format(prefix) +
"index for [{0}, {1}] ".format(x_min, x_max) +
"in {} ".format(input_rasters[i]) +
"and, [{0}, {1}] ".format(y_min, y_max) +
"in {}".format(input_rasters[j])))
coef = compute_jaccard(raster1_src, raster2_src,
x_min=x_min, x_max=x_max,
y_min=y_min, y_max=y_max)
jaccards = pd.DataFrame({"feature1": [input_rasters[i]],
"feature2": [input_rasters[j]],
"threshold": [threshold],
"coef": [coef]})
all_jaccards = pd.concat([all_jaccards, jaccards])
no_computation += 1
all_jaccards.index = np.arange(0, len(all_jaccards.index), 1)
all_end = timer()
all_elapsed = round(all_end - all_start, 2)
llogger.info(" [TIME] All processing took {} sec".format(all_elapsed))
return all_jaccards
def compute_mcs(a, b):
""" Compute MCS between vectors a and b.
:param a numeric vector.
:param b numeric vector.
:return ndarray of computed MCS scores.
"""
assert len(a) == len(b), "Vectors a and b must be of same length"
N = len(a)
# Create an array filled with -1s to store the MCS.
mcs = 0
nans = False
for i in range(0, N):
if np.isnan(a[i]) or np.isnan(b[i]):
nans = True
else:
            # If either a or b is 0, do nothing as division would fail
if a[i] == 0.0 or b[i] == 0.0:
pass
else:
abs_subs = np.abs(a[i] - b[i]) / np.max([a[i], b[i]])
mcs += abs_subs
if nans:
print("WARNING: a and/or b contain NaNs")
return mcs / N
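# Illustrative sketch (editor's addition, not part of the original module):
# a minimal usage example of compute_mcs() on two toy value vectors.
def _example_compute_mcs():
    """Return the MCS of two toy vectors (expected value: 0.25)."""
    a = np.array([2.0, 4.0])
    b = np.array([4.0, 4.0])
    # |2 - 4| / 4 = 0.5 for the first pair, 0 for the second -> mean 0.25
    return compute_mcs(a, b)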
def cross_mcs(input_vectors, value_fields, verbose=False, logger=None):
""" Compute map comparison statistics between input vector features.
MCS (Map Comparison Statistic) indicates the average difference between any
pair of feature polygon values, expressed as a fraction of the highest
value. MCS is calculated between each polygon in the input vector features
and it is required (and checked) that all the inputs are based on the
same vector feature.
For another application of MCS, see:
Schulp, C. J. E., Burkhard, B., Maes, J., Van Vliet, J., & Verburg, P. H.
(2014). Uncertainties in Ecosystem Service Maps: A Comparison on the
European Scale. PLoS ONE, 9(10), e109643.
http://doi.org/10.1371/journal.pone.0109643
:param input_vectors list of input vector paths.
    :param value_fields list of String names indicating which fields contain
                        the values to be compared.
:param verbose: Boolean indicating how much information is printed out.
:param logger: logger object to be used.
:return list of GeoPandas Dataframe with MCS between all rasters in field
"mcs".
"""
# 1. Setup --------------------------------------------------------------
all_start = timer()
if not logger:
logging.basicConfig()
llogger = logging.getLogger('cross_mcs')
llogger.setLevel(logging.DEBUG if verbose else logging.INFO)
else:
llogger = logger
# Check the inputs
assert len(input_vectors) > 1, "More than one input vector needed"
assert len(value_fields) == len(input_vectors), "One value field per vector feature needed"
# 2. Calculations --------------------------------------------------------
llogger.info(" [** COMPUTING MCS SCORES **]")
all_mcs = pd.DataFrame({"feature1": [], "feature2": [],
"mcs": []})
n_vectors = len(input_vectors)
# Generate counter information for all the computations. The results
# matrix is always diagonally symmetrical.
n_computations = int((n_vectors * n_vectors - n_vectors) / 2)
no_computation = 1
for i in range(0, n_vectors):
# Read in the data as a GeoPandas dataframe
vector1_path = input_vectors[i]
vector1 = gpd.read_file(vector1_path)
for j in range(i+1, n_vectors):
vector2_path = input_vectors[j]
vector2 = gpd.read_file(vector2_path)
prefix = utils.get_iteration_prefix(no_computation,
n_computations)
llogger.info(("{} Calculating MCS ".format(prefix) +
"between {} ".format(vector1_path) +
"and {}".format(vector2_path)))
a = vector1[value_fields[i]]
b = vector2[value_fields[j]]
mcs_value = compute_mcs(a, b)
mcs = pd.DataFrame({"feature1": [vector1_path],
"feature2": [vector2_path],
"mcs": [mcs_value]})
all_mcs = pd.concat([all_mcs, mcs])
no_computation += 1
all_mcs.index = np.arange(0, len(all_mcs.index), 1)
all_end = timer()
all_elapsed = round(all_end - all_start, 2)
llogger.info(" [TIME] All processing took {} sec".format(all_elapsed))
return all_mcs
def plu_variation(input_files, input_codes, logger=None):
""" Compute per planning unit (PLU) variation statistics.
    Given a list of input features describing the same planning units,
    calculate statistics based on the defined field names.
:param input_files: String list of paths to input (vector) features.
:param input_codes: String list of field names corresponding to each
                        input feature. The statistics will be calculated based
                        on these fields.
:param logger: Logger object.
:return: GeoPandas DataFrame object.
"""
# Set up logging
if not logger:
logging.basicConfig()
llogger = logging.getLogger('plu_variation')
llogger.setLevel(logging.INFO)
else:
llogger = logger
n_features = len(input_files)
# Create an empty DataFrame to store the rank priority cols
rank_values = pd.DataFrame({'NUTS_ID': []})
llogger.info("[1/2] Reading in {} features...".format(n_features))
for i, feature_file in enumerate(input_files):
feature_code = input_codes[i]
prefix = utils.get_iteration_prefix(i+1, n_features)
llogger.debug("{0} Processing feature {1}".format(prefix,
feature_file))
# Read in the feature as GeoPandas dataframe
feat_data = gpd.read_file(feature_file)
# Two different field names are used to store the mean rank
# information: "_mean" for geojson-files and 'Men_rnk' for
# shapefiles. Figure out which is currently used.
if '_mean' in feat_data.columns:
mean_field = '_mean'
elif 'Men_rnk' in feat_data.columns:
mean_field = 'Men_rnk'
else:
llogger.error("Field '_mean' or 'Men_rnk' not found")
raise ValueError
# On first iteration, also get the NUTS_ID column
        if i == 0:
rank_values['NUTS_ID'] = feat_data['NUTS_ID']
# Get the rank priority column and place if the store DataFrame
rank_values[feature_code] = feat_data[mean_field]
llogger.info("[2/2] Calculating mean and STD...")
# Read in the first input feature to act as a template.
output_feature = gpd.read_file(input_files[0])
# Only take one field: NUTS_ID
output_feature = output_feature[['geometry', 'NUTS_ID']]
# Merge with the collected data
output_feature = output_feature.merge(rank_values, on='NUTS_ID')
# Calculate mean
agg_means = output_feature.mean(1)
# Calculate STD
agg_stds = output_feature.std(1)
output_feature['agg_mean'] = agg_means
output_feature['agg_std'] = agg_stds
return output_feature
| mit |
tawsifkhan/scikit-learn | sklearn/tree/tree.py | 113 | 34767 | """
This module gathers tree-based methods, including decision, regression and
randomized trees. Single and multi-output problems are both handled.
"""
# Authors: Gilles Louppe <g.louppe@gmail.com>
# Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Brian Holt <bdholt1@gmail.com>
# Noel Dawe <noel@dawe.me>
# Satrajit Gosh <satrajit.ghosh@gmail.com>
# Joly Arnaud <arnaud.v.joly@gmail.com>
# Fares Hedayati <fares.hedayati@gmail.com>
#
# Licence: BSD 3 clause
from __future__ import division
import numbers
from abc import ABCMeta, abstractmethod
import numpy as np
from scipy.sparse import issparse
from ..base import BaseEstimator, ClassifierMixin, RegressorMixin
from ..externals import six
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_array, check_random_state, compute_sample_weight
from ..utils.validation import NotFittedError
from ._tree import Criterion
from ._tree import Splitter
from ._tree import DepthFirstTreeBuilder, BestFirstTreeBuilder
from ._tree import Tree
from . import _tree
__all__ = ["DecisionTreeClassifier",
"DecisionTreeRegressor",
"ExtraTreeClassifier",
"ExtraTreeRegressor"]
# =============================================================================
# Types and constants
# =============================================================================
DTYPE = _tree.DTYPE
DOUBLE = _tree.DOUBLE
CRITERIA_CLF = {"gini": _tree.Gini, "entropy": _tree.Entropy}
CRITERIA_REG = {"mse": _tree.MSE, "friedman_mse": _tree.FriedmanMSE}
DENSE_SPLITTERS = {"best": _tree.BestSplitter,
"presort-best": _tree.PresortBestSplitter,
"random": _tree.RandomSplitter}
SPARSE_SPLITTERS = {"best": _tree.BestSparseSplitter,
"random": _tree.RandomSparseSplitter}
# =============================================================================
# Base decision tree
# =============================================================================
class BaseDecisionTree(six.with_metaclass(ABCMeta, BaseEstimator,
_LearntSelectorMixin)):
"""Base class for decision trees.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def __init__(self,
criterion,
splitter,
max_depth,
min_samples_split,
min_samples_leaf,
min_weight_fraction_leaf,
max_features,
max_leaf_nodes,
random_state,
class_weight=None):
self.criterion = criterion
self.splitter = splitter
self.max_depth = max_depth
self.min_samples_split = min_samples_split
self.min_samples_leaf = min_samples_leaf
self.min_weight_fraction_leaf = min_weight_fraction_leaf
self.max_features = max_features
self.random_state = random_state
self.max_leaf_nodes = max_leaf_nodes
self.class_weight = class_weight
self.n_features_ = None
self.n_outputs_ = None
self.classes_ = None
self.n_classes_ = None
self.tree_ = None
self.max_features_ = None
def fit(self, X, y, sample_weight=None, check_input=True):
"""Build a decision tree from the training set (X, y).
Parameters
----------
X : array-like or sparse matrix, shape = [n_samples, n_features]
The training input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csc_matrix``.
y : array-like, shape = [n_samples] or [n_samples, n_outputs]
The target values (class labels in classification, real numbers in
regression). In the regression case, use ``dtype=np.float64`` and
``order='C'`` for maximum efficiency.
sample_weight : array-like, shape = [n_samples] or None
Sample weights. If None, then samples are equally weighted. Splits
that would create child nodes with net zero or negative weight are
ignored while searching for a split in each node. In the case of
classification, splits are also ignored if they would result in any
single class carrying a negative weight in either child node.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
self : object
Returns self.
"""
random_state = check_random_state(self.random_state)
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csc")
if issparse(X):
X.sort_indices()
if X.indices.dtype != np.intc or X.indptr.dtype != np.intc:
raise ValueError("No support for np.int64 index based "
"sparse matrices")
# Determine output settings
n_samples, self.n_features_ = X.shape
is_classification = isinstance(self, ClassifierMixin)
y = np.atleast_1d(y)
expanded_class_weight = None
if y.ndim == 1:
            # reshape is necessary to preserve the data contiguity, unlike
            # [:, np.newaxis] which does not.
y = np.reshape(y, (-1, 1))
self.n_outputs_ = y.shape[1]
if is_classification:
y = np.copy(y)
self.classes_ = []
self.n_classes_ = []
if self.class_weight is not None:
y_original = np.copy(y)
y_store_unique_indices = np.zeros(y.shape, dtype=np.int)
for k in range(self.n_outputs_):
classes_k, y_store_unique_indices[:, k] = np.unique(y[:, k], return_inverse=True)
self.classes_.append(classes_k)
self.n_classes_.append(classes_k.shape[0])
y = y_store_unique_indices
if self.class_weight is not None:
expanded_class_weight = compute_sample_weight(
self.class_weight, y_original)
else:
self.classes_ = [None] * self.n_outputs_
self.n_classes_ = [1] * self.n_outputs_
self.n_classes_ = np.array(self.n_classes_, dtype=np.intp)
if getattr(y, "dtype", None) != DOUBLE or not y.flags.contiguous:
y = np.ascontiguousarray(y, dtype=DOUBLE)
# Check parameters
max_depth = ((2 ** 31) - 1 if self.max_depth is None
else self.max_depth)
max_leaf_nodes = (-1 if self.max_leaf_nodes is None
else self.max_leaf_nodes)
if isinstance(self.max_features, six.string_types):
if self.max_features == "auto":
if is_classification:
max_features = max(1, int(np.sqrt(self.n_features_)))
else:
max_features = self.n_features_
elif self.max_features == "sqrt":
max_features = max(1, int(np.sqrt(self.n_features_)))
elif self.max_features == "log2":
max_features = max(1, int(np.log2(self.n_features_)))
else:
raise ValueError(
'Invalid value for max_features. Allowed string '
'values are "auto", "sqrt" or "log2".')
elif self.max_features is None:
max_features = self.n_features_
elif isinstance(self.max_features, (numbers.Integral, np.integer)):
max_features = self.max_features
else: # float
if self.max_features > 0.0:
max_features = max(1, int(self.max_features * self.n_features_))
else:
max_features = 0
self.max_features_ = max_features
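        # For example (editor's illustration): with n_features_ = 100, both
        # "sqrt" and "auto" (for classification) resolve to max_features_ = 10,
        # "log2" resolves to 6, and a float of 0.25 resolves to 25.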
if len(y) != n_samples:
raise ValueError("Number of labels=%d does not match "
"number of samples=%d" % (len(y), n_samples))
if self.min_samples_split <= 0:
raise ValueError("min_samples_split must be greater than zero.")
if self.min_samples_leaf <= 0:
raise ValueError("min_samples_leaf must be greater than zero.")
if not 0 <= self.min_weight_fraction_leaf <= 0.5:
raise ValueError("min_weight_fraction_leaf must in [0, 0.5]")
if max_depth <= 0:
raise ValueError("max_depth must be greater than zero. ")
if not (0 < max_features <= self.n_features_):
raise ValueError("max_features must be in (0, n_features]")
if not isinstance(max_leaf_nodes, (numbers.Integral, np.integer)):
raise ValueError("max_leaf_nodes must be integral number but was "
"%r" % max_leaf_nodes)
if -1 < max_leaf_nodes < 2:
raise ValueError(("max_leaf_nodes {0} must be either smaller than "
"0 or larger than 1").format(max_leaf_nodes))
if sample_weight is not None:
if (getattr(sample_weight, "dtype", None) != DOUBLE or
not sample_weight.flags.contiguous):
sample_weight = np.ascontiguousarray(
sample_weight, dtype=DOUBLE)
if len(sample_weight.shape) > 1:
raise ValueError("Sample weights array has more "
"than one dimension: %d" %
len(sample_weight.shape))
if len(sample_weight) != n_samples:
raise ValueError("Number of weights=%d does not match "
"number of samples=%d" %
(len(sample_weight), n_samples))
if expanded_class_weight is not None:
if sample_weight is not None:
sample_weight = sample_weight * expanded_class_weight
else:
sample_weight = expanded_class_weight
# Set min_weight_leaf from min_weight_fraction_leaf
if self.min_weight_fraction_leaf != 0. and sample_weight is not None:
min_weight_leaf = (self.min_weight_fraction_leaf *
np.sum(sample_weight))
else:
min_weight_leaf = 0.
# Set min_samples_split sensibly
min_samples_split = max(self.min_samples_split,
2 * self.min_samples_leaf)
# Build tree
criterion = self.criterion
if not isinstance(criterion, Criterion):
if is_classification:
criterion = CRITERIA_CLF[self.criterion](self.n_outputs_,
self.n_classes_)
else:
criterion = CRITERIA_REG[self.criterion](self.n_outputs_)
SPLITTERS = SPARSE_SPLITTERS if issparse(X) else DENSE_SPLITTERS
splitter = self.splitter
if not isinstance(self.splitter, Splitter):
splitter = SPLITTERS[self.splitter](criterion,
self.max_features_,
self.min_samples_leaf,
min_weight_leaf,
random_state)
self.tree_ = Tree(self.n_features_, self.n_classes_, self.n_outputs_)
# Use BestFirst if max_leaf_nodes given; use DepthFirst otherwise
if max_leaf_nodes < 0:
builder = DepthFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth)
else:
builder = BestFirstTreeBuilder(splitter, min_samples_split,
self.min_samples_leaf,
min_weight_leaf,
max_depth,
max_leaf_nodes)
builder.build(self.tree_, X, y, sample_weight)
if self.n_outputs_ == 1:
self.n_classes_ = self.n_classes_[0]
self.classes_ = self.classes_[0]
return self
def _validate_X_predict(self, X, check_input):
"""Validate X whenever one tries to predict, apply, predict_proba"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, "
"call `fit` before exploiting the model.")
if check_input:
X = check_array(X, dtype=DTYPE, accept_sparse="csr")
if issparse(X) and (X.indices.dtype != np.intc or
X.indptr.dtype != np.intc):
raise ValueError("No support for np.int64 index based "
"sparse matrices")
n_features = X.shape[1]
if self.n_features_ != n_features:
raise ValueError("Number of features of the model must "
" match the input. Model n_features is %s and "
" input n_features is %s "
% (self.n_features_, n_features))
return X
def predict(self, X, check_input=True):
"""Predict class or regression value for X.
For a classification model, the predicted class for each sample in X is
returned. For a regression model, the predicted value based on X is
returned.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
y : array of shape = [n_samples] or [n_samples, n_outputs]
The predicted classes, or the predict values.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
n_samples = X.shape[0]
# Classification
if isinstance(self, ClassifierMixin):
if self.n_outputs_ == 1:
return self.classes_.take(np.argmax(proba, axis=1), axis=0)
else:
predictions = np.zeros((n_samples, self.n_outputs_))
for k in range(self.n_outputs_):
predictions[:, k] = self.classes_[k].take(
np.argmax(proba[:, k], axis=1),
axis=0)
return predictions
# Regression
else:
if self.n_outputs_ == 1:
return proba[:, 0]
else:
return proba[:, :, 0]
def apply(self, X, check_input=True):
"""
Returns the index of the leaf that each sample is predicted as.
Parameters
----------
X : array_like or sparse matrix, shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allow to bypass several input checking.
Don't use this parameter unless you know what you do.
Returns
-------
X_leaves : array_like, shape = [n_samples,]
For each datapoint x in X, return the index of the leaf x
ends up in. Leaves are numbered within
``[0; self.tree_.node_count)``, possibly with gaps in the
numbering.
"""
X = self._validate_X_predict(X, check_input)
return self.tree_.apply(X)
@property
def feature_importances_(self):
"""Return the feature importances.
The importance of a feature is computed as the (normalized) total
reduction of the criterion brought by that feature.
It is also known as the Gini importance.
Returns
-------
feature_importances_ : array, shape = [n_features]
"""
if self.tree_ is None:
raise NotFittedError("Estimator not fitted, call `fit` before"
" `feature_importances_`.")
return self.tree_.compute_feature_importances()
# =============================================================================
# Public estimators
# =============================================================================
class DecisionTreeClassifier(BaseDecisionTree, ClassifierMixin):
"""A decision tree classifier.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="gini")
The function to measure the quality of a split. Supported criteria are
"gini" for the Gini impurity and "entropy" for the information gain.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=sqrt(n_features)`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires to
effectively inspect more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
class_weight : dict, list of dicts, "balanced" or None, optional
(default=None)
Weights associated with classes in the form ``{class_label: weight}``.
If not given, all classes are supposed to have weight one. For
multi-output problems, a list of dicts can be provided in the same
order as the columns of y.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
For multi-output, the weights of each column of y will be multiplied.
Note that these weights will be multiplied with sample_weight (passed
through the fit method) if sample_weight is specified.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
classes_ : array of shape = [n_classes] or a list of such arrays
The classes labels (single output problem),
or a list of arrays of class labels (multi-output problem).
feature_importances_ : array of shape = [n_features]
The feature importances. The higher, the more important the
feature. The importance of a feature is computed as the (normalized)
total reduction of the criterion brought by that feature. It is also
known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_classes_ : int or list
The number of classes (for single output problems),
or a list containing the number of classes for each
output (for multi-output problems).
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeRegressor
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_iris
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeClassifier
>>> clf = DecisionTreeClassifier(random_state=0)
>>> iris = load_iris()
>>> cross_val_score(clf, iris.data, iris.target, cv=10)
... # doctest: +SKIP
...
array([ 1. , 0.93..., 0.86..., 0.93..., 0.93...,
0.93..., 0.93..., 1. , 0.93..., 1. ])
"""
def __init__(self,
criterion="gini",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(DecisionTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
def predict_proba(self, X, check_input=True):
"""Predict class probabilities of the input samples X.
The predicted class probability is the fraction of samples of the same
class in a leaf.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
check_input : boolean, (default=True)
Allows bypassing several input checks.
Don't use this parameter unless you know what you are doing.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
X = self._validate_X_predict(X, check_input)
proba = self.tree_.predict(X)
if self.n_outputs_ == 1:
proba = proba[:, :self.n_classes_]
normalizer = proba.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba /= normalizer
return proba
else:
all_proba = []
for k in range(self.n_outputs_):
proba_k = proba[:, k, :self.n_classes_[k]]
normalizer = proba_k.sum(axis=1)[:, np.newaxis]
normalizer[normalizer == 0.0] = 1.0
proba_k /= normalizer
all_proba.append(proba_k)
return all_proba
def predict_log_proba(self, X):
"""Predict class log-probabilities of the input samples X.
Parameters
----------
X : array-like or sparse matrix of shape = [n_samples, n_features]
The input samples. Internally, it will be converted to
``dtype=np.float32`` and if a sparse matrix is provided
to a sparse ``csr_matrix``.
Returns
-------
p : array of shape = [n_samples, n_classes], or a list of n_outputs
such arrays if n_outputs > 1.
The class log-probabilities of the input samples. The order of the
classes corresponds to that in the attribute `classes_`.
"""
proba = self.predict_proba(X)
if self.n_outputs_ == 1:
return np.log(proba)
else:
for k in range(self.n_outputs_):
proba[k] = np.log(proba[k])
return proba
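# Illustrative sketch (not part of the original module): typical use of the
# probability API defined above. Assumes scikit-learn and its bundled iris
# dataset are available; the helper name is hypothetical and the function is
# not called anywhere.
def _demo_predict_proba():
    from sklearn.datasets import load_iris
    from sklearn.tree import DecisionTreeClassifier
    iris = load_iris()
    clf = DecisionTreeClassifier(max_depth=3, random_state=0)
    clf.fit(iris.data, iris.target)
    # Rows of predict_proba sum to 1; columns follow the order of clf.classes_.
    proba = clf.predict_proba(iris.data[:5])
    # predict_log_proba is simply the elementwise log of predict_proba.
    log_proba = clf.predict_log_proba(iris.data[:5])
    return proba, log_proba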
class DecisionTreeRegressor(BaseDecisionTree, RegressorMixin):
"""A decision tree regressor.
Read more in the :ref:`User Guide <tree>`.
Parameters
----------
criterion : string, optional (default="mse")
The function to measure the quality of a split. The only supported
criterion is "mse" for the mean squared error, which is equal to
variance reduction as feature selection criterion.
splitter : string, optional (default="best")
The strategy used to choose the split at each node. Supported
strategies are "best" to choose the best split and "random" to choose
the best random split.
max_features : int, float, string or None, optional (default=None)
The number of features to consider when looking for the best split:
- If int, then consider `max_features` features at each split.
- If float, then `max_features` is a percentage and
`int(max_features * n_features)` features are considered at each
split.
- If "auto", then `max_features=n_features`.
- If "sqrt", then `max_features=sqrt(n_features)`.
- If "log2", then `max_features=log2(n_features)`.
- If None, then `max_features=n_features`.
Note: the search for a split does not stop until at least one
valid partition of the node samples is found, even if it requires
effectively inspecting more than ``max_features`` features.
max_depth : int or None, optional (default=None)
The maximum depth of the tree. If None, then nodes are expanded until
all leaves are pure or until all leaves contain less than
min_samples_split samples.
Ignored if ``max_leaf_nodes`` is not None.
min_samples_split : int, optional (default=2)
The minimum number of samples required to split an internal node.
min_samples_leaf : int, optional (default=1)
The minimum number of samples required to be at a leaf node.
min_weight_fraction_leaf : float, optional (default=0.)
The minimum weighted fraction of the input samples required to be at a
leaf node.
max_leaf_nodes : int or None, optional (default=None)
Grow a tree with ``max_leaf_nodes`` in best-first fashion.
Best nodes are defined as relative reduction in impurity.
If None then unlimited number of leaf nodes.
If not None then ``max_depth`` will be ignored.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Attributes
----------
feature_importances_ : array of shape = [n_features]
The feature importances.
The higher, the more important the feature.
The importance of a feature is computed as the
(normalized) total reduction of the criterion brought
by that feature. It is also known as the Gini importance [4]_.
max_features_ : int,
The inferred value of max_features.
n_features_ : int
The number of features when ``fit`` is performed.
n_outputs_ : int
The number of outputs when ``fit`` is performed.
tree_ : Tree object
The underlying Tree object.
See also
--------
DecisionTreeClassifier
References
----------
.. [1] http://en.wikipedia.org/wiki/Decision_tree_learning
.. [2] L. Breiman, J. Friedman, R. Olshen, and C. Stone, "Classification
and Regression Trees", Wadsworth, Belmont, CA, 1984.
.. [3] T. Hastie, R. Tibshirani and J. Friedman. "Elements of Statistical
Learning", Springer, 2009.
.. [4] L. Breiman, and A. Cutler, "Random Forests",
http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm
Examples
--------
>>> from sklearn.datasets import load_boston
>>> from sklearn.cross_validation import cross_val_score
>>> from sklearn.tree import DecisionTreeRegressor
>>> boston = load_boston()
>>> regressor = DecisionTreeRegressor(random_state=0)
>>> cross_val_score(regressor, boston.data, boston.target, cv=10)
... # doctest: +SKIP
...
array([ 0.61..., 0.57..., -0.34..., 0.41..., 0.75...,
0.07..., 0.29..., 0.33..., -1.42..., -1.77...])
"""
def __init__(self,
criterion="mse",
splitter="best",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features=None,
random_state=None,
max_leaf_nodes=None):
super(DecisionTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
class ExtraTreeClassifier(DecisionTreeClassifier):
"""An extremely randomized tree classifier.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeRegressor, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="gini",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None,
class_weight=None):
super(ExtraTreeClassifier, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
class_weight=class_weight,
random_state=random_state)
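# Illustrative sketch (not part of the original module): per the warning in
# the docstring above, an ExtraTreeClassifier is normally used as the base
# estimator of an ensemble rather than on its own. Assumes scikit-learn is
# installed; the helper name is hypothetical and the function is not called.
def _demo_extra_tree_in_ensemble():
    from sklearn.datasets import load_iris
    from sklearn.ensemble import BaggingClassifier
    from sklearn.tree import ExtraTreeClassifier
    iris = load_iris()
    # Each base tree draws one random split per candidate feature and keeps
    # the best of those random candidates.
    ensemble = BaggingClassifier(ExtraTreeClassifier(), n_estimators=10,
                                 random_state=0)
    return ensemble.fit(iris.data, iris.target).score(iris.data, iris.target)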
class ExtraTreeRegressor(DecisionTreeRegressor):
"""An extremely randomized tree regressor.
Extra-trees differ from classic decision trees in the way they are built.
When looking for the best split to separate the samples of a node into two
groups, random splits are drawn for each of the `max_features` randomly
selected features and the best split among those is chosen. When
`max_features` is set to 1, this amounts to building a totally random
decision tree.
Warning: Extra-trees should only be used within ensemble methods.
Read more in the :ref:`User Guide <tree>`.
See also
--------
ExtraTreeClassifier, ExtraTreesClassifier, ExtraTreesRegressor
References
----------
.. [1] P. Geurts, D. Ernst., and L. Wehenkel, "Extremely randomized trees",
Machine Learning, 63(1), 3-42, 2006.
"""
def __init__(self,
criterion="mse",
splitter="random",
max_depth=None,
min_samples_split=2,
min_samples_leaf=1,
min_weight_fraction_leaf=0.,
max_features="auto",
random_state=None,
max_leaf_nodes=None):
super(ExtraTreeRegressor, self).__init__(
criterion=criterion,
splitter=splitter,
max_depth=max_depth,
min_samples_split=min_samples_split,
min_samples_leaf=min_samples_leaf,
min_weight_fraction_leaf=min_weight_fraction_leaf,
max_features=max_features,
max_leaf_nodes=max_leaf_nodes,
random_state=random_state)
| bsd-3-clause |
AlexGidiotis/Multimodal-Gesture-Recognition-with-LSTMs-and-CTC | multimodal_fusion/sequence_decoding.py | 1 | 3585 |
import pandas as pd
import numpy as np
from operator import itemgetter
from itertools import groupby
from keras.preprocessing import sequence
from keras.models import Sequential
from keras.models import model_from_json
from keras import backend as K
from keras.optimizers import RMSprop
import keras.callbacks
from keras.layers import Input, Lambda
from keras.models import Model
import itertools
from sklearn import preprocessing
from data_generator import DataGenerator
from losses import ctc_lambda_func
def decode_batch(pred_out, f_list):
"""Greedily decode a batch of softmax outputs into gesture label sequences.
For every sequence in the batch, frames predicted with probability below
0.5 are dropped, consecutive repeated labels are collapsed, and the label
codes are mapped to gesture classes. The decoded sequences are written to
'final_ctc_recout.mlf' in HTK MLF format and also returned as a list of
label-string lists.
"""
# Map gesture codes to classes.
map_gest = {0:"oov", 1:"VA", 2:"VQ", 3:"PF", 4:"FU", 5:"CP", 6:"CV",
7:"DC", 8:"SP", 9:"CN", 10:"FN", 11:"OK", 12:"CF", 13:"BS",
14:"PR", 15:"NU", 16:"FM", 17:"TT", 18:"BN", 19:"MC",
20:"ST", 21:"sil"}
# These files are problematic during decoding.
ignore_list = [228,298,299,300,303,304,334,343,373,375]
# Write the output to .mlf
of = open('final_ctc_recout.mlf', 'w')
of.write('#!MLF!#\n')
out = pred_out
ret = []
for j in range(out.shape[0]):
out_prob = list(np.max(out[j, 2:],1))
out_best = list(np.argmax(out[j, 2:],1))
# Keep only the most confident predictions. Build new lists rather than
# calling remove() while iterating, which skips elements and can drop the
# wrong occurrence of a repeated value.
keep = [(p, s) for p, s in zip(out_prob, out_best) if p >= 0.5]
out_prob = [p for p, _ in keep]
out_best = [s for _, s in keep]
out_best = [k for k, g in itertools.groupby(out_best)]
outstr = [map_gest[i] for i in out_best]
ret.append(outstr)
f_num = f_list[j]
if int(f_num) in ignore_list:
continue
fileNum = str(format(f_num, '05'))
fileName = 'Sample'+fileNum
of.write('"*/%s.rec"\n' %fileName)
for cl in outstr:
of.write('%s\n' %cl)
of.write('.\n')
of.close()
return ret
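# Illustrative sketch (not part of the original script): the core of the
# greedy decoding performed above on a toy sequence -- drop low-confidence
# frames, then collapse runs of repeated labels. The helper name and the toy
# numbers are made up for illustration.
def _demo_greedy_collapse():
    probs = [0.9, 0.4, 0.95, 0.95, 0.8]
    labels = [1, 7, 2, 2, 21]
    kept = [s for p, s in zip(probs, labels) if p >= 0.5]
    # Runs of identical labels collapse to a single symbol: [1, 2, 21]
    return [k for k, _ in itertools.groupby(kept)]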
if __name__ == '__main__':
minibatch_size = 2
maxlen = 1900
nb_classes = 22
nb_epoch = 100
numfeats_speech = 39
numfeats_skeletal = 20
K.set_learning_phase(0)
dataset = raw_input('select train or val: ')
data_gen = DataGenerator(minibatch_size=minibatch_size,
numfeats_speech=numfeats_speech,
numfeats_skeletal=numfeats_skeletal,
maxlen=maxlen,
nb_classes=nb_classes,
dataset=dataset)
input_shape_a = (maxlen, numfeats_speech)
input_shape_s = (maxlen, numfeats_skeletal)
input_data_a = Input(name='the_input_audio', shape=input_shape_a, dtype='float32')
input_data_s = Input(name='the_input_skeletal', shape=input_shape_s, dtype='float32')
json_file = open('multimodal_ctc_blstm_model.json', 'r')
loaded_model_json = json_file.read()
json_file.close()
loaded_model = model_from_json(loaded_model_json)
loaded_model.load_weights("multimodal_ctc_lstm_weights_best.h5")
print("Loaded model from disk")
y_pred = loaded_model.get_layer('softmax').output
labels = Input(name='the_labels', shape=[data_gen.absolute_max_sequence_len], dtype='float32')
input_length = Input(name='input_length', shape=[1], dtype='int64')
label_length = Input(name='label_length', shape=[1], dtype='int64')
loss_out = Lambda(ctc_lambda_func, output_shape=(1,), name="ctc")([y_pred, labels, input_length, label_length])
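# These CTC inputs and the loss layer mirror the training graph; decoding
# below only needs the softmax output of the loaded model (see pred_model).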
rmsprop = RMSprop(lr=0.001, clipnorm=5)
# the loss calc occurs elsewhere, so use a dummy lambda func for the loss
loaded_model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer=rmsprop)
pred_model = Model(inputs=loaded_model.input,
outputs=loaded_model.get_layer('softmax').output)
predictions = pred_model.predict_generator(generator=data_gen.next_val(),
steps=data_gen.get_size(train=False)/minibatch_size,
verbose=1)
f_list = data_gen.get_file_list(train=False)
decoded_res = decode_batch(predictions, f_list)
| mit |
juharris/tensorflow | tensorflow/contrib/learn/python/learn/estimators/dnn_linear_combined_test.py | 2 | 43185 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for DNNLinearCombinedEstimators."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import tempfile
import numpy as np
import tensorflow as tf
from tensorflow.contrib.learn.python.learn.estimators import _sklearn
from tensorflow.contrib.learn.python.learn.estimators import estimator_test_utils
def _get_quantile_based_buckets(feature_values, num_buckets):
quantiles = np.percentile(
np.array(feature_values), ([100 * (i + 1.) / (num_buckets + 1.)
for i in range(num_buckets)]))
return list(quantiles)
def _prepare_iris_data_for_logistic_regression():
# Converts iris data to a logistic regression problem.
iris = tf.contrib.learn.datasets.load_iris()
ids = np.where((iris.target == 0) | (iris.target == 1))
iris = tf.contrib.learn.datasets.base.Dataset(data=iris.data[ids],
target=iris.target[ids])
return iris
def _iris_input_multiclass_fn():
iris = tf.contrib.learn.datasets.load_iris()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[150, 1], dtype=tf.int32)
def _iris_input_logistic_fn():
iris = _prepare_iris_data_for_logistic_regression()
return {
'feature': tf.constant(iris.data, dtype=tf.float32)
}, tf.constant(iris.target, shape=[100, 1], dtype=tf.int32)
class DNNLinearCombinedClassifierTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.DNNLinearCombinedClassifier)
def testLogisticRegression_MatrixData(self):
"""Tests binary classification using matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_feature = [tf.contrib.layers.bucketized_column(
cont_features[0], _get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_feature,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_logistic_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testLogisticRegression_TensorData(self):
"""Tests binary classification using Tensor data as input."""
def _input_fn():
iris = _prepare_iris_data_for_logistic_regression()
features = {}
for i in range(4):
# The following shows how to provide the Tensor data for
# RealValuedColumns.
features.update({
str(i): tf.reshape(tf.constant(iris.data[:, i], dtype=tf.float32),
[-1, 1])})
# The following shows how to provide the SparseTensor data for
# a SparseColumn.
features['dummy_sparse_column'] = tf.SparseTensor(
values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [60, 0]],
shape=[len(iris.target), 2])
target = tf.reshape(tf.constant(iris.target, dtype=tf.int32), [-1, 1])
return features, target
iris = _prepare_iris_data_for_logistic_regression()
cont_features = [tf.contrib.layers.real_valued_column(str(i))
for i in range(4)]
linear_features = [
tf.contrib.layers.bucketized_column(
cont_features[i], _get_quantile_based_buckets(iris.data[:, i],
10)) for i in range(4)
]
linear_features.append(tf.contrib.layers.sparse_column_with_hash_bucket(
'dummy_sparse_column', hash_bucket_size=100))
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=linear_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn():
features = {
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
target = tf.constant([[1], [0], [0]])
return features, target
sparse_features = [
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
tf.contrib.layers.sparse_column_with_hash_bucket('language',
hash_bucket_size=2e7)
]
embedding_features = [
tf.contrib.layers.embedding_column(sparse_features[0], dimension=1)
]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=sparse_features,
dnn_feature_columns=embedding_features,
dnn_hidden_units=[3, 3],
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({})))
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(input_fn=_input_fn, steps=1)
self.assertGreater(scores['accuracy'], 0.9)
def testMultiClass(self):
"""Tests multi-class classification using matrix data as input.
Please see testLogisticRegression_TensorData() for how to use Tensor
data as input instead.
"""
iris = tf.contrib.learn.datasets.load_iris()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0], _get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_iris_input_multiclass_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_multiclass_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testWeightAndBiasNames(self):
"""Tests that weight and bias names haven't changed."""
iris = tf.contrib.learn.datasets.load_iris()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0], _get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
n_classes=3,
linear_feature_columns=bucketized_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_iris_input_multiclass_fn, steps=100)
self.assertEquals(4, len(classifier.dnn_bias_))
self.assertEquals(3, len(classifier.dnn_weights_))
self.assertEquals(3, len(classifier.linear_bias_))
self.assertEquals(44, len(classifier.linear_weights_))
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
target = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, target
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
n_classes=2,
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_train, steps=1)
# Cross entropy = -0.25*log(0.25)-0.75*log(0.75) = 0.562
self.assertAlmostEqual(scores['loss'], 0.562, delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The logistic prediction should be (y = 0.25).
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
def _input_fn_eval():
# 4 rows, with different weights.
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, target
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
weight_column_name='w',
n_classes=2,
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted cross entropy = (-7*log(0.25)-3*log(0.75))/10 = 1.06
self.assertAlmostEqual(scores['loss'], 1.06, delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
target = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, target
def _input_fn_eval():
# Create 4 rows (y = x)
target = tf.constant([[1], [1], [1], [1]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
weight_column_name='w',
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn_train, steps=100)
scores = classifier.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the accuracy
# should be close to 1.
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByObject(self):
"""Tests binary classification using matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0], _get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=tf.train.FtrlOptimizer(learning_rate=0.1),
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=tf.train.AdagradOptimizer(learning_rate=0.1))
classifier.fit(input_fn=_iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_logistic_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByString(self):
"""Tests binary classification using matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0], _get_quantile_based_buckets(iris.data, 10))]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer='Ftrl',
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer='Adagrad')
classifier.fit(input_fn=_iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_logistic_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.9)
def testCustomOptimizerByFunction(self):
"""Tests binary classification using matrix data as input."""
iris = _prepare_iris_data_for_logistic_regression()
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)
]
bucketized_features = [
tf.contrib.layers.bucketized_column(
cont_features[0], _get_quantile_based_buckets(iris.data, 10))
]
def _optimizer_exp_decay():
global_step = tf.contrib.framework.get_global_step()
learning_rate = tf.train.exponential_decay(learning_rate=0.1,
global_step=global_step,
decay_steps=100,
decay_rate=0.001)
return tf.train.AdagradOptimizer(learning_rate=learning_rate)
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=bucketized_features,
linear_optimizer=_optimizer_exp_decay,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
dnn_optimizer=_optimizer_exp_decay)
classifier.fit(input_fn=_iris_input_logistic_fn, steps=100)
scores = classifier.evaluate(input_fn=_iris_input_logistic_fn, steps=100)
self.assertGreater(scores['accuracy'], 0.8)
def testPredict(self):
"""Tests weight column in evaluation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
target = tf.constant([[1], [0], [0], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32)}
return features, target
def _input_fn_predict():
y = tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=1)
features = {'x': y}
return features
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=100)
probs = list(classifier.predict_proba(input_fn=_input_fn_predict))
self.assertAllClose([[0.75, 0.25]] * 4, probs, 0.05)
classes = list(classifier.predict(input_fn=_input_fn_predict))
self.assertListEqual([0] * 4, classes)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
target = tf.constant([[1], [0], [0], [0]])
features = {
'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, target
def _my_metric_op(predictions, targets):
# For the case of binary classification, the 2nd column of "predictions"
# denotes the model predictions.
targets = tf.to_float(targets)
predictions = tf.slice(predictions, [0, 1], [-1, 1])
return tf.reduce_sum(tf.mul(predictions, targets))
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn, steps=100)
scores = classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
'my_accuracy': tf.contrib.metrics.streaming_accuracy,
('my_precision', 'classes'): tf.contrib.metrics.streaming_precision,
('my_metric', 'probabilities'): _my_metric_op
})
self.assertTrue(
set(['loss', 'my_accuracy', 'my_precision', 'my_metric'
]).issubset(set(scores.keys())))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(
list(classifier.predict(input_fn=predict_input_fn)))
self.assertEqual(_sklearn.accuracy_score([1, 0, 0, 0], predictions),
scores['my_accuracy'])
# Test the case where the 2nd element of the key is neither "classes" nor
# "probabilities".
with self.assertRaises(KeyError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={('bad_name', 'bad_type'): tf.contrib.metrics.streaming_auc})
# Test the case where the tuple of the key doesn't have 2 elements.
with self.assertRaises(ValueError):
classifier.evaluate(
input_fn=_input_fn,
steps=100,
metrics={
('bad_length_name', 'classes', 'bad_length'):
tf.contrib.metrics.streaming_accuracy
})
def testVariableQuery(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
target = tf.constant([[1], [1], [1], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
return features, target
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
var_names = classifier.get_variable_names()
self.assertGreater(len(var_names), 3)
for name in var_names:
classifier.get_variable_value(name)
def testCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
target = tf.constant([[1], [1], [1], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
return features, target
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=500)
# logodds(0.75) = 1.09861228867
self.assertAlmostEqual(
1.0986,
float(classifier.get_variable_value('centered_bias_weight')[0]),
places=2)
def testDisableCenteredBias(self):
"""Tests bias is centered or not."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
target = tf.constant([[1], [1], [1], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
return features, target
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
enable_centered_bias=False)
classifier.fit(input_fn=_input_fn_train, steps=500)
self.assertFalse('centered_bias_weight' in classifier.get_variable_names())
def testLinearOnly(self):
"""Tests that linear-only instantiation works."""
def input_fn():
return {
'age': tf.constant([1]),
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
age = tf.contrib.layers.real_valued_column('age')
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[age, language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
self.assertNotIn('dnn/logits/biases', classifier.get_variable_names())
self.assertNotIn('dnn/logits/weights', classifier.get_variable_names())
self.assertEquals(1, len(classifier.linear_bias_))
self.assertEquals(2, len(classifier.linear_weights_))
self.assertEquals(1, len(classifier.linear_weights_['linear/age/weight']))
self.assertEquals(
100, len(classifier.linear_weights_['linear/language/weights']))
def testLinearOnlyOneFeature(self):
"""Tests that linear-only instantiation works for one feature only."""
def input_fn():
return {
'language': tf.SparseTensor(values=['english'],
indices=[[0, 0]],
shape=[1, 1])
}, tf.constant([[1]])
language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 99)
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[language])
classifier.fit(input_fn=input_fn, steps=100)
loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
classifier.fit(input_fn=input_fn, steps=200)
loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
self.assertLess(loss2, loss1)
self.assertLess(loss2, 0.01)
self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
self.assertNotIn('dnn/logits/biases', classifier.get_variable_names())
self.assertNotIn('dnn/logits/weights', classifier.get_variable_names())
self.assertEquals(1, len(classifier.linear_bias_))
self.assertEquals(99, len(classifier.linear_weights_))
def testDNNOnly(self):
"""Tests that DNN-only instantiation works."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
n_classes=3, dnn_feature_columns=cont_features, dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_iris_input_multiclass_fn, steps=1000)
classifier.evaluate(input_fn=_iris_input_multiclass_fn, steps=100)
self.assertTrue('centered_bias_weight' in classifier.get_variable_names())
self.assertEquals(4, len(classifier.dnn_bias_))
self.assertEquals(3, len(classifier.dnn_weights_))
self.assertNotIn('linear/bias_weight', classifier.get_variable_names())
self.assertNotIn('linear/feature_BUCKETIZED_weights',
classifier.get_variable_names())
def testDNNWeightsBiasesNames(self):
"""Tests the names of DNN weights and biases in the checkpoints."""
def _input_fn_train():
# Create 4 rows, three (y = x), one (y=Not(x))
target = tf.constant([[1], [1], [1], [0]])
features = {'x': tf.ones(shape=[4, 1], dtype=tf.float32),}
return features, target
classifier = tf.contrib.learn.DNNLinearCombinedClassifier(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3])
classifier.fit(input_fn=_input_fn_train, steps=5)
# hiddenlayer_0/weights,hiddenlayer_1/weights and dnn_logits/weights.
self.assertEquals(3, len(classifier.dnn_weights_))
# hiddenlayer_0/biases, hiddenlayer_1/biases, dnn_logits/biases,
# centered_bias_weight.
self.assertEquals(4, len(classifier.dnn_bias_))
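# Illustrative sketch (not part of the original test file): the expected loss
# values hard-coded in testLoss and testLossWithWeights above can be
# reproduced with plain numpy. The helper name is hypothetical.
def _expected_classifier_losses():
    # Unweighted: the model converges to p = 0.25 for the positive class, so
    # cross entropy = -(0.25 * log(0.25) + 0.75 * log(0.75)) ~= 0.562.
    unweighted = -(0.25 * np.log(0.25) + 0.75 * np.log(0.75))
    # Weighted eval with weights (7, 1, 1, 1) on targets (1, 0, 0, 0) and the
    # same p = 0.25: (-7 * log(0.25) - 3 * log(0.75)) / 10 ~= 1.06.
    weighted = (-7 * np.log(0.25) - 3 * np.log(0.75)) / 10.
    return unweighted, weighted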
class DNNLinearCombinedRegressorTest(tf.test.TestCase):
def testEstimatorContract(self):
estimator_test_utils.assert_estimator_contract(
self, tf.contrib.learn.DNNLinearCombinedRegressor)
def testRegression_MatrixData(self):
"""Tests regression using matrix data as input."""
cont_features = [
tf.contrib.layers.real_valued_column('feature', dimension=4)]
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=cont_features,
dnn_feature_columns=cont_features,
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_iris_input_logistic_fn, steps=100)
scores = regressor.evaluate(input_fn=_iris_input_logistic_fn, steps=1)
self.assertLess(scores['loss'], 0.3)
def testRegression_TensorData(self):
"""Tests regression using tensor data as input."""
def _input_fn():
# Create 4 rows of (y = x)
target = tf.constant([[100.], [3.], [2.], [2.]])
features = {'x': tf.constant([[100.], [3.], [2.], [2.]])}
return features, target
classifier = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
classifier.fit(input_fn=_input_fn, steps=100)
classifier.evaluate(input_fn=_input_fn, steps=1)
def testLoss(self):
"""Tests loss calculation."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
}
return features, target
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_train, steps=1)
# Average square loss = (0.75^2 + 3*0.25^2) / 4 = 0.1875
self.assertAlmostEqual(scores['loss'], 0.1875, delta=0.1)
def testLossWithWeights(self):
"""Tests loss calculation with weights."""
def _input_fn_train():
# 4 rows with equal weight, one of them (y = x), three of them (y=Not(x))
# The algorithm should learn (y = 0.25).
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
def _input_fn_eval():
# 4 rows, with different weights.
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[7.], [1.], [1.], [1.]])
}
return features, target
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# Weighted average square loss = (7*0.75^2 + 3*0.25^2) / 10 = 0.4125
self.assertAlmostEqual(scores['loss'], 0.4125, delta=0.1)
def testTrainWithWeights(self):
"""Tests training with given weight column."""
def _input_fn_train():
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
# First row has more weight than others. Model should fit (y=x) better
# than (y=Not(x)) due to the relative higher weight of the first row.
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[100.], [3.], [2.], [2.]])
}
return features, target
def _input_fn_eval():
# Create 4 rows (y = x)
target = tf.constant([[1.], [1.], [1.], [1.]])
features = {
'x': tf.ones(shape=[4, 1], dtype=tf.float32),
'w': tf.constant([[1.], [1.], [1.], [1.]])
}
return features, target
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
weight_column_name='w',
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn_train, steps=100)
scores = regressor.evaluate(input_fn=_input_fn_eval, steps=1)
# The model should learn (y = x) because of the weights, so the loss should
# be close to zero.
self.assertLess(scores['loss'], 0.2)
def testPredict_AsIterableFalse(self):
"""Tests predict method with as_iterable=False."""
target = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(target, dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column,
tf.contrib.layers.real_valued_column('age')
],
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
predictions = regressor.predict(input_fn=_input_fn, as_iterable=False)
self.assertAllClose(predictions, target, atol=0.2)
def testPredict_AsIterable(self):
"""Tests predict method with as_iterable=True."""
target = [1., 0., 0.2]
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant(target, dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column,
tf.contrib.layers.real_valued_column('age')
],
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = list(
regressor.predict(input_fn=predict_input_fn, as_iterable=True))
self.assertAllClose(predictions, target, atol=0.2)
def testCustomMetrics(self):
"""Tests custom evaluation metrics."""
def _input_fn(num_epochs=None):
# Create 4 rows, one of them (y = x), three of them (y=Not(x))
target = tf.constant([[1.], [0.], [0.], [0.]])
features = {'x': tf.train.limit_epochs(
tf.ones(shape=[4, 1], dtype=tf.float32), num_epochs=num_epochs)}
return features, target
def _my_metric_op(predictions, targets):
return tf.reduce_sum(tf.mul(predictions, targets))
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={
'my_error': tf.contrib.metrics.streaming_mean_squared_error,
'my_metric': _my_metric_op
})
self.assertIn('loss', set(scores.keys()))
self.assertIn('my_error', set(scores.keys()))
self.assertIn('my_metric', set(scores.keys()))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
predictions = np.array(list(regressor.predict(input_fn=predict_input_fn)))
self.assertAlmostEqual(
_sklearn.mean_squared_error(np.array([1, 0, 0, 0]), predictions),
scores['my_error'])
# Tests that when the key is a tuple, an error is raised.
with self.assertRaises(KeyError):
regressor.evaluate(
input_fn=_input_fn,
steps=1,
metrics={('my_error', 'predictions'
): tf.contrib.metrics.streaming_mean_squared_error})
def testTrainSaveLoad(self):
"""Tests regression with restarting training / evaluate."""
def _input_fn(num_epochs=None):
# Create 4 rows of (y = x)
target = tf.constant([[100.], [3.], [2.], [2.]])
features = {'x': tf.train.limit_epochs(
tf.constant([[100.], [3.], [2.], [2.]]), num_epochs=num_epochs)}
return features, target
model_dir = tempfile.mkdtemp()
# pylint: disable=g-long-lambda
new_estimator = lambda: tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
model_dir=model_dir,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
predict_input_fn = functools.partial(_input_fn, num_epochs=1)
classifier = new_estimator()
classifier.fit(input_fn=_input_fn, steps=100)
predictions = list(classifier.predict(input_fn=predict_input_fn))
del classifier
classifier = new_estimator()
predictions2 = list(classifier.predict(input_fn=predict_input_fn))
self.assertAllClose(predictions, predictions2)
def testTrainWithPartitionedVariables(self):
"""Tests training with partitioned variables."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=2e7)
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column,
tf.contrib.layers.real_valued_column('age')
],
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
# Because we did not start a distributed cluster, we need to pass an
# empty ClusterSpec, otherwise the device_setter will look for
# distributed jobs, such as "/job:ps" which are not present.
config=tf.contrib.learn.RunConfig(
num_ps_replicas=2, cluster_spec=tf.train.ClusterSpec({}),
tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testDisableCenteredBias(self):
"""Tests that we can disable centered bias."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column,
tf.contrib.layers.real_valued_column('age')
],
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
enable_centered_bias=False,
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testLinearOnly(self):
"""Tests linear-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[
language_column,
tf.contrib.layers.real_valued_column('age')
],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
def testDNNOnly(self):
"""Tests DNN-only instantiation and training."""
def _input_fn(num_epochs=None):
features = {
'age': tf.train.limit_epochs(tf.constant([[0.8], [0.15], [0.]]),
num_epochs=num_epochs),
'language': tf.SparseTensor(values=['en', 'fr', 'zh'],
indices=[[0, 0], [0, 1], [2, 0]],
shape=[3, 2])
}
return features, tf.constant([1., 0., 0.2], dtype=tf.float32)
language_column = tf.contrib.layers.sparse_column_with_hash_bucket(
'language', hash_bucket_size=20)
regressor = tf.contrib.learn.DNNLinearCombinedRegressor(
dnn_feature_columns=[
tf.contrib.layers.embedding_column(language_column, dimension=1),
tf.contrib.layers.real_valued_column('age')
],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
regressor.fit(input_fn=_input_fn, steps=100)
scores = regressor.evaluate(input_fn=_input_fn, steps=1)
self.assertLess(scores['loss'], 0.2)
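# Illustrative sketch (not part of the original test file): the squared-loss
# targets asserted in the regressor loss tests above, reproduced directly.
# The helper name is hypothetical.
def _expected_regressor_losses():
    # Unweighted: predictions converge to 0.25, so the mean squared error is
    # (0.75 ** 2 + 3 * 0.25 ** 2) / 4 = 0.1875.
    unweighted = (0.75 ** 2 + 3 * 0.25 ** 2) / 4.
    # Weighted eval with weights (7, 1, 1, 1):
    # (7 * 0.75 ** 2 + 3 * 0.25 ** 2) / 10 = 0.4125.
    weighted = (7 * 0.75 ** 2 + 3 * 0.25 ** 2) / 10.
    return unweighted, weighted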
class FeatureEngineeringFunctionTest(tf.test.TestCase):
"""Tests feature_engineering_fn."""
def testNoneFeatureEngineeringFn(self):
def input_fn():
# Create 4 rows of (y = x)
target = tf.constant([[100.], [3.], [2.], [2.]])
features = {'x': tf.constant([[100.], [3.], [2.], [2.]])}
return features, target
def feature_engineering_fn(features, targets):
_, _ = features, targets
target = tf.constant([[1000.], [30.], [20.], [20.]])
features = {'x': tf.constant([[1000.], [30.], [20.], [20.]])}
return features, target
estimator_with_fe_fn = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1),
feature_engineering_fn=feature_engineering_fn)
estimator_with_fe_fn.fit(input_fn=input_fn, steps=100)
estimator_without_fe_fn = tf.contrib.learn.DNNLinearCombinedRegressor(
linear_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_feature_columns=[tf.contrib.layers.real_valued_column('x')],
dnn_hidden_units=[3, 3],
config=tf.contrib.learn.RunConfig(tf_random_seed=1))
estimator_without_fe_fn.fit(input_fn=input_fn, steps=100)
# predictions = y
prediction_with_fe_fn = next(
estimator_with_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(1000., prediction_with_fe_fn, delta=1.0)
prediction_without_fe_fn = next(
estimator_without_fe_fn.predict(input_fn=input_fn, as_iterable=True))
self.assertAlmostEqual(100., prediction_without_fe_fn, delta=1.0)
if __name__ == '__main__':
tf.test.main()
| apache-2.0 |
PrashntS/scikit-learn | sklearn/decomposition/tests/test_incremental_pca.py | 297 | 8265 | """Tests for Incremental PCA."""
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn import datasets
from sklearn.decomposition import PCA, IncrementalPCA
iris = datasets.load_iris()
def test_incremental_pca():
# Incremental PCA on dense arrays.
X = iris.data
batch_size = X.shape[0] // 3
ipca = IncrementalPCA(n_components=2, batch_size=batch_size)
pca = PCA(n_components=2)
pca.fit_transform(X)
X_transformed = ipca.fit_transform(X)
np.testing.assert_equal(X_transformed.shape, (X.shape[0], 2))
assert_almost_equal(ipca.explained_variance_ratio_.sum(),
pca.explained_variance_ratio_.sum(), 1)
for n_components in [1, 2, X.shape[1]]:
ipca = IncrementalPCA(n_components, batch_size=batch_size)
ipca.fit(X)
cov = ipca.get_covariance()
precision = ipca.get_precision()
assert_array_almost_equal(np.dot(cov, precision),
np.eye(X.shape[1]))
def test_incremental_pca_check_projection():
# Test that the projection of data is correct.
rng = np.random.RandomState(1999)
n, p = 100, 3
X = rng.randn(n, p) * .1
X[:10] += np.array([3, 4, 5])
Xt = 0.1 * rng.randn(1, p) + np.array([3, 4, 5])
# Get the reconstruction of the generated data X
# Note that Xt has the same "components" as X, just separated
# This is what we want to ensure is recreated correctly
Yt = IncrementalPCA(n_components=2).fit(X).transform(Xt)
# Normalize
Yt /= np.sqrt((Yt ** 2).sum())
# Make sure that the first element of Yt is ~1, this means
# the reconstruction worked as expected
assert_almost_equal(np.abs(Yt[0][0]), 1., 1)
def test_incremental_pca_inverse():
# Test that the projection of data can be inverted.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
ipca = IncrementalPCA(n_components=2, batch_size=10).fit(X)
Y = ipca.transform(X)
Y_inverse = ipca.inverse_transform(Y)
assert_almost_equal(X, Y_inverse, decimal=3)
def test_incremental_pca_validation():
# Test that n_components is >=1 and <= n_features.
X = [[0, 1], [1, 0]]
for n_components in [-1, 0, .99, 3]:
assert_raises(ValueError, IncrementalPCA(n_components,
batch_size=10).fit, X)
def test_incremental_pca_set_params():
# Test that partial_fit raises if n_components is changed via set_params.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 20
X = rng.randn(n_samples, n_features)
X2 = rng.randn(n_samples, n_features)
X3 = rng.randn(n_samples, n_features)
ipca = IncrementalPCA(n_components=20)
ipca.fit(X)
# Decreasing number of components
ipca.set_params(n_components=10)
assert_raises(ValueError, ipca.partial_fit, X2)
# Increasing number of components
ipca.set_params(n_components=15)
assert_raises(ValueError, ipca.partial_fit, X3)
# Returning to original setting
ipca.set_params(n_components=20)
ipca.partial_fit(X)
def test_incremental_pca_num_features_change():
# Test that changing the number of input features will raise an error.
rng = np.random.RandomState(1999)
n_samples = 100
X = rng.randn(n_samples, 20)
X2 = rng.randn(n_samples, 50)
ipca = IncrementalPCA(n_components=None)
ipca.fit(X)
assert_raises(ValueError, ipca.partial_fit, X2)
def test_incremental_pca_batch_signs():
# Test that components_ sign is stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(10, 20)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(np.sign(i), np.sign(j), decimal=6)
def test_incremental_pca_batch_values():
# Test that components_ values are stable over batch sizes.
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features)
all_components = []
batch_sizes = np.arange(20, 40, 3)
for batch_size in batch_sizes:
ipca = IncrementalPCA(n_components=None, batch_size=batch_size).fit(X)
all_components.append(ipca.components_)
for i, j in zip(all_components[:-1], all_components[1:]):
assert_almost_equal(i, j, decimal=1)
def test_incremental_pca_partial_fit():
# Test that fit and partial_fit get equivalent results.
rng = np.random.RandomState(1999)
n, p = 50, 3
X = rng.randn(n, p) # spherical data
X[:, 1] *= .00001 # make middle component relatively small
X += [5, 4, 3] # make a large mean
# same check that we can find the original data from the transformed
# signal (since the data is almost of rank n_components)
batch_size = 10
ipca = IncrementalPCA(n_components=2, batch_size=batch_size).fit(X)
pipca = IncrementalPCA(n_components=2, batch_size=batch_size)
# Add one to make sure endpoint is included
batch_itr = np.arange(0, n + 1, batch_size)
for i, j in zip(batch_itr[:-1], batch_itr[1:]):
pipca.partial_fit(X[i:j, :])
assert_almost_equal(ipca.components_, pipca.components_, decimal=3)
def test_incremental_pca_against_pca_iris():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
X = iris.data
Y_pca = PCA(n_components=2).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=2, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_incremental_pca_against_pca_random_data():
# Test that IncrementalPCA and PCA are approximate (to a sign flip).
rng = np.random.RandomState(1999)
n_samples = 100
n_features = 3
X = rng.randn(n_samples, n_features) + 5 * rng.rand(1, n_features)
Y_pca = PCA(n_components=3).fit_transform(X)
Y_ipca = IncrementalPCA(n_components=3, batch_size=25).fit_transform(X)
assert_almost_equal(np.abs(Y_pca), np.abs(Y_ipca), 1)
def test_explained_variances():
# Test that PCA and IncrementalPCA calculations match
X = datasets.make_low_rank_matrix(1000, 100, tail_strength=0.,
effective_rank=10, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 99]:
pca = PCA(n_components=nc).fit(X)
ipca = IncrementalPCA(n_components=nc, batch_size=100).fit(X)
assert_almost_equal(pca.explained_variance_, ipca.explained_variance_,
decimal=prec)
assert_almost_equal(pca.explained_variance_ratio_,
ipca.explained_variance_ratio_, decimal=prec)
assert_almost_equal(pca.noise_variance_, ipca.noise_variance_,
decimal=prec)
def test_whitening():
# Test that PCA and IncrementalPCA transforms match to sign flip.
X = datasets.make_low_rank_matrix(1000, 10, tail_strength=0.,
effective_rank=2, random_state=1999)
prec = 3
n_samples, n_features = X.shape
for nc in [None, 9]:
pca = PCA(whiten=True, n_components=nc).fit(X)
ipca = IncrementalPCA(whiten=True, n_components=nc,
batch_size=250).fit(X)
Xt_pca = pca.transform(X)
Xt_ipca = ipca.transform(X)
assert_almost_equal(np.abs(Xt_pca), np.abs(Xt_ipca), decimal=prec)
Xinv_ipca = ipca.inverse_transform(Xt_ipca)
Xinv_pca = pca.inverse_transform(Xt_pca)
assert_almost_equal(X, Xinv_ipca, decimal=prec)
assert_almost_equal(X, Xinv_pca, decimal=prec)
assert_almost_equal(Xinv_pca, Xinv_ipca, decimal=prec)
| bsd-3-clause |
u3099811/BaxterTictacToe | src/baxter_interface/src/joint_trajectory_action/bezier.py | 3 | 13110 | #! /usr/bin/python
# Software License Agreement (BSD License)
#
# Copyright (c) 2013-2015, Rethink Robotics
# All rights reserved.
#
# Copyright (c) 2011, Ian McMahon
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Ian McMahon nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
"""
The Bezier library was implemented as a class project in CIS515,
Fundamentals of Linear Algebra, taught by Professor Jean Gallier
in the summer of 2011 at the University of Pennsylvania. For an
excellent explanation of Cubic Bezier Curves, and the math
represented in this library, see
http://www.cis.upenn.edu/~cis515/proj1-12.pdf
~~~~~~~~~~~~~~~~~~~~~~~~ Bezier ~~~~~~~~~~~~~~~~~~~~~~~~
A library for computing Bezier Cubic Splines for an arbitrary
set of control points in R2, R3, up to RN space.
Cubic Segment:
C(t) = (1 - t)^3*b0 + 3(1 - t)^2*t*b1 + 3(1 - t)*t^2*b2 + t^3*b3
Bezier Spline of Cubic Segments:
B(t) = C_(i)(t-i+1), i-1 <= t <= i
where C0 continuity exists: C_(i)(1) = C_(i+1)(0)
where C1 continuity exists: C'_(i)(1) = C'_(i+1)(0)
and where C2 continuity exists: C"_(i)(1) = C"_(i+1)(0)
ex. usage:
import numpy
import bezier
points_array = numpy.array([[1, 2, 3], [4, 4, 4],
[6, 4, 6], [2, 5, 6],
[5, 6, 7]])
d_pts = bezier.de_boor_control_pts(points_array)
b_coeffs = bezier.bezier_coefficients(points_array, d_pts)
b_curve = bezier.bezier_curve(b_coeffs, 50)
# plotting example
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.gca(projection='3d')
#plot bezier curve
ax.plot(b_curve[:,0], b_curve[:,1], b_curve[:,2])
#plot specified points
ax.plot(points_array[:,0], points_array[:,1], points_array[:,2], 'g*')
ax.set_title("Cubic Bezier Spline")
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
ax.legend(["Bezier Curve", "Control Points"], loc=2)
plt.show()
"""
import numpy as np
def de_boor_control_pts(points_array, d0=None,
dN=None, natural=True):
"""
Compute the de Boor control points for a given
    set of control points
params:
points_array: array of user-supplied control points
numpy.array of size N by k
N is the number of input control points
k is the number of dimensions for each point
d0: the first control point - None if "natural"
numpy.array of size 1 by k
dN: the last control point - None if "natural"
numpy.array of size 1 by k
natural: flag to signify natural start/end conditions
bool
returns:
d_pts: array of de Boor control points
numpy.array of size N+3 by k
"""
# N+3 auxiliary points required to compute d_pts
# dpts_(-1) = x_(0)
# dpts_(N+1) = x_(N)
    # so it is only necessary to find N+1 pts, dpts_(0) to dpts_(N)
(rows, k) = np.shape(points_array)
N = rows - 1 # minus 1 because list includes x_(0)
# Compute A matrix
if natural:
if N > 2:
A = np.zeros((N-1, N-1))
A[np.ix_([0], [0, 1])] = [4, 1]
A[np.ix_([N-2], [N-3, N-2])] = [1, 4]
else:
A = 4.0
else:
if N > 2:
A = np.zeros((N-1, N-1))
A[np.ix_([0], [0, 1])] = [3.5, 1]
A[np.ix_([N-2], [N-3, N-2])] = [1, 3.5]
else:
A = 3.5
for i in range(1, N-2):
A[np.ix_([i], [i-1, i, i+1])] = [1, 4, 1]
# Construct de Boor Control Points from A matrix
d_pts = np.zeros((N+3, k))
for col in range(0, k):
x = np.zeros((max(N-1, 1), 1))
if N > 2:
# Compute start / end conditions
if natural:
x[N-2, 0] = 6*points_array[-2, col] - points_array[-1, col]
x[0, 0] = 6*points_array[1, col] - points_array[0, col]
else:
x[N-2, 0] = 6*points_array[-2, col] - 1.5*dN[0, col]
x[0, 0] = 6*points_array[1, col] - 1.5*d0[0, col]
x[range(1, N-3+1), 0] = 6*points_array[range(2, N-2+1), col]
# Solve bezier interpolation
d_pts[2:N+1, col] = np.linalg.solve(A, x).T
else:
# Compute start / end conditions
if natural:
x[0, 0] = 6*points_array[1, col] - points_array[0, col]
else:
x[0, 0] = 6*points_array[1, col] - 1.5*d0[col]
# Solve bezier interpolation
d_pts[2, col] = x / A
# Store off start and end positions
d_pts[0, :] = points_array[0, :]
d_pts[-1, :] = points_array[-1, :]
# Compute the second to last de Boor point based on end conditions
if natural:
one_third = (1.0/3.0)
two_thirds = (2.0/3.0)
d_pts[1, :] = (two_thirds)*points_array[0, :] + (one_third)*d_pts[2, :]
d_pts[N+1, :] = ((one_third)*d_pts[-3, :] +
(two_thirds)*points_array[-1, :])
else:
d_pts[1, :] = d0
d_pts[N+1, :] = dN
return d_pts
def bezier_coefficients(points_array, d_pts):
"""
Compute the Bezier coefficients for a given
    set of user-supplied control pts and
de Boor control pts.
These B coeffs are used to compute the cubic
splines for each cubic spline segment as
follows (where t is a percentage of time between
b_coeff segments):
    C(t) = (1 - t)^3*b0 + 3(1 - t)^2*t*b1
        + 3(1 - t)*t^2*b2 + t^3*b3
params:
points_array: array of user-supplied control points
numpy.array of size N by k
N is the number of control points
k is the number of dimensions for each point
d_pts: array of de Boor control points
numpy.array of size N+3 by k
returns:
b_coeffs: k-dimensional array of 4 Bezier coefficients
for every control point
            numpy.array of size k by N by 4
"""
(rows, k) = np.shape(points_array)
N = rows - 1 # N minus 1 because points array includes x_0
b_coeffs = np.zeros(shape=(k, N, 4))
for i in range(0, N):
points_array_i = i+1
d_pts_i = i + 2
if i == 0:
for axis_pos in range(0, k):
b_coeffs[axis_pos, i, 0] = points_array[points_array_i - 1,
axis_pos]
b_coeffs[axis_pos, i, 1] = d_pts[d_pts_i - 1, axis_pos]
b_coeffs[axis_pos, i, 2] = (0.5 * d_pts[d_pts_i - 1, axis_pos]
+ 0.5 * d_pts[d_pts_i, axis_pos])
b_coeffs[axis_pos, i, 3] = points_array[points_array_i,
axis_pos]
elif i == N-1:
for axis_pos in range(0, k):
b_coeffs[axis_pos, i, 0] = points_array[points_array_i - 1,
axis_pos]
b_coeffs[axis_pos, i, 1] = (0.5 * d_pts[d_pts_i - 1, axis_pos]
+ 0.5 * d_pts[d_pts_i, axis_pos])
b_coeffs[axis_pos, i, 2] = d_pts[d_pts_i, axis_pos]
b_coeffs[axis_pos, i, 3] = points_array[points_array_i,
axis_pos]
else:
for axis_pos in range(0, k):
b_coeffs[axis_pos, i, 0] = points_array[points_array_i - 1,
axis_pos]
b_coeffs[axis_pos, i, 1] = (2.0/3.0 * d_pts[d_pts_i - 1,
axis_pos]
+ 1.0/3.0 * d_pts[d_pts_i,
axis_pos])
b_coeffs[axis_pos, i, 2] = (1.0/3.0 * d_pts[d_pts_i - 1,
axis_pos]
+ 2.0/3.0 * d_pts[d_pts_i,
axis_pos])
b_coeffs[axis_pos, i, 3] = points_array[points_array_i,
axis_pos]
return b_coeffs
def _cubic_spline_point(b_coeff, t):
"""
Internal convenience function for calculating
a k-dimensional point defined by the supplied
Bezier coefficients. Finds the point that
describes the current position along the bezier
segment for k dimensions.
params:
b_coeff => b0...b3: Four k-dimensional Bezier
coefficients each one is a numpy.array
of size k by 1, so
b_coeff is a numpy array of size k by 4
k is the number of dimensions for each
coefficient
t: percentage of time elapsed for this segment
            float, 0.0 <= t <= 1.0
returns:
current position in k dimensions
numpy.array of size 1 by k
"""
return (pow((1-t), 3)*b_coeff[:, 0] +
3*pow((1-t), 2)*t*b_coeff[:, 1] +
3*(1-t)*pow(t, 2)*b_coeff[:, 2] +
pow(t, 3)*b_coeff[:, 3]
)
def bezier_point(b_coeffs, b_index, t):
"""
Finds the k values that describe the current
position along the bezier curve for k dimensions.
params:
b_coeffs: k-dimensional array
for every control point with 4 Bezier coefficients
numpy.array of size k by N by 4
N is the number of control points
k is the number of dimensions for each point
b_index: index position out between two of
the N b_coeffs for this point in time
int
t: percentage of time that has passed between
the two control points
            float, 0.0 <= t <= 1.0
returns:
b_point: current position in k dimensions
numpy.array of size 1 by k
"""
if b_index <= 0:
b_point = b_coeffs[:, 0, 0]
elif b_index > b_coeffs.shape[1]:
b_point = b_coeffs[:, -1, -1]
else:
t = 0.0 if t < 0.0 else t
t = 1.0 if t > 1.0 else t
b_coeff_set = b_coeffs[:, b_index-1, range(4)]
b_point = _cubic_spline_point(b_coeff_set, t)
return b_point
def bezier_curve(b_coeffs, num_intervals):
"""
    Interpolation of the entire Bezier curve at once,
using a specified number of intervals between
control points (encapsulated by b_coeffs).
params:
b_coeffs: k-dimensional array of 4 Bezier coefficients
for every control point
            numpy.array of size k by N by 4
N is the number of control points
k is the number of dimensions for each point
num_intervals: the number of intervals between
control points
int > 0
returns:
b_curve: positions along the bezier curve in k-dimensions
numpy.array of size N*num_interval+1 by k
(the +1 is to include the start position on the curve)
"""
assert num_intervals > 0,\
"Invalid number of intervals chosen (must be greater than 0)"
interval = 1.0 / num_intervals
(num_axes, num_bpts, _) = np.shape(b_coeffs)
b_curve = np.zeros((num_bpts*num_intervals+1, num_axes))
# Copy out initial point
b_curve[0, :] = b_coeffs[:, 0, 0]
for current_bpt in range(num_bpts):
b_coeff_set = b_coeffs[:, current_bpt, range(4)]
for iteration, t in enumerate(np.linspace(interval, 1,
num_intervals)):
b_curve[(current_bpt *
num_intervals +
iteration+1), :] = _cubic_spline_point(b_coeff_set, t)
return b_curve
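# --- Editor's sketch (not part of the original file) -------------------------
# A minimal, hedged example tying the public helpers above together on a small
# 2-D control polygon. The function name and the sample points are purely
# illustrative; defining this helper does not change the module's behaviour.
def _example_bezier_pipeline():
    points = np.array([[0.0, 0.0], [1.0, 2.0], [3.0, 3.0], [4.0, 0.0]])
    # de Boor control points with natural start/end conditions
    d_pts = de_boor_control_pts(points)
    # four Bezier coefficients per cubic segment, array of shape (k, N, 4)
    b_coeffs = bezier_coefficients(points, d_pts)
    # sample each cubic segment at 10 intervals along the spline
    curve = bezier_curve(b_coeffs, num_intervals=10)
    return curve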
| apache-2.0 |
jdavidrcamacho/Tests_GP | 08 - Thesis results/speed_test6.py | 1 | 5414 | import Gedi as gedi
import george
import numpy as np;
import matplotlib.pylab as pl; pl.close('all')
from time import time,sleep
import scipy.optimize as op
import sys
##### INITIAL DATA ###########################################################
nrep = 1
pontos=[]
temposQP=[]
temposmulti=[]
georgeQP=[]
sleeptime=10
lista=[10,20,50,100,200,500]
#for i in np.arange(100,650,200):
#for i in np.arange(100,1400,350):
### Functions george
# Define the objective function (negative log-likelihood in this case).
def nll(p):
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
ll = gp.lnlikelihood(y, quiet=True)
# The scipy optimizer doesn't play well with infinities.
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll(p):
# Update the kernel parameters and compute the likelihood.
gp.kernel[:] = p
return -gp.grad_lnlikelihood(y, quiet=True)
### Functions gedi
def nll_gedi(p):
global kernel
# Update the kernel parameters and compute the likelihood.
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(p))
ll = gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# The scipy optimizer doesn't play well with infinities.
return -ll if np.isfinite(ll) else 1e25
# And the gradient of the objective function.
def grad_nll_gedi(p):
global kernel
# Update the kernel parameters and compute the likelihood.
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(p))
return -np.array(gedi.kernel_likelihood.gradient_likelihood(kernel,x,y,yerr))
###############################################################################
### Things to run
for i0, i in enumerate(lista):
f=open("{0}.txt".format(i),"w")
sys.stdout = f
print i
pontos.append(i)
print 'pontos', pontos
x = 10 * np.sort(np.random.rand(2*i))
yerr = 0.2 * np.ones_like(x)
y = np.sin(x) + yerr * np.random.randn(len(x))
av = []
for _ in range(nrep):
start= time()
kernel= gedi.kernel.QuasiPeriodic(15.0,2.0,1.0,10.0)
print 'Initial gedi kernel =',kernel
print 'Initial gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# Run the optimization routine.
p0_gedi = np.log(kernel.pars)
results_gedi = op.minimize(nll_gedi, p0_gedi, jac=grad_nll_gedi)
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(results_gedi.x))
print 'Final gedi kernel =',kernel
print 'Final gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
print
tempo1= time() - start
av.append(tempo1)
temposQP.append(sum(av) / float(nrep))
print 'temposQP', temposQP
sleep(sleeptime*i0)
###############################################################################
av = []
for _ in range(nrep):
start= time()
kernel= gedi.kernel.ExpSineSquared(15.0, 2.0, 10.0)* \
gedi.kernel.ExpSquared(1.0,1.0)
print 'Initial gedi kernel =',kernel
print 'Initial gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
# Run the optimization routine.
p0_gedi = np.log(kernel.pars)
results_gedi = op.minimize(nll_gedi, p0_gedi, jac=grad_nll_gedi)
kernel= gedi.kernel_optimization.new_kernel(kernel,np.exp(results_gedi.x))
print 'Final gedi kernel =',kernel
print 'Final gedi likelihood =',gedi.kernel_likelihood.likelihood(kernel,x,y,yerr)
print
tempo1= time() - start
av.append(tempo1)
temposmulti.append(sum(av) / float(nrep))
print 'temposmult', temposmulti
sleep(sleeptime*i0)
av = []
for _ in range(nrep):
start = time() # Calculation using george
kernelg1 = 15.0**2*george.kernels.ExpSine2Kernel(2/2.0**2,10.0)* \
george.kernels.ExpSquaredKernel(1.0**2)
# You need to compute the GP once before starting the optimization.
gp = george.GP(kernelg1, mean=np.mean(y))
gp.compute(x,yerr)
# Print the initial ln-likelihood.
print 'Initial george kernel', kernelg1
print 'Initial george likelihood', gp.lnlikelihood(y)
# Run the optimization routine.
p0 = gp.kernel.vector
results = op.minimize(nll, p0, jac=grad_nll)
# Update the kernel and print the final log-likelihood.
gp.kernel[:] = results.x
print 'Final george kernel =',gp.kernel
print 'Final george likelihood= ', gp.lnlikelihood(y)
print
tempog1= time() - start
av.append(tempog1)
georgeQP.append(sum(av) / float(nrep))
print 'georgeQP', georgeQP
###########################################################################
sys.stdout = sys.__stdout__
f.close()
sleep(sleeptime*i0)
N = pontos
pl.figure()
pl.loglog(N, temposQP, 'r-')
pl.loglog(N, temposmulti, 'b-o')
pl.loglog(N, georgeQP, 'b--')
pl.xlim(0.9*N[0], 1.1*N[-1])
pl.xlabel('Number of points')
pl.ylabel('Time')
#pl.title('Covariance matrix calculations')
pl.legend(['gedi QP', 'gedi ESS*ES','george ESS*ES'],loc='upper left')
pl.xticks(fontsize = 18);pl.yticks(fontsize=18)
pl.savefig('speedtest_6.png')
#pl.close('all') | mit |
sujithvm/internationality-journals | src/get_journal_list_Aminer.py | 3 | 1586 | __author__ = 'Sukrit'
import bson
import pandas as pd
import numpy as np
#import matplotlib.pyplot as plt
#from scipy.optimize import curve_fit
ELElist = []
with open('../data/Elsevier_journal_list.csv', 'r') as file :
x = file.readlines()
for line in x :
#print line
line = line.replace('&','and') #converting & to 'and' [UGH]
ELElist.append(line.rstrip()) #remove whitespaces
import pymongo
client = pymongo.MongoClient("localhost", 27017)
# db name - aminer
db = client.aminer
# collection
db.publications
jlist = []
i = 0
flag = False
for jname in ELElist :
flag = False
try :
if db.publications.find_one(filter = {'publication' : jname},limit = 1) != None :
flag = True
except bson.errors.InvalidStringData :
print "[ERROR] Could not insert value: " + jname
else :
if flag == True :
jlist.append(jname)
print "[INFO] Value found: " + jname
i += 1
print i
with open("../output/both_journal_list.txt", "w") as file:
for line in jlist:
file.write(line+"\n")
'''
cursor = db.publications.find()
for document in cursor :
if document['publication'] not in jlist :
if document['publication'] in ELElist :
jlist.append(document['publication'])
print document['publication']
'''
'''
citable_items = list(db.publications.find({"publication" : P})
citable_items_ids = []
for cite in citable_items : citable_items_ids.append(cite['index'])
'''
#print "[DEBUG] Number of papers ", len(papers)
#print papers
| mit |
roverdotcom/pandastream-tools | sync_profiles.py | 1 | 3938 | import logging
import json
import argparse
from ConfigParser import SafeConfigParser
import panda
logging.basicConfig()
logging.getLogger().setLevel(logging.DEBUG)
logger = logging.getLogger('requests.packages.urllib3')
logger.setLevel(logging.DEBUG)
logger.propagate = True
class ServiceError(Exception):
pass
class EncodingProfilesSynchronizer(object):
def __init__(self, service):
self._service = service
def run(self, profiles):
current_profiles = self._fetch_profiles()
for current_profile in current_profiles:
profile_name = current_profile['name']
if profile_name in profiles:
new_profile = profiles.pop(profile_name)
self._update_profile(current_profile, new_profile)
for new_profile in profiles.values():
self._create_profile(new_profile)
def _fetch_profiles(self):
current_profiles = self._service.get('/profiles.json')
return json.loads(current_profiles)
def _update_profile(self, current_profile, new_profile):
payload = current_profile.copy()
payload.update(new_profile)
payload.pop('preset_name')
profile_id = payload.pop('id')
self._service.put('/profiles/%s.json' % profile_id, payload)
print "Updated profile '%s'" % current_profile['name']
def _create_profile(self, new_profile):
self._service.post('/profiles.json', new_profile)
print "Created profile '%s'" % new_profile['name']
def get_config_parser(filename):
config = SafeConfigParser()
with open(filename) as config_file:
config.readfp(config_file)
return config
def load_profiles_from_config_parser(parser):
profiles = {}
for profile_name in parser.sections():
profile = {'name': profile_name}
for field, value in parser.items(profile_name):
profile[field] = value
profiles[profile_name] = profile
return profiles
def load_profiles_from_file(filename):
parser = get_config_parser(filename)
return load_profiles_from_config_parser(parser)
def get_arguments():
parser = argparse.ArgumentParser(
description=("Synchronize the profiles in the configuration file "
"to the provided PandaStream cloud"))
parser.add_argument(
'--api-host',
dest='api_host',
action='store',
default='api.pandastream.com',
help="The PandaStream API URL (defaults to %(default)s)")
parser.add_argument(
'--api-port',
dest='api_port',
action='store',
default='443',
help=("The PandaStream API port to use. Possible values: 80 and 443 "
"(defaults to %(default)s)"))
parser.add_argument(
'access_key',
action='store',
help="The PandaStream API access key")
parser.add_argument(
'secret_key',
action='store',
help="The PandaStream API secret key")
parser.add_argument(
'cloud_id',
action='store',
help="The ID of PandaStream cloud to use")
parser.add_argument(
'--profiles-file',
dest='profiles_file',
action='store',
default='profiles.cfg',
help=("The path to the configuration file containing the profiles to "
"synchronize (defaults to %(default)s)"))
return parser.parse_args()
def main():
args = get_arguments()
service = panda.Panda(
api_host=args.api_host,
cloud_id=args.cloud_id,
access_key=args.access_key,
secret_key=args.secret_key,
api_port=args.api_port)
synchronizer = EncodingProfilesSynchronizer(service)
profiles = load_profiles_from_file(args.profiles_file)
try:
synchronizer.run(profiles)
except ServiceError, e:
print "Failed to synchronize profiles: %s" % e
if __name__ == "__main__":
main()
| mit |
hainm/scikit-learn | examples/linear_model/plot_ols.py | 220 | 1940 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Linear Regression Example
=========================================================
This example uses only the first feature of the `diabetes` dataset, in
order to illustrate a two-dimensional plot of this regression technique. The
straight line can be seen in the plot, showing how linear regression attempts
to draw a straight line that will best minimize the residual sum of squares
between the observed responses in the dataset, and the responses predicted by
the linear approximation.
The coefficients, the residual sum of squares and the variance score are also
calculated.
"""
print(__doc__)
# Code source: Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# Load the diabetes dataset
diabetes = datasets.load_diabetes()
# Use only one feature
diabetes_X = diabetes.data[:, np.newaxis, 2]
# Split the data into training/testing sets
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
# Split the targets into training/testing sets
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# Create linear regression object
regr = linear_model.LinearRegression()
# Train the model using the training sets
regr.fit(diabetes_X_train, diabetes_y_train)
# The coefficients
print('Coefficients: \n', regr.coef_)
# The mean squared error
print("Mean squared error: %.2f"
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# Explained variance score: 1 is perfect prediction
print('Variance score: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue',
linewidth=3)
plt.xticks(())
plt.yticks(())
plt.show()
| bsd-3-clause |
cbertinato/pandas | pandas/tests/indexes/timedeltas/test_scalar_compat.py | 1 | 2391 | """
Tests for TimedeltaIndex methods behaving like their Timedelta counterparts
"""
import numpy as np
import pytest
import pandas as pd
from pandas import Index, Series, Timedelta, TimedeltaIndex, timedelta_range
import pandas.util.testing as tm
class TestVectorizedTimedelta:
def test_tdi_total_seconds(self):
# GH#10939
# test index
rng = timedelta_range('1 days, 10:11:12.100123456', periods=2,
freq='s')
expt = [1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9,
1 * 86400 + 10 * 3600 + 11 * 60 + 13 + 100123456. / 1e9]
tm.assert_almost_equal(rng.total_seconds(), Index(expt))
# test Series
ser = Series(rng)
s_expt = Series(expt, index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with nat
ser[1] = np.nan
s_expt = Series([1 * 86400 + 10 * 3600 + 11 * 60 +
12 + 100123456. / 1e9, np.nan], index=[0, 1])
tm.assert_series_equal(ser.dt.total_seconds(), s_expt)
# with both nat
ser = Series([np.nan, np.nan], dtype='timedelta64[ns]')
tm.assert_series_equal(ser.dt.total_seconds(),
Series([np.nan, np.nan], index=[0, 1]))
def test_tdi_round(self):
td = pd.timedelta_range(start='16801 days', periods=5, freq='30Min')
elt = td[1]
expected_rng = TimedeltaIndex([Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 00:00:00'),
Timedelta('16801 days 01:00:00'),
Timedelta('16801 days 02:00:00'),
Timedelta('16801 days 02:00:00')])
expected_elt = expected_rng[1]
tm.assert_index_equal(td.round(freq='H'), expected_rng)
assert elt.round(freq='H') == expected_elt
msg = pd._libs.tslibs.frequencies.INVALID_FREQ_ERR_MSG
with pytest.raises(ValueError, match=msg):
td.round(freq='foo')
with pytest.raises(ValueError, match=msg):
elt.round(freq='foo')
msg = "<MonthEnd> is a non-fixed frequency"
with pytest.raises(ValueError, match=msg):
td.round(freq='M')
with pytest.raises(ValueError, match=msg):
elt.round(freq='M')
| bsd-3-clause |
depet/scikit-learn | sklearn/decomposition/pca.py | 1 | 20538 | """ Principal Component Analysis
"""
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Denis A. Engemann <d.engemann@fz-juelich.de>
#
# License: BSD 3 clause
from math import log, sqrt
import warnings
import numpy as np
from scipy import linalg
from scipy.special import gammaln
from ..base import BaseEstimator, TransformerMixin
from ..utils import array2d, check_random_state, as_float_array
from ..utils import atleast2d_or_csr
from ..utils.extmath import fast_logdet, safe_sparse_dot, randomized_svd, \
fast_dot
def _assess_dimension_(spectrum, rank, n_samples, n_features):
"""Compute the likelihood of a rank ``rank`` dataset
    The dataset is assumed to be embedded in Gaussian noise of shape
    (n_samples, n_features) having spectrum ``spectrum``.
Parameters
----------
spectrum: array of shape (n)
data spectrum
rank: int,
tested rank value
n_samples: int,
number of samples
    n_features: int,
        embedding/empirical dimension
Returns
-------
ll: float,
The log-likelihood
Notes
-----
This implements the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
"""
if rank > len(spectrum):
raise ValueError("The tested rank cannot exceed the rank of the"
" dataset")
pu = -rank * log(2.)
for i in range(rank):
pu += (gammaln((n_features - i) / 2.)
- log(np.pi) * (n_features - i) / 2.)
pl = np.sum(np.log(spectrum[:rank]))
pl = -pl * n_samples / 2.
if rank == n_features:
pv = 0
v = 1
else:
v = np.sum(spectrum[rank:]) / (n_features - rank)
pv = -np.log(v) * n_samples * (n_features - rank) / 2.
m = n_features * rank - rank * (rank + 1.) / 2.
pp = log(2. * np.pi) * (m + rank + 1.) / 2.
pa = 0.
spectrum_ = spectrum.copy()
spectrum_[rank:n_features] = v
for i in range(rank):
for j in range(i + 1, len(spectrum)):
pa += log((spectrum[i] - spectrum[j]) *
(1. / spectrum_[j] - 1. / spectrum_[i])) + log(n_samples)
ll = pu + pl + pv + pp - pa / 2. - rank * log(n_samples) / 2.
return ll
def _infer_dimension_(spectrum, n_samples, n_features):
"""Infers the dimension of a dataset of shape (n_samples, n_features)
The dataset is described by its spectrum `spectrum`.
"""
n_spectrum = len(spectrum)
ll = np.empty(n_spectrum)
for rank in range(n_spectrum):
ll[rank] = _assess_dimension_(spectrum, rank, n_samples, n_features)
return ll.argmax()
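# --- Editor's sketch (not part of the original file) -------------------------
# A hedged illustration of how the helpers above can be used to guess the rank
# of noisy low-rank data. The helper name and the sizes are illustrative only;
# defining this function does not affect the module.
def _example_infer_dimension():  # pragma: no cover
    rng = np.random.RandomState(0)
    n_samples, n_features, rank = 500, 10, 3
    X = np.dot(rng.randn(n_samples, rank), rng.randn(rank, n_features))
    X += 0.01 * rng.randn(n_samples, n_features)  # small isotropic noise
    X -= np.mean(X, axis=0)
    spectrum = linalg.svd(X, compute_uv=False) ** 2 / n_samples
    # with a clear spectral gap the estimate is expected to be close to 3
    return _infer_dimension_(spectrum, n_samples, n_features)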
class PCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA)
Linear dimensionality reduction using Singular Value Decomposition of the
data and keeping only the most significant singular vectors to project the
data to a lower dimensional space.
This implementation uses the scipy.linalg implementation of the singular
value decomposition. It only works for dense arrays and is not scalable to
large dimensional data.
The time complexity of this implementation is ``O(n ** 3)`` assuming
n ~ n_samples ~ n_features.
Parameters
----------
n_components : int, None or string
Number of components to keep.
if n_components is not set all components are kept::
n_components == min(n_samples, n_features)
if n_components == 'mle', Minka\'s MLE is used to guess the dimension
if ``0 < n_components < 1``, select the number of components such that
the amount of variance that needs to be explained is greater than the
percentage specified by n_components
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
whiten : bool, optional
        When True (False by default) the `components_` vectors are divided
        by the singular values and multiplied by sqrt(n_samples) to ensure
        uncorrelated outputs with unit component-wise variances.
        Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
        improve the predictive accuracy of the downstream estimators by
        making their data respect some hard-wired assumptions.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0.
`n_components_` : int
The estimated number of components. Relevant when n_components is set
to 'mle' or a number between 0 and 1 to select using explained
variance.
Notes
-----
For n_components='mle', this class uses the method of `Thomas P. Minka:
Automatic Choice of Dimensionality for PCA. NIPS 2000: 598-604`
Due to implementation subtleties of the Singular Value Decomposition (SVD),
which is used in this implementation, running fit twice on the same matrix
can lead to principal components with signs flipped (change in direction).
For this reason, it is important to always use the same estimator object to
transform data in a consistent fashion.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import PCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = PCA(n_components=2)
>>> pca.fit(X)
PCA(copy=True, n_components=2, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
ProbabilisticPCA
RandomizedPCA
KernelPCA
SparsePCA
TruncatedSVD
"""
def __init__(self, n_components=None, copy=True, whiten=False):
self.n_components = n_components
self.copy = copy
self.whiten = whiten
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model with X and apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Training data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
U, S, V = self._fit(X)
U = U[:, :self.n_components_]
if self.whiten:
# X_new = X * V / S * sqrt(n_samples) = U * sqrt(n_samples)
U *= sqrt(X.shape[0])
else:
# X_new = X * V = U * S * V^T * V = U * S
U *= S[:self.n_components_]
return U
def _fit(self, X):
""" Fit the model on X
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Returns
-------
U, s, V : ndarrays
The SVD of the input data, copied and centered when
requested.
"""
X = array2d(X)
n_samples, n_features = X.shape
X = as_float_array(X, copy=self.copy)
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
U, S, V = linalg.svd(X, full_matrices=False)
self.explained_variance_ = (S ** 2) / n_samples
self.explained_variance_ratio_ = (self.explained_variance_ /
self.explained_variance_.sum())
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
n_components = self.n_components
if n_components is None:
n_components = n_features
elif n_components == 'mle':
if n_samples < n_features:
raise ValueError("n_components='mle' is only supported "
"if n_samples >= n_features")
n_components = _infer_dimension_(self.explained_variance_,
n_samples, n_features)
if 0 < n_components < 1.0:
# number of components for which the cumulated explained variance
# percentage is superior to the desired threshold
ratio_cumsum = self.explained_variance_ratio_.cumsum()
n_components = np.sum(ratio_cumsum < n_components) + 1
self.components_ = self.components_[:n_components, :]
self.explained_variance_ = \
self.explained_variance_[:n_components]
self.explained_variance_ratio_ = \
self.explained_variance_ratio_[:n_components]
self.n_components_ = n_components
return (U, S, V)
def transform(self, X):
"""Apply the dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples is the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = array2d(X)
if self.mean_ is not None:
X = X - self.mean_
X_transformed = fast_dot(X, self.components_.T)
return X_transformed
def inverse_transform(self, X):
"""Transform data back to its original space, i.e.,
return an input X_original whose transform would be X
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples is the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
        exact inverse operation of transform.
"""
return fast_dot(X, self.components_) + self.mean_
class ProbabilisticPCA(PCA):
"""Additional layer on top of PCA that adds a probabilistic evaluation"""
__doc__ += PCA.__doc__
def fit(self, X, y=None, homoscedastic=True):
"""Additionally to PCA.fit, learns a covariance model
Parameters
----------
X : array of shape(n_samples, n_features)
The data to fit
homoscedastic : bool, optional,
If True, average variance across remaining dimensions
"""
PCA.fit(self, X)
n_samples, n_features = X.shape
self._dim = n_features
Xr = X - self.mean_
Xr -= np.dot(np.dot(Xr, self.components_.T), self.components_)
n_components = self.n_components
if n_components is None:
n_components = n_features
# Make the low rank part of the estimated covariance
self.covariance_ = np.dot(self.components_[:n_components].T *
self.explained_variance_[:n_components],
self.components_[:n_components])
if n_features == n_components:
delta = 0.
elif homoscedastic:
delta = (Xr ** 2).sum() / (n_samples * n_features)
else:
delta = (Xr ** 2).mean(axis=0) / (n_features - n_components)
# Add delta to the diagonal without extra allocation
self.covariance_.flat[::n_features + 1] += delta
return self
def score(self, X, y=None):
"""Return a score associated to new data
Parameters
----------
X: array of shape(n_samples, n_features)
The data to test
Returns
-------
ll: array of shape (n_samples),
log-likelihood of each row of X under the current model
"""
Xr = X - self.mean_
n_features = X.shape[1]
log_like = np.zeros(X.shape[0])
self.precision_ = linalg.inv(self.covariance_)
log_like = -.5 * (Xr * (np.dot(Xr, self.precision_))).sum(axis=1)
log_like -= .5 * (fast_logdet(self.covariance_)
+ n_features * log(2. * np.pi))
return log_like
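# --- Editor's sketch (not part of the original file) -------------------------
# A hedged usage illustration for the probabilistic scoring API above. The
# helper name and the random data are illustrative only.
def _example_probabilistic_pca_score():  # pragma: no cover
    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    ppca = ProbabilisticPCA(n_components=2).fit(X)
    # per-sample log-likelihood under the fitted low-rank plus noise covariance
    return ppca.score(X)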
class RandomizedPCA(BaseEstimator, TransformerMixin):
"""Principal component analysis (PCA) using randomized SVD
Linear dimensionality reduction using approximated Singular Value
Decomposition of the data and keeping only the most significant
singular vectors to project the data to a lower dimensional space.
Parameters
----------
n_components : int, optional
Maximum number of components to keep. When not given or None, this
is set to n_features (the second dimension of the training data).
copy : bool
If False, data passed to fit are overwritten and running
fit(X).transform(X) will not yield the expected results,
use fit_transform(X) instead.
iterated_power : int, optional
Number of iterations for the power method. 3 by default.
whiten : bool, optional
When True (False by default) the `components_` vectors are divided
by the singular values to ensure uncorrelated outputs with unit
component-wise variances.
Whitening will remove some information from the transformed signal
        (the relative variance scales of the components) but can sometimes
improve the predictive accuracy of the downstream estimators by
making their data respect some hard-wired assumptions.
random_state : int or RandomState instance or None (default)
Pseudo Random Number generator seed control. If None, use the
numpy.random singleton.
Attributes
----------
`components_` : array, [n_components, n_features]
Components with maximum variance.
`explained_variance_ratio_` : array, [n_components]
        Percentage of variance explained by each of the selected components.
        If ``n_components`` is not set then all components are stored and the
        sum of explained variances is equal to 1.0.
Examples
--------
>>> import numpy as np
>>> from sklearn.decomposition import RandomizedPCA
>>> X = np.array([[-1, -1], [-2, -1], [-3, -2], [1, 1], [2, 1], [3, 2]])
>>> pca = RandomizedPCA(n_components=2)
>>> pca.fit(X) # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
RandomizedPCA(copy=True, iterated_power=3, n_components=2,
random_state=None, whiten=False)
>>> print(pca.explained_variance_ratio_) # doctest: +ELLIPSIS
[ 0.99244... 0.00755...]
See also
--------
PCA
ProbabilisticPCA
TruncatedSVD
References
----------
.. [Halko2009] `Finding structure with randomness: Stochastic algorithms
for constructing approximate matrix decompositions Halko, et al., 2009
(arXiv:909)`
.. [MRT] `A randomized algorithm for the decomposition of matrices
Per-Gunnar Martinsson, Vladimir Rokhlin and Mark Tygert`
Notes
-----
This class supports sparse matrix input for backward compatibility, but
actually computes a truncated SVD instead of a PCA in that case (i.e. no
centering is performed). This support is deprecated; use the class
TruncatedSVD for sparse matrix support.
"""
def __init__(self, n_components=None, copy=True, iterated_power=3,
whiten=False, random_state=None):
self.n_components = n_components
self.copy = copy
self.iterated_power = iterated_power
self.whiten = whiten
self.mean_ = None
self.random_state = random_state
def fit(self, X, y=None):
"""Fit the model with X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
self : object
Returns the instance itself.
"""
self._fit(X)
return self
def _fit(self, X):
"""Fit the model to the data X.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Training vector, where n_samples in the number of samples and
n_features is the number of features.
Returns
-------
X : ndarray, shape (n_samples, n_features)
The input data, copied, centered and whitened when requested.
"""
random_state = check_random_state(self.random_state)
if hasattr(X, 'todense'):
warnings.warn("Sparse matrix support is deprecated"
" and will be dropped in 0.16."
" Use TruncatedSVD instead.",
DeprecationWarning)
else:
# not a sparse matrix, ensure this is a 2D array
X = np.atleast_2d(as_float_array(X, copy=self.copy))
n_samples = X.shape[0]
if not hasattr(X, 'todense'):
# Center data
self.mean_ = np.mean(X, axis=0)
X -= self.mean_
if self.n_components is None:
n_components = X.shape[1]
else:
n_components = self.n_components
U, S, V = randomized_svd(X, n_components,
n_iter=self.iterated_power,
random_state=random_state)
self.explained_variance_ = exp_var = (S ** 2) / n_samples
self.explained_variance_ratio_ = exp_var / exp_var.sum()
if self.whiten:
self.components_ = V / S[:, np.newaxis] * sqrt(n_samples)
else:
self.components_ = V
return X
def transform(self, X, y=None):
"""Apply dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
# XXX remove scipy.sparse support here in 0.16
X = atleast2d_or_csr(X)
if self.mean_ is not None:
X = X - self.mean_
X = safe_sparse_dot(X, self.components_.T)
return X
def fit_transform(self, X, y=None):
"""Apply dimensionality reduction on X.
Parameters
----------
X : array-like, shape (n_samples, n_features)
New data, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new : array-like, shape (n_samples, n_components)
"""
X = self._fit(atleast2d_or_csr(X))
X = safe_sparse_dot(X, self.components_.T)
return X
def inverse_transform(self, X, y=None):
"""Transform data back to its original space.
Returns an array X_original whose transform would be X.
Parameters
----------
X : array-like, shape (n_samples, n_components)
New data, where n_samples in the number of samples
and n_components is the number of components.
Returns
-------
X_original array-like, shape (n_samples, n_features)
Notes
-----
If whitening is enabled, inverse_transform does not compute the
exact inverse operation of transform.
"""
# XXX remove scipy.sparse support here in 0.16
X_original = safe_sparse_dot(X, self.components_)
if self.mean_ is not None:
X_original = X_original + self.mean_
return X_original
| bsd-3-clause |
gef756/statsmodels | statsmodels/stats/tests/test_panel_robustcov.py | 34 | 2750 | # -*- coding: utf-8 -*-
"""Test for panel robust covariance estimators after pooled ols
this follows the example from xtscc paper/help
Created on Tue May 22 20:27:57 2012
Author: Josef Perktold
"""
from statsmodels.compat.python import range, lmap
import numpy as np
from numpy.testing import assert_almost_equal
from statsmodels.regression.linear_model import OLS
from statsmodels.tools.tools import add_constant
import statsmodels.stats.sandwich_covariance as sw
def test_panel_robust_cov():
import pandas as pa
import statsmodels.datasets.grunfeld as gr
from .results.results_panelrobust import results as res_stata
dtapa = gr.data.load_pandas()
#Stata example/data seems to miss last firm
dtapa_endog = dtapa.endog[:200]
dtapa_exog = dtapa.exog[:200]
res = OLS(dtapa_endog, add_constant(dtapa_exog[['value', 'capital']],
prepend=False)).fit()
#time indicator in range(max Ti)
time = np.asarray(dtapa_exog[['year']])
time -= time.min()
time = np.squeeze(time).astype(int)
#sw.cov_nw_panel requires bounds instead of index
tidx = [(i*20, 20*(i+1)) for i in range(10)]
#firm index in range(n_firms)
firm_names, firm_id = np.unique(np.asarray(dtapa_exog[['firm']], 'S20'),
return_inverse=True)
#panel newey west standard errors
cov = sw.cov_nw_panel(res, 0, tidx, use_correction='hac')
#dropping numpy 1.4 soon
#np.testing.assert_allclose(cov, res_stata.cov_pnw0_stata, rtol=1e-6)
assert_almost_equal(cov, res_stata.cov_pnw0_stata, decimal=4)
cov = sw.cov_nw_panel(res, 1, tidx, use_correction='hac')
#np.testing.assert_allclose(cov, res_stata.cov_pnw1_stata, rtol=1e-6)
assert_almost_equal(cov, res_stata.cov_pnw1_stata, decimal=4)
cov = sw.cov_nw_panel(res, 4, tidx) #check default
#np.testing.assert_allclose(cov, res_stata.cov_pnw4_stata, rtol=1e-6)
assert_almost_equal(cov, res_stata.cov_pnw4_stata, decimal=4)
#cluster robust standard errors
cov_clu = sw.cov_cluster(res, firm_id)
assert_almost_equal(cov_clu, res_stata.cov_clu_stata, decimal=4)
#cluster robust standard errors, non-int groups
cov_clu = sw.cov_cluster(res, lmap(str, firm_id))
assert_almost_equal(cov_clu, res_stata.cov_clu_stata, decimal=4)
#Driscoll and Kraay panel robust standard errors
rcov = sw.cov_nw_groupsum(res, 0, time, use_correction=0)
assert_almost_equal(rcov, res_stata.cov_dk0_stata, decimal=4)
rcov = sw.cov_nw_groupsum(res, 1, time, use_correction=0)
assert_almost_equal(rcov, res_stata.cov_dk1_stata, decimal=4)
rcov = sw.cov_nw_groupsum(res, 4, time) #check default
assert_almost_equal(rcov, res_stata.cov_dk4_stata, decimal=4)
| bsd-3-clause |
MagnusS/mirage-bench | test-jitsu/plot.py | 1 | 1208 | #!/usr/bin/env python
import sys
print "# Creating graphs from stdin (requires matplotlib)"
results = {}
for filename in sys.argv[1:]:
results[filename] = []
with open(filename) as f:
for l in f:
line = l.strip()
if len(line) == 0 or line[0] == '#':
continue
if l[0] == "!":
print "Warning: Some results are invalid:"
print l
continue
results[filename].append(float(l) * 1000)
print results
import matplotlib.pyplot as plt
import numpy as np
#fig,ax = plt.subplots()
name = {}
name["processed_results_warm.dat"] = "Jitsu warm start"
name["processed_results_cold.dat"] = "Jitsu cold start wo/synjitsu"
name["processed_results_http_warm.dat"] = "Jitsu warm start (http)"
name["processed_results_http_cold.dat"] = "Jitsu cold start wo/synjitsu (http)"
plt.title('Time from DNS query to first packet of HTTP response')
for t in results:
title = t
if t in name:
title = name[t]
r = results[t]
print "Plotting",r,"==",len(r)
maxval = 1500
bins = 20
binwidth = maxval / bins
plt.hist(r, bins=range(1, maxval+binwidth, binwidth), label=title)
plt.legend(loc="best")
plt.ylabel("Results")
plt.xlabel("Time in milliseconds")
plt.savefig("jitsu.pdf")
plt.show()
| isc |
pravsripad/mne-python | mne/inverse_sparse/mxne_optim.py | 5 | 55953 | # Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Daniel Strohmeier <daniel.strohmeier@gmail.com>
# Mathurin Massias <mathurin.massias@gmail.com>
# License: Simplified BSD
import functools
from math import sqrt
import numpy as np
from .mxne_debiasing import compute_bias
from ..utils import logger, verbose, sum_squared, warn, _get_blas_funcs
from ..time_frequency._stft import stft_norm1, stft_norm2, stft, istft
@functools.lru_cache(None)
def _get_dgemm():
return _get_blas_funcs(np.float64, 'gemm')
def groups_norm2(A, n_orient):
"""Compute squared L2 norms of groups inplace."""
n_positions = A.shape[0] // n_orient
return np.sum(np.power(A, 2, A).reshape(n_positions, -1), axis=1)
def norm_l2inf(A, n_orient, copy=True):
"""L2-inf norm."""
if A.size == 0:
return 0.0
if copy:
A = A.copy()
return sqrt(np.max(groups_norm2(A, n_orient)))
def norm_l21(A, n_orient, copy=True):
"""L21 norm."""
if A.size == 0:
return 0.0
if copy:
A = A.copy()
return np.sum(np.sqrt(groups_norm2(A, n_orient)))
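# --- Editor's sketch (not part of the original module) -----------------------
# A hedged illustration of how rows are grouped by ``n_orient`` in the norms
# above. All names are illustrative only.
def _example_group_norms():  # pragma: no cover
    A = np.arange(12, dtype=np.float64).reshape(6, 2)
    # with n_orient=3, rows are grouped in blocks of 3 (two positions here);
    # groups_norm2 squares its input in place, hence the copy
    sq_norms = groups_norm2(A.copy(), 3)   # shape (2,): squared group norms
    l21 = norm_l21(A, 3)                   # sum of the group L2 norms
    l2inf = norm_l2inf(A, 3)               # max of the group L2 norms
    return sq_norms, l21, l2inf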
def prox_l21(Y, alpha, n_orient, shape=None, is_stft=False):
"""Proximity operator for l21 norm.
L2 over columns and L1 over rows => groups contain n_orient rows.
It can eventually take into account the negative frequencies
when a complex value is passed and is_stft=True.
Parameters
----------
Y : array, shape (n_sources, n_coefs)
The input data.
alpha : float
The regularization parameter.
n_orient : int
Number of dipoles per locations (typically 1 or 3).
shape : None | tuple
Shape of TF coefficients matrix.
is_stft : bool
If True, Y contains TF coefficients.
Returns
-------
Y : array, shape (n_sources, n_coefs)
The output data.
active_set : array of bool, shape (n_sources, )
Mask of active sources
Examples
--------
>>> Y = np.tile(np.array([0, 4, 3, 0, 0], dtype=np.float64), (2, 1))
>>> Y = np.r_[Y, np.zeros_like(Y)]
>>> print(Y) # doctest:+SKIP
[[ 0. 4. 3. 0. 0.]
[ 0. 4. 3. 0. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
>>> Yp, active_set = prox_l21(Y, 2, 2)
>>> print(Yp) # doctest:+SKIP
[[0. 2.86862915 2.15147186 0. 0. ]
[0. 2.86862915 2.15147186 0. 0. ]]
>>> print(active_set)
[ True True False False]
"""
if len(Y) == 0:
return np.zeros_like(Y), np.zeros((0,), dtype=bool)
if shape is not None:
shape_init = Y.shape
Y = Y.reshape(*shape)
n_positions = Y.shape[0] // n_orient
if is_stft:
rows_norm = np.sqrt(stft_norm2(Y).reshape(n_positions, -1).sum(axis=1))
else:
rows_norm = np.sqrt((Y * Y.conj()).real.reshape(n_positions,
-1).sum(axis=1))
# Ensure shrink is >= 0 while avoiding any division by zero
shrink = np.maximum(1.0 - alpha / np.maximum(rows_norm, alpha), 0.0)
active_set = shrink > 0.0
if n_orient > 1:
active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
shrink = np.tile(shrink[:, None], [1, n_orient]).ravel()
Y = Y[active_set]
if shape is None:
Y *= shrink[active_set][:, np.newaxis]
else:
Y *= shrink[active_set][:, np.newaxis, np.newaxis]
Y = Y.reshape(-1, *shape_init[1:])
return Y, active_set
def prox_l1(Y, alpha, n_orient):
"""Proximity operator for l1 norm with multiple orientation support.
Please note that this function computes a soft-thresholding if
n_orient == 1 and a block soft-thresholding (L2 over orientation and
L1 over position (space + time)) if n_orient == 3. See also
:footcite:`GramfortEtAl2013b`.
Parameters
----------
Y : array, shape (n_sources, n_coefs)
The input data.
alpha : float
The regularization parameter.
n_orient : int
Number of dipoles per locations (typically 1 or 3).
Returns
-------
Y : array, shape (n_sources, n_coefs)
The output data.
active_set : array of bool, shape (n_sources, )
Mask of active sources.
References
----------
.. footbibliography::
Examples
--------
>>> Y = np.tile(np.array([1, 2, 3, 2, 0], dtype=np.float64), (2, 1))
>>> Y = np.r_[Y, np.zeros_like(Y)]
>>> print(Y) # doctest:+SKIP
[[ 1. 2. 3. 2. 0.]
[ 1. 2. 3. 2. 0.]
[ 0. 0. 0. 0. 0.]
[ 0. 0. 0. 0. 0.]]
>>> Yp, active_set = prox_l1(Y, 2, 2)
>>> print(Yp) # doctest:+SKIP
[[0. 0.58578644 1.58578644 0.58578644 0. ]
[0. 0.58578644 1.58578644 0.58578644 0. ]]
>>> print(active_set)
[ True True False False]
"""
n_positions = Y.shape[0] // n_orient
norms = np.sqrt((Y * Y.conj()).real.T.reshape(-1, n_orient).sum(axis=1))
# Ensure shrink is >= 0 while avoiding any division by zero
shrink = np.maximum(1.0 - alpha / np.maximum(norms, alpha), 0.0)
shrink = shrink.reshape(-1, n_positions).T
active_set = np.any(shrink > 0.0, axis=1)
shrink = shrink[active_set]
if n_orient > 1:
active_set = np.tile(active_set[:, None], [1, n_orient]).ravel()
Y = Y[active_set]
if len(Y) > 0:
for o in range(n_orient):
Y[o::n_orient] *= shrink
return Y, active_set
def dgap_l21(M, G, X, active_set, alpha, n_orient):
"""Duality gap for the mixed norm inverse problem.
See :footcite:`GramfortEtAl2012`.
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_active)
The gain matrix a.k.a. lead field.
X : array, shape (n_active, n_times)
Sources.
active_set : array of bool, shape (n_sources, )
Mask of active sources.
alpha : float
The regularization parameter.
n_orient : int
Number of dipoles per locations (typically 1 or 3).
Returns
-------
gap : float
Dual gap.
p_obj : float
Primal objective.
d_obj : float
Dual objective. gap = p_obj - d_obj.
R : array, shape (n_sensors, n_times)
Current residual (M - G * X).
References
----------
    .. footbibliography::
"""
GX = np.dot(G[:, active_set], X)
R = M - GX
penalty = norm_l21(X, n_orient, copy=True)
nR2 = sum_squared(R)
p_obj = 0.5 * nR2 + alpha * penalty
dual_norm = norm_l2inf(np.dot(G.T, R), n_orient, copy=False)
scaling = alpha / dual_norm
scaling = min(scaling, 1.0)
d_obj = (scaling - 0.5 * (scaling ** 2)) * nR2 + scaling * np.sum(R * GX)
gap = p_obj - d_obj
return gap, p_obj, d_obj, R
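# --- Editor's sketch (not part of the original module) -----------------------
# A hedged example of calling dgap_l21 on random data with the trivial
# all-zero source estimate. Names and sizes are illustrative only.
def _example_dgap_l21():  # pragma: no cover
    rng = np.random.RandomState(0)
    M = rng.randn(10, 5)                   # 10 sensors, 5 time samples
    G = rng.randn(10, 30)                  # gain matrix for 30 sources
    X = np.zeros((30, 5))                  # all-zero source estimate
    active_set = np.ones(30, dtype=bool)
    gap, p_obj, d_obj, R = dgap_l21(M, G, X, active_set, alpha=1., n_orient=1)
    # with X == 0 the residual equals M and p_obj reduces to 0.5 * ||M||_F^2
    return gap, p_obj, d_obj, R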
@verbose
def _mixed_norm_solver_prox(M, G, alpha, lipschitz_constant, maxit=200,
tol=1e-8, verbose=None, init=None, n_orient=1,
dgap_freq=10):
"""Solve L21 inverse problem with proximal iterations and FISTA."""
n_sensors, n_times = M.shape
_, n_sources = G.shape
if n_sources < n_sensors:
gram = np.dot(G.T, G)
GTM = np.dot(G.T, M)
else:
gram = None
if init is None:
X = 0.0
R = M.copy()
if gram is not None:
R = np.dot(G.T, R)
else:
X = init
if gram is None:
R = M - np.dot(G, X)
else:
R = GTM - np.dot(gram, X)
t = 1.0
Y = np.zeros((n_sources, n_times)) # FISTA aux variable
E = [] # track primal objective function
highest_d_obj = - np.inf
active_set = np.ones(n_sources, dtype=bool) # start with full AS
for i in range(maxit):
X0, active_set_0 = X, active_set # store previous values
if gram is None:
Y += np.dot(G.T, R) / lipschitz_constant # ISTA step
else:
Y += R / lipschitz_constant # ISTA step
X, active_set = prox_l21(Y, alpha / lipschitz_constant, n_orient)
t0 = t
t = 0.5 * (1.0 + sqrt(1.0 + 4.0 * t ** 2))
Y.fill(0.0)
dt = ((t0 - 1.0) / t)
Y[active_set] = (1.0 + dt) * X
Y[active_set_0] -= dt * X0
Y_as = active_set_0 | active_set
if gram is None:
R = M - np.dot(G[:, Y_as], Y[Y_as])
else:
R = GTM - np.dot(gram[:, Y_as], Y[Y_as])
if (i + 1) % dgap_freq == 0:
_, p_obj, d_obj, _ = dgap_l21(M, G, X, active_set, alpha,
n_orient)
highest_d_obj = max(d_obj, highest_d_obj)
gap = p_obj - highest_d_obj
E.append(p_obj)
logger.debug("p_obj : %s -- gap : %s" % (p_obj, gap))
if gap < tol:
logger.debug('Convergence reached ! (gap: %s < %s)'
% (gap, tol))
break
return X, active_set, E
@verbose
def _mixed_norm_solver_cd(M, G, alpha, lipschitz_constant, maxit=10000,
tol=1e-8, verbose=None, init=None, n_orient=1,
dgap_freq=10):
"""Solve L21 inverse problem with coordinate descent."""
from sklearn.linear_model import MultiTaskLasso
assert M.ndim == G.ndim and M.shape[0] == G.shape[0]
clf = MultiTaskLasso(alpha=alpha / len(M), tol=tol / sum_squared(M),
normalize=False, fit_intercept=False, max_iter=maxit,
warm_start=True)
if init is not None:
clf.coef_ = init.T
else:
clf.coef_ = np.zeros((G.shape[1], M.shape[1])).T
clf.fit(G, M)
X = clf.coef_.T
active_set = np.any(X, axis=1)
X = X[active_set]
gap, p_obj, d_obj, _ = dgap_l21(M, G, X, active_set, alpha, n_orient)
return X, active_set, p_obj
@verbose
def _mixed_norm_solver_bcd(M, G, alpha, lipschitz_constant, maxit=200,
tol=1e-8, verbose=None, init=None, n_orient=1,
dgap_freq=10):
"""Solve L21 inverse problem with block coordinate descent."""
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
n_positions = n_sources // n_orient
if init is None:
X = np.zeros((n_sources, n_times))
R = M.copy()
else:
X = init
R = M - np.dot(G, X)
E = [] # track primal objective function
highest_d_obj = - np.inf
active_set = np.zeros(n_sources, dtype=bool) # start with full AS
alpha_lc = alpha / lipschitz_constant
# First make G fortran for faster access to blocks of columns
G = np.asfortranarray(G)
# Ensure these are correct for dgemm
assert R.dtype == np.float64
assert G.dtype == np.float64
one_ovr_lc = 1. / lipschitz_constant
# assert that all the multiplied matrices are fortran contiguous
assert X.T.flags.f_contiguous
assert R.T.flags.f_contiguous
assert G.flags.f_contiguous
# storing list of contiguous arrays
list_G_j_c = []
for j in range(n_positions):
idx = slice(j * n_orient, (j + 1) * n_orient)
list_G_j_c.append(np.ascontiguousarray(G[:, idx]))
for i in range(maxit):
_bcd(G, X, R, active_set, one_ovr_lc, n_orient, n_positions,
alpha_lc, list_G_j_c)
if (i + 1) % dgap_freq == 0:
_, p_obj, d_obj, _ = dgap_l21(M, G, X[active_set], active_set,
alpha, n_orient)
highest_d_obj = max(d_obj, highest_d_obj)
gap = p_obj - highest_d_obj
E.append(p_obj)
logger.debug("Iteration %d :: p_obj %f :: dgap %f :: n_active %d" %
(i + 1, p_obj, gap, np.sum(active_set) / n_orient))
if gap < tol:
logger.debug('Convergence reached ! (gap: %s < %s)'
% (gap, tol))
break
X = X[active_set]
return X, active_set, E
def _bcd(G, X, R, active_set, one_ovr_lc, n_orient, n_positions,
alpha_lc, list_G_j_c):
"""Implement one full pass of BCD.
BCD stands for Block Coordinate Descent.
    This function makes use of scipy.linalg.get_blas_funcs for speed reasons.
Parameters
----------
G : array, shape (n_sensors, n_active)
The gain matrix a.k.a. lead field.
X : array, shape (n_sources, n_times)
Sources, modified in place.
R : array, shape (n_sensors, n_times)
The residuals: R = M - G @ X, modified in place.
active_set : array of bool, shape (n_sources, )
Mask of active sources, modified in place.
one_ovr_lc : array, shape (n_positions, )
One over the lipschitz constants.
n_orient : int
Number of dipoles per positions (typically 1 or 3).
n_positions : int
Number of source positions.
alpha_lc: array, shape (n_positions, )
alpha * (Lipschitz constants).
"""
X_j_new = np.zeros_like(X[0:n_orient, :], order='C')
dgemm = _get_dgemm()
for j, G_j_c in enumerate(list_G_j_c):
idx = slice(j * n_orient, (j + 1) * n_orient)
G_j = G[:, idx]
X_j = X[idx]
dgemm(alpha=one_ovr_lc[j], beta=0., a=R.T, b=G_j, c=X_j_new.T,
overwrite_c=True)
# X_j_new = G_j.T @ R
# Mathurin's trick to avoid checking all the entries
was_non_zero = X_j[0, 0] != 0
# was_non_zero = np.any(X_j)
if was_non_zero:
dgemm(alpha=1., beta=1., a=X_j.T, b=G_j_c.T, c=R.T,
overwrite_c=True)
# R += np.dot(G_j, X_j)
X_j_new += X_j
block_norm = sqrt(sum_squared(X_j_new))
if block_norm <= alpha_lc[j]:
X_j.fill(0.)
active_set[idx] = False
else:
shrink = max(1.0 - alpha_lc[j] / block_norm, 0.0)
X_j_new *= shrink
dgemm(alpha=-1., beta=1., a=X_j_new.T, b=G_j_c.T, c=R.T,
overwrite_c=True)
# R -= np.dot(G_j, X_j_new)
X_j[:] = X_j_new
active_set[idx] = True
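# Illustrative sketch (not part of the original solver code): the block
# soft-thresholding rule that _bcd applies to every source block, written out
# on a single toy block. The numbers are made up for demonstration only.
def _example_block_shrinkage():
    X_j_new = np.array([[3.0, 4.0]])     # candidate update for one block
    alpha_lc_j = 2.5                     # alpha / Lipschitz constant
    block_norm = np.sqrt(np.sum(X_j_new ** 2))  # == 5.0
    if block_norm <= alpha_lc_j:
        X_j_new.fill(0.)                 # block is dropped from the active set
    else:
        X_j_new *= max(1.0 - alpha_lc_j / block_norm, 0.0)  # shrink by 0.5
    return X_j_new                       # array([[1.5, 2.0]])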
@verbose
def mixed_norm_solver(M, G, alpha, maxit=3000, tol=1e-8, verbose=None,
active_set_size=50, debias=True, n_orient=1,
solver='auto', return_gap=False, dgap_freq=10):
"""Solve L1/L2 mixed-norm inverse problem with active set strategy.
See references :footcite:`GramfortEtAl2012,StrohmeierEtAl2016`.
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_dipoles)
The gain matrix a.k.a. lead field.
alpha : float
The regularization parameter. It should be between 0 and 100.
A value of 100 will lead to an empty active set (no active source).
maxit : int
The number of iterations.
tol : float
Tolerance on dual gap for convergence checking.
%(verbose)s
active_set_size : int
Size of active set increase at each iteration.
debias : bool
Debias source estimates.
n_orient : int
        The number of orientations (1: fixed or 3: free or loose).
solver : 'prox' | 'cd' | 'bcd' | 'auto'
The algorithm to use for the optimization.
return_gap : bool
Return final duality gap.
dgap_freq : int
The duality gap is computed every dgap_freq iterations of the solver on
the active set.
Returns
-------
X : array, shape (n_active, n_times)
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function over the iterations.
gap : float
Final duality gap. Returned only if return_gap is True.
References
----------
.. footbibliography::
"""
n_dipoles = G.shape[1]
n_positions = n_dipoles // n_orient
n_sensors, n_times = M.shape
alpha_max = norm_l2inf(np.dot(G.T, M), n_orient, copy=False)
logger.info("-- ALPHA MAX : %s" % alpha_max)
alpha = float(alpha)
has_sklearn = True
try:
from sklearn.linear_model import MultiTaskLasso # noqa: F401
except ImportError:
has_sklearn = False
if solver == 'auto':
if has_sklearn and (n_orient == 1):
solver = 'cd'
else:
solver = 'bcd'
if solver == 'cd':
if n_orient == 1 and not has_sklearn:
warn('Scikit-learn >= 0.12 cannot be found. Using block coordinate'
' descent instead of coordinate descent.')
solver = 'bcd'
if n_orient > 1:
warn('Coordinate descent is only available for fixed orientation. '
'Using block coordinate descent instead of coordinate '
'descent')
solver = 'bcd'
if solver == 'cd':
logger.info("Using coordinate descent")
l21_solver = _mixed_norm_solver_cd
lc = None
elif solver == 'bcd':
logger.info("Using block coordinate descent")
l21_solver = _mixed_norm_solver_bcd
G = np.asfortranarray(G)
if n_orient == 1:
lc = np.sum(G * G, axis=0)
else:
lc = np.empty(n_positions)
for j in range(n_positions):
G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
else:
logger.info("Using proximal iterations")
l21_solver = _mixed_norm_solver_prox
lc = 1.01 * np.linalg.norm(G, ord=2) ** 2
if active_set_size is not None:
E = list()
highest_d_obj = - np.inf
X_init = None
active_set = np.zeros(n_dipoles, dtype=bool)
idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, M), n_orient))
new_active_idx = idx_large_corr[-active_set_size:]
if n_orient > 1:
new_active_idx = (n_orient * new_active_idx[:, None] +
np.arange(n_orient)[None, :]).ravel()
active_set[new_active_idx] = True
as_size = np.sum(active_set)
for k in range(maxit):
if solver == 'bcd':
lc_tmp = lc[active_set[::n_orient]]
elif solver == 'cd':
lc_tmp = None
else:
lc_tmp = 1.01 * np.linalg.norm(G[:, active_set], ord=2) ** 2
X, as_, _ = l21_solver(M, G[:, active_set], alpha, lc_tmp,
maxit=maxit, tol=tol, init=X_init,
n_orient=n_orient, dgap_freq=dgap_freq)
active_set[active_set] = as_.copy()
idx_old_active_set = np.where(active_set)[0]
_, p_obj, d_obj, R = dgap_l21(M, G, X, active_set, alpha,
n_orient)
highest_d_obj = max(d_obj, highest_d_obj)
gap = p_obj - highest_d_obj
E.append(p_obj)
logger.info("Iteration %d :: p_obj %f :: dgap %f ::"
"n_active_start %d :: n_active_end %d" % (
k + 1, p_obj, gap, as_size // n_orient,
np.sum(active_set) // n_orient))
if gap < tol:
logger.info('Convergence reached ! (gap: %s < %s)'
% (gap, tol))
break
# add sources if not last iteration
if k < (maxit - 1):
idx_large_corr = np.argsort(groups_norm2(np.dot(G.T, R),
n_orient))
new_active_idx = idx_large_corr[-active_set_size:]
if n_orient > 1:
new_active_idx = (n_orient * new_active_idx[:, None] +
np.arange(n_orient)[None, :])
new_active_idx = new_active_idx.ravel()
active_set[new_active_idx] = True
idx_active_set = np.where(active_set)[0]
as_size = np.sum(active_set)
X_init = np.zeros((as_size, n_times), dtype=X.dtype)
idx = np.searchsorted(idx_active_set, idx_old_active_set)
X_init[idx] = X
else:
warn('Did NOT converge ! (gap: %s > %s)' % (gap, tol))
else:
X, active_set, E = l21_solver(M, G, alpha, lc, maxit=maxit,
tol=tol, n_orient=n_orient, init=None)
if return_gap:
gap = dgap_l21(M, G, X, active_set, alpha, n_orient)[0]
if np.any(active_set) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
logger.info('Final active set size: %s' % (np.sum(active_set) // n_orient))
if return_gap:
return X, active_set, E, gap
else:
return X, active_set, E
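# Usage sketch (illustrative, not part of the original module): calling
# mixed_norm_solver on synthetic data. The random lead field, the noiseless
# measurements and the regularization level (50% of alpha_max, computed with
# the same norm_l2inf helper the solver itself uses above) are assumptions
# chosen for demonstration; solver='bcd' avoids any scikit-learn dependency.
def _example_mixed_norm_solver():
    rng = np.random.RandomState(42)
    n_sensors, n_sources, n_times = 20, 60, 30
    G = rng.randn(n_sensors, n_sources)          # synthetic gain matrix
    X_true = np.zeros((n_sources, n_times))
    X_true[:3] = 5. * rng.randn(3, n_times)      # three truly active sources
    M = np.dot(G, X_true)                        # noiseless measurements
    alpha = 0.5 * norm_l2inf(np.dot(G.T, M), 1, copy=False)
    X, active_set, E = mixed_norm_solver(
        M, G, alpha, maxit=500, tol=1e-4, n_orient=1, solver='bcd')
    return X, active_set, E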
@verbose
def iterative_mixed_norm_solver(M, G, alpha, n_mxne_iter, maxit=3000,
tol=1e-8, verbose=None, active_set_size=50,
debias=True, n_orient=1, dgap_freq=10,
solver='auto'):
"""Solve L0.5/L2 mixed-norm inverse problem with active set strategy.
See reference :footcite:`StrohmeierEtAl2016`.
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_dipoles)
The gain matrix a.k.a. lead field.
alpha : float
The regularization parameter. It should be between 0 and 100.
A value of 100 will lead to an empty active set (no active source).
n_mxne_iter : int
The number of MxNE iterations. If > 1, iterative reweighting
is applied.
maxit : int
The number of iterations.
tol : float
Tolerance on dual gap for convergence checking.
%(verbose)s
active_set_size : int
Size of active set increase at each iteration.
debias : bool
Debias source estimates.
n_orient : int
        The number of orientations (1: fixed or 3: free or loose).
dgap_freq : int or np.inf
The duality gap is evaluated every dgap_freq iterations.
solver : 'prox' | 'cd' | 'bcd' | 'auto'
The algorithm to use for the optimization.
Returns
-------
X : array, shape (n_active, n_times)
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function over the iterations.
References
----------
.. footbibliography::
"""
def g(w):
return np.sqrt(np.sqrt(groups_norm2(w.copy(), n_orient)))
def gprime(w):
return 2. * np.repeat(g(w), n_orient).ravel()
E = list()
active_set = np.ones(G.shape[1], dtype=bool)
weights = np.ones(G.shape[1])
X = np.zeros((G.shape[1], M.shape[1]))
for k in range(n_mxne_iter):
X0 = X.copy()
active_set_0 = active_set.copy()
G_tmp = G[:, active_set] * weights[np.newaxis, :]
if active_set_size is not None:
if np.sum(active_set) > (active_set_size * n_orient):
X, _active_set, _ = mixed_norm_solver(
M, G_tmp, alpha, debias=False, n_orient=n_orient,
maxit=maxit, tol=tol, active_set_size=active_set_size,
dgap_freq=dgap_freq, solver=solver, verbose=verbose)
else:
X, _active_set, _ = mixed_norm_solver(
M, G_tmp, alpha, debias=False, n_orient=n_orient,
maxit=maxit, tol=tol, active_set_size=None,
dgap_freq=dgap_freq, solver=solver, verbose=verbose)
else:
X, _active_set, _ = mixed_norm_solver(
M, G_tmp, alpha, debias=False, n_orient=n_orient,
maxit=maxit, tol=tol, active_set_size=None,
dgap_freq=dgap_freq, solver=solver, verbose=verbose)
logger.info('active set size %d' % (_active_set.sum() / n_orient))
if _active_set.sum() > 0:
active_set[active_set] = _active_set
# Reapply weights to have correct unit
X *= weights[_active_set][:, np.newaxis]
weights = gprime(X)
p_obj = 0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X),
'fro') ** 2. + alpha * np.sum(g(X))
E.append(p_obj)
# Check convergence
if ((k >= 1) and np.all(active_set == active_set_0) and
np.all(np.abs(X - X0) < tol)):
print('Convergence reached after %d reweightings!' % k)
break
else:
active_set = np.zeros_like(active_set)
p_obj = 0.5 * np.linalg.norm(M) ** 2.
E.append(p_obj)
break
if np.any(active_set) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
return X, active_set, E
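# Usage sketch (illustrative): the reweighted variant (irMxNE) on the same
# kind of synthetic data as above. n_mxne_iter, alpha and the data are
# assumptions chosen purely for demonstration.
def _example_iterative_mixed_norm_solver():
    rng = np.random.RandomState(42)
    G = rng.randn(20, 60)
    X_true = np.zeros((60, 30))
    X_true[:3] = 5. * rng.randn(3, 30)
    M = np.dot(G, X_true)
    alpha = 0.5 * norm_l2inf(np.dot(G.T, M), 1, copy=False)
    X, active_set, E = iterative_mixed_norm_solver(
        M, G, alpha, n_mxne_iter=3, maxit=500, tol=1e-4,
        n_orient=1, solver='bcd')
    return X, active_set, E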
###############################################################################
# TF-MxNE
@verbose
def tf_lipschitz_constant(M, G, phi, phiT, tol=1e-3, verbose=None):
"""Compute lipschitz constant for FISTA.
It uses a power iteration method.
"""
n_times = M.shape[1]
n_points = G.shape[1]
iv = np.ones((n_points, n_times), dtype=np.float64)
v = phi(iv)
L = 1e100
for it in range(100):
L_old = L
logger.info('Lipschitz estimation: iteration = %d' % it)
iv = np.real(phiT(v))
Gv = np.dot(G, iv)
GtGv = np.dot(G.T, Gv)
w = phi(GtGv)
L = np.max(np.abs(w)) # l_inf norm
v = w / L
if abs((L - L_old) / L_old) < tol:
break
return L
def safe_max_abs(A, ia):
"""Compute np.max(np.abs(A[ia])) possible with empty A."""
if np.sum(ia): # ia is not empty
return np.max(np.abs(A[ia]))
else:
return 0.
def safe_max_abs_diff(A, ia, B, ib):
"""Compute np.max(np.abs(A)) possible with empty A."""
A = A[ia] if np.sum(ia) else 0.0
B = B[ib] if np.sum(ia) else 0.0
return np.max(np.abs(A - B))
class _Phi(object):
"""Have phi stft as callable w/o using a lambda that does not pickle."""
def __init__(self, wsize, tstep, n_coefs, n_times): # noqa: D102
self.wsize = np.atleast_1d(wsize)
self.tstep = np.atleast_1d(tstep)
self.n_coefs = np.atleast_1d(n_coefs)
self.n_dicts = len(tstep)
self.n_freqs = wsize // 2 + 1
self.n_steps = self.n_coefs // self.n_freqs
self.n_times = n_times
# ravel freq+time here
self.ops = list()
for ws, ts in zip(self.wsize, self.tstep):
self.ops.append(
stft(np.eye(n_times), ws, ts,
verbose=False).reshape(n_times, -1))
def __call__(self, x): # noqa: D105
if self.n_dicts == 1:
return x @ self.ops[0]
else:
return np.hstack(
[x @ op for op in self.ops]) / np.sqrt(self.n_dicts)
def norm(self, z, ord=2):
"""Squared L2 norm if ord == 2 and L1 norm if order == 1."""
if ord not in (1, 2):
raise ValueError('Only supported norm order are 1 and 2. '
'Got ord = %s' % ord)
stft_norm = stft_norm1 if ord == 1 else stft_norm2
norm = 0.
if len(self.n_coefs) > 1:
z_ = np.array_split(np.atleast_2d(z), np.cumsum(self.n_coefs)[:-1],
axis=1)
else:
z_ = [np.atleast_2d(z)]
for i in range(len(z_)):
norm += stft_norm(
z_[i].reshape(-1, self.n_freqs[i], self.n_steps[i]))
return norm
class _PhiT(object):
"""Have phi.T istft as callable w/o using a lambda that does not pickle."""
def __init__(self, tstep, n_freqs, n_steps, n_times): # noqa: D102
self.tstep = tstep
self.n_freqs = n_freqs
self.n_steps = n_steps
self.n_times = n_times
self.n_dicts = len(tstep) if isinstance(tstep, np.ndarray) else 1
self.n_coefs = list()
self.op_re = list()
self.op_im = list()
for nf, ns, ts in zip(self.n_freqs, self.n_steps, self.tstep):
nc = nf * ns
self.n_coefs.append(nc)
eye = np.eye(nc).reshape(nf, ns, nf, ns)
self.op_re.append(istft(
eye, ts, n_times).reshape(nc, n_times))
self.op_im.append(istft(
eye * 1j, ts, n_times).reshape(nc, n_times))
def __call__(self, z): # noqa: D105
if self.n_dicts == 1:
return z.real @ self.op_re[0] + z.imag @ self.op_im[0]
else:
x_out = np.zeros((z.shape[0], self.n_times))
z_ = np.array_split(z, np.cumsum(self.n_coefs)[:-1], axis=1)
for this_z, op_re, op_im in zip(z_, self.op_re, self.op_im):
x_out += this_z.real @ op_re + this_z.imag @ op_im
return x_out / np.sqrt(self.n_dicts)
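# Usage sketch (illustrative): building the analysis/synthesis pair the same
# way tf_mixed_norm_solver does further below. The wsize/tstep values are
# assumptions picked for demonstration; the n_steps/n_freqs/n_coefs arithmetic
# mirrors the solver's own setup.
def _example_phi_phiT():
    n_times = 64
    wsize = np.atleast_1d(16)
    tstep = np.atleast_1d(4)
    n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
    n_freqs = wsize // 2 + 1
    n_coefs = n_steps * n_freqs
    phi = _Phi(wsize, tstep, n_coefs, n_times)
    phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
    x = np.random.RandomState(0).randn(2, n_times)
    z = phi(x)       # TF coefficients, shape (2, n_coefs.sum())
    x_rec = phiT(z)  # back to the time domain, shape (2, n_times)
    return z.shape, x_rec.shape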
def norm_l21_tf(Z, phi, n_orient, w_space=None):
"""L21 norm for TF."""
if Z.shape[0]:
l21_norm = np.sqrt(
phi.norm(Z, ord=2).reshape(-1, n_orient).sum(axis=1))
if w_space is not None:
l21_norm *= w_space
l21_norm = l21_norm.sum()
else:
l21_norm = 0.
return l21_norm
def norm_l1_tf(Z, phi, n_orient, w_time):
"""L1 norm for TF."""
if Z.shape[0]:
n_positions = Z.shape[0] // n_orient
Z_ = np.sqrt(np.sum(
(np.abs(Z) ** 2.).reshape((n_orient, -1), order='F'), axis=0))
Z_ = Z_.reshape((n_positions, -1), order='F')
if w_time is not None:
Z_ *= w_time
l1_norm = phi.norm(Z_, ord=1).sum()
else:
l1_norm = 0.
return l1_norm
def norm_epsilon(Y, l1_ratio, phi, w_space=1., w_time=None):
"""Weighted epsilon norm.
The weighted epsilon norm is the dual norm of::
w_{space} * (1. - l1_ratio) * ||Y||_2 + l1_ratio * ||Y||_{1, w_{time}}.
where `||Y||_{1, w_{time}} = (np.abs(Y) * w_time).sum()`
Warning: it takes into account the fact that Y only contains coefficients
corresponding to the positive frequencies (see `stft_norm2()`): some
entries will be counted twice. It is also assumed that all entries of both
Y and w_time are non-negative. See
:footcite:`NdiayeEtAl2016,BurdakovMerkulov2001`.
Parameters
----------
Y : array, shape (n_coefs,)
The input data.
l1_ratio : float between 0 and 1
Tradeoff between L2 and L1 regularization. When it is 0, no temporal
regularization is applied.
phi : instance of _Phi
The TF operator.
w_space : float
Scalar weight of the L2 norm. By default, it is taken equal to 1.
w_time : array, shape (n_coefs, ) | None
Weights of each TF coefficient in the L1 norm. If None, weights equal
to 1 are used.
Returns
-------
nu : float
The value of the dual norm evaluated at Y.
References
----------
.. footbibliography::
"""
# since the solution is invariant to flipped signs in Y, all entries
# of Y are assumed positive
# Add negative freqs: count all freqs twice except first and last:
freqs_count = np.full(len(Y), 2)
for i, fc in enumerate(np.array_split(freqs_count,
np.cumsum(phi.n_coefs)[:-1])):
fc[:phi.n_steps[i]] = 1
fc[-phi.n_steps[i]:] = 1
# exclude 0 weights:
if w_time is not None:
nonzero_weights = (w_time != 0.0)
Y = Y[nonzero_weights]
freqs_count = freqs_count[nonzero_weights]
w_time = w_time[nonzero_weights]
norm_inf_Y = np.max(Y / w_time) if w_time is not None else np.max(Y)
if l1_ratio == 1.:
# dual norm of L1 weighted is Linf with inverse weights
return norm_inf_Y
elif l1_ratio == 0.:
# dual norm of L2 is L2
return np.sqrt(phi.norm(Y[None, :], ord=2).sum())
if norm_inf_Y == 0.:
return 0.
# ignore some values of Y by lower bound on dual norm:
if w_time is None:
idx = Y > l1_ratio * norm_inf_Y
else:
idx = Y > l1_ratio * np.max(Y / (w_space * (1. - l1_ratio) +
l1_ratio * w_time))
if idx.sum() == 1:
return norm_inf_Y
# sort both Y / w_time and freqs_count at the same time
if w_time is not None:
idx_sort = np.argsort(Y[idx] / w_time[idx])[::-1]
w_time = w_time[idx][idx_sort]
else:
idx_sort = np.argsort(Y[idx])[::-1]
Y = Y[idx][idx_sort]
freqs_count = freqs_count[idx][idx_sort]
Y = np.repeat(Y, freqs_count)
if w_time is not None:
w_time = np.repeat(w_time, freqs_count)
K = Y.shape[0]
if w_time is None:
p_sum_Y2 = np.cumsum(Y ** 2)
p_sum_w2 = np.arange(1, K + 1)
p_sum_Yw = np.cumsum(Y)
upper = p_sum_Y2 / Y ** 2 - 2. * p_sum_Yw / Y + p_sum_w2
else:
p_sum_Y2 = np.cumsum(Y ** 2)
p_sum_w2 = np.cumsum(w_time ** 2)
p_sum_Yw = np.cumsum(Y * w_time)
upper = (p_sum_Y2 / (Y / w_time) ** 2 -
2. * p_sum_Yw / (Y / w_time) + p_sum_w2)
upper_greater = np.where(upper > w_space ** 2 * (1. - l1_ratio) ** 2 /
l1_ratio ** 2)[0]
i0 = upper_greater[0] - 1 if upper_greater.size else K - 1
p_sum_Y2 = p_sum_Y2[i0]
p_sum_w2 = p_sum_w2[i0]
p_sum_Yw = p_sum_Yw[i0]
denom = l1_ratio ** 2 * p_sum_w2 - w_space ** 2 * (1. - l1_ratio) ** 2
if np.abs(denom) < 1e-10:
return p_sum_Y2 / (2. * l1_ratio * p_sum_Yw)
else:
delta = (l1_ratio * p_sum_Yw) ** 2 - p_sum_Y2 * denom
return (l1_ratio * p_sum_Yw - np.sqrt(delta)) / denom
def norm_epsilon_inf(G, R, phi, l1_ratio, n_orient, w_space=None, w_time=None):
"""Weighted epsilon-inf norm of phi(np.dot(G.T, R)).
Parameters
----------
G : array, shape (n_sensors, n_sources)
Gain matrix a.k.a. lead field.
R : array, shape (n_sensors, n_times)
Residual.
phi : instance of _Phi
The TF operator.
l1_ratio : float between 0 and 1
Parameter controlling the tradeoff between L21 and L1 regularization.
0 corresponds to an absence of temporal regularization, ie MxNE.
n_orient : int
Number of dipoles per location (typically 1 or 3).
w_space : array, shape (n_positions,) or None.
Weights for the L2 term of the epsilon norm. If None, weights are
all equal to 1.
w_time : array, shape (n_positions, n_coefs) or None
Weights for the L1 term of the epsilon norm. If None, weights are
all equal to 1.
Returns
-------
nu : float
The maximum value of the epsilon norms over groups of n_orient dipoles
(consecutive rows of phi(np.dot(G.T, R))).
"""
n_positions = G.shape[1] // n_orient
GTRPhi = np.abs(phi(np.dot(G.T, R)))
# norm over orientations:
GTRPhi = GTRPhi.reshape((n_orient, -1), order='F')
GTRPhi = np.linalg.norm(GTRPhi, axis=0)
GTRPhi = GTRPhi.reshape((n_positions, -1), order='F')
nu = 0.
for idx in range(n_positions):
GTRPhi_ = GTRPhi[idx]
w_t = w_time[idx] if w_time is not None else None
w_s = w_space[idx] if w_space is not None else 1.
norm_eps = norm_epsilon(GTRPhi_, l1_ratio, phi, w_space=w_s,
w_time=w_t)
if norm_eps > nu:
nu = norm_eps
return nu
def dgap_l21l1(M, G, Z, active_set, alpha_space, alpha_time, phi, phiT,
n_orient, highest_d_obj, w_space=None, w_time=None):
"""Duality gap for the time-frequency mixed norm inverse problem.
See :footcite:`GramfortEtAl2012,NdiayeEtAl2016`
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_sources)
Gain matrix a.k.a. lead field.
Z : array, shape (n_active, n_coefs)
Sources in TF domain.
active_set : array of bool, shape (n_sources, )
Mask of active sources.
alpha_space : float
The spatial regularization parameter.
alpha_time : float
        The temporal regularization parameter. The higher it is, the smoother
        the estimated time series will be.
phi : instance of _Phi
The TF operator.
phiT : instance of _PhiT
The transpose of the TF operator.
n_orient : int
Number of dipoles per locations (typically 1 or 3).
highest_d_obj : float
The highest value of the dual objective so far.
w_space : array, shape (n_positions, )
Array of spatial weights.
w_time : array, shape (n_positions, n_coefs)
Array of TF weights.
Returns
-------
    gap : float
        Dual gap.
    p_obj : float
        Primal objective.
    d_obj : float
        Dual objective. gap = p_obj - d_obj.
    R : array, shape (n_sensors, n_times)
        Current residual (M - G * X).
References
----------
.. footbibliography::
"""
X = phiT(Z)
GX = np.dot(G[:, active_set], X)
R = M - GX
# some functions need w_time only on active_set, other need it completely
if w_time is not None:
w_time_as = w_time[active_set[::n_orient]]
else:
w_time_as = None
if w_space is not None:
w_space_as = w_space[active_set[::n_orient]]
else:
w_space_as = None
penaltyl1 = norm_l1_tf(Z, phi, n_orient, w_time_as)
penaltyl21 = norm_l21_tf(Z, phi, n_orient, w_space_as)
nR2 = sum_squared(R)
p_obj = 0.5 * nR2 + alpha_space * penaltyl21 + alpha_time * penaltyl1
l1_ratio = alpha_time / (alpha_space + alpha_time)
dual_norm = norm_epsilon_inf(G, R, phi, l1_ratio, n_orient,
w_space=w_space, w_time=w_time)
scaling = min(1., (alpha_space + alpha_time) / dual_norm)
d_obj = (scaling - 0.5 * (scaling ** 2)) * nR2 + scaling * np.sum(R * GX)
d_obj = max(d_obj, highest_d_obj)
gap = p_obj - d_obj
return gap, p_obj, d_obj, R
def _tf_mixed_norm_solver_bcd_(M, G, Z, active_set, candidates, alpha_space,
alpha_time, lipschitz_constant, phi, phiT,
w_space=None, w_time=None, n_orient=1,
maxit=200, tol=1e-8, dgap_freq=10, perc=None,
timeit=True, verbose=None):
n_sources = G.shape[1]
n_positions = n_sources // n_orient
# First make G fortran for faster access to blocks of columns
Gd = np.asfortranarray(G)
G = np.ascontiguousarray(
Gd.T.reshape(n_positions, n_orient, -1).transpose(0, 2, 1))
R = M.copy() # residual
active = np.where(active_set[::n_orient])[0]
for idx in active:
R -= np.dot(G[idx], phiT(Z[idx]))
E = [] # track primal objective function
if w_time is None:
alpha_time_lc = alpha_time / lipschitz_constant
else:
alpha_time_lc = alpha_time * w_time / lipschitz_constant[:, None]
if w_space is None:
alpha_space_lc = alpha_space / lipschitz_constant
else:
alpha_space_lc = alpha_space * w_space / lipschitz_constant
converged = False
d_obj = - np.inf
for i in range(maxit):
for jj in candidates:
ids = jj * n_orient
ide = ids + n_orient
G_j = G[jj]
Z_j = Z[jj]
active_set_j = active_set[ids:ide]
was_active = np.any(active_set_j)
# gradient step
GTR = np.dot(G_j.T, R) / lipschitz_constant[jj]
X_j_new = GTR.copy()
if was_active:
X_j = phiT(Z_j)
R += np.dot(G_j, X_j)
X_j_new += X_j
rows_norm = np.linalg.norm(X_j_new, 'fro')
if rows_norm <= alpha_space_lc[jj]:
if was_active:
Z[jj] = 0.0
active_set_j[:] = False
else:
GTR_phi = phi(GTR)
if was_active:
Z_j_new = Z_j + GTR_phi
else:
Z_j_new = GTR_phi
col_norm = np.linalg.norm(Z_j_new, axis=0)
if np.all(col_norm <= alpha_time_lc[jj]):
Z[jj] = 0.0
active_set_j[:] = False
else:
# l1
shrink = np.maximum(1.0 - alpha_time_lc[jj] / np.maximum(
col_norm, alpha_time_lc[jj]), 0.0)
if w_time is not None:
shrink[w_time[jj] == 0.0] = 0.0
Z_j_new *= shrink[np.newaxis, :]
# l21
shape_init = Z_j_new.shape
row_norm = np.sqrt(phi.norm(Z_j_new, ord=2).sum())
if row_norm <= alpha_space_lc[jj]:
Z[jj] = 0.0
active_set_j[:] = False
else:
shrink = np.maximum(
1.0 - alpha_space_lc[jj] /
np.maximum(row_norm, alpha_space_lc[jj]), 0.0)
Z_j_new *= shrink
Z[jj] = Z_j_new.reshape(-1, *shape_init[1:]).copy()
active_set_j[:] = True
Z_j_phi_T = phiT(Z[jj])
R -= np.dot(G_j, Z_j_phi_T)
if (i + 1) % dgap_freq == 0:
Zd = np.vstack([Z[pos] for pos in range(n_positions)
if np.any(Z[pos])])
gap, p_obj, d_obj, _ = dgap_l21l1(
M, Gd, Zd, active_set, alpha_space, alpha_time, phi, phiT,
n_orient, d_obj, w_space=w_space, w_time=w_time)
converged = (gap < tol)
E.append(p_obj)
logger.info("\n Iteration %d :: n_active %d" % (
i + 1, np.sum(active_set) / n_orient))
logger.info(" dgap %.2e :: p_obj %f :: d_obj %f" % (
gap, p_obj, d_obj))
if converged:
break
if perc is not None:
if np.sum(active_set) / float(n_orient) <= perc * n_positions:
break
return Z, active_set, E, converged
@verbose
def _tf_mixed_norm_solver_bcd_active_set(M, G, alpha_space, alpha_time,
lipschitz_constant, phi, phiT,
Z_init=None, w_space=None,
w_time=None, n_orient=1, maxit=200,
tol=1e-8, dgap_freq=10,
verbose=None):
n_sensors, n_times = M.shape
n_sources = G.shape[1]
n_positions = n_sources // n_orient
Z = dict.fromkeys(np.arange(n_positions), 0.0)
active_set = np.zeros(n_sources, dtype=bool)
active = []
if Z_init is not None:
if Z_init.shape != (n_sources, phi.n_coefs.sum()):
            raise ValueError('Z_init must be None or an array with shape '
                             '(n_sources, n_coefs).')
for ii in range(n_positions):
if np.any(Z_init[ii * n_orient:(ii + 1) * n_orient]):
active_set[ii * n_orient:(ii + 1) * n_orient] = True
active.append(ii)
if len(active):
Z.update(dict(zip(active,
np.vsplit(Z_init[active_set], len(active)))))
E = []
candidates = range(n_positions)
d_obj = -np.inf
while True:
# single BCD pass on all positions:
Z_init = dict.fromkeys(np.arange(n_positions), 0.0)
Z_init.update(dict(zip(active, Z.values())))
Z, active_set, E_tmp, _ = _tf_mixed_norm_solver_bcd_(
M, G, Z_init, active_set, candidates, alpha_space, alpha_time,
lipschitz_constant, phi, phiT, w_space=w_space, w_time=w_time,
n_orient=n_orient, maxit=1, tol=tol, perc=None, verbose=verbose)
E += E_tmp
# multiple BCD pass on active positions:
active = np.where(active_set[::n_orient])[0]
Z_init = dict(zip(range(len(active)), [Z[idx] for idx in active]))
candidates_ = range(len(active))
if w_space is not None:
w_space_as = w_space[active_set[::n_orient]]
else:
w_space_as = None
if w_time is not None:
w_time_as = w_time[active_set[::n_orient]]
else:
w_time_as = None
Z, as_, E_tmp, converged = _tf_mixed_norm_solver_bcd_(
M, G[:, active_set], Z_init,
np.ones(len(active) * n_orient, dtype=bool),
candidates_, alpha_space, alpha_time,
lipschitz_constant[active_set[::n_orient]], phi, phiT,
w_space=w_space_as, w_time=w_time_as,
n_orient=n_orient, maxit=maxit, tol=tol,
dgap_freq=dgap_freq, perc=0.5,
verbose=verbose)
active = np.where(active_set[::n_orient])[0]
active_set[active_set] = as_.copy()
E += E_tmp
converged = True
if converged:
Zd = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])])
gap, p_obj, d_obj, _ = dgap_l21l1(
M, G, Zd, active_set, alpha_space, alpha_time,
phi, phiT, n_orient, d_obj, w_space, w_time)
logger.info("\ndgap %.2e :: p_obj %f :: d_obj %f :: n_active %d"
% (gap, p_obj, d_obj, np.sum(active_set) / n_orient))
if gap < tol:
logger.info("\nConvergence reached!\n")
break
if active_set.sum():
Z = np.vstack([Z[pos] for pos in range(len(Z)) if np.any(Z[pos])])
X = phiT(Z)
else:
Z = np.zeros((0, phi.n_coefs.sum()), dtype=np.complex128)
X = np.zeros((0, n_times))
return X, Z, active_set, E, gap
@verbose
def tf_mixed_norm_solver(M, G, alpha_space, alpha_time, wsize=64, tstep=4,
n_orient=1, maxit=200, tol=1e-8,
active_set_size=None, debias=True, return_gap=False,
dgap_freq=10, verbose=None):
"""Solve TF L21+L1 inverse solver with BCD and active set approach.
See :footcite:`GramfortEtAl2013b,GramfortEtAl2011,BekhtiEtAl2016`.
Parameters
----------
M : array, shape (n_sensors, n_times)
The data.
G : array, shape (n_sensors, n_dipoles)
The gain matrix a.k.a. lead field.
alpha_space : float
The spatial regularization parameter.
alpha_time : float
        The temporal regularization parameter. The higher it is, the smoother
        the estimated time series will be.
    wsize : int or array-like
Length of the STFT window in samples (must be a multiple of 4).
If an array is passed, multiple TF dictionaries are used (each having
its own wsize and tstep) and each entry of wsize must be a multiple
of 4.
    tstep : int or array-like
Step between successive windows in samples (must be a multiple of 2,
a divider of wsize and smaller than wsize/2) (default: wsize/2).
If an array is passed, multiple TF dictionaries are used (each having
its own wsize and tstep), and each entry of tstep must be a multiple
of 2 and divide the corresponding entry of wsize.
n_orient : int
        The number of orientations (1: fixed or 3: free or loose).
maxit : int
The number of iterations.
tol : float
If absolute difference between estimates at 2 successive iterations
is lower than tol, the convergence is reached.
debias : bool
Debias source estimates.
return_gap : bool
Return final duality gap.
dgap_freq : int or np.inf
The duality gap is evaluated every dgap_freq iterations.
%(verbose)s
Returns
-------
X : array, shape (n_active, n_times)
The source estimates.
active_set : array
The mask of active sources.
E : list
        The value of the objective function every dgap_freq iteration. If
        dgap_freq is np.inf, it will be empty.
gap : float
Final duality gap. Returned only if return_gap is True.
References
----------
.. footbibliography::
"""
n_sensors, n_times = M.shape
n_sensors, n_sources = G.shape
n_positions = n_sources // n_orient
tstep = np.atleast_1d(tstep)
wsize = np.atleast_1d(wsize)
if len(tstep) != len(wsize):
raise ValueError('The same number of window sizes and steps must be '
'passed. Got tstep = %s and wsize = %s' %
(tstep, wsize))
n_steps = np.ceil(M.shape[1] / tstep.astype(float)).astype(int)
n_freqs = wsize // 2 + 1
n_coefs = n_steps * n_freqs
phi = _Phi(wsize, tstep, n_coefs, n_times)
phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
if n_orient == 1:
lc = np.sum(G * G, axis=0)
else:
lc = np.empty(n_positions)
for j in range(n_positions):
G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
logger.info("Using block coordinate descent with active set approach")
X, Z, active_set, E, gap = _tf_mixed_norm_solver_bcd_active_set(
M, G, alpha_space, alpha_time, lc, phi, phiT,
Z_init=None, n_orient=n_orient, maxit=maxit, tol=tol,
dgap_freq=dgap_freq, verbose=None)
if np.any(active_set) and debias:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
if return_gap:
return X, active_set, E, gap
else:
return X, active_set, E
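# Usage sketch (illustrative): a small synthetic call to tf_mixed_norm_solver.
# The gain matrix, the single oscillatory source and the values chosen for
# alpha_space, alpha_time, wsize and tstep are all assumptions made for
# demonstration and would normally need tuning on real data.
def _example_tf_mixed_norm_solver():
    rng = np.random.RandomState(0)
    n_sensors, n_sources, n_times = 20, 30, 64
    G = rng.randn(n_sensors, n_sources)
    X_true = np.zeros((n_sources, n_times))
    X_true[0] = 5. * np.sin(2. * np.pi * 5. * np.arange(n_times) / n_times)
    M = np.dot(G, X_true)
    X, active_set, E = tf_mixed_norm_solver(
        M, G, alpha_space=30., alpha_time=1., wsize=16, tstep=4,
        n_orient=1, maxit=200, tol=1e-4)
    return X, active_set, E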
@verbose
def iterative_tf_mixed_norm_solver(M, G, alpha_space, alpha_time,
n_tfmxne_iter, wsize=64, tstep=4,
maxit=3000, tol=1e-8, debias=True,
n_orient=1, dgap_freq=10, verbose=None):
"""Solve TF L0.5/L1 + L0.5 inverse problem with BCD + active set approach.
Parameters
----------
    M : array, shape (n_sensors, n_times)
        The data.
    G : array, shape (n_sensors, n_dipoles)
        The gain matrix a.k.a. lead field.
    alpha_space : float
        The spatial regularization parameter. The higher it is, the fewer
        active sources there will be.
    alpha_time : float
        The temporal regularization parameter. The higher it is, the smoother
        the estimated time series will be. 0 means no temporal regularization,
        a.k.a. irMxNE.
n_tfmxne_iter : int
Number of TF-MxNE iterations. If > 1, iterative reweighting is applied.
wsize : int or array-like
Length of the STFT window in samples (must be a multiple of 4).
If an array is passed, multiple TF dictionaries are used (each having
its own wsize and tstep) and each entry of wsize must be a multiple
of 4.
tstep : int or array-like
Step between successive windows in samples (must be a multiple of 2,
a divider of wsize and smaller than wsize/2) (default: wsize/2).
If an array is passed, multiple TF dictionaries are used (each having
its own wsize and tstep), and each entry of tstep must be a multiple
of 2 and divide the corresponding entry of wsize.
maxit : int
The maximum number of iterations for each TF-MxNE problem.
tol : float
If absolute difference between estimates at 2 successive iterations
is lower than tol, the convergence is reached. Also used as criterion
on duality gap for each TF-MxNE problem.
debias : bool
Debias source estimates.
n_orient : int
        The number of orientations (1: fixed or 3: free or loose).
dgap_freq : int or np.inf
The duality gap is evaluated every dgap_freq iterations.
verbose : bool, str, int, or None
If not None, override default verbose level (see :func:`mne.verbose`
and :ref:`Logging documentation <tut_logging>` for more).
Returns
-------
X : array, shape (n_active, n_times)
The source estimates.
active_set : array
The mask of active sources.
E : list
The value of the objective function over iterations.
"""
n_sensors, n_times = M.shape
n_sources = G.shape[1]
n_positions = n_sources // n_orient
tstep = np.atleast_1d(tstep)
wsize = np.atleast_1d(wsize)
if len(tstep) != len(wsize):
raise ValueError('The same number of window sizes and steps must be '
'passed. Got tstep = %s and wsize = %s' %
(tstep, wsize))
n_steps = np.ceil(n_times / tstep.astype(float)).astype(int)
n_freqs = wsize // 2 + 1
n_coefs = n_steps * n_freqs
phi = _Phi(wsize, tstep, n_coefs, n_times)
phiT = _PhiT(tstep, n_freqs, n_steps, n_times)
if n_orient == 1:
lc = np.sum(G * G, axis=0)
else:
lc = np.empty(n_positions)
for j in range(n_positions):
G_tmp = G[:, (j * n_orient):((j + 1) * n_orient)]
lc[j] = np.linalg.norm(np.dot(G_tmp.T, G_tmp), ord=2)
# space and time penalties, and inverse of their derivatives:
def g_space(Z):
return np.sqrt(np.sqrt(phi.norm(Z, ord=2).reshape(
-1, n_orient).sum(axis=1)))
def g_space_prime_inv(Z):
return 2. * g_space(Z)
def g_time(Z):
return np.sqrt(np.sqrt(np.sum((np.abs(Z) ** 2.).reshape(
(n_orient, -1), order='F'), axis=0)).reshape(
(-1, Z.shape[1]), order='F'))
def g_time_prime_inv(Z):
return 2. * g_time(Z)
E = list()
active_set = np.ones(n_sources, dtype=bool)
Z = np.zeros((n_sources, phi.n_coefs.sum()), dtype=np.complex128)
for k in range(n_tfmxne_iter):
active_set_0 = active_set.copy()
Z0 = Z.copy()
if k == 0:
w_space = None
w_time = None
else:
w_space = 1. / g_space_prime_inv(Z)
w_time = g_time_prime_inv(Z)
w_time[w_time == 0.0] = -1.
w_time = 1. / w_time
w_time[w_time < 0.0] = 0.0
X, Z, active_set_, E_, _ = _tf_mixed_norm_solver_bcd_active_set(
M, G[:, active_set], alpha_space, alpha_time,
lc[active_set[::n_orient]], phi, phiT,
Z_init=Z, w_space=w_space, w_time=w_time, n_orient=n_orient,
maxit=maxit, tol=tol, dgap_freq=dgap_freq, verbose=None)
active_set[active_set] = active_set_
if active_set.sum() > 0:
l21_penalty = np.sum(g_space(Z.copy()))
l1_penalty = phi.norm(g_time(Z.copy()), ord=1).sum()
p_obj = (0.5 * np.linalg.norm(M - np.dot(G[:, active_set], X),
'fro') ** 2. + alpha_space * l21_penalty +
alpha_time * l1_penalty)
E.append(p_obj)
logger.info('Iteration %d: active set size=%d, E=%f' % (
k + 1, active_set.sum() / n_orient, p_obj))
# Check convergence
if np.array_equal(active_set, active_set_0):
max_diff = np.amax(np.abs(Z - Z0))
if (max_diff < tol):
print('Convergence reached after %d reweightings!' % k)
break
else:
p_obj = 0.5 * np.linalg.norm(M) ** 2.
E.append(p_obj)
logger.info('Iteration %d: as_size=%d, E=%f' % (
k + 1, active_set.sum() / n_orient, p_obj))
break
if debias:
if active_set.sum() > 0:
bias = compute_bias(M, G[:, active_set], X, n_orient=n_orient)
X *= bias[:, np.newaxis]
return X, active_set, E
| bsd-3-clause |
xuewei4d/scikit-learn | sklearn/metrics/cluster/_unsupervised.py | 9 | 13917 | """Unsupervised evaluation metrics."""
# Authors: Robert Layton <robertlayton@gmail.com>
# Arnaud Fouchet <foucheta@gmail.com>
# Thierry Guillemot <thierry.guillemot.work@gmail.com>
# License: BSD 3 clause
import functools
import numpy as np
from ...utils import check_random_state
from ...utils import check_X_y
from ...utils import _safe_indexing
from ..pairwise import pairwise_distances_chunked
from ..pairwise import pairwise_distances
from ...preprocessing import LabelEncoder
from ...utils.validation import _deprecate_positional_args
def check_number_of_labels(n_labels, n_samples):
"""Check that number of labels are valid.
Parameters
----------
n_labels : int
Number of labels.
n_samples : int
Number of samples.
"""
if not 1 < n_labels < n_samples:
raise ValueError("Number of labels is %d. Valid values are 2 "
"to n_samples - 1 (inclusive)" % n_labels)
@_deprecate_positional_args
def silhouette_score(X, labels, *, metric='euclidean', sample_size=None,
random_state=None, **kwds):
"""Compute the mean Silhouette Coefficient of all samples.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``. To clarify, ``b`` is the distance between a sample and the nearest
cluster that the sample is not a part of.
Note that Silhouette Coefficient is only defined if number of labels
is ``2 <= n_labels <= n_samples - 1``.
This function returns the mean Silhouette Coefficient over all samples.
To obtain the values for each sample, use :func:`silhouette_samples`.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters. Negative values generally indicate that a sample has
been assigned to the wrong cluster, as a different cluster is more similar.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array-like of shape (n_samples_a, n_samples_a) if metric == \
"precomputed" or (n_samples_a, n_features) otherwise
An array of pairwise distances between samples, or a feature array.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`metrics.pairwise.pairwise_distances
<sklearn.metrics.pairwise.pairwise_distances>`. If ``X`` is
the distance array itself, use ``metric="precomputed"``.
sample_size : int, default=None
The size of the sample to use when computing the Silhouette Coefficient
on a random subset of the data.
If ``sample_size is None``, no sampling is used.
random_state : int, RandomState instance or None, default=None
Determines random number generation for selecting a subset of samples.
Used when ``sample_size is not None``.
Pass an int for reproducible results across multiple function calls.
See :term:`Glossary <random_state>`.
**kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a scipy.spatial.distance metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : float
Mean Silhouette Coefficient for all samples.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
if sample_size is not None:
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
random_state = check_random_state(random_state)
indices = random_state.permutation(X.shape[0])[:sample_size]
if metric == "precomputed":
X, labels = X[indices].T[indices].T, labels[indices]
else:
X, labels = X[indices], labels[indices]
return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
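# Usage sketch (illustrative, not part of the module): the mean Silhouette
# Coefficient for two synthetic, well-separated blobs. The data and labels
# below are assumptions made for demonstration.
def _example_silhouette_score():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 10.])
    labels = np.array([0] * 20 + [1] * 20)
    # well separated clusters -> a score close to 1
    return silhouette_score(X, labels, metric='euclidean')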
def _silhouette_reduce(D_chunk, start, labels, label_freqs):
"""Accumulate silhouette statistics for vertical chunk of X.
Parameters
----------
D_chunk : array-like of shape (n_chunk_samples, n_samples)
Precomputed distances for a chunk.
start : int
First index in the chunk.
labels : array-like of shape (n_samples,)
Corresponding cluster labels, encoded as {0, ..., n_clusters-1}.
label_freqs : array-like
Distribution of cluster labels in ``labels``.
"""
# accumulate distances from each sample to each cluster
clust_dists = np.zeros((len(D_chunk), len(label_freqs)),
dtype=D_chunk.dtype)
for i in range(len(D_chunk)):
clust_dists[i] += np.bincount(labels, weights=D_chunk[i],
minlength=len(label_freqs))
# intra_index selects intra-cluster distances within clust_dists
intra_index = (np.arange(len(D_chunk)), labels[start:start + len(D_chunk)])
# intra_clust_dists are averaged over cluster size outside this function
intra_clust_dists = clust_dists[intra_index]
# of the remaining distances we normalise and extract the minimum
clust_dists[intra_index] = np.inf
clust_dists /= label_freqs
inter_clust_dists = clust_dists.min(axis=1)
return intra_clust_dists, inter_clust_dists
@_deprecate_positional_args
def silhouette_samples(X, labels, *, metric='euclidean', **kwds):
"""Compute the Silhouette Coefficient for each sample.
The Silhouette Coefficient is a measure of how well samples are clustered
with samples that are similar to themselves. Clustering models with a high
Silhouette Coefficient are said to be dense, where samples in the same
cluster are similar to each other, and well separated, where samples in
different clusters are not very similar to each other.
The Silhouette Coefficient is calculated using the mean intra-cluster
distance (``a``) and the mean nearest-cluster distance (``b``) for each
sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a,
b)``.
Note that Silhouette Coefficient is only defined if number of labels
    is ``2 <= n_labels <= n_samples - 1``.
This function returns the Silhouette Coefficient for each sample.
The best value is 1 and the worst value is -1. Values near 0 indicate
overlapping clusters.
Read more in the :ref:`User Guide <silhouette_coefficient>`.
Parameters
----------
X : array-like of shape (n_samples_a, n_samples_a) if metric == \
"precomputed" or (n_samples_a, n_features) otherwise
An array of pairwise distances between samples, or a feature array.
labels : array-like of shape (n_samples,)
Label values for each sample.
metric : str or callable, default='euclidean'
The metric to use when calculating distance between instances in a
feature array. If metric is a string, it must be one of the options
allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`.
If ``X`` is the distance array itself, use "precomputed" as the metric.
Precomputed distance matrices must have 0 along the diagonal.
    **kwds : optional keyword parameters
Any further parameters are passed directly to the distance function.
If using a ``scipy.spatial.distance`` metric, the parameters are still
metric dependent. See the scipy docs for usage examples.
Returns
-------
silhouette : array-like of shape (n_samples,)
Silhouette Coefficients for each sample.
References
----------
.. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the
Interpretation and Validation of Cluster Analysis". Computational
and Applied Mathematics 20: 53-65.
<https://www.sciencedirect.com/science/article/pii/0377042787901257>`_
.. [2] `Wikipedia entry on the Silhouette Coefficient
<https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
"""
X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr'])
# Check for non-zero diagonal entries in precomputed distance matrix
if metric == 'precomputed':
atol = np.finfo(X.dtype).eps * 100
if np.any(np.abs(np.diagonal(X)) > atol):
raise ValueError(
'The precomputed distance matrix contains non-zero '
'elements on the diagonal. Use np.fill_diagonal(X, 0).'
)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples = len(labels)
label_freqs = np.bincount(labels)
check_number_of_labels(len(le.classes_), n_samples)
kwds['metric'] = metric
reduce_func = functools.partial(_silhouette_reduce,
labels=labels, label_freqs=label_freqs)
results = zip(*pairwise_distances_chunked(X, reduce_func=reduce_func,
**kwds))
intra_clust_dists, inter_clust_dists = results
intra_clust_dists = np.concatenate(intra_clust_dists)
inter_clust_dists = np.concatenate(inter_clust_dists)
denom = (label_freqs - 1).take(labels, mode='clip')
with np.errstate(divide="ignore", invalid="ignore"):
intra_clust_dists /= denom
sil_samples = inter_clust_dists - intra_clust_dists
with np.errstate(divide="ignore", invalid="ignore"):
sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists)
# nan values are for clusters of size 1, and should be 0
return np.nan_to_num(sil_samples)
def calinski_harabasz_score(X, labels):
"""Compute the Calinski and Harabasz score.
It is also known as the Variance Ratio Criterion.
The score is defined as ratio between the within-cluster dispersion and
the between-cluster dispersion.
Read more in the :ref:`User Guide <calinski_harabasz_index>`.
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
score : float
The resulting Calinski-Harabasz score.
References
----------
.. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster
analysis". Communications in Statistics
<https://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
extra_disp, intra_disp = 0., 0.
mean = np.mean(X, axis=0)
for k in range(n_labels):
cluster_k = X[labels == k]
mean_k = np.mean(cluster_k, axis=0)
extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2)
intra_disp += np.sum((cluster_k - mean_k) ** 2)
return (1. if intra_disp == 0. else
extra_disp * (n_samples - n_labels) /
(intra_disp * (n_labels - 1.)))
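# Usage sketch (illustrative): the Calinski-Harabasz score on the same kind of
# synthetic two-blob data; larger values indicate better separated, denser
# clusters. The data below is an assumption made for demonstration.
def _example_calinski_harabasz_score():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 10.])
    labels = np.array([0] * 20 + [1] * 20)
    return calinski_harabasz_score(X, labels)  # large for separated blobs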
def davies_bouldin_score(X, labels):
"""Computes the Davies-Bouldin score.
The score is defined as the average similarity measure of each cluster with
its most similar cluster, where similarity is the ratio of within-cluster
distances to between-cluster distances. Thus, clusters which are farther
apart and less dispersed will result in a better score.
The minimum score is zero, with lower values indicating better clustering.
Read more in the :ref:`User Guide <davies-bouldin_index>`.
.. versionadded:: 0.20
Parameters
----------
X : array-like of shape (n_samples, n_features)
A list of ``n_features``-dimensional data points. Each row corresponds
to a single data point.
labels : array-like of shape (n_samples,)
Predicted labels for each sample.
Returns
-------
    score : float
The resulting Davies-Bouldin score.
References
----------
.. [1] Davies, David L.; Bouldin, Donald W. (1979).
`"A Cluster Separation Measure"
<https://ieeexplore.ieee.org/document/4766909>`__.
IEEE Transactions on Pattern Analysis and Machine Intelligence.
PAMI-1 (2): 224-227
"""
X, labels = check_X_y(X, labels)
le = LabelEncoder()
labels = le.fit_transform(labels)
n_samples, _ = X.shape
n_labels = len(le.classes_)
check_number_of_labels(n_labels, n_samples)
intra_dists = np.zeros(n_labels)
centroids = np.zeros((n_labels, len(X[0])), dtype=float)
for k in range(n_labels):
cluster_k = _safe_indexing(X, labels == k)
centroid = cluster_k.mean(axis=0)
centroids[k] = centroid
intra_dists[k] = np.average(pairwise_distances(
cluster_k, [centroid]))
centroid_distances = pairwise_distances(centroids)
if np.allclose(intra_dists, 0) or np.allclose(centroid_distances, 0):
return 0.0
centroid_distances[centroid_distances == 0] = np.inf
combined_intra_dists = intra_dists[:, None] + intra_dists
scores = np.max(combined_intra_dists / centroid_distances, axis=1)
return np.mean(scores)
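# Usage sketch (illustrative): Davies-Bouldin is a "lower is better" index, so
# the correct labelling of two separated blobs scores much lower than a random
# labelling of the same points. The data is an assumption for demonstration.
def _example_davies_bouldin_score():
    rng = np.random.RandomState(0)
    X = np.vstack([rng.randn(20, 2), rng.randn(20, 2) + 10.])
    good_labels = np.array([0] * 20 + [1] * 20)
    random_labels = rng.randint(0, 2, size=40)
    return (davies_bouldin_score(X, good_labels),    # small
            davies_bouldin_score(X, random_labels))  # noticeably larger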
| bsd-3-clause |
cpcloud/ibis | ibis/pandas/execution/tests/test_join.py | 1 | 13150 | import pandas as pd
import pandas.util.testing as tm
import pytest
from pytest import param
import ibis
import ibis.common.exceptions as com
pytestmark = pytest.mark.pandas
join_type = pytest.mark.parametrize(
'how',
[
'inner',
'left',
'right',
'outer',
param(
'semi',
marks=pytest.mark.xfail(
raises=NotImplementedError, reason='Semi join not implemented'
),
),
param(
'anti',
marks=pytest.mark.xfail(
raises=NotImplementedError, reason='Anti join not implemented'
),
),
],
)
@join_type
def test_join(how, left, right, df1, df2):
expr = left.join(right, left.key == right.key, how=how)[
left, right.other_value, right.key3
]
result = expr.execute()
expected = pd.merge(df1, df2, how=how, on='key')
tm.assert_frame_equal(result[expected.columns], expected)
def test_cross_join(left, right, df1, df2):
expr = left.cross_join(right)[left, right.other_value, right.key3]
result = expr.execute()
expected = pd.merge(
df1.assign(dummy=1), df2.assign(dummy=1), how='inner', on='dummy'
).rename(columns=dict(key_x='key'))
del expected['dummy'], expected['key_y']
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_project_left_table(how, left, right, df1, df2):
expr = left.join(right, left.key == right.key, how=how)[left, right.key3]
result = expr.execute()
expected = pd.merge(df1, df2, how=how, on='key')[
list(left.columns) + ['key3']
]
tm.assert_frame_equal(result[expected.columns], expected)
def test_cross_join_project_left_table(left, right, df1, df2):
expr = left.cross_join(right)[left, right.key3]
result = expr.execute()
expected = pd.merge(
df1.assign(dummy=1), df2.assign(dummy=1), how='inner', on='dummy'
).rename(columns=dict(key_x='key'))[list(left.columns) + ['key3']]
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_multiple_predicates(how, left, right, df1, df2):
expr = left.join(
right, [left.key == right.key, left.key2 == right.key3], how=how
)[left, right.key3, right.other_value]
result = expr.execute()
expected = pd.merge(
df1, df2, how=how, left_on=['key', 'key2'], right_on=['key', 'key3']
).reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_multiple_predicates_written_as_one(
how, left, right, df1, df2
):
predicate = (left.key == right.key) & (left.key2 == right.key3)
expr = left.join(right, predicate, how=how)[
left, right.key3, right.other_value
]
result = expr.execute()
expected = pd.merge(
df1, df2, how=how, left_on=['key', 'key2'], right_on=['key', 'key3']
).reset_index(drop=True)
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_invalid_predicates(how, left, right):
predicate = (left.key == right.key) & (left.key2 <= right.key3)
expr = left.join(right, predicate, how=how)
with pytest.raises(TypeError):
expr.execute()
predicate = left.key >= right.key
expr = left.join(right, predicate, how=how)
with pytest.raises(TypeError):
expr.execute()
@join_type
@pytest.mark.xfail(reason='Hard to detect this case')
def test_join_with_duplicate_non_key_columns(how, left, right, df1, df2):
left = left.mutate(x=left.value * 2)
right = right.mutate(x=right.other_value * 3)
expr = left.join(right, left.key == right.key, how=how)
# This is undefined behavior because `x` is duplicated. This is difficult
# to detect
with pytest.raises(ValueError):
expr.execute()
@join_type
def test_join_with_duplicate_non_key_columns_not_selected(
how, left, right, df1, df2
):
left = left.mutate(x=left.value * 2)
right = right.mutate(x=right.other_value * 3)
right = right[['key', 'other_value']]
expr = left.join(right, left.key == right.key, how=how)[
left, right.other_value
]
result = expr.execute()
expected = pd.merge(
df1.assign(x=df1.value * 2),
df2[['key', 'other_value']],
how=how,
on='key',
)
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_post_expression_selection(how, left, right, df1, df2):
join = left.join(right, left.key == right.key, how=how)
expr = join[left.key, left.value, right.other_value]
result = expr.execute()
expected = pd.merge(df1, df2, on='key', how=how)[
['key', 'value', 'other_value']
]
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_post_expression_filter(how, left):
lhs = left[['key', 'key2']]
rhs = left[['key2', 'value']]
joined = lhs.join(rhs, 'key2', how=how)
projected = joined[lhs, rhs.value]
expr = projected[projected.value == 4]
result = expr.execute()
df1 = lhs.execute()
df2 = rhs.execute()
expected = pd.merge(df1, df2, on='key2', how=how)
expected = expected.loc[expected.value == 4].reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@join_type
def test_multi_join_with_post_expression_filter(how, left, df1):
lhs = left[['key', 'key2']]
rhs = left[['key2', 'value']]
rhs2 = left[['key2', 'value']].relabel(dict(value='value2'))
joined = lhs.join(rhs, 'key2', how=how)
projected = joined[lhs, rhs.value]
filtered = projected[projected.value == 4]
joined2 = filtered.join(rhs2, 'key2')
projected2 = joined2[filtered.key, rhs2.value2]
expr = projected2[projected2.value2 == 3]
result = expr.execute()
df1 = lhs.execute()
df2 = rhs.execute()
df3 = rhs2.execute()
expected = pd.merge(df1, df2, on='key2', how=how)
expected = expected.loc[expected.value == 4].reset_index(drop=True)
expected = pd.merge(expected, df3, on='key2')[['key', 'value2']]
expected = expected.loc[expected.value2 == 3].reset_index(drop=True)
tm.assert_frame_equal(result, expected)
@join_type
def test_join_with_non_trivial_key(how, left, right, df1, df2):
# also test that the order of operands in the predicate doesn't matter
join = left.join(right, right.key.length() == left.key.length(), how=how)
expr = join[left.key, left.value, right.other_value]
result = expr.execute()
expected = (
pd.merge(
df1.assign(key_len=df1.key.str.len()),
df2.assign(key_len=df2.key.str.len()),
on='key_len',
how=how,
)
.drop(['key_len', 'key_y', 'key2', 'key3'], axis=1)
.rename(columns={'key_x': 'key'})
)
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_non_trivial_key_project_table(how, left, right, df1, df2):
# also test that the order of operands in the predicate doesn't matter
join = left.join(right, right.key.length() == left.key.length(), how=how)
expr = join[left, right.other_value]
expr = expr[expr.key.length() == 1]
result = expr.execute()
expected = (
pd.merge(
df1.assign(key_len=df1.key.str.len()),
df2.assign(key_len=df2.key.str.len()),
on='key_len',
how=how,
)
.drop(['key_len', 'key_y', 'key2', 'key3'], axis=1)
.rename(columns={'key_x': 'key'})
)
expected = expected.loc[expected.key.str.len() == 1]
tm.assert_frame_equal(result[expected.columns], expected)
@join_type
def test_join_with_project_right_duplicate_column(client, how, left, df1, df3):
# also test that the order of operands in the predicate doesn't matter
right = client.table('df3')
join = left.join(right, ['key'], how=how)
expr = join[left.key, right.key2, right.other_value]
result = expr.execute()
expected = (
pd.merge(df1, df3, on='key', how=how)
.drop(['key2_x', 'key3', 'value'], axis=1)
.rename(columns={'key2_y': 'key2'})
)
tm.assert_frame_equal(result[expected.columns], expected)
def test_join_with_window_function(
players_base, players_df, batting, batting_df
):
players = players_base
# this should be semi_join
tbl = batting.left_join(players, ['playerID'])
t = tbl[batting.G, batting.playerID, batting.teamID]
expr = t.groupby(t.teamID).mutate(
team_avg=lambda d: d.G.mean(),
demeaned_by_player=lambda d: d.G - d.G.mean(),
)
result = expr.execute()
expected = pd.merge(
batting_df, players_df[['playerID']], on='playerID', how='left'
)[['G', 'playerID', 'teamID']]
team_avg = expected.groupby('teamID').G.transform('mean')
expected = expected.assign(
team_avg=team_avg, demeaned_by_player=lambda df: df.G - team_avg
)
tm.assert_frame_equal(result[expected.columns], expected)
merge_asof_minversion = pytest.mark.skipif(
pd.__version__ < '0.19.2',
reason="at least pandas-0.19.2 required for merge_asof",
)
@merge_asof_minversion
def test_asof_join(time_left, time_right, time_df1, time_df2):
expr = time_left.asof_join(time_right, 'time')[
time_left, time_right.other_value
]
result = expr.execute()
expected = pd.merge_asof(time_df1, time_df2, on='time')
tm.assert_frame_equal(result[expected.columns], expected)
@merge_asof_minversion
def test_asof_join_predicate(time_left, time_right, time_df1, time_df2):
expr = time_left.asof_join(time_right, time_left.time == time_right.time)[
time_left, time_right.other_value
]
result = expr.execute()
expected = pd.merge_asof(time_df1, time_df2, on='time')
tm.assert_frame_equal(result[expected.columns], expected)
@merge_asof_minversion
def test_keyed_asof_join(
time_keyed_left, time_keyed_right, time_keyed_df1, time_keyed_df2
):
expr = time_keyed_left.asof_join(time_keyed_right, 'time', by='key')[
time_keyed_left, time_keyed_right.other_value
]
result = expr.execute()
expected = pd.merge_asof(
time_keyed_df1, time_keyed_df2, on='time', by='key'
)
tm.assert_frame_equal(result[expected.columns], expected)
@merge_asof_minversion
def test_keyed_asof_join_with_tolerance(
time_keyed_left, time_keyed_right, time_keyed_df1, time_keyed_df2
):
expr = time_keyed_left.asof_join(
time_keyed_right, 'time', by='key', tolerance=2 * ibis.interval(days=1)
)[time_keyed_left, time_keyed_right.other_value]
result = expr.execute()
expected = pd.merge_asof(
time_keyed_df1,
time_keyed_df2,
on='time',
by='key',
tolerance=pd.Timedelta('2D'),
)
tm.assert_frame_equal(result[expected.columns], expected)
@pytest.mark.parametrize(
"how",
[
"left",
pytest.param(
"right",
marks=pytest.mark.xfail(
raises=AttributeError, reason="right_join is not an ibis API"
),
),
"inner",
"outer",
],
)
@pytest.mark.parametrize(
"func",
[
pytest.param(lambda join: join["a0", "a1"], id="tuple"),
pytest.param(lambda join: join[["a0", "a1"]], id="list"),
pytest.param(lambda join: join.select(["a0", "a1"]), id="select"),
],
)
@pytest.mark.xfail(
raises=(com.IbisError, AttributeError),
reason="Select from unambiguous joins not implemented",
)
def test_select_on_unambiguous_join(how, func):
df_t = pd.DataFrame(dict(a0=[1, 2, 3], b1=list("aab")))
df_s = pd.DataFrame(dict(a1=[2, 3, 4], b2=list("abc")))
con = ibis.pandas.connect({"t": df_t, "s": df_s})
t = con.table("t")
s = con.table("s")
method = getattr(t, "{}_join".format(how))
join = method(s, t.b1 == s.b2)
expected = pd.merge(df_t, df_s, left_on=["b1"], right_on=["b2"], how=how)[
["a0", "a1"]
]
assert not expected.empty
expr = func(join)
result = expr.execute()
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"func",
[
pytest.param(lambda join: join["a0", "a1"], id="tuple"),
pytest.param(lambda join: join[["a0", "a1"]], id="list"),
pytest.param(lambda join: join.select(["a0", "a1"]), id="select"),
],
)
@pytest.mark.xfail(
raises=(com.IbisError, AttributeError),
reason="Select from unambiguous joins not implemented",
)
@merge_asof_minversion
def test_select_on_unambiguous_asof_join(func):
df_t = pd.DataFrame(
dict(a0=[1, 2, 3], b1=pd.date_range("20180101", periods=3))
)
df_s = pd.DataFrame(
dict(a1=[2, 3, 4], b2=pd.date_range("20171230", periods=3))
)
con = ibis.pandas.connect({"t": df_t, "s": df_s})
t = con.table("t")
s = con.table("s")
join = t.asof_join(s, t.b1 == s.b2)
expected = pd.merge_asof(df_t, df_s, left_on=["b1"], right_on=["b2"])[
["a0", "a1"]
]
assert not expected.empty
expr = func(join)
result = expr.execute()
tm.assert_frame_equal(result, expected)
| apache-2.0 |
schets/scikit-learn | sklearn/datasets/tests/test_samples_generator.py | 67 | 14842 | from __future__ import division
from collections import defaultdict
from functools import partial
import numpy as np
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import make_hastie_10_2
from sklearn.datasets import make_regression
from sklearn.datasets import make_blobs
from sklearn.datasets import make_friedman1
from sklearn.datasets import make_friedman2
from sklearn.datasets import make_friedman3
from sklearn.datasets import make_low_rank_matrix
from sklearn.datasets import make_sparse_coded_signal
from sklearn.datasets import make_sparse_uncorrelated
from sklearn.datasets import make_spd_matrix
from sklearn.datasets import make_swiss_roll
from sklearn.datasets import make_s_curve
from sklearn.datasets import make_biclusters
from sklearn.datasets import make_checkerboard
from sklearn.utils.validation import assert_all_finite
def test_make_classification():
X, y = make_classification(n_samples=100, n_features=20, n_informative=5,
n_redundant=1, n_repeated=1, n_classes=3,
n_clusters_per_class=1, hypercube=False,
shift=None, scale=None, weights=[0.1, 0.25],
random_state=0)
assert_equal(X.shape, (100, 20), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of classes")
assert_equal(sum(y == 0), 10, "Unexpected number of samples in class #0")
assert_equal(sum(y == 1), 25, "Unexpected number of samples in class #1")
assert_equal(sum(y == 2), 65, "Unexpected number of samples in class #2")
def test_make_classification_informative_features():
"""Test the construction of informative features in make_classification
Also tests `n_clusters_per_class`, `n_classes`, `hypercube` and
fully-specified `weights`.
"""
# Create very separate clusters; check that vertices are unique and
# correspond to classes
class_sep = 1e6
make = partial(make_classification, class_sep=class_sep, n_redundant=0,
n_repeated=0, flip_y=0, shift=0, scale=1, shuffle=False)
for n_informative, weights, n_clusters_per_class in [(2, [1], 1),
(2, [1/3] * 3, 1),
(2, [1/4] * 4, 1),
(2, [1/2] * 2, 2),
(2, [3/4, 1/4], 2),
(10, [1/3] * 3, 10)
]:
n_classes = len(weights)
n_clusters = n_classes * n_clusters_per_class
n_samples = n_clusters * 50
for hypercube in (False, True):
X, y = make(n_samples=n_samples, n_classes=n_classes,
weights=weights, n_features=n_informative,
n_informative=n_informative,
n_clusters_per_class=n_clusters_per_class,
hypercube=hypercube, random_state=0)
assert_equal(X.shape, (n_samples, n_informative))
assert_equal(y.shape, (n_samples,))
# Cluster by sign, viewed as strings to allow uniquing
signs = np.sign(X)
signs = signs.view(dtype='|S{0}'.format(signs.strides[0]))
unique_signs, cluster_index = np.unique(signs,
return_inverse=True)
assert_equal(len(unique_signs), n_clusters,
"Wrong number of clusters, or not in distinct "
"quadrants")
clusters_by_class = defaultdict(set)
for cluster, cls in zip(cluster_index, y):
clusters_by_class[cls].add(cluster)
for clusters in clusters_by_class.values():
assert_equal(len(clusters), n_clusters_per_class,
"Wrong number of clusters per class")
assert_equal(len(clusters_by_class), n_classes,
"Wrong number of classes")
assert_array_almost_equal(np.bincount(y) / len(y) // weights,
[1] * n_classes,
err_msg="Wrong number of samples "
"per class")
# Ensure on vertices of hypercube
for cluster in range(len(unique_signs)):
centroid = X[cluster_index == cluster].mean(axis=0)
if hypercube:
assert_array_almost_equal(np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
err_msg="Clusters are not "
"centered on hypercube "
"vertices")
else:
assert_raises(AssertionError,
assert_array_almost_equal,
np.abs(centroid),
[class_sep] * n_informative,
decimal=0,
                          err_msg="Clusters should not be centered "
"on hypercube vertices")
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=5,
n_clusters_per_class=1)
assert_raises(ValueError, make, n_features=2, n_informative=2, n_classes=3,
n_clusters_per_class=2)
def test_make_multilabel_classification_return_sequences():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = assert_warns(DeprecationWarning, make_multilabel_classification,
n_samples=100, n_features=20, n_classes=3,
random_state=0, allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (100, 20), "X shape mismatch")
if not allow_unlabeled:
assert_equal(max([max(y) for y in Y]), 2)
assert_equal(min([len(y) for y in Y]), min_length)
assert_true(max([len(y) for y in Y]) <= 3)
def test_make_multilabel_classification_return_indicator():
for allow_unlabeled, min_length in zip((True, False), (0, 1)):
X, Y = make_multilabel_classification(n_samples=25, n_features=20,
n_classes=3, random_state=0,
return_indicator=True,
allow_unlabeled=allow_unlabeled)
assert_equal(X.shape, (25, 20), "X shape mismatch")
assert_equal(Y.shape, (25, 3), "Y shape mismatch")
assert_true(np.all(np.sum(Y, axis=0) > min_length))
# Also test return_distributions
X2, Y2, p_c, p_w_c = make_multilabel_classification(
n_samples=25, n_features=20, n_classes=3, random_state=0,
return_indicator=True, allow_unlabeled=allow_unlabeled,
return_distributions=True)
assert_array_equal(X, X2)
assert_array_equal(Y, Y2)
assert_equal(p_c.shape, (3,))
assert_almost_equal(p_c.sum(), 1)
assert_equal(p_w_c.shape, (20, 3))
assert_almost_equal(p_w_c.sum(axis=0), [1] * 3)
def test_make_hastie_10_2():
X, y = make_hastie_10_2(n_samples=100, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(np.unique(y).shape, (2,), "Unexpected number of classes")
def test_make_regression():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
effective_rank=5, coef=True, bias=0.0,
noise=1.0, random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100,), "y shape mismatch")
assert_equal(c.shape, (10,), "coef shape mismatch")
assert_equal(sum(c != 0.0), 3, "Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0).
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
# Test with small number of features.
X, y = make_regression(n_samples=100, n_features=1) # n_informative=3
assert_equal(X.shape, (100, 1))
def test_make_regression_multitarget():
X, y, c = make_regression(n_samples=100, n_features=10, n_informative=3,
n_targets=3, coef=True, noise=1., random_state=0)
assert_equal(X.shape, (100, 10), "X shape mismatch")
assert_equal(y.shape, (100, 3), "y shape mismatch")
assert_equal(c.shape, (10, 3), "coef shape mismatch")
assert_array_equal(sum(c != 0.0), 3,
"Unexpected number of informative features")
# Test that y ~= np.dot(X, c) + bias + N(0, 1.0)
assert_almost_equal(np.std(y - np.dot(X, c)), 1.0, decimal=1)
def test_make_blobs():
X, y = make_blobs(n_samples=50, n_features=2,
centers=[[0.0, 0.0], [1.0, 1.0], [0.0, 1.0]],
random_state=0)
assert_equal(X.shape, (50, 2), "X shape mismatch")
assert_equal(y.shape, (50,), "y shape mismatch")
assert_equal(np.unique(y).shape, (3,), "Unexpected number of blobs")
def test_make_friedman1():
X, y = make_friedman1(n_samples=5, n_features=10, noise=0.0,
random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
10 * np.sin(np.pi * X[:, 0] * X[:, 1])
+ 20 * (X[:, 2] - 0.5) ** 2
+ 10 * X[:, 3] + 5 * X[:, 4])
def test_make_friedman2():
X, y = make_friedman2(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y,
(X[:, 0] ** 2
+ (X[:, 1] * X[:, 2] - 1
/ (X[:, 1] * X[:, 3])) ** 2) ** 0.5)
def test_make_friedman3():
X, y = make_friedman3(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 4), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
assert_array_almost_equal(y, np.arctan((X[:, 1] * X[:, 2]
- 1 / (X[:, 1] * X[:, 3]))
/ X[:, 0]))
def test_make_low_rank_matrix():
X = make_low_rank_matrix(n_samples=50, n_features=25, effective_rank=5,
tail_strength=0.01, random_state=0)
assert_equal(X.shape, (50, 25), "X shape mismatch")
from numpy.linalg import svd
u, s, v = svd(X)
assert_less(sum(s) - 5, 0.1, "X rank is not approximately 5")
def test_make_sparse_coded_signal():
Y, D, X = make_sparse_coded_signal(n_samples=5, n_components=8,
n_features=10, n_nonzero_coefs=3,
random_state=0)
assert_equal(Y.shape, (10, 5), "Y shape mismatch")
assert_equal(D.shape, (10, 8), "D shape mismatch")
assert_equal(X.shape, (8, 5), "X shape mismatch")
for col in X.T:
assert_equal(len(np.flatnonzero(col)), 3, 'Non-zero coefs mismatch')
assert_array_almost_equal(np.dot(D, X), Y)
assert_array_almost_equal(np.sqrt((D ** 2).sum(axis=0)),
np.ones(D.shape[1]))
def test_make_sparse_uncorrelated():
X, y = make_sparse_uncorrelated(n_samples=5, n_features=10, random_state=0)
assert_equal(X.shape, (5, 10), "X shape mismatch")
assert_equal(y.shape, (5,), "y shape mismatch")
def test_make_spd_matrix():
X = make_spd_matrix(n_dim=5, random_state=0)
assert_equal(X.shape, (5, 5), "X shape mismatch")
assert_array_almost_equal(X, X.T)
from numpy.linalg import eig
eigenvalues, _ = eig(X)
assert_array_equal(eigenvalues > 0, np.array([True] * 5),
"X is not positive-definite")
def test_make_swiss_roll():
X, t = make_swiss_roll(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], t * np.cos(t))
assert_array_almost_equal(X[:, 2], t * np.sin(t))
def test_make_s_curve():
X, t = make_s_curve(n_samples=5, noise=0.0, random_state=0)
assert_equal(X.shape, (5, 3), "X shape mismatch")
assert_equal(t.shape, (5,), "t shape mismatch")
assert_array_almost_equal(X[:, 0], np.sin(t))
assert_array_almost_equal(X[:, 2], np.sign(t) * (np.cos(t) - 1))
def test_make_biclusters():
X, rows, cols = make_biclusters(
shape=(100, 100), n_clusters=4, shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (4, 100), "rows shape mismatch")
assert_equal(cols.shape, (4, 100,), "columns shape mismatch")
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X2, _, _ = make_biclusters(shape=(100, 100), n_clusters=4,
shuffle=True, random_state=0)
assert_array_almost_equal(X, X2)
def test_make_checkerboard():
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=(20, 5),
shuffle=True, random_state=0)
assert_equal(X.shape, (100, 100), "X shape mismatch")
assert_equal(rows.shape, (100, 100), "rows shape mismatch")
assert_equal(cols.shape, (100, 100,), "columns shape mismatch")
X, rows, cols = make_checkerboard(
shape=(100, 100), n_clusters=2, shuffle=True, random_state=0)
assert_all_finite(X)
assert_all_finite(rows)
assert_all_finite(cols)
X1, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
X2, _, _ = make_checkerboard(shape=(100, 100), n_clusters=2,
shuffle=True, random_state=0)
assert_array_equal(X1, X2)
| bsd-3-clause |
fredhusser/scikit-learn | examples/calibration/plot_calibration_curve.py | 225 | 5903 | """
==============================
Probability Calibration curves
==============================
When performing classification one often wants to predict not only the class
label, but also the associated probability. This probability gives some
kind of confidence on the prediction. This example demonstrates how to display
how well calibrated the predicted probabilities are and how to calibrate an
uncalibrated classifier.
The experiment is performed on an artificial dataset for binary classification
with 100,000 samples (1,000 of them are used for model fitting) and 20
features. Of the 20 features, only 2 are informative and 10 are redundant. The
first figure shows the estimated probabilities obtained with logistic
regression, Gaussian naive Bayes, and Gaussian naive Bayes with both isotonic
calibration and sigmoid calibration. The calibration performance is evaluated
with Brier score, reported in the legend (the smaller the better). One can
observe here that logistic regression is well calibrated while raw Gaussian
naive Bayes performs very badly. This is because of the redundant features
which violate the assumption of feature-independence and result in an overly
confident classifier, which is indicated by the typical transposed-sigmoid
curve.
Calibration of the probabilities of Gaussian naive Bayes with isotonic
regression can fix this issue, as can be seen from the nearly diagonal
calibration curve. Sigmoid calibration also improves the Brier score slightly,
albeit not as strongly as the non-parametric isotonic regression. This can be
attributed to the fact that we have plenty of calibration data such that the
greater flexibility of the non-parametric model can be exploited.
The second figure shows the calibration curve of a linear support-vector
classifier (LinearSVC). LinearSVC shows the opposite behavior to Gaussian
naive Bayes: its calibration curve has a sigmoid shape, which is typical of
an under-confident classifier. In the case of LinearSVC, this is caused by the
margin property of the hinge loss, which lets the model focus on hard samples
that are close to the decision boundary (the support vectors).
Both kinds of calibration can fix this issue and yield nearly identical
results. This shows that sigmoid calibration can deal with situations where
the calibration curve of the base classifier is sigmoid (e.g., for LinearSVC)
but not where it is transposed-sigmoid (e.g., Gaussian naive Bayes).
"""
print(__doc__)
# Author: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Jan Hendrik Metzen <jhm@informatik.uni-bremen.de>
# License: BSD Style.
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import LinearSVC
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (brier_score_loss, precision_score, recall_score,
f1_score)
from sklearn.calibration import CalibratedClassifierCV, calibration_curve
from sklearn.cross_validation import train_test_split
# Create dataset of classification task with many redundant and few
# informative features
X, y = datasets.make_classification(n_samples=100000, n_features=20,
n_informative=2, n_redundant=10,
random_state=42)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.99,
random_state=42)
def plot_calibration_curve(est, name, fig_index):
"""Plot calibration curve for est w/o and with calibration. """
# Calibrated with isotonic calibration
isotonic = CalibratedClassifierCV(est, cv=2, method='isotonic')
# Calibrated with sigmoid calibration
sigmoid = CalibratedClassifierCV(est, cv=2, method='sigmoid')
# Logistic regression with no calibration as baseline
lr = LogisticRegression(C=1., solver='lbfgs')
fig = plt.figure(fig_index, figsize=(10, 10))
ax1 = plt.subplot2grid((3, 1), (0, 0), rowspan=2)
ax2 = plt.subplot2grid((3, 1), (2, 0))
ax1.plot([0, 1], [0, 1], "k:", label="Perfectly calibrated")
for clf, name in [(lr, 'Logistic'),
(est, name),
(isotonic, name + ' + Isotonic'),
(sigmoid, name + ' + Sigmoid')]:
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
if hasattr(clf, "predict_proba"):
prob_pos = clf.predict_proba(X_test)[:, 1]
else: # use decision function
prob_pos = clf.decision_function(X_test)
prob_pos = \
(prob_pos - prob_pos.min()) / (prob_pos.max() - prob_pos.min())
clf_score = brier_score_loss(y_test, prob_pos, pos_label=y.max())
print("%s:" % name)
print("\tBrier: %1.3f" % (clf_score))
print("\tPrecision: %1.3f" % precision_score(y_test, y_pred))
print("\tRecall: %1.3f" % recall_score(y_test, y_pred))
print("\tF1: %1.3f\n" % f1_score(y_test, y_pred))
fraction_of_positives, mean_predicted_value = \
calibration_curve(y_test, prob_pos, n_bins=10)
ax1.plot(mean_predicted_value, fraction_of_positives, "s-",
label="%s (%1.3f)" % (name, clf_score))
ax2.hist(prob_pos, range=(0, 1), bins=10, label=name,
histtype="step", lw=2)
ax1.set_ylabel("Fraction of positives")
ax1.set_ylim([-0.05, 1.05])
ax1.legend(loc="lower right")
ax1.set_title('Calibration plots (reliability curve)')
ax2.set_xlabel("Mean predicted value")
ax2.set_ylabel("Count")
ax2.legend(loc="upper center", ncol=2)
plt.tight_layout()
# Plot calibration curve for Gaussian Naive Bayes
plot_calibration_curve(GaussianNB(), "Naive Bayes", 1)
# Plot calibration curve for Linear SVC
plot_calibration_curve(LinearSVC(), "SVC", 2)
plt.show()
| bsd-3-clause |
0x0all/scikit-learn | sklearn/manifold/tests/test_mds.py | 324 | 1862 | import numpy as np
from numpy.testing import assert_array_almost_equal
from nose.tools import assert_raises
from sklearn.manifold import mds
def test_smacof():
# test metric smacof using the data of "Modern Multidimensional Scaling",
# Borg & Groenen, p 154
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.451, .252],
[.016, -.238],
[-.200, .524]])
X, _ = mds.smacof(sim, init=Z, n_components=2, max_iter=1, n_init=1)
X_true = np.array([[-1.415, -2.471],
[1.633, 1.107],
[.249, -.067],
[-.468, 1.431]])
assert_array_almost_equal(X, X_true, decimal=3)
def test_smacof_error():
# Not symmetric similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
    # Not a square similarity matrix:
sim = np.array([[0, 5, 9, 4],
[5, 0, 2, 2],
[4, 2, 1, 0]])
assert_raises(ValueError, mds.smacof, sim)
# init not None and not correct format:
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
Z = np.array([[-.266, -.539],
[.016, -.238],
[-.200, .524]])
assert_raises(ValueError, mds.smacof, sim, init=Z, n_init=1)
def test_MDS():
sim = np.array([[0, 5, 3, 4],
[5, 0, 2, 2],
[3, 2, 0, 1],
[4, 2, 1, 0]])
mds_clf = mds.MDS(metric=False, n_jobs=3, dissimilarity="precomputed")
mds_clf.fit(sim)
| bsd-3-clause |
mworks/mworks | examples/Examples/FindTheCircle/analysis/Python/selection_counts.py | 1 | 1241 | import sys
from matplotlib import pyplot
import numpy
sys.path.insert(0, '/Library/Application Support/MWorks/Scripting/Python')
from mworks.data import MWKFile
def selection_counts(filename):
with MWKFile(filename) as f:
r_codec = f.reverse_codec
red_code = r_codec['red_selected']
green_code = r_codec['green_selected']
blue_code = r_codec['blue_selected']
red_count = 0
green_count = 0
blue_count = 0
for evt in f.get_events_iter(codes=[red_code, green_code, blue_code]):
if evt.data:
if evt.code == red_code:
red_count += 1
elif evt.code == green_code:
green_count += 1
else:
assert evt.code == blue_code
blue_count += 1
index = numpy.arange(3)
pyplot.bar(index,
[red_count, green_count, blue_count],
0.5,
color = ['r', 'g', 'b'],
align = 'center')
pyplot.xticks(index, ['Red', 'Green', 'Blue'])
pyplot.title('Selection Counts')
pyplot.show()
if __name__ == '__main__':
selection_counts(sys.argv[1])
| mit |
aetilley/scikit-learn | sklearn/utils/graph.py | 289 | 6239 | """
Graph utilities and algorithms
Graphs are represented with their adjacency matrices, preferably using
sparse matrices.
"""
# Authors: Aric Hagberg <hagberg@lanl.gov>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Jake Vanderplas <vanderplas@astro.washington.edu>
# License: BSD 3 clause
import numpy as np
from scipy import sparse
from .validation import check_array
from .graph_shortest_path import graph_shortest_path
###############################################################################
# Path and connected component analysis.
# Code adapted from networkx
def single_source_shortest_path_length(graph, source, cutoff=None):
"""Return the shortest path length from source to all reachable nodes.
Returns a dictionary of shortest path lengths keyed by target.
Parameters
----------
graph: sparse matrix or 2D array (preferably LIL matrix)
Adjacency matrix of the graph
source : node label
Starting node for path
cutoff : integer, optional
Depth to stop the search - only
paths of length <= cutoff are returned.
Examples
--------
>>> from sklearn.utils.graph import single_source_shortest_path_length
>>> import numpy as np
>>> graph = np.array([[ 0, 1, 0, 0],
... [ 1, 0, 1, 0],
... [ 0, 1, 0, 1],
... [ 0, 0, 1, 0]])
>>> single_source_shortest_path_length(graph, 0)
{0: 0, 1: 1, 2: 2, 3: 3}
>>> single_source_shortest_path_length(np.ones((6, 6)), 2)
{0: 1, 1: 1, 2: 0, 3: 1, 4: 1, 5: 1}
"""
if sparse.isspmatrix(graph):
graph = graph.tolil()
else:
graph = sparse.lil_matrix(graph)
seen = {} # level (number of hops) when seen in BFS
level = 0 # the current level
    next_level = [source]  # list of nodes to check at the next level
while next_level:
this_level = next_level # advance to next level
        next_level = set()  # and start a new set (fringe)
for v in this_level:
if v not in seen:
seen[v] = level # set the level of vertex v
next_level.update(graph.rows[v])
if cutoff is not None and cutoff <= level:
break
level += 1
return seen # return all path lengths as dictionary
if hasattr(sparse, 'connected_components'):
connected_components = sparse.connected_components
else:
from .sparsetools import connected_components
###############################################################################
# Graph laplacian
def graph_laplacian(csgraph, normed=False, return_diag=False):
""" Return the Laplacian matrix of a directed graph.
For non-symmetric graphs the out-degree is used in the computation.
Parameters
----------
csgraph : array_like or sparse matrix, 2 dimensions
compressed-sparse graph, with shape (N, N).
normed : bool, optional
If True, then compute normalized Laplacian.
return_diag : bool, optional
If True, then return diagonal as well as laplacian.
Returns
-------
lap : ndarray
The N x N laplacian matrix of graph.
diag : ndarray
The length-N diagonal of the laplacian matrix.
diag is returned only if return_diag is True.
Notes
-----
The Laplacian matrix of a graph is sometimes referred to as the
"Kirchoff matrix" or the "admittance matrix", and is useful in many
parts of spectral graph theory. In particular, the eigen-decomposition
of the laplacian matrix can give insight into many properties of the graph.
For non-symmetric directed graphs, the laplacian is computed using the
out-degree of each node.
"""
if csgraph.ndim != 2 or csgraph.shape[0] != csgraph.shape[1]:
raise ValueError('csgraph must be a square matrix or array')
if normed and (np.issubdtype(csgraph.dtype, np.int)
or np.issubdtype(csgraph.dtype, np.uint)):
csgraph = check_array(csgraph, dtype=np.float64, accept_sparse=True)
if sparse.isspmatrix(csgraph):
return _laplacian_sparse(csgraph, normed=normed,
return_diag=return_diag)
else:
return _laplacian_dense(csgraph, normed=normed,
return_diag=return_diag)
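# A minimal sketch, assuming a tiny undirected 3-node path graph, of the fact
# that the unnormalized Laplacian equals the degree matrix minus the adjacency
# matrix (L = D - A). The helper name `_laplacian_demo` is introduced here for
# illustration only and is never called by this module.
def _laplacian_demo():
    A = np.array([[0., 1., 0.],
                  [1., 0., 1.],
                  [0., 1., 0.]])
    D = np.diag(A.sum(axis=1))   # (out-)degrees on the diagonal
    L = D - A                    # unnormalized graph Laplacian
    # graph_laplacian(A) with normed=False should agree with D - A
    assert np.allclose(graph_laplacian(A), L)
    return L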
def _laplacian_sparse(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
if not graph.format == 'coo':
lap = (-graph).tocoo()
else:
lap = -graph.copy()
diag_mask = (lap.row == lap.col)
if not diag_mask.sum() == n_nodes:
# The sparsity pattern of the matrix has holes on the diagonal,
# we need to fix that
diag_idx = lap.row[diag_mask]
diagonal_holes = list(set(range(n_nodes)).difference(diag_idx))
new_data = np.concatenate([lap.data, np.ones(len(diagonal_holes))])
new_row = np.concatenate([lap.row, diagonal_holes])
new_col = np.concatenate([lap.col, diagonal_holes])
lap = sparse.coo_matrix((new_data, (new_row, new_col)),
shape=lap.shape)
diag_mask = (lap.row == lap.col)
lap.data[diag_mask] = 0
w = -np.asarray(lap.sum(axis=1)).squeeze()
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap.data /= w[lap.row]
lap.data /= w[lap.col]
lap.data[diag_mask] = (1 - w_zeros[lap.row[diag_mask]]).astype(
lap.data.dtype)
else:
lap.data[diag_mask] = w[lap.row[diag_mask]]
if return_diag:
return lap, w
return lap
def _laplacian_dense(graph, normed=False, return_diag=False):
n_nodes = graph.shape[0]
lap = -np.asarray(graph) # minus sign leads to a copy
# set diagonal to zero
lap.flat[::n_nodes + 1] = 0
w = -lap.sum(axis=0)
if normed:
w = np.sqrt(w)
w_zeros = (w == 0)
w[w_zeros] = 1
lap /= w
lap /= w[:, np.newaxis]
lap.flat[::n_nodes + 1] = (1 - w_zeros).astype(lap.dtype)
else:
lap.flat[::n_nodes + 1] = w.astype(lap.dtype)
if return_diag:
return lap, w
return lap
| bsd-3-clause |