repo_name | path | copies | size | content | license |
---|---|---|---|---|---|
fengzhyuan/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with a decision tree.
The :ref:`decision tree <tree>` is
used to fit a sine curve with additional noisy observations. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn details of
the training data that are too fine and fit the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
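# A small follow-up sketch (not part of the original example): compare each
# model's error against the noise-free sine curve on the evaluation grid to
# quantify the overfitting described in the docstring.
from sklearn.metrics import mean_squared_error
y_true = np.sin(X_test).ravel()
print("MSE vs. true curve, max_depth=2:", mean_squared_error(y_true, y_1))
print("MSE vs. true curve, max_depth=5:", mean_squared_error(y_true, y_2))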
| bsd-3-clause |
huzq/scikit-learn | sklearn/covariance/_elliptic_envelope.py | 3 | 8067 | # Author: Virgile Fritsch <virgile.fritsch@inria.fr>
#
# License: BSD 3 clause
import numpy as np
from . import MinCovDet
from ..utils.validation import check_is_fitted, check_array
from ..utils.validation import _deprecate_positional_args
from ..metrics import accuracy_score
from ..base import OutlierMixin
class EllipticEnvelope(OutlierMixin, MinCovDet):
"""An object for detecting outliers in a Gaussian distributed dataset.
Read more in the :ref:`User Guide <outlier_detection>`.
Parameters
----------
store_precision : bool, default=True
Specify if the estimated precision is stored.
assume_centered : bool, default=False
If True, the support of robust location and covariance estimates
is computed, and a covariance estimate is recomputed from it,
without centering the data.
Useful when working with data whose mean is almost, but not exactly,
zero.
If False, the robust location and covariance are directly computed
with the FastMCD algorithm without additional treatment.
support_fraction : float, default=None
The proportion of points to be included in the support of the raw
MCD estimate. If None, the minimum value of support_fraction will
be used within the algorithm: `(n_samples + n_features + 1) / 2`.
Range is (0, 1).
contamination : float, default=0.1
The amount of contamination of the data set, i.e. the proportion
of outliers in the data set. Range is (0, 0.5).
random_state : int or RandomState instance, default=None
Determines the pseudo random number generator for shuffling
the data. Pass an int for reproducible results across multiple function
calls. See :term:`Glossary <random_state>`.
Attributes
----------
location_ : ndarray of shape (n_features,)
Estimated robust location
covariance_ : ndarray of shape (n_features, n_features)
Estimated robust covariance matrix
precision_ : ndarray of shape (n_features, n_features)
Estimated pseudo inverse matrix.
(stored only if store_precision is True)
support_ : ndarray of shape (n_samples,)
A mask of the observations that have been used to compute the
robust estimates of location and shape.
offset_ : float
Offset used to define the decision function from the raw scores.
We have the relation: ``decision_function = score_samples - offset_``.
The offset depends on the contamination parameter and is defined in
such a way we obtain the expected number of outliers (samples with
decision function < 0) in training.
.. versionadded:: 0.20
raw_location_ : ndarray of shape (n_features,)
The raw robust estimated location before correction and re-weighting.
raw_covariance_ : ndarray of shape (n_features, n_features)
The raw robust estimated covariance before correction and re-weighting.
raw_support_ : ndarray of shape (n_samples,)
A mask of the observations that have been used to compute
the raw robust estimates of location and shape, before correction
and re-weighting.
dist_ : ndarray of shape (n_samples,)
Mahalanobis distances of the training set (on which :meth:`fit` is
called) observations.
Examples
--------
>>> import numpy as np
>>> from sklearn.covariance import EllipticEnvelope
>>> true_cov = np.array([[.8, .3],
... [.3, .4]])
>>> X = np.random.RandomState(0).multivariate_normal(mean=[0, 0],
... cov=true_cov,
... size=500)
>>> cov = EllipticEnvelope(random_state=0).fit(X)
>>> # predict returns 1 for an inlier and -1 for an outlier
>>> cov.predict([[0, 0],
... [3, 3]])
array([ 1, -1])
>>> cov.covariance_
array([[0.7411..., 0.2535...],
[0.2535..., 0.3053...]])
>>> cov.location_
array([0.0813... , 0.0427...])
See Also
--------
EmpiricalCovariance, MinCovDet
Notes
-----
Outlier detection from covariance estimation may break or not
perform well in high-dimensional settings. In particular, one should
always take care to work with ``n_samples > n_features ** 2``.
References
----------
.. [1] Rousseeuw, P.J., Van Driessen, K. "A fast algorithm for the
minimum covariance determinant estimator" Technometrics 41(3), 212
(1999)
"""
@_deprecate_positional_args
def __init__(self, *, store_precision=True, assume_centered=False,
support_fraction=None, contamination=0.1,
random_state=None):
super().__init__(
store_precision=store_precision,
assume_centered=assume_centered,
support_fraction=support_fraction,
random_state=random_state)
self.contamination = contamination
def fit(self, X, y=None):
"""Fit the EllipticEnvelope model.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training data.
y : Ignored
Not used, present for API consistency by convention.
"""
super().fit(X)
self.offset_ = np.percentile(-self.dist_, 100. * self.contamination)
return self
def decision_function(self, X):
"""Compute the decision function of the given observations.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
decision : ndarray of shape (n_samples, )
Decision function of the samples.
It is equal to the shifted Mahalanobis distances.
The threshold for being an outlier is 0, which ensures
compatibility with other outlier detection algorithms.
"""
check_is_fitted(self)
negative_mahal_dist = self.score_samples(X)
return negative_mahal_dist - self.offset_
def score_samples(self, X):
"""Compute the negative Mahalanobis distances.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
negative_mahal_distances : array-like of shape (n_samples,)
Opposite of the Mahalanobis distances.
"""
check_is_fitted(self)
return -self.mahalanobis(X)
def predict(self, X):
"""
Predict the labels (1 inlier, -1 outlier) of X according to the
fitted model.
Parameters
----------
X : array-like of shape (n_samples, n_features)
The data matrix.
Returns
-------
is_inlier : ndarray of shape (n_samples,)
Returns -1 for anomalies/outliers and +1 for inliers.
"""
X = check_array(X)
is_inlier = np.full(X.shape[0], -1, dtype=int)
values = self.decision_function(X)
is_inlier[values >= 0] = 1
return is_inlier
def score(self, X, y, sample_weight=None):
"""Returns the mean accuracy on the given test data and labels.
In multi-label classification, this is the subset accuracy
which is a harsh metric since you require for each sample that
each label set be correctly predicted.
Parameters
----------
X : array-like of shape (n_samples, n_features)
Test samples.
y : array-like of shape (n_samples,) or (n_samples, n_outputs)
True labels for X.
sample_weight : array-like of shape (n_samples,), default=None
Sample weights.
Returns
-------
score : float
Mean accuracy of self.predict(X) w.r.t. y.
"""
return accuracy_score(y, self.predict(X), sample_weight=sample_weight)
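# A minimal usage sketch (not part of the original module) illustrating the
# documented relation ``decision_function(X) == score_samples(X) - offset_``.
# It assumes scikit-learn is installed and uses the public import path rather
# than this file's relative imports:
#
#     import numpy as np
#     from sklearn.covariance import EllipticEnvelope
#     rng = np.random.RandomState(0)
#     X = rng.multivariate_normal(mean=[0, 0], cov=[[.8, .3], [.3, .4]], size=500)
#     est = EllipticEnvelope(contamination=0.1, random_state=0).fit(X)
#     assert np.allclose(est.decision_function(X),
#                        est.score_samples(X) - est.offset_)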
| bsd-3-clause |
Bleyddyn/malpi | train/vae.py | 1 | 3838 | '''This script demonstrates how to build a variational autoencoder with Keras.
#Reference
- Auto-Encoding Variational Bayes
https://arxiv.org/abs/1312.6114
From: https://blog.keras.io/building-autoencoders-in-keras.html
'''
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import norm
from keras.layers import Input, Dense, Lambda
from keras.models import Model
from keras import backend as K
from keras import metrics
from keras.datasets import mnist
batch_size = 100
original_dim = 784
latent_dim = 2
intermediate_dim = 256
epochs = 50
epsilon_std = 1.0
x = Input(shape=(original_dim,))
h = Dense(intermediate_dim, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_var = Dense(latent_dim)(h)
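# Reparameterization trick: draw epsilon ~ N(0, 1) and compute
# z = z_mean + exp(z_log_var / 2) * epsilon, so sampling stays differentiable
# with respect to the encoder outputs.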
def sampling(args):
z_mean, z_log_var = args
epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0.,
stddev=epsilon_std)
return z_mean + K.exp(z_log_var / 2) * epsilon
# note that "output_shape" isn't necessary with the TensorFlow backend
z = Lambda(sampling, output_shape=(latent_dim,))([z_mean, z_log_var])
# we instantiate these layers separately so as to reuse them later
decoder_h = Dense(intermediate_dim, activation='relu')
decoder_mean = Dense(original_dim, activation='sigmoid')
h_decoded = decoder_h(z)
x_decoded_mean = decoder_mean(h_decoded)
# instantiate VAE model
vae = Model(x, x_decoded_mean)
# Compute VAE loss
def vae_loss(x, x_decoded_mean):
xent_loss = original_dim * metrics.binary_crossentropy(x, x_decoded_mean)
kl_loss = - 0.5 * K.mean(1 + z_log_var - K.square(z_mean) - K.exp(z_log_var), axis=-1)
return xent_loss + kl_loss
# vae_loss = K.mean(xent_loss + kl_loss)
#vae.add_loss(vae_loss)
#model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=[metrics.categorical_accuracy] )
vae.compile(loss=vae_loss, optimizer='rmsprop', metrics=None )
vae.summary()
# train the VAE on MNIST digits
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
vae.fit(x_train, x_train,
shuffle=True,
epochs=epochs,
batch_size=batch_size,
validation_data=(x_test, x_test))
# build a model to project inputs on the latent space
encoder = Model(x, z_mean)
# display a 2D plot of the digit classes in the latent space
x_test_encoded = encoder.predict(x_test, batch_size=batch_size)
plt.figure(figsize=(6, 6))
plt.scatter(x_test_encoded[:, 0], x_test_encoded[:, 1], c=y_test)
plt.colorbar()
plt.show()
# build a digit generator that can sample from the learned distribution
decoder_input = Input(shape=(latent_dim,))
_h_decoded = decoder_h(decoder_input)
_x_decoded_mean = decoder_mean(_h_decoded)
generator = Model(decoder_input, _x_decoded_mean)
# display a 2D manifold of the digits
n = 15 # figure with 15x15 digits
digit_size = 28
figure = np.zeros((digit_size * n, digit_size * n))
# linearly spaced coordinates on the unit square were transformed through the inverse CDF (ppf) of the Gaussian
# to produce values of the latent variables z, since the prior of the latent space is Gaussian
grid_x = norm.ppf(np.linspace(0.05, 0.95, n))
grid_y = norm.ppf(np.linspace(0.05, 0.95, n))
for i, yi in enumerate(grid_x):
for j, xi in enumerate(grid_y):
z_sample = np.array([[xi, yi]])
x_decoded = generator.predict(z_sample)
digit = x_decoded[0].reshape(digit_size, digit_size)
figure[i * digit_size: (i + 1) * digit_size,
j * digit_size: (j + 1) * digit_size] = digit
plt.figure(figsize=(10, 10))
plt.imshow(figure, cmap='Greys_r')
plt.show()
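# A small follow-up sketch (not part of the original script): decode one
# hand-picked latent point with the `generator` model built above.
z_sample = np.array([[0.5, -0.5]])  # any point in the 2-D latent space
digit = generator.predict(z_sample)[0].reshape(digit_size, digit_size)
plt.figure(figsize=(2, 2))
plt.imshow(digit, cmap='Greys_r')
plt.axis('off')
plt.show()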
| mit |
sigurdurb/ru_canvas | gen_groupset.py | 1 | 5650 | #!/usr/bin/python3
'''https://github.com/ucfopen/canvasapi'''
from canvasapi import Canvas
from canvas_config import *
import pandas as pd
from itertools import chain
from math import isnan
'''Course id is in your url https://reykjavik.instructure.com/courses/{Course_ID}'''
COURSE_ID = 254
ASSIGN_ID = 3049
'''This script generates a CSV for group sets where each group has only one row.
It adds the rubric's parts as column headers so TAs can mark their name and whether
they are done with each question.
The remaining students that have the assignment but are not in
a group and/or have not submitted are appended at the end of the file'''
def main():
canvas = Canvas(API_URL, API_KEY)
# First, retrieve the Course object
course = canvas.get_course(COURSE_ID)
group_cats_lis = course.list_group_categories()
print(*group_cats_lis,sep="\n")
g_cat_id = int(input("Enter the (number) of the group category you want to generate: "))
g_cat = [i for i in group_cats_lis if g_cat_id == i.id]
rm_nosubs = input("Do you want to remove those who have not submitted? (y/n): ")
sec_params = {"include[]":["students"]}
sections = course.list_sections(**sec_params)
csv_name = "Assign_" + str(ASSIGN_ID) # This is changed dynamically to the group set name
max_group_cnt = 0
all_rows = []
if g_cat:
g_cat = g_cat[-1]
csv_name += "_" + g_cat.name
print("Getting students in groups")
groups = g_cat.list_groups()
for g in groups:
if g.members_count == 0:
continue
user_details = []
users = g.list_users()
for user in users:
sec_name = find_sections(sections, user.id)
guser_row = [user.name, int(user.id), sec_name]
user_details.extend(guser_row)
max_group_cnt = int(max(max_group_cnt,int(len(user_details) / 3)))
group_details = [g_cat.name, int(g_cat.id), g.name, int(g.id)]
group_details.extend(user_details)
all_rows.append(group_details)
else:
print("No id was found for Group Category number:", int(g_cat_id))
answer = input("Do you wish to continue getting the students for the assignment? (y/n): ")
if answer != "y":
exit(1)
header_cols = ['Student', 'Student_ID','StudentSection']
# If there are groups with students in them then we want to add the extra headers
# So this script will also work if you have only an empty groupset and only individuals
if all_rows:
extra_st_cols = list(chain.from_iterable(("Student"+str(i),"Student"+str(i)+"_ID","Student"+str(i)+"Section") for i in range(2,max_group_cnt+1)))
group_cols = ['GroupSet', 'GroupSetID', 'Group', 'GroupID']
header_cols[0:0] = group_cols
header_cols.extend(extra_st_cols)
df = pd.DataFrame(all_rows, columns=header_cols)
'''Beginning of getting rubrik '''
set_rubrik_headers(course, df)
'''End of getting rubrik '''
'''Beginning of adding students that have the assignment but are not in a group
and also finding their sections'''
print("Adding rest of students that have this assignment")
params = {"include[]": ["user"]}
subs = course.list_submissions(ASSIGN_ID,**params)
id_cols = [x for x in header_cols if "_ID" in x]
for sub in subs:
boomap = []
for idc in id_cols:
boomap.extend([df[idc].isin([sub.user_id]).any()])
if idc == id_cols[-1]:
if not any(boomap):
#print("Adding:", sub.user['name'], sub.user_id)
sec_name = find_sections(sections, sub.user_id)
df = df.append({'Student':sub.user['name'],'Student_ID':int(sub.user_id),'StudentSection': sec_name},ignore_index=True)
# Doing this afterwards simply because we only need it for one student in the row
set_submission_date(subs, df)
if rm_nosubs == 'y':
print("Removing students/groups that have not submitted")
df = df.drop(df[df['SubmittedAt'] == "Nothing submitted"].index)
csv_name += ".csv"
all_id_cols = [x for x in header_cols if "ID" in x]
# Change the types of the ID columns so the CSV does not add extra zeroes or decimals to the ids, whether the sheet uses European or US formatting
for col in all_id_cols:
df[col] = df[col].apply(float_to_str_no_decimal)
df.to_csv(csv_name, index_label = 'Nr', float_format='%.2f', sep=',', decimal=".")
print("Successfully created CSV:", csv_name)
def find_sections(sections, user_id):
sec_name = ""
# Student can be listed in many sections so need to check all
for sec in sections:
for st in sec.students:
if st['id'] == user_id:
sec_name += "/" + sec.name if sec_name != "" else sec.name
return sec_name
def set_rubrik_headers(course, df):
assign = course.get_assignment(ASSIGN_ID)
try:
if assign.rubric is not None:
print("Getting rubrik for assignment:", assign.name)
# This is for VERK's and REIR color coding of TA's and completed assignment status
for crit in assign.rubric:
col = crit['description'] + "(" + str(crit['points']) + "/" + str(assign.points_possible) + ")"
df[col] = ""
df[crit['description']+"x"] = ""
print("Added rubrik for assignment: ", assign.name)
else:
print("No rubrik found for assignment:", assign.name)
except AttributeError:
print("No rubric for this assignment", assign.name)
def set_submission_date(subs, df):
# Resetting it just in case, and because we are using .loc
df.reset_index(drop=True, inplace=True)
df["SubmittedAt"] = ""
df["Grade"] = ""
for i in range(0,df.shape[0]):
st_id = df.loc[i, "Student_ID"]
for sub in subs:
if sub.user_id == st_id:
df.loc[i,"Grade"] = sub.score if sub.score != None else float(0)
df.loc[i,"SubmittedAt"] = "Nothing submitted" if sub.submitted_at == None else sub.submitted_at
def float_to_str_no_decimal(val):
return "" if isnan(val) else "%d" % val
if __name__ == '__main__':
main()
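# Usage sketch (not part of the original script): after setting COURSE_ID and
# ASSIGN_ID above and providing API_URL and API_KEY in canvas_config.py, run
#
#     python3 gen_groupset.py
#
# answer the interactive prompts, and the CSV is written to the working directory.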
| mit |
kedaio/tushare | tushare/util/upass.py | 3 | 1304 | # -*- coding:utf-8 -*-
"""
Created on 2015/08/24
@author: Jimmy Liu
@group : waditu
@contact: jimmysoa@sina.cn
"""
import pandas as pd
import os
from tushare.stock import cons as ct
BK = 'bk'
def set_token(token):
df = pd.DataFrame([token], columns=['token'])
df.to_csv(ct.TOKEN_F_P, index=False)
def get_token():
if os.path.exists(ct.TOKEN_F_P):
df = pd.read_csv(ct.TOKEN_F_P)
return str(df.ix[0]['token'])
else:
print(ct.TOKEN_ERR_MSG)
return None
def set_broker(broker='', user='', passwd=''):
df = pd.DataFrame([[broker, user, passwd]],
columns=['broker', 'user', 'passwd'],
dtype=object)
if os.path.exists(BK):
all = pd.read_csv(BK, dtype=object)
if (all[all.broker == broker]['user']).any():
all = all[all.broker != broker]
all = all.append(df, ignore_index=True)
all.to_csv(BK, index=False)
else:
df.to_csv(BK, index=False)
def get_broker(broker=''):
if os.path.exists(BK):
df = pd.read_csv(BK, dtype=object)
if broker == '':
return df
else:
return df[df.broker == broker]
else:
return None
def remove_broker():
os.remove(BK)
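# Usage sketch (not part of the original module; assumes tushare is installed
# and importable):
#
#     from tushare.util import upass
#     upass.set_token('your-token')
#     print(upass.get_token())
#     upass.set_broker(broker='ht', user='username', passwd='password')
#     print(upass.get_broker('ht'))
#     upass.remove_broker()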
| bsd-3-clause |
bsmrstu-warriors/Moytri---The-Drone-Aider | Lib/site-packages/numpy/linalg/linalg.py | 53 | 61098 | """Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh','lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError']
import sys
from numpy.core import array, asarray, zeros, empty, transpose, \
intc, single, double, csingle, cdouble, inexact, complexfloating, \
newaxis, ravel, all, Inf, dot, add, multiply, identity, sqrt, \
maximum, flatnonzero, diagonal, arange, fastCopyAndTranspose, sum, \
isfinite, size, finfo, absolute, log, exp
from numpy.lib import triu
from numpy.linalg import lapack_lite
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
fortran_int = intc
# Error object
class LinAlgError(Exception):
"""
Generic Python-exception-derived object raised by linalg functions.
General purpose exception class, derived from Python's exception.Exception
class, programmatically raised in linalg functions when a Linear
Algebra-related condition would prevent further correct execution of the
function.
Parameters
----------
None
Examples
--------
>>> from numpy import linalg as LA
>>> LA.inv(np.zeros((2,2)))
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
File "...linalg.py", line 350,
in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
File "...linalg.py", line 249,
in solve
raise LinAlgError, 'Singular matrix'
numpy.linalg.linalg.LinAlgError: Singular matrix
"""
pass
def _makearray(a):
new = asarray(a)
wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
return new, wrap
def isComplexType(t):
return issubclass(t, complexfloating)
_real_types_map = {single : single,
double : double,
csingle : single,
cdouble : double}
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _realType(t, default=double):
return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
return _complex_types_map.get(t, default)
def _linalgRealType(t):
"""Cast the type t to either double or cdouble."""
return double
_complex_types_map = {single : csingle,
double : cdouble,
csingle : csingle,
cdouble : cdouble}
def _commonType(*arrays):
# in lite version, use higher precision (always double or cdouble)
result_type = single
is_complex = False
for a in arrays:
if issubclass(a.dtype.type, inexact):
if isComplexType(a.dtype.type):
is_complex = True
rt = _realType(a.dtype.type, default=None)
if rt is None:
# unsupported inexact scalar
raise TypeError("array type %s is unsupported in linalg" %
(a.dtype.name,))
else:
rt = double
if rt is double:
result_type = double
if is_complex:
t = cdouble
result_type = _complex_types_map[result_type]
else:
t = double
return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
ret = []
for arr in arrays:
if arr.dtype.byteorder not in ('=', '|'):
ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
else:
ret.append(arr)
if len(ret) == 1:
return ret[0]
else:
return ret
def _fastCopyAndTranspose(type, *arrays):
cast_arrays = ()
for a in arrays:
if a.dtype.type is type:
cast_arrays = cast_arrays + (_fastCT(a),)
else:
cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
if len(cast_arrays) == 1:
return cast_arrays[0]
else:
return cast_arrays
def _assertRank2(*arrays):
for a in arrays:
if len(a.shape) != 2:
raise LinAlgError, '%d-dimensional array given. Array must be \
two-dimensional' % len(a.shape)
def _assertSquareness(*arrays):
for a in arrays:
if max(a.shape) != min(a.shape):
raise LinAlgError, 'Array must be square'
def _assertFinite(*arrays):
for a in arrays:
if not (isfinite(a).all()):
raise LinAlgError, "Array must not contain infs or NaNs"
def _assertNonEmpty(*arrays):
for a in arrays:
if size(a) == 0:
raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
"""
Solve the tensor equation ``a x = b`` for x.
It is assumed that all indices of `x` are summed over in the product,
together with the rightmost indices of `a`, as is done in, for example,
``tensordot(a, x, axes=len(b.shape))``.
Parameters
----------
a : array_like
Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
the shape of that sub-tensor of `a` consisting of the appropriate
number of its rightmost indices, and must be such that
``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
'square').
b : array_like
Right-hand tensor, which can be of any shape.
axes : tuple of ints, optional
Axes in `a` to reorder to the right, before inversion.
If None (default), no reordering is done.
Returns
-------
x : ndarray, shape Q
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorinv
Examples
--------
>>> a = np.eye(2*3*4)
>>> a.shape = (2*3, 4, 2, 3, 4)
>>> b = np.random.randn(2*3, 4)
>>> x = np.linalg.tensorsolve(a, b)
>>> x.shape
(2, 3, 4)
>>> np.allclose(np.tensordot(a, x, axes=3), b)
True
"""
a,wrap = _makearray(a)
b = asarray(b)
an = a.ndim
if axes is not None:
allaxes = range(0, an)
for k in axes:
allaxes.remove(k)
allaxes.insert(an, k)
a = a.transpose(allaxes)
oldshape = a.shape[-(an-b.ndim):]
prod = 1
for k in oldshape:
prod *= k
a = a.reshape(-1, prod)
b = b.ravel()
res = wrap(solve(a, b))
res.shape = oldshape
return res
def solve(a, b):
"""
Solve a linear matrix equation, or system of linear scalar equations.
Computes the "exact" solution, `x`, of the well-determined, i.e., full
rank, linear matrix equation `ax = b`.
Parameters
----------
a : array_like, shape (M, M)
Coefficient matrix.
b : array_like, shape (M,) or (M, N)
Ordinate or "dependent variable" values.
Returns
-------
x : ndarray, shape (M,) or (M, N) depending on b
Solution to the system a x = b
Raises
------
LinAlgError
If `a` is singular or not square.
Notes
-----
`solve` is a wrapper for the LAPACK routines `dgesv`_ and
`zgesv`_, the former being used if `a` is real-valued, the latter if
it is complex-valued. The solution to the system of linear equations
is computed using an LU decomposition [1]_ with partial pivoting and
row interchanges.
.. _dgesv: http://www.netlib.org/lapack/double/dgesv.f
.. _zgesv: http://www.netlib.org/lapack/complex16/zgesv.f
`a` must be square and of full-rank, i.e., all rows (or, equivalently,
columns) must be linearly independent; if either is not true, use
`lstsq` for the least-squares best "solution" of the
system/equation.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 22.
Examples
--------
Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
>>> a = np.array([[3,1], [1,2]])
>>> b = np.array([9,8])
>>> x = np.linalg.solve(a, b)
>>> x
array([ 2., 3.])
Check that the solution is correct:
>>> (np.dot(a, x) == b).all()
True
"""
a, _ = _makearray(a)
b, wrap = _makearray(b)
one_eq = len(b.shape) == 1
if one_eq:
b = b[:, newaxis]
_assertRank2(a, b)
_assertSquareness(a)
n_eq = a.shape[0]
n_rhs = b.shape[1]
if n_eq != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
# lapack_routine = _findLapackRoutine('gesv', t)
if isComplexType(t):
lapack_routine = lapack_lite.zgesv
else:
lapack_routine = lapack_lite.dgesv
a, b = _fastCopyAndTranspose(t, a, b)
a, b = _to_native_byte_order(a, b)
pivots = zeros(n_eq, fortran_int)
results = lapack_routine(n_eq, n_rhs, a, n_eq, pivots, b, n_eq, 0)
if results['info'] > 0:
raise LinAlgError, 'Singular matrix'
if one_eq:
return wrap(b.ravel().astype(result_t))
else:
return wrap(b.transpose().astype(result_t))
def tensorinv(a, ind=2):
"""
Compute the 'inverse' of an N-dimensional array.
The result is an inverse for `a` relative to the tensordot operation
``tensordot(a, b, ind)``, i. e., up to floating-point accuracy,
``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for the
tensordot operation.
Parameters
----------
a : array_like
Tensor to 'invert'. Its shape must be 'square', i. e.,
``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
ind : int, optional
Number of first indices that are involved in the inverse sum.
Must be a positive integer, default is 2.
Returns
-------
b : ndarray
`a`'s tensordot inverse, shape ``a.shape[:ind] + a.shape[ind:]``.
Raises
------
LinAlgError
If `a` is singular or not 'square' (in the above sense).
See Also
--------
tensordot, tensorsolve
Examples
--------
>>> a = np.eye(4*6)
>>> a.shape = (4, 6, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=2)
>>> ainv.shape
(8, 3, 4, 6)
>>> b = np.random.randn(4, 6)
>>> np.allclose(np.tensordot(ainv, b), np.linalg.tensorsolve(a, b))
True
>>> a = np.eye(4*6)
>>> a.shape = (24, 8, 3)
>>> ainv = np.linalg.tensorinv(a, ind=1)
>>> ainv.shape
(8, 3, 24)
>>> b = np.random.randn(24)
>>> np.allclose(np.tensordot(ainv, b, 1), np.linalg.tensorsolve(a, b))
True
"""
a = asarray(a)
oldshape = a.shape
prod = 1
if ind > 0:
invshape = oldshape[ind:] + oldshape[:ind]
for k in oldshape[ind:]:
prod *= k
else:
raise ValueError, "Invalid ind argument."
a = a.reshape(prod, -1)
ia = inv(a)
return ia.reshape(*invshape)
# Matrix inversion
def inv(a):
"""
Compute the (multiplicative) inverse of a matrix.
Given a square matrix `a`, return the matrix `ainv` satisfying
``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.
Parameters
----------
a : array_like, shape (M, M)
Matrix to be inverted.
Returns
-------
ainv : ndarray or matrix, shape (M, M)
(Multiplicative) inverse of the matrix `a`.
Raises
------
LinAlgError
If `a` is singular or not square.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1., 2.], [3., 4.]])
>>> ainv = LA.inv(a)
>>> np.allclose(np.dot(a, ainv), np.eye(2))
True
>>> np.allclose(np.dot(ainv, a), np.eye(2))
True
If a is a matrix object, then the return value is a matrix as well:
>>> ainv = LA.inv(np.matrix(a))
>>> ainv
matrix([[-2. , 1. ],
[ 1.5, -0.5]])
"""
a, wrap = _makearray(a)
return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
# Cholesky decomposition
def cholesky(a):
"""
Cholesky decomposition.
Return the Cholesky decomposition, `L * L.H`, of the square matrix `a`,
where `L` is lower-triangular and .H is the conjugate transpose operator
(which is the ordinary transpose if `a` is real-valued). `a` must be
Hermitian (symmetric if real-valued) and positive-definite. Only `L` is
actually returned.
Parameters
----------
a : array_like, shape (M, M)
Hermitian (symmetric if all elements are real), positive-definite
input matrix.
Returns
-------
L : ndarray, or matrix object if `a` is, shape (M, M)
Lower-triangular Cholesky factor of a.
Raises
------
LinAlgError
If the decomposition fails, for example, if `a` is not
positive-definite.
Notes
-----
The Cholesky decomposition is often used as a fast way of solving
.. math:: A \\mathbf{x} = \\mathbf{b}
(when `A` is both Hermitian/symmetric and positive-definite).
First, we solve for :math:`\\mathbf{y}` in
.. math:: L \\mathbf{y} = \\mathbf{b},
and then for :math:`\\mathbf{x}` in
.. math:: L.H \\mathbf{x} = \\mathbf{y}.
Examples
--------
>>> A = np.array([[1,-2j],[2j,5]])
>>> A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> L = np.linalg.cholesky(A)
>>> L
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> np.dot(L, L.T.conj()) # verify that L * L.H = A
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> A = [[1,-2j],[2j,5]] # what happens if A is only array_like?
>>> np.linalg.cholesky(A) # an ndarray object is returned
array([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
>>> # But a matrix object is returned if A is a matrix object
>>> LA.cholesky(np.matrix(A))
matrix([[ 1.+0.j, 0.+0.j],
[ 0.+2.j, 1.+0.j]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
m = a.shape[0]
n = a.shape[1]
if isComplexType(t):
lapack_routine = lapack_lite.zpotrf
else:
lapack_routine = lapack_lite.dpotrf
results = lapack_routine(_L, n, a, m, 0)
if results['info'] > 0:
raise LinAlgError, 'Matrix is not positive definite - \
Cholesky decomposition cannot be computed'
s = triu(a, k=0).transpose()
if (s.dtype != result_t):
s = s.astype(result_t)
return wrap(s)
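# A minimal sketch (not part of the original module) of the solve pattern in the
# `cholesky` Notes: factor A = L L.H, then solve the two triangular systems
# L y = b and L.H x = y.  Plain `solve` is used here for brevity; a dedicated
# triangular solver would be the efficient choice.
#
#     >>> A = np.array([[4., 2.], [2., 3.]])
#     >>> b = np.array([1., 2.])
#     >>> L = np.linalg.cholesky(A)
#     >>> y = np.linalg.solve(L, b)
#     >>> x = np.linalg.solve(L.conj().T, y)
#     >>> np.allclose(np.dot(A, x), b)
#     True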
# QR decomposition
def qr(a, mode='full'):
"""
Compute the qr factorization of a matrix.
Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
upper-triangular.
Parameters
----------
a : array_like
Matrix to be factored, of shape (M, N).
mode : {'full', 'r', 'economic'}, optional
Specifies the values to be returned. 'full' is the default.
Economic mode is slightly faster than 'r' mode if only `r` is needed.
Returns
-------
q : ndarray of float or complex, optional
The orthonormal matrix, of shape (M, K). Only returned if
``mode='full'``.
r : ndarray of float or complex, optional
The upper-triangular matrix, of shape (K, N) with K = min(M, N).
Only returned when ``mode='full'`` or ``mode='r'``.
a2 : ndarray of float or complex, optional
Array of shape (M, N), only returned when ``mode='economic'``.
The diagonal and the upper triangle of `a2` contains `r`, while
the rest of the matrix is undefined.
Raises
------
LinAlgError
If factoring fails.
Notes
-----
This is an interface to the LAPACK routines dgeqrf, zgeqrf,
dorgqr, and zungqr.
For more information on the qr factorization, see for example:
http://en.wikipedia.org/wiki/QR_factorization
Subclasses of `ndarray` are preserved, so if `a` is of type `matrix`,
all the return values will be matrices too.
Examples
--------
>>> a = np.random.randn(9, 6)
>>> q, r = np.linalg.qr(a)
>>> np.allclose(a, np.dot(q, r)) # a does equal qr
True
>>> r2 = np.linalg.qr(a, mode='r')
>>> r3 = np.linalg.qr(a, mode='economic')
>>> np.allclose(r, r2) # mode='r' returns the same r as mode='full'
True
>>> # But only triu parts are guaranteed equal when mode='economic'
>>> np.allclose(r, np.triu(r3[:6,:6], k=0))
True
Example illustrating a common use of `qr`: solving of least squares
problems
What are the least-squares-best `m` and `y0` in ``y = y0 + mx`` for
the following data: {(0,1), (1,0), (1,2), (2,1)}. (Graph the points
and you'll see that it should be y0 = 0, m = 1.) The answer is provided
by solving the over-determined matrix equation ``Ax = b``, where::
A = array([[0, 1], [1, 1], [1, 1], [2, 1]])
x = array([[y0], [m]])
b = array([[1], [0], [2], [1]])
If A = qr such that q is orthonormal (which is always possible via
Gram-Schmidt), then ``x = inv(r) * (q.T) * b``. (In numpy practice,
however, we simply use `lstsq`.)
>>> A = np.array([[0, 1], [1, 1], [1, 1], [2, 1]])
>>> A
array([[0, 1],
[1, 1],
[1, 1],
[2, 1]])
>>> b = np.array([1, 0, 2, 1])
>>> q, r = LA.qr(A)
>>> p = np.dot(q.T, b)
>>> np.dot(LA.inv(r), p)
array([ 1.1e-16, 1.0e+00])
"""
a, wrap = _makearray(a)
_assertRank2(a)
m, n = a.shape
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
mn = min(m, n)
tau = zeros((mn,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeqrf
routine_name = 'zgeqrf'
else:
lapack_routine = lapack_lite.dgeqrf
routine_name = 'dgeqrf'
# calculate optimal size of work data 'work'
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# do qr decomposition
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# economic mode. Isn't actually economic.
if mode[0] == 'e':
if t != result_t :
a = a.astype(result_t)
return a.T
# generate r
r = _fastCopyAndTranspose(result_t, a[:,:mn])
for i in range(mn):
r[i,:i].fill(0.0)
# 'r'-mode, that is, calculate only r
if mode[0] == 'r':
return r
# from here on: build orthonormal matrix q from a
if isComplexType(t):
lapack_routine = lapack_lite.zungqr
routine_name = 'zungqr'
else:
lapack_routine = lapack_lite.dorgqr
routine_name = 'dorgqr'
# determine optimal lwork
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, -1, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
# compute q
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(m, mn, mn, a, m, tau, work, lwork, 0)
if results['info'] != 0:
raise LinAlgError, '%s returns %d' % (routine_name, results['info'])
q = _fastCopyAndTranspose(result_t, a[:mn,:])
return wrap(q), wrap(r)
# Eigenvalues
def eigvals(a):
"""
Compute the eigenvalues of a general matrix.
Main difference between `eigvals` and `eig`: the eigenvectors aren't
returned.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues will be computed.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
They are not necessarily ordered, nor are they necessarily
real for real matrices.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eig : eigenvalues and right eigenvectors of general arrays
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
that sets those routines' flags to return only the eigenvalues of
general real and complex arrays, respectively.
Examples
--------
Illustration, using the fact that the eigenvalues of a diagonal matrix
are its diagonal elements, that multiplying a matrix on the left
by an orthogonal matrix, `Q`, and on the right by `Q.T` (the transpose
of `Q`), preserves the eigenvalues of the "middle" matrix. In other words,
if `Q` is orthogonal, then ``Q * A * Q.T`` has the same eigenvalues as
``A``:
>>> from numpy import linalg as LA
>>> x = np.random.random()
>>> Q = np.array([[np.cos(x), -np.sin(x)], [np.sin(x), np.cos(x)]])
>>> LA.norm(Q[0, :]), LA.norm(Q[1, :]), np.dot(Q[0, :],Q[1, :])
(1.0, 1.0, 0.0)
Now multiply a diagonal matrix by Q on one side and by Q.T on the other:
>>> D = np.diag((-1,1))
>>> LA.eigvals(D)
array([-1., 1.])
>>> A = np.dot(Q, D)
>>> A = np.dot(A, Q.T)
>>> LA.eigvals(A)
array([ 1., -1.])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
rwork = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, w,
dummy, 1, dummy, 1, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _N, n, a, n, wr, wi,
dummy, 1, dummy, 1, work, lwork, 0)
if all(wi == 0.):
w = wr
result_t = _realType(result_t)
else:
w = wr+1j*wi
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def eigvalsh(a, UPLO='L'):
"""
Compute the eigenvalues of a Hermitian or real symmetric matrix.
Main difference from eigh: the eigenvectors are not computed.
Parameters
----------
a : array_like, shape (M, M)
A complex- or real-valued matrix whose eigenvalues are to be
computed.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered, each repeated according to
its multiplicity.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
eigvals : eigenvalues of general real or complex arrays.
eig : eigenvalues and right eigenvectors of general real or complex
arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd
that sets those routines' flags to return only the eigenvalues of
real symmetric and complex Hermitian arrays, respectively.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> LA.eigvalsh(a)
array([ 0.17157288+0.j, 5.82842712+0.j])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
return w.astype(result_t)
def _convertarray(a):
t, result_t = _commonType(a)
a = _fastCT(a.astype(t))
return a, t, result_t
# Eigenvectors
def eig(a):
"""
Compute the eigenvalues and right eigenvectors of a square array.
Parameters
----------
a : array_like, shape (M, M)
A square array of real or complex elements.
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, each repeated according to its multiplicity.
The eigenvalues are not necessarily ordered, nor are they
necessarily real for real arrays (though for real arrays
complex-valued eigenvalues should occur in conjugate pairs).
v : ndarray, shape (M, M)
The normalized (unit "length") eigenvectors, such that the
column ``v[:,i]`` is the eigenvector corresponding to the
eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of a symmetric or Hermitian (conjugate symmetric)
array.
eigvals : eigenvalues of a non-symmetric array.
Notes
-----
This is a simple interface to the LAPACK routines dgeev and zgeev
which compute the eigenvalues and eigenvectors of, respectively,
general real- and complex-valued square arrays.
The number `w` is an eigenvalue of `a` if there exists a vector
`v` such that ``dot(a,v) = w * v``. Thus, the arrays `a`, `w`, and
`v` satisfy the equations ``dot(a[i,:], v[i]) = w[i] * v[:,i]``
for :math:`i \\in \\{0,...,M-1\\}`.
The array `v` of eigenvectors may not be of maximum rank, that is, some
of the columns may be linearly dependent, although round-off error may
obscure that fact. If the eigenvalues are all different, then theoretically
the eigenvectors are linearly independent. Likewise, the (complex-valued)
matrix of eigenvectors `v` is unitary if the matrix `a` is normal, i.e.,
if ``dot(a, a.H) = dot(a.H, a)``, where `a.H` denotes the conjugate
transpose of `a`.
Finally, it is emphasized that `v` consists of the *right* (as in
right-hand side) eigenvectors of `a`. A vector `y` satisfying
``dot(y.T, a) = z * y.T`` for some number `z` is called a *left*
eigenvector of `a`, and, in general, the left and right eigenvectors
of a matrix are not necessarily the (perhaps conjugate) transposes
of each other.
References
----------
G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando, FL,
Academic Press, Inc., 1980, Various pp.
Examples
--------
>>> from numpy import linalg as LA
(Almost) trivial example with real e-values and e-vectors.
>>> w, v = LA.eig(np.diag((1, 2, 3)))
>>> w; v
array([ 1., 2., 3.])
array([[ 1., 0., 0.],
[ 0., 1., 0.],
[ 0., 0., 1.]])
Real matrix possessing complex e-values and e-vectors; note that the
e-values are complex conjugates of each other.
>>> w, v = LA.eig(np.array([[1, -1], [1, 1]]))
>>> w; v
array([ 1. + 1.j, 1. - 1.j])
array([[ 0.70710678+0.j , 0.70710678+0.j ],
[ 0.00000000-0.70710678j, 0.00000000+0.70710678j]])
Complex-valued matrix with real e-values (but complex-valued e-vectors);
note that a.conj().T = a, i.e., a is Hermitian.
>>> a = np.array([[1, 1j], [-1j, 1]])
>>> w, v = LA.eig(a)
>>> w; v
array([ 2.00000000e+00+0.j, 5.98651912e-36+0.j]) # i.e., {2, 0}
array([[ 0.00000000+0.70710678j, 0.70710678+0.j ],
[ 0.70710678+0.j , 0.00000000+0.70710678j]])
Be careful about round-off error!
>>> a = np.array([[1 + 1e-9, 0], [0, 1 - 1e-9]])
>>> # Theor. e-values are 1 +/- 1e-9
>>> w, v = LA.eig(a)
>>> w; v
array([ 1., 1.])
array([[ 1., 0.],
[ 0., 1.]])
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
_assertFinite(a)
a, t, result_t = _convertarray(a) # convert to double or cdouble type
a = _to_native_byte_order(a)
real_t = _linalgRealType(t)
n = a.shape[0]
dummy = zeros((1,), t)
if isComplexType(t):
# Complex routines take different arguments
lapack_routine = lapack_lite.zgeev
w = zeros((n,), t)
v = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
rwork = zeros((2*n,), real_t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, -1, rwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, w,
dummy, 1, v, n, work, lwork, rwork, 0)
else:
lapack_routine = lapack_lite.dgeev
wr = zeros((n,), t)
wi = zeros((n,), t)
vr = zeros((n, n), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, -1, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_N, _V, n, a, n, wr, wi,
dummy, 1, vr, n, work, lwork, 0)
if all(wi == 0.0):
w = wr
v = vr
result_t = _realType(result_t)
else:
w = wr+1j*wi
v = array(vr, w.dtype)
ind = flatnonzero(wi != 0.0) # indices of complex e-vals
for i in range(len(ind)//2):
v[ind[2*i]] = vr[ind[2*i]] + 1j*vr[ind[2*i+1]]
v[ind[2*i+1]] = vr[ind[2*i]] - 1j*vr[ind[2*i+1]]
result_t = _complexType(result_t)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
vt = v.transpose().astype(result_t)
return w.astype(result_t), wrap(vt)
def eigh(a, UPLO='L'):
"""
Return the eigenvalues and eigenvectors of a Hermitian or symmetric matrix.
Returns two objects, a 1-D array containing the eigenvalues of `a`, and
a 2-D square array or matrix (depending on the input type) of the
corresponding eigenvectors (in columns).
Parameters
----------
a : array_like, shape (M, M)
A complex Hermitian or real symmetric matrix.
UPLO : {'L', 'U'}, optional
Specifies whether the calculation is done with the lower triangular
part of `a` ('L', default) or the upper triangular part ('U').
Returns
-------
w : ndarray, shape (M,)
The eigenvalues, not necessarily ordered.
v : ndarray, or matrix object if `a` is, shape (M, M)
The column ``v[:, i]`` is the normalized eigenvector corresponding
to the eigenvalue ``w[i]``.
Raises
------
LinAlgError
If the eigenvalue computation does not converge.
See Also
--------
eigvalsh : eigenvalues of symmetric or Hermitian arrays.
eig : eigenvalues and right eigenvectors for non-symmetric arrays.
eigvals : eigenvalues of non-symmetric arrays.
Notes
-----
This is a simple interface to the LAPACK routines dsyevd and zheevd,
which compute the eigenvalues and eigenvectors of real symmetric and
complex Hermitian arrays, respectively.
The eigenvalues of real symmetric or complex Hermitian matrices are
always real. [1]_ The array `v` of (column) eigenvectors is unitary
and `a`, `w`, and `v` satisfy the equations
``dot(a, v[:, i]) = w[i] * v[:, i]``.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pg. 222.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, -2j], [2j, 5]])
>>> a
array([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(a)
>>> w; v
array([ 0.17157288, 5.82842712])
array([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
>>> np.dot(a, v[:, 0]) - w[0] * v[:, 0] # verify 1st e-val/vec pair
array([2.77555756e-17 + 0.j, 0. + 1.38777878e-16j])
>>> np.dot(a, v[:, 1]) - w[1] * v[:, 1] # verify 2nd e-val/vec pair
array([ 0.+0.j, 0.+0.j])
>>> A = np.matrix(a) # what happens if input is a matrix object
>>> A
matrix([[ 1.+0.j, 0.-2.j],
[ 0.+2.j, 5.+0.j]])
>>> w, v = LA.eigh(A)
>>> w; v
array([ 0.17157288, 5.82842712])
matrix([[-0.92387953+0.j , -0.38268343+0.j ],
[ 0.00000000+0.38268343j, 0.00000000-0.92387953j]])
"""
UPLO = asbytes(UPLO)
a, wrap = _makearray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
liwork = 5*n+3
iwork = zeros((liwork,), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zheevd
w = zeros((n,), real_t)
lwork = 1
work = zeros((lwork,), t)
lrwork = 1
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
rwork, -1, iwork, liwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
lrwork = int(rwork[0])
rwork = zeros((lrwork,), real_t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
rwork, lrwork, iwork, liwork, 0)
else:
lapack_routine = lapack_lite.dsyevd
w = zeros((n,), t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, -1,
iwork, liwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(_V, UPLO, n, a, n, w, work, lwork,
iwork, liwork, 0)
if results['info'] > 0:
raise LinAlgError, 'Eigenvalues did not converge'
at = a.transpose().astype(result_t)
return w.astype(_realType(result_t)), wrap(at)
# Singular value decomposition
def svd(a, full_matrices=1, compute_uv=1):
"""
Singular Value Decomposition.
Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
are unitary and `s` is a 1-d array of `a`'s singular values.
Parameters
----------
a : array_like
A real or complex matrix of shape (`M`, `N`) .
full_matrices : bool, optional
If True (default), `u` and `v` have the shapes (`M`, `M`) and
(`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
compute_uv : bool, optional
Whether or not to compute `u` and `v` in addition to `s`. True
by default.
Returns
-------
u : ndarray
Unitary matrix. The shape of `u` is (`M`, `M`) or (`M`, `K`)
depending on value of ``full_matrices``.
s : ndarray
The singular values, sorted so that ``s[i] >= s[i+1]``. `s` is
a 1-d array of length min(`M`, `N`).
v : ndarray
Unitary matrix of shape (`N`, `N`) or (`K`, `N`), depending on
``full_matrices``.
Raises
------
LinAlgError
If SVD computation does not converge.
Notes
-----
The SVD is commonly written as ``a = U S V.H``. The `v` returned
by this function is ``V.H`` and ``u = U``.
If ``U`` is a unitary matrix, it means that it
satisfies ``U.H = inv(U)``.
The rows of `v` are the eigenvectors of ``a.H a``. The columns
of `u` are the eigenvectors of ``a a.H``. For row ``i`` in
`v` and column ``i`` in `u`, the corresponding eigenvalue is
``s[i]**2``.
If `a` is a `matrix` object (as opposed to an `ndarray`), then so
are all the return values.
Examples
--------
>>> a = np.random.randn(9, 6) + 1j*np.random.randn(9, 6)
Reconstruction based on full SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=True)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.zeros((9, 6), dtype=complex)
>>> S[:6, :6] = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
Reconstruction based on reduced SVD:
>>> U, s, V = np.linalg.svd(a, full_matrices=False)
>>> U.shape, V.shape, s.shape
((9, 6), (6, 6), (6,))
>>> S = np.diag(s)
>>> np.allclose(a, np.dot(U, np.dot(S, V)))
True
"""
a, wrap = _makearray(a)
_assertRank2(a)
_assertNonEmpty(a)
m, n = a.shape
t, result_t = _commonType(a)
real_t = _linalgRealType(t)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
s = zeros((min(n, m),), real_t)
if compute_uv:
if full_matrices:
nu = m
nvt = n
option = _A
else:
nu = min(n, m)
nvt = min(n, m)
option = _S
u = zeros((nu, m), t)
vt = zeros((n, nvt), t)
else:
option = _N
nu = 1
nvt = 1
u = empty((1, 1), t)
vt = empty((1, 1), t)
iwork = zeros((8*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgesdd
rwork = zeros((5*min(m, n)*min(m, n) + 5*min(m, n),), real_t)
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgesdd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(option, m, n, a, m, s, u, m, vt, nvt,
work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge'
s = s.astype(_realType(result_t))
if compute_uv:
u = u.transpose().astype(result_t)
vt = vt.transpose().astype(result_t)
return wrap(u), s, wrap(vt)
else:
return s
def cond(x, p=None):
"""
Compute the condition number of a matrix.
This function is capable of returning the condition number using
one of seven different norms, depending on the value of `p` (see
Parameters below).
Parameters
----------
x : array_like, shape (M, N)
The matrix whose condition number is sought.
p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
Order of the norm:
===== ============================
p norm for matrices
===== ============================
None 2-norm, computed directly using the ``SVD``
'fro' Frobenius norm
inf max(sum(abs(x), axis=1))
-inf min(sum(abs(x), axis=1))
1 max(sum(abs(x), axis=0))
-1 min(sum(abs(x), axis=0))
2 2-norm (largest sing. value)
-2 smallest singular value
===== ============================
inf means the numpy.inf object, and the Frobenius norm is
the root-of-sum-of-squares norm.
Returns
-------
c : {float, inf}
The condition number of the matrix. May be infinite.
See Also
--------
numpy.linalg.linalg.norm
Notes
-----
The condition number of `x` is defined as the norm of `x` times the
norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
(root-of-sum-of-squares) or one of a number of other matrix norms.
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando, FL,
Academic Press, Inc., 1980, pg. 285.
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.array([[1, 0, -1], [0, 1, 0], [1, 0, 1]])
>>> a
array([[ 1, 0, -1],
[ 0, 1, 0],
[ 1, 0, 1]])
>>> LA.cond(a)
1.4142135623730951
>>> LA.cond(a, 'fro')
3.1622776601683795
>>> LA.cond(a, np.inf)
2.0
>>> LA.cond(a, -np.inf)
1.0
>>> LA.cond(a, 1)
2.0
>>> LA.cond(a, -1)
1.0
>>> LA.cond(a, 2)
1.4142135623730951
>>> LA.cond(a, -2)
0.70710678118654746
>>> min(LA.svd(a, compute_uv=0))*min(LA.svd(LA.inv(a), compute_uv=0))
0.70710678118654746
"""
x = asarray(x) # in case we have a matrix
if p is None:
s = svd(x,compute_uv=False)
return s[0]/s[-1]
else:
return norm(x,p)*norm(inv(x),p)
def matrix_rank(M, tol=None):
"""
Return matrix rank of array using SVD method
Rank of the array is the number of SVD singular values of the
array that are greater than `tol`.
Parameters
----------
M : array_like
array of <=2 dimensions
tol : {None, float}
threshold below which SVD values are considered zero. If `tol` is
None, and ``S`` is an array with singular values for `M`, and
``eps`` is the epsilon value for datatype of ``S``, then `tol` is
set to ``S.max() * eps``.
Notes
-----
Golub and van Loan [1]_ define "numerical rank deficiency" as using
tol=eps*S[0] (where S[0] is the maximum singular value and thus the
2-norm of the matrix). This is one definition of rank deficiency,
and the one we use here. When floating point roundoff is the main
concern, then "numerical rank deficiency" is a reasonable choice. In
some cases you may prefer other definitions. The most useful measure
of the tolerance depends on the operations you intend to use on your
matrix. For example, if your data come from uncertain measurements
with uncertainties greater than floating point epsilon, choosing a
tolerance near that uncertainty may be preferable. The tolerance
may be absolute if the uncertainties are absolute rather than
relative.
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*.
Baltimore: Johns Hopkins University Press, 1996.
Examples
--------
>>> matrix_rank(np.eye(4)) # Full rank matrix
4
>>> I=np.eye(4); I[-1,-1] = 0. # rank deficient matrix
>>> matrix_rank(I)
3
>>> matrix_rank(np.ones((4,))) # 1 dimension - rank 1 unless all 0
1
>>> matrix_rank(np.zeros((4,)))
0
"""
M = asarray(M)
if M.ndim > 2:
raise TypeError('array should have 2 or fewer dimensions')
if M.ndim < 2:
return int(not all(M==0))
S = svd(M, compute_uv=False)
if tol is None:
tol = S.max() * finfo(S.dtype).eps
return sum(S > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
"""
Compute the (Moore-Penrose) pseudo-inverse of a matrix.
Calculate the generalized inverse of a matrix using its
singular-value decomposition (SVD) and including all
*large* singular values.
Parameters
----------
a : array_like, shape (M, N)
Matrix to be pseudo-inverted.
rcond : float
Cutoff for small singular values.
Singular values smaller (in modulus) than
`rcond` * largest_singular_value (again, in modulus)
are set to zero.
Returns
-------
B : ndarray, shape (N, M)
The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
is `B`.
Raises
------
LinAlgError
If the SVD computation does not converge.
Notes
-----
The pseudo-inverse of a matrix A, denoted :math:`A^+`, is
defined as: "the matrix that 'solves' [the least-squares problem]
:math:`Ax = b`," i.e., if :math:`\\bar{x}` is said solution, then
:math:`A^+` is that matrix such that :math:`\\bar{x} = A^+b`.
It can be shown that if :math:`Q_1 \\Sigma Q_2^T = A` is the singular
value decomposition of A, then
:math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`Q_{1,2}` are
orthogonal matrices, :math:`\\Sigma` is a diagonal matrix consisting
of A's so-called singular values, (followed, typically, by
zeros), and then :math:`\\Sigma^+` is simply the diagonal matrix
consisting of the reciprocals of A's singular values
(again, followed by zeros). [1]_
References
----------
.. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
FL, Academic Press, Inc., 1980, pp. 139-142.
Examples
--------
The following example checks that ``a * a+ * a == a`` and
``a+ * a * a+ == a+``:
>>> a = np.random.randn(9, 6)
>>> B = np.linalg.pinv(a)
>>> np.allclose(a, np.dot(a, np.dot(B, a)))
True
>>> np.allclose(B, np.dot(B, np.dot(a, B)))
True
"""
a, wrap = _makearray(a)
_assertNonEmpty(a)
a = a.conjugate()
u, s, vt = svd(a, 0)
m = u.shape[0]
n = vt.shape[1]
cutoff = rcond*maximum.reduce(s)
for i in range(min(n, m)):
if s[i] > cutoff:
s[i] = 1./s[i]
else:
            s[i] = 0.
res = dot(transpose(vt), multiply(s[:, newaxis],transpose(u)))
return wrap(res)
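# Illustrative sketch, not part of the original numpy source: the SVD
# construction A+ = Q_2 Sigma+ Q_1^T described in the Notes above can be
# reproduced directly and compared with this routine (a generic full-rank
# real example, so the `rcond` cutoff plays no role):
#
#     >>> a = np.random.randn(5, 3)
#     >>> u, s, vt = np.linalg.svd(a, full_matrices=False)
#     >>> manual = np.dot(vt.T, np.dot(np.diag(1.0 / s), u.T))
#     >>> np.allclose(manual, np.linalg.pinv(a))
#     True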
# Determinant
def slogdet(a):
"""
Compute the sign and (natural) logarithm of the determinant of an array.
    If an array has a very small or very large determinant, then a call to
`det` may overflow or underflow. This routine is more robust against such
issues, because it computes the logarithm of the determinant rather than
the determinant itself.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
sign : float or complex
A number representing the sign of the determinant. For a real matrix,
this is 1, 0, or -1. For a complex matrix, this is a complex number
with absolute value 1 (i.e., it is on the unit circle), or else 0.
logdet : float
The natural log of the absolute value of the determinant.
If the determinant is zero, then `sign` will be 0 and `logdet` will be
-Inf. In all cases, the determinant is equal to `sign * np.exp(logdet)`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
.. versionadded:: 2.0.0.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> (sign, logdet) = np.linalg.slogdet(a)
>>> (sign, logdet)
(-1, 0.69314718055994529)
>>> sign * np.exp(logdet)
-2.0
This routine succeeds where ordinary `det` does not:
>>> np.linalg.det(np.eye(500) * 0.1)
0.0
>>> np.linalg.slogdet(np.eye(500) * 0.1)
(1, -1151.2925464970228)
See Also
--------
det
"""
a = asarray(a)
_assertRank2(a)
_assertSquareness(a)
t, result_t = _commonType(a)
a = _fastCopyAndTranspose(t, a)
a = _to_native_byte_order(a)
n = a.shape[0]
if isComplexType(t):
lapack_routine = lapack_lite.zgetrf
else:
lapack_routine = lapack_lite.dgetrf
pivots = zeros((n,), fortran_int)
results = lapack_routine(n, n, a, n, pivots, 0)
info = results['info']
if (info < 0):
raise TypeError, "Illegal input to Fortran routine"
elif (info > 0):
return (t(0.0), _realType(t)(-Inf))
sign = 1. - 2. * (add.reduce(pivots != arange(1, n + 1)) % 2)
d = diagonal(a)
absd = absolute(d)
sign *= multiply.reduce(d / absd)
log(absd, absd)
logdet = add.reduce(absd, axis=-1)
return sign, logdet
def det(a):
"""
Compute the determinant of an array.
Parameters
----------
a : array_like, shape (M, M)
Input array.
Returns
-------
det : ndarray
Determinant of `a`.
Notes
-----
The determinant is computed via LU factorization using the LAPACK
routine z/dgetrf.
Examples
--------
The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:
>>> a = np.array([[1, 2], [3, 4]])
>>> np.linalg.det(a)
-2.0
See Also
--------
    slogdet : Another way of representing the determinant, more suitable
for large matrices where underflow/overflow may occur.
"""
sign, logdet = slogdet(a)
return sign * exp(logdet)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
"""
Return the least-squares solution to a linear matrix equation.
Solves the equation `a x = b` by computing a vector `x` that
minimizes the Euclidean 2-norm `|| b - a x ||^2`. The equation may
be under-, well-, or over- determined (i.e., the number of
linearly independent rows of `a` can be less than, equal to, or
greater than its number of linearly independent columns). If `a`
is square and of full rank, then `x` (but for round-off error) is
the "exact" solution of the equation.
Parameters
----------
a : array_like, shape (M, N)
"Coefficient" matrix.
b : array_like, shape (M,) or (M, K)
Ordinate or "dependent variable" values. If `b` is two-dimensional,
the least-squares solution is calculated for each of the `K` columns
of `b`.
rcond : float, optional
Cut-off ratio for small singular values of `a`.
Singular values are set to zero if they are smaller than `rcond`
times the largest singular value of `a`.
Returns
-------
x : ndarray, shape (N,) or (N, K)
Least-squares solution. The shape of `x` depends on the shape of
`b`.
residues : ndarray, shape (), (1,), or (K,)
Sums of residues; squared Euclidean 2-norm for each column in
``b - a*x``.
If the rank of `a` is < N or > M, this is an empty array.
If `b` is 1-dimensional, this is a (1,) shape array.
Otherwise the shape is (K,).
rank : int
Rank of matrix `a`.
s : ndarray, shape (min(M,N),)
Singular values of `a`.
Raises
------
LinAlgError
If computation does not converge.
Notes
-----
If `b` is a matrix, then all array results are returned as matrices.
Examples
--------
Fit a line, ``y = mx + c``, through some noisy data-points:
>>> x = np.array([0, 1, 2, 3])
>>> y = np.array([-1, 0.2, 0.9, 2.1])
By examining the coefficients, we see that the line should have a
gradient of roughly 1 and cut the y-axis at, more or less, -1.
We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
and ``p = [[m], [c]]``. Now use `lstsq` to solve for `p`:
>>> A = np.vstack([x, np.ones(len(x))]).T
>>> A
array([[ 0., 1.],
[ 1., 1.],
[ 2., 1.],
[ 3., 1.]])
>>> m, c = np.linalg.lstsq(A, y)[0]
>>> print m, c
1.0 -0.95
Plot the data along with the fitted line:
>>> import matplotlib.pyplot as plt
>>> plt.plot(x, y, 'o', label='Original data', markersize=10)
>>> plt.plot(x, m*x + c, 'r', label='Fitted line')
>>> plt.legend()
>>> plt.show()
"""
import math
a, _ = _makearray(a)
b, wrap = _makearray(b)
is_1d = len(b.shape) == 1
if is_1d:
b = b[:, newaxis]
_assertRank2(a, b)
m = a.shape[0]
n = a.shape[1]
n_rhs = b.shape[1]
ldb = max(n, m)
if m != b.shape[0]:
raise LinAlgError, 'Incompatible dimensions'
t, result_t = _commonType(a, b)
result_real_t = _realType(result_t)
real_t = _linalgRealType(t)
bstar = zeros((ldb, n_rhs), t)
bstar[:b.shape[0],:n_rhs] = b.copy()
a, bstar = _fastCopyAndTranspose(t, a, bstar)
a, bstar = _to_native_byte_order(a, bstar)
s = zeros((min(m, n),), real_t)
nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
if isComplexType(t):
lapack_routine = lapack_lite.zgelsd
lwork = 1
rwork = zeros((lwork,), real_t)
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, rwork, iwork, 0)
lwork = int(abs(work[0]))
rwork = zeros((lwork,), real_t)
a_real = zeros((m, n), real_t)
bstar_real = zeros((ldb, n_rhs,), real_t)
results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
bstar_real, ldb, s, rcond,
0, rwork, -1, iwork, 0)
lrwork = int(rwork[0])
work = zeros((lwork,), t)
rwork = zeros((lrwork,), real_t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, rwork, iwork, 0)
else:
lapack_routine = lapack_lite.dgelsd
lwork = 1
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, -1, iwork, 0)
lwork = int(work[0])
work = zeros((lwork,), t)
results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
0, work, lwork, iwork, 0)
if results['info'] > 0:
raise LinAlgError, 'SVD did not converge in Linear Least Squares'
resids = array([], result_real_t)
if is_1d:
x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = array([sum(abs(ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
resids = array([sum((ravel(bstar)[n:])**2)],
dtype=result_real_t)
else:
x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
if results['rank'] == n and m > n:
if isComplexType(t):
resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
else:
resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
result_real_t)
st = s[:min(n, m)].copy().astype(result_real_t)
return wrap(x), wrap(resids), results['rank'], st
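# Illustrative sketch, not part of the original numpy source: for a
# well-conditioned overdetermined system the solution returned here agrees
# with applying the pseudo-inverse defined earlier in this module:
#
#     >>> a = np.vstack([np.arange(4.0), np.ones(4)]).T
#     >>> b = np.array([-1.0, 0.2, 0.9, 2.1])
#     >>> x = np.linalg.lstsq(a, b)[0]
#     >>> np.allclose(x, np.dot(np.linalg.pinv(a), b))
#     True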
def norm(x, ord=None):
"""
Matrix or vector norm.
This function is able to return one of seven different matrix norms,
or one of an infinite number of vector norms (described below), depending
on the value of the ``ord`` parameter.
Parameters
----------
x : array_like, shape (M,) or (M, N)
Input array.
ord : {non-zero int, inf, -inf, 'fro'}, optional
Order of the norm (see table under ``Notes``). inf means numpy's
`inf` object.
Returns
-------
n : float
Norm of the matrix or vector.
Notes
-----
For values of ``ord <= 0``, the result is, strictly speaking, not a
mathematical 'norm', but it may still be useful for various numerical
purposes.
The following norms can be calculated:
===== ============================ ==========================
ord norm for matrices norm for vectors
===== ============================ ==========================
None Frobenius norm 2-norm
'fro' Frobenius norm --
inf max(sum(abs(x), axis=1)) max(abs(x))
-inf min(sum(abs(x), axis=1)) min(abs(x))
0 -- sum(x != 0)
1 max(sum(abs(x), axis=0)) as below
-1 min(sum(abs(x), axis=0)) as below
2 2-norm (largest sing. value) as below
-2 smallest singular value as below
other -- sum(abs(x)**ord)**(1./ord)
===== ============================ ==========================
The Frobenius norm is given by [1]_:
:math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`
References
----------
.. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15
Examples
--------
>>> from numpy import linalg as LA
>>> a = np.arange(9) - 4
>>> a
array([-4, -3, -2, -1, 0, 1, 2, 3, 4])
>>> b = a.reshape((3, 3))
>>> b
array([[-4, -3, -2],
[-1, 0, 1],
[ 2, 3, 4]])
>>> LA.norm(a)
7.745966692414834
>>> LA.norm(b)
7.745966692414834
>>> LA.norm(b, 'fro')
7.745966692414834
>>> LA.norm(a, np.inf)
4
>>> LA.norm(b, np.inf)
9
>>> LA.norm(a, -np.inf)
0
>>> LA.norm(b, -np.inf)
2
>>> LA.norm(a, 1)
20
>>> LA.norm(b, 1)
7
>>> LA.norm(a, -1)
-4.6566128774142013e-010
>>> LA.norm(b, -1)
6
>>> LA.norm(a, 2)
7.745966692414834
>>> LA.norm(b, 2)
7.3484692283495345
>>> LA.norm(a, -2)
nan
>>> LA.norm(b, -2)
1.8570331885190563e-016
>>> LA.norm(a, 3)
5.8480354764257312
>>> LA.norm(a, -3)
nan
"""
x = asarray(x)
if ord is None: # check the default case first and handle it immediately
return sqrt(add.reduce((x.conj() * x).ravel().real))
nd = x.ndim
if nd == 1:
if ord == Inf:
return abs(x).max()
elif ord == -Inf:
return abs(x).min()
elif ord == 0:
return (x != 0).sum() # Zero norm
elif ord == 1:
return abs(x).sum() # special case for speedup
elif ord == 2:
return sqrt(((x.conj()*x).real).sum()) # special case for speedup
else:
try:
ord + 1
except TypeError:
raise ValueError, "Invalid norm order for vectors."
return ((abs(x)**ord).sum())**(1.0/ord)
elif nd == 2:
if ord == 2:
return svd(x, compute_uv=0).max()
elif ord == -2:
return svd(x, compute_uv=0).min()
elif ord == 1:
return abs(x).sum(axis=0).max()
elif ord == Inf:
return abs(x).sum(axis=1).max()
elif ord == -1:
return abs(x).sum(axis=0).min()
elif ord == -Inf:
return abs(x).sum(axis=1).min()
elif ord in ['fro','f']:
return sqrt(add.reduce((x.conj() * x).real.ravel()))
else:
raise ValueError, "Invalid norm order for matrices."
else:
raise ValueError, "Improper number of dimensions to norm."
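# Illustrative sketch, not part of the original numpy source: two identities
# implied by the table in the docstring -- the Frobenius norm equals the
# root-of-sum-of-squares of the singular values, and the matrix inf-norm is
# the maximum absolute row sum:
#
#     >>> from numpy import linalg as LA
#     >>> b = (np.arange(9) - 4).reshape(3, 3)
#     >>> s = LA.svd(b, compute_uv=False)
#     >>> np.allclose(LA.norm(b, 'fro'), np.sqrt((s ** 2).sum()))
#     True
#     >>> LA.norm(b, np.inf) == np.abs(b).sum(axis=1).max()
#     True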
| gpl-3.0 |
altairpearl/scikit-learn | sklearn/manifold/tests/test_t_sne.py | 10 | 22275 | import sys
from sklearn.externals.six.moves import cStringIO as StringIO
import numpy as np
import scipy.sparse as sp
from sklearn.neighbors import BallTree
from sklearn.utils.testing import assert_less_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils import check_random_state
from sklearn.manifold.t_sne import _joint_probabilities
from sklearn.manifold.t_sne import _joint_probabilities_nn
from sklearn.manifold.t_sne import _kl_divergence
from sklearn.manifold.t_sne import _kl_divergence_bh
from sklearn.manifold.t_sne import _gradient_descent
from sklearn.manifold.t_sne import trustworthiness
from sklearn.manifold.t_sne import TSNE
from sklearn.manifold import _barnes_hut_tsne
from sklearn.manifold._utils import _binary_search_perplexity
from sklearn.datasets import make_blobs
from scipy.optimize import check_grad
from scipy.spatial.distance import pdist
from scipy.spatial.distance import squareform
from sklearn.metrics.pairwise import pairwise_distances
def test_gradient_descent_stops():
# Test stopping conditions of gradient descent.
class ObjectiveSmallGradient:
def __init__(self):
self.it = -1
def __call__(self, _):
self.it += 1
return (10 - self.it) / 10.0, np.array([1e-5])
def flat_function(_):
return 0.0, np.ones(1)
# Gradient norm
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=1e-5, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 1.0)
assert_equal(it, 0)
assert("gradient norm" in out)
# Error difference
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=100,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.2, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.9)
assert_equal(it, 1)
assert("error difference" in out)
# Maximum number of iterations without improvement
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
flat_function, np.zeros(1), 0, n_iter=100,
n_iter_without_progress=10, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=-1.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 11)
assert("did not make any progress" in out)
# Maximum number of iterations
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
_, error, it = _gradient_descent(
ObjectiveSmallGradient(), np.zeros(1), 0, n_iter=11,
n_iter_without_progress=100, momentum=0.0, learning_rate=0.0,
min_gain=0.0, min_grad_norm=0.0, min_error_diff=0.0, verbose=2)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert_equal(error, 0.0)
assert_equal(it, 10)
assert("Iteration 10" in out)
def test_binary_search():
# Test if the binary search finds Gaussians with desired perplexity.
random_state = check_random_state(0)
distances = random_state.randn(50, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
desired_perplexity = 25.0
P = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
P = np.maximum(P, np.finfo(np.double).eps)
mean_perplexity = np.mean([np.exp(-np.sum(P[i] * np.log(P[i])))
for i in range(P.shape[0])])
assert_almost_equal(mean_perplexity, desired_perplexity, decimal=3)
def test_binary_search_neighbors():
# Binary perplexity search approximation.
# Should be approximately equal to the slow method when we use
# all points as neighbors.
n_samples = 500
desired_perplexity = 25.0
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
P1 = _binary_search_perplexity(distances, None, desired_perplexity,
verbose=0)
# Test that when we use all the neighbors the results are identical
k = n_samples
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2 = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
assert_array_almost_equal(P1, P2, decimal=4)
# Test that the highest P_ij are the same when few neighbors are used
for k in np.linspace(80, n_samples, 10):
k = int(k)
topn = k * 10 # check the top 10 *k entries out of k * k entries
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
P2k = _binary_search_perplexity(distances, neighbors_nn,
desired_perplexity, verbose=0)
idx = np.argsort(P1.ravel())[::-1]
P1top = P1.ravel()[idx][:topn]
P2top = P2k.ravel()[idx][:topn]
assert_array_almost_equal(P1top, P2top, decimal=2)
def test_binary_perplexity_stability():
# Binary perplexity search should be stable.
# The binary_search_perplexity had a bug wherein the P array
# was uninitialized, leading to sporadically failing tests.
k = 10
n_samples = 100
random_state = check_random_state(0)
distances = random_state.randn(n_samples, 2).astype(np.float32)
# Distances shouldn't be negative
distances = np.abs(distances.dot(distances.T))
np.fill_diagonal(distances, 0.0)
last_P = None
neighbors_nn = np.argsort(distances, axis=1)[:, :k].astype(np.int64)
for _ in range(100):
P = _binary_search_perplexity(distances.copy(), neighbors_nn.copy(),
3, verbose=0)
P1 = _joint_probabilities_nn(distances, neighbors_nn, 3, verbose=0)
if last_P is None:
last_P = P
last_P1 = P1
else:
assert_array_almost_equal(P, last_P, decimal=4)
assert_array_almost_equal(P1, last_P1, decimal=4)
def test_gradient():
# Test gradient of Kullback-Leibler divergence.
random_state = check_random_state(0)
n_samples = 50
n_features = 2
n_components = 2
alpha = 1.0
distances = random_state.randn(n_samples, n_features).astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
X_embedded = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, desired_perplexity=25.0,
verbose=0)
def fun(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[0]
def grad(params):
return _kl_divergence(params, P, alpha, n_samples, n_components)[1]
assert_almost_equal(check_grad(fun, grad, X_embedded.ravel()), 0.0,
decimal=5)
def test_trustworthiness():
# Test trustworthiness score.
random_state = check_random_state(0)
# Affine transformation
X = random_state.randn(100, 2)
assert_equal(trustworthiness(X, 5.0 + X / 10.0), 1.0)
# Randomly shuffled
X = np.arange(100).reshape(-1, 1)
X_embedded = X.copy()
random_state.shuffle(X_embedded)
assert_less(trustworthiness(X, X_embedded), 0.6)
# Completely different
X = np.arange(5).reshape(-1, 1)
X_embedded = np.array([[0], [2], [4], [1], [3]])
assert_almost_equal(trustworthiness(X, X_embedded, n_neighbors=1), 0.2)
def test_preserve_trustworthiness_approximately():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
# The Barnes-Hut approximation uses a different method to estimate
# P_ij using only a number of nearest neighbors instead of all
# points (so that k = 3 * perplexity). As a result we set the
# perplexity=5, so that the number of neighbors is 5%.
n_components = 2
methods = ['exact', 'barnes_hut']
X = random_state.randn(100, n_components).astype(np.float32)
for init in ('random', 'pca'):
for method in methods:
tsne = TSNE(n_components=n_components, perplexity=50,
learning_rate=100.0, init=init, random_state=0,
method=method)
X_embedded = tsne.fit_transform(X)
T = trustworthiness(X, X_embedded, n_neighbors=1)
assert_almost_equal(T, 1.0, decimal=1)
def test_optimization_minimizes_kl_divergence():
"""t-SNE should give a lower KL divergence with more iterations."""
random_state = check_random_state(0)
X, _ = make_blobs(n_features=3, random_state=random_state)
kl_divergences = []
for n_iter in [200, 250, 300]:
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
n_iter=n_iter, random_state=0)
tsne.fit_transform(X)
kl_divergences.append(tsne.kl_divergence_)
assert_less_equal(kl_divergences[1], kl_divergences[0])
assert_less_equal(kl_divergences[2], kl_divergences[1])
def test_fit_csr_matrix():
# X can be a sparse matrix.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_components=2, perplexity=10, learning_rate=100.0,
random_state=0, method='exact')
X_embedded = tsne.fit_transform(X_csr)
assert_almost_equal(trustworthiness(X_csr, X_embedded, n_neighbors=1), 1.0,
decimal=1)
def test_preserve_trustworthiness_approximately_with_precomputed_distances():
# Nearest neighbors should be preserved approximately.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
D = squareform(pdist(X), "sqeuclidean")
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
metric="precomputed", random_state=0, verbose=0)
X_embedded = tsne.fit_transform(D)
assert_almost_equal(trustworthiness(D, X_embedded, n_neighbors=1,
precomputed=True), 1.0, decimal=1)
def test_early_exaggeration_too_small():
# Early exaggeration factor must be >= 1.
tsne = TSNE(early_exaggeration=0.99)
assert_raises_regexp(ValueError, "early_exaggeration .*",
tsne.fit_transform, np.array([[0.0]]))
def test_too_few_iterations():
# Number of gradient descent iterations must be at least 200.
tsne = TSNE(n_iter=199)
assert_raises_regexp(ValueError, "n_iter .*", tsne.fit_transform,
np.array([[0.0]]))
def test_non_square_precomputed_distances():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed")
assert_raises_regexp(ValueError, ".* square distance matrix",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_init_not_available():
# 'init' must be 'pca', 'random', or numpy array.
m = "'init' must be 'pca', 'random', or a numpy array"
assert_raises_regexp(ValueError, m, TSNE, init="not available")
def test_init_ndarray():
# Initialize TSNE with ndarray and test fit
tsne = TSNE(init=np.zeros((100, 2)))
X_embedded = tsne.fit_transform(np.ones((100, 5)))
assert_array_equal(np.zeros((100, 2)), X_embedded)
def test_init_ndarray_precomputed():
# Initialize TSNE with ndarray and metric 'precomputed'
# Make sure no FutureWarning is thrown from _fit
tsne = TSNE(init=np.zeros((100, 2)), metric="precomputed")
tsne.fit(np.zeros((100, 100)))
def test_distance_not_available():
# 'metric' must be valid.
tsne = TSNE(metric="not available")
assert_raises_regexp(ValueError, "Unknown metric not available.*",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_pca_initialization_not_compatible_with_precomputed_kernel():
# Precomputed distance matrices must be square matrices.
tsne = TSNE(metric="precomputed", init="pca")
assert_raises_regexp(ValueError, "The parameter init=\"pca\" cannot be "
"used with metric=\"precomputed\".",
tsne.fit_transform, np.array([[0.0], [1.0]]))
def test_answer_gradient_two_points():
# Test the tree with only a single set of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0]])
pos_output = np.array([[-4.961291e-05, -1.072243e-04],
[9.259460e-05, 2.702024e-04]])
neighbors = np.array([[1],
[0]])
grad_output = np.array([[-2.37012478e-05, -6.29044398e-05],
[2.37012478e-05, 6.29044398e-05]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_answer_gradient_four_points():
# Four points tests the tree with multiple levels of children.
#
# These tests & answers have been checked against the reference
# implementation by LvdM.
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[5.81128448e-05, -7.78033454e-06],
[-5.81526851e-05, 7.80976444e-06],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output)
def test_skip_num_points_gradient():
# Test the kwargs option skip_num_points.
#
    # skip_num_points should make it such that the Barnes-Hut gradient
    # is not calculated for indices below skip_num_points.
# Aside from skip_num_points=2 and the first two gradient rows
# being set to zero, these data points are the same as in
# test_answer_gradient_four_points()
pos_input = np.array([[1.0, 0.0], [0.0, 1.0],
[5.0, 2.0], [7.3, 2.2]])
pos_output = np.array([[6.080564e-05, -7.120823e-05],
[-1.718945e-04, -4.000536e-05],
[-2.271720e-04, 8.663310e-05],
[-1.032577e-04, -3.582033e-05]])
neighbors = np.array([[1, 2, 3],
[0, 2, 3],
[1, 0, 3],
[1, 2, 0]])
grad_output = np.array([[0.0, 0.0],
[0.0, 0.0],
[4.24275173e-08, -3.69569698e-08],
[-2.58720939e-09, 7.52706374e-09]])
_run_answer_test(pos_input, pos_output, neighbors, grad_output,
False, 0.1, 2)
def _run_answer_test(pos_input, pos_output, neighbors, grad_output,
verbose=False, perplexity=0.1, skip_num_points=0):
distances = pairwise_distances(pos_input).astype(np.float32)
args = distances, perplexity, verbose
pos_output = pos_output.astype(np.float32)
neighbors = neighbors.astype(np.int64)
pij_input = _joint_probabilities(*args)
pij_input = squareform(pij_input).astype(np.float32)
grad_bh = np.zeros(pos_output.shape, dtype=np.float32)
_barnes_hut_tsne.gradient(pij_input, pos_output, neighbors,
                              grad_bh, 0.5, 2, 1,
                              skip_num_points=skip_num_points)
assert_array_almost_equal(grad_bh, grad_output, decimal=4)
def test_verbose():
# Verbose options write to stdout.
random_state = check_random_state(0)
tsne = TSNE(verbose=2)
X = random_state.randn(5, 2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
tsne.fit_transform(X)
finally:
out = sys.stdout.getvalue()
sys.stdout.close()
sys.stdout = old_stdout
assert("[t-SNE]" in out)
assert("Computing pairwise distances" in out)
assert("Computed conditional probabilities" in out)
assert("Mean sigma" in out)
assert("Finished" in out)
assert("early exaggeration" in out)
assert("Finished" in out)
def test_chebyshev_metric():
# t-SNE should allow metrics that cannot be squared (issue #3526).
random_state = check_random_state(0)
tsne = TSNE(metric="chebyshev")
X = random_state.randn(5, 2)
tsne.fit_transform(X)
def test_reduction_to_one_component():
# t-SNE should allow reduction to one component (issue #4154).
random_state = check_random_state(0)
tsne = TSNE(n_components=1)
X = random_state.randn(5, 2)
X_embedded = tsne.fit(X).embedding_
assert(np.all(np.isfinite(X_embedded)))
def test_no_sparse_on_barnes_hut():
# No sparse matrices allowed on Barnes-Hut.
random_state = check_random_state(0)
X = random_state.randn(100, 2)
X[(np.random.randint(0, 100, 50), np.random.randint(0, 2, 50))] = 0.0
X_csr = sp.csr_matrix(X)
tsne = TSNE(n_iter=199, method='barnes_hut')
assert_raises_regexp(TypeError, "A sparse matrix was.*",
tsne.fit_transform, X_csr)
def test_64bit():
# Ensure 64bit arrays are handled correctly.
random_state = check_random_state(0)
methods = ['barnes_hut', 'exact']
for method in methods:
for dt in [np.float32, np.float64]:
X = random_state.randn(100, 2).astype(dt)
tsne = TSNE(n_components=2, perplexity=2, learning_rate=100.0,
random_state=0, method=method)
tsne.fit_transform(X)
def test_barnes_hut_angle():
# When Barnes-Hut's angle=0 this corresponds to the exact method.
angle = 0.0
perplexity = 10
n_samples = 100
for n_components in [2, 3]:
n_features = 5
degrees_of_freedom = float(n_components - 1.0)
random_state = check_random_state(0)
distances = random_state.randn(n_samples, n_features)
distances = distances.astype(np.float32)
distances = distances.dot(distances.T)
np.fill_diagonal(distances, 0.0)
params = random_state.randn(n_samples, n_components)
P = _joint_probabilities(distances, perplexity, False)
kl, gradex = _kl_divergence(params, P, degrees_of_freedom, n_samples,
n_components)
k = n_samples - 1
bt = BallTree(distances)
distances_nn, neighbors_nn = bt.query(distances, k=k + 1)
neighbors_nn = neighbors_nn[:, 1:]
Pbh = _joint_probabilities_nn(distances, neighbors_nn,
perplexity, False)
kl, gradbh = _kl_divergence_bh(params, Pbh, neighbors_nn,
degrees_of_freedom, n_samples,
n_components, angle=angle,
skip_num_points=0, verbose=False)
assert_array_almost_equal(Pbh, P, decimal=5)
assert_array_almost_equal(gradex, gradbh, decimal=5)
def test_quadtree_similar_point():
# Introduce a point into a quad tree where a similar point already exists.
# Test will hang if it doesn't complete.
Xs = []
# check the case where points are actually different
Xs.append(np.array([[1, 2], [3, 4]], dtype=np.float32))
# check the case where points are the same on X axis
Xs.append(np.array([[1.0, 2.0], [1.0, 3.0]], dtype=np.float32))
# check the case where points are arbitrarily close on X axis
Xs.append(np.array([[1.00001, 2.0], [1.00002, 3.0]], dtype=np.float32))
# check the case where points are the same on Y axis
Xs.append(np.array([[1.0, 2.0], [3.0, 2.0]], dtype=np.float32))
# check the case where points are arbitrarily close on Y axis
Xs.append(np.array([[1.0, 2.00001], [3.0, 2.00002]], dtype=np.float32))
# check the case where points are arbitrarily close on both axes
Xs.append(np.array([[1.00001, 2.00001], [1.00002, 2.00002]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - x axis
Xs.append(np.array([[1, 0.0003817754041], [2, 0.0003817753750]],
dtype=np.float32))
# check the case where points are arbitrarily close on both axes
# close to machine epsilon - y axis
Xs.append(np.array([[0.0003817754041, 1.0], [0.0003817753750, 2.0]],
dtype=np.float32))
for X in Xs:
counts = np.zeros(3, dtype='int64')
_barnes_hut_tsne.check_quadtree(X, counts)
m = "Tree consistency failed: unexpected number of points at root node"
assert_equal(counts[0], counts[1], m)
m = "Tree consistency failed: unexpected number of points on the tree"
assert_equal(counts[0], counts[2], m)
def test_index_offset():
# Make sure translating between 1D and N-D indices are preserved
assert_equal(_barnes_hut_tsne.test_index2offset(), 1)
assert_equal(_barnes_hut_tsne.test_index_offset(), 1)
| bsd-3-clause |
shogun-toolbox/shogun | examples/undocumented/python/graphical/cluster_kpp.py | 2 | 2115 | """Graphical example illustrating improvement of convergence of KMeans
when cluster centers are initialized by the KMeans++ algorithm.
In this example, 4 vertices of a rectangle are chosen: (0,0) (0,100) (10,0) (10,100).
There are 500 points normally distributed about each vertex.
Therefore, the ideal cluster centers for k=2 are the global minima, i.e. (5,0) and (5,100).
Written (W) 2014 Parijat Mazumdar
"""
import matplotlib.pyplot as plt
import numpy as np
import shogun as sg
k = 2
num = 500
d1 = np.concatenate((np.random.randn(1, num), 10. * np.random.randn(1, num)), 0)
d2 = np.concatenate((np.random.randn(1, num), 10. * np.random.randn(1, num)), 0) + np.array([[10.], [0.]])
d3 = np.concatenate((np.random.randn(1, num), 10. * np.random.randn(1, num)), 0) + np.array([[0.], [100.]])
d4 = np.concatenate((np.random.randn(1, num), 10. * np.random.randn(1, num)), 0) + np.array([[10.], [100.]])
traindata = np.concatenate((d1, d2, d3, d4), 1)
feat_train = sg.create_features(traindata)
distance = sg.create_distance('EuclideanDistance')
distance.init(feat_train, feat_train)
kmeans = sg.create_machine('KMeans', k=k, distance=distance, kmeanspp=True)
kmeans.train()
centerspp = kmeans.get('cluster_centers')
radipp = kmeans.get('radiuses')
kmeans = sg.create_machine('KMeans', k=k, distance=distance)
kmeans.train()
centers = kmeans.get('cluster_centers')
radi = kmeans.get('radiuses')
plt.figure('KMeans with KMeans++')
plt.plot(d1[0], d1[1], 'rx')
plt.plot(d2[0], d2[1], 'bx')
plt.plot(d3[0], d3[1], 'gx')
plt.plot(d4[0], d4[1], 'cx')
plt.plot(centerspp[0, :], centerspp[1, :], 'ko')
for i in range(k):
t = np.linspace(0, 2 * np.pi, 100)
plt.plot(radipp[i] * np.cos(t) + centerspp[0, i], radipp[i] * np.sin(t) + centerspp[1, i], 'k-')
plt.figure('KMeans without KMeans++')
plt.plot(d1[0], d1[1], 'rx')
plt.plot(d2[0], d2[1], 'bx')
plt.plot(d3[0], d3[1], 'gx')
plt.plot(d4[0], d4[1], 'cx')
plt.plot(centers[0, :], centers[1, :], 'ko')
for i in range(k):
t = np.linspace(0, 2 * np.pi, 100)
plt.plot(radi[i] * np.cos(t) + centers[0, i], radi[i] * np.sin(t) + centers[1, i], 'k-')
plt.show()
| bsd-3-clause |
deepakantony/sms-tools | lectures/06-Harmonic-model/plots-code/monophonic-polyphonic.py | 3 | 2250 | import numpy as np
import matplotlib.pyplot as plt
from scipy.signal import hamming, triang, blackmanharris
import sys, os, functools, time
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../software/models/'))
import sineModel as SM
import stft as STFT
import utilFunctions as UF
plt.figure(1, figsize=(9, 6))
plt.subplot(211)
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/carnatic.wav'))
x1 = x[int(4.35 * fs):]
w = np.blackman(1301)
N = 2048
H = 250
t = -70
minSineDur = .02
maxnSines = 150
freqDevOffset = 20
freqDevSlope = 0.02
mX, pX = STFT.stftAnal(x, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
maxplotfreq = 3000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sine frequencies (carnatic.wav)')
plt.subplot(212)
(fs, x) = UF.wavread(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../../sounds/vignesh.wav'))
w = np.blackman(1101)
N = 2048
H = 250
t = -90
minSineDur = .1
maxnSines = 200
freqDevOffset = 20
freqDevSlope = 0.02
mX, pX = STFT.stftAnal(x, w, N, H)
tfreq, tmag, tphase = SM.sineModelAnal(x, fs, w, N, H, t, maxnSines, minSineDur, freqDevOffset, freqDevSlope)
maxplotfreq = 3000.0
maxplotbin = int(N*maxplotfreq/fs)
numFrames = int(mX[:,0].size)
frmTime = H*np.arange(numFrames)/float(fs)
binFreq = np.arange(maxplotbin+1)*float(fs)/N
plt.pcolormesh(frmTime, binFreq, np.transpose(mX[:,:maxplotbin+1]))
plt.autoscale(tight=True)
tracks = tfreq*np.less(tfreq, maxplotfreq)
tracks[tracks<=0] = np.nan
plt.plot(frmTime, tracks, color='k', lw=1.5)
plt.autoscale(tight=True)
plt.title('mX + sine frequencies (vignesh.wav)')
plt.tight_layout()
plt.savefig('monophonic-polyphonic.png')
plt.show() | agpl-3.0 |
nspi/vbcg | src/gui_windowSignal.py | 1 | 2909 | #!/usr/bin/env python
# -*- coding: ascii -*-
"""gui_windowSignal.py - GUI element: frame that displays signal"""
import Tkinter as Tk
import matplotlib
from Queue import LifoQueue
from gui_signalPlotter import GuiSignalPlotter
from gui_signalProcessor import GuiSignalProcessor
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure as Mat_figure
matplotlib.use('Tkagg')
class WindowSignal(Tk.Frame):
"""In this frame the signal extracted from the video stream is shown"""
def __init__(self, parent, tk_root, thread, cam, statusbar, video_display):
global root
self.root = tk_root
# Store camera object
self.cameraInstance = cam
# Store statusbar object
self.statusbar = statusbar
# Store video display object
self.video_display = video_display
# Create GUI
self.__create_gui()
def __create_gui(self):
# Add subplots
self.figure = Mat_figure(figsize=(5, 3), dpi=100)
self.subplotTop = self.figure.add_subplot(211)
self.subplotBottom = self.figure.add_subplot(212)
# Add labels
self.subplotTop.set_xlabel('Frames')
self.subplotBottom.set_xlabel('Hz')
# Change font size
matplotlib.rcParams.update({'font.size': 10})
# Create canvas
self.canvas = FigureCanvasTkAgg(self.figure, master=self.root)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
# We store the data that is interchanged from GuiSignalProcessor to GuiSignalPlotter in a queue
self.dataQueue = LifoQueue()
# Create thread that displays signal
self.signalPlotThread = GuiSignalPlotter(self.root, self.cameraInstance, self.figure, self.canvas,
self.subplotTop, self.subplotBottom, self.statusbar,
self.video_display, self.dataQueue)
# Create thread that processes signal
self.signalProcessorThread = GuiSignalProcessor(self.root, self.cameraInstance, self.figure, self.canvas,
self.subplotTop, self.subplotBottom, self.statusbar,
self.video_display, self.dataQueue)
# Start both threads
self.signalPlotThread.start()
self.signalProcessorThread.start()
def get_signal_processor(self):
return self.signalProcessorThread
def get_signal_plotter(self):
return self.signalPlotThread
def clear(self):
self.figure.clf()
def closeThreads(self):
"""Closes signal plotting and processing threads"""
self.signalPlotThread.close_signal_plotter_thread()
self.signalProcessorThread.close_signal_processor_thread()
| gpl-3.0 |
alexriss/imagex | imagex/colormap.py | 1 | 1543 | import matplotlib.colors
cdict = {'red': ((0.0, 0.0, 0.0),(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),(1.0, 1.0, 1.0),)}
greys_linear = matplotlib.colors.LinearSegmentedColormap('greys_linear', cdict) # always have trouble with the brightness values
# Alex's custom colormaps
cdict_BlueA = {'red': ((0.0, 0.0, 0.0),(0.25, 0.094, 0.094),(0.67, 0.353, 0.353),(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),(0.25, 0.137, 0.137),(0.67, 0.537, 0.537),(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),(0.25, 0.2, 0.2),(0.67, 0.749, 0.749),(1.0, 1.0, 1.0))}
# little brighter version of BlueA
cdict_BlueAb = {'red': ((0.0, 0.0, 0.0),(0.22, 0.094, 0.094),(0.60, 0.353, 0.353),(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),(0.22, 0.137, 0.137),(0.60, 0.537, 0.537),(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),(0.22, 0.2, 0.2),(0.60, 0.749, 0.749),(1.0, 1.0, 1.0))}
cdict_BlueA2 = {'red': ((0.0, 0.0, 0.0), (0.25, 0.055, 0.055),(0.67, 0.212, 0.212),(1.0, 1.0, 1.0)),
'green': ((0.0, 0.0, 0.0),(0.25, 0.106, 0.106),(0.67, 0.455, 0.455),(1.0, 1.0, 1.0)),
'blue': ((0.0, 0.0, 0.0),(0.25, 0.231, 0.231),(0.67, 0.749, 0.749),(1.0, 1.0, 1.0))}
BlueA = matplotlib.colors.LinearSegmentedColormap('BlueA', cdict_BlueA)
BlueAb = matplotlib.colors.LinearSegmentedColormap('BlueAb', cdict_BlueAb)
BlueA2 = matplotlib.colors.LinearSegmentedColormap('BlueA2', cdict_BlueA2) | gpl-3.0 |
siutanwong/scikit-learn | examples/tree/plot_tree_regression.py | 206 | 1476 | """
===================================================================
Decision Tree Regression
===================================================================
A 1D regression with decision tree.
The :ref:`decision trees <tree>` is
used to fit a sine curve with addition noisy observation. As a result, it
learns local linear regressions approximating the sine curve.
We can see that if the maximum depth of the tree (controlled by the
`max_depth` parameter) is set too high, the decision trees learn too fine
details of the training data and learn from the noise, i.e. they overfit.
"""
print(__doc__)
# Import the necessary modules and libraries
import numpy as np
from sklearn.tree import DecisionTreeRegressor
import matplotlib.pyplot as plt
# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(5 * rng.rand(80, 1), axis=0)
y = np.sin(X).ravel()
y[::5] += 3 * (0.5 - rng.rand(16))
# Fit regression model
regr_1 = DecisionTreeRegressor(max_depth=2)
regr_2 = DecisionTreeRegressor(max_depth=5)
regr_1.fit(X, y)
regr_2.fit(X, y)
# Predict
X_test = np.arange(0.0, 5.0, 0.01)[:, np.newaxis]
y_1 = regr_1.predict(X_test)
y_2 = regr_2.predict(X_test)
# Plot the results
plt.figure()
plt.scatter(X, y, c="k", label="data")
plt.plot(X_test, y_1, c="g", label="max_depth=2", linewidth=2)
plt.plot(X_test, y_2, c="r", label="max_depth=5", linewidth=2)
plt.xlabel("data")
plt.ylabel("target")
plt.title("Decision Tree Regression")
plt.legend()
plt.show()
| bsd-3-clause |
Richert/BrainNetworks | BasalGanglia/stn_gpe_worker.py | 1 | 5607 | # my_cgs_worker.py
from pyrates.utility.grid_search import ClusterWorkerTemplate
import os
from pandas import DataFrame
from pyrates.utility import grid_search, welch
import numpy as np
from copy import deepcopy
class MinimalWorker(ClusterWorkerTemplate):
def worker_postprocessing(self, **kwargs):
self.processed_results = DataFrame(data=None, columns=self.results.columns)
for idx, data in self.results.iteritems():
self.processed_results.loc[:, idx] = data * 1e3
self.processed_results.index = self.results.index * 1e-3
class ExtendedWorker(MinimalWorker):
def worker_gs(self, *args, **kwargs):
kwargs_tmp = deepcopy(kwargs)
conditions = kwargs_tmp.pop('conditions')
model_vars = kwargs_tmp.pop('model_vars')
param_grid = kwargs_tmp.pop('param_grid')
results, gene_ids = [], param_grid.index
for c_dict in conditions:
for key in model_vars:
if key in c_dict and type(c_dict[key]) is float:
c_dict[key] = np.zeros((param_grid.shape[0],)) + c_dict[key]
elif key in param_grid:
c_dict[key] = param_grid[key]
param_grid_tmp = DataFrame.from_dict(c_dict)
f = terminate_at_threshold
f.terminal = True
r, self.result_map, sim_time = grid_search(*args, param_grid=param_grid_tmp, events=f, **deepcopy(kwargs_tmp))
r = r.droplevel(2, axis=1)
if any(r.values[-1, :] > 10.0):
invalid_genes = []
for id in param_grid.index:
if r.loc[r.index[-1], ('r_e', f'circuit_{id}')] > 10.0 or \
r.loc[r.index[-1], ('r_i', f'circuit_{id}')] > 10.0:
invalid_genes.append(id)
param_grid.drop(index=id, inplace=True)
kwargs['param_grid'] = param_grid
sim_time = self.worker_gs(*args, **kwargs)
for r in self.results:
for id in invalid_genes:
r[('r_e', f'circuit_{id}')] = np.zeros((r.shape[0],)) + 1e6
r[('r_i', f'circuit_{id}')] = np.zeros((r.shape[0],)) + 1e6
return sim_time
else:
results.append(r)
self.results = results
return sim_time
def worker_postprocessing(self, **kwargs):
kwargs_tmp = kwargs.copy()
param_grid = kwargs_tmp.pop('param_grid')
freq_targets = kwargs_tmp.pop('freq_targets')
targets = kwargs_tmp.pop('y')
self.processed_results = DataFrame(data=None, columns=['fitness', 'frequency', 'power', 'r_e', 'r_i'])
# calculate fitness
for gene_id in param_grid.index:
outputs, freq, pow = [], [], []
for i, r in enumerate(self.results):
r = r * 1e3
r.index = r.index * 1e-3
cutoff = r.index[-1]*0.1
mean_re = np.mean(r['r_e'][f'circuit_{gene_id}'].loc[cutoff:])
mean_ri = np.mean(r['r_i'][f'circuit_{gene_id}'].loc[cutoff:])
outputs.append([mean_re, mean_ri])
tmin = 0.0 if i == 4 else cutoff
psds, freqs = welch(r['r_i'][f'circuit_{gene_id}'], tmin=tmin, fmin=5.0, fmax=200.0)
freq.append(freqs)
pow.append(psds)
dist1 = fitness(outputs, targets)
dist2 = analyze_oscillations(freq_targets, freq, pow)
idx = np.argmax(pow[-1][0])
r = self.results[0]
cutoff = r.index[-1]*0.1
self.processed_results.loc[gene_id, 'fitness'] = dist1+dist2
self.processed_results.loc[gene_id, 'frequency'] = freq[-1][idx]
self.processed_results.loc[gene_id, 'power'] = pow[-1][0][idx]
self.processed_results.loc[gene_id, 'r_e'] = np.mean(r['r_e'][f'circuit_{gene_id}'].loc[cutoff:])*1e3
self.processed_results.loc[gene_id, 'r_i'] = np.mean(r['r_i'][f'circuit_{gene_id}'].loc[cutoff:])*1e3
def fitness(y, t):
y = np.asarray(y).flatten()
t = np.asarray(t).flatten()
diff = np.asarray([0.0 if np.isnan(t_tmp) else y_tmp - t_tmp for y_tmp, t_tmp in zip(y, t)])
return np.sqrt(np.mean(diff**2))
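# Illustrative note, not part of the original script: NaN entries in the
# target vector act as "don't care" values -- their contribution to the RMSE
# is forced to zero, e.g. fitness([1.0, 2.0], [1.5, np.nan]) reduces to
# np.sqrt(((1.0 - 1.5) ** 2 + 0.0) / 2).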
def analyze_oscillations(freq_targets, freqs, pows):
dist = []
for i, (t, f, p) in enumerate(zip(freq_targets, freqs, pows)):
if type(t) is list:
f_tmp = f[np.argmax(p)]
dist.append(f_tmp)
if f_tmp < t[0]:
freq_targets[i] = t[0]
elif f_tmp > t[1]:
freq_targets[i] = t[1]
else:
freq_targets[i] = f_tmp
elif np.isnan(t):
dist.append(0.0)
elif t:
f_tmp = f[np.argmax(p)]
dist.append(f_tmp)
else:
p_tmp = np.max(p)
dist.append(p_tmp)
return fitness(dist, freq_targets)
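# Descriptive note (an assumption, not stated in the original script): the
# function below follows the scipy.integrate.solve_ivp event convention --
# it is passed to grid_search via `events=f` with `f.terminal = True` in
# worker_gs above, so integration presumably stops once the population RMS
# crosses the threshold.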
def terminate_at_threshold(t, y, *args):
threshold = 10000.0
return np.sqrt(np.mean(y**2)) - threshold
if __name__ == "__main__":
cgs_worker = ExtendedWorker()
#cgs_worker.worker_init()
cgs_worker.worker_init(
config_file="/nobackup/spanien1/rgast/PycharmProjects/BrainNetworks/BasalGanglia/stn_gpe_optimization3/Config/DefaultConfig_0.yaml",
subgrid="/nobackup/spanien1/rgast/PycharmProjects/BrainNetworks/BasalGanglia/stn_gpe_optimization3/Grids/Subgrids/DefaultGrid_14/spanien/spanien_Subgrid_0.h5",
result_file="~/my_result.h5",
build_dir=os.getcwd()
)
| apache-2.0 |
Midafi/scikit-image | skimage/viewer/viewers/core.py | 33 | 13265 | """
ImageViewer class for viewing and interacting with images.
"""
import numpy as np
from ... import io, img_as_float
from ...util.dtype import dtype_range
from ...exposure import rescale_intensity
from ..qt import QtWidgets, Qt, Signal
from ..widgets import Slider
from ..utils import (dialogs, init_qtapp, figimage, start_qtapp,
update_axes_image)
from ..utils.canvas import BlitManager, EventManager
from ..plugins.base import Plugin
__all__ = ['ImageViewer', 'CollectionViewer']
def mpl_image_to_rgba(mpl_image):
"""Return RGB image from the given matplotlib image object.
Each image in a matplotlib figure has its own colormap and normalization
function. Return RGBA (RGB + alpha channel) image with float dtype.
Parameters
----------
mpl_image : matplotlib.image.AxesImage object
The image being converted.
Returns
-------
img : array of float, shape (M, N, 4)
An image of float values in [0, 1].
"""
image = mpl_image.get_array()
if image.ndim == 2:
input_range = (mpl_image.norm.vmin, mpl_image.norm.vmax)
image = rescale_intensity(image, in_range=input_range)
# cmap complains on bool arrays
image = mpl_image.cmap(img_as_float(image))
elif image.ndim == 3 and image.shape[2] == 3:
# add alpha channel if it's missing
image = np.dstack((image, np.ones_like(image)))
return img_as_float(image)
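# Illustrative note, not part of the original scikit-image source: typical use
# is converting whatever an Axes currently displays into a plain float RGBA
# array, e.g. ``overlay = mpl_image_to_rgba(ax.images[0])``, as done in
# ``ImageViewer.save_to_file`` below.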
class ImageViewer(QtWidgets.QMainWindow):
"""Viewer for displaying images.
This viewer is a simple container object that holds a Matplotlib axes
for showing images. `ImageViewer` doesn't subclass the Matplotlib axes (or
figure) because of the high probability of name collisions.
Subclasses and plugins will likely extend the `update_image` method to add
custom overlays or filter the displayed image.
Parameters
----------
image : array
Image being viewed.
Attributes
----------
canvas, fig, ax : Matplotlib canvas, figure, and axes
Matplotlib canvas, figure, and axes used to display image.
image : array
Image being viewed. Setting this value will update the displayed frame.
original_image : array
Plugins typically operate on (but don't change) the *original* image.
plugins : list
List of attached plugins.
Examples
--------
>>> from skimage import data
>>> image = data.coins()
>>> viewer = ImageViewer(image) # doctest: +SKIP
>>> viewer.show() # doctest: +SKIP
"""
dock_areas = {'top': Qt.TopDockWidgetArea,
'bottom': Qt.BottomDockWidgetArea,
'left': Qt.LeftDockWidgetArea,
'right': Qt.RightDockWidgetArea}
# Signal that the original image has been changed
original_image_changed = Signal(np.ndarray)
def __init__(self, image, useblit=True):
# Start main loop
init_qtapp()
super(ImageViewer, self).__init__()
#TODO: Add ImageViewer to skimage.io window manager
self.setAttribute(Qt.WA_DeleteOnClose)
self.setWindowTitle("Image Viewer")
self.file_menu = QtWidgets.QMenu('&File', self)
self.file_menu.addAction('Open file', self.open_file,
Qt.CTRL + Qt.Key_O)
self.file_menu.addAction('Save to file', self.save_to_file,
Qt.CTRL + Qt.Key_S)
self.file_menu.addAction('Quit', self.close,
Qt.CTRL + Qt.Key_Q)
self.menuBar().addMenu(self.file_menu)
self.main_widget = QtWidgets.QWidget()
self.setCentralWidget(self.main_widget)
if isinstance(image, Plugin):
plugin = image
image = plugin.filtered_image
plugin.image_changed.connect(self._update_original_image)
# When plugin is started, start
plugin._started.connect(self._show)
self.fig, self.ax = figimage(image)
self.canvas = self.fig.canvas
self.canvas.setParent(self)
self.ax.autoscale(enable=False)
self._tools = []
self.useblit = useblit
if useblit:
self._blit_manager = BlitManager(self.ax)
self._event_manager = EventManager(self.ax)
self._image_plot = self.ax.images[0]
self._update_original_image(image)
self.plugins = []
self.layout = QtWidgets.QVBoxLayout(self.main_widget)
self.layout.addWidget(self.canvas)
status_bar = self.statusBar()
self.status_message = status_bar.showMessage
sb_size = status_bar.sizeHint()
cs_size = self.canvas.sizeHint()
self.resize(cs_size.width(), cs_size.height() + sb_size.height())
self.connect_event('motion_notify_event', self._update_status_bar)
def __add__(self, plugin):
"""Add plugin to ImageViewer"""
plugin.attach(self)
self.original_image_changed.connect(plugin._update_original_image)
if plugin.dock:
location = self.dock_areas[plugin.dock]
dock_location = Qt.DockWidgetArea(location)
dock = QtWidgets.QDockWidget()
dock.setWidget(plugin)
dock.setWindowTitle(plugin.name)
self.addDockWidget(dock_location, dock)
horiz = (self.dock_areas['left'], self.dock_areas['right'])
dimension = 'width' if location in horiz else 'height'
self._add_widget_size(plugin, dimension=dimension)
return self
def _add_widget_size(self, widget, dimension='width'):
widget_size = widget.sizeHint()
viewer_size = self.frameGeometry()
dx = dy = 0
if dimension == 'width':
dx = widget_size.width()
elif dimension == 'height':
dy = widget_size.height()
w = viewer_size.width()
h = viewer_size.height()
self.resize(w + dx, h + dy)
def open_file(self, filename=None):
"""Open image file and display in viewer."""
if filename is None:
filename = dialogs.open_file_dialog()
if filename is None:
return
image = io.imread(filename)
self._update_original_image(image)
def update_image(self, image):
"""Update displayed image.
This method can be overridden or extended in subclasses and plugins to
react to image changes.
"""
self._update_original_image(image)
def _update_original_image(self, image):
self.original_image = image # update saved image
self.image = image.copy() # update displayed image
self.original_image_changed.emit(image)
def save_to_file(self, filename=None):
"""Save current image to file.
The current behavior is not ideal: It saves the image displayed on
screen, so all images will be converted to RGB, and the image size is
not preserved (resizing the viewer window will alter the size of the
saved image).
"""
if filename is None:
filename = dialogs.save_file_dialog()
if filename is None:
return
if len(self.ax.images) == 1:
io.imsave(filename, self.image)
else:
underlay = mpl_image_to_rgba(self.ax.images[0])
overlay = mpl_image_to_rgba(self.ax.images[1])
alpha = overlay[:, :, 3]
# alpha can be set by channel of array or by a scalar value.
# Prefer the alpha channel, but fall back to scalar value.
if np.all(alpha == 1):
alpha = np.ones_like(alpha) * self.ax.images[1].get_alpha()
alpha = alpha[:, :, np.newaxis]
composite = (overlay[:, :, :3] * alpha +
underlay[:, :, :3] * (1 - alpha))
io.imsave(filename, composite)
def closeEvent(self, event):
self.close()
def _show(self, x=0):
self.move(x, 0)
for p in self.plugins:
p.show()
super(ImageViewer, self).show()
self.activateWindow()
self.raise_()
def show(self, main_window=True):
"""Show ImageViewer and attached plugins.
This behaves much like `matplotlib.pyplot.show` and `QWidget.show`.
"""
self._show()
if main_window:
start_qtapp()
return [p.output() for p in self.plugins]
def redraw(self):
if self.useblit:
self._blit_manager.redraw()
else:
self.canvas.draw_idle()
@property
def image(self):
return self._img
@image.setter
def image(self, image):
self._img = image
update_axes_image(self._image_plot, image)
# update display (otherwise image doesn't fill the canvas)
h, w = image.shape[:2]
self.ax.set_xlim(0, w)
self.ax.set_ylim(h, 0)
# update color range
clim = dtype_range[image.dtype.type]
if clim[0] < 0 and image.min() >= 0:
clim = (0, clim[1])
self._image_plot.set_clim(clim)
if self.useblit:
self._blit_manager.background = None
self.redraw()
def reset_image(self):
self.image = self.original_image.copy()
def connect_event(self, event, callback):
"""Connect callback function to matplotlib event and return id."""
cid = self.canvas.mpl_connect(event, callback)
return cid
def disconnect_event(self, callback_id):
"""Disconnect callback by its id (returned by `connect_event`)."""
self.canvas.mpl_disconnect(callback_id)
def _update_status_bar(self, event):
if event.inaxes and event.inaxes.get_navigate():
self.status_message(self._format_coord(event.xdata, event.ydata))
else:
self.status_message('')
def add_tool(self, tool):
if self.useblit:
self._blit_manager.add_artists(tool.artists)
self._tools.append(tool)
self._event_manager.attach(tool)
def remove_tool(self, tool):
if tool not in self._tools:
return
if self.useblit:
self._blit_manager.remove_artists(tool.artists)
self._tools.remove(tool)
self._event_manager.detach(tool)
def _format_coord(self, x, y):
# callback function to format coordinate display in status bar
x = int(x + 0.5)
y = int(y + 0.5)
try:
return "%4s @ [%4s, %4s]" % (self.image[y, x], x, y)
except IndexError:
return ""
class CollectionViewer(ImageViewer):
"""Viewer for displaying image collections.
Select the displayed frame of the image collection using the slider or
with the following keyboard shortcuts:
left/right arrows
Previous/next image in collection.
number keys, 0--9
0% to 90% of collection. For example, "5" goes to the image in the
middle (i.e. 50%) of the collection.
home/end keys
First/last image in collection.
Parameters
----------
image_collection : list of images
List of images to be displayed.
update_on : {'move' | 'release'}
Control whether image is updated on slide or release of the image
slider. Using 'on_release' will give smoother behavior when displaying
large images or when writing a plugin/subclass that requires heavy
computation.
"""
def __init__(self, image_collection, update_on='move', **kwargs):
self.image_collection = image_collection
self.index = 0
self.num_images = len(self.image_collection)
first_image = image_collection[0]
super(CollectionViewer, self).__init__(first_image)
slider_kws = dict(value=0, low=0, high=self.num_images - 1)
slider_kws['update_on'] = update_on
slider_kws['callback'] = self.update_index
slider_kws['value_type'] = 'int'
self.slider = Slider('frame', **slider_kws)
self.layout.addWidget(self.slider)
        #TODO: Adjust height to accommodate slider; the following doesn't work
# s_size = self.slider.sizeHint()
# cs_size = self.canvas.sizeHint()
# self.resize(cs_size.width(), cs_size.height() + s_size.height())
def update_index(self, name, index):
"""Select image on display using index into image collection."""
index = int(round(index))
if index == self.index:
return
# clip index value to collection limits
index = max(index, 0)
index = min(index, self.num_images - 1)
self.index = index
self.slider.val = index
self.update_image(self.image_collection[index])
def keyPressEvent(self, event):
if type(event) == QtWidgets.QKeyEvent:
key = event.key()
# Number keys (code: 0 = key 48, 9 = key 57) move to deciles
if 48 <= key < 58:
index = 0.1 * int(key - 48) * self.num_images
self.update_index('', index)
event.accept()
else:
event.ignore()
else:
event.ignore()
| bsd-3-clause |
ngoix/OCRF | sklearn/ensemble/tests/test_bagging.py | 34 | 25693 | """
Testing for the bagging ensemble module (sklearn.ensemble.bagging).
"""
# Author: Gilles Louppe
# License: BSD 3 clause
import numpy as np
from sklearn.base import BaseEstimator
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_warns_message
from sklearn.dummy import DummyClassifier, DummyRegressor
from sklearn.model_selection import GridSearchCV, ParameterGrid
from sklearn.ensemble import BaggingClassifier, BaggingRegressor
from sklearn.linear_model import Perceptron, LogisticRegression
from sklearn.neighbors import KNeighborsClassifier, KNeighborsRegressor
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.svm import SVC, SVR
from sklearn.pipeline import make_pipeline
from sklearn.feature_selection import SelectKBest
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston, load_iris, make_hastie_10_2
from sklearn.utils import check_random_state
from scipy.sparse import csc_matrix, csr_matrix
rng = check_random_state(0)
# also load the iris dataset
# and randomly permute it
iris = load_iris()
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
# also load the boston dataset
# and randomly permute it
boston = load_boston()
perm = rng.permutation(boston.target.size)
boston.data = boston.data[perm]
boston.target = boston.target[perm]
def test_classification():
# Check classification for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [1, 2, 4],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyClassifier(),
Perceptron(),
DecisionTreeClassifier(),
KNeighborsClassifier(),
SVC()]:
for params in grid:
BaggingClassifier(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_classification():
# Check classification for various parameter settings on sparse input.
class CustomSVC(SVC):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVC, self).fit(X, y)
self.data_type_ = type(X)
return self
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
for f in ['predict', 'predict_proba', 'predict_log_proba', 'decision_function']:
# Trained on sparse format
sparse_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = getattr(sparse_classifier, f)(X_test_sparse)
# Trained on dense format
dense_classifier = BaggingClassifier(
base_estimator=CustomSVC(decision_function_shape='ovr'),
random_state=1,
**params
).fit(X_train, y_train)
dense_results = getattr(dense_classifier, f)(X_test)
assert_array_equal(sparse_results, dense_results)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert all([t == sparse_type for t in types])
def test_regression():
# Check regression for various parameter settings.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
grid = ParameterGrid({"max_samples": [0.5, 1.0],
"max_features": [0.5, 1.0],
"bootstrap": [True, False],
"bootstrap_features": [True, False]})
for base_estimator in [None,
DummyRegressor(),
DecisionTreeRegressor(),
KNeighborsRegressor(),
SVR()]:
for params in grid:
BaggingRegressor(base_estimator=base_estimator,
random_state=rng,
**params).fit(X_train, y_train).predict(X_test)
def test_sparse_regression():
# Check regression for various parameter settings on sparse input.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data[:50],
boston.target[:50],
random_state=rng)
class CustomSVR(SVR):
"""SVC variant that records the nature of the training set"""
def fit(self, X, y):
super(CustomSVR, self).fit(X, y)
self.data_type_ = type(X)
return self
parameter_sets = [
{"max_samples": 0.5,
"max_features": 2,
"bootstrap": True,
"bootstrap_features": True},
{"max_samples": 1.0,
"max_features": 4,
"bootstrap": True,
"bootstrap_features": True},
{"max_features": 2,
"bootstrap": False,
"bootstrap_features": True},
{"max_samples": 0.5,
"bootstrap": True,
"bootstrap_features": False},
]
for sparse_format in [csc_matrix, csr_matrix]:
X_train_sparse = sparse_format(X_train)
X_test_sparse = sparse_format(X_test)
for params in parameter_sets:
# Trained on sparse format
sparse_classifier = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train_sparse, y_train)
sparse_results = sparse_classifier.predict(X_test_sparse)
# Trained on dense format
dense_results = BaggingRegressor(
base_estimator=CustomSVR(),
random_state=1,
**params
).fit(X_train, y_train).predict(X_test)
sparse_type = type(X_train_sparse)
types = [i.data_type_ for i in sparse_classifier.estimators_]
assert_array_equal(sparse_results, dense_results)
assert all([t == sparse_type for t in types])
assert_array_equal(sparse_results, dense_results)
def test_bootstrap_samples():
# Test that bootstrapping samples generate non-perfect base estimators.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
base_estimator = DecisionTreeRegressor().fit(X_train, y_train)
# without bootstrap, all trees are perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=False,
random_state=rng).fit(X_train, y_train)
assert_equal(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
# with bootstrap, trees are no longer perfect on the training set
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_samples=1.0,
bootstrap=True,
random_state=rng).fit(X_train, y_train)
assert_greater(base_estimator.score(X_train, y_train),
ensemble.score(X_train, y_train))
def test_bootstrap_features():
# Test that bootstrapping features may generate duplicate features.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_equal(boston.data.shape[1], np.unique(features).shape[0])
ensemble = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
max_features=1.0,
bootstrap_features=True,
random_state=rng).fit(X_train, y_train)
for features in ensemble.estimators_features_:
assert_greater(boston.data.shape[1], np.unique(features).shape[0])
def test_probability():
# Predict probabilities.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
with np.errstate(divide="ignore", invalid="ignore"):
# Normal case
ensemble = BaggingClassifier(base_estimator=DecisionTreeClassifier(),
random_state=rng).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
# Degenerate case, where some classes are missing
ensemble = BaggingClassifier(base_estimator=LogisticRegression(),
random_state=rng,
max_samples=5).fit(X_train, y_train)
assert_array_almost_equal(np.sum(ensemble.predict_proba(X_test),
axis=1),
np.ones(len(X_test)))
assert_array_almost_equal(ensemble.predict_proba(X_test),
np.exp(ensemble.predict_log_proba(X_test)))
def test_oob_score_classification():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
for base_estimator in [DecisionTreeClassifier(), SVC()]:
clf = BaggingClassifier(base_estimator=base_estimator,
n_estimators=100,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingClassifier(base_estimator=base_estimator,
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_oob_score_regression():
# Check that oob prediction is a good estimation of the generalization
# error.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf = BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=50,
bootstrap=True,
oob_score=True,
random_state=rng).fit(X_train, y_train)
test_score = clf.score(X_test, y_test)
assert_less(abs(test_score - clf.oob_score_), 0.1)
# Test with few estimators
assert_warns(UserWarning,
BaggingRegressor(base_estimator=DecisionTreeRegressor(),
n_estimators=1,
bootstrap=True,
oob_score=True,
random_state=rng).fit,
X_train,
y_train)
def test_single_estimator():
# Check singleton ensembles.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
clf1 = BaggingRegressor(base_estimator=KNeighborsRegressor(),
n_estimators=1,
bootstrap=False,
bootstrap_features=False,
random_state=rng).fit(X_train, y_train)
clf2 = KNeighborsRegressor().fit(X_train, y_train)
assert_array_equal(clf1.predict(X_test), clf2.predict(X_test))
def test_error():
# Test that it gives proper exception on deficient input.
X, y = iris.data, iris.target
base = DecisionTreeClassifier()
# Test max_samples
assert_raises(ValueError,
BaggingClassifier(base, max_samples=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples=1000).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_samples="foobar").fit, X, y)
# Test max_features
assert_raises(ValueError,
BaggingClassifier(base, max_features=-1).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=0.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=2.0).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features=5).fit, X, y)
assert_raises(ValueError,
BaggingClassifier(base, max_features="foobar").fit, X, y)
# Test support of decision_function
assert_false(hasattr(BaggingClassifier(base).fit(X, y), 'decision_function'))
def test_parallel_classification():
# Check parallel classification.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
# predict_proba
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict_proba(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict_proba(X_test)
assert_array_almost_equal(y1, y3)
# decision_function
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
decisions1 = ensemble.decision_function(X_test)
ensemble.set_params(n_jobs=2)
decisions2 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions2)
ensemble = BaggingClassifier(SVC(decision_function_shape='ovr'),
n_jobs=1,
random_state=0).fit(X_train, y_train)
decisions3 = ensemble.decision_function(X_test)
assert_array_almost_equal(decisions1, decisions3)
def test_parallel_regression():
# Check parallel regression.
rng = check_random_state(0)
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
ensemble.set_params(n_jobs=1)
y1 = ensemble.predict(X_test)
ensemble.set_params(n_jobs=2)
y2 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y2)
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=1,
random_state=0).fit(X_train, y_train)
y3 = ensemble.predict(X_test)
assert_array_almost_equal(y1, y3)
def test_gridsearch():
# Check that bagging ensembles can be grid-searched.
# Transform iris into a binary classification task
X, y = iris.data, iris.target
y[y == 2] = 1
# Grid search with scoring based on decision_function
parameters = {'n_estimators': (1, 2),
'base_estimator__C': (1, 2)}
GridSearchCV(BaggingClassifier(SVC()),
parameters,
scoring="roc_auc").fit(X, y)
def test_base_estimator():
# Check base_estimator and its default values.
rng = check_random_state(0)
# Classification
X_train, X_test, y_train, y_test = train_test_split(iris.data,
iris.target,
random_state=rng)
ensemble = BaggingClassifier(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(DecisionTreeClassifier(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeClassifier))
ensemble = BaggingClassifier(Perceptron(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, Perceptron))
# Regression
X_train, X_test, y_train, y_test = train_test_split(boston.data,
boston.target,
random_state=rng)
ensemble = BaggingRegressor(None,
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(DecisionTreeRegressor(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, DecisionTreeRegressor))
ensemble = BaggingRegressor(SVR(),
n_jobs=3,
random_state=0).fit(X_train, y_train)
assert_true(isinstance(ensemble.base_estimator_, SVR))
def test_bagging_with_pipeline():
estimator = BaggingClassifier(make_pipeline(SelectKBest(k=1),
DecisionTreeClassifier()),
max_features=2)
estimator.fit(iris.data, iris.target)
class DummyZeroEstimator(BaseEstimator):
def fit(self, X, y):
self.classes_ = np.unique(y)
return self
def predict(self, X):
return self.classes_[np.zeros(X.shape[0], dtype=int)]
def test_bagging_sample_weight_unsupported_but_passed():
estimator = BaggingClassifier(DummyZeroEstimator())
rng = check_random_state(0)
estimator.fit(iris.data, iris.target).predict(iris.data)
assert_raises(ValueError, estimator.fit, iris.data, iris.target,
sample_weight=rng.randint(10, size=(iris.data.shape[0])))
def test_warm_start(random_state=42):
# Test if fitting incrementally with warm start gives a forest of the
# right size and the same results as a normal fit.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf_ws = None
for n_estimators in [5, 10]:
if clf_ws is None:
clf_ws = BaggingClassifier(n_estimators=n_estimators,
random_state=random_state,
warm_start=True)
else:
clf_ws.set_params(n_estimators=n_estimators)
clf_ws.fit(X, y)
assert_equal(len(clf_ws), n_estimators)
clf_no_ws = BaggingClassifier(n_estimators=10, random_state=random_state,
warm_start=False)
clf_no_ws.fit(X, y)
assert_equal(set([tree.random_state for tree in clf_ws]),
set([tree.random_state for tree in clf_no_ws]))
def test_warm_start_smaller_n_estimators():
    # Test that a warm-started second fit with a smaller n_estimators raises an error.
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True)
clf.fit(X, y)
clf.set_params(n_estimators=4)
assert_raises(ValueError, clf.fit, X, y)
def test_warm_start_equal_n_estimators():
# Test that nothing happens when fitting without increasing n_estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf = BaggingClassifier(n_estimators=5, warm_start=True, random_state=83)
clf.fit(X_train, y_train)
y_pred = clf.predict(X_test)
# modify X to nonsense values, this should not change anything
X_train += 1.
assert_warns_message(UserWarning,
"Warm-start fitting without increasing n_estimators does not",
clf.fit, X_train, y_train)
assert_array_equal(y_pred, clf.predict(X_test))
def test_warm_start_equivalence():
# warm started classifier with 5+5 estimators should be equivalent to
# one classifier with 10 estimators
X, y = make_hastie_10_2(n_samples=20, random_state=1)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=43)
clf_ws = BaggingClassifier(n_estimators=5, warm_start=True,
random_state=3141)
clf_ws.fit(X_train, y_train)
clf_ws.set_params(n_estimators=10)
clf_ws.fit(X_train, y_train)
y1 = clf_ws.predict(X_test)
clf = BaggingClassifier(n_estimators=10, warm_start=False,
random_state=3141)
clf.fit(X_train, y_train)
y2 = clf.predict(X_test)
assert_array_almost_equal(y1, y2)
def test_warm_start_with_oob_score_fails():
# Check using oob_score and warm_start simultaneously fails
X, y = make_hastie_10_2(n_samples=20, random_state=1)
clf = BaggingClassifier(n_estimators=5, warm_start=True, oob_score=True)
assert_raises(ValueError, clf.fit, X, y)
def test_oob_score_removed_on_warm_start():
X, y = make_hastie_10_2(n_samples=2000, random_state=1)
clf = BaggingClassifier(n_estimators=50, oob_score=True)
clf.fit(X, y)
clf.set_params(warm_start=True, oob_score=False, n_estimators=100)
clf.fit(X, y)
assert_raises(AttributeError, getattr, clf, "oob_score_")
| bsd-3-clause |
peteWT/cec_apl | Allocation model/Transmodel_v12.py | 2 | 11962 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Deployment model for distributed gasifiers
# Jose Daniel Lara
from __future__ import division
from pyomo.environ import *
import googlemaps
import numpy as np
import matplotlib.pyplot as plt
# Replace the API key below with a valid API key.
gmaps = googlemaps.Client(key='AIzaSyAh2PIcLDrPecSSR36z2UNubqphdHwIw7M')
model = ConcreteModel()
# Load data from files.
biomass_file = open('sources.dat', 'r')
biomass_list = biomass_file.read().splitlines()
biomass_file.close()
substation_file = open('destinations.dat', 'r')
substation_list = substation_file.read().splitlines()
substation_file.close()
# Data for the piecewise approximation
# What are the units? Are they generator size (kW) and total cost ($/kW)?
size = [1, 2, 3, 5, 10]
cost = [4000, 6500, 7500, 9300, 13000]
# A more "python" style way of specifying this data is with tuples:
# unit_cost_data = [(size_mw, unit_cost_d_per_mw), ...]
unit_cost_data = [
(1, 4000),
(2, 6500),
(3, 7500),
(5, 9300),
(10, 13000)
]
# The x and y are two parts of a single record. The tuple-based list
# ensures that each record is complete. Iteration would proceed like so:
# for (size, cost) in unit_cost_data:
# print(size, cost)
# Conventions for naming model components:
# SETS_ALL_CAPS
# VarsCamelCase
# params_pothole_case
# Constraints_Words_Capitalized_With_Underscores
# Define sets and initialize them from data above.
model.SOURCES = Set(initialize=biomass_list, doc='Location of Biomass sources')
model.SUBSTATIONS = Set(initialize=substation_list, doc='Location of Substations')
model.ROUTES = Set(dimen=2, doc='Allows routes from sources to sinks',
initialize=lambda mdl: (mdl.SOURCES * mdl.SUBSTATIONS))
model.PW = Set(initialize=range(1, len(size) + 1), doc='Set for the PW approx')
# Define Parameters
# Cost related parameters
model.installation_cost_var = Param(initialize=150,
doc='Variable installation cost per kW')
model.installation_cost_fix = Param(initialize=5000,
doc='Fixed cost of installing in the site')
model.om_cost_fix = Param(initialize=100,
doc='Fixed cost of operation per installed kW')
model.om_cost_var = Param(initialize=40,
doc='Variable cost of operation per installed kW')
model.biomass_cost = Param(model.SOURCES,
initialize={'Seattle, WA, USA': 12,
'San Diego, CA, USA': 32,
'memphis': 20,
'portland': 22,
'salt-lake-city': 23,
'washington-dc': 25},
doc='Cost of biomass per ton')
model.transport_cost = Param(initialize=25,
doc='Freight in dollars per ton per km')
model.fit_tariff = Param(model.SUBSTATIONS,
initialize={'New York, NY, USA': 1,
'Chicago, IL, USA': 2.4,
'Topeka, KY, USA': 1.5,
'boston': 1.4,
'dallas': 1.65,
'kansas-cty': 1.65,
'los-angeles': 1.0},
doc='Payment FIT $/kWh')
# Limits related parameters
model.source_biomass_max = Param(model.SOURCES,
initialize={'Seattle, WA, USA': 2350,
'San Diego, CA, USA': 2600,
'memphis': 1200,
'portland': 2000,
'salt-lake-city': 2100,
'washington-dc': 2500},
doc='Capacity of supply in tons')
model.max_capacity = Param(initialize=1000, doc='Max installation per site kW')
model.min_capacity = Param(initialize=100, doc='Min installation per site kW')
# Operational parameters
model.heat_rate = Param(initialize=0.8333, doc='Heat rate kWh/Kg')
model.capacity_factor = Param(initialize=0.85, doc='Gasifier capacity factor')
# Driving distances from the Google Maps API: matrx_distance is the raw
# response dictionary covering every biomass source / substation pair.
matrx_distance = gmaps.distance_matrix(biomass_list,
substation_list, mode="driving")
# Extract distances from the google maps API results
distance_table = {}
for (bio_idx, biomass_source) in enumerate(biomass_list):
for (sub_idx, substation_dest) in enumerate(substation_list):
distance_table[biomass_source, substation_dest] = 0.001 * (
matrx_distance['rows'][bio_idx]['elements'][sub_idx]['distance']['value'])
model.distances = Param(model.ROUTES,
initialize=distance_table, doc='Distance in km')
def calculate_lines(x, y):
"""
Calculate lines to connect a series of points, given matching vectors
    of x,y coordinates. This only makes sense for monotonically increasing
values.
This function does not perform a data integrity check.
"""
slope_list = {}
intercept_list = {}
for i in range(0, len(x) - 1):
slope_list[i+1] = (y[i] - y[i+1]) / (x[i] - x[i+1])
intercept_list[i+1] = y[i+1] - slope_list[i+1] * x[i+1]
return slope_list, intercept_list
install_cost_slope, install_cost_intercept = calculate_lines(size, cost)
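# Illustrative check of the data above: the segment joining (1, 4000) and
# (2, 6500) has slope (6500 - 4000)/(2 - 1) = 2500 and intercept
# 6500 - 2500*2 = 1500.  Note the returned dictionaries are keyed
# 1..len(size)-1 (four segments) while model.PW below spans 1..len(size),
# an off-by-one that may need reconciling.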
# Add meaningful doc for the components below.
model.install_cost_slope = Param(model.PW, initialize=install_cost_slope, doc='PW c_i')
model.install_cost_intercept = Param(model.PW, initialize=install_cost_intercept, doc='PW d_i')
# Define variables
# Generator Variables
model.CapInstalled = Var(model.SUBSTATIONS, within=NonNegativeReals,
doc='Installed Capacity kW')
model.InstallorNot = Var(model.SUBSTATIONS, within=Binary,
doc='Decision to install or not')
model.BiomassTransported = Var(model.ROUTES, within=NonNegativeReals,
doc='Biomass shipment quantities in tons')
# What is z_i ?
model.z_i = Var(model.SUBSTATIONS, within=NonNegativeReals,
doc='Variable for PW of installation cost')
# Define constraints
# Here b is the index for sources and s the index for substations
# This set of constraints define the energy balances in the system
# It is confusing & potentially problematic that your function for the
# constraint is named identically to the constraint itself. A standard
# Pyomo convention is to use the suffix "_rule", like Subs_Nodal_Balance_rule.
def Subs_Nodal_Balance(mdl, s):
# The units don't make sense here unless you include a time duration
# on the left side
# Left side: kW * %_cap_factor -> kW
# Right side: kWh/kg * kg -> kWh
# kW != kWh
return mdl.CapInstalled[s] * mdl.capacity_factor == (
sum(mdl.heat_rate * mdl.BiomassTransported[b, s]
for b in mdl.SOURCES))
# This logic will be buggy if your routes don't connect every biomass
# sources to every substation. There are multiple ways to address this
# one of which is to iterate over the list of routes and filter it to
# suppliers of this substation.
# for (b, s2) in mdl.ROUTES if s == s2
# You could also process the routes in advance and have an indexed set of
# suppliers for each substation_dest that you could access like so:
# for b in mdl.BIOMASS_SUPPLIERS[s]
# You could also define BiomassTransported[] for every combination of
# biomass source & substation, then constrain ones that lack routes to be 0.
model.Subs_Nodal_Balance = Constraint(model.SUBSTATIONS, rule=Subs_Nodal_Balance,
doc='Energy Balance at the substation')
def Sources_Nodal_Limit(mdl, b):
return sum(mdl.BiomassTransported[b, s] for s in model.SUBSTATIONS) <= (
model.source_biomass_max[b])
# This logic will be buggy if routes don't connect everything. See note above.
model.Sources_Nodal_Limit = Constraint(model.SOURCES, rule=Sources_Nodal_Limit,
doc='Limit of biomass supply at source')
# This set of constraints define the limits to the power at the substation
def Install_Decision_Max(mdl, s):
return mdl.CapInstalled[s] <= mdl.InstallorNot[s] * mdl.max_capacity
model.Install_Decision_Max = Constraint(
model.SUBSTATIONS, rule=Install_Decision_Max,
doc='Limit the maximum installed capacity and bind the continuous decision to the binary InstallorNot variable.')
def Install_Decision_Min(mdl, s):
return mdl.min_capacity * mdl.InstallorNot[s] <= mdl.CapInstalled[s]
model.Install_Decision_Min = Constraint(model.SUBSTATIONS, rule=Install_Decision_Min,
doc='Installed capacity must exceed the minimum threshold.')
# This set of constraints define the piece-wise linear approximation of
# installation cost
def Pw_Install_Cost(mdl, s):
# Logic is confusing! You accept substation s as an argument, then
# ignore that value and iterate over all substations.
# The for loops below will always return values for the first substation
# & the first line segment .. calling return inside a loop will exit the loop.
for s in mdl.SUBSTATIONS:
for p in mdl.PW:
return mdl.z_i[s] == (mdl.install_cost_slope[p] * mdl.CapInstalled[s] +
mdl.install_cost_intercept[p])
model.Pw_Install_Cost = Constraint(model.SUBSTATIONS, rule=Pw_Install_Cost,
doc='PW constraint')
# Define Objective Function.
def objective_rule(mdl):
return (
# Capacity costs
sum(mdl.z_i[s] for s in mdl.SUBSTATIONS)
# O&M costs (variable & fixed)
+ sum((mdl.om_cost_fix + mdl.capacity_factor * mdl.om_cost_var) * mdl.CapInstalled[s]
for s in mdl.SUBSTATIONS)
# The next line is buggy; uses same om_cost_fix param as previous line, but
# units don't cancel out: $/kw-installed * [unitless binary value] != $
+ sum((model.om_cost_fix) * mdl.InstallorNot[s]
for s in mdl.SUBSTATIONS)
# Transportation costs
+ sum(mdl.distances[r] * model.BiomassTransported[r]
for r in mdl.ROUTES)
# Biomass acquisition costs. These will be buggy if routes aren't the
# entire cross product. See note in Subs_Nodal_Balance.
# Why are these costs subtracted instead of added?
- sum(mdl.biomass_cost[b] * sum(mdl.BiomassTransported[b, s] for s in mdl.SUBSTATIONS)
for b in mdl.SOURCES)
# Gross profits (per month?)
- sum(mdl.fit_tariff[s] * mdl.CapInstalled[s] * mdl.capacity_factor * 30 * 24
for s in mdl.SUBSTATIONS)
)
# Rename objective to be more descriptive. Maybe "net_profits"
model.objective = Objective(rule=objective_rule, sense=minimize,
doc='Define objective function')
# Display of the output #
# plt.plot(size, cost)
# plt.show()
def pyomo_postprocess(options=None, instance=None, results=None):
model.CapInstalled.display()
model.BiomassTransported.display()
# This is an optional code path that allows the script to be run outside of
# pyomo command-line. For example: python transport.py
if __name__ == '__main__':
# This emulates what the pyomo command-line tools does
from pyomo.opt import SolverFactory
import pyomo.environ
opt = SolverFactory("gurobi")
results = opt.solve(model, tee=True)
# sends results to stdout
results.write()
print("\nDisplaying Solution\n" + '-' * 60)
pyomo_postprocess(None, None, results)
| mit |
pnedunuri/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
    # checks that the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
rueckstiess/dopamine | agents/valuebased/faestimator.py | 1 | 2529 | from numpy import *
from random import choice
from dopamine.agents.valuebased.estimator import Estimator
from dopamine.fapprox import LWPRFA
from matplotlib import pyplot as plt
class FAEstimator(Estimator):
conditions = {'discreteStates':False, 'discreteActions':True}
def __init__(self, stateDim, actionNum, faClass=LWPRFA, ordered=False):
""" initialize with the state dimension and number of actions. """
self.stateDim = stateDim
self.actionNum = actionNum
self.faClass = faClass
self.ordered = ordered
self.fas = []
# create memory for ordered estimator
if self.ordered:
self.memory = []
# define training and target array
self.reset()
def getBestAction(self, state):
""" returns the action with maximal value in the given state. if several
actions have the same value, pick one at random.
"""
state = state.flatten()
values = array([self.getValue(state, array([a])) for a in range(self.actionNum)])
maxvalues = where(values == values.max())[0]
if len(maxvalues) > 0:
action = array([choice(maxvalues)])
else:
            # this should not happen, but it does in rare cases; return the first action
action = array([0])
return action
def getValue(self, state, action):
""" returns the value of the given (state,action) pair as float. """
action = int(action.item())
if self.ordered and (action in self.memory):
return -inf
state = state.flatten()
return self.fas[action].predict(state).item()
def updateValue(self, state, action, value):
state = state.flatten()
self.fas[int(action.item())].update(state, value)
def reset(self):
""" clear collected training set. """
# special case to clean up lwpr models that were pickled
if self.faClass == LWPRFA:
for fa in self.fas:
fa._cleanup()
self.fas = [self.faClass(self.stateDim, 1) for i in range(self.actionNum)]
def train(self):
""" train individual models for each actions seperately. """
for a in range(self.actionNum):
self.fas[a].train()
def rememberAction(self, action):
if self.ordered:
self.memory.append(int(action.item()))
def resetMemory(self):
if self.ordered:
self.memory = []
| gpl-3.0 |
RhysU/suzerain | postproc/perfect.py | 1 | 42290 | #!/usr/bin/env python
"""Usage: perfect.py [OPTIONS...] H5FILE...
Produces TBD
Options:
-h Display this help message and exit
-o OUTSUFFIX Save the output file PLOT.OUTSUFFIX instead of displaying
File H5FILE should have been produced by the perfect_summary application.
"""
from __future__ import division, print_function
import collections
import getopt
import h5py
import logging
import math
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import os
import pandas as pd
import sys
matplotlib.rcParams['text.usetex'] = True
# Some of the labels used in the plotting routines require \mathscr
# Jump through some extra hoops to make sure we don't introduce
# duplicates into the preamble.
def unique_append(l, v):
if v not in l:
l.append(v)
unique_append(matplotlib.rcParams['text.latex.preamble'],
r'\usepackage[charter]{mathdesign}')
# Prepare a logger to produce messages
logging.basicConfig(level=logging.INFO, format='%(levelname)s %(message)s')
l = logging.getLogger(os.path.splitext(os.path.basename(__file__))[0])
class Bunch(dict):
"An implementation of the Bunch pattern."
def __init__(self, **kw):
dict.__init__(self, kw)
self.__dict__ = self
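# For example, Bunch(alpha=1).alpha and Bunch(alpha=1)["alpha"] both return 1
# because the instance __dict__ is aliased to the dict contents themselves.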
def maybe_assume_uncorrelated(data, keyAB, keyA, keyB=None, warn=True):
"""
If "AB" is not in dictionary data then assume Cov(A,B) = 0 implying
E[AB] = E[A] * E[B]. This is obviously not ideal from a rigor
perspective and therefore warnings are emitted whenever warn is True.
"""
if keyAB in data:
pass
elif keyB is None:
data[keyAB] = data[keyA]**2
l.warn("Estimated unknown %s by neglecting higher moment of %s"
% (keyAB, keyA))
else:
data[keyAB] = data[keyA] * data[keyB]
l.warn("Estimated unknown %s by assuming %s and %s are uncorrelated"
% (keyAB, keyA, keyB))
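# Hedged usage sketch (the keys are illustrative): when only bar.u is known,
#     maybe_assume_uncorrelated(data.bar, "u_u", "u")
# fills bar["u_u"] with bar["u"]**2 and emits the corresponding warning.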
class Data(object):
"""
Storage of data loaded and computed from a single summary file.
When enforceSymmetry is True and the file represents channel data,
the upper and lower halves of the channel are assumed to be
symmetric causing the profile data to be averaged.
"""
# FIXME Correct symmetry enforcement for antisymmetric quantities
def __init__(self, filename, enforceSymmetry=False):
"Prepare averaged information and derived quantities from a filename"
# Load file and prepare scalar and vector storage locations
# Automatically unpack a variety of data mapping it into Bunches
self.file = h5py.File(filename, 'r')
self.y = self.file['y'][()]
self.code = Bunch(alpha = self.file['alpha'][0],
beta = self.file['beta' ][0],
gamma = self.file['gamma'][0],
Ma = self.file['Ma' ][0],
Re = self.file['Re' ][0],
Pr = self.file['Pr' ][0])
self.htdelta = self.file['htdelta'][0]
# Additionally, "bl.*" or "chan.*" is mapped into self.*.
self.bar = Bunch() # From "mu" attribute of bar_*
self.bulk = Bunch() # Populated after construction
self.rms = Bunch() # Populated after construction
self.local = Bunch() # Populated after construction
self.lower = Bunch() # From lower_*
self.sigma = Bunch() # From "mu_sigma" attribute of bar_*
self.tilde = Bunch() # Populated after construction
self.tke = Bunch() # Populated after construction
self.upper = Bunch() # From upper_*
self.weight = Bunch() # From *_weights
for k, v in self.file.iteritems():
if k.startswith("bar_"):
self.bar [k[4:]] = v.attrs["mu"]
self.sigma[k[4:]] = v.attrs["mu_sigma"]
elif k.startswith("bulk_"):
self.bulk[k[5:]] = v[0]
elif k.startswith("lower_"):
self.lower[k[6:]] = v[()]
elif k.startswith("upper_"):
self.upper[k[6:]] = v[()]
elif k.endswith("_weights"):
self.weight[k[:-8]] = v[()]
elif k.startswith("bl."):
self.__dict__[k[3:]] = Bunch(
**{a:b[0] for a,b in v.attrs.items()})
elif k.startswith("chan."):
self.__dict__[k[5:]] = Bunch(
**{a:b[0] for a,b in v.attrs.items()})
# Employ symmetry assumptions, if requested and applicable
# to improve mean estimates and associated uncertainties.
# Based upon
# X ~ N(\mu_x, \sigma_x^2)
# Y ~ N(\mu_y, \sigma_y^2)
# Z = a X + b Y
# ~ N(a \mu_x + b \mu_y, a^2 \sigma_x^2 + b^2 \sigma_y^2)
# implying
# std(Z) = sqrt(std(x)**2 + std(y)**2) / 2
# which leads to the computations below.
# Notice self.file is NOT adjusted, only mean profile information,
# and also that the domain is not truncated to only the lower half.
if self.htdelta >= 0 and enforceSymmetry:
for k, v in self.bar.iteritems():
self.bar[k] = (v + np.flipud(v)) / 2
for k, v in self.sigma.iteritems():
self.sigma[k] = np.sqrt(v**2 + np.flipud(v)**2) / 2
self.enforceSymmetry = True
else:
self.enforceSymmetry = False
# Compute scaling information per the incoming data, including...
if "visc" in self.__dict__ and "wall" in self.__dict__:
# ...classical incompressible plus units and...
self.plus = Bunch()
self.plus.u = self.visc.u_tau
self.plus.y = (self.wall.rho*self.y*self.plus.u / self.wall.mu
*self.code.Re)
# ...variable density star units per Huang et al JFM 1995
# (Beware this does weird things on the upper channel half)
self.star = Bunch()
self.star.u = np.sqrt(self.visc.tau_w / self.bar.rho)
self.star.y = (self.bar.rho*self.y*self.star.u / self.bar.mu
*self.code.Re)
else:
l.warn("Star and plus unit computations not performed"
" because /{bl,chan}.{visc,wall} not found")
# Compute a whole mess of derived information, if possible
try:
from perfect_decl import pointwise
pointwise(self.code.gamma,
self.code.Ma, self.code.Re, self.code.Pr, self.y,
self.bar, self.rms, self.tilde, self.local, self.tke)
except ImportError:
l.warn("Pointwise computations not performed"
" because module 'perfect_decl' not found")
# FIXME Correct symmetry enforcement for antisymmetric quantities
def esterrcov(self, *keys):
"""
        Produce a y-dependent empirical covariance matrix for the given
        keys, returning squared standard errors from self.sigma on
        the diagonal.
"""
# Check incoming arguments
if not keys:
raise ValueError("One or more keys must be provided")
for k in keys:
if k not in self.sigma:
raise KeyError("Key %s not in sigma" % k)
if "bar_"+k not in self.file:
raise KeyError("Key %s not in sigma" % "bar_"+k)
# Preallocate space to pack incoming data and store results
ex = self.file["bar_"+k]
nt, ny = ex.shape
nv = len(keys)
out = np.empty((nv, nv, ny), dtype=ex.dtype, order='C')
dat = np.empty((nv, nt), dtype=ex.dtype, order='C')
# Process each y location in turn...
for j in xrange(ny):
# ...pack temporal trace into dat buffer
# (if needed averaging with the opposite channel half)
for i, key in enumerate(keys):
dat[i, :] = self.file["bar_"+key][:,j]
if self.enforceSymmetry:
for i, key in enumerate(keys):
dat[i, :] += self.file["bar_"+key][:,ny-1-j]
dat /= 2
# ...obtain correlation matrix with unit diagonal
t = np.corrcoef(dat, bias=1)
# ...scale correlation by squared standard error
# to obtain a covariance matrix.
for i, key in enumerate(keys):
t[i,:] *= self.sigma[key][j]
t[:,i] *= self.sigma[key][j]
# ...and store the result into out
out[:, :, j] = t
return out
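        # Hedged usage sketch: cov = data.esterrcov("rho", "rho_u") returns a
        # (2, 2, ny) array where cov[i, i, j] is the squared standard error of
        # the i-th key at wall-normal index j and the off-diagonal entries are
        # the empirical covariances built from the temporal correlation.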
def plot_profiles(data, fbottom=None, ftop=None, **fig_kw):
"""
Plot mean primitive profiles, their RMS fluctuations, and uncertainties.
"""
fig, ax = plt.subplots(2, 2,
sharex=('all' if (data.htdelta >= 0) else False),
squeeze=False, **fig_kw)
bar, tilde, sigma, star = data.bar, data.tilde, data.sigma, data.star
# Change our x axes slightly depending on channel versus boundary layer
if data.htdelta >= 0:
x = star.y
xl = None
else:
x = data.y/data.thick.delta99
xl = r"$y/\delta_{99}$"
#########################################################################
# Build dictionary of means and list of standard errors for upper row
m = collections.OrderedDict()
s = []
m[r"$\bar{\rho}$"] = bar.rho
s.append(sigma.rho)
m[r"$\bar{u}$" ] = bar.u
s.append(sigma.u)
m[r"$\bar{T}$"] = bar.T
s.append(sigma.T)
m[r"$\bar{\mu}$"] = bar.mu
s.append(sigma.mu)
# Plot upper left subfigure
for k, v in m.iteritems():
ax[0][0].plot(x, v, label=k)
if xl:
ax[0][0].set_xlabel(xl)
ax[0][0].set_ylabel(r"$\mu$")
# Plot upper right subfigure
for k, v in m.iteritems():
ax[0][1].semilogy(x, s.pop(0)/v, label=k)
if xl:
ax[0][1].set_xlabel(xl)
ax[0][1].set_ylabel(r"$\sigma_\mu / \mu$")
if fbottom:
ax[0][1].set_ylim(bottom=fbottom)
if ftop:
ax[0][1].set_ylim(top=ftop)
#########################################################################
# Build dictionary of means and standard errors for lower row
# Variance expressions from postproc/propagation.py -d perfect.decl
m.clear()
del s[:]
m[r"$\widetilde{u^{\prime\prime{}2}}$"] = tilde.upp_upp
cov = data.esterrcov("rho",
"rho_u",
"rho_u_u")
s.append(
cov[0,0,:] * ((bar.rho*bar.rho_u_u - 2*bar.rho_u**2)**2/bar.rho**6)
+ cov[0,1,:] * (4*(bar.rho*bar.rho_u_u - 2*bar.rho_u**2)*bar.rho_u/bar.rho**5)
+ cov[0,2,:] * (2*(-bar.rho*bar.rho_u_u + 2*bar.rho_u**2)/bar.rho**4)
+ cov[1,1,:] * (4*bar.rho_u**2/bar.rho**4)
+ cov[1,2,:] * (-4*bar.rho_u/bar.rho**3)
+ cov[2,2,:] * (bar.rho**(-2))
)
m[r"$\widetilde{v^{\prime\prime{}2}}$"] = tilde.vpp_vpp
cov = data.esterrcov("rho",
"rho_v",
"rho_v_v")
s.append(
cov[0,0,:] * ((bar.rho*bar.rho_v_v - 2*bar.rho_v**2)**2/bar.rho**6)
+ cov[0,1,:] * (4*(bar.rho*bar.rho_v_v - 2*bar.rho_v**2)*bar.rho_v/bar.rho**5)
+ cov[0,2,:] * (2*(-bar.rho*bar.rho_v_v + 2*bar.rho_v**2)/bar.rho**4)
+ cov[1,1,:] * (4*bar.rho_v**2/bar.rho**4)
+ cov[1,2,:] * (-4*bar.rho_v/bar.rho**3)
+ cov[2,2,:] * (bar.rho**(-2))
)
m[r"$\widetilde{w^{\prime\prime{}2}}$"] = tilde.wpp_wpp
cov = data.esterrcov("rho",
"rho_w",
"rho_w_w")
s.append(
cov[0,0,:] * ((bar.rho*bar.rho_w_w - 2*bar.rho_w**2)**2/bar.rho**6)
+ cov[0,1,:] * (4*(bar.rho*bar.rho_w_w - 2*bar.rho_w**2)*bar.rho_w/bar.rho**5)
+ cov[0,2,:] * (2*(-bar.rho*bar.rho_w_w + 2*bar.rho_w**2)/bar.rho**4)
+ cov[1,1,:] * (4*bar.rho_w**2/bar.rho**4)
+ cov[1,2,:] * (-4*bar.rho_w/bar.rho**3)
+ cov[2,2,:] * (bar.rho**(-2))
)
m[r"$\widetilde{u^{\prime\prime}v^{\prime\prime}}$"] = tilde.upp_vpp
cov = data.esterrcov("rho",
"rho_u",
"rho_u_v",
"rho_v")
s.append(
cov[0,0,:] * ((bar.rho*bar.rho_u_v - 2*bar.rho_u*bar.rho_v)**2/bar.rho**6)
+ cov[0,1,:] * (2*(bar.rho*bar.rho_u_v - 2*bar.rho_u*bar.rho_v)*bar.rho_v/bar.rho**5)
+ cov[0,2,:] * (2*(-bar.rho*bar.rho_u_v + 2*bar.rho_u*bar.rho_v)/bar.rho**4)
+ cov[0,3,:] * (2*(bar.rho*bar.rho_u_v - 2*bar.rho_u*bar.rho_v)*bar.rho_u/bar.rho**5)
+ cov[1,1,:] * (bar.rho_v**2/bar.rho**4)
+ cov[1,2,:] * (-2*bar.rho_v/bar.rho**3)
+ cov[1,3,:] * (2*bar.rho_u*bar.rho_v/bar.rho**4)
+ cov[2,2,:] * (bar.rho**(-2))
+ cov[2,3,:] * (-2*bar.rho_u/bar.rho**3)
+ cov[3,3,:] * (bar.rho_u**2/bar.rho**4)
)
m[r"$k$"] = tilde.k
cov = data.esterrcov("rho",
"rho_u",
"rho_u_u",
"rho_v",
"rho_v_v",
"rho_w",
"rho_w_w")
s.append(
+ cov[0,0,:] * ((bar.rho*bar.rho_u_u + bar.rho*bar.rho_v_v + bar.rho*bar.rho_w_w - 2*bar.rho_u**2 - 2*bar.rho_v**2 - 2*bar.rho_w**2)**2/(4*bar.rho**6))
+ cov[0,1,:] * ((bar.rho*bar.rho_u_u + bar.rho*bar.rho_v_v + bar.rho*bar.rho_w_w - 2*bar.rho_u**2 - 2*bar.rho_v**2 - 2*bar.rho_w**2)*bar.rho_u/bar.rho**5)
+ cov[0,2,:] * ((-bar.rho*bar.rho_u_u/2 - bar.rho*bar.rho_v_v/2 - bar.rho*bar.rho_w_w/2 + bar.rho_u**2 + bar.rho_v**2 + bar.rho_w**2)/bar.rho**4)
+ cov[0,3,:] * ((bar.rho*bar.rho_u_u + bar.rho*bar.rho_v_v + bar.rho*bar.rho_w_w - 2*bar.rho_u**2 - 2*bar.rho_v**2 - 2*bar.rho_w**2)*bar.rho_v/bar.rho**5)
+ cov[0,4,:] * ((-bar.rho*bar.rho_u_u/2 - bar.rho*bar.rho_v_v/2 - bar.rho*bar.rho_w_w/2 + bar.rho_u**2 + bar.rho_v**2 + bar.rho_w**2)/bar.rho**4)
+ cov[0,5,:] * ((bar.rho*bar.rho_u_u + bar.rho*bar.rho_v_v + bar.rho*bar.rho_w_w - 2*bar.rho_u**2 - 2*bar.rho_v**2 - 2*bar.rho_w**2)*bar.rho_w/bar.rho**5)
+ cov[0,6,:] * ((-bar.rho*bar.rho_u_u/2 - bar.rho*bar.rho_v_v/2 - bar.rho*bar.rho_w_w/2 + bar.rho_u**2 + bar.rho_v**2 + bar.rho_w**2)/bar.rho**4)
+ cov[1,1,:] * (bar.rho_u**2/bar.rho**4)
+ cov[1,2,:] * (-bar.rho_u/bar.rho**3)
+ cov[1,3,:] * (2*bar.rho_u*bar.rho_v/bar.rho**4)
+ cov[1,4,:] * (-bar.rho_u/bar.rho**3)
+ cov[1,5,:] * (2*bar.rho_u*bar.rho_w/bar.rho**4)
+ cov[1,6,:] * (-bar.rho_u/bar.rho**3)
+ cov[2,2,:] * (1/(4*bar.rho**2))
+ cov[2,3,:] * (-bar.rho_v/bar.rho**3)
+ cov[2,4,:] * (1/(2*bar.rho**2))
+ cov[2,5,:] * (-bar.rho_w/bar.rho**3)
+ cov[2,6,:] * (1/(2*bar.rho**2))
+ cov[3,3,:] * (bar.rho_v**2/bar.rho**4)
+ cov[3,4,:] * (-bar.rho_v/bar.rho**3)
+ cov[3,5,:] * (2*bar.rho_v*bar.rho_w/bar.rho**4)
+ cov[3,6,:] * (-bar.rho_v/bar.rho**3)
+ cov[4,4,:] * (1/(4*bar.rho**2))
+ cov[4,5,:] * (-bar.rho_w/bar.rho**3)
+ cov[4,6,:] * (1/(2*bar.rho**2))
+ cov[5,5,:] * (bar.rho_w**2/bar.rho**4)
+ cov[5,6,:] * (-bar.rho_w/bar.rho**3)
+ cov[6,6,:] * (1/(4*bar.rho**2))
)
# Plot lower left subfigure (includes normalization)
for k, v in m.iteritems():
ax[1][0].plot(star.y, v / star.u**2, label=k)
ax[1][0].set_xlabel(r"$y^\ast$")
ax[1][0].set_ylabel(r"$\mu / u_\tau^{\ast{}2}$")
# Plot lower right subfigure (includes normalization)
for k, v in m.iteritems():
ax[1][1].semilogy(star.y, np.sqrt(s.pop(0))/np.abs(v), label=k)
ax[1][1].set_xlabel(r"$y^\ast$")
ax[1][1].set_ylabel(r"$\sigma_\mu / \left|\mu\right|$")
if fbottom:
ax[1][1].set_ylim(bottom=fbottom)
if ftop:
ax[1][1].set_ylim(top=ftop)
if data.htdelta >= 0:
# Truncate at half channel width
ax[0][0].set_xlim(right=np.median(star.y))
ax[0][1].set_xlim(right=np.median(star.y))
ax[1][0].set_xlim(right=np.median(star.y))
ax[1][1].set_xlim(right=np.median(star.y))
# Add legends on rightmost images
ax[0][1].legend(frameon=False, loc='best')
ax[1][1].legend(frameon=False, loc='best')
else:
# Truncate at boundary layer thickness
ax[0][0].set_xlim(right=1.0)
ax[0][1].set_xlim(right=1.0)
ax[1][0].set_xlim(right=star.y[np.argmax(data.y/data.thick.delta99 >= 1.0)])
ax[1][1].set_xlim(right=star.y[np.argmax(data.y/data.thick.delta99 >= 1.0)])
# Add legends on leftmost images
ax[0][0].legend(frameon=False, loc='best')
ax[1][0].legend(frameon=False, loc='best')
return fig
# TODO Smooth per B-splines using ' from scipy.interpolate import interp1d'
# TODO Add a show_residual option like plot_rho, plot_rho_u, etc.
def plot_tke(data, y=None, vert=1, thresh=None, merge_pflux=False,
ax=None, **plotargs):
"""
Plot TKE budgets from data permitting rescaling and thresholding.
    If thresh is not None, the cutoff is the maximum production
    magnitude divided by abs(thresh).  Positive thresh retains only
    lines whose maxima reach the cutoff while negative thresh retains
    only lines whose maxima fall below it.  Guarini et al 2000 used 25.
    Regardless, identically zero curves are always suppressed.
If merge_pflux is True, these two pressure-related flux terms
        $\bar{p}\nabla\cdot\overline{u''}/\mbox{Ma}^2$
        $-\nabla\cdot\bar{\rho}\widetilde{T''u''}/\gamma/\mbox{Ma}^2$
are reported as a single curve.
"""
# Get a new axis if one was not supplied
if not ax:
fig, ax = plt.subplots()
# Plot along y unless requested otherwise
if y is None:
y = data.y
# Plotting cutoff based on magnitude of the production term.
max_production = np.max(np.abs(vert * data.tke.production))
def pthresh(y, q, *args, **kwargs):
max_q = np.max(np.abs(q))
if max_q == 0:
pass # Always suppress identically zero data
elif thresh is None:
return ax.plot(y, q, *args, **kwargs)
elif thresh >= 0:
if max_q >= max_production / +thresh:
return ax.plot(y, q, *args, **kwargs)
elif thresh < 0:
if max_q < max_production / -thresh:
return ax.plot(y, q, *args, **kwargs)
else:
assert False, "Sanity error"
# Produce plots in order of most to least likely to exceed thresh
# This causes any repeated linetypes to be fairly simple to distinguish
# Likely to exceed threshold but we will check anyway
pthresh(y, vert * data.tke.production,
label=r"$- \bar{\rho}\widetilde{u''\otimes{}u''}:\nabla\tilde{u}$",
**plotargs)
pthresh(y, vert * data.tke.dissipation,
label=r"$- \bar{\rho}\epsilon / \mbox{Re}$",
**plotargs)
pthresh(y, vert * data.tke.transport,
label=r"$- \nabla\cdot \bar{\rho} \widetilde{{u''}^{2}u''} / 2$",
**plotargs)
pthresh(y, vert * data.tke.diffusion,
label=r"$\nabla\cdot \overline{\tau{}u''}/\mbox{Re}$",
**plotargs)
# Conceivable that these will not exceed the threshold
# Permit two different ways to view the pressure terms per merge_pflux
if merge_pflux:
pthresh(y, vert * data.tke.pmassflux
+ vert * data.tke.pheatflux,
label=r"$\left("
r"\bar{p}\nabla\cdot\overline{u''}"
r"-\nabla\cdot\bar{\rho}\widetilde{T''u''}/\gamma"
r"\right)/\mbox{Ma}^2$",
**plotargs)
# Pressure dilatation appears inside conditional to preserve ordering
pthresh(y, vert * data.tke.pdilatation,
label=r"$\overline{p' \nabla\cdot{}u''}/\mbox{Ma}^2$",
**plotargs)
else:
pthresh(y, vert * data.tke.pmassflux,
label=r"$\bar{p}\nabla\cdot\overline{u''}/\mbox{Ma}^2$",
**plotargs)
# Pressure dilatation appears inside conditional to preserve ordering
pthresh(y, vert * data.tke.pdilatation,
label=r"$\overline{p' \nabla\cdot{}u''}/\mbox{Ma}^2$",
**plotargs)
pthresh(y, vert * data.tke.pheatflux,
label=r"$-\nabla\cdot\bar{\rho}\widetilde{T''u''}/\gamma/\mbox{Ma}^2$",
**plotargs)
pthresh(y, vert * data.tke.convection,
label=r"$- \nabla\cdot\bar{\rho}k\tilde{u}$",
**plotargs)
pthresh(y, vert * (data.tke.forcing + data.tke.constraint),
label=r"$\overline{f\cdot{}u''}$",
**plotargs)
pthresh(y, vert * data.tke.slowgrowth,
label=r"$\overline{\mathscr{S}_{\rho{}u}\cdot{}u''}$",
**plotargs)
return ax.figure
# TODO Smooth per B-splines using ' from scipy.interpolate import interp1d'
def plot_rho(data, y=None, vert=1, show_residual=False, ax=None, **plotargs):
"""
Plot density budgets from data permitting rescaling.
"""
# Get a new axis if one was not supplied
if not ax:
fig, ax = plt.subplots()
# Plot along y unless requested otherwise
if y is None:
y = data.y
# Gather all pointwise quantities into a label -> value dictionary
curves = collections.OrderedDict()
# See perfect.decl for more background
curves[r"$- \nabla\cdot\bar{\rho}\tilde{u}$"] = (
- data.bar.rho_v__y
)
curves[r"$ \overline{\mathscr{S}_{\rho}}$"] = (
+ data.bar.Srho
)
curves[r"$ \overline{\mathscr{C}_{\rho}}$"] = (
+ data.bar.Crho
)
# Produce the plot for nontrivial quantities
if show_residual:
residual = np.zeros_like(y)
for key, val in curves.iteritems():
if show_residual:
residual += val
if np.abs(val).max() > 0:
ax.plot(y, vert * val, label=key, **plotargs)
if show_residual:
ax.plot(y, vert * residual, 'k:', label="Residual")
return ax.figure
# TODO Smooth per B-splines using ' from scipy.interpolate import interp1d'
def plot_rho_u(data, y=None, vert=1, ax=None, show_residual=False, **plotargs):
"""
Plot streamwise momentum budgets from data permitting rescaling.
"""
# Get a new axis if one was not supplied
if not ax:
fig, ax = plt.subplots()
# Plot along y unless requested otherwise
if y is None:
y = data.y
# Gather all pointwise quantities into a label -> value dictionary
curves = collections.OrderedDict()
# See perfect.decl for more background
curves[r"$- \nabla\cdot\left.\tilde{u}\otimes\bar{\rho}\tilde{u}\right.$"] = (
- data.tilde.v*data.bar.rho_u__y - data.bar.rho_u*data.tilde.v__y
)
curves[r"$- \frac{1}{\textrm{Ma}^2}\nabla{}\bar{p}$"] = (
- np.zeros_like(y) / data.code.Ma**2
)
curves[r"$ \nabla\cdot\left. \bar{\tau}/\textrm{Re} \right.$"] = (
+ data.bar.tauxy__y / data.code.Re
)
curves[r"$- \nabla\cdot\left. \bar{\rho} \widetilde{u''\otimes{}u''} \right.$"] = (
- data.bar.rho*data.tilde.upp_vpp__y
- data.tilde.upp_vpp*data.bar.rho__y
)
curves[r"$ \bar{f}$"] = (
+ data.bar.fx
)
curves[r"$ \overline{\mathscr{S}_{\rho{}u}}$"] = (
+ data.bar.Srhou
)
curves[r"$ \overline{\mathscr{C}_{\rho{}u}}$"] = (
+ data.bar.Crhou
)
# Produce the plot for nontrivial quantities
if show_residual:
residual = np.zeros_like(y)
for key, val in curves.iteritems():
if show_residual:
residual += val
if np.abs(val).max() > 0:
ax.plot(y, vert * val, label=key, **plotargs)
if show_residual:
ax.plot(y, vert * residual, 'k:', label="Residual")
return ax.figure
# TODO Smooth per B-splines using ' from scipy.interpolate import interp1d'
def plot_rho_v(data, y=None, vert=1, ax=None, show_residual=False, **plotargs):
"""
Plot wall-normal momentum budgets from data permitting rescaling.
"""
# Get a new axis if one was not supplied
if not ax:
fig, ax = plt.subplots()
# Plot along y unless requested otherwise
if y is None:
y = data.y
# Gather all pointwise quantities into a label -> value dictionary
curves = collections.OrderedDict()
# See perfect.decl for more background
curves[r"$- \nabla\cdot\left.\tilde{u}\otimes\bar{\rho}\tilde{u}\right.$"] = (
- data.tilde.v*data.bar.rho_v__y - data.bar.rho_v*data.tilde.v__y
)
curves[r"$- \frac{1}{\textrm{Ma}^2}\nabla{}\bar{p}$"] = (
- data.bar.p__y / data.code.Ma**2
)
curves[r"$ \nabla\cdot\left. \bar{\tau}/\textrm{Re} \right.$"] = (
+ data.bar.tauyy__y / data.code.Re
)
curves[r"$- \nabla\cdot\left. \bar{\rho} \widetilde{u''\otimes{}u''} \right.$"] = (
- data.bar.rho*data.tilde.vpp_vpp__y
- data.tilde.vpp_vpp*data.bar.rho__y
)
curves[r"$ \bar{f}$"] = (
+ data.bar.fy
)
curves[r"$ \overline{\mathscr{S}_{\rho{}u}}$"] = (
+ data.bar.Srhov
)
curves[r"$ \overline{\mathscr{C}_{\rho{}u}}$"] = (
+ data.bar.Crhov
)
# Produce the plot for nontrivial quantities
if show_residual:
residual = np.zeros_like(y)
for key, val in curves.iteritems():
if show_residual:
residual += val
if np.abs(val).max() > 0:
ax.plot(y, vert * val, label=key, **plotargs)
if show_residual:
ax.plot(y, vert * residual, 'k:', label="Residual")
return ax.figure
# TODO Smooth per B-splines using ' from scipy.interpolate import interp1d'
def plot_rho_w(data, y=None, vert=1, ax=None, show_residual=False, **plotargs):
"""
Plot spanwise momentum budgets from data permitting rescaling.
"""
# Get a new axis if one was not supplied
if not ax:
fig, ax = plt.subplots()
# Plot along y unless requested otherwise
if y is None:
y = data.y
# Gather all pointwise quantities into a label -> value dictionary
curves = collections.OrderedDict()
# See perfect.decl for more background
curves[r"$- \nabla\cdot\left.\tilde{u}\otimes\bar{\rho}\tilde{u}\right.$"] = (
- data.tilde.v*data.bar.rho_w__y - data.bar.rho_w*data.tilde.v__y
)
curves[r"$- \frac{1}{\textrm{Ma}^2}\nabla{}\bar{p}$"] = (
- np.zeros_like(y) / data.code.Ma**2
)
curves[r"$ \nabla\cdot\left. \bar{\tau}/\textrm{Re} \right.$"] = (
+ data.bar.tauyz__y / data.code.Re
)
curves[r"$- \nabla\cdot\left. \bar{\rho} \widetilde{u''\otimes{}u''} \right.$"] = (
- data.bar.rho*data.tilde.vpp_wpp__y
- data.tilde.vpp_wpp*data.bar.rho__y
)
curves[r"$ \bar{f}$"] = (
+ data.bar.fz
)
curves[r"$ \overline{\mathscr{S}_{\rho{}u}}$"] = (
+ data.bar.Srhow
)
curves[r"$ \overline{\mathscr{C}_{\rho{}u}}$"] = (
+ data.bar.Crhow
)
# Produce the plot for nontrivial quantities
if show_residual:
residual = np.zeros_like(y)
for key, val in curves.iteritems():
if show_residual:
residual += val
if np.abs(val).max() > 0:
ax.plot(y, vert * val, label=key, **plotargs)
if show_residual:
ax.plot(y, vert * residual, 'k:', label="Residual")
return ax.figure
# TODO Smooth per B-splines using ' from scipy.interpolate import interp1d'
def plot_rho_E(data, y=None, vert=1, ax=None, show_residual=False, **plotargs):
"""
Plot total energy budgets from data permitting rescaling.
"""
# Get a new axis if one was not supplied
if not ax:
fig, ax = plt.subplots()
# Plot along y unless requested otherwise
if y is None:
y = data.y
# Gather all pointwise quantities into a label -> value dictionary
curves = collections.OrderedDict()
# See perfect.decl for more background
curves[r"$- \nabla\cdot\bar{\rho}\tilde{H}\tilde{u}$"] = (
- data.bar.rho_v*data.tilde.H__y - data.tilde.H*data.bar.rho_v__y
)
curves[r"$ \textrm{Ma}^{2} \nabla\cdot\left. \frac{\bar{\tau} \tilde{u}}{\textrm{Re}} \right.$"] = (
+ (data.code.Ma**2/data.code.Re)*( data.tilde.u*data.bar.tauxy__y + data.bar.tauxy*data.tilde.u__y
+ data.tilde.v*data.bar.tauyy__y + data.bar.tauyy*data.tilde.v__y
+ data.tilde.w*data.bar.tauyz__y + data.bar.tauyz*data.tilde.w__y )
)
curves[r"$- \textrm{Ma}^{2} \nabla\cdot\left( \bar{\rho} \widetilde{u''\otimes{}u''} \right) \tilde{u}$"] = (
- (data.code.Ma**2)*( data.bar.rho_u*data.tilde.upp_vpp__y+data.tilde.upp_vpp*data.bar.rho_u__y
+ data.bar.rho_v*data.tilde.vpp_vpp__y+data.tilde.vpp_vpp*data.bar.rho_v__y
+ data.bar.rho_w*data.tilde.vpp_wpp__y+data.tilde.vpp_wpp*data.bar.rho_w__y )
)
curves[r"$- \frac{1}{2}\textrm{Ma}^{2} \nabla\cdot\left. \bar{\rho}\widetilde{{u''}^{2}u''} \right.$"] = (
- (data.code.Ma*data.code.Ma/2)*( data.bar.rho*data.tilde.upp2vpp__y + data.tilde.upp2vpp*data.bar.rho__y )
)
curves[r"$ \textrm{Ma}^{2} \nabla\cdot\left. \frac{\overline{\tau{}u''}}{\textrm{Re}} \right.$"] = (
+ (data.code.Ma*data.code.Ma/data.code.Re)*( data.bar.tauuppy__y )
)
curves[r"$ \frac{1}{\gamma-1} \nabla\cdot\left. \frac{ \bar{\mu} \widetilde{\nabla{}T} }{\textrm{Re}\textrm{Pr}} \right.$"] = (
+ (
data.tilde.nu*data.bar.rho_grady_T__y + data.bar.rho_grady_T*data.tilde.nu__y
) / ((data.code.gamma-1)*data.code.Re*data.code.Pr)
)
curves[r"$ \frac{1}{\gamma-1} \nabla\cdot\left. \frac{ \bar{\rho} \widetilde{\nu'' \left(\nabla{}T\right)''} } { \textrm{Re}\textrm{Pr} } \right.$"] = (
+ (
data.bar.rho*data.tilde.nupp_gradyTpp__y + data.tilde.nupp_gradyTpp*data.bar.rho__y
) / ((data.code.gamma-1)*data.code.Re*data.code.Pr)
)
curves[r"$- \frac{1}{\gamma-1} \nabla\cdot\left. \bar{\rho} \widetilde{T''u''} \right.$"] = (
- (
data.bar.rho*data.tilde.Tpp_vpp__y + data.tilde.Tpp_vpp*data.bar.rho__y
) / (data.code.gamma-1)
)
curves[r"$ \textrm{Ma}^{2} \bar{f}\cdot\tilde{u}$"] = (
+ data.code.Ma*data.code.Ma*(data.bar.fx*data.tilde.u + data.bar.fy*data.tilde.v + data.bar.fz*data.tilde.w)
)
curves[r"$ \textrm{Ma}^{2} \overline{f\cdot{}u''}$"] = (
+ data.code.Ma*data.code.Ma*data.bar.f_dot_upp
)
curves[r"$ \bar{q}_b$"] = (
+ data.bar.qb
)
curves[r"$ \overline{\mathscr{S}_{\rho{}E}}$"] = (
+ data.bar.SrhoE
)
curves[r"$ \overline{\mathscr{C}_{\rho{}E}}$"] = (
+ data.bar.CrhoE
)
# Produce the plot for nontrivial quantities
if show_residual:
residual = np.zeros_like(y)
for key, val in curves.iteritems():
if show_residual:
residual += val
if np.abs(val).max() > 0:
ax.plot(y, vert * val, label=key, **plotargs)
if show_residual:
ax.plot(y, vert * residual, 'k:', label="Residual")
return ax.figure
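# Hedged usage sketch (illustrative, not part of the original module): given a
# summary object `data` exposing the bar/tilde/code/y attributes used above,
# the budget plots might be produced and saved along these lines:
#   fig = plot_rho_E(data, show_residual=True, linewidth=1)
#   fig.savefig('rhoE_budget.png', bbox_inches='tight')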
def traceframe(grepkey, fnames, zerotime=False):
"""
Build a DataFrame from data within possibly many state.dat, qoi.dat,
or bc.dat files using the 4th column 't' as the index. Logging level,
time since binary launch, and time step number information is omitted.
If zerotime is True, time will be shifted so the first time
index is zero.
"""
r = pd.DataFrame()
for fname in fnames:
f = os.popen('grep "%s" %s' % (grepkey, fname))
t = pd.read_table(f, index_col=3, sep=r"\s*")
t = t.drop(t.columns[0:2]+t.columns[3:4], axis=1)
r = r.combine_first(t)
# Based on http://stackoverflow.com/questions/14110721
# Inplace=True would be nice but it isn't available in older Pandas
if zerotime:
t = r.reset_index()
t.t -= t.t[0]
r = t.set_index(['t'])
return r
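# Hedged usage sketch (illustrative only), mirroring how plot_relaminarization
# below calls this helper:
#   qoi = traceframe('bl.qoi', ['case1/qoi.dat', 'case2/qoi.dat'], zerotime=True)
#   qoi['Ma_e'].plot()
# The file list and the 'Ma_e' column are assumptions about the input data.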
# I wanted the newer matplotlib LogLocator so I stole it. A shame, really.
class LogLocator(ticker.Locator):
"""
Determine the tick locations for log axes
"""
def __init__(self, base=10.0, subs=[1.0], numdecs=4, numticks=15):
"""
place ticks on the location= base**i*subs[j]
"""
self.base(base)
self.subs(subs)
self.numticks = numticks
self.numdecs = numdecs
def base(self, base):
"""
set the base of the log scaling (major tick every base**i, i integer)
"""
self._base = base + 0.0
def subs(self, subs):
"""
set the minor ticks the log scaling every base**i*subs[j]
"""
if subs is None:
self._subs = None # autosub
else:
self._subs = np.asarray(subs) + 0.0
def __call__(self):
'Return the locations of the ticks'
vmin, vmax = self.axis.get_view_interval()
return self.tick_values(vmin, vmax)
def tick_values(self, vmin, vmax):
b = self._base
# dummy axis has no axes attribute
if hasattr(self.axis, 'axes') and self.axis.axes.name == 'polar':
vmax = math.ceil(math.log(vmax) / math.log(b))
decades = np.arange(vmax - self.numdecs, vmax)
ticklocs = b ** decades
return ticklocs
if vmin <= 0.0:
if self.axis is not None:
vmin = self.axis.get_minpos()
if vmin <= 0.0 or not np.isfinite(vmin):
raise ValueError(
"Data has no positive values, and therefore can not be "
"log-scaled.")
vmin = math.log(vmin) / math.log(b)
vmax = math.log(vmax) / math.log(b)
if vmax < vmin:
vmin, vmax = vmax, vmin
numdec = math.floor(vmax) - math.ceil(vmin)
if self._subs is None: # autosub
if numdec > 10:
subs = np.array([1.0])
elif numdec > 6:
subs = np.arange(2.0, b, 2.0)
else:
subs = np.arange(2.0, b)
else:
subs = self._subs
stride = 1
while numdec / stride + 1 > self.numticks:
stride += 1
decades = np.arange(math.floor(vmin) - stride,
math.ceil(vmax) + 2 * stride, stride)
if hasattr(self, '_transform'):
ticklocs = self._transform.inverted().transform(decades)
            if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = np.ravel(np.outer(subs, ticklocs))
else:
            if len(subs) > 1 or (len(subs) == 1 and subs[0] != 1.0):
ticklocs = []
for decadeStart in b ** decades:
ticklocs.extend(subs * decadeStart)
else:
ticklocs = b ** decades
return self.raise_if_exceeds(np.asarray(ticklocs))
def view_limits(self, vmin, vmax):
'Try to choose the view limits intelligently'
b = self._base
if vmax < vmin:
vmin, vmax = vmax, vmin
if self.axis.axes.name == 'polar':
vmax = math.ceil(math.log(vmax) / math.log(b))
vmin = b ** (vmax - self.numdecs)
return vmin, vmax
minpos = self.axis.get_minpos()
if minpos <= 0 or not np.isfinite(minpos):
raise ValueError(
"Data has no positive values, and therefore can not be "
"log-scaled.")
if vmin <= minpos:
vmin = minpos
if not is_decade(vmin, self._base):
vmin = decade_down(vmin, self._base)
if not is_decade(vmax, self._base):
vmax = decade_up(vmax, self._base)
if vmin == vmax:
vmin = decade_down(vmin, self._base)
vmax = decade_up(vmax, self._base)
result = mtransforms.nonsingular(vmin, vmax)
return result
# TODO Split into two plots-- scenario tracking vs relaminarization
# TODO Add Re_\tau as a precursor to fluctuation collapse
def plot_relaminarization(dnames,
Re_theta=None,
Ma_e=None,
ratio_T=None,
v_wallplus=None,
p_ex=None,
delta99=None,
zerotime=False,
**kwargs):
"""
Prepare a relaminarization study plot given job directories dnames.
If provided, Ma_e, p_ex, etc. are used to plot target values.
"""
# Load the data from various source files
pbulk = traceframe('prod.bulk', (s+"/qoi.dat" for s in dnames), zerotime)
pg = traceframe('bl.pg', (s+"/qoi.dat" for s in dnames), zerotime)
qoi = traceframe('bl.qoi', (s+"/qoi.dat" for s in dnames), zerotime)
Re = traceframe('bl.Re', (s+"/qoi.dat" for s in dnames), zerotime)
thick = traceframe('bl.thick', (s+"/qoi.dat" for s in dnames), zerotime)
visc = traceframe('bl.visc', (s+"/qoi.dat" for s in dnames), zerotime)
famax = traceframe('favre.amax', (s+"/qoi.dat" for s in dnames), zerotime)
# Produce a relaminarization summary figure
fig = plt.figure(**kwargs)
#
ax = fig.add_subplot(911)
ax.ticklabel_format(useOffset=False)
ax.plot(Re.index, Re['Re_delta2'].values)
if Re_theta:
ax.hlines(Re_theta, qoi.index.min(), qoi.index.max(), 'r', '-.')
ax.set_ylabel(r'$\mbox{Re}_{\theta}$')
ax.yaxis.set_major_locator(ticker.MaxNLocator(4))
ax.set_xticklabels([])
#
ax = fig.add_subplot(912)
ax.ticklabel_format(useOffset=False)
ax.plot(qoi.index, qoi['Ma_e'].values)
if Ma_e:
ax.hlines(Ma_e, qoi.index.min(), qoi.index.max(), 'r', '-.')
ax.set_ylabel(r'$\mbox{Ma}_{e}$')
ax.yaxis.set_major_locator(ticker.MaxNLocator(4))
ax.set_xticklabels([])
#
ax = fig.add_subplot(913)
ax.ticklabel_format(useOffset=False)
ax.plot(qoi.index, qoi['ratio_T'].values)
if ratio_T:
ax.hlines(ratio_T, qoi.index.min(), qoi.index.max(), 'r', '-.')
ax.set_ylabel(r'$T_e/T_w$')
ax.yaxis.set_major_locator(ticker.MaxNLocator(4))
ax.set_xticklabels([])
#
ax = fig.add_subplot(914)
ax.ticklabel_format(useOffset=False)
ax.plot(visc.index, visc['v_wallplus'].values)
if v_wallplus:
ax.hlines(v_wallplus, visc.index.min(), visc.index.max(), 'r', '-.')
ax.set_ylabel(r'$v_w^+$')
ax.yaxis.set_major_locator(ticker.MaxNLocator(4))
ax.set_xticklabels([])
#
ax = fig.add_subplot(915)
ax.ticklabel_format(useOffset=False)
ax.plot(pg.index, pg['p_ex'].values)
if p_ex:
ax.hlines(p_ex, pg.index.min(), pg.index.max(), 'r', '-.')
ax.set_ylabel(r'$p^\ast_{e,\xi}$')
ax.yaxis.set_major_locator(ticker.MaxNLocator(4))
ax.set_xticklabels([])
#
ax = fig.add_subplot(916)
ax.ticklabel_format(useOffset=False)
ax.plot(thick.index, thick['delta99'].values)
if delta99:
ax.hlines(delta99, thick.index.min(), thick.index.max(), 'r', '-.')
ax.set_ylabel(r'$\delta_{99}$')
ax.yaxis.set_major_locator(ticker.MaxNLocator(4))
ax.set_xticklabels([])
#
ax = fig.add_subplot(917)
ax.ticklabel_format(useOffset=False)
ax.plot(pbulk.index, pbulk['total'].values)
ax.set_ylabel("Integrated\n"
#r"$\overline{\rho u'' \otimes{} u''}:\nabla\tilde{u}$",
"production",
multialignment='center')
if np.log10(pbulk['total'].values.max() / pbulk['total'].values.min()) > 1:
ax.set_yscale('log')
ax.yaxis.set_major_locator(LogLocator(numticks=3))
else:
ax.set_yscale('linear')
ax.yaxis.set_major_locator(ticker.MaxNLocator(3))
if ax.get_ylim()[0] < 1e-9:
ax.set_ylim(bottom=1e-9)
ax.set_xticklabels([])
#
ax = fig.add_subplot(918)
ax.ticklabel_format(useOffset=False)
ax.plot(famax.index, np.abs(famax['uu'].values))
ax.plot(famax.index, np.abs(famax['uv'].values))
ax.plot(famax.index, np.abs(famax['uw'].values))
ax.plot(famax.index, np.abs(famax['vv'].values))
ax.plot(famax.index, np.abs(famax['vw'].values))
ax.plot(famax.index, np.abs(famax['ww'].values))
ax.set_yscale('log')
ax.set_ylabel("max\n"
r"$\left|\widetilde{u_i''u_j''}\right|$",
multialignment='center')
if ax.get_ylim()[0] < 1e-8:
ax.set_ylim(bottom=1e-8)
ax.yaxis.set_major_locator(LogLocator(numticks=3))
ax.set_xticklabels([])
#
ax = fig.add_subplot(919)
ax.ticklabel_format(useOffset=False)
ax.plot(visc.index, visc['cf'].values)
ax.set_ylabel(r'$c_f$')
ax.yaxis.set_major_locator(ticker.MaxNLocator( 4))
#
ax.set_xlabel(r'Nondimensional time $t\,u_0 / l_0$')
#
for ax in fig.axes:
ax.xaxis.set_major_locator(ticker.MaxNLocator(10))
ax.margins(0.025, 0.08)
#
fig.tight_layout()
fig.subplots_adjust(hspace=0.10)
return fig
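# Hedged usage sketch (illustrative, not from the original source): extra
# keyword arguments are forwarded to plt.figure, so a call might look like
#   fig = plot_relaminarization(['run1', 'run2'], Re_theta=1200.0, Ma_e=1.5,
#                               zerotime=True, figsize=(8, 11))
#   fig.savefig('relaminarization.png')
# where 'run1' and 'run2' are hypothetical job directories containing qoi.dat.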
class Usage(Exception):
def __init__(self, msg):
self.msg = msg
def main(argv=None):
# Permit interactive use
if argv is None:
argv = sys.argv
# Parse and check incoming command line arguments
outsuffix = None
try:
try:
            opts, args = getopt.getopt(argv[1:], "ho:", ["help"])
except getopt.error as msg:
raise Usage(msg)
for o, a in opts:
if o in ("-h", "--help"):
print(__doc__)
return 0
elif o == "-o":
outsuffix = a
except Usage as err:
print(err.msg, file=sys.stderr)
return 2
# Push interactive mode off (in case we get used from IPython)
was_interactive = plt.isinteractive()
plt.interactive(False)
# If not saving, then display.
if not outsuffix:
plt.show()
# Pop interactive mode
plt.interactive(was_interactive)
if __name__ == "__main__":
sys.exit(main())
| gpl-3.0 |
ky822/scikit-learn | sklearn/datasets/tests/test_base.py | 205 | 5878 | import os
import shutil
import tempfile
import warnings
import nose
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
for path in [DATA_HOME, LOAD_FILES_ROOT]:
_remove_dir(path)
def setup_load_files():
global TEST_CATEGORY_DIR1
global TEST_CATEGORY_DIR2
TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
sample_file = tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
delete=False)
sample_file.write(b("Hello World!\n"))
sample_file.close()
def teardown_load_files():
_remove_dir(TEST_CATEGORY_DIR1)
_remove_dir(TEST_CATEGORY_DIR2)
def test_data_home():
# get_data_home will point to a pre-existing folder
data_home = get_data_home(data_home=DATA_HOME)
assert_equal(data_home, DATA_HOME)
assert_true(os.path.exists(data_home))
    # clear_data_home will delete both the content and the folder itself
clear_data_home(data_home=data_home)
assert_false(os.path.exists(data_home))
# if the folder is missing it will be created again
data_home = get_data_home(data_home=DATA_HOME)
assert_true(os.path.exists(data_home))
def test_default_empty_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 0)
assert_equal(len(res.target_names), 0)
assert_equal(res.DESCR, None)
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
res = load_files(LOAD_FILES_ROOT)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.data, [b("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
category = os.path.abspath(TEST_CATEGORY_DIR1).split('/').pop()
res = load_files(LOAD_FILES_ROOT, description="test",
categories=category, encoding="utf-8")
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 1)
assert_equal(res.DESCR, "test")
assert_equal(res.data, [u("Hello World!\n")])
@nose.tools.with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
res = load_files(LOAD_FILES_ROOT, load_content=False)
assert_equal(len(res.filenames), 1)
assert_equal(len(res.target_names), 2)
assert_equal(res.DESCR, None)
assert_equal(res.get('data'), None)
def test_load_sample_images():
try:
res = load_sample_images()
assert_equal(len(res.images), 2)
assert_equal(len(res.filenames), 2)
assert_true(res.DESCR)
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_digits():
digits = load_digits()
assert_equal(digits.data.shape, (1797, 64))
assert_equal(numpy.unique(digits.target).size, 10)
def test_load_digits_n_class_lt_10():
digits = load_digits(9)
assert_equal(digits.data.shape, (1617, 64))
assert_equal(numpy.unique(digits.target).size, 9)
def test_load_sample_image():
try:
china = load_sample_image('china.jpg')
assert_equal(china.dtype, 'uint8')
assert_equal(china.shape, (427, 640, 3))
except ImportError:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_missing_sample_image_error():
have_PIL = True
try:
try:
from scipy.misc import imread
except ImportError:
from scipy.misc.pilutil import imread
except ImportError:
have_PIL = False
if have_PIL:
assert_raises(AttributeError, load_sample_image,
'blop.jpg')
else:
warnings.warn("Could not load sample images, PIL is not available.")
def test_load_diabetes():
res = load_diabetes()
assert_equal(res.data.shape, (442, 10))
assert_true(res.target.size, 442)
def test_load_linnerud():
res = load_linnerud()
assert_equal(res.data.shape, (20, 3))
assert_equal(res.target.shape, (20, 3))
assert_equal(len(res.target_names), 3)
assert_true(res.DESCR)
def test_load_iris():
res = load_iris()
assert_equal(res.data.shape, (150, 4))
assert_equal(res.target.size, 150)
assert_equal(res.target_names.size, 3)
assert_true(res.DESCR)
def test_load_boston():
res = load_boston()
assert_equal(res.data.shape, (506, 13))
assert_equal(res.target.size, 506)
assert_equal(res.feature_names.size, 13)
assert_true(res.DESCR)
def test_loads_dumps_bunch():
bunch = Bunch(x="x")
bunch_from_pkl = loads(dumps(bunch))
bunch_from_pkl.x = "y"
assert_equal(bunch_from_pkl['x'], bunch_from_pkl.x)
| bsd-3-clause |
NicovincX2/Python-3.5 | Probabilités/Loi de probabilité/poisson_distr_mle.py | 1 | 1399 | # -*- coding: utf-8 -*-
import os
def poisson_theta_mle(d):
"""
Computes the Maximum Likelihood Estimate for a given 1D training
dataset from a Poisson distribution.
"""
return sum(d) / len(d)
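# Worked example (added for illustration): the Poisson MLE is simply the sample
# mean, so poisson_theta_mle([2, 0, 1, 3]) returns 6 / 4 = 1.5.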
import math
def likelihood_poisson(x, lam):
"""
Computes the class-conditional probability for an univariate
Poisson distribution
"""
if x // 1 != x:
likelihood = 0
else:
likelihood = math.e**(-lam) * lam**(x) / math.factorial(x)
return likelihood
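# Worked example (added for illustration): likelihood_poisson(2, 1.0) evaluates
# exp(-1) * 1**2 / 2! = 0.3679 / 2, roughly 0.1839; any non-integer x yields 0.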
# Drawing training data
import numpy as np
true_param = 1.0
poisson_data = np.random.poisson(lam=true_param, size=100)
mle_poiss = poisson_theta_mle(poisson_data)
print('MLE:', mle_poiss)
# Plot Probability Density Function
from matplotlib import pyplot as plt
x_range = np.arange(0, 5, 0.1)
y_true = [likelihood_poisson(x, true_param) for x in x_range]
y_mle = [likelihood_poisson(x, mle_poiss) for x in x_range]
plt.figure(figsize=(10, 8))
plt.plot(x_range, y_true, lw=2, alpha=0.5, linestyle='--',
label='true parameter ($\lambda={}$)'.format(true_param))
plt.plot(x_range, y_mle, lw=2, alpha=0.5,
label='MLE ($\lambda={}$)'.format(mle_poiss))
plt.title(
'Poisson probability density function for the true and estimated parameters')
plt.ylabel('p(x|theta)')
plt.xlim([-1, 5])
plt.xlabel('random variable x')
plt.legend()
plt.show()
os.system("pause")
| gpl-3.0 |
CDSFinance/zipline | zipline/assets/futures.py | 9 | 5290 | from pandas import Timestamp, Timedelta
from pandas.tseries.tools import normalize_date
class FutureChain(object):
""" Allows users to look up future contracts.
Parameters
----------
asset_finder : AssetFinder
An AssetFinder for future contract lookups, in particular the
AssetFinder of the TradingAlgorithm instance.
get_datetime : function
A function that returns the simulation datetime, in particular
the get_datetime method of the TradingAlgorithm instance.
root_symbol : str
The root symbol of a future chain.
as_of_date : pandas.Timestamp, optional
Date at which the chain determination is rooted. I.e. the
existing contract whose notice date is first after this date is
the primary contract, etc. If not provided, the current
simulation date is used as the as_of_date.
Attributes
----------
root_symbol : str
The root symbol of the future chain.
as_of_date
The current as-of date of this future chain.
Methods
-------
as_of(dt)
offset(time_delta)
Raises
------
RootSymbolNotFound
Raised when the FutureChain is initialized with a root symbol for which
a future chain could not be found.
"""
def __init__(self, asset_finder, get_datetime, root_symbol,
as_of_date=None):
self.root_symbol = root_symbol
# Reference to the algo's AssetFinder for contract lookups
self._asset_finder = asset_finder
# Reference to the algo's get_datetime to know the current dt
self._algorithm_get_datetime = get_datetime
# If an as_of_date is provided, self._as_of_date uses that
# value, otherwise None. This attribute backs the as_of_date property.
if as_of_date:
self._as_of_date = normalize_date(as_of_date)
else:
self._as_of_date = None
# Attribute to cache the most up-to-date chain, and the dt when it was
# last updated.
self._current_chain = []
self._last_updated = None
# Get the initial chain, since self._last_updated is None.
self._maybe_update_current_chain()
def __repr__(self):
# NOTE: The string returned cannot be used to instantiate this
# exact FutureChain, since we don't want to display the asset
# finder and get_datetime function to the user.
if self._as_of_date:
return "FutureChain(root_symbol='%s', as_of_date='%s')" % (
self.root_symbol, self.as_of_date)
else:
return "FutureChain(root_symbol='%s')" % self.root_symbol
def _get_datetime(self):
"""
Returns the normalized simulation datetime.
Returns
-------
pandas.Timestamp
The normalized datetime of FutureChain's TradingAlgorithm.
"""
return normalize_date(
Timestamp(self._algorithm_get_datetime(), tz='UTC')
)
@property
def as_of_date(self):
"""
The current as-of date of this future chain.
Returns
-------
pandas.Timestamp
The user-provided as_of_date if given, otherwise the
current datetime of the simulation.
"""
if self._as_of_date is not None:
return self._as_of_date
else:
return self._get_datetime()
def _maybe_update_current_chain(self):
""" Updates the current chain if it's out of date, then returns
it.
Returns
-------
list
The up-to-date current chain, a list of Future objects.
"""
dt = self._get_datetime()
if (self._last_updated is None) or (self._last_updated != dt):
self._current_chain = self._asset_finder.lookup_future_chain(
self.root_symbol,
self.as_of_date,
dt
)
self._last_updated = dt
return self._current_chain
def __getitem__(self, key):
return self._maybe_update_current_chain()[key]
def __len__(self):
return len(self._maybe_update_current_chain())
def __iter__(self):
return iter(self._maybe_update_current_chain())
def as_of(self, dt):
""" Get the future chain for this root symbol as of a specific date.
Parameters
----------
dt : datetime.datetime or pandas.Timestamp or str, optional
The as_of_date for the new chain.
Returns
-------
FutureChain
"""
return FutureChain(
asset_finder=self._asset_finder,
get_datetime=self._algorithm_get_datetime,
root_symbol=self.root_symbol,
as_of_date=dt
)
def offset(self, time_delta):
""" Get the future chain for this root symbol with a given
offset from the current as_of_date.
Parameters
----------
time_delta : datetime.timedelta or pandas.Timedelta or str
The offset from the current as_of_date for the new chain.
Returns
-------
FutureChain
"""
return self.as_of(self.as_of_date + Timedelta(time_delta))
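    # Hedged usage sketch (illustrative, not part of the original file): inside
    # a TradingAlgorithm the chain could be built from the algorithm's helpers,
    #   chain = FutureChain(algo.asset_finder, algo.get_datetime, 'CL')
    #   primary = chain[0]              # first contract of the current chain
    #   later = chain.offset('7 days')  # same root symbol, one week later
    # where `algo` is a hypothetical TradingAlgorithm instance and 'CL' an
    # assumed root symbol.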
| apache-2.0 |
ferchault/iago | src/iago/DatabaseProvider.py | 1 | 9729 | # standard modules
import ast
import utils
import json
# third-party modules
import pandas as pd
class DB(object):
def __init__(self):
self._groups = utils.SafeDict()
#: Plane data as calculated by :func:`iago.Analyser.Analyser.dynamic_plane`
self.planes = utils.annotated_data_frame({
'run': ('Run', None),
'frame': ('Frame number', None),
'name': ('Plane name', None),
'normal_x': ('Normal vector: x component', None),
'normal_y': ('Normal vector: y component', None),
'normal_z': ('Normal vector: z component', None),
'support_x': ('Support point: x component', 'angstrom'),
'support_y': ('Support point: y component', 'angstrom'),
'support_z': ('Support point: z component', 'angstrom'),
})
#: Atom-atom distance data as calculated by :func:`iago.Analyser.Analyser.dynamic_distance`
self.distances = utils.annotated_data_frame({
'run': ('Run', None),
'frame': ('Frame number', None),
'name': ('Distance set name', None),
'atom1': ('First atom index', None),
'atom2': ('Second atom index', None),
'dist': ('Distance', 'angstrom')
})
#: Atom-plane distance data as calculated by :func:`iago.Analyser.Analyser.dynamic_distance`
self.planedistances = utils.annotated_data_frame({
'run': ('Run', None),
'frame': ('Frame number', None),
'name': ('Distance set name', None),
'plane': ('Plane name', None),
'atom1': ('First atom index', None),
'dist': ('Distance', 'angstrom')
})
self.energies = utils.annotated_data_frame({
'run': ('Run', None),
'frame': ('Frame number', None),
'total': ('Total energy', 'hartree'),
'conserved': ('Conserved quantity', 'hartree'),
'coreself': ('Core-Self energy', 'hartree'),
'corehamiltonian': ('Core Hamiltonian', 'hartree'),
'hartree': ('Hartree energy', 'hartree'),
'xc': ('Exchange-Correlation energy', 'hartree'),
'hfx': ('Hartree-Fock Exchange energy', 'hartree'),
'dispersion': ('Dispersion energy', 'hartree'),
'potential': ('Potential energy', 'hartree'),
'kinetic': ('Kinetic energy', 'hartree'),
'drift': ('Energy drift per atom', 'kelvin')
})
self.cells = utils.annotated_data_frame({
'run': ('Run', None),
'frame': ('Frame number', None),
'a': ('First cell length', 'angstrom'),
'b': ('Second cell length', 'angstrom'),
'c': ('Third cell length', 'angstrom'),
'alpha': ('First cell angle', 'degrees'),
'beta': ('Second cell angle', 'degrees'),
'gamma': ('Third cell angle', 'degrees'),
'volume': ('Cell volume', 'angstrom**3'),
})
self.ensembles = utils.annotated_data_frame({
'run': ('Run', None),
'frame': ('Frame number', None),
'temperature': ('Temperature', 'kelvin'),
'pressure': ('Pressure', 'bar'),
})
self.meta = utils.annotated_data_frame({
'run': ('Run', None),
'frame': ('Frame number', None),
'iasd': ('Integrated absolute spin density', None),
's2': ('Determinant S**2', None),
'scfcycles': ('Number of SCF cycles', None),
'otcycles': ('Number of outer SCF cycles', None),
'globaleri': ('Number of ERI evaluated', None)
})
self.points = utils.annotated_data_frame({
'run': ('Run', None),
'frame': ('Frame number', None),
'name': ('Point set name', None),
'x': ('X position', 'angstrom'),
'y': ('Y position', 'angstrom'),
'z': ('Z position', 'angstrom'),
'fractional_x': ('Fractional x position', None),
'fractional_y': ('Fractional y position', None),
'fractional_z': ('Fractional z position', None),
})
self.input = utils.Map()
self.output = pd.DataFrame()
self._data_tables = 'meta ensembles cells energies'.split()
self._stock_tables = 'planes distances planedistances points'.split()
@staticmethod
def _transfer_missing_units(origin, destination, columns):
for column in columns:
destination._iago_units[column] = origin._iago_units[column]
destination._iago_comments[column] = origin._iago_comments[column]
def assign_output_columns(self):
for table in self._data_tables:
overlap = (set(self.output.columns) & set(self.__dict__[table].columns)) - set(['run', 'frame'])
if len(overlap) == 0:
continue
newtable = self.__dict__[table].append(self.output[['run', 'frame'] + list(overlap)])
DB._transfer_missing_units(self.__dict__[table], newtable, ['run', 'frame'] + list(overlap))
self.__dict__[table] = newtable
self.output.drop(list(overlap), inplace=True, axis=1)
@property
def groups(self):
""" Known static atom groups.
:return: Dictionary with group names as key, list of 0-based atom indices as value.
"""
return self._groups
def write(self, fh, format='hdf5'):
""" Writes the database to disk or stream.
:param fh: File handle or filename.
:param format: File format. Either hdf5 or json.
"""
format = format.lower()
if format == 'json':
self._write_json(fh)
elif format == 'hdf5':
self._write_hdf5(fh)
else:
raise ValueError('No such format supported: %s' % format)
def _write_hdf5(self, fh):
""" Writes the database as hdf5 to disk.
:param fh: File handle or filename."""
hdf = pd.HDFStore(fh, mode='a')
hdf.put('groups', self.groups.to_dataframe())
for table in self._stock_tables:
hdf.put(table, getattr(self, table))
hdf.put('%s_meta' % table, pd.DataFrame.from_dict(getattr(self, table).annotations_to_dict()))
# String properties
sdf = pd.DataFrame.from_dict({'labels': ['input', ], 'values': [str(self.input), ]})
hdf.put('strings', sdf)
hdf.put('output', self.output)
# data tables
for table in self._data_tables:
hdf.put(table, getattr(self, table))
hdf.put('%s_meta' % table, pd.DataFrame.from_dict(getattr(self, table).annotations_to_dict()))
# finalise
hdf.close()
def _write_json(self, fh):
""" Writes the database as json to disk.
:param fh: File handle or filename."""
if not hasattr(fh, 'write'):
fh = open(fh, 'w')
data = dict()
# groups
data['groups'] = self.groups
for table in self._stock_tables:
data[table] = getattr(self, table).to_dict('list')
data['%s-meta' % table] = getattr(self, table).annotations_to_dict()
# input / output
data['input'] = self.input
data['output'] = self.output.to_dict('list')
# data['output-meta'] = self.output.annotations_to_dict()
# data tables
for table in self._data_tables:
data[table] = self.__dict__[table].to_dict('list')
data[table + '-meta'] = self.__dict__[table].annotations_to_dict()
# finalise
fh.write(json.dumps(data, separators=(',', ':')))
def read(self, handle=None, name=None, format='json'):
""" Reads the database from disk or stream.
:param handle: File handle. Either `handle` or `name` has to be specified.
:param name: File name. Either `handle` or `name` has to be specified.
:param format: Either JSON or HDF5.
"""
if handle is None and name is None:
raise ValueError('Nothing to read specified.')
if format == 'json':
if handle is None:
handle = open(name)
self._read_json(handle)
elif format == 'hdf5':
self._read_hdf5(name)
else:
raise ValueError('No such format supported: %s' % format)
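    # Hedged usage sketch (illustrative only): a database can be round-tripped
    # through either backend, e.g.
    #   db = DB()
    #   db.write('analysis.json', format='json')
    #   restored = DB()
    #   restored.read(name='analysis.json', format='json')
    # The file name 'analysis.json' is an assumption for the example.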
def _read_hdf5(self, filename):
""" Reads the HDF5 database from disk.
        :param filename: HDF5 file name.
"""
hdf = pd.HDFStore(filename, mode='r')
self._groups = utils.SafeDict.from_dataframe(hdf.get('groups'))
for table in self._stock_tables:
pdt = hdf.get(table)
meta = hdf.get('%s_meta' % table).to_dict(orient='list')
setattr(self, table, utils.annotated_data_frame(meta, pdt))
# String properties
sdf = hdf.get('strings') #.to_dict(orient='list')
input = sdf[sdf['labels']=='input']['values'].values[0]
self.input = utils.Map(ast.literal_eval(input))
self.output = hdf.get('output')
# data tables
for table in self._data_tables:
pdt = hdf.get(table)
meta = hdf.get('%s_meta' % table).to_dict(orient='list')
setattr(self, table, utils.annotated_data_frame(meta, pdt))
# finalise
hdf.close()
def _read_json(self, fh):
""" Reads the JSON database from disk or stream.
:param fh: File handle or filename.
"""
data = json.load(fh)
# groups
try:
self._groups = utils.SafeDict(data['groups'])
except KeyError:
pass
# planes
try:
self.planes = utils.annotated_data_frame(data['planes-meta'], data['planes'])
except KeyError:
pass
# distances
try:
self.distances = utils.annotated_data_frame(data['distances-meta'], data['distances'])
except KeyError:
pass
try:
self.planedistances = utils.annotated_data_frame(data['planedistances-meta'], data['planedistances'])
except KeyError:
pass
# points
try:
self.points = utils.annotated_data_frame(data['points-meta'], data['points'])
except KeyError:
pass
# input / output
try:
self.input = utils.Map(data['input'])
except KeyError:
pass
# self.output = utils.AnnotatedDataFrame(data['output-meta'], data['output'])
try:
self.output = pd.DataFrame.from_dict(data['output'])
except KeyError:
pass
# data tables
for table in self._data_tables:
try:
self.__dict__[table] = utils.annotated_data_frame(data[table + '-meta'], data[table])
except KeyError:
pass
# cleanup
fh.close()
def monitor(self, quantity):
for table in self._data_tables:
tobj = self.__dict__[table]
if quantity in tobj.columns:
# keep dependency local to this function
import matplotlib.pyplot as plt
plt.plot(tobj['frame'], tobj[quantity])
unit = tobj.explain(quantity).Unit.values[0]
if unit == 'No unit available.':
unit = 'n/a'
label = '%s in %s' % (tobj.explain(quantity).Comment.values[0], unit)
plt.ylabel(label)
plt.xlabel('Frame')
return
raise ValueError('No such quantity.') | mit |
bakeronit/bio_script | old/basicStat.py | 1 | 3837 | #! usr/bin/env python
# -*- coding: utf-8 -*-
# filename:basicStat.py
import time
import argparse
import collections
import matplotlib
import os
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
parser= argparse.ArgumentParser(description='some basic statistics of fastq file')
parser.add_argument('input',type=str, help = 'input fastq file - must be given')
parser.add_argument('-o','--outdir',type=str,default='./',help='output file directory, default is ./')
args=parser.parse_args()
ISOTIMEFORMAT = '%Y-%m-%d %X'
def ASCIItoquality33(ch):
return ord(ch)-33
#def ASCIItoquality64(ch):
# return ord(ch)-64
def basicStat(filename):
    baseCount = collections.Counter() # count how many A/T/C/G/N or other unexpected characters appear in the sequences
with open (filename,'r') as f:
row=0
baseNum=0
read_length=0
for line in f:
line = line.rstrip()
if row%4 == 1:
if read_length == 0:
n=len(line) #min length of reads
m=len(line) #max length of reads
#read_length=len(line)
read_length=[n,m]
#gc = [0] * (read_length+20) # get stastistic of gc content cross read at every base postion. std ~ 20bp
#gc_len=[0] * (read_length + 20)
gc=[[0]*2 for l in range(read_length[1]+20)] #gc[count_gc,count_base]
quality=[0]*60
baseNum+=len(line)
baseCount.update(line) # update counter for every read
read_length[0]=len(line) if len(line) < read_length[0] else read_length[0]
read_length[1]=len(line) if len(line) > read_length[1] else read_length[1]
for i in range(len(line)):
gc[i][1]+=1
if line[i] == 'G' or line[i] == 'C':
gc[i][0]+=1
            if row%4 == 3: # quality statistics
for q in line:
quality[ASCIItoquality33(q)]+=1 # default is phred33
            row+=1 # after this increment, row equals the number of lines read so far
GC_content = (baseCount['G'] + baseCount['C'])*100/baseNum
fh = open(os.path.join(args.outdir,'Stat.out'),'w') #output file
print('>>Total sequece:%d'%(row/4),file = fh)
print('>>Read length:%d-%d'%(read_length[0],read_length[1]), file=fh)
print('>>Total Base:%d'%baseNum , file = fh)
    print('>>Readable:%.2fGb\t%.2fMb'%((baseNum/1e9),(baseNum/1e6)), file = fh)
print('>>Base Composition:', file = fh)
for i in baseCount:
print(i+":%d"%baseCount[i], file = fh)
print('>>GC content:%.2f'%GC_content + '%', file = fh)
#fh2 = open(args.outfile,'w+')
gc_per_base=[0]*read_length[1]
print('\n>>GC content across per base:', file = fh)
for i in range(read_length[1]):
        gc_per_base[i] = gc[i][0]/gc[i][1] # fraction of G/C among the bases observed at position i
print('%d\t%.2f'%((i+1),(gc_per_base[i]*100))+'%', file = fh)
return gc_per_base,read_length, filename,quality
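# Hedged usage sketch (illustrative, not from the original script):
#   python basicStat.py reads.fastq -o stats_out/
# writes Stat.out together with GC.png and hist.png into stats_out/;
# reads.fastq and stats_out/ are placeholder names.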
########## main function #################
print("start time:" + time.strftime(ISOTIMEFORMAT,time.localtime()))
gc,read_length ,filename,quality= basicStat(args.input)
#print(quality)
import numpy as np
import matplotlib.pyplot as plt
plt.figure()
plt.plot(gc)
plt.xlim(0,read_length[1])
plt.ylim(0.2,0.8)
plt.xlabel('Per base')
plt.ylabel('GC%')
plt.title('GC content across all bases in '+ filename)
plt.savefig(os.path.join(args.outdir,'GC.png'))
plt.figure()
x=np.arange(len(quality))
plt.bar(x,quality,alpha= .5, color='g')
plt.savefig(os.path.join(args.outdir,'hist.png'))
n,bins,patches=plt.hist(quality,)
print("end time:" + time.strftime(ISOTIMEFORMAT,time.localtime()))
| gpl-2.0 |
davidgbe/scikit-learn | sklearn/externals/joblib/parallel.py | 79 | 35628 | """
Helpers for embarrassingly parallel code.
"""
# Author: Gael Varoquaux < gael dot varoquaux at normalesup dot org >
# Copyright: 2010, Gael Varoquaux
# License: BSD 3 clause
from __future__ import division
import os
import sys
import gc
import warnings
from math import sqrt
import functools
import time
import threading
import itertools
from numbers import Integral
try:
import cPickle as pickle
except:
import pickle
from ._multiprocessing_helpers import mp
if mp is not None:
from .pool import MemmapingPool
from multiprocessing.pool import ThreadPool
from .format_stack import format_exc, format_outer_frames
from .logger import Logger, short_format_time
from .my_exceptions import TransportableException, _mk_exception
from .disk import memstr_to_kbytes
from ._compat import _basestring
VALID_BACKENDS = ['multiprocessing', 'threading']
# Environment variables to protect against bad situations when nesting
JOBLIB_SPAWNED_PROCESS = "__JOBLIB_SPAWNED_PARALLEL__"
# In seconds, should be big enough to hide multiprocessing dispatching
# overhead.
# This settings was found by running benchmarks/bench_auto_batching.py
# with various parameters on various platforms.
MIN_IDEAL_BATCH_DURATION = .2
# Should not be too high to avoid stragglers: long jobs running alone
# on a single worker while other workers have no work to process any more.
MAX_IDEAL_BATCH_DURATION = 2
# Under Python 3.4+ use the 'forkserver' start method by default: this makes it
# possible to avoid crashing 3rd party libraries that manage an internal thread
# pool that does not tolerate forking
if hasattr(mp, 'get_start_method'):
method = os.environ.get('JOBLIB_START_METHOD')
if (method is None and mp.get_start_method() == 'fork'
and 'forkserver' in mp.get_all_start_methods()):
method = 'forkserver'
DEFAULT_MP_CONTEXT = mp.get_context(method=method)
else:
DEFAULT_MP_CONTEXT = None
class BatchedCalls(object):
"""Wrap a sequence of (func, args, kwargs) tuples as a single callable"""
def __init__(self, iterator_slice):
self.items = list(iterator_slice)
self._size = len(self.items)
def __call__(self):
return [func(*args, **kwargs) for func, args, kwargs in self.items]
def __len__(self):
return self._size
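# Illustrative sketch (not part of the original file): a BatchedCalls object
# simply replays its (func, args, kwargs) triples when called, e.g.
#   batch = BatchedCalls([(abs, (-1,), {}), (pow, (2, 3), {})])
#   batch()     # -> [1, 8]
#   len(batch)  # -> 2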
###############################################################################
# CPU count that works also when multiprocessing has been disabled via
# the JOBLIB_MULTIPROCESSING environment variable
def cpu_count():
""" Return the number of CPUs.
"""
if mp is None:
return 1
return mp.cpu_count()
###############################################################################
# For verbosity
def _verbosity_filter(index, verbose):
""" Returns False for indices increasingly apart, the distance
depending on the value of verbose.
We use a lag increasing as the square of index
"""
if not verbose:
return True
elif verbose > 10:
return False
if index == 0:
return False
verbose = .5 * (11 - verbose) ** 2
scale = sqrt(index / verbose)
next_scale = sqrt((index + 1) / verbose)
return (int(next_scale) == int(scale))
###############################################################################
class WorkerInterrupt(Exception):
""" An exception that is not KeyboardInterrupt to allow subprocesses
to be interrupted.
"""
pass
###############################################################################
class SafeFunction(object):
""" Wraps a function to make it exception with full traceback in
their representation.
Useful for parallel computing with multiprocessing, for which
exceptions cannot be captured.
"""
def __init__(self, func):
self.func = func
def __call__(self, *args, **kwargs):
try:
return self.func(*args, **kwargs)
except KeyboardInterrupt:
# We capture the KeyboardInterrupt and reraise it as
# something different, as multiprocessing does not
# interrupt processing for a KeyboardInterrupt
raise WorkerInterrupt()
except:
e_type, e_value, e_tb = sys.exc_info()
text = format_exc(e_type, e_value, e_tb, context=10,
tb_offset=1)
if issubclass(e_type, TransportableException):
raise
else:
raise TransportableException(text, e_type)
###############################################################################
def delayed(function, check_pickle=True):
"""Decorator used to capture the arguments of a function.
Pass `check_pickle=False` when:
- performing a possibly repeated check is too costly and has been done
already once outside of the call to delayed.
- when used in conjunction `Parallel(backend='threading')`.
"""
# Try to pickle the input function, to catch the problems early when
# using with multiprocessing:
if check_pickle:
pickle.dumps(function)
def delayed_function(*args, **kwargs):
return function, args, kwargs
try:
delayed_function = functools.wraps(function)(delayed_function)
except AttributeError:
" functools.wraps fails on some callable objects "
return delayed_function
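# Illustrative sketch (not part of the original file): delayed only records the
# call instead of executing it, e.g. with `from math import sqrt`,
#   delayed(sqrt)(4)  # -> (sqrt, (4,), {})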
###############################################################################
class ImmediateComputeBatch(object):
"""Sequential computation of a batch of tasks.
This replicates the async computation API but actually does not delay
the computations when joblib.Parallel runs in sequential mode.
"""
def __init__(self, batch):
# Don't delay the application, to avoid keeping the input
# arguments in memory
self.results = batch()
def get(self):
return self.results
###############################################################################
class BatchCompletionCallBack(object):
"""Callback used by joblib.Parallel's multiprocessing backend.
This callable is executed by the parent process whenever a worker process
has returned the results of a batch of tasks.
    It is used for progress reporting, to update the estimate of the batch
processing duration and to schedule the next batch of tasks to be
processed.
"""
def __init__(self, dispatch_timestamp, batch_size, parallel):
self.dispatch_timestamp = dispatch_timestamp
self.batch_size = batch_size
self.parallel = parallel
def __call__(self, out):
self.parallel.n_completed_tasks += self.batch_size
this_batch_duration = time.time() - self.dispatch_timestamp
if (self.parallel.batch_size == 'auto'
and self.batch_size == self.parallel._effective_batch_size):
# Update the smoothed streaming estimate of the duration of a batch
# from dispatch to completion
old_duration = self.parallel._smoothed_batch_duration
if old_duration == 0:
# First record of duration for this batch size after the last
# reset.
new_duration = this_batch_duration
else:
# Update the exponentially weighted average of the duration of
# batch for the current effective size.
new_duration = 0.8 * old_duration + 0.2 * this_batch_duration
self.parallel._smoothed_batch_duration = new_duration
self.parallel.print_progress()
if self.parallel._original_iterator is not None:
self.parallel.dispatch_next()
###############################################################################
class Parallel(Logger):
''' Helper class for readable parallel mapping.
Parameters
-----------
n_jobs: int, default: 1
The maximum number of concurrently running jobs, such as the number
of Python worker processes when backend="multiprocessing"
or the size of the thread-pool when backend="threading".
If -1 all CPUs are used. If 1 is given, no parallel computing code
is used at all, which is useful for debugging. For n_jobs below -1,
(n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all
CPUs but one are used.
backend: str or None, default: 'multiprocessing'
Specify the parallelization backend implementation.
Supported backends are:
- "multiprocessing" used by default, can induce some
communication and memory overhead when exchanging input and
          output data with the worker Python processes.
- "threading" is a very low-overhead backend but it suffers
from the Python Global Interpreter Lock if the called function
relies a lot on Python objects. "threading" is mostly useful
when the execution bottleneck is a compiled extension that
explicitly releases the GIL (for instance a Cython loop wrapped
in a "with nogil" block or an expensive call to a library such
as NumPy).
verbose: int, optional
The verbosity level: if non zero, progress messages are
printed. Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
        If it is more than 10, all iterations are reported.
pre_dispatch: {'all', integer, or expression, as in '3*n_jobs'}
The number of batches (of tasks) to be pre-dispatched.
        Default is '2*n_jobs'. When batch_size="auto" this is a reasonable
        default and the multiprocessing workers should never starve.
batch_size: int or 'auto', default: 'auto'
The number of atomic tasks to dispatch at once to each
worker. When individual evaluations are very fast, multiprocessing
can be slower than sequential computation because of the overhead.
Batching fast computations together can mitigate this.
The ``'auto'`` strategy keeps track of the time it takes for a batch
to complete, and dynamically adjusts the batch size to keep the time
on the order of half a second, using a heuristic. The initial batch
size is 1.
``batch_size="auto"`` with ``backend="threading"`` will dispatch
batches of a single task at a time as the threading backend has
very little overhead and using larger batch size has not proved to
bring any gain in that case.
temp_folder: str, optional
Folder to be used by the pool for memmaping large arrays
for sharing memory with worker processes. If None, this will try in
order:
- a folder pointed by the JOBLIB_TEMP_FOLDER environment variable,
- /dev/shm if the folder exists and is writable: this is a RAMdisk
filesystem available by default on modern Linux distributions,
- the default system temporary folder that can be overridden
with TMP, TMPDIR or TEMP environment variables, typically /tmp
under Unix operating systems.
Only active when backend="multiprocessing".
    max_nbytes: int, str, or None, optional, 1M by default
Threshold on the size of arrays passed to the workers that
triggers automated memory mapping in temp_folder. Can be an int
in Bytes, or a human-readable string, e.g., '1M' for 1 megabyte.
Use None to disable memmaping of large arrays.
Only active when backend="multiprocessing".
Notes
-----
This object uses the multiprocessing module to compute in
parallel the application of a function to many different
arguments. The main functionality it brings in addition to
using the raw multiprocessing API are (see examples for details):
* More readable code, in particular since it avoids
constructing list of arguments.
* Easier debugging:
- informative tracebacks even when the error happens on
the client side
- using 'n_jobs=1' enables to turn off parallel computing
for debugging without changing the codepath
- early capture of pickling errors
* An optional progress meter.
* Interruption of multiprocesses jobs with 'Ctrl-C'
* Flexible pickling control for the communication to and from
the worker processes.
* Ability to use shared memory efficiently with worker
processes for large numpy-based datastructures.
Examples
--------
A simple example:
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
Reshaping the output when the function has several return
values:
>>> from math import modf
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=1)(delayed(modf)(i/2.) for i in range(10))
>>> res, i = zip(*r)
>>> res
(0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5, 0.0, 0.5)
>>> i
(0.0, 0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0, 4.0, 4.0)
The progress meter: the higher the value of `verbose`, the more
messages::
>>> from time import sleep
>>> from sklearn.externals.joblib import Parallel, delayed
>>> r = Parallel(n_jobs=2, verbose=5)(delayed(sleep)(.1) for _ in range(10)) #doctest: +SKIP
[Parallel(n_jobs=2)]: Done 1 out of 10 | elapsed: 0.1s remaining: 0.9s
[Parallel(n_jobs=2)]: Done 3 out of 10 | elapsed: 0.2s remaining: 0.5s
[Parallel(n_jobs=2)]: Done 6 out of 10 | elapsed: 0.3s remaining: 0.2s
[Parallel(n_jobs=2)]: Done 9 out of 10 | elapsed: 0.5s remaining: 0.1s
[Parallel(n_jobs=2)]: Done 10 out of 10 | elapsed: 0.5s finished
Traceback example, note how the line of the error is indicated
as well as the values of the parameter passed to the function that
triggered the exception, even though the traceback happens in the
child process::
>>> from heapq import nlargest
>>> from sklearn.externals.joblib import Parallel, delayed
>>> Parallel(n_jobs=2)(delayed(nlargest)(2, n) for n in (range(4), 'abcde', 3)) #doctest: +SKIP
#...
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
TypeError Mon Nov 12 11:37:46 2012
PID: 12934 Python 2.7.3: /usr/bin/python
...........................................................................
/usr/lib/python2.7/heapq.pyc in nlargest(n=2, iterable=3, key=None)
419 if n >= size:
420 return sorted(iterable, key=key, reverse=True)[:n]
421
422 # When key is none, use simpler decoration
423 if key is None:
--> 424 it = izip(iterable, count(0,-1)) # decorate
425 result = _nlargest(n, it)
426 return map(itemgetter(0), result) # undecorate
427
428 # General case, slowest method
TypeError: izip argument #1 must support iteration
___________________________________________________________________________
Using pre_dispatch in a producer/consumer situation, where the
data is generated on the fly. Note how the producer is first
called a 3 times before the parallel loop is initiated, and then
called to generate new data on the fly. In this case the total
number of iterations cannot be reported in the progress messages::
>>> from math import sqrt
>>> from sklearn.externals.joblib import Parallel, delayed
>>> def producer():
... for i in range(6):
... print('Produced %s' % i)
... yield i
>>> out = Parallel(n_jobs=2, verbose=100, pre_dispatch='1.5*n_jobs')(
... delayed(sqrt)(i) for i in producer()) #doctest: +SKIP
Produced 0
Produced 1
Produced 2
[Parallel(n_jobs=2)]: Done 1 jobs | elapsed: 0.0s
Produced 3
[Parallel(n_jobs=2)]: Done 2 jobs | elapsed: 0.0s
Produced 4
[Parallel(n_jobs=2)]: Done 3 jobs | elapsed: 0.0s
Produced 5
[Parallel(n_jobs=2)]: Done 4 jobs | elapsed: 0.0s
[Parallel(n_jobs=2)]: Done 5 out of 6 | elapsed: 0.0s remaining: 0.0s
[Parallel(n_jobs=2)]: Done 6 out of 6 | elapsed: 0.0s finished
'''
def __init__(self, n_jobs=1, backend='multiprocessing', verbose=0,
pre_dispatch='2 * n_jobs', batch_size='auto',
temp_folder=None, max_nbytes='1M', mmap_mode='r'):
self.verbose = verbose
self._mp_context = DEFAULT_MP_CONTEXT
if backend is None:
# `backend=None` was supported in 0.8.2 with this effect
backend = "multiprocessing"
elif hasattr(backend, 'Pool') and hasattr(backend, 'Lock'):
# Make it possible to pass a custom multiprocessing context as
# backend to change the start method to forkserver or spawn or
# preload modules on the forkserver helper process.
self._mp_context = backend
backend = "multiprocessing"
if backend not in VALID_BACKENDS:
raise ValueError("Invalid backend: %s, expected one of %r"
% (backend, VALID_BACKENDS))
self.backend = backend
self.n_jobs = n_jobs
if (batch_size == 'auto'
or isinstance(batch_size, Integral) and batch_size > 0):
self.batch_size = batch_size
else:
raise ValueError(
"batch_size must be 'auto' or a positive integer, got: %r"
% batch_size)
self.pre_dispatch = pre_dispatch
self._temp_folder = temp_folder
if isinstance(max_nbytes, _basestring):
self._max_nbytes = 1024 * memstr_to_kbytes(max_nbytes)
else:
self._max_nbytes = max_nbytes
self._mmap_mode = mmap_mode
# Not starting the pool in the __init__ is a design decision, to be
# able to close it ASAP, and not burden the user with closing it
# unless they choose to use the context manager API with a with block.
self._pool = None
self._output = None
self._jobs = list()
self._managed_pool = False
        # This lock is used to coordinate the main thread of this process with
        # the async callback thread of the pool.
self._lock = threading.Lock()
def __enter__(self):
self._managed_pool = True
self._initialize_pool()
return self
def __exit__(self, exc_type, exc_value, traceback):
self._terminate_pool()
self._managed_pool = False
def _effective_n_jobs(self):
n_jobs = self.n_jobs
if n_jobs == 0:
raise ValueError('n_jobs == 0 in Parallel has no meaning')
elif mp is None or n_jobs is None:
# multiprocessing is not available or disabled, fallback
# to sequential mode
return 1
elif n_jobs < 0:
n_jobs = max(mp.cpu_count() + 1 + n_jobs, 1)
return n_jobs
def _initialize_pool(self):
"""Build a process or thread pool and return the number of workers"""
n_jobs = self._effective_n_jobs()
# The list of exceptions that we will capture
self.exceptions = [TransportableException]
if n_jobs == 1:
# Sequential mode: do not use a pool instance to avoid any
# useless dispatching overhead
self._pool = None
elif self.backend == 'threading':
self._pool = ThreadPool(n_jobs)
elif self.backend == 'multiprocessing':
if mp.current_process().daemon:
# Daemonic processes cannot have children
self._pool = None
warnings.warn(
'Multiprocessing-backed parallel loops cannot be nested,'
' setting n_jobs=1',
stacklevel=3)
return 1
elif threading.current_thread().name != 'MainThread':
# Prevent posix fork inside in non-main posix threads
self._pool = None
warnings.warn(
'Multiprocessing backed parallel loops cannot be nested'
' below threads, setting n_jobs=1',
stacklevel=3)
return 1
else:
already_forked = int(os.environ.get(JOBLIB_SPAWNED_PROCESS, 0))
if already_forked:
raise ImportError('[joblib] Attempting to do parallel computing '
'without protecting your import on a system that does '
'not support forking. To use parallel-computing in a '
'script, you must protect your main loop using "if '
"__name__ == '__main__'"
'". Please see the joblib documentation on Parallel '
'for more information'
)
# Set an environment variable to avoid infinite loops
os.environ[JOBLIB_SPAWNED_PROCESS] = '1'
# Make sure to free as much memory as possible before forking
gc.collect()
poolargs = dict(
max_nbytes=self._max_nbytes,
mmap_mode=self._mmap_mode,
temp_folder=self._temp_folder,
verbose=max(0, self.verbose - 50),
context_id=0, # the pool is used only for one call
)
if self._mp_context is not None:
# Use Python 3.4+ multiprocessing context isolation
poolargs['context'] = self._mp_context
self._pool = MemmapingPool(n_jobs, **poolargs)
# We are using multiprocessing, we also want to capture
# KeyboardInterrupts
self.exceptions.extend([KeyboardInterrupt, WorkerInterrupt])
else:
raise ValueError("Unsupported backend: %s" % self.backend)
return n_jobs
def _terminate_pool(self):
if self._pool is not None:
self._pool.close()
self._pool.terminate() # terminate does a join()
self._pool = None
if self.backend == 'multiprocessing':
os.environ.pop(JOBLIB_SPAWNED_PROCESS, 0)
def _dispatch(self, batch):
"""Queue the batch for computing, with or without multiprocessing
WARNING: this method is not thread-safe: it should be only called
indirectly via dispatch_one_batch.
"""
# If job.get() catches an exception, it closes the queue:
if self._aborting:
return
if self._pool is None:
job = ImmediateComputeBatch(batch)
self._jobs.append(job)
self.n_dispatched_batches += 1
self.n_dispatched_tasks += len(batch)
self.n_completed_tasks += len(batch)
if not _verbosity_filter(self.n_dispatched_batches, self.verbose):
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(time.time() - self._start_time)
))
else:
dispatch_timestamp = time.time()
cb = BatchCompletionCallBack(dispatch_timestamp, len(batch), self)
job = self._pool.apply_async(SafeFunction(batch), callback=cb)
self._jobs.append(job)
self.n_dispatched_tasks += len(batch)
self.n_dispatched_batches += 1
def dispatch_next(self):
"""Dispatch more data for parallel processing
This method is meant to be called concurrently by the multiprocessing
callback. We rely on the thread-safety of dispatch_one_batch to protect
against concurrent consumption of the unprotected iterator.
"""
if not self.dispatch_one_batch(self._original_iterator):
self._iterating = False
self._original_iterator = None
def dispatch_one_batch(self, iterator):
"""Prefetch the tasks for the next batch and dispatch them.
The effective size of the batch is computed here.
If there are no more jobs to dispatch, return False, else return True.
The iterator consumption and dispatching is protected by the same
lock so calling this function should be thread safe.
"""
if self.batch_size == 'auto' and self.backend == 'threading':
# Batching is never beneficial with the threading backend
batch_size = 1
elif self.batch_size == 'auto':
old_batch_size = self._effective_batch_size
batch_duration = self._smoothed_batch_duration
if (batch_duration > 0 and
batch_duration < MIN_IDEAL_BATCH_DURATION):
# The current batch size is too small: the duration of the
# processing of a batch of task is not large enough to hide
# the scheduling overhead.
ideal_batch_size = int(
old_batch_size * MIN_IDEAL_BATCH_DURATION / batch_duration)
                # Multiply by two to limit oscillations between min and max.
batch_size = max(2 * ideal_batch_size, 1)
self._effective_batch_size = batch_size
if self.verbose >= 10:
self._print("Batch computation too fast (%.4fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
elif (batch_duration > MAX_IDEAL_BATCH_DURATION and
old_batch_size >= 2):
# The current batch size is too big. If we schedule overly long
# running batches some CPUs might wait with nothing left to do
                # while a couple of CPUs are left processing a few long-running
# batches. Better reduce the batch size a bit to limit the
# likelihood of scheduling such stragglers.
self._effective_batch_size = batch_size = old_batch_size // 2
if self.verbose >= 10:
self._print("Batch computation too slow (%.2fs.) "
"Setting batch_size=%d.", (
batch_duration, batch_size))
else:
# No batch size adjustment
batch_size = old_batch_size
if batch_size != old_batch_size:
# Reset estimation of the smoothed mean batch duration: this
# estimate is updated in the multiprocessing apply_async
# CallBack as long as the batch_size is constant. Therefore
# we need to reset the estimate whenever we re-tune the batch
# size.
self._smoothed_batch_duration = 0
else:
# Fixed batch size strategy
batch_size = self.batch_size
with self._lock:
tasks = BatchedCalls(itertools.islice(iterator, batch_size))
if not tasks:
# No more tasks available in the iterator: tell caller to stop.
return False
else:
self._dispatch(tasks)
return True
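    # Worked example of the auto batch-size tuning above (numbers are
    # illustrative and assume MIN_IDEAL_BATCH_DURATION = 0.2s and
    # MAX_IDEAL_BATCH_DURATION = 2s as defined elsewhere in this module):
    #   - old_batch_size=2,  batch_duration=0.05s (too fast):
    #       ideal_batch_size = int(2 * 0.2 / 0.05) = 8 -> batch_size = 16
    #   - old_batch_size=16, batch_duration=4.0s  (too slow):
    #       batch_size = 16 // 2 = 8
    # Whenever the batch size changes, the smoothed duration estimate is reset.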
def _print(self, msg, msg_args):
"""Display the message on stout or stderr depending on verbosity"""
# XXX: Not using the logger framework: need to
# learn to use logger better.
if not self.verbose:
return
if self.verbose < 50:
writer = sys.stderr.write
else:
writer = sys.stdout.write
msg = msg % msg_args
writer('[%s]: %s\n' % (self, msg))
def print_progress(self):
"""Display the process of the parallel execution only a fraction
of time, controlled by self.verbose.
"""
if not self.verbose:
return
elapsed_time = time.time() - self._start_time
        # This is heuristic code to print a message only 'verbose' times
# The challenge is that we may not know the queue length
if self._original_iterator:
if _verbosity_filter(self.n_dispatched_batches, self.verbose):
return
self._print('Done %3i tasks | elapsed: %s',
(self.n_completed_tasks,
short_format_time(elapsed_time),
))
else:
index = self.n_dispatched_batches
# We are finished dispatching
total_tasks = self.n_dispatched_tasks
# We always display the first loop
if not index == 0:
# Display depending on the number of remaining items
# A message as soon as we finish dispatching, cursor is 0
cursor = (total_tasks - index + 1
- self._pre_dispatch_amount)
frequency = (total_tasks // self.verbose) + 1
is_last_item = (index + 1 == total_tasks)
if (is_last_item or cursor % frequency):
return
remaining_time = (elapsed_time / (index + 1) *
(self.n_dispatched_tasks - index - 1.))
self._print('Done %3i out of %3i | elapsed: %s remaining: %s',
(index + 1,
total_tasks,
short_format_time(elapsed_time),
short_format_time(remaining_time),
))
def retrieve(self):
self._output = list()
while self._iterating or len(self._jobs) > 0:
if len(self._jobs) == 0:
# Wait for an async callback to dispatch new jobs
time.sleep(0.01)
continue
# We need to be careful: the job list can be filling up as
        # we empty it and Python lists are not thread-safe by default, hence
# the use of the lock
with self._lock:
job = self._jobs.pop(0)
try:
self._output.extend(job.get())
except tuple(self.exceptions) as exception:
# Stop dispatching any new job in the async callback thread
self._aborting = True
if isinstance(exception, TransportableException):
# Capture exception to add information on the local
# stack in addition to the distant stack
this_report = format_outer_frames(context=10,
stack_start=1)
report = """Multiprocessing exception:
%s
---------------------------------------------------------------------------
Sub-process traceback:
---------------------------------------------------------------------------
%s""" % (this_report, exception.message)
# Convert this to a JoblibException
exception_type = _mk_exception(exception.etype)[0]
exception = exception_type(report)
# Kill remaining running processes without waiting for
# the results as we will raise the exception we got back
# to the caller instead of returning any result.
with self._lock:
self._terminate_pool()
if self._managed_pool:
# In case we had to terminate a managed pool, let
# us start a new one to ensure that subsequent calls
# to __call__ on the same Parallel instance will get
# a working pool as they expect.
self._initialize_pool()
raise exception
def __call__(self, iterable):
if self._jobs:
raise ValueError('This Parallel instance is already running')
# A flag used to abort the dispatching of jobs in case an
# exception is found
self._aborting = False
if not self._managed_pool:
n_jobs = self._initialize_pool()
else:
n_jobs = self._effective_n_jobs()
if self.batch_size == 'auto':
self._effective_batch_size = 1
iterator = iter(iterable)
pre_dispatch = self.pre_dispatch
if pre_dispatch == 'all' or n_jobs == 1:
# prevent further dispatch via multiprocessing callback thread
self._original_iterator = None
self._pre_dispatch_amount = 0
else:
self._original_iterator = iterator
if hasattr(pre_dispatch, 'endswith'):
pre_dispatch = eval(pre_dispatch)
self._pre_dispatch_amount = pre_dispatch = int(pre_dispatch)
# The main thread will consume the first pre_dispatch items and
# the remaining items will later be lazily dispatched by async
# callbacks upon task completions.
iterator = itertools.islice(iterator, pre_dispatch)
self._start_time = time.time()
self.n_dispatched_batches = 0
self.n_dispatched_tasks = 0
self.n_completed_tasks = 0
self._smoothed_batch_duration = 0.0
try:
self._iterating = True
while self.dispatch_one_batch(iterator):
pass
if pre_dispatch == "all" or n_jobs == 1:
                # The iterable was consumed all at once by the above while loop.
# No need to wait for async callbacks to trigger to
# consumption.
self._iterating = False
self.retrieve()
# Make sure that we get a last message telling us we are done
elapsed_time = time.time() - self._start_time
self._print('Done %3i out of %3i | elapsed: %s finished',
(len(self._output), len(self._output),
short_format_time(elapsed_time)))
finally:
if not self._managed_pool:
self._terminate_pool()
self._jobs = list()
output = self._output
self._output = None
return output
def __repr__(self):
return '%s(n_jobs=%s)' % (self.__class__.__name__, self.n_jobs)
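# Hedged usage sketch (not part of this module): the Parallel class above is
# typically driven together with joblib's ``delayed`` helper, e.g.
#   >>> from math import sqrt
#   >>> Parallel(n_jobs=2)(delayed(sqrt)(i ** 2) for i in range(5))
#   [0.0, 1.0, 2.0, 3.0, 4.0]
# ``delayed`` wraps the function and its arguments into the (func, args,
# kwargs) tuples consumed by the batching machinery; the exact import path
# depends on how joblib is vendored.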
| bsd-3-clause |
HeraclesHX/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
ADicksonLab/wepy | src/wepy/reporter/wexplore/dashboard.py | 1 | 9109 | import os.path as osp
from collections import defaultdict
import itertools as it
import logging
from warnings import warn
from wepy.reporter.dashboard import ResamplerDashboardSection
import numpy as np
import pandas as pd
from tabulate import tabulate
class WExploreDashboardSection(ResamplerDashboardSection):
RESAMPLER_SECTION_TEMPLATE = \
"""
Resampling Algorithm: {{ name }}
Parameters:
- Max Number of Regions: {{ max_n_regions }}
- Max Region Sizes: {{ max_region_sizes }}
Number of Regions per level:
{{ regions_per_level }}
** Walker Assignments
{{ walker_table }}
** Region Hierarchy
Defined Regions with the number of child regions per parent region:
{{ region_hierarchy }}
** Leaf Region Table
{{ leaf_region_table }}
** WExplore Log
{{ wexplore_log }}
"""
def __init__(self, resampler=None,
max_n_regions=None,
max_region_sizes=None,
**kwargs
):
if 'name' not in kwargs:
kwargs['name'] = 'WExploreResampler'
super().__init__(resampler=resampler,
max_n_regions=max_n_regions,
max_region_sizes=max_region_sizes,
**kwargs
)
if resampler is not None:
self.max_n_regions = resampler.max_n_regions
self.max_region_sizes = resampler.max_region_sizes
else:
assert max_n_regions is not None, \
"If a resampler is not given must give parameters: max_n_regions"
assert max_region_sizes is not None, \
"If a resampler is not given must give parameters: max_n_regions"
self.max_n_regions = max_n_regions
self.max_region_sizes = max_region_sizes
self.n_levels = len(self.max_n_regions)
# updatables
self.root_region = ()
init_leaf_region = tuple([0 for i in range(self.n_levels)])
self.region_ids = [init_leaf_region]
self.regions_per_level = []
self.children_per_region = {}
# resampling
self.walker_assignments = []
self.walker_image_distances = []
self.curr_region_probabilities = defaultdict(int)
self.curr_region_counts = defaultdict(int)
#wexplore
self.branch_records = []
def _leaf_regions_to_all_regions(self, region_ids):
# make a set of all the regions starting with the root region
regions = set([self.root_region])
for region_id in region_ids:
for i in range(len(region_id)):
regions.add(region_id[0:i+1])
regions = list(regions)
regions.sort()
return regions
def update_values(self, **kwargs):
# the region assignments for walkers
assignments = []
walker_weights = [walker.weight for walker in kwargs['new_walkers']]
# re-initialize the current weights dictionary
self.curr_region_probabilities = defaultdict(int)
self.curr_region_counts = defaultdict(int)
for walker_record in kwargs['resampling_data']:
assignment = tuple(walker_record['region_assignment'])
walker_idx = walker_record['walker_idx'][0]
assignments.append((walker_idx, assignment))
# calculate the probabilities and counts of the regions
# given the current distribution of walkers
self.curr_region_probabilities[assignment] += walker_weights[walker_idx]
self.curr_region_counts[assignment] += 1
# sort them to get the walker indices in the right order
assignments.sort()
# then just get the assignment since it is sorted
self.walker_assignments = [assignment for walker, assignment in assignments]
# add to the records for region creation in WExplore
for resampler_record in kwargs['resampler_data']:
# get the values
new_leaf_id = tuple(resampler_record['new_leaf_id'])
branching_level = resampler_record['branching_level'][0]
walker_image_distance = resampler_record['distance'][0]
# add the new leaf id to the list of regions in the order they were created
self.region_ids.append(new_leaf_id)
# make a new record for a branching event which is:
# (region_id, level branching occurred, distance of walker that triggered the branching)
branch_record = (new_leaf_id,
branching_level,
walker_image_distance)
# save it in the records
self.branch_records.append(branch_record)
# count the number of child regions each region has
self.children_per_region = {}
all_regions = self._leaf_regions_to_all_regions(self.region_ids)
for region_id in all_regions:
# if its a leaf region it has no children
if len(region_id) == self.n_levels:
self.children_per_region[region_id] = 0
            # for all others we count how many children they have
else:
# get all regions that have this one as a root
children_idxs = set()
for poss_child_id in all_regions:
# get the root at the level of this region for the child
poss_child_root = poss_child_id[0:len(region_id)]
                    # if the roots match, this region contributes a child at
                    # the next level down; the region itself is skipped
if (poss_child_root == region_id) and (poss_child_id != region_id):
child_idx = poss_child_id[len(region_id)]
children_idxs.add(child_idx)
# count the children of this region
self.children_per_region[region_id] = len(children_idxs)
# count the number of regions at each level
self.regions_per_level = [0 for i in range(self.n_levels)]
for region_id, n_children in self.children_per_region.items():
level = len(region_id)
# skip the leaves
if level == self.n_levels:
continue
self.regions_per_level[level] += n_children
def gen_fields(self, **kwargs):
fields = super().gen_fields(**kwargs)
regions = self._leaf_regions_to_all_regions(self.region_ids)
region_children = [self.children_per_region[region] for region in regions]
region_children_pairs = it.chain(*zip(regions, region_children))
region_hierarchy = '\n'.join(
['{} {}' for i in range(len(regions))]
).format(*region_children_pairs)
# make a table for the regions
region_table_colnames = ('region', 'n_walkers', 'curr_weight')
region_table_d = {}
region_table_d['region'] = self.region_ids
region_table_d['n_walkers'] = [self.curr_region_counts[region]
for region in self.region_ids]
region_table_d['curr_weight'] = [self.curr_region_probabilities[region]
for region in self.region_ids]
leaf_region_table_df = pd.DataFrame(region_table_d,
columns=region_table_colnames)
leaf_region_table_df.set_index('region', drop=True)
leaf_region_table_str = tabulate(leaf_region_table_df,
headers=leaf_region_table_df.columns,
tablefmt='orgtbl')
# log of branching events
branching_table_colnames = ('new_leaf_id', 'branching_level', 'trigger_distance')
branching_table_df = pd.DataFrame(self.branch_records, columns=branching_table_colnames)
branching_table_str = tabulate(branching_table_df,
headers=branching_table_df.columns,
tablefmt='orgtbl')
## walker weights
walker_weights = [walker.weight for walker in kwargs['new_walkers']]
# make the table of walkers using pandas, using the order here
walker_table_colnames = ('weight', 'assignment')
walker_table_d = {}
walker_table_d['weight'] = walker_weights
walker_table_d['assignment'] = self.walker_assignments
walker_table_df = pd.DataFrame(walker_table_d, columns=walker_table_colnames)
walker_table_str = tabulate(walker_table_df,
                                    headers=walker_table_df.columns,
tablefmt='orgtbl')
new_fields = {
'max_n_regions' : self.max_n_regions,
'max_region_sizes' : self.max_region_sizes,
'regions_per_level' : self.regions_per_level,
'region_hierarchy' : region_hierarchy,
'leaf_region_table' : leaf_region_table_str,
'wexplore_log' : branching_table_str,
'walker_table' : walker_table_str
}
fields.update(new_fields)
return fields
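# Worked example (illustrative): _leaf_regions_to_all_regions maps leaf region
# ids to the full set of ancestor regions.  For a 2-level hierarchy with leaves
# [(0, 0), (0, 1), (1, 0)] it returns
#   [(), (0,), (0, 0), (0, 1), (1,), (1, 0)]
# i.e. every prefix of every leaf id (including the root ``()``), sorted, which
# is what gen_fields uses to lay out the region hierarchy table.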
| mit |
GusBus4/ardupilot | libraries/AP_Math/tools/geodesic_grid/plot.py | 110 | 2876 | # Copyright (C) 2016 Intel Corporation. All rights reserved.
#
# This file is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This file is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program. If not, see <http://www.gnu.org/licenses/>.
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from mpl_toolkits.mplot3d import Axes3D
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
import icosahedron as ico
import grid
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.set_xlim3d(-2, 2)
ax.set_ylim3d(-2, 2)
ax.set_zlim3d(-2, 2)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.invert_zaxis()
ax.invert_xaxis()
ax.set_aspect('equal')
added_polygons = set()
added_sections = set()
def polygons(polygons):
for p in polygons:
polygon(p)
def polygon(polygon):
added_polygons.add(polygon)
def section(s):
added_sections.add(s)
def sections(sections):
for s in sections:
section(s)
def show(subtriangles=False):
polygons = []
facecolors = []
triangles_indexes = set()
subtriangle_facecolors = (
'#CCCCCC',
'#CCE5FF',
'#E5FFCC',
'#FFCCCC',
)
if added_sections:
subtriangles = True
for p in added_polygons:
try:
i = ico.triangles.index(p)
except ValueError:
polygons.append(p)
continue
if subtriangles:
sections(range(i * 4, i * 4 + 4))
else:
triangles_indexes.add(i)
polygons.append(p)
facecolors.append('#DDDDDD')
for s in added_sections:
triangles_indexes.add(int(s / 4))
subtriangle_index = s % 4
polygons.append(grid.section_triangle(s))
facecolors.append(subtriangle_facecolors[subtriangle_index])
ax.add_collection3d(Poly3DCollection(
polygons,
facecolors=facecolors,
edgecolors="#777777",
))
for i in triangles_indexes:
t = ico.triangles[i]
mx = my = mz = 0
for x, y, z in t:
mx += x
my += y
mz += z
ax.text(mx / 2.6, my / 2.6, mz / 2.6, i, color='#444444')
if subtriangles:
ax.legend(
handles=tuple(
mpatches.Patch(color=c, label='Sub-triangle #%d' % i)
for i, c in enumerate(subtriangle_facecolors)
),
)
plt.show()
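# Hedged usage sketch: this module is meant to be driven interactively from the
# geodesic_grid tools, along the lines of
#   >>> polygon(ico.triangles[0])     # highlight icosahedron triangle 0
#   >>> sections(range(0, 4))         # its four grid sub-triangles
#   >>> show(subtriangles=True)
# The exact call sequence is an assumption; see the surrounding tools for the
# intended workflow.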
| gpl-3.0 |
meduz/scikit-learn | examples/linear_model/plot_logistic_l1_l2_sparsity.py | 384 | 2601 | """
==============================================
L1 Penalty and Sparsity in Logistic Regression
==============================================
Comparison of the sparsity (percentage of zero coefficients) of solutions when
L1 and L2 penalty are used for different values of C. We can see that large
values of C give more freedom to the model. Conversely, smaller values of C
constrain the model more. In the L1 penalty case, this leads to sparser
solutions.
We classify 8x8 images of digits into two classes: 0-4 against 5-9.
The visualization shows coefficients of the models for varying C.
"""
print(__doc__)
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
digits = datasets.load_digits()
X, y = digits.data, digits.target
X = StandardScaler().fit_transform(X)
# classify small against large digits
y = (y > 4).astype(np.int)
# Set regularization parameter
for i, C in enumerate((100, 1, 0.01)):
# turn down tolerance for short training time
clf_l1_LR = LogisticRegression(C=C, penalty='l1', tol=0.01)
clf_l2_LR = LogisticRegression(C=C, penalty='l2', tol=0.01)
clf_l1_LR.fit(X, y)
clf_l2_LR.fit(X, y)
coef_l1_LR = clf_l1_LR.coef_.ravel()
coef_l2_LR = clf_l2_LR.coef_.ravel()
# coef_l1_LR contains zeros due to the
# L1 sparsity inducing norm
sparsity_l1_LR = np.mean(coef_l1_LR == 0) * 100
sparsity_l2_LR = np.mean(coef_l2_LR == 0) * 100
print("C=%.2f" % C)
print("Sparsity with L1 penalty: %.2f%%" % sparsity_l1_LR)
print("score with L1 penalty: %.4f" % clf_l1_LR.score(X, y))
print("Sparsity with L2 penalty: %.2f%%" % sparsity_l2_LR)
print("score with L2 penalty: %.4f" % clf_l2_LR.score(X, y))
l1_plot = plt.subplot(3, 2, 2 * i + 1)
l2_plot = plt.subplot(3, 2, 2 * (i + 1))
if i == 0:
l1_plot.set_title("L1 penalty")
l2_plot.set_title("L2 penalty")
l1_plot.imshow(np.abs(coef_l1_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
l2_plot.imshow(np.abs(coef_l2_LR.reshape(8, 8)), interpolation='nearest',
cmap='binary', vmax=1, vmin=0)
plt.text(-8, 3, "C = %.2f" % C)
l1_plot.set_xticks(())
l1_plot.set_yticks(())
l2_plot.set_xticks(())
l2_plot.set_yticks(())
plt.show()
| bsd-3-clause |
nicproulx/mne-python | examples/decoding/plot_decoding_spatio_temporal_source.py | 3 | 5916 | """
==========================
Decoding source space data
==========================
Decoding, a.k.a MVPA or supervised machine learning applied to MEG
data in source space on the left cortical surface. Here f-test feature
selection is employed to confine the classification to the potentially
relevant features. The classifier then is trained to selected features of
epochs in source space.
"""
# Author: Denis A. Engemann <denis.engemann@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
#
# License: BSD (3-clause)
import mne
import os
import numpy as np
from mne import io
from mne.datasets import sample
from mne.minimum_norm import apply_inverse_epochs, read_inverse_operator
print(__doc__)
data_path = sample.data_path()
fname_fwd = data_path + 'MEG/sample/sample_audvis-meg-oct-6-fwd.fif'
fname_evoked = data_path + '/MEG/sample/sample_audvis-ave.fif'
subjects_dir = data_path + '/subjects'
subject = os.environ['SUBJECT'] = subjects_dir + '/sample'
os.environ['SUBJECTS_DIR'] = subjects_dir
###############################################################################
# Set parameters
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
event_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw-eve.fif'
fname_cov = data_path + '/MEG/sample/sample_audvis-cov.fif'
label_names = 'Aud-rh', 'Vis-rh'
fname_inv = data_path + '/MEG/sample/sample_audvis-meg-oct-6-meg-inv.fif'
tmin, tmax = -0.2, 0.5
event_id = dict(aud_r=2, vis_r=4) # load contra-lateral conditions
# Setup for reading the raw data
raw = io.read_raw_fif(raw_fname, preload=True)
raw.filter(2, None) # replace baselining with high-pass
events = mne.read_events(event_fname)
# Set up pick list: MEG - bad channels (modify to your needs)
raw.info['bads'] += ['MEG 2443'] # mark bads
picks = mne.pick_types(raw.info, meg=True, eeg=False, stim=True, eog=True,
exclude='bads')
# Read epochs
epochs = mne.Epochs(raw, events, event_id, tmin, tmax, proj=True,
picks=picks, baseline=None, preload=True,
reject=dict(grad=4000e-13, eog=150e-6),
decim=5) # decimate to save memory and increase speed
epochs.equalize_event_counts(list(event_id.keys()))
epochs_list = [epochs[k] for k in event_id]
# Compute inverse solution
snr = 3.0
lambda2 = 1.0 / snr ** 2
method = "dSPM" # use dSPM method (could also be MNE or sLORETA)
n_times = len(epochs.times)
n_vertices = 3732
n_epochs = len(epochs.events)
# Load data and compute inverse solution and stcs for each epoch.
noise_cov = mne.read_cov(fname_cov)
inverse_operator = read_inverse_operator(fname_inv)
X = np.zeros([n_epochs, n_vertices, n_times])
# to save memory, we'll load and transform our epochs step by step.
for condition_count, ep in zip([0, n_epochs // 2], epochs_list):
stcs = apply_inverse_epochs(ep, inverse_operator, lambda2,
method, pick_ori="normal", # saves us memory
return_generator=True)
for jj, stc in enumerate(stcs):
X[condition_count + jj] = stc.lh_data
###############################################################################
# Decoding in sensor space using a linear SVM
# Make arrays X and y such that :
# X is 3d with X.shape[0] is the total number of epochs to classify
# y is filled with integers coding for the class to predict
# We must have X.shape[0] equal to y.shape[0]
# we know the first half of the epochs belongs to the first class and the
# second half to the second class
y = np.repeat([0, 1], len(X) / 2)
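# For example, np.repeat([0, 1], 3) gives array([0, 0, 0, 1, 1, 1]), so y is a
# block of zeros followed by a block of ones, matching the epoch ordering above.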
X = X.reshape(n_epochs, n_vertices * n_times)
# we have to normalize the data before supplying them to our classifier
X -= X.mean(axis=0)
X /= X.std(axis=0)
# prepare classifier
from sklearn.svm import SVC # noqa
from sklearn.cross_validation import ShuffleSplit # noqa
# Define a monte-carlo cross-validation generator (reduce variance):
n_splits = 10
clf = SVC(C=1, kernel='linear')
cv = ShuffleSplit(len(X), n_splits, test_size=0.2)
# setup feature selection and classification pipeline
from sklearn.feature_selection import SelectKBest, f_classif # noqa
from sklearn.pipeline import Pipeline # noqa
# we will use an ANOVA f-test to preselect relevant spatio-temporal units
feature_selection = SelectKBest(f_classif, k=500) # take the best 500
# to make life easier we will create a pipeline object
anova_svc = Pipeline([('anova', feature_selection), ('svc', clf)])
# initialize score and feature weights result arrays
scores = np.zeros(n_splits)
feature_weights = np.zeros([n_vertices, n_times])
# hold on, this may take a moment
for ii, (train, test) in enumerate(cv):
anova_svc.fit(X[train], y[train])
y_pred = anova_svc.predict(X[test])
y_test = y[test]
scores[ii] = np.sum(y_pred == y_test) / float(len(y_test))
feature_weights += feature_selection.inverse_transform(clf.coef_) \
.reshape(n_vertices, n_times)
print('Average prediction accuracy: %0.3f | standard deviation: %0.3f'
% (scores.mean(), scores.std()))
# prepare feature weights for visualization
feature_weights /= (ii + 1) # create average weights
# create mask to avoid division error
feature_weights = np.ma.masked_array(feature_weights, feature_weights == 0)
# normalize scores for visualization purposes
feature_weights /= feature_weights.std(axis=1)[:, None]
feature_weights -= feature_weights.mean(axis=1)[:, None]
# unmask, take absolute values, emulate f-value scale
feature_weights = np.abs(feature_weights.data) * 10
vertices = [stc.lh_vertno, np.array([], int)] # empty array for right hemi
stc_feat = mne.SourceEstimate(feature_weights, vertices=vertices,
tmin=stc.tmin, tstep=stc.tstep,
subject='sample')
brain = stc_feat.plot(views=['lat'], transparent=True,
initial_time=0.1, time_unit='s')
| bsd-3-clause |
ZhiangChen/soft_arm | src/tracking_pfn.py | 1 | 9700 | #!/usr/bin/env python
"""
PFN tracking of a real target
"""
from pf_network import *
import rospy
import numpy as np
from geometry_msgs.msg import PoseArray as PA
from geometry_msgs.msg import Vector3
from geometry_msgs.msg import PoseStamped as PS
from soft_arm.srv import *
from sensor_msgs.msg import PointCloud as PC
from geometry_msgs.msg import Point
import pickle
from simulator import Sim
import matplotlib.pyplot as plt
np.random.seed(0)
tf.set_random_seed(0)
MAX_EPISODES = 1
MAX_EP_STEPS = 200
X_OFFSET = 0.0917
Y_OFFSET = -0.4439
Z_OFFSET = 0.039
S_DIM = 3
A_DIM = 3
A_BOUND = 10.0
TRAIN_POINT = 2000
MEMORY_NUM = 2000
class Trainer(object):
def __init__(self):
""" Initializing DDPG """
self.sim = Sim()
self.pfn = PFN(a_dim=A_DIM, s_dim=S_DIM, batch_size=8,
memory_capacity=MEMORY_NUM, lr=0.001, bound=A_BOUND)
self.ep_reward = 0.0
self.current_action = np.array([.0, .0, .0])
self.done = True # if the episode is done
self.reward_record = list()
self.ep_record = list()
self.fig = plt.gcf()
self.fig.show()
self.fig.canvas.draw()
print("Initialized PFN")
""" Setting communication"""
self.pc = PC()
self.pc.header.frame_id = 'world'
self.pub = rospy.Publisher('normalized_state', PC, queue_size=10)
self.pub1 = rospy.Publisher('state', PC, queue_size=10)
self.sub = rospy.Subscriber('Robot_1/pose', PS, self.callback, queue_size=1)
rospy.wait_for_service('airpress_control', timeout=5)
self.target_PS = PS()
self.action_V3 = Vector3()
self.updated = False # if s is updated
self.got_target = False
print("Initialized communication")
""" Reading targets """
""" The data should be w.r.t origin by base position """
self.ends = pickle.load(open('./data/targets.p', 'rb'))
self.x_offset = X_OFFSET
self.y_offset = Y_OFFSET
self.z_offset = Z_OFFSET
self.sample_target()
print("Read target data")
#self.pfn.restore_momery()
self.pfn.restore_model('model_pfn')
memory_ep = np.ones((MAX_EP_STEPS, 3 + 3 + 1 + 1)) * -100
self.current_ep = 0
self.current_step = 0
while not (rospy.is_shutdown()):
self.updated = False
while (not self.updated) & (not rospy.is_shutdown()):
rospy.sleep(0.1)
real_target = self.real_target.copy()
s = self.normalize_state(real_target)
action, act_var = self.pfn.choose_action2(s)
self.action_V3.x, self.action_V3.y, self.action_V3.z \
= action[0], action[1], action[2]
self.run_action(self.action_V3)
print '\n'
#rospy.sleep(1.0)
'''
if self.current_ep < MAX_EPISODES:
if self.current_step < MAX_EP_STEPS:
#rospy.sleep(0.5)
s = self.normalize_state(self.target)
#print 'x'
#print s
action, prob = self.pfn.choose_action(s)
s_ = self.sim.update_pose(action)[-1,:]
self.compute_reward(self.target, s_)
#print action
#print self.pfn.raw_action(s)
#print self.target
#print s_
#print np.linalg.norm(self.target - s_)
#print self.reward
transition = np.hstack((self.target, action, self.reward, prob))
memory_ep[self.current_step] = transition
self.current_step += 1
state = np.array([s_, self.target])
self.pub_state(state)
if self.pfn.pointer > TRAIN_POINT:
self.pfn.learn()
else:
best_i = np.argsort(memory_ep[:,-2])[-1]
best_action = memory_ep[best_i,3:6]
best_s_ = self.sim.update_pose(best_action)[-1, :]
best_distance = np.linalg.norm(self.target - best_s_)
""" #best action
print '*'
j = np.argsort(memory_ep[:,-2])[0]
print memory_ep[best_i, :]
print memory_ep[j, :]
print best_s_
print self.target
print best_action
print best_distance
"""
self.current_step = 0
mean_action, act_var = self.pfn.choose_action2(s)
mean_s_ = self.sim.update_pose(mean_action)[-1, :]
mean_distance = np.linalg.norm(mean_s_ - self.target)
target_action,w = self.compute_weighted_action(memory_ep)
s_ = self.sim.update_pose(target_action)[-1,:]
target_distance = np.linalg.norm(s_ - self.target)
self.compute_reward(self.target, s_)
self.pfn.store_transition(s, target_action, mean_distance, np.var(w))
self.pfn.store_transition(s, best_action, best_distance, w[best_i])
self.current_ep += 1
self.sample_target()
memory_ep = np.ones((MAX_EP_STEPS, 3 + 3 + 1 + 1)) * -100
if self.current_ep% 10 ==0:
self.reward_record.append(mean_distance)
self.ep_record.append(self.current_ep)
plt.plot(self.ep_record, self.reward_record)
plt.ylim([0.0, 0.1])
self.fig.canvas.draw()
self.fig.savefig('learning.png')
print('\n')
#print s
print('Episode:', self.current_ep)
print("Mean Action:")
print mean_action
print("Mean Distance:")
print mean_distance
print("Action Variance:")
print act_var
print("Target Action:")
print target_action
print("Target Distance:")
print target_distance
print("Weights Variance:")
print np.var(w)
print('*' * 40)
self.pfn.save_model()
self.pfn.save_memory()
else:
s = self.normalize_state(self.target)
action, act_var = self.pfn.choose_action2(s)
s_ = self.sim.update_pose(action)[-1,:]
self.compute_reward(self.target, s_)
print("Mean Action:")
print action
print("Action Variance:")
print act_var
print("Distance: %f" % np.linalg.norm(self.target-s_))
print("Reward: %f" % self.reward)
print '\n'
state = np.array([s_, self.target])
self.pub_state(state)
rospy.sleep(1)
self.sample_target()
'''
def callback(self, ps):
x = ps.pose.position.x - self.x_offset
y = ps.pose.position.y - self.y_offset
z = ps.pose.position.z - self.z_offset
self.real_target = np.array([x,y,z])
self.updated = True
def run_action(self,control):
try:
print control
client = rospy.ServiceProxy('airpress_control', OneSeg)
resp = client(control)
return resp.status
except rospy.ServiceException, e:
print "Service call failed: %s"%e
def compute_weighted_action(self, memory_ep):
sum = np.sum(memory_ep[:,-2]) + 1e-6
try:
w = memory_ep[:,-2]/sum
except:
print("Sum %f" % sum)
print w
target_action = np.average(memory_ep[:,S_DIM:S_DIM+A_DIM], axis=0, weights=w)
return target_action, w
def sample_target(self):
self.target = self.ends[np.random.randint(self.ends.shape[0])]
def compute_reward(self,end,target):
error = target - end
self.reward = 20**(-np.log2(2.5*np.linalg.norm(error)))
#print np.linalg.norm(error)
def pub_state(self, state):
pts = list()
for i in range(state.shape[0]):
pt = Point()
pt.x = state[i,0]
pt.y = state[i,1]
pt.z = state[i,2]
pts.append(pt)
self.pc.points = pts
self.pub.publish(self.pc)
pts = list()
for i in range(state.shape[0]):
pt = Point()
pt.x = state[i, 0] / 10.0
pt.y = state[i, 1] / 10.0
pt.z = state[i, 2] / 35.0 + 0.42
pts.append(pt)
self.pc.points = pts
self.pub1.publish(self.pc)
def normalize_state(self,state):
offset = np.array([0,0,0.42])
scaler = np.array([10,10,35])
s = state - offset
s = np.multiply(s, scaler)
return s
def calculate_dist(self, state):
offset = np.array([0, 0, 0.42])
scaler = np.array([10, 10, 35])
s = np.multiply(state,1.0/scaler)
s += offset
return np.linalg.norm(s)
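    # Worked example (illustrative): normalize_state maps a workspace position
    # into the network input range by subtracting the base offset and scaling
    # each axis, e.g.
    #   >>> t = Trainer.__new__(Trainer)   # sketch only, skips the ROS setup
    #   >>> t.normalize_state(np.array([0.05, -0.02, 0.45]))  # -> [0.5, -0.2, 1.05]
    # since (0.45 - 0.42) * 35 = 1.05; pub_state applies the inverse transform
    # before publishing the raw state point cloud.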
if __name__ == '__main__':
rospy.init_node('trainer',anonymous=True)
trainer = Trainer()
print("Shutting down ROS node trainer") | mit |
shakamunyi/tensorflow | tensorflow/contrib/timeseries/examples/lstm.py | 13 | 9268 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A more advanced example, of building an RNN-based time series model."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from os import path
import numpy
import tensorflow as tf
from tensorflow.contrib.timeseries.python.timeseries import estimators as ts_estimators
from tensorflow.contrib.timeseries.python.timeseries import model as ts_model
try:
import matplotlib # pylint: disable=g-import-not-at-top
matplotlib.use("TkAgg") # Need Tk for interactive plots.
from matplotlib import pyplot # pylint: disable=g-import-not-at-top
HAS_MATPLOTLIB = True
except ImportError:
# Plotting requires matplotlib, but the unit test running this code may
# execute in an environment without it (i.e. matplotlib is not a build
# dependency). We'd still like to test the TensorFlow-dependent parts of this
# example.
HAS_MATPLOTLIB = False
_MODULE_PATH = path.dirname(__file__)
_DATA_FILE = path.join(_MODULE_PATH, "data/multivariate_periods.csv")
class _LSTMModel(ts_model.SequentialTimeSeriesModel):
"""A time series model-building example using an RNNCell."""
def __init__(self, num_units, num_features, dtype=tf.float32):
"""Initialize/configure the model object.
Note that we do not start graph building here. Rather, this object is a
configurable factory for TensorFlow graphs which are run by an Estimator.
Args:
num_units: The number of units in the model's LSTMCell.
num_features: The dimensionality of the time series (features per
timestep).
dtype: The floating point data type to use.
"""
super(_LSTMModel, self).__init__(
# Pre-register the metrics we'll be outputting (just a mean here).
train_output_names=["mean"],
predict_output_names=["mean"],
num_features=num_features,
dtype=dtype)
self._num_units = num_units
# Filled in by initialize_graph()
self._lstm_cell = None
self._lstm_cell_run = None
self._predict_from_lstm_output = None
def initialize_graph(self, input_statistics):
"""Save templates for components, which can then be used repeatedly.
This method is called every time a new graph is created. It's safe to start
adding ops to the current default graph here, but the graph should be
constructed from scratch.
Args:
input_statistics: A math_utils.InputStatistics object.
"""
super(_LSTMModel, self).initialize_graph(input_statistics=input_statistics)
self._lstm_cell = tf.nn.rnn_cell.LSTMCell(num_units=self._num_units)
# Create templates so we don't have to worry about variable reuse.
self._lstm_cell_run = tf.make_template(
name_="lstm_cell",
func_=self._lstm_cell,
create_scope_now_=True)
# Transforms LSTM output into mean predictions.
self._predict_from_lstm_output = tf.make_template(
name_="predict_from_lstm_output",
func_=
lambda inputs: tf.layers.dense(inputs=inputs, units=self.num_features),
create_scope_now_=True)
def get_start_state(self):
"""Return initial state for the time series model."""
return (
# Keeps track of the time associated with this state for error checking.
tf.zeros([], dtype=tf.int64),
# The previous observation or prediction.
tf.zeros([self.num_features], dtype=self.dtype),
# The state of the RNNCell (batch dimension removed since this parent
# class will broadcast).
[tf.squeeze(state_element, axis=0)
for state_element
in self._lstm_cell.zero_state(batch_size=1, dtype=self.dtype)])
def _filtering_step(self, current_times, current_values, state, predictions):
"""Update model state based on observations.
Note that we don't do much here aside from computing a loss. In this case
it's easier to update the RNN state in _prediction_step, since that covers
running the RNN both on observations (from this method) and our own
predictions. This distinction can be important for probabilistic models,
where repeatedly predicting without filtering should lead to low-confidence
predictions.
Args:
current_times: A [batch size] integer Tensor.
current_values: A [batch size, self.num_features] floating point Tensor
with new observations.
state: The model's state tuple.
predictions: The output of the previous `_prediction_step`.
Returns:
A tuple of new state and a predictions dictionary updated to include a
loss (note that we could also return other measures of goodness of fit,
although only "loss" will be optimized).
"""
state_from_time, prediction, lstm_state = state
with tf.control_dependencies(
[tf.assert_equal(current_times, state_from_time)]):
# Subtract the mean and divide by the variance of the series. Slightly
# more efficient if done for a whole window (using the normalize_features
# argument to SequentialTimeSeriesModel).
transformed_values = self._scale_data(current_values)
# Use mean squared error across features for the loss.
predictions["loss"] = tf.reduce_mean(
(prediction - transformed_values) ** 2, axis=-1)
# Keep track of the new observation in model state. It won't be run
# through the LSTM until the next _imputation_step.
new_state_tuple = (current_times, transformed_values, lstm_state)
return (new_state_tuple, predictions)
def _prediction_step(self, current_times, state):
"""Advance the RNN state using a previous observation or prediction."""
_, previous_observation_or_prediction, lstm_state = state
lstm_output, new_lstm_state = self._lstm_cell_run(
inputs=previous_observation_or_prediction, state=lstm_state)
next_prediction = self._predict_from_lstm_output(lstm_output)
new_state_tuple = (current_times, next_prediction, new_lstm_state)
return new_state_tuple, {"mean": self._scale_back_data(next_prediction)}
def _imputation_step(self, current_times, state):
"""Advance model state across a gap."""
# Does not do anything special if we're jumping across a gap. More advanced
# models, especially probabilistic ones, would want a special case that
# depends on the gap size.
return state
def _exogenous_input_step(
self, current_times, current_exogenous_regressors, state):
"""Update model state based on exogenous regressors."""
raise NotImplementedError(
"Exogenous inputs are not implemented for this example.")
def train_and_predict(csv_file_name=_DATA_FILE, training_steps=200):
"""Train and predict using a custom time series model."""
# Construct an Estimator from our LSTM model.
estimator = ts_estimators.TimeSeriesRegressor(
model=_LSTMModel(num_features=5, num_units=128),
optimizer=tf.train.AdamOptimizer(0.001))
reader = tf.contrib.timeseries.CSVReader(
csv_file_name,
column_names=((tf.contrib.timeseries.TrainEvalFeatures.TIMES,)
+ (tf.contrib.timeseries.TrainEvalFeatures.VALUES,) * 5))
train_input_fn = tf.contrib.timeseries.RandomWindowInputFn(
reader, batch_size=4, window_size=32)
estimator.train(input_fn=train_input_fn, steps=training_steps)
evaluation_input_fn = tf.contrib.timeseries.WholeDatasetInputFn(reader)
evaluation = estimator.evaluate(input_fn=evaluation_input_fn, steps=1)
# Predict starting after the evaluation
(predictions,) = tuple(estimator.predict(
input_fn=tf.contrib.timeseries.predict_continuation_input_fn(
evaluation, steps=100)))
times = evaluation["times"][0]
observed = evaluation["observed"][0, :, :]
predicted_mean = numpy.squeeze(numpy.concatenate(
[evaluation["mean"][0], predictions["mean"]], axis=0))
all_times = numpy.concatenate([times, predictions["times"]], axis=0)
return times, observed, all_times, predicted_mean
def main(unused_argv):
if not HAS_MATPLOTLIB:
raise ImportError(
"Please install matplotlib to generate a plot from this example.")
(observed_times, observations,
all_times, predictions) = train_and_predict()
pyplot.axvline(99, linestyle="dotted")
observed_lines = pyplot.plot(
observed_times, observations, label="Observed", color="k")
predicted_lines = pyplot.plot(
all_times, predictions, label="Predicted", color="b")
pyplot.legend(handles=[observed_lines[0], predicted_lines[0]],
loc="upper left")
pyplot.show()
if __name__ == "__main__":
tf.app.run(main=main)
| apache-2.0 |
siutanwong/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings and using a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular note that
a datapoint (represented as a dot) is not necessarily classified
into the region it is lying in, since it will not lie on the plane
that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier to this data, we need to flatten the images, to
# turn the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples / 2], digits.target[:n_samples / 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples / 2:], digits.target[n_samples / 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for the timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
# title for the plots
titles = ['SVC with rbf kernel',
'SVC (linear kernel)\n with Fourier rbf feature map\n'
'n_components=100',
'SVC (linear kernel)\n with Nystroem rbf feature map\n'
'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
achim1/HErmes | HErmes/visual/plotting.py | 2 | 42350 | """
Define some plotting functions and helpers.
"""
from __future__ import print_function
from builtins import range
from builtins import object
from copy import deepcopy as copy
import numpy as n
import numpy as np
import dashi as d
import pylab as p
import matplotlib.ticker
from hepbasestack.colors import get_color_palette
#from .colors import get_color_palette
from .canvases import YStackedCanvas
from ..utils import Logger
from ..utils import flatten
from .. import fitting as fit
d.visual()
###############################################
def create_arrow(ax, x_0, y_0, dx, dy, length,\
width = .1, shape="right",\
fc="k", ec="k",\
alpha=1., log=False):
"""
Create an arrow object for plots. This is typically a large
arrow, which can used to indicate a region in the plot which
is excluded by a cut.
Args:
ax (matplotlib.axes._subplots.AxesSubplot): The axes where the arrow
will be attached to
x_0 (float): x-origin of the arrow
y_0 (float): y-origin of the arrow
dx (float): x length of the arrow
dy (float): y length of the arrow
length (float): additional scaling parameter to scale
the length of the arrow
Keyword Args:
width (float): thickness of arrow
shape (str): either "full", "left" or "right"
fc (str): facecolor
ec (str): edgecolor
alpha (float): 0 -1 alpha value of the arrow
        log (bool): If True (log scale), the proportions of the arrow will be adjusted accordingly.
Returns:
matplotlib.axes._subplots.AxesSubplot
"""
head_starts_at_zero = False
head_width = width*5
head_length = length*0.1
if log: # slightly bigger arrow
head_width = width*6
head_length = length*0.2
arrow_params={'length_includes_head':False,\
'shape':shape,\
'head_starts_at_zero':head_starts_at_zero}
arr = ax.arrow(x_0, y_0, dx*length,\
dy*length, fc=fc, ec=ec,\
alpha=alpha, width=width,\
head_width=head_width,\
head_length=head_length,\
**arrow_params)
return ax
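# Hedged usage sketch (assumed plot ranges): mark a cut at x = 2.5 with an
# arrow pointing into the rejected region, e.g.
#   >>> import pylab as p
#   >>> ax = p.gca()
#   >>> ax.axvline(2.5, color="k")
#   >>> ax = create_arrow(ax, 2.5, 0.5, 1., 0., 5., width=0.02, fc="r", ec="r")
# dx/dy set the direction while ``length`` scales how far the arrow reaches
# into the plot.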
###############################################
def meshgrid(xs, ys):
"""
Create x and y data for matplotlib pcolormesh and
similar plotting functions.
Args:
xs (np.ndarray): 1d x bins
        ys (np.ndarray): 1d y bins
Returns:
tuple (np.ndarray, np.ndarray, np.ndarray): 2d X and 2d Y matrices as well as a placeholder for the Z array
"""
xlen = len(xs)
ylen = len(ys)
allx, ally = [], []
# prepare xs
for __ in range(ylen):
allx.append(xs)
allx = np.array(allx)
allx = allx.T
# prepare ys
for __ in range(xlen):
ally.append(ys)
ally = np.array(ally)
zs = np.zeros([xlen, ylen])
return allx, ally, zs
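# Hedged usage sketch: the returned arrays can be fed to pcolormesh once the Z
# placeholder has been filled, e.g.
#   >>> import numpy as np, pylab as p
#   >>> X, Y, Z = meshgrid(np.linspace(0, 1, 10), np.linspace(0, 2, 20))
#   >>> Z[:] = np.sin(X) * np.cos(Y)
#   >>> p.pcolormesh(X, Y, Z)
# X, Y and Z all have shape (len(xs), len(ys)) here.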
###############################################
def line_plot(quantities,
bins=None,
xlabel='',
add_ratio=None,
ratiolabel='',
colors=None,
figure_factory=None):
"""
Args:
quantities:
Keyword Args:
bins:
xlabel:
add_ratio (tuple): (["data1"],["data2"])
ratiolabel (str):
colors:
figure_factory (callable): Factory function returning matplotolib.Figure
Returns:
"""
# FIXME XXX under development
raise NotImplementedError
if add_ratio is not None:
canvas = YStackedCanvas(subplot_yheights=(0.2, 0.7),
space_between_plots=0.0)
ax0 = canvas.select_axes(-1)
data = np.array(quantities[add_ratio[0]]) / np.array(quantities[add_ratio[1]])
data = data * 100
thebins = np.array(bins[add_ratio[0]])
bin_size = abs(thebins[1] - thebins[0]) / 2 * np.ones(len(thebins))
thebins = thebins + bin_size
ax0.plot(thebins, data, color='gray')
ax0.scatter(thebins, data, marker='o', s=50, color='gray')
ax0.grid(1)
ax0.set_ylabel(ratiolabel)
ax0.spines['top'].set_visible(False)
ax0.spines['bottom'].set_visible(False)
ax0.spines['right'].set_visible(False)
ax = canvas.select_axes(0)
lgax = ax0
else:
if figure_factory is None:
fig = p.figure()
else:
fig = figure_factory()
ax = fig.gca()
lgax = ax
for reconame in quantities:
thebins = np.array(bins[reconame])
bin_size = abs(thebins[1] - thebins[0]) / 2 * np.ones(len(thebins))
thebins = thebins + bin_size
label = reconame.replace('-', '')
if colors is not None:
ax.plot(thebins, quantities[reconame], label=label, color=colors[reconame])
ax.scatter(thebins, quantities[reconame], marker='o', s=50, c=colors[reconame])
else:
ax.plot(thebins, quantities[reconame], label=label)
ax.scatter(thebins, quantities[reconame], marker='o', s=50)
legend_kwargs = {'bbox_to_anchor': [0.0, 1.0, 1.0, 0.102],'loc': 3,
'frameon': False,
'ncol': 2,
'borderaxespad': 0,
'mode': 'expand',
'handlelength': 2,
'numpoints': 1
}
if len(quantities.keys()) == 3:
legend_kwargs['ncol'] = 3
ax.grid(1)
ax.set_ylabel('$\\cos(\\Psi)$')
ax.set_xlabel(xlabel)
sb.despine()
if add_ratio:
ax.spines['top'].set_visible(False)
ax0.spines['bottom'].set_visible(False)
ax0.spines['left'].set_visible(False)
legend_kwargs['bbox_to_anchor'] = [0, 1.3, 1.0, 0.102]
ax.legend(**legend_kwargs)
p.subplots_adjust(hspace=0.2)
return canvas.figure
ax.legend(**legend_kwargs)
return fig
###############################################
def gaussian_model_fit(data,
startparams=(0,0.2),
fitrange=((None,None), (None,None)),
fig=None,
norm=True,
bins=80,
xlabel='$\\theta_{{rec}} - \\theta_{{true}}$'):
"""
A plot with a gaussian fitted to data. A histogram of the data will be created and a gaussian
will be fitted, with 68 and 95 percentiles indicated in the plot.
Args:
data (array-like) : input data with a (preferably) gaussian distribution
Keyword Args:
startparams (tuple) : a set of startparams of the gaussian fit. If only
mu/sigma are given, then the plot will be normalized
fig (matplotlib.Figure) : pre-created figure to draw the plot in
bins (array-like or int) : bins for the underliying histogram
        fitrange (tuple(min, max)) : min-max range for the gaussian fit
        xlabel (str)               : label for the x-axis
"""
if len(startparams) == 3:
tofit = lambda x,mean,sigma,amp : amp*fit.gauss(x,mean,sigma)
else:
tofit = fit.gauss
mod = fit.Model(tofit, startparams=startparams)
mod.add_data(data, create_distribution=True, bins=bins, normalize=norm)
limits = []
notempty = False
for k in fitrange:
thislimit = []
for j in k:
if j is None:
continue
else:
notempty = True
thislimit.append(j)
limits.append(tuple(thislimit))
limits = tuple(limits)
print (limits)
if notempty:
mod.fit_to_data(limits=limits)
else:
mod.fit_to_data()
thecolors = get_color_palette()
fig = mod.plot_result(log=False, xlabel=xlabel, add_parameter_text=(
('$\\mu$& {:4.2e}\\\\', 0), ('$\\sigma$& {:4.2e}\\\\', 1)), datacolor=thecolors[3], modelcolor=thecolors[3], histostyle='line', model_alpha=0.7, fig=fig)
ax = fig.gca()
ax.grid(1)
ax.set_ylim(ymax=1.1 * max(mod.data))
upper68 = mod.distribution.stats.mean + mod.distribution.stats.std
lower68 = mod.distribution.stats.mean - mod.distribution.stats.std
lower95 = mod.distribution.stats.mean - 2 * mod.distribution.stats.std
upper95 = mod.distribution.stats.mean + 2 * mod.distribution.stats.std
ax.axvspan(lower68, upper68, facecolor=thecolors[8], alpha=0.7, ec='none')
ax.axvspan(lower95, upper95, facecolor=thecolors[8], alpha=0.3, ec='none')
ax.text(lower68 * 0.9, max(mod.data) * 0.98, '68\\%', color=thecolors[3], fontsize=20)
ax.text(lower95 * 0.9, max(mod.data) * 0.85, '95\\%', color=thecolors[3], fontsize=20)
return (mod, fig)
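# Hedged usage sketch (synthetic data, names are illustrative):
#   >>> import numpy as np
#   >>> residuals = np.random.normal(0., 0.2, 10000)
#   >>> model, fig = gaussian_model_fit(residuals, startparams=(0., 0.2), bins=80)
# The returned model carries the fitted mu/sigma; the figure shows the
# histogram, the fitted gaussian and the shaded 68%/95% bands.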
###############################################
def gaussian_fwhm_fit(data,
startparams=(0,0.2,1),\
fitrange=((None,None), (None,None), (None, None)),\
fig=None,\
bins=80,\
xlabel='$\\theta_{{rec}} - \\theta_{{true}}$'):
"""
A plot with a gaussian fitted to data. A histogram of the data will be created and a gaussian
will be fitted, with 68 and 95 percentiles indicated in the plot. The gaussian will be in a form
so that the fwhm can be read directly from it. The "width" parameter of the gaussian is NOT the
standard deviation, but FWHM!
Args:
data (array-like) : input data with a (preferably) gaussian distribution
Keyword Args:
startparams (tuple) : a set of startparams of the gaussian fit. It is a 3
parameter fit with mu, fwhm and amplitude
fitrange (tuple) : if desired, the fit can be restrained. One tuple of (min, max) per
parameter
fig (matplotlib.Figure) : pre-created figure to draw the plot in
bins (array-like or int) : bins for the underliying histogram
        xlabel (str) : label for the x-axis
"""
mod = fit.Model(fit.fwhm_gauss, startparams=startparams)
mod.add_data(data, create_distribution=True, bins=bins, normalize=False)
limits = []
notempty = False
for k in fitrange:
thislimit = []
for j in k:
if j is None:
continue
else:
notempty = True
thislimit.append(j)
limits.append(tuple(thislimit))
limits = tuple(limits)
    Logger.debug("Fit limits: {}".format(limits))
if notempty:
mod.fit_to_data(limits=limits)
else:
mod.fit_to_data()
thecolors = get_color_palette()
fig = mod.plot_result(log=False, xlabel=xlabel, add_parameter_text=(
('$\\mu$& {:4.2e}\\\\', 0), ('FWHM& {:4.2e}\\\\', 1), ('AMP& {:4.2e}\\\\',2)), datacolor=thecolors[3], modelcolor=thecolors[3], histostyle='line', model_alpha=0.7, fig=fig)
ax = fig.gca()
ax.grid(1)
ax.set_ylim(ymax=1.1 * max(mod.data))
upper68 = mod.distribution.stats.mean + mod.distribution.stats.std
lower68 = mod.distribution.stats.mean - mod.distribution.stats.std
lower95 = mod.distribution.stats.mean - 2 * mod.distribution.stats.std
upper95 = mod.distribution.stats.mean + 2 * mod.distribution.stats.std
ax.axvspan(lower68, upper68, facecolor=thecolors[8], alpha=0.7, ec='none')
ax.axvspan(lower95, upper95, facecolor=thecolors[8], alpha=0.3, ec='none')
ax.text(lower68 * 0.9, max(mod.data) * 0.98, '68\\%', color=thecolors[3], fontsize=20)
ax.text(lower95 * 0.9, max(mod.data) * 0.85, '95\\%', color=thecolors[3], fontsize=20)
return (mod, fig)
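###############################################
# Minimal usage sketch for gaussian_fwhm_fit, analogous to the example above.
# Remember that the second fit parameter is the FWHM, not sigma; for a true
# gaussian, FWHM = 2*sqrt(2*ln 2)*sigma. The input data are illustrative only.
def _example_gaussian_fwhm_fit():
    import numpy as np
    data = np.random.normal(0., 0.2, 10000)
    model, figure = gaussian_fwhm_fit(data, startparams=(0, 0.2, 1), bins=80)
    return model, figure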
###############################################
class VariableDistributionPlot(object):
"""
A plot which shows the distribution of a certain variable.
Cuts can be indicated with lines and arrows. This class defines
(and somehow enforces) a certain style.
"""
def __init__(self, cuts=None,\
color_palette="dark",\
bins=None,\
xlabel=None):
"""
Keyword Args:
bins (array-like): desired binning, if None use default
cuts (HErmes.selection.cut.Cut): conditions applied to the dataset.
the cut will not be performed, but visualized
                                             instead with a line and an arrow indicating the
                                             cut region
color_palette (str): use this palette for the plotting
xlabel (str): descriptive string for x-label
"""
self.histograms = {}
self.histratios = {}
self.cumuls = {}
self.plotratio = False
self.plotcumul = False
self.canvas = None
if (xlabel is None):
self.label = ''
else:
self.label = xlabel
self.name = ''
self.bins = bins
if cuts is None:
cuts = []
self.cuts = cuts
if isinstance(color_palette, str):
self.color_palette = get_color_palette(color_palette)
else:
self.color_palette = color_palette
self.plot_options = dict()
def add_cuts(self, cut):
"""
        Add a cut to the plot which can be indicated by an arrow
Args:
cuts (HErmes.selection.cuts.Cut):
Returns:
None
"""
self.cuts.append(cut)
def add_data(self, variable_data,\
name, bins=None,\
weights=None, label=''):
"""
Histogram the added data and store internally
Args:
name (string): the name of a category
variable_data (array): the actual data
Keyword Args:
bins (array): histogram binning
weights (array): weights for the histogram
label (str): A label for the data when plotted
"""
if bins is None:
bins = self.bins
if weights is None:
self.histograms[name] = d.factory.hist1d(variable_data, bins)
else:
Logger.debug(f"Found {len(weights)} weights and {len(variable_data)} data points")
assert len(weights) == len(variable_data),\
f"Mismatch between len(weights) {len(weights)} and len(variable_data) {len(variable_data)}"
self.histograms[name] = d.factory.hist1d(variable_data, bins, weights=weights)
self.label = label
self.name = name
def add_variable(self, category,
variable_name,
external_weights=None,
transform=None):
"""
Convenience interface if data is sorted in categories already
Args:
            category (HErmes.variables.category.Category): Get variable from this category
variable_name (string): The name of the variable
Keyword Args:
            external_weights (np.ndarray): Supply an array for weighting. This will OVERRIDE ANY INTERNAL WEIGHTING MECHANISM and use the supplied weights.
            transform (callable): Apply transformation to data
"""
if category.plot_options:
self.plot_options[category.name] = copy(category.plot_options)
if self.bins is None:
self.bins = category.vardict[variable_name].bins
self.name = variable_name
if external_weights is None:
Logger.warning("Internal weighting mechanism is broken at the moment, FIXME!")
#weights = category.weights
#if len(weights) == 0:
# weights = None
weights = None
else:
weights = external_weights
if transform is None: transform = lambda x : x
data = category.get(variable_name)
#print (variable_name)
#print (data)
#print (data[0])
# FIXME: check this
# check pandas series and
# numpy array difference
        # FIXME: values was as_matrix before - adapt changes in requirements.txt
        try:
            data = data.values
        except AttributeError:
            # data is already a plain numpy array
            pass
# hack for applying the weights
if hasattr(data[0],"__iter__"):
if weights is not None:
Logger.warning("Multi array data for {} de tected. Trying to apply weights".format(variable_name))
tmpweights = np.array([weights[i]*np.ones(len(data[i])) for i in range(len(data))])
Logger.warning("Weights broken, assuming flatten as transformation")
weights = flatten(tmpweights)
self.add_data(transform(data),\
category.name,\
self.bins, weights=weights,\
label=category.vardict[variable_name].label)
def add_cumul(self, name):
"""
Add a cumulative distribution to the plot
Args:
name (str): the name of the category
"""
assert name in self.histograms, "Need to add data first"
self.cumuls[name] = self.histograms[name].normalized()
def indicate_cut(self, ax, arrow=True):
"""
If cuts are given, indicate them by lines
Args:
ax (pylab.axes): axes to draw on
"""
vmin, vmax = ax.get_ylim()
hmin, hmax = ax.get_xlim()
for cut in self.cuts:
for name, (operator, value) in cut:
# there might be more than one
# cut which should be
# drawn on this plot
# so go through ALL of them.
if name != self.name:
continue
Logger.debug('Found cut! {0} on {1}'.format(name,value))
width = vmax/50.
# create a line a cut position
ax.vlines(value, ymin=vmin, ymax=vmax, linestyle=':')
length = (hmax - hmin)*0.1
# mark part of the plot as "forbidden"
# and create arrow if desired
if operator in ('>','>='):
shape = 'right'
ax.axvspan(value, hmax,\
facecolor=self.color_palette["prohibited"],\
alpha=0.5)
else:
shape = 'left'
ax.axvspan(hmin, value,\
facecolor=self.color_palette["prohibited"],\
alpha=0.5)
if arrow:
ax = create_arrow(ax, value, vmax*0.1,\
-1., 0, length, width=width,\
shape=shape, log=True)
def add_ratio(self, nominator, denominator,\
total_ratio=None, total_ratio_errors=None, \
log=False, label="data/$\Sigma$ bg"):
"""
Add a ratio plot to the canvas
Args:
nominator (list or str): name(s) of the categorie(s) which
will be the nominator in the ratio
denominator (list or str): name(s) of the categorie(s) which
                                     will be the denominator in the ratio
Keyword Args:
total_ratio (bool): Indicate the total ratio with a line in the plot
total_ratio_errors (bool): Draw error region around total ratio
log (bool): draw ratio plot in log-scale
label (str): y-label for the ratio plot
"""
if not isinstance(nominator, list):
nominator = [nominator]
if not isinstance(denominator, list):
denominator = [denominator]
name = "".join(nominator) + "_" + "".join(denominator)
first_nominator = nominator.pop()
nominator_hist = self.histograms[first_nominator]
nominator_ws = self.histograms[first_nominator].stats.weightsum
        # use a dedicated loop variable so the ratio key stored in 'name' is not clobbered
        for nom_name in nominator:
            nominator_hist += self.histograms[nom_name]
            nominator_ws += self.histograms[nom_name].stats.weightsum
first_denominator = denominator.pop()
denominator_hist = self.histograms[first_denominator]
denominator_ws = self.histograms[first_denominator].stats.weightsum
        for denom_name in denominator:
            denominator_hist += self.histograms[denom_name]
            denominator_ws += self.histograms[denom_name].stats.weightsum
nominator_hist.normalized()
denominator_hist.normalized()
ratio = d.histfuncs.histratio(nominator_hist, denominator_hist,\
log=False, ylabel=label)
if total_ratio is None:
total_ratio = nominator_ws/denominator_ws
Logger.info("Calculated scalar ratio of {:4.2f} from histos".format(total_ratio))
#ratio.y[ratio.y > 0] = ratio.y[ratio.y > 0] + total_ratio -1
self.histratios[name] = (ratio, total_ratio, total_ratio_errors,\
label)
return name
def _draw_distribution(self, ax, name,
log=True,\
cumulative=False,
normalized=False,
ylabel="rate/bin [1/s]"):
"""
Paint the histograms!
        Args:
            ax (pylab.axes): axes to draw on
            name (str): the name of the category to draw
        Keyword Args:
            log (bool): Use a logarithmic scale for the y-axis
            cumulative (bool): Show a cumulative distribution
            normalized (bool): normalize the distribution by the number of entries
            ylabel (str): label shown on the y-axis
"""
try:
cfg = copy(self.plot_options[name])
except KeyError:
Logger.warning("No plot configuration available for {}".format(name))
cfg = {"histotype": "line",
"label": name,
"linestyle" : {"color": "k",
"linewidth": 3
}
}
log = False
if "linestyle" in cfg:
color = cfg["linestyle"].pop('color')
if isinstance(color, int):
color = self.color_palette[color]
if 'scatterstyle' in cfg:
scattercolor = cfg["scatterstyle"].pop('color')
if isinstance(scattercolor,int):
scattercolor = self.color_palette[scattercolor]
if cumulative:
histograms = self.cumuls
log = False
else:
histograms = self.histograms
if normalized and not cumulative:
histograms[name] = histograms[name].normalized()
if cfg['histotype'] == 'scatter':
histograms[name].scatter(log=log,cumulative=cumulative,\
label=cfg["label"],\
color=scattercolor, **cfg["scatterstyle"])
elif cfg['histotype'] == "line":
            # apply the alpha only to the "fill" setting
linecfg = copy(cfg["linestyle"])
if "alpha" in linecfg:
linecfg.pop("alpha")
linecfg["filled"] = False
if "filled" in cfg["linestyle"]:
histograms[name].line(log=log, cumulative=cumulative,\
label=cfg["label"], color=color,\
**linecfg)
histograms[name].line(log=log, cumulative=cumulative,\
label=None, color=color,\
**cfg["linestyle"])
else:
histograms[name].line(log=log, cumulative=cumulative,\
label=cfg["label"], color=color,\
**cfg["linestyle"])
elif cfg['histotype'] == "overlay":
histograms[name].line(log=log, cumulative=cumulative,\
label=cfg["label"], color=color,\
**cfg["linestyle"])
histograms[name].scatter(log=log, cumulative=cumulative,\
color=scattercolor,\
**cfg["scatterstyle"])
if cumulative:
ax.set_ylabel('fraction')
else:
ax.set_ylabel(ylabel)
def _draw_histratio(self, name, axes, ylim=(0.1,2.5)):
"""
Plot one of the ratios
Returns:
tuple (float,float) : the min and max of the ratio
"""
ratio,total_ratio,total_ratio_errors,label = self.histratios[name]
ratio.scatter(c="k", marker="o", markersize=3)
axes.hlines(total_ratio,axes.get_xlim()[0],axes.get_xlim()[1],linestyle="--")
if total_ratio_errors is not None:
axes.hlines(total_ratio + total_ratio_errors,axes.get_xlim()[0],axes.get_xlim()[1],linestyle=":")
axes.hlines(total_ratio - total_ratio_errors,axes.get_xlim()[0],axes.get_xlim()[1],linestyle=":")
xs = n.linspace(axes.get_xlim()[0],axes.get_xlim()[1],200)
axes.fill_between(xs,total_ratio - total_ratio_errors,\
total_ratio + total_ratio_errors,\
facecolor="grey", alpha=0.3)
#axes.set_ylim(ylim)
axes.set_ylabel(label)
axes.grid(1)
        if ratio.y is not None:
            if ratio.yerr is not None:
ymin = min(ratio.y - ratio.yerr)
ymax = max(ratio.y + ratio.yerr)
ymin -= (0.1*ymin)
ymax += (0.1*ymax)
return ymin, ymax
else:
ymin = min(ratio.y)
                ymax = max(ratio.y)
ymin -= (0.1*ymin)
ymax += (0.1*ymax)
return ymin, ymax
else:
return 0,0
def _locate_axes(self, combined_cumul, combined_ratio, combined_distro):
axes_locator = []
if self.cumuls:
if combined_cumul:
axes_locator.append((0, "c"))
else:
axes_locator += [(x,"c") for x in range(len(self.cumuls))]
if self.histratios:
if combined_ratio:
if axes_locator:
                    axes_locator.append((axes_locator[-1][0] + 1, "r"))
else:
axes_locator.append((0,"r"))
else:
if axes_locator:
axes_locator +=[(x+ len(axes_locator),"r") for x in range(len(self.histratios))]
if self.histograms:
if combined_distro:
if axes_locator:
                    axes_locator.append((axes_locator[-1][0] + 1, "h"))
else:
axes_locator.append((0,"h"))
else:
if axes_locator:
axes_locator +=[(x+ len(axes_locator),"h") for x in range(len(self.histograms))]
return axes_locator
def plot(self,
axes_locator=((0, "c",.2), (1, "r",.2), (2, "h",.5)),\
combined_distro=True,\
combined_ratio=True,\
combined_cumul=True,
normalized=True,
style="classic",\
log=True,
legendwidth = 1.5,
ylabel="rate/bin [1/s]",
figure_factory=None,
zoomin=False,
adjust_ticks=lambda x : x):
"""
Create the plot
Keyword Args:
axes_locator (tuple): A specialized tuple defining where the axes should be located in the plot
tuple has the following form:
( (PLOTA), (PLOTB), ...) where PLOTA is a tuple itself of the form (int, str, int)
describing (plotnumber, plottype, height of the axes in the figure)
plottype can be either: "c" - cumulative
"r" - ratio
"h" - histogram
            combined_distro (bool): draw all distributions into a single panel
            combined_ratio (bool): draw all ratios into a single panel
            combined_cumul (bool): draw all cumulative distributions into a single panel
            log (bool): use a logarithmic y-axis for the distributions
            style (str): Apply a simple style to the plot. Options are "modern" or "classic"
            normalized (bool): normalize the distributions by the number of entries
            figure_factory (fcn): Must return a matplotlib figure, use for custom formatting
zoomin (bool): If True, select the yrange in a way that the interesting part of the
histogram is shown. Caution is needed, since this might lead to an
overinterpretation of fluctuations.
adjust_ticks (fcn): A function, applied on a matplotlib axes
which will set the proper axis ticks
Returns:
"""
Logger.info(f"Found {len(self.histograms)} distributions")
Logger.info(f"Found {len(self.histratios)} ratios")
Logger.info(f"Found {len(self.cumuls)} cumulative distributions")
if not axes_locator:
axes_locator = self._locate_axes(combined_cumul,\
combined_ratio,\
combined_distro)
# calculate the amount of needed axes
# assert len(axes_locator) == len(heights), "Need to specify exactly as many heights as plots you want to have"
        # axes located automatically by _locate_axes carry no height entry, so fall back to equal heights
        heights = [k[2] if len(k) > 2 else 1./len(axes_locator) for k in axes_locator]
self.canvas = YStackedCanvas(subplot_yheights=heights,\
figure_factory=figure_factory)
cu_axes = [x for x in axes_locator if x[1] == "c"]
h_axes = [x for x in axes_locator if x[1] == "h"]
r_axes = [x for x in axes_locator if x[1] == "r"]
maxheights = []
minheights = []
for ax in cu_axes:
cur_ax = self.canvas.select_axes(ax[0])
if combined_cumul:
for k in list(self.cumuls.keys()):
self._draw_distribution(cur_ax, k, cumulative=True,log=log, ylabel=ylabel)
break
else:
                # pick the category key for this axis; _draw_distribution expects the name
                k = list(self.cumuls.keys())[ax[0]]
                self._draw_distribution(cur_ax, k, cumulative=True, log=log, ylabel=ylabel)
for ax in r_axes:
cur_ax = self.canvas.select_axes(ax[0])
if combined_ratio:
for k in list(self.histratios.keys()):
ymin, ymax = self._draw_histratio(k,cur_ax)
ymin -= (ymin*0.1)
ymax += (ymax*0.1)
cur_ax.set_ylim(ymin,ymax)
# FIXME: good tick spacing
#major_tick_space = 1
#minor_tick_space = 0.1
## in case there are too many ticks
#nticks = float(ymax - ymin)/major_tick_space
#while nticks > 4:
# major_tick_space += 1
##if ymax - ymin < 1:
## major_tick_space =
#cur_ax.xaxis.set_major_locator(matplotlib.ticker.MultipleLocator(minor_tick_space))
break
else:
                # _draw_histratio expects the key of the ratio, not the stored tuple
                k = list(self.histratios.keys())[ax[0]]
                ymin, ymax = self._draw_histratio(k, cur_ax)
ymin -= (ymin*0.1)
ymax += (ymax*0.1)
cur_ax.set_ylim(ymin,ymax)
for ax in h_axes:
cur_ax = self.canvas.select_axes(ax[0])
if combined_distro:
for k in list(self.histograms.keys()):
Logger.debug("drawing..{}".format(k))
self._draw_distribution(cur_ax,k,log=log, normalized=normalized, ylabel=ylabel)
break
else:
                # _draw_distribution expects the category key and returns no limits,
                # so derive the y-range from the histogram itself
                k = list(self.histograms.keys())[ax[0]]
                self._draw_distribution(cur_ax, k, log=log, normalized=normalized, ylabel=ylabel)
                ymin, ymax = min(self.histograms[k].bincontent), max(self.histograms[k].bincontent)
                cur_ax.set_ylim(ymin=ymin - 0.1*ymin, ymax=1.1*ymax)
cur_ax.grid(True)
lgax = self.canvas.select_axes(-1) # most upper one
ncol = 2 if len(self.histograms) <= 4 else 3
if style == "classic":
# draw the legend in the box above the plot
legend_kwargs = {"bbox_to_anchor": [0., 1.0, 1., .102],
"loc": 3,
"frameon": True,
"ncol": ncol,
"framealpha": 1.,
"borderaxespad": 0,
"mode": "expand",
"handlelength": 2,
"numpoints": 1}
lg = lgax.legend(**legend_kwargs)
if lg is not None:
lg.get_frame().set_linewidth(legendwidth)
lg.get_frame().set_edgecolor("k")
else:
Logger.warning("Can not set legendwidth!")
if style == "modern":
# be more casual
lgax.legend()
# plot the cuts
if self.cuts:
for ax in h_axes:
cur_ax = self.canvas.select_axes(ax[0])
self.indicate_cut(cur_ax, arrow=True)
for ax in r_axes + cu_axes:
cur_ax = self.canvas.select_axes(ax[0])
self.indicate_cut(cur_ax, arrow=False)
# cleanup
leftplotedge, rightplotedge, minplotrange, maxplotrange = self.optimal_plotrange_histo(self.histograms.values())
if minplotrange == maxplotrange:
Logger.debug("Detected histogram with most likely a single bin!")
Logger.debug("Adjusting plotrange")
else:
if zoomin:
figure_span = maxplotrange - minplotrange
minplotrange -= (figure_span*0.1)
maxplotrange += (figure_span*0.1)
else: # start at zero and show the boring part
minplotrange = 0
maxplotrange += (maxplotrange*0.1)
if log:
maxplotrange = 10**(np.log10(maxplotrange) + 1)
if maxplotrange < 1:
minplotrange -= (minplotrange*0.01)
else:
minplotrange = 0 # will be switched to symlog by default
for ax in h_axes:
self.canvas.select_axes(ax[0]).set_ylim(ymin=minplotrange, ymax=maxplotrange)
self.canvas.select_axes(ax[0]).set_xlim(xmin=leftplotedge, xmax=rightplotedge)
if log:
if maxplotrange < 1:
self.canvas.select_axes(ax[0]).set_yscale("log")
else:
self.canvas.select_axes(ax[0]).set_yscale("symlog")
for ax in cu_axes:
self.canvas.select_axes(ax[0]).set_xlim(xmin=leftplotedge, xmax=rightplotedge)
for ax in r_axes:
self.canvas.select_axes(ax[0]).set_xlim(xmin=leftplotedge, xmax=rightplotedge)
self.canvas.eliminate_lower_yticks()
# set the label on the lowest axes
self.canvas.axes[0].set_xlabel(self.label)
minor_tick_space = self.canvas.axes[0].xaxis.get_ticklocs()
minor_tick_space = (minor_tick_space[1] - minor_tick_space[0])/10.
if minor_tick_space < 0.1:
Logger.debug("Adjusting for small numbers in tick spacing, tickspace detectected {}".format(minor_tick_space))
minor_tick_space = 0.1
self.canvas.axes[0].xaxis.set_minor_locator(matplotlib.ticker.MultipleLocator(minor_tick_space))
for x in self.canvas.axes[1:]:
p.setp(x.get_xticklabels(), visible=False)
p.setp(x.get_xticklines(), visible=False)
x.xaxis.set_tick_params(which="both",\
length=0,\
width=0,\
bottom=False,\
labelbottom=False)
for x in self.canvas.figure.axes:
x.spines["right"].set_visible(True)
adjust_ticks(x)
for ax in h_axes:
#self.canvas.select_axes(ax[0]).ticklabel_format(useOffset=False, style='plain', axis="y")
self.canvas.select_axes(ax[0]).get_yaxis().get_offset_text().set_x(-0.1)
if ((len(h_axes) == 1) and (style == "modern")):
self.canvas.select_axes(-1).spines["top"].set_visible(False)
self.canvas.select_axes(-1).spines["right"].set_visible(False)
@staticmethod
def optimal_plotrange_histo(histograms):
"""
        Get the most suitable x and y limits for a bunch of histograms
Args:
histograms (list(d.factory.hist1d)): The histograms in question
Returns:
tuple (float, float, float, float): xmin, xmax, ymin, ymax
"""
leftplotedge = n.inf
rightplotedge = -n.inf
minplotrange = n.inf
maxplotrange = -n.inf
for h in histograms:
if not h.bincontent.any():
continue
if h.bincenters[h.bincontent > 0][0] < leftplotedge:
leftplotedge = h.bincenters[h.bincontent > 0][0]
leftplotedge -= h.binwidths[0]
if h.bincenters[h.bincontent > 0][-1] > rightplotedge:
rightplotedge = h.bincenters[h.bincontent > 0][-1]
rightplotedge += h.binwidths[0]
if min(h.bincontent[h.bincontent > 0]) < minplotrange:
minplotrange = min(h.bincontent[h.bincontent > 0])
if max(h.bincontent[h.bincontent > 0]) > maxplotrange:
maxplotrange = max(h.bincontent[h.bincontent > 0])
Logger.info("Estimated plotrange of xmin {} , xmax {}, ymin {}, ymax {}".format(leftplotedge, rightplotedge, minplotrange, maxplotrange))
return leftplotedge, rightplotedge, minplotrange, maxplotrange
def add_legend(self, **kwargs):
"""
Add a legend to the plot. If no kwargs are passed,
use some reasonable default.
Keyword Args:
will be passed to pylab.legend
"""
if not kwargs:
kwargs = {"bbox_to_anchor": (0.,1.0, 1., .102),\
"loc" : 3, "ncol" :3,\
"mode": "expand",\
"framealpha": 1,\
"borderaxespad": 0.,\
"handlelength": 2,\
"numpoints": 1}
self.canvas.global_legend(**kwargs)
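###############################################
# Minimal usage sketch for VariableDistributionPlot. The two synthetic
# "categories" below stand in for data that would normally come from
# HErmes categories via add_variable; bins, names and labels are
# illustrative assumptions only.
def _example_variable_distribution_plot():
    import numpy as np
    bins = np.linspace(-5, 5, 51)
    vplot = VariableDistributionPlot(bins=bins, xlabel="some variable x")
    vplot.add_data(np.random.normal(0, 1, 10000), "signal", bins=bins, label="signal")
    vplot.add_data(np.random.normal(1, 2, 10000), "background", bins=bins, label="background")
    vplot.add_cumul("signal")
    vplot.add_ratio("signal", "background")
    vplot.plot(axes_locator=((0, "c", .2), (1, "r", .2), (2, "h", .5)), log=False)
    return vplot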
# #######################################################
#
# def error_distribution_plot(h,
# xlabel = r"$\log(E_{rec}/E_{ref})$",
# name = "E",
# median = False):
# """
#
#
# Args:
# h:
# xlabel:
# name:
# median:
#
# Returns:
#
# """
# par = HistoFitter(h, Gauss)
# fig = p.figure(figsize=(6,4),dpi=350)
# ax = fig.gca()
# if not median: ax.plot(h.bincenters, Gauss(par,h.bincenters),color="k",lw=2)
# h.line(filled=True,color="k",lw=2,fc="grey",alpha=.5)#hatch="//")
# h.line(color="k",lw=2)
# ax.grid(1)
# ax.set_ylim(ymax=1.1*max(h.bincontent))
# ax.set_xlim(xmin=h.bincenters[0],xmax=h.bincenters[-1])
# ax.set_xlabel(xlabel)
# ax.set_ylabel("Normalized bincount")
# if median: ax.vlines(h.stats.median,0,1.1*max(h.bincontent),linestyles="dashed")
# textstr ="Gaussian fit:\n"
# textstr += "$\mu$ = " + "%4.3f" %par[1] + "\n" + "$\sigma$ = " + "%4.2f" %par[2]
# if median:
# textstr = "Median:\n %4.3f" %h.stats.median
# CreateTextbox(ax,textstr,boxstyle="square",xcoord=.65,fontsize=16,alpha=.9)
# #Thesisize(ax)
# #print h.bincontent[h.bincenters > -.1][h.bincenters < .1].cumsum()[-1]
#
# #ChisquareTest(h,Gauss(par,h.bincenters),xmin=-.001,xmax=.001)
# #savename = Multisavefig(plotdir_stat,"parameter-reso-" + name,3,orientation="portrait",pad_inches=.3,bbox_inches="tight")[0]#pad_inches=.5,bbox_inche s="tight")[0]
# return fig
#
# ####################################################
#
# def HistoFitter(histo,func,startmean=0,startsigma=.2):
#
# def error(p,x,y):
# return n.sqrt((func(p,x) - y)**2)
#
# #print histo.bincontent.std()
# histo.stats.mean
# p0 = [max(histo.bincontent),histo.stats.mean,histo.stats.var]
# output = optimize.leastsq(error,p0,args=(histo.bincenters,histo.bincontent),full_output=1)
# par = output[0]
# covar = output[1]
# rchisquare = scipy.stats.chisquare(1*histo.bincontent,f_exp=(1*func(par,histo.bincenters)))[0]/(1*(len(histo.bincenters) -len(par)))
# #print par,covar
# #print "chisquare/ndof",rchisquare
# #print histo.bincontent[:10], func(par,histo.bincenters)[:10]
# #print "ks2_samp", scipy.stats.ks_2samp(histo.bincontent,func(par,histo.bincenters))
# return par
#
# #####################################################
#
# def create_textbox(ax, textstr, boxstyle="round",\
# facecolor="white", alpha=.7,\
# xcoord=0.05, ycoord=0.95, fontsize=14):
# """
# Create a textbox on a given axis
#
# Args:
# ax:
# textstr:
# boxstyle:
# facecolor:
# alpha:
# xcoord:
# ycoord:
# fontsize:
#
# Returns:
# the given ax object
# """
# props = dict(boxstyle=boxstyle, facecolor=facecolor, alpha=alpha)
# # place a text box in upper left in axes coords
# ax.text(xcoord, ycoord, textstr,\
# transform=ax.transAxes,\
# fontsize=fontsize,\
# verticalalignment='top', bbox=props)
# return ax
#
# ######################################################
#
# def ChisquareTest(histo, fit, xmin=-.2, xmax=.2, ndof=3):
# data = histo.bincontent[histo.bincenters > xmin][histo.bincenters < xmax]
# fit = fit[histo.bincenters > xmin][histo.bincenters < xmax]
# #print data,fit
# print (scipy.stats.chisquare(data,f_exp=fit)[0]/(len(fit) - ndof))
# print (scipy.stats.ks_2samp(data,fit))
# print (scipy.stats.anderson(data))
| gpl-2.0 |
PyAbel/PyAbel | doc/transform_methods/rbasex-SVD.py | 1 | 1324 | from __future__ import division, print_function
import numpy as np
from scipy.linalg import inv, svd
import matplotlib.pyplot as plt
from abel.rbasex import _bs_rbasex
Rmax = 40
# SVD for 0th-order inverse Abel transform
P, = _bs_rbasex(Rmax, 0, False)
A = inv(P.T)
V, s, UT = svd(A)
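# Optional sanity check (illustrative): the SVD factors should reconstruct A
# up to numerical precision. Left commented out so the doc figure is unchanged.
#   assert np.allclose(A, np.dot(V * s, UT))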
# setup x axis
def setx():
plt.xlim((0, Rmax))
plt.xticks([0, 1/4 * Rmax, 1/2 * Rmax, 3/4 * Rmax, Rmax],
['$0$', '', '', '', '$r_{\\rm max}$'])
# plot i-th +- 0, 1 singular vectors
def plotu(i, title):
plt.title('$\\mathbf{v}_i,\\quad i = ' + title + '$')
i = int(i)
plt.plot(V[:, i - 1], '#DD0000')
plt.plot(V[:, i], '#00AA00')
plt.plot(V[:, i + 1], '#0000FF')
setx()
fig = plt.figure(figsize=(6, 6), frameon=False)
# singular values
plt.subplot(321)
plt.title('$\\sigma_i$')
plt.plot(s, 'k')
setx()
plt.ylim(bottom=0)
# vectors near 0
plt.subplot(322)
plotu(1, '0, 1, 2')
# vectors near 1/4
plt.subplot(323)
plotu(1/4 * Rmax, '\\frac{1}{4} r_{\\rm max} \\pm 0, 1')
# vectors near middle
plt.subplot(324)
plotu(1/2 * Rmax, '\\frac{1}{2} r_{\\rm max} \\pm 0, 1')
# vectors near 3/4
plt.subplot(325)
plotu(3/4 * Rmax, '\\frac{3}{4} r_{\\rm max} \\pm 0, 1')
# vectors near end
plt.subplot(326)
plotu(Rmax - 1, 'r_{\\rm max} - 2, 1, 0')
plt.tight_layout()
#plt.savefig('rbasex-SVD.svg')
#plt.show()
| mit |
trankmichael/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
# checks that the output for the verbose output is the same
# for the flag values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
jseabold/scikit-learn | sklearn/datasets/tests/test_lfw.py | 230 | 7880 | """This test for the LFW require medium-size data dowloading and processing
If the data has not been already downloaded by running the examples,
the tests won't run (skipped).
If the tests are run, the first execution will be long (typically a bit
more than a couple of minutes) but as the dataset loader is leveraging
joblib, successive runs will be fast (less than 200ms).
"""
import random
import os
import shutil
import tempfile
import numpy as np
from sklearn.externals import six
try:
try:
from scipy.misc import imsave
except ImportError:
from scipy.misc.pilutil import imsave
except ImportError:
imsave = None
from sklearn.datasets import load_lfw_pairs
from sklearn.datasets import load_lfw_people
from sklearn.datasets import fetch_lfw_pairs
from sklearn.datasets import fetch_lfw_people
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.utils.testing import SkipTest
from sklearn.utils.testing import raises
SCIKIT_LEARN_DATA = tempfile.mkdtemp(prefix="scikit_learn_lfw_test_")
SCIKIT_LEARN_EMPTY_DATA = tempfile.mkdtemp(prefix="scikit_learn_empty_test_")
LFW_HOME = os.path.join(SCIKIT_LEARN_DATA, 'lfw_home')
FAKE_NAMES = [
'Abdelatif_Smith',
'Abhati_Kepler',
'Camara_Alvaro',
'Chen_Dupont',
'John_Lee',
'Lin_Bauman',
'Onur_Lopez',
]
def setup_module():
"""Test fixture run once and common to all tests of this module"""
if imsave is None:
raise SkipTest("PIL not installed.")
if not os.path.exists(LFW_HOME):
os.makedirs(LFW_HOME)
random_state = random.Random(42)
np_rng = np.random.RandomState(42)
# generate some random jpeg files for each person
counts = {}
for name in FAKE_NAMES:
folder_name = os.path.join(LFW_HOME, 'lfw_funneled', name)
if not os.path.exists(folder_name):
os.makedirs(folder_name)
n_faces = np_rng.randint(1, 5)
counts[name] = n_faces
for i in range(n_faces):
file_path = os.path.join(folder_name, name + '_%04d.jpg' % i)
uniface = np_rng.randint(0, 255, size=(250, 250, 3))
try:
imsave(file_path, uniface)
except ImportError:
raise SkipTest("PIL not installed")
# add some random file pollution to test robustness
with open(os.path.join(LFW_HOME, 'lfw_funneled', '.test.swp'), 'wb') as f:
f.write(six.b('Text file to be ignored by the dataset loader.'))
# generate some pairing metadata files using the same format as LFW
with open(os.path.join(LFW_HOME, 'pairsDevTrain.txt'), 'wb') as f:
f.write(six.b("10\n"))
more_than_two = [name for name, count in six.iteritems(counts)
if count >= 2]
for i in range(5):
name = random_state.choice(more_than_two)
first, second = random_state.sample(range(counts[name]), 2)
f.write(six.b('%s\t%d\t%d\n' % (name, first, second)))
for i in range(5):
first_name, second_name = random_state.sample(FAKE_NAMES, 2)
first_index = random_state.choice(np.arange(counts[first_name]))
second_index = random_state.choice(np.arange(counts[second_name]))
f.write(six.b('%s\t%d\t%s\t%d\n' % (first_name, first_index,
second_name, second_index)))
with open(os.path.join(LFW_HOME, 'pairsDevTest.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
with open(os.path.join(LFW_HOME, 'pairs.txt'), 'wb') as f:
f.write(six.b("Fake place holder that won't be tested"))
def teardown_module():
"""Test fixture (clean up) run once after all tests of this module"""
if os.path.isdir(SCIKIT_LEARN_DATA):
shutil.rmtree(SCIKIT_LEARN_DATA)
if os.path.isdir(SCIKIT_LEARN_EMPTY_DATA):
shutil.rmtree(SCIKIT_LEARN_EMPTY_DATA)
@raises(IOError)
def test_load_empty_lfw_people():
fetch_lfw_people(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_people_deprecation():
msg = ("Function 'load_lfw_people' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_people(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_people,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_people():
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
min_faces_per_person=3, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_people.images.shape, (10, 62, 47))
assert_equal(lfw_people.data.shape, (10, 2914))
# the target is array of person integer ids
assert_array_equal(lfw_people.target, [2, 0, 1, 0, 2, 0, 2, 1, 1, 2])
# names of the persons can be found using the target_names array
expected_classes = ['Abdelatif Smith', 'Abhati Kepler', 'Onur Lopez']
assert_array_equal(lfw_people.target_names, expected_classes)
# It is possible to ask for the original data without any croping or color
# conversion and not limit on the number of picture per person
lfw_people = fetch_lfw_people(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_people.images.shape, (17, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_people.target,
[0, 0, 1, 6, 5, 6, 3, 6, 0, 3, 6, 1, 2, 4, 5, 1, 2])
assert_array_equal(lfw_people.target_names,
['Abdelatif Smith', 'Abhati Kepler', 'Camara Alvaro',
'Chen Dupont', 'John Lee', 'Lin Bauman', 'Onur Lopez'])
@raises(ValueError)
def test_load_fake_lfw_people_too_restrictive():
fetch_lfw_people(data_home=SCIKIT_LEARN_DATA, min_faces_per_person=100, download_if_missing=False)
@raises(IOError)
def test_load_empty_lfw_pairs():
fetch_lfw_pairs(data_home=SCIKIT_LEARN_EMPTY_DATA, download_if_missing=False)
def test_load_lfw_pairs_deprecation():
msg = ("Function 'load_lfw_pairs' has been deprecated in 0.17 and will be "
"removed in 0.19."
"Use fetch_lfw_pairs(download_if_missing=False) instead.")
assert_warns_message(DeprecationWarning, msg, load_lfw_pairs,
data_home=SCIKIT_LEARN_DATA)
def test_load_fake_lfw_pairs():
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA, download_if_missing=False)
    # The data is cropped around the center as a rectangular bounding box
    # around the face. Colors are converted to gray levels:
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 62, 47))
# the target is whether the person is the same or not
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
# names of the persons can be found using the target_names array
expected_classes = ['Different persons', 'Same person']
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
# It is possible to ask for the original data without any croping or color
# conversion
lfw_pairs_train = fetch_lfw_pairs(data_home=SCIKIT_LEARN_DATA,
resize=None, slice_=None, color=True, download_if_missing=False)
assert_equal(lfw_pairs_train.pairs.shape, (10, 2, 250, 250, 3))
# the ids and class names are the same as previously
assert_array_equal(lfw_pairs_train.target, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
assert_array_equal(lfw_pairs_train.target_names, expected_classes)
| bsd-3-clause |
BiaDarkia/scikit-learn | examples/manifold/plot_mds.py | 88 | 2731 | """
=========================
Multi-dimensional scaling
=========================
An illustration of the metric and non-metric MDS on generated noisy data.
The reconstructed points using the metric MDS and non metric MDS are slightly
shifted to avoid overlapping.
"""
# Author: Nelle Varoquaux <nelle.varoquaux@gmail.com>
# License: BSD
print(__doc__)
import numpy as np
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
from sklearn import manifold
from sklearn.metrics import euclidean_distances
from sklearn.decomposition import PCA
n_samples = 20
seed = np.random.RandomState(seed=3)
X_true = seed.randint(0, 20, 2 * n_samples).astype(np.float)
X_true = X_true.reshape((n_samples, 2))
# Center the data
X_true -= X_true.mean()
similarities = euclidean_distances(X_true)
# Add noise to the similarities
noise = np.random.rand(n_samples, n_samples)
noise = noise + noise.T
noise[np.arange(noise.shape[0]), np.arange(noise.shape[0])] = 0
similarities += noise
mds = manifold.MDS(n_components=2, max_iter=3000, eps=1e-9, random_state=seed,
dissimilarity="precomputed", n_jobs=1)
pos = mds.fit(similarities).embedding_
nmds = manifold.MDS(n_components=2, metric=False, max_iter=3000, eps=1e-12,
dissimilarity="precomputed", random_state=seed, n_jobs=1,
n_init=1)
npos = nmds.fit_transform(similarities, init=pos)
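# The two embeddings can also be compared numerically: both fitted estimators
# expose the final value of the stress objective (illustrative, not plotted here).
#   print("metric stress: %g, non-metric stress: %g" % (mds.stress_, nmds.stress_))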
# Rescale the data
pos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((pos ** 2).sum())
npos *= np.sqrt((X_true ** 2).sum()) / np.sqrt((npos ** 2).sum())
# Rotate the data
clf = PCA(n_components=2)
X_true = clf.fit_transform(X_true)
pos = clf.fit_transform(pos)
npos = clf.fit_transform(npos)
fig = plt.figure(1)
ax = plt.axes([0., 0., 1., 1.])
s = 100
plt.scatter(X_true[:, 0], X_true[:, 1], color='navy', s=s, lw=0,
label='True Position')
plt.scatter(pos[:, 0], pos[:, 1], color='turquoise', s=s, lw=0, label='MDS')
plt.scatter(npos[:, 0], npos[:, 1], color='darkorange', s=s, lw=0, label='NMDS')
plt.legend(scatterpoints=1, loc='best', shadow=False)
similarities = similarities.max() / similarities * 100
similarities[np.isinf(similarities)] = 0
# Plot the edges
start_idx, end_idx = np.where(pos)
# a sequence of (*line0*, *line1*, *line2*), where::
# linen = (x0, y0), (x1, y1), ... (xm, ym)
segments = [[X_true[i, :], X_true[j, :]]
for i in range(len(pos)) for j in range(len(pos))]
values = np.abs(similarities)
lc = LineCollection(segments,
zorder=0, cmap=plt.cm.Blues,
norm=plt.Normalize(0, values.max()))
lc.set_array(similarities.flatten())
lc.set_linewidths(0.5 * np.ones(len(segments)))
ax.add_collection(lc)
plt.show()
| bsd-3-clause |
garnertb/fire-risk | scripts/tct_bayesian.py | 2 | 7803 | from __future__ import division
import pylab as pl
import numpy as np
import pandas as pd
class tctGibbs:
#initialization
# inputs:
def __init__(self,cro=76,cbu=20,cwo=4,talup=120,tallow=90,
tdisup=80,tdislow=40,tturup=100,tturlow=60,tarrup=420,
tarrlow=300,tsupup=180,tsuplow=60,sizemin=100,sizeroom=2000,
sizebldg=10000,sizemax=3000000,
upA=0.047,lowA=0.0029,upalph=2,lowalph=1):
#observed bin values
self.cro = cro #contained to room
self.cbu = cbu #contained to building
self.cwo = cwo #contained to world
#specify constraints on the fixed parameters
self.talup = talup # upper bound on t_alarm
self.tallow = tallow # lower bound on t_alarm
self.tdisup = tdisup # upper bound on t_dispatch
self.tdislow = tdislow # lower bound on t_dispatch
self.tturup = tturup # upper bound on t_turnout
self.tturlow = tturlow # lower bound on t_turnout
self.tarrup = tarrup # upper bound on t_arrival
self.tarrlow = tarrlow # lower bound on t_arrival
self.tsupup = tsupup # upper bound on t_suppression
self.tsuplow = tsuplow # lower bound on t_suppression
#specify expected fire sizes for spread behavior (kW)
        self.sizemin = sizemin #minimum fire size; keep strictly positive to avoid numerical singularities on the lower bound
self.sizeroom = sizeroom #threshold for binning a fire into contained to room
self.sizebldg = sizebldg #threshold on binning a fire into contained to building
self.sizemax = sizemax #reasonable physical threshold for a structure fire
#Note: value of 3,000,000 taken from FDS prediction
#of peak HRR for WTC 1 fire on 9/11, from NCSTAR 1-5
#Figure 6-30
#specify original bounds on A and alpha
self.upA = upA
self.lowA = lowA
self.upalph = upalph
self.lowalph = lowalph
#calculate total number of fires
self.n_fires=cro+cbu+cwo
#instantiate initial draws for A and alpha
self.ARoom=np.random.uniform(lowA,upA)
self.ABldg=np.random.uniform(lowA,upA)
self.ABeyond=np.random.uniform(lowA,upA)
self.alphRoom=np.random.uniform(lowalph,upalph)
self.alphBldg=np.random.uniform(lowalph,upalph)
self.alphBeyond=np.random.uniform(lowalph,upalph)
#instantiate variables for tcor fires
self.tcorRoom = 0
self.tcorBldg = 0
self.tcorBeyond = 0
#create initial containers for all of the task time variables
        self.tal = np.random.uniform(tallow,talup)    # initial draw of t_alarm
        self.tdis = np.random.uniform(tdislow,tdisup) # initial draw of t_dispatch
        self.ttur = np.random.uniform(tturlow,tturup) # initial draw of t_turnout
        self.tarr = np.random.uniform(tarrlow,tarrup) # initial draw of t_arrival
        self.tsup = np.random.uniform(tsuplow,tsupup) # initial draw of t_suppression
self.tfiretasks = self.tal+self.tdis+self.ttur+self.tarr+self.tsup
#Create draw functions for the Gibbs sampler
#Draw new values for fire department timing
def draw_tfiretasks(self):
        self.tal = np.random.uniform(self.tallow,self.talup)    # new draw of t_alarm
        self.tdis = np.random.uniform(self.tdislow,self.tdisup) # new draw of t_dispatch
        self.ttur = np.random.uniform(self.tturlow,self.tturup) # new draw of t_turnout
        self.tarr = np.random.uniform(self.tarrlow,self.tarrup) # new draw of t_arrival
        self.tsup = np.random.uniform(self.tsuplow,self.tsupup) # new draw of t_suppression
self.tfiretasks = self.tal+self.tdis+self.ttur+self.tarr+self.tsup
#Draw the tcor values for relevant fires
#Inputs: relevant Qmin and Qmax thresholds and current A and alph values
def draw_tcor(self,Qmin,Qmax,A,alph):
lowtcor = (Qmin/A)**(1/alph)-self.tfiretasks
uptcor = (Qmax/A)**(1/alph)-self.tfiretasks
return np.random.uniform(lowtcor,uptcor)
#Draw the A values for relevant fires
#Inputs: relevant Qmin and Qmax thresholds and current tcor and alph values
def draw_A(self,Qmin,Qmax,tcor,alph):
lowA = (Qmin)/(max(tcor+self.tfiretasks,0.0001)**(alph))
upA = min((Qmax)/(max(tcor+self.tfiretasks,0.0001)**(alph)),
Qmin/self.tfiretasks**2)
#return np.random.uniform(max(lowA,self.lowA),min(upA,self.upA))
return np.random.uniform(lowA,upA)
#Draw the tcor values for room fires
#Inputs: relevant Qmin and Qmax thresholds and current tcor and A values
def draw_alph(self,Qmin,Qmax,tcor,A):
lowalph = (pl.log(Qmin)-pl.log(A))/pl.log(max(tcor+self.tfiretasks,0.0001))
upalph = (pl.log(Qmax)-pl.log(A))/pl.log(max(tcor+self.tfiretasks,0.0001))
if(upalph < self.lowalph):
upalph = self.lowalph
#return np.random.uniform(max(self.lowalph,lowalph),min(self.upalph,upalph))
#return np.random.uniform(self.lowalph,self.upalph)
return self.upalph
#Gibbs sampling function
def fireGibbs(self,n_iter,burn,thin,Qmin,Qmax,tcor,A,alph):
print 'fireGibbs called'
n_store = int(np.ceil((n_iter-burn))/thin+0.00001)
gibbstcor = np.full(n_store,-1)
gibbsA = np.full(n_store,-1)
gibbsalph = np.full(n_store,-1)
s = 0
for i in range(0,n_iter):
self.draw_tfiretasks()
A = self.draw_A(Qmin,Qmax,tcor,alph)
tcor = self.draw_tcor(Qmin,Qmax,A,alph)
alph = self.draw_alph(Qmin,Qmax,tcor,A)
if(i >= burn and i%thin==0):
gibbstcor[s] = tcor
gibbsA[s] = A
gibbsalph[s] = alph
s = s+1
return(gibbstcor,gibbsA,gibbsalph)
#output storage function
def gibbs_store(self,gibbsoutputlist,filenameoutputlist):
for i in range(0,len(gibbsoutputlist)):
f=open('raw_output/'+filenameoutputlist[i],'wb')
np.savetxt(f,gibbsoutputlist[i],delimiter=',')
f.close()
#Main class running function
def runGibbs(self,n_iter=1000,burn=500,thin=5):
#Run room fires first and output
gibbstcor,gibbsA,gibbsalph = self.fireGibbs(n_iter,burn,thin,self.sizemin,
self.sizeroom,self.tcorRoom,
self.ARoom,self.alphRoom)
#store output
self.gibbs_store([gibbstcor,gibbsA,gibbsalph],['tcorRoom.csv',
'ARoom.csv','alphRoom.csv'])
#Run building fires next and output
gibbstcor,gibbsA,gibbsalph = self.fireGibbs(n_iter,burn,thin,self.sizeroom,
self.sizebldg,self.tcorBldg,
self.ABldg,self.alphBldg)
#store output
self.gibbs_store([gibbstcor,gibbsA,gibbsalph],['tcorBldg.csv',
'ABldg.csv','alphBldg.csv'])
#Run beyond building fires last and output
gibbstcor,gibbsA,gibbsalph = self.fireGibbs(n_iter,burn,thin,self.sizebldg,
self.sizemax,self.tcorBeyond,
self.ABeyond,self.alphBeyond)
#store output
self.gibbs_store([gibbstcor,gibbsA,gibbsalph],['tcorBeyond.csv',
'ABeyond.csv','alphBeyond.csv'])
test = tctGibbs()
test.runGibbs(100000,100,10)
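# The stored chains can be inspected afterwards, e.g. with pandas (imported
# above); file names follow gibbs_store as called in runGibbs. Illustrative only:
# posterior_A_room = pd.read_csv('raw_output/ARoom.csv', header=None)
# print posterior_A_room.describe()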
| mit |
mmp2/megaman | megaman/utils/eigendecomp.py | 1 | 15990 | # LICENSE: Simplified BSD https://github.com/mmp2/megaman/blob/master/LICENSE
import warnings
import numpy as np
from scipy import sparse
from scipy.linalg import eigh, eig
from scipy.sparse.linalg import lobpcg, eigs, eigsh
from sklearn.utils.validation import check_random_state
from .validation import check_array
EIGEN_SOLVERS = ['auto', 'dense', 'arpack', 'lobpcg']
BAD_EIGEN_SOLVERS = {}
AMG_KWDS = ['strength', 'aggregate', 'smooth', 'max_levels', 'max_coarse']
try:
from pyamg import smoothed_aggregation_solver
PYAMG_LOADED = True
EIGEN_SOLVERS.append('amg')
except ImportError:
PYAMG_LOADED = False
BAD_EIGEN_SOLVERS['amg'] = """The eigen_solver was set to 'amg',
but pyamg is not available. Please either
install pyamg or use another method."""
def check_eigen_solver(eigen_solver, solver_kwds, size=None, nvec=None):
"""Check that the selected eigensolver is valid
Parameters
----------
eigen_solver : string
string value to validate
size, nvec : int (optional)
if both provided, use the specified problem size and number of vectors
to determine the optimal method to use with eigen_solver='auto'
Returns
-------
eigen_solver : string
The eigen solver. This only differs from the input if
eigen_solver == 'auto' and `size` is specified.
"""
if eigen_solver in BAD_EIGEN_SOLVERS:
raise ValueError(BAD_EIGEN_SOLVERS[eigen_solver])
elif eigen_solver not in EIGEN_SOLVERS:
raise ValueError("Unrecognized eigen_solver: '{0}'."
"Should be one of: {1}".format(eigen_solver,
EIGEN_SOLVERS))
if size is not None and nvec is not None:
# do some checks of the eigensolver
if eigen_solver == 'lobpcg' and size < 5 * nvec + 1:
warnings.warn("lobpcg does not perform well with small matrices or "
"with large numbers of vectors. Switching to 'dense'")
eigen_solver = 'dense'
solver_kwds = None
elif eigen_solver == 'auto':
if size > 200 and nvec < 10:
if PYAMG_LOADED:
eigen_solver = 'amg'
solver_kwds = None
else:
eigen_solver = 'arpack'
solver_kwds = None
else:
eigen_solver = 'dense'
solver_kwds = None
return eigen_solver, solver_kwds
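# Minimal usage sketch: check_eigen_solver is normally called internally, but
# it can be queried directly to see what 'auto' resolves to for a given problem
# size; the numbers below are illustrative only.
def _example_check_eigen_solver():
    solver, kwds = check_eigen_solver('auto', None, size=1000, nvec=5)
    # resolves to 'amg' if pyamg is installed, otherwise to 'arpack'
    return solver, kwds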
def _is_symmetric(M, tol = 1e-8):
if sparse.isspmatrix(M):
conditions = np.abs((M - M.T).data) < tol
else:
conditions = np.abs((M - M.T)) < tol
return(np.all(conditions))
def eigen_decomposition(G, n_components=8, eigen_solver='auto',
random_state=None,
drop_first=True, largest=True, solver_kwds=None):
"""
Function to compute the eigendecomposition of a square matrix.
Parameters
----------
G : array_like or sparse matrix
The square matrix for which to compute the eigen-decomposition.
n_components : integer, optional
The number of eigenvectors to return
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
attempt to choose the best method for input data (default)
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type.
This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
Algebraic Multigrid solver (requires ``pyamg`` to be installed)
It can be faster on very large, sparse problems, but may also lead
to instabilities.
random_state : int seed, RandomState instance, or None (default)
A pseudo random number generator used for the initialization of the
lobpcg eigen vectors decomposition when eigen_solver == 'amg'.
By default, arpack is used.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
lambdas, diffusion_map : eigenvalues, eigenvectors
"""
n_nodes = G.shape[0]
if drop_first:
n_components = n_components + 1
eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
size=n_nodes,
nvec=n_components)
random_state = check_random_state(random_state)
# Convert G to best type for eigendecomposition
if sparse.issparse(G):
        if G.getformat() != 'csr':
            # tocsr() returns a new matrix; it does not convert in place
            G = G.tocsr()
G = G.astype(np.float)
# Check for symmetry
is_symmetric = _is_symmetric(G)
# Try Eigen Methods:
if eigen_solver == 'arpack':
# This matches the internal initial state used by ARPACK
v0 = random_state.uniform(-1, 1, G.shape[0])
if is_symmetric:
if largest:
which = 'LM'
else:
which = 'SM'
lambdas, diffusion_map = eigsh(G, k=n_components, which=which,
v0=v0,**(solver_kwds or {}))
else:
if largest:
which = 'LR'
else:
which = 'SR'
lambdas, diffusion_map = eigs(G, k=n_components, which=which,
**(solver_kwds or {}))
lambdas = np.real(lambdas)
diffusion_map = np.real(diffusion_map)
elif eigen_solver == 'amg':
# separate amg & lobpcg keywords:
if solver_kwds is not None:
amg_kwds = {}
lobpcg_kwds = solver_kwds.copy()
for kwd in AMG_KWDS:
if kwd in solver_kwds.keys():
amg_kwds[kwd] = solver_kwds[kwd]
del lobpcg_kwds[kwd]
else:
amg_kwds = None
lobpcg_kwds = None
if not is_symmetric:
raise ValueError("lobpcg requires symmetric matrices.")
if not sparse.issparse(G):
warnings.warn("AMG works better for sparse matrices")
# Use AMG to get a preconditioner and speed up the eigenvalue problem.
ml = smoothed_aggregation_solver(check_array(G, accept_sparse = ['csr']),**(amg_kwds or {}))
M = ml.aspreconditioner()
n_find = min(n_nodes, 5 + 2*n_components)
X = random_state.rand(n_nodes, n_find)
X[:, 0] = (G.diagonal()).ravel()
lambdas, diffusion_map = lobpcg(G, X, M=M, largest=largest,**(lobpcg_kwds or {}))
sort_order = np.argsort(lambdas)
if largest:
lambdas = lambdas[sort_order[::-1]]
diffusion_map = diffusion_map[:, sort_order[::-1]]
else:
lambdas = lambdas[sort_order]
diffusion_map = diffusion_map[:, sort_order]
lambdas = lambdas[:n_components]
diffusion_map = diffusion_map[:, :n_components]
elif eigen_solver == "lobpcg":
if not is_symmetric:
raise ValueError("lobpcg requires symmetric matrices.")
n_find = min(n_nodes, 5 + 2*n_components)
X = random_state.rand(n_nodes, n_find)
lambdas, diffusion_map = lobpcg(G, X, largest=largest,**(solver_kwds or {}))
sort_order = np.argsort(lambdas)
if largest:
lambdas = lambdas[sort_order[::-1]]
diffusion_map = diffusion_map[:, sort_order[::-1]]
else:
lambdas = lambdas[sort_order]
diffusion_map = diffusion_map[:, sort_order]
lambdas = lambdas[:n_components]
diffusion_map = diffusion_map[:, :n_components]
elif eigen_solver == 'dense':
if sparse.isspmatrix(G):
G = G.todense()
if is_symmetric:
lambdas, diffusion_map = eigh(G,**(solver_kwds or {}))
else:
lambdas, diffusion_map = eig(G,**(solver_kwds or {}))
sort_index = np.argsort(lambdas)
lambdas = lambdas[sort_index]
        diffusion_map = diffusion_map[:, sort_index]
if largest:# eigh always returns eigenvalues in ascending order
lambdas = lambdas[::-1] # reverse order the e-values
diffusion_map = diffusion_map[:, ::-1] # reverse order the vectors
lambdas = lambdas[:n_components]
diffusion_map = diffusion_map[:, :n_components]
return (lambdas, diffusion_map)
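# Minimal usage sketch for eigen_decomposition on a small random symmetric
# positive semi-definite matrix; the matrix, solver choice and number of
# components are illustrative only.
def _example_eigen_decomposition():
    rng = np.random.RandomState(0)
    X = rng.rand(100, 100)
    G = np.dot(X, X.T)  # symmetric PSD matrix
    lambdas, vectors = eigen_decomposition(G, n_components=5,
                                           eigen_solver='dense',
                                           drop_first=False)
    return lambdas, vectors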
def null_space(M, k, k_skip=1, eigen_solver='arpack',
random_state=None, solver_kwds=None):
"""
Find the null space of a matrix M: eigenvectors associated with 0 eigenvalues
Parameters
----------
M : {array, matrix, sparse matrix, LinearOperator}
Input covariance matrix: should be symmetric positive semi-definite
k : integer
Number of eigenvalues/vectors to return
k_skip : integer, optional
Number of low eigenvalues to skip.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state: numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
null_space : estimated k vectors of the null space
error : estimated error (sum of eigenvalues)
Notes
-----
dense solver key words: see
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eigh.html
for symmetric problems and
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.linalg.eig.html#scipy.linalg.eig
for non symmetric problems.
    arpack solver key words: see
http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigsh.html
for symmetric problems and http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.sparse.linalg.eigs.html#scipy.sparse.linalg.eigs
for non symmetric problems.
lobpcg solver keywords: see
http://docs.scipy.org/doc/scipy/reference/generated/scipy.sparse.linalg.lobpcg.html
amg solver keywords: see
http://pyamg.googlecode.com/svn/branches/1.0.x/Docs/html/pyamg.aggregation.html#module-pyamg.aggregation.aggregation
(Note amg solver uses lobpcg and also accepts lobpcg keywords)
"""
eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
size=M.shape[0],
nvec=k + k_skip)
random_state = check_random_state(random_state)
if eigen_solver == 'arpack':
# This matches the internal initial state used by ARPACK
v0 = random_state.uniform(-1, 1, M.shape[0])
try:
eigen_values, eigen_vectors = eigsh(M, k + k_skip, sigma=0.0,
v0=v0,**(solver_kwds or {}))
except RuntimeError as msg:
raise ValueError("Error in determining null-space with ARPACK. "
"Error message: '%s'. "
"Note that method='arpack' can fail when the "
"weight matrix is singular or otherwise "
"ill-behaved. method='dense' is recommended. "
"See online documentation for more information."
% msg)
return eigen_vectors[:, k_skip:], np.sum(eigen_values[k_skip:])
elif eigen_solver == 'dense':
if hasattr(M, 'toarray'):
M = M.toarray()
eigen_values, eigen_vectors = eigh(M, eigvals=(0, k+k_skip),overwrite_a=True,
**(solver_kwds or {}))
index = np.argsort(np.abs(eigen_values))
eigen_vectors = eigen_vectors[:, index]
eigen_values = eigen_values[index]
return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
# eigen_values, eigen_vectors = eigh(
# M, eigvals=(k_skip, k + k_skip - 1), overwrite_a=True)
# index = np.argsort(np.abs(eigen_values))
# return eigen_vectors[:, index], np.sum(eigen_values)
elif (eigen_solver == 'amg' or eigen_solver == 'lobpcg'):
# M should be positive semi-definite. Add 1 to make it pos. def.
try:
M = sparse.identity(M.shape[0]) + M
n_components = min(k + k_skip + 10, M.shape[0])
eigen_values, eigen_vectors = eigen_decomposition(M, n_components,
eigen_solver = eigen_solver,
drop_first = False,
largest = False,
random_state=random_state,
solver_kwds=solver_kwds)
eigen_values = eigen_values -1
index = np.argsort(np.abs(eigen_values))
eigen_values = eigen_values[index]
eigen_vectors = eigen_vectors[:, index]
return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
except np.linalg.LinAlgError: # try again with bigger increase
warnings.warn("LOBPCG failed the first time. Increasing Pos Def adjustment.")
M = 2.0*sparse.identity(M.shape[0]) + M
n_components = min(k + k_skip + 10, M.shape[0])
eigen_values, eigen_vectors = eigen_decomposition(M, n_components,
eigen_solver = eigen_solver,
drop_first = False,
largest = False,
random_state=random_state,
solver_kwds=solver_kwds)
eigen_values = eigen_values - 2
index = np.argsort(np.abs(eigen_values))
eigen_values = eigen_values[index]
eigen_vectors = eigen_vectors[:, index]
return eigen_vectors[:, k_skip:k+1], np.sum(eigen_values[k_skip:k+1])
else:
raise ValueError("Unrecognized eigen_solver '%s'" % eigen_solver)
| bsd-2-clause |
zihua/scikit-learn | sklearn/utils/tests/test_class_weight.py | 50 | 13151 | import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.datasets import make_blobs
from sklearn.utils.class_weight import compute_class_weight
from sklearn.utils.class_weight import compute_sample_weight
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raise_message
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
def test_compute_class_weight():
# Test (and demo) compute_class_weight.
y = np.asarray([2, 2, 2, 3, 3, 4])
classes = np.unique(y)
cw = assert_warns(DeprecationWarning,
compute_class_weight, "auto", classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_true(cw[0] < cw[1] < cw[2])
cw = compute_class_weight("balanced", classes, y)
# total effect of samples is preserved
class_counts = np.bincount(y)[2:]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_true(cw[0] < cw[1] < cw[2])
def test_compute_class_weight_not_present():
# Raise error when y does not contain all class labels
classes = np.arange(4)
y = np.asarray([0, 0, 0, 1, 1, 2])
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
# Raise error when y has items not in classes
classes = np.arange(2)
assert_raises(ValueError, compute_class_weight, "auto", classes, y)
assert_raises(ValueError, compute_class_weight, "balanced", classes, y)
assert_raises(ValueError, compute_class_weight, {0: 1., 1: 2.}, classes, y)
def test_compute_class_weight_dict():
classes = np.arange(3)
class_weights = {0: 1.0, 1: 2.0, 2: 3.0}
y = np.asarray([0, 0, 1, 2])
cw = compute_class_weight(class_weights, classes, y)
# When the user specifies class weights, compute_class_weights should just
# return them.
assert_array_almost_equal(np.asarray([1.0, 2.0, 3.0]), cw)
# When a class weight is specified that isn't in classes, a ValueError
# should get raised
msg = 'Class label 4 not present.'
class_weights = {0: 1.0, 1: 2.0, 2: 3.0, 4: 1.5}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
msg = 'Class label -1 not present.'
class_weights = {-1: 5.0, 0: 1.0, 1: 2.0, 2: 3.0}
assert_raise_message(ValueError, msg, compute_class_weight, class_weights,
classes, y)
def test_compute_class_weight_invariance():
# Test that results with class_weight="balanced" is invariant wrt
# class imbalance if the number of samples is identical.
# The test uses a balanced two class dataset with 100 datapoints.
# It creates three versions, one where class 1 is duplicated
# resulting in 150 points of class 1 and 50 of class 0,
# one where there are 50 points in class 1 and 150 in class 0,
# and one where there are 100 points of each class (this one is balanced
# again).
# With balancing class weights, all three should give the same model.
X, y = make_blobs(centers=2, random_state=0)
# create dataset where class 1 is duplicated twice
X_1 = np.vstack([X] + [X[y == 1]] * 2)
y_1 = np.hstack([y] + [y[y == 1]] * 2)
# create dataset where class 0 is duplicated twice
X_0 = np.vstack([X] + [X[y == 0]] * 2)
y_0 = np.hstack([y] + [y[y == 0]] * 2)
# duplicate everything
X_ = np.vstack([X] * 2)
y_ = np.hstack([y] * 2)
# results should be identical
logreg1 = LogisticRegression(class_weight="balanced").fit(X_1, y_1)
logreg0 = LogisticRegression(class_weight="balanced").fit(X_0, y_0)
logreg = LogisticRegression(class_weight="balanced").fit(X_, y_)
assert_array_almost_equal(logreg1.coef_, logreg0.coef_)
assert_array_almost_equal(logreg.coef_, logreg0.coef_)
def test_compute_class_weight_auto_negative():
# Test compute_class_weight when labels are negative
# Test with balanced class labels.
classes = np.array([-2, -1, 0])
y = np.asarray([-1, -1, 0, 0, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1., 1., 1.]))
# Test with unbalanced class labels.
y = np.asarray([-1, 0, 0, -2, -2, -2])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([0.545, 1.636, 0.818]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
assert_equal(len(cw), len(classes))
class_counts = np.bincount(y + 2)
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2. / 3, 2., 1.])
def test_compute_class_weight_auto_unordered():
# Test compute_class_weight when classes are unordered
classes = np.array([1, 0, 3])
y = np.asarray([1, 0, 0, 3, 3, 3])
cw = assert_warns(DeprecationWarning, compute_class_weight, "auto",
classes, y)
assert_almost_equal(cw.sum(), classes.shape)
assert_equal(len(cw), len(classes))
assert_array_almost_equal(cw, np.array([1.636, 0.818, 0.545]), decimal=3)
cw = compute_class_weight("balanced", classes, y)
class_counts = np.bincount(y)[classes]
assert_almost_equal(np.dot(cw, class_counts), y.shape[0])
assert_array_almost_equal(cw, [2., 1., 2. / 3])
def test_compute_sample_weight():
# Test (and demo) compute_sample_weight.
# Test with balanced classes
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with user-defined weights
sample_weight = compute_sample_weight({1: 2, 2: 1}, y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 1., 1., 1.])
# Test with column vector of balanced classes
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with unbalanced classes
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
expected_auto = np.asarray([.6, .6, .6, .6, .6, .6, 1.8])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y)
expected_balanced = np.array([0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 0.7777, 2.3333])
assert_array_almost_equal(sample_weight, expected_balanced, decimal=4)
# Test with `None` weights
sample_weight = compute_sample_weight(None, y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 1.])
# Test with multi-output of balanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with multi-output with user-defined weights
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = compute_sample_weight([{1: 2, 2: 1}, {0: 1, 1: 2}], y)
assert_array_almost_equal(sample_weight, [2., 2., 2., 2., 2., 2.])
# Test with multi-output of unbalanced classes
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [3, -1]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y)
assert_array_almost_equal(sample_weight, expected_balanced ** 2, decimal=3)
def test_compute_sample_weight_with_subsample():
# Test compute_sample_weight with subsamples specified.
# Test with balanced classes and all samples present
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with column vector of balanced classes and all samples present
y = np.asarray([[1], [1], [1], [2], [2], [2]])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y)
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1.])
# Test with a subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning,
compute_sample_weight, "auto", y, range(4))
assert_array_almost_equal(sample_weight, [.5, .5, .5, 1.5, 1.5, 1.5])
sample_weight = compute_sample_weight("balanced", y, range(4))
assert_array_almost_equal(sample_weight, [2. / 3, 2. / 3,
2. / 3, 2., 2., 2.])
# Test with a bootstrap subsample
y = np.asarray([1, 1, 1, 2, 2, 2])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
expected_auto = np.asarray([1 / 3., 1 / 3., 1 / 3., 5 / 3., 5 / 3., 5 / 3.])
assert_array_almost_equal(sample_weight, expected_auto)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
expected_balanced = np.asarray([0.6, 0.6, 0.6, 3., 3., 3.])
assert_array_almost_equal(sample_weight, expected_balanced)
# Test with a bootstrap subsample for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_auto ** 2)
sample_weight = compute_sample_weight("balanced", y, [0, 1, 1, 2, 2, 3])
assert_array_almost_equal(sample_weight, expected_balanced ** 2)
# Test with a missing class
y = np.asarray([1, 1, 1, 2, 2, 2, 3])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
# Test with a missing class for multi-output
y = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1], [2, 2]])
sample_weight = assert_warns(DeprecationWarning, compute_sample_weight,
"auto", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
sample_weight = compute_sample_weight("balanced", y, range(6))
assert_array_almost_equal(sample_weight, [1., 1., 1., 1., 1., 1., 0.])
def test_compute_sample_weight_errors():
# Test compute_sample_weight raises errors expected.
# Invalid preset string
y = np.asarray([1, 1, 1, 2, 2, 2])
y_ = np.asarray([[1, 0], [1, 0], [1, 0], [2, 1], [2, 1], [2, 1]])
assert_raises(ValueError, compute_sample_weight, "ni", y)
assert_raises(ValueError, compute_sample_weight, "ni", y, range(4))
assert_raises(ValueError, compute_sample_weight, "ni", y_)
assert_raises(ValueError, compute_sample_weight, "ni", y_, range(4))
# Not "auto" for subsample
assert_raises(ValueError,
compute_sample_weight, {1: 2, 2: 1}, y, range(4))
# Not a list or preset for multi-output
assert_raises(ValueError, compute_sample_weight, {1: 2, 2: 1}, y_)
# Incorrect length list for multi-output
assert_raises(ValueError, compute_sample_weight, [{1: 2, 2: 1}], y_)
| bsd-3-clause |
sserkez/ocelot | gui/optics.py | 2 | 7275 | '''
user interface for viewing/editing photon optics layouts
'''
from numpy import sin, cos, pi, sqrt, log, array, random, sign
from numpy.linalg import norm
import numpy as np
import matplotlib.pyplot as plt
import scipy.integrate as integrate
#import matplotlib.animation as animation
from ocelot.optics.elements import *
from ocelot.optics.wave import *
def init_plots(views, geo):
scene = Scene()
scene.views = views
scene.fig = plt.figure()
nviews = len(views)
scene.ax = ['']*nviews
scene.profile_im = {}
for iview in range(nviews):
view_id = nviews*100 + 10 + iview + 1
scene.ax[iview] = scene.fig.add_subplot(view_id, autoscale_on=True)
if views[iview] == 'geometry:x' or views[iview] == 'geometry:y':
projection_name = views[iview].split(':')[1]
plot_geometry(scene.ax[iview], geo, projection_name)
scene.ax[iview].grid()
scene.ax[iview].set_title(projection_name)
if views[iview].startswith('detectors'):
if views[iview].startswith('detectors:'):
id = views[iview].split(':')[1]
for obj in geo():
if obj.__class__ == Detector:
print('adding view for detector: ', obj.id)
scene.ax[iview].set_title('detector:' + id)
scene.profile_im[id] = scene.ax[iview]
#scene.profile_im[id] = scene.ax[iview].imshow(obj.matrix.transpose(), cmap='gist_heat',interpolation='none',extent=[0,1,0,1], vmin=0, vmax=10)
return scene
def plot_geometry(ax, geo, proj='y'):
debug("plotting geometry ", proj)
if proj == 'y': idx = 1
if proj == 'x': idx = 0
for o in geo():
if o.__class__ == Mirror:
ang = - np.arctan2(o.no[idx] , o.no[2]) - pi
#print 'ang=', ang
z1 = o.r[2] - o.size[idx] * sin(ang)
z2 = o.r[2]
z3 = o.r[2] + o.size[idx] * sin(ang)
y1 = -o.size[idx] + o.r[idx] + o.size[idx]*(1-cos(ang))
y2 = o.r[idx]
y3 = o.size[idx] + o.r[idx] - o.size[idx]*(1-cos(ang))
li, = ax.plot([z1,z2,z3], [y1,y2,y3], 'b-', lw=3)
y_bnd = np.linspace(y1,y3, 100)
z_bnd = np.linspace(z1, z3, 100)
for z,y in zip(z_bnd[5::10],y_bnd[5::10]):
tick_size = o.size[2]
ax.plot([z,z-tick_size*np.sign(o.no[2])], [y,y-(y_bnd[5] - y_bnd[0])], 'b-', lw=2)
if o.__class__ == EllipticMirror:
#TODO; replace with generic rotation
ang = - np.arctan2(o.no[idx] , o.no[2]) - pi
#print 'ang=', ang
z1 = o.r[2] - o.size[idx] * sin(ang)
z2 = o.r[2]
z3 = o.r[2] + o.size[idx] * sin(ang)
y1 = -o.size[idx] + o.r[idx] + o.size[idx]*(1-cos(ang))
y2 = o.r[idx]
y3 = o.size[idx] + o.r[idx] - o.size[idx]*(1-cos(ang))
#li, = ax.plot([z1,z2,z3], [y1,y2,y3], color="#aa00ff", lw=3)
phi_max = np.arcsin(o.size[idx]/o.a[0])
#y_bnd = np.linspace(y1,y3, 100)
phi_bnd = np.linspace(-phi_max, phi_max, 100)
z_bnd = np.zeros_like(phi_bnd)
y_bnd = np.zeros_like(phi_bnd)
for i in range( len(phi_bnd) ):
z_bnd[i] = o.r[2] + o.a[0]*sin(phi_bnd[i])
y_bnd[i] = o.r[idx] + o.a[1] - o.a[1]*cos(phi_bnd[i])
#for z,y in zip(z_bnd[5:-10:10],y_bnd[5:-10:10]):
n_step = 2
for i in np.arange(0,len(z_bnd) - n_step ,n_step):
tick_size = o.size[2]
#ax.plot([z,z-tick_size*np.sign(o.no[2])], [y,y-(y_bnd[5] - y_bnd[0])], 'b-', lw=2)
ax.plot([z_bnd[i],z_bnd[i+n_step]], [y_bnd[i],y_bnd[i+n_step]], color="#aa00ff", lw=3)
if o.__class__ == ParabolicMirror:
y_bnd = np.linspace(-o.size[idx], o.size[idx], 100)
z_bnd = o.r[2] - o.a[1] * y_bnd**2
#print y_bnd, z_bnd
li, = ax.plot(z_bnd, y_bnd, 'b-', lw=3)
for z,y in zip(z_bnd[5::10],y_bnd[5::10]):
ax.plot([z,z-1.0*np.sign(o.no[2])], [y,y-(y_bnd[5] - y_bnd[0])], 'b-', lw=2)
if o.__class__ == Lense:
y_bnd = np.linspace(-o.D/2,o.D/2,100)
z_bnd1 = (o.r[2]-o.s1) + (o.s1 / (o.D/2)**2 ) * y_bnd**2
z_bnd2 = (o.r[2]+o.s2) - (o.s2 / (o.D/2)**2 ) * y_bnd**2
li, = ax.plot(z_bnd1, y_bnd, 'r-', lw=3)
li, = ax.plot(z_bnd2, y_bnd, 'r-', lw=3)
if o.__class__ == Aperture:
li, = ax.plot([o.r[2],o.r[2]], [o.r[idx] + o.d[idx],o.r[idx] + o.size[idx]], color='#000000', lw=3)
li, = ax.plot([o.r[2],o.r[2]], [o.r[idx] -o.d[idx],o.r[idx] - o.size[idx]], color='#000000', lw=3)
if o.__class__ == Crystal:
li, = ax.plot([o.r[2],o.r[2]], [o.r[idx] - o.size[idx], o.r[idx] + o.size[idx]], color='#999999', lw=3)
if o.__class__ == Grating:
debug("plotting grating")
#TODO; replace with generic rotation
ang = - np.arctan2(o.no[idx] , o.no[2]) - pi
#print 'ang=', ang
z1 = o.r[2] - o.size[idx] * sin(ang)
z2 = o.r[2]
z3 = o.r[2] + o.size[idx] * sin(ang)
y1 = -o.size[idx] + o.r[idx] + o.size[idx]*(1-cos(ang))
y2 = o.r[idx]
y3 = o.size[idx] + o.r[idx] - o.size[idx]*(1-cos(ang))
li, = ax.plot([z1,z2,z3], [y1,y2,y3], color="#AA3377", lw=3)
y_bnd = np.linspace(y1,y3, 100)
z_bnd = np.linspace(z1, z3, 100)
dy = max(abs(y3-y1), abs(z3-z1)) / 20
dz = dy
for z,y in zip(z_bnd[5::10],y_bnd[5::10]):
ax.plot([z-dz,z,z+dz], [y,y+dy, y], color="#AA3377", lw=2)
zmax = np.max([ x.r[2] + x.size[2] for x in geo])
zmin = np.min([x.r[2] - x.size[2] for x in geo])
ymax = np.max( [x.r[1] + x.size[1] for x in geo])
ymin = np.min( [ x.r[1] - x.size[1] for x in geo])
z_margin = (zmax - zmin)*0.1
y_margin = (ymax - ymin)*0.1
#print zmin, zmax, z_margin, ymin, ymax, y_margin
#ax.set_xlim(zmin-z_margin,zmax+z_margin)
#ax.set_ylim(ymin-y_margin,ymax+y_margin)
def plot_rays(ax, rays, proj='x', alpha=0.4):
for r in rays:
debug('plotting ray!', r.r0[0], r.k[0], r.s[0])
for i in range(len(r.r0)):
debug('-->', r.r0[i], r.k[i], r.s[i])
if proj == 'x':
ax.plot([r.r0[i][2], r.r0[i][2] + r.k[i][2]*r.s[i] ], [r.r0[i][0], r.r0[i][0] + r.k[i][0]*r.s[i] ], color='#006600', lw=1, alpha=alpha )
if proj == 'y':
ax.plot([r.r0[i][2], r.r0[i][2] + r.k[i][2]*r.s[i] ], [r.r0[i][1], r.r0[i][1] + r.k[i][1]*r.s[i] ], color='#006600', lw=1, alpha=alpha )
| gpl-3.0 |
ContinuumIO/dask | dask/dataframe/groupby.py | 2 | 62022 | import collections
import itertools as it
import operator
import warnings
import numpy as np
import pandas as pd
from .core import (
DataFrame,
Series,
aca,
map_partitions,
new_dd_object,
no_default,
split_out_on_index,
_extract_meta,
)
from .methods import drop_columns, concat
from .shuffle import shuffle
from .utils import (
make_meta,
insert_meta_param_description,
raise_on_meta_error,
is_series_like,
is_dataframe_like,
)
from ..base import tokenize
from ..utils import derived_from, M, funcname, itemgetter
from ..highlevelgraph import HighLevelGraph
# #############################################
#
# GroupBy implementation notes
#
# Dask groupby supports reductions, i.e., mean, sum and the like, and apply. The
# former do not shuffle the data and are efficiently implemented as tree
# reductions. The latter is implemented by shuffling the underlying partiitons
# such that all items of a group can be found in the same parititon.
#
# The argument to ``.groupby``, the index, can be a ``str``, ``dd.DataFrame``,
# ``dd.Series``, or a list thereof. In operations on the grouped object, the
# divisions of the grouped object and the items of index have to align.
# Currently, there is no support to shuffle the index values as part of the
# groupby operation. Therefore, the alignment has to be guaranteed by the
# caller.
#
# To operate on matching partitions, most groupby operations exploit the
# corresponding support in ``apply_concat_apply``. Specifically, this function
# operates on matching partitions of frame-like objects passed as varargs.
#
# After the initial chunk step, the passed index is implicitly passed along to
# subsequent operations as the index of the partitions. Groupby operations on
# the individual partitions can then access the index via the ``levels``
# parameter of the ``groupby`` function. The correct argument is determined by
# the ``_determine_levels`` function.
#
# To minimize overhead, series in an index that were obtained by getitem on the
# object to group are not passed as series to the various operations, but as
# column keys. This transformation is implemented as ``_normalize_index``.
#
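# As an illustrative sketch (hypothetical names, not part of this module), a
# tree-reduced groupby sum is conceptually
#
#     chunks = [part.groupby(key).sum() for part in partitions]   # chunk step
#     result = pd.concat(chunks).groupby(level=0).sum()           # aggregate step
#
# whereas ``.apply`` shuffles the rows so that every group ends up in a single
# partition before the user function is applied.
#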
# #############################################
def _determine_levels(index):
"""Determine the correct levels argument to groupby.
"""
if isinstance(index, (tuple, list)) and len(index) > 1:
return list(range(len(index)))
else:
return 0
def _normalize_index(df, index):
"""Replace series with column names in an index wherever possible.
"""
if not isinstance(df, DataFrame):
return index
elif isinstance(index, list):
return [_normalize_index(df, col) for col in index]
elif (
is_series_like(index)
and index.name in df.columns
and index._name == df[index.name]._name
):
return index.name
elif (
isinstance(index, DataFrame)
and set(index.columns).issubset(df.columns)
and index._name == df[index.columns]._name
):
return list(index.columns)
else:
return index
def _maybe_slice(grouped, columns):
"""
Slice columns if grouped is pd.DataFrameGroupBy
"""
# FIXME: update with better groupby object detection (i.e.: ngroups, get_group)
if "groupby" in type(grouped).__name__.lower():
if columns is not None:
if isinstance(columns, (tuple, list, set, pd.Index)):
columns = list(columns)
return grouped[columns]
return grouped
def _is_aligned(df, by):
"""Check if `df` and `by` have aligned indices"""
if is_series_like(by) or is_dataframe_like(by):
return df.index.equals(by.index)
elif isinstance(by, (list, tuple)):
return all(_is_aligned(df, i) for i in by)
else:
return True
def _groupby_raise_unaligned(df, **kwargs):
"""Groupby, but raise if df and `by` key are unaligned.
Pandas supports grouping by a column that doesn't align with the input
frame/series/index. However, the reindexing does not seem to be
threadsafe, and can result in incorrect results. Since grouping by an
unaligned key is generally a bad idea, we just error loudly in dask.
For more information see pandas GH issue #15244 and Dask GH issue #1876."""
by = kwargs.get("by", None)
if by is not None and not _is_aligned(df, by):
msg = (
"Grouping by an unaligned index is unsafe and unsupported.\n"
"This can be caused by filtering only one of the object or\n"
"grouping key. For example, the following works in pandas,\n"
"but not in dask:\n"
"\n"
"df[df.foo < 0].groupby(df.bar)\n"
"\n"
"This can be avoided by either filtering beforehand, or\n"
"passing in the name of the column instead:\n"
"\n"
"df2 = df[df.foo < 0]\n"
"df2.groupby(df2.bar)\n"
"# or\n"
"df[df.foo < 0].groupby('bar')\n"
"\n"
"For more information see dask GH issue #1876."
)
raise ValueError(msg)
elif by is not None and len(by):
# since we're coming through apply, `by` will be a tuple.
# Pandas treats tuples as a single key, and lists as multiple keys
# We want multiple keys
if isinstance(by, str):
by = [by]
kwargs.update(by=list(by))
return df.groupby(**kwargs)
def _groupby_slice_apply(
df, grouper, key, func, *args, group_keys=True, dropna=None, **kwargs
):
# No need to use raise if unaligned here - this is only called after
# shuffling, which makes everything aligned already
dropna = {"dropna": dropna} if dropna is not None else {}
g = df.groupby(grouper, group_keys=group_keys, **dropna)
if key:
g = g[key]
return g.apply(func, *args, **kwargs)
def _groupby_slice_transform(
df, grouper, key, func, *args, group_keys=True, dropna=None, **kwargs
):
# No need to use raise if unaligned here - this is only called after
# shuffling, which makes everything aligned already
dropna = {"dropna": dropna} if dropna is not None else {}
g = df.groupby(grouper, group_keys=group_keys, **dropna)
if key:
g = g[key]
# Cannot call transform on an empty dataframe
if len(df) == 0:
return g.apply(func, *args, **kwargs)
return g.transform(func, *args, **kwargs)
def _groupby_get_group(df, by_key, get_key, columns):
# SeriesGroupBy may pass df which includes group key
grouped = _groupby_raise_unaligned(df, by=by_key)
if get_key in grouped.groups:
if is_dataframe_like(df):
grouped = grouped[columns]
return grouped.get_group(get_key)
else:
# to create empty DataFrame/Series, which has the same
# dtype as the original
if is_dataframe_like(df):
# may be SeriesGroupBy
df = df[columns]
return df.iloc[0:0]
###############################################################
# Aggregation
###############################################################
class Aggregation(object):
"""User defined groupby-aggregation.
This class allows users to define their own custom aggregation in terms of
operations on Pandas dataframes in a map-reduce style. You need to specify
what operation to do on each chunk of data, how to combine those chunks of
data together, and then how to finalize the result.
See :ref:`dataframe.groupby.aggregate` for more.
Parameters
----------
name : str
the name of the aggregation. It should be unique, since intermediate
result will be identified by this name.
chunk : callable
a function that will be called with the grouped column of each
partition. It can either return a single series or a tuple of series.
The index has to be equal to the groups.
agg : callable
a function that will be called to aggregate the results of each chunk.
Again the argument(s) will be grouped series. If ``chunk`` returned a
tuple, ``agg`` will be called with all of them as individual positional
arguments.
finalize : callable
an optional finalizer that will be called with the results from the
aggregation.
Examples
--------
We could implement ``sum`` as follows:
>>> custom_sum = dd.Aggregation(
... name='custom_sum',
... chunk=lambda s: s.sum(),
... agg=lambda s0: s0.sum()
... ) # doctest: +SKIP
>>> df.groupby('g').agg(custom_sum) # doctest: +SKIP
We can implement ``mean`` as follows:
>>> custom_mean = dd.Aggregation(
... name='custom_mean',
... chunk=lambda s: (s.count(), s.sum()),
... agg=lambda count, sum: (count.sum(), sum.sum()),
... finalize=lambda count, sum: sum / count,
... ) # doctest: +SKIP
>>> df.groupby('g').agg(custom_mean) # doctest: +SKIP
Though of course, both of these are built-in and so you don't need to
implement them yourself.
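    As a further illustration (a hypothetical peak-to-peak aggregation, not a
    built-in), the same pattern gives:
    >>> custom_ptp = dd.Aggregation(
    ...     name='custom_ptp',
    ...     chunk=lambda s: (s.max(), s.min()),
    ...     agg=lambda mx, mn: (mx.max(), mn.min()),
    ...     finalize=lambda mx, mn: mx - mn,
    ... )  # doctest: +SKIP
    >>> df.groupby('g').agg(custom_ptp)  # doctest: +SKIP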
"""
def __init__(self, name, chunk, agg, finalize=None):
self.chunk = chunk
self.agg = agg
self.finalize = finalize
self.__name__ = name
def _groupby_aggregate(
df, aggfunc=None, levels=None, dropna=None, sort=False, **kwargs
):
dropna = {"dropna": dropna} if dropna is not None else {}
return aggfunc(df.groupby(level=levels, sort=sort, **dropna), **kwargs)
def _apply_chunk(df, *index, dropna=None, **kwargs):
func = kwargs.pop("chunk")
columns = kwargs.pop("columns")
dropna = {"dropna": dropna} if dropna is not None else {}
g = _groupby_raise_unaligned(df, by=index, **dropna)
if is_series_like(df) or columns is None:
return func(g, **kwargs)
else:
if isinstance(columns, (tuple, list, set, pd.Index)):
columns = list(columns)
return func(g[columns], **kwargs)
def _var_chunk(df, *index):
if is_series_like(df):
df = df.to_frame()
df = df.copy()
g = _groupby_raise_unaligned(df, by=index)
x = g.sum()
n = g[x.columns].count().rename(columns=lambda c: (c, "-count"))
cols = x.columns
df[cols] = df[cols] ** 2
g2 = _groupby_raise_unaligned(df, by=index)
x2 = g2.sum().rename(columns=lambda c: (c, "-x2"))
return concat([x, x2, n], axis=1)
def _var_combine(g, levels, sort=False):
return g.groupby(level=levels, sort=sort).sum()
def _var_agg(g, levels, ddof, sort=False):
g = g.groupby(level=levels, sort=sort).sum()
nc = len(g.columns)
x = g[g.columns[: nc // 3]]
# chunks columns are tuples (value, name), so we just keep the value part
x2 = g[g.columns[nc // 3 : 2 * nc // 3]].rename(columns=lambda c: c[0])
n = g[g.columns[-nc // 3 :]].rename(columns=lambda c: c[0])
# TODO: replace with _finalize_var?
result = x2 - x ** 2 / n
div = n - ddof
div[div < 0] = 0
result /= div
result[(n - ddof) == 0] = np.nan
assert is_dataframe_like(result)
result[result < 0] = 0 # avoid rounding errors that take us to zero
return result
def _cov_combine(g, levels):
return g
def _cov_finalizer(df, cols, std=False):
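    # From one group's aggregated sums, pairwise products and counts, build the
    # flattened covariance matrix as a Series indexed by (column, column) pairs;
    # with std=True the entries are additionally normalized by the per-column
    # standard deviations, yielding correlations.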
vals = []
num_elements = len(list(it.product(cols, repeat=2)))
num_cols = len(cols)
vals = list(range(num_elements))
col_idx_mapping = dict(zip(cols, range(num_cols)))
for i, j in it.combinations_with_replacement(df[cols].columns, 2):
x = col_idx_mapping[i]
y = col_idx_mapping[j]
idx = x + num_cols * y
mul_col = "%s%s" % (i, j)
ni = df["%s-count" % i]
nj = df["%s-count" % j]
n = np.sqrt(ni * nj)
div = n - 1
div[div < 0] = 0
val = (df[mul_col] - df[i] * df[j] / n).values[0] / div.values[0]
if std:
ii = "%s%s" % (i, i)
jj = "%s%s" % (j, j)
std_val_i = (df[ii] - (df[i] ** 2) / ni).values[0] / div.values[0]
std_val_j = (df[jj] - (df[j] ** 2) / nj).values[0] / div.values[0]
val = val / np.sqrt(std_val_i * std_val_j)
vals[idx] = val
if i != j:
idx = num_cols * x + y
vals[idx] = val
level_1 = cols
index = pd.MultiIndex.from_product([level_1, level_1])
return pd.Series(vals, index=index)
def _mul_cols(df, cols):
"""Internal function to be used with apply to multiply
each column in a dataframe by every other column
a b c -> a*a, a*b, b*b, b*c, c*c
"""
_df = type(df)()
for i, j in it.combinations_with_replacement(cols, 2):
col = "%s%s" % (i, j)
_df[col] = df[i] * df[j]
return _df
def _cov_chunk(df, *index):
"""Covariance Chunk Logic
Parameters
----------
df : Pandas.DataFrame
std : bool, optional
When std=True we are calculating with Correlation
Returns
-------
tuple
Processed X, Multipled Cols,
"""
if is_series_like(df):
df = df.to_frame()
df = df.copy()
# mapping columns to str(numerical) values allows us to easily handle
# arbitrary column names (numbers, string, empty strings)
col_mapping = collections.OrderedDict()
for i, c in enumerate(df.columns):
col_mapping[c] = str(i)
df = df.rename(columns=col_mapping)
cols = df._get_numeric_data().columns
# when grouping by external series don't exclude columns
is_mask = any(is_series_like(s) for s in index)
if not is_mask:
index = [col_mapping[k] for k in index]
cols = cols.drop(np.array(index))
g = _groupby_raise_unaligned(df, by=index)
x = g.sum()
level = len(index)
mul = g.apply(_mul_cols, cols=cols).reset_index(level=level, drop=True)
n = g[x.columns].count().rename(columns=lambda c: "{}-count".format(c))
return (x, mul, n, col_mapping)
def _cov_agg(_t, levels, ddof, std=False, sort=False):
sums = []
muls = []
counts = []
# sometime we get a series back from concat combiner
t = list(_t)
cols = t[0][0].columns
for x, mul, n, col_mapping in t:
sums.append(x)
muls.append(mul)
counts.append(n)
col_mapping = col_mapping
total_sums = concat(sums).groupby(level=levels, sort=sort).sum()
total_muls = concat(muls).groupby(level=levels, sort=sort).sum()
total_counts = concat(counts).groupby(level=levels).sum()
result = (
concat([total_sums, total_muls, total_counts], axis=1)
.groupby(level=levels)
.apply(_cov_finalizer, cols=cols, std=std)
)
inv_col_mapping = {v: k for k, v in col_mapping.items()}
idx_vals = result.index.names
idx_mapping = list()
# when index is None we probably have selected a particular column
# df.groupby('a')[['b']].cov()
if len(idx_vals) == 1 and all(n is None for n in idx_vals):
idx_vals = list(set(inv_col_mapping.keys()) - set(total_sums.columns))
for idx, val in enumerate(idx_vals):
idx_name = inv_col_mapping.get(val, val)
idx_mapping.append(idx_name)
if len(result.columns.levels[0]) < len(col_mapping):
# removing index from col_mapping (produces incorrect multiindexes)
try:
col_mapping.pop(idx_name)
except KeyError:
# when slicing the col_map will not have the index
pass
keys = list(col_mapping.keys())
for level in range(len(result.columns.levels)):
result.columns.set_levels(keys, level=level, inplace=True)
result.index.set_names(idx_mapping, inplace=True)
# stacking can lead to a sorted index
s_result = result.stack(dropna=False)
assert is_dataframe_like(s_result)
return s_result
###############################################################
# nunique
###############################################################
def _nunique_df_chunk(df, *index, **kwargs):
levels = kwargs.pop("levels")
name = kwargs.pop("name")
g = _groupby_raise_unaligned(df, by=index)
if len(df) > 0:
grouped = g[[name]].apply(M.drop_duplicates)
# we set the index here to force a possibly duplicate index
# for our reduce step
if isinstance(levels, list):
grouped.index = pd.MultiIndex.from_arrays(
[grouped.index.get_level_values(level=level) for level in levels]
)
else:
grouped.index = grouped.index.get_level_values(level=levels)
else:
# Manually create empty version, since groupby-apply for empty frame
# results in df with no columns
grouped = g[[name]].nunique()
grouped = grouped.astype(df.dtypes[grouped.columns].to_dict())
return grouped
def _drop_duplicates_rename(df):
# Avoid duplicate index labels in a groupby().apply() context
# https://github.com/dask/dask/issues/3039
# https://github.com/pandas-dev/pandas/pull/18882
names = [None] * df.index.nlevels
return df.drop_duplicates().rename_axis(names, copy=False)
def _nunique_df_combine(df, levels, sort=False):
result = df.groupby(level=levels, sort=sort).apply(_drop_duplicates_rename)
if isinstance(levels, list):
result.index = pd.MultiIndex.from_arrays(
[result.index.get_level_values(level=level) for level in levels]
)
else:
result.index = result.index.get_level_values(level=levels)
return result
def _nunique_df_aggregate(df, levels, name, sort=False):
return df.groupby(level=levels, sort=sort)[name].nunique()
def _nunique_series_chunk(df, *index, **_ignored_):
# convert series to data frame, then hand over to dataframe code path
assert is_series_like(df)
df = df.to_frame()
kwargs = dict(name=df.columns[0], levels=_determine_levels(index))
return _nunique_df_chunk(df, *index, **kwargs)
###############################################################
# Aggregate support
#
# Aggregate is implemented as:
#
# 1. group-by-aggregate all partitions into intermediate values
# 2. collect all partitions into a single partition
# 3. group-by-aggregate the result into intermediate values
# 4. transform all intermediate values into the result
#
# In Step 1 and 3 the dataframe is grouped on the same columns.
#
###############################################################
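# As an illustrative trace (intermediate names simplified; the real ones carry a
# token suffix from ``_make_agg_id``), ``df.groupby('a').agg({'b': 'mean'})``
# proceeds roughly as:
#
#     chunk:     g = partition.groupby('a'); {'sum-b': g['b'].sum(), 'count-b': g['b'].count()}
#     aggregate: concatenate the intermediates, group them by level and sum
#     finalize:  result['b'] = agg['sum-b'] / agg['count-b']
#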
def _make_agg_id(func, column):
return "{!s}-{!s}-{}".format(func, column, tokenize(func, column))
def _normalize_spec(spec, non_group_columns):
"""
Return a list of ``(result_column, func, input_column)`` tuples.
Spec can be
- a function
- a list of functions
- a dictionary that maps input-columns to functions
- a dictionary that maps input-columns to a lists of functions
- a dictionary that maps input-columns to a dictionaries that map
output-columns to functions.
The non-group columns are a list of all column names that are not used in
the groupby operation.
    Usually, the result columns are multi-level names, returned as tuples.
    If only a single function is supplied or a dictionary mapping columns
    to single functions, simple names are returned as strings (see the first
two examples below).
Examples
--------
>>> _normalize_spec('mean', ['a', 'b', 'c'])
[('a', 'mean', 'a'), ('b', 'mean', 'b'), ('c', 'mean', 'c')]
>>> spec = collections.OrderedDict([('a', 'mean'), ('b', 'count')])
>>> _normalize_spec(spec, ['a', 'b', 'c'])
[('a', 'mean', 'a'), ('b', 'count', 'b')]
>>> _normalize_spec(['var', 'mean'], ['a', 'b', 'c'])
... # doctest: +NORMALIZE_WHITESPACE
[(('a', 'var'), 'var', 'a'), (('a', 'mean'), 'mean', 'a'), \
(('b', 'var'), 'var', 'b'), (('b', 'mean'), 'mean', 'b'), \
(('c', 'var'), 'var', 'c'), (('c', 'mean'), 'mean', 'c')]
>>> spec = collections.OrderedDict([('a', 'mean'), ('b', ['sum', 'count'])])
>>> _normalize_spec(spec, ['a', 'b', 'c'])
... # doctest: +NORMALIZE_WHITESPACE
[(('a', 'mean'), 'mean', 'a'), (('b', 'sum'), 'sum', 'b'), \
(('b', 'count'), 'count', 'b')]
>>> spec = collections.OrderedDict()
>>> spec['a'] = ['mean', 'size']
>>> spec['b'] = collections.OrderedDict([('e', 'count'), ('f', 'var')])
>>> _normalize_spec(spec, ['a', 'b', 'c'])
... # doctest: +NORMALIZE_WHITESPACE
[(('a', 'mean'), 'mean', 'a'), (('a', 'size'), 'size', 'a'), \
(('b', 'e'), 'count', 'b'), (('b', 'f'), 'var', 'b')]
"""
if not isinstance(spec, dict):
spec = collections.OrderedDict(zip(non_group_columns, it.repeat(spec)))
res = []
if isinstance(spec, dict):
for input_column, subspec in spec.items():
if isinstance(subspec, dict):
res.extend(
((input_column, result_column), func, input_column)
for result_column, func in subspec.items()
)
else:
if not isinstance(subspec, list):
subspec = [subspec]
res.extend(
((input_column, funcname(func)), func, input_column)
for func in subspec
)
else:
raise ValueError("unsupported agg spec of type {}".format(type(spec)))
compounds = (list, tuple, dict)
use_flat_columns = not any(
isinstance(subspec, compounds) for subspec in spec.values()
)
if use_flat_columns:
res = [(input_col, func, input_col) for (_, func, input_col) in res]
return res
def _build_agg_args(spec):
"""
Create transformation functions for a normalized aggregate spec.
Parameters
----------
spec: a list of (result-column, aggregation-function, input-column) triples.
        To work with all argument forms understood by pandas use
        ``_normalize_spec`` to normalize the argument before passing it on to
``_build_agg_args``.
Returns
-------
chunk_funcs: a list of (intermediate-column, function, keyword) triples
that are applied on grouped chunks of the initial dataframe.
    agg_funcs: a list of (intermediate-column, functions, keyword) triples that
        are applied on the grouped concatenation of the preprocessed chunks.
finalizers: a list of (result-column, function, keyword) triples that are
applied after the ``agg_funcs``. They are used to create final results
from intermediate representations.
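    For example (illustrative; the exact intermediate column names contain
    generated tokens and are therefore not shown here):
    >>> chunks, aggs, finalizers = _build_agg_args(
    ...     _normalize_spec({'b': 'mean'}, ['b']))  # doctest: +SKIP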
"""
known_np_funcs = {np.min: "min", np.max: "max"}
# check that there are no name conflicts for a single input column
by_name = {}
for _, func, input_column in spec:
key = funcname(known_np_funcs.get(func, func)), input_column
by_name.setdefault(key, []).append((func, input_column))
for funcs in by_name.values():
if len(funcs) != 1:
raise ValueError("conflicting aggregation functions: {}".format(funcs))
chunks = {}
aggs = {}
finalizers = []
for (result_column, func, input_column) in spec:
if not isinstance(func, Aggregation):
func = funcname(known_np_funcs.get(func, func))
impls = _build_agg_args_single(result_column, func, input_column)
# overwrite existing result-columns, generate intermediates only once
for spec in impls["chunk_funcs"]:
chunks[spec[0]] = spec
for spec in impls["aggregate_funcs"]:
aggs[spec[0]] = spec
finalizers.append(impls["finalizer"])
chunks = sorted(chunks.values())
aggs = sorted(aggs.values())
return chunks, aggs, finalizers
def _build_agg_args_single(result_column, func, input_column):
simple_impl = {
"sum": (M.sum, M.sum),
"min": (M.min, M.min),
"max": (M.max, M.max),
"count": (M.count, M.sum),
"size": (M.size, M.sum),
"first": (M.first, M.first),
"last": (M.last, M.last),
"prod": (M.prod, M.prod),
}
if func in simple_impl.keys():
return _build_agg_args_simple(
result_column, func, input_column, simple_impl[func]
)
elif func == "var":
return _build_agg_args_var(result_column, func, input_column)
elif func == "std":
return _build_agg_args_std(result_column, func, input_column)
elif func == "mean":
return _build_agg_args_mean(result_column, func, input_column)
elif isinstance(func, Aggregation):
return _build_agg_args_custom(result_column, func, input_column)
else:
raise ValueError("unknown aggregate {}".format(func))
def _build_agg_args_simple(result_column, func, input_column, impl_pair):
intermediate = _make_agg_id(func, input_column)
chunk_impl, agg_impl = impl_pair
return dict(
chunk_funcs=[
(
intermediate,
_apply_func_to_column,
dict(column=input_column, func=chunk_impl),
)
],
aggregate_funcs=[
(
intermediate,
_apply_func_to_column,
dict(column=intermediate, func=agg_impl),
)
],
finalizer=(result_column, itemgetter(intermediate), dict()),
)
def _build_agg_args_var(result_column, func, input_column):
int_sum = _make_agg_id("sum", input_column)
int_sum2 = _make_agg_id("sum2", input_column)
int_count = _make_agg_id("count", input_column)
return dict(
chunk_funcs=[
(int_sum, _apply_func_to_column, dict(column=input_column, func=M.sum)),
(int_count, _apply_func_to_column, dict(column=input_column, func=M.count)),
(int_sum2, _compute_sum_of_squares, dict(column=input_column)),
],
aggregate_funcs=[
(col, _apply_func_to_column, dict(column=col, func=M.sum))
for col in (int_sum, int_count, int_sum2)
],
finalizer=(
result_column,
_finalize_var,
dict(sum_column=int_sum, count_column=int_count, sum2_column=int_sum2),
),
)
def _build_agg_args_std(result_column, func, input_column):
impls = _build_agg_args_var(result_column, func, input_column)
result_column, _, kwargs = impls["finalizer"]
impls["finalizer"] = (result_column, _finalize_std, kwargs)
return impls
def _build_agg_args_mean(result_column, func, input_column):
int_sum = _make_agg_id("sum", input_column)
int_count = _make_agg_id("count", input_column)
return dict(
chunk_funcs=[
(int_sum, _apply_func_to_column, dict(column=input_column, func=M.sum)),
(int_count, _apply_func_to_column, dict(column=input_column, func=M.count)),
],
aggregate_funcs=[
(col, _apply_func_to_column, dict(column=col, func=M.sum))
for col in (int_sum, int_count)
],
finalizer=(
result_column,
_finalize_mean,
dict(sum_column=int_sum, count_column=int_count),
),
)
def _build_agg_args_custom(result_column, func, input_column):
col = _make_agg_id(funcname(func), input_column)
if func.finalize is None:
finalizer = (result_column, operator.itemgetter(col), dict())
else:
finalizer = (
result_column,
_apply_func_to_columns,
dict(func=func.finalize, prefix=col),
)
return dict(
chunk_funcs=[
(col, _apply_func_to_column, dict(func=func.chunk, column=input_column))
],
aggregate_funcs=[
(col, _apply_func_to_columns, dict(func=func.agg, prefix=col))
],
finalizer=finalizer,
)
def _groupby_apply_funcs(df, *index, **kwargs):
"""
Group a dataframe and apply multiple aggregation functions.
Parameters
----------
df: pandas.DataFrame
The dataframe to work on.
index: list of groupers
If given, they are added to the keyword arguments as the ``by``
argument.
    funcs: list of (result-column, function, keyword-arguments) triples
The list of functions that are applied on the grouped data frame.
Has to be passed as a keyword argument.
kwargs:
        All keyword arguments except ``funcs`` are passed verbatim to the
        groupby operation of the dataframe.
Returns
-------
aggregated:
the aggregated dataframe.
"""
if len(index):
# since we're coming through apply, `by` will be a tuple.
# Pandas treats tuples as a single key, and lists as multiple keys
# We want multiple keys
kwargs.update(by=list(index))
funcs = kwargs.pop("funcs")
grouped = _groupby_raise_unaligned(df, **kwargs)
result = collections.OrderedDict()
for result_column, func, func_kwargs in funcs:
r = func(grouped, **func_kwargs)
if isinstance(r, tuple):
for idx, s in enumerate(r):
result["{}-{}".format(result_column, idx)] = s
else:
result[result_column] = r
if is_dataframe_like(df):
return type(df)(result)
else:
# Get the DataFrame type of this Series object
return type(df.head(0).to_frame())(result)
def _compute_sum_of_squares(grouped, column):
# Note: CuDF cannot use `groupby.apply`.
# Need to unpack groupby to compute sum of squares
if hasattr(grouped, "grouper"):
keys = grouped.grouper
else:
# Handle CuDF groupby object (different from pandas)
keys = grouped.grouping.keys
df = grouped.obj[column].pow(2) if column else grouped.obj.pow(2)
return df.groupby(keys).sum()
def _agg_finalize(df, aggregate_funcs, finalize_funcs, level, sort=False):
# finish the final aggregation level
df = _groupby_apply_funcs(df, funcs=aggregate_funcs, level=level, sort=sort)
# and finalize the result
result = collections.OrderedDict()
for result_column, func, kwargs in finalize_funcs:
result[result_column] = func(df, **kwargs)
return type(df)(result)
def _apply_func_to_column(df_like, column, func):
if column is None:
return func(df_like)
return func(df_like[column])
def _apply_func_to_columns(df_like, prefix, func):
if is_dataframe_like(df_like):
columns = df_like.columns
else:
# handle GroupBy objects
columns = df_like._selected_obj.columns
columns = sorted(col for col in columns if col.startswith(prefix))
columns = [df_like[col] for col in columns]
return func(*columns)
def _finalize_mean(df, sum_column, count_column):
return df[sum_column] / df[count_column]
def _finalize_var(df, count_column, sum_column, sum2_column, ddof=1):
n = df[count_column]
x = df[sum_column]
x2 = df[sum2_column]
result = x2 - x ** 2 / n
div = n - ddof
div[div < 0] = 0
result /= div
result[(n - ddof) == 0] = np.nan
return result
def _finalize_std(df, count_column, sum_column, sum2_column, ddof=1):
result = _finalize_var(df, count_column, sum_column, sum2_column, ddof)
return np.sqrt(result)
def _cum_agg_aligned(part, cum_last, index, columns, func, initial):
align = cum_last.reindex(part.set_index(index).index, fill_value=initial)
align.index = part.index
return func(part[columns], align)
def _cum_agg_filled(a, b, func, initial):
union = a.index.union(b.index)
return func(
a.reindex(union, fill_value=initial),
b.reindex(union, fill_value=initial),
fill_value=initial,
)
def _cumcount_aggregate(a, b, fill_value=None):
return a.add(b, fill_value=fill_value) + 1
class _GroupBy(object):
""" Superclass for DataFrameGroupBy and SeriesGroupBy
Parameters
----------
obj: DataFrame or Series
DataFrame or Series to be grouped
by: str, list or Series
The key for grouping
slice: str, list
The slice keys applied to GroupBy result
group_keys: bool
Passed to pandas.DataFrame.groupby()
dropna: bool
Whether to drop null values from groupby index
    sort: bool, default None
Passed along to aggregation methods. If allowed,
the output aggregation will have sorted keys.
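    Instances are normally created via ``DataFrame.groupby`` or
    ``Series.groupby`` rather than constructed directly, e.g. (illustrative):
    >>> ddf.groupby('a')['b'].sum()  # doctest: +SKIP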
"""
def __init__(
self, df, by=None, slice=None, group_keys=True, dropna=None, sort=None
):
assert isinstance(df, (DataFrame, Series))
self.group_keys = group_keys
self.obj = df
# grouping key passed via groupby method
self.index = _normalize_index(df, by)
self.sort = sort
if isinstance(self.index, list):
do_index_partition_align = all(
item.npartitions == df.npartitions if isinstance(item, Series) else True
for item in self.index
)
elif isinstance(self.index, Series):
do_index_partition_align = df.npartitions == self.index.npartitions
else:
do_index_partition_align = True
if not do_index_partition_align:
raise NotImplementedError(
"The grouped object and index of the "
"groupby must have the same divisions."
)
# slicing key applied to _GroupBy instance
self._slice = slice
if isinstance(self.index, list):
index_meta = [
item._meta if isinstance(item, Series) else item for item in self.index
]
elif isinstance(self.index, Series):
index_meta = self.index._meta
else:
index_meta = self.index
self.dropna = {}
if dropna is not None:
self.dropna["dropna"] = dropna
self._meta = self.obj._meta.groupby(
index_meta, group_keys=group_keys, **self.dropna
)
@property
def _meta_nonempty(self):
"""
Return a pd.DataFrameGroupBy / pd.SeriesGroupBy which contains sample data.
"""
sample = self.obj._meta_nonempty
if isinstance(self.index, list):
index_meta = [
item._meta_nonempty if isinstance(item, Series) else item
for item in self.index
]
elif isinstance(self.index, Series):
index_meta = self.index._meta_nonempty
else:
index_meta = self.index
grouped = sample.groupby(index_meta, group_keys=self.group_keys, **self.dropna)
return _maybe_slice(grouped, self._slice)
def _aca_agg(
self,
token,
func,
aggfunc=None,
split_every=None,
split_out=1,
chunk_kwargs={},
aggregate_kwargs={},
):
if aggfunc is None:
aggfunc = func
meta = func(self._meta_nonempty)
columns = meta.name if is_series_like(meta) else meta.columns
token = self._token_prefix + token
levels = _determine_levels(self.index)
return aca(
[self.obj, self.index]
if not isinstance(self.index, list)
else [self.obj] + self.index,
chunk=_apply_chunk,
chunk_kwargs=dict(
chunk=func, columns=columns, **chunk_kwargs, **self.dropna
),
aggregate=_groupby_aggregate,
meta=meta,
token=token,
split_every=split_every,
aggregate_kwargs=dict(
aggfunc=aggfunc, levels=levels, **aggregate_kwargs, **self.dropna
),
split_out=split_out,
split_out_setup=split_out_on_index,
sort=self.sort,
)
def _cum_agg(self, token, chunk, aggregate, initial):
""" Wrapper for cumulative groupby operation """
meta = chunk(self._meta)
columns = meta.name if is_series_like(meta) else meta.columns
index = self.index if isinstance(self.index, list) else [self.index]
name = self._token_prefix + token
name_part = name + "-map"
name_last = name + "-take-last"
name_cum = name + "-cum-last"
        # cumulate each partition
cumpart_raw = map_partitions(
_apply_chunk,
self.obj,
*index,
chunk=chunk,
columns=columns,
token=name_part,
meta=meta,
**self.dropna
)
cumpart_raw_frame = (
cumpart_raw.to_frame() if is_series_like(meta) else cumpart_raw
)
cumpart_ext = cumpart_raw_frame.assign(
**{
i: self.obj[i]
if np.isscalar(i) and i in self.obj.columns
else self.obj.index
for i in index
}
)
# Use pd.Grouper objects to specify that we are grouping by columns.
# Otherwise, pandas will throw an ambiguity warning if the
# DataFrame's index (self.obj.index) was included in the grouping
# specification (self.index). See pandas #14432
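        # (Illustratively, grouping by [pd.Grouper(key="a")] stays unambiguous
        # even when "a" is also the name of the DataFrame's index.)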
index_groupers = [pd.Grouper(key=ind) for ind in index]
cumlast = map_partitions(
_apply_chunk,
cumpart_ext,
*index_groupers,
columns=0 if columns is None else columns,
chunk=M.last,
meta=meta,
token=name_last,
**self.dropna
)
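        # Illustrative recurrence: with C_i = chunk(partition_i) and L_i the
        # per-group last row of C_i, the running carry is
        # carry_1 = L_0 and carry_i = aggregate(carry_{i-1}, L_{i-1}),
        # and partition i's output aligns that carry to its rows via
        # aggregate(C_i, carry_i).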
        # aggregate cumulated partitions with the last elements of the previous partitions
_hash = tokenize(self, token, chunk, aggregate, initial)
name += "-" + _hash
name_cum += "-" + _hash
dask = {}
dask[(name, 0)] = (cumpart_raw._name, 0)
for i in range(1, self.obj.npartitions):
# store each cumulative step to graph to reduce computation
if i == 1:
dask[(name_cum, i)] = (cumlast._name, i - 1)
else:
# aggregate with previous cumulation results
dask[(name_cum, i)] = (
_cum_agg_filled,
(name_cum, i - 1),
(cumlast._name, i - 1),
aggregate,
initial,
)
dask[(name, i)] = (
_cum_agg_aligned,
(cumpart_ext._name, i),
(name_cum, i),
index,
0 if columns is None else columns,
aggregate,
initial,
)
graph = HighLevelGraph.from_collections(
name, dask, dependencies=[cumpart_raw, cumpart_ext, cumlast]
)
return new_dd_object(graph, name, chunk(self._meta), self.obj.divisions)
def _shuffle(self, meta):
df = self.obj
if isinstance(self.obj, Series):
# Temporarily convert series to dataframe for shuffle
df = df.to_frame("__series__")
convert_back_to_series = True
else:
convert_back_to_series = False
if isinstance(self.index, DataFrame): # add index columns to dataframe
df2 = df.assign(
**{"_index_" + c: self.index[c] for c in self.index.columns}
)
index = self.index
elif isinstance(self.index, Series):
df2 = df.assign(_index=self.index)
index = self.index
else:
df2 = df
index = df._select_columns_or_index(self.index)
df3 = shuffle(df2, index) # shuffle dataframe and index
if isinstance(self.index, DataFrame):
# extract index from dataframe
cols = ["_index_" + c for c in self.index.columns]
index2 = df3[cols]
if is_dataframe_like(meta):
df4 = df3.map_partitions(drop_columns, cols, meta.columns.dtype)
else:
df4 = df3.drop(cols, axis=1)
elif isinstance(self.index, Series):
index2 = df3["_index"]
index2.name = self.index.name
if is_dataframe_like(meta):
df4 = df3.map_partitions(drop_columns, "_index", meta.columns.dtype)
else:
df4 = df3.drop("_index", axis=1)
else:
df4 = df3
index2 = self.index
if convert_back_to_series:
df4 = df4["__series__"].rename(self.obj.name)
return df4, index2
@derived_from(pd.core.groupby.GroupBy)
def cumsum(self, axis=0):
if axis:
return self.obj.cumsum(axis=axis)
else:
return self._cum_agg("cumsum", chunk=M.cumsum, aggregate=M.add, initial=0)
@derived_from(pd.core.groupby.GroupBy)
def cumprod(self, axis=0):
if axis:
return self.obj.cumprod(axis=axis)
else:
return self._cum_agg("cumprod", chunk=M.cumprod, aggregate=M.mul, initial=1)
@derived_from(pd.core.groupby.GroupBy)
def cumcount(self, axis=None):
return self._cum_agg(
"cumcount", chunk=M.cumcount, aggregate=_cumcount_aggregate, initial=-1
)
@derived_from(pd.core.groupby.GroupBy)
def sum(self, split_every=None, split_out=1, min_count=None):
result = self._aca_agg(
token="sum", func=M.sum, split_every=split_every, split_out=split_out
)
if min_count:
return result.where(self.count() >= min_count, other=np.NaN)
else:
return result
@derived_from(pd.core.groupby.GroupBy)
def prod(self, split_every=None, split_out=1, min_count=None):
result = self._aca_agg(
token="prod", func=M.prod, split_every=split_every, split_out=split_out
)
if min_count:
return result.where(self.count() >= min_count, other=np.NaN)
else:
return result
@derived_from(pd.core.groupby.GroupBy)
def min(self, split_every=None, split_out=1):
return self._aca_agg(
token="min", func=M.min, split_every=split_every, split_out=split_out
)
@derived_from(pd.core.groupby.GroupBy)
def max(self, split_every=None, split_out=1):
return self._aca_agg(
token="max", func=M.max, split_every=split_every, split_out=split_out
)
@derived_from(pd.DataFrame)
def idxmin(self, split_every=None, split_out=1, axis=None, skipna=True):
return self._aca_agg(
token="idxmin",
func=M.idxmin,
aggfunc=M.first,
split_every=split_every,
split_out=split_out,
chunk_kwargs=dict(skipna=skipna),
)
@derived_from(pd.DataFrame)
def idxmax(self, split_every=None, split_out=1, axis=None, skipna=True):
return self._aca_agg(
token="idxmax",
func=M.idxmax,
aggfunc=M.first,
split_every=split_every,
split_out=split_out,
chunk_kwargs=dict(skipna=skipna),
)
@derived_from(pd.core.groupby.GroupBy)
def count(self, split_every=None, split_out=1):
return self._aca_agg(
token="count",
func=M.count,
aggfunc=M.sum,
split_every=split_every,
split_out=split_out,
)
@derived_from(pd.core.groupby.GroupBy)
def mean(self, split_every=None, split_out=1):
s = self.sum(split_every=split_every, split_out=split_out)
c = self.count(split_every=split_every, split_out=split_out)
if is_dataframe_like(s):
c = c[s.columns]
return s / c
@derived_from(pd.core.groupby.GroupBy)
def size(self, split_every=None, split_out=1):
return self._aca_agg(
token="size",
func=M.size,
aggfunc=M.sum,
split_every=split_every,
split_out=split_out,
)
@derived_from(pd.core.groupby.GroupBy)
def var(self, ddof=1, split_every=None, split_out=1):
levels = _determine_levels(self.index)
result = aca(
[self.obj, self.index]
if not isinstance(self.index, list)
else [self.obj] + self.index,
chunk=_var_chunk,
aggregate=_var_agg,
combine=_var_combine,
token=self._token_prefix + "var",
aggregate_kwargs={"ddof": ddof, "levels": levels},
combine_kwargs={"levels": levels},
split_every=split_every,
split_out=split_out,
split_out_setup=split_out_on_index,
sort=self.sort,
)
if isinstance(self.obj, Series):
result = result[result.columns[0]]
if self._slice:
result = result[self._slice]
return result
@derived_from(pd.core.groupby.GroupBy)
def std(self, ddof=1, split_every=None, split_out=1):
v = self.var(ddof, split_every=split_every, split_out=split_out)
result = map_partitions(np.sqrt, v, meta=v)
return result
@derived_from(pd.DataFrame)
def corr(self, ddof=1, split_every=None, split_out=1):
"""Groupby correlation:
corr(X, Y) = cov(X, Y) / (std_x * std_y)
"""
return self.cov(split_every=split_every, split_out=split_out, std=True)
@derived_from(pd.DataFrame)
def cov(self, ddof=1, split_every=None, split_out=1, std=False):
"""Groupby covariance is accomplished by
        1. Computing intermediate values for sum, count, and the pairwise products
        of all columns: a b c -> a*a, a*b, a*c, b*b, b*c, c*c.
        2. The values are then aggregated and the final covariance value is calculated:
        cov(X, Y) = E[X*Y] - E[X] * E[Y]
        When `std` is True the result is normalized into a correlation matrix instead.
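        Illustratively, for a single group with n rows this reduces to
        cov(X, Y) = (sum(X*Y) - sum(X) * sum(Y) / n) / (n - 1).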
"""
levels = _determine_levels(self.index)
is_mask = any(is_series_like(s) for s in self.index)
if self._slice:
if is_mask:
self.obj = self.obj[self._slice]
else:
sliced_plus = list(self._slice) + list(self.index)
self.obj = self.obj[sliced_plus]
result = aca(
[self.obj, self.index]
if not isinstance(self.index, list)
else [self.obj] + self.index,
chunk=_cov_chunk,
aggregate=_cov_agg,
combine=_cov_combine,
token=self._token_prefix + "cov",
aggregate_kwargs={"ddof": ddof, "levels": levels, "std": std},
combine_kwargs={"levels": levels},
split_every=split_every,
split_out=split_out,
split_out_setup=split_out_on_index,
sort=self.sort,
)
if isinstance(self.obj, Series):
result = result[result.columns[0]]
if self._slice:
result = result[self._slice]
return result
@derived_from(pd.core.groupby.GroupBy)
def first(self, split_every=None, split_out=1):
return self._aca_agg(
token="first", func=M.first, split_every=split_every, split_out=split_out
)
@derived_from(pd.core.groupby.GroupBy)
def last(self, split_every=None, split_out=1):
return self._aca_agg(
token="last", func=M.last, split_every=split_every, split_out=split_out
)
@derived_from(pd.core.groupby.GroupBy)
def get_group(self, key):
token = self._token_prefix + "get_group"
meta = self._meta.obj
if is_dataframe_like(meta) and self._slice is not None:
meta = meta[self._slice]
columns = meta.columns if is_dataframe_like(meta) else meta.name
return map_partitions(
_groupby_get_group,
self.obj,
self.index,
key,
columns,
meta=meta,
token=token,
)
def aggregate(self, arg, split_every, split_out=1):
if isinstance(self.obj, DataFrame):
if isinstance(self.index, tuple) or np.isscalar(self.index):
group_columns = {self.index}
elif isinstance(self.index, list):
group_columns = {
i for i in self.index if isinstance(i, tuple) or np.isscalar(i)
}
else:
group_columns = set()
if self._slice:
# pandas doesn't exclude the grouping column in a SeriesGroupBy
# like df.groupby('a')['a'].agg(...)
non_group_columns = self._slice
if not isinstance(non_group_columns, list):
non_group_columns = [non_group_columns]
else:
# NOTE: this step relies on the index normalization to replace
# series with their name in an index.
non_group_columns = [
col for col in self.obj.columns if col not in group_columns
]
spec = _normalize_spec(arg, non_group_columns)
elif isinstance(self.obj, Series):
if isinstance(arg, (list, tuple, dict)):
# implementation detail: if self.obj is a series, a pseudo column
# None is used to denote the series itself. This pseudo column is
# removed from the result columns before passing the spec along.
spec = _normalize_spec({None: arg}, [])
spec = [
(result_column, func, input_column)
for ((_, result_column), func, input_column) in spec
]
else:
spec = _normalize_spec({None: arg}, [])
spec = [
(self.obj.name, func, input_column)
for (_, func, input_column) in spec
]
else:
raise ValueError("aggregate on unknown object {}".format(self.obj))
chunk_funcs, aggregate_funcs, finalizers = _build_agg_args(spec)
if isinstance(self.index, (tuple, list)) and len(self.index) > 1:
levels = list(range(len(self.index)))
else:
levels = 0
if not isinstance(self.index, list):
chunk_args = [self.obj, self.index]
else:
chunk_args = [self.obj] + self.index
return aca(
chunk_args,
chunk=_groupby_apply_funcs,
chunk_kwargs=dict(funcs=chunk_funcs),
combine=_groupby_apply_funcs,
combine_kwargs=dict(funcs=aggregate_funcs, level=levels),
aggregate=_agg_finalize,
aggregate_kwargs=dict(
aggregate_funcs=aggregate_funcs, finalize_funcs=finalizers, level=levels
),
token="aggregate",
split_every=split_every,
split_out=split_out,
split_out_setup=split_out_on_index,
sort=self.sort,
)
@insert_meta_param_description(pad=12)
def apply(self, func, *args, **kwargs):
""" Parallel version of pandas GroupBy.apply
This mimics the pandas version except for the following:
1. If the grouper does not align with the index then this causes a full
shuffle. The order of rows within each group may not be preserved.
2. Dask's GroupBy.apply is not appropriate for aggregations. For custom
aggregations, use :class:`dask.dataframe.groupby.Aggregation`.
.. warning::
            Pandas' groupby-apply can be used to apply arbitrary functions,
including aggregations that result in one row per group. Dask's
groupby-apply will apply ``func`` once to each partition-group pair,
so when ``func`` is a reduction you'll end up with one row per
partition-group pair. To apply a custom aggregation with Dask,
use :class:`dask.dataframe.groupby.Aggregation`.
Parameters
----------
func: function
Function to apply
args, kwargs : Scalar, Delayed or object
Arguments and keywords to pass to the function.
$META
Returns
-------
applied : Series or DataFrame depending on columns keyword
"""
meta = kwargs.get("meta", no_default)
if meta is no_default:
with raise_on_meta_error(
"groupby.apply({0})".format(funcname(func)), udf=True
):
meta_args, meta_kwargs = _extract_meta((args, kwargs), nonempty=True)
meta = self._meta_nonempty.apply(func, *meta_args, **meta_kwargs)
msg = (
"`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .apply(func)\n"
" After: .apply(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .apply(func, meta=('x', 'f8')) for series result"
)
warnings.warn(msg, stacklevel=2)
meta = make_meta(meta)
# Validate self.index
if isinstance(self.index, list) and any(
isinstance(item, Series) for item in self.index
):
raise NotImplementedError(
"groupby-apply with a multiple Series is currently not supported"
)
df = self.obj
should_shuffle = not (
df.known_divisions and df._contains_index_name(self.index)
)
if should_shuffle:
df2, index = self._shuffle(meta)
else:
df2 = df
index = self.index
# Perform embarrassingly parallel groupby-apply
kwargs["meta"] = meta
df3 = map_partitions(
_groupby_slice_apply,
df2,
index,
self._slice,
func,
token=funcname(func),
*args,
group_keys=self.group_keys,
**self.dropna,
**kwargs
)
return df3
@insert_meta_param_description(pad=12)
def transform(self, func, *args, **kwargs):
""" Parallel version of pandas GroupBy.transform
This mimics the pandas version except for the following:
1. If the grouper does not align with the index then this causes a full
shuffle. The order of rows within each group may not be preserved.
2. Dask's GroupBy.transform is not appropriate for aggregations. For custom
aggregations, use :class:`dask.dataframe.groupby.Aggregation`.
.. warning::
            Pandas' groupby-transform can be used to apply arbitrary functions,
including aggregations that result in one row per group. Dask's
groupby-transform will apply ``func`` once to each partition-group pair,
so when ``func`` is a reduction you'll end up with one row per
partition-group pair. To apply a custom aggregation with Dask,
use :class:`dask.dataframe.groupby.Aggregation`.
Parameters
----------
func: function
Function to apply
args, kwargs : Scalar, Delayed or object
Arguments and keywords to pass to the function.
$META
Returns
-------
applied : Series or DataFrame depending on columns keyword
"""
meta = kwargs.get("meta", no_default)
if meta is no_default:
with raise_on_meta_error(
"groupby.transform({0})".format(funcname(func)), udf=True
):
meta_args, meta_kwargs = _extract_meta((args, kwargs), nonempty=True)
meta = self._meta_nonempty.transform(func, *meta_args, **meta_kwargs)
msg = (
"`meta` is not specified, inferred from partial data. "
"Please provide `meta` if the result is unexpected.\n"
" Before: .transform(func)\n"
" After: .transform(func, meta={'x': 'f8', 'y': 'f8'}) for dataframe result\n"
" or: .transform(func, meta=('x', 'f8')) for series result"
)
warnings.warn(msg, stacklevel=2)
meta = make_meta(meta)
# Validate self.index
if isinstance(self.index, list) and any(
isinstance(item, Series) for item in self.index
):
raise NotImplementedError(
"groupby-transform with a multiple Series is currently not supported"
)
df = self.obj
should_shuffle = not (
df.known_divisions and df._contains_index_name(self.index)
)
if should_shuffle:
df2, index = self._shuffle(meta)
else:
df2 = df
index = self.index
# Perform embarrassingly parallel groupby-transform
kwargs["meta"] = meta
df3 = map_partitions(
_groupby_slice_transform,
df2,
index,
self._slice,
func,
token=funcname(func),
*args,
group_keys=self.group_keys,
**self.dropna,
**kwargs
)
return df3
class DataFrameGroupBy(_GroupBy):
_token_prefix = "dataframe-groupby-"
def __getitem__(self, key):
if isinstance(key, list):
g = DataFrameGroupBy(
self.obj, by=self.index, slice=key, sort=self.sort, **self.dropna
)
else:
g = SeriesGroupBy(
self.obj, by=self.index, slice=key, sort=self.sort, **self.dropna
)
# error is raised from pandas
g._meta = g._meta[key]
return g
def __dir__(self):
return sorted(
set(
dir(type(self))
+ list(self.__dict__)
+ list(filter(M.isidentifier, self.obj.columns))
)
)
def __getattr__(self, key):
try:
return self[key]
except KeyError as e:
raise AttributeError(e) from e
@derived_from(pd.core.groupby.DataFrameGroupBy)
def aggregate(self, arg, split_every=None, split_out=1):
if arg == "size":
return self.size()
return super(DataFrameGroupBy, self).aggregate(
arg, split_every=split_every, split_out=split_out
)
@derived_from(pd.core.groupby.DataFrameGroupBy)
def agg(self, arg, split_every=None, split_out=1):
return self.aggregate(arg, split_every=split_every, split_out=split_out)
class SeriesGroupBy(_GroupBy):
_token_prefix = "series-groupby-"
def __init__(self, df, by=None, slice=None, **kwargs):
# for any non series object, raise pandas-compat error message
if isinstance(df, Series):
if isinstance(by, Series):
pass
elif isinstance(by, list):
if len(by) == 0:
raise ValueError("No group keys passed!")
non_series_items = [item for item in by if not isinstance(item, Series)]
# raise error from pandas, if applicable
df._meta.groupby(non_series_items)
else:
# raise error from pandas, if applicable
df._meta.groupby(by)
super(SeriesGroupBy, self).__init__(df, by=by, slice=slice, **kwargs)
@derived_from(pd.core.groupby.SeriesGroupBy)
def nunique(self, split_every=None, split_out=1):
name = self._meta.obj.name
levels = _determine_levels(self.index)
if isinstance(self.obj, DataFrame):
chunk = _nunique_df_chunk
else:
chunk = _nunique_series_chunk
return aca(
[self.obj, self.index]
if not isinstance(self.index, list)
else [self.obj] + self.index,
chunk=chunk,
aggregate=_nunique_df_aggregate,
combine=_nunique_df_combine,
token="series-groupby-nunique",
chunk_kwargs={"levels": levels, "name": name},
aggregate_kwargs={"levels": levels, "name": name},
combine_kwargs={"levels": levels},
split_every=split_every,
split_out=split_out,
split_out_setup=split_out_on_index,
sort=self.sort,
)
@derived_from(pd.core.groupby.SeriesGroupBy)
def aggregate(self, arg, split_every=None, split_out=1):
result = super(SeriesGroupBy, self).aggregate(
arg, split_every=split_every, split_out=split_out
)
if self._slice:
result = result[self._slice]
if not isinstance(arg, (list, dict)) and isinstance(result, DataFrame):
result = result[result.columns[0]]
return result
@derived_from(pd.core.groupby.SeriesGroupBy)
def agg(self, arg, split_every=None, split_out=1):
return self.aggregate(arg, split_every=split_every, split_out=split_out)
@derived_from(pd.core.groupby.SeriesGroupBy)
def value_counts(self, split_every=None, split_out=1):
return self._aca_agg(
token="value_counts",
func=M.value_counts,
aggfunc=_value_counts_aggregate,
split_every=split_every,
split_out=split_out,
)
@derived_from(pd.core.groupby.SeriesGroupBy)
def unique(self, split_every=None, split_out=1):
name = self._meta.obj.name
return self._aca_agg(
token="unique",
func=M.unique,
aggfunc=_unique_aggregate,
aggregate_kwargs={"name": name},
split_every=split_every,
split_out=split_out,
)
def _unique_aggregate(series_gb, name=None):
ret = pd.Series({k: v.explode().unique() for k, v in series_gb}, name=name)
ret.index.names = series_gb.obj.index.names
return ret
def _value_counts_aggregate(series_gb):
to_concat = {k: v.sum(level=1) for k, v in series_gb}
names = list(series_gb.obj.index.names)
return pd.Series(pd.concat(to_concat, names=names))
| bsd-3-clause |
ResearchCodesHub/QuantumGeneticAlgorithms | HGA.py | 1 | 13199 | #########################################################
# #
# HYBRID GENETIC ALGORITHM (24.05.2016) #
# #
# R. Lahoz-Beltra #
# #
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND #
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY #
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. #
#   THE SOFTWARE CAN BE USED BY ANYONE SOLELY FOR THE   #
# PURPOSES OF EDUCATION AND RESEARCH. #
# #
#########################################################
import math
import random
import numpy as np
import matplotlib.pyplot as plt
#########################################################
# ALGORITHM PARAMETERS #
#########################################################
N=50 # Define here the population size
Genome=4 # Define here the chromosome length
generation_max= 650 # Define here the maximum number of
# generations/iterations
#########################################################
# VARIABLES ALGORITHM #
#########################################################
popSize=N+1
genomeLength=Genome+1
top_bottom=3
QuBitZero = np.array([[1], [0]])
QuBitOne = np.array([[0], [1]])
AlphaBeta = np.empty([top_bottom])
fitness = np.empty([popSize])
probability = np.empty([popSize])
# qpv: quantum chromosome (or population vector, QPV)
qpv = np.empty([popSize, genomeLength, top_bottom])
nqpv = np.empty([popSize, genomeLength, top_bottom])
# chromosome: classical chromosome
chromosome = np.empty([popSize, genomeLength],dtype=np.int)
child1 = np.empty([popSize, genomeLength, top_bottom])
child2 = np.empty([popSize, genomeLength, top_bottom])
best_chrom = np.empty([generation_max])
# Initialization global variables
theta=0;
iteration=0;
the_best_chrom=0;
generation=0;
#########################################################
# QUANTUM POPULATION INITIALIZATION #
#########################################################
def Init_population():
# Hadamard gate
r2=math.sqrt(2.0)
h=np.array([[1/r2, 1/r2],[1/r2,-1/r2]])
# Rotation Q-gate
theta=0;
rot =np.empty([2,2])
# Initial population array (individual x chromosome)
i=1; j=1;
for i in range(1,popSize):
for j in range(1,genomeLength):
theta=np.random.uniform(0,1)*90
theta=math.radians(theta)
rot[0,0]=math.cos(theta); rot[0,1]=-math.sin(theta);
rot[1,0]=math.sin(theta); rot[1,1]=math.cos(theta);
AlphaBeta[0]=rot[0,0]*(h[0][0]*QuBitZero[0])+rot[0,1]*(h[0][1]*QuBitZero[1])
AlphaBeta[1]=rot[1,0]*(h[1][0]*QuBitZero[0])+rot[1,1]*(h[1][1]*QuBitZero[1])
# alpha squared
qpv[i,j,0]=np.around(2*pow(AlphaBeta[0],2),2)
# beta squared
qpv[i,j,1]=np.around(2*pow(AlphaBeta[1],2),2)
#########################################################
# SHOW QUANTUM POPULATION #
#########################################################
def Show_population():
i=1; j=1;
for i in range(1,popSize):
print()
print()
print("qpv = ",i," : ")
print()
for j in range(1,genomeLength):
print(qpv[i, j, 0],end="")
print(" ",end="")
print()
for j in range(1,genomeLength):
print(qpv[i, j, 1],end="")
print(" ",end="")
print()
##########################################################
# MAKE A MEASURE #
##########################################################
# p_alpha: probability of finding qubit in alpha state
def Measure(p_alpha):
for i in range(1,popSize):
print()
for j in range(1,genomeLength):
if p_alpha<=qpv[i, j, 0]:
chromosome[i,j]=0
else:
chromosome[i,j]=1
print(chromosome[i,j]," ",end="")
print()
print()
#########################################################
# FITNESS EVALUATION #
#########################################################
def Fitness_evaluation(generation):
i=1; j=1; fitness_total=0; sum_sqr=0;
fitness_average=0; variance=0;
for i in range(1,popSize):
fitness[i]=0
#########################################################
# Define your problem in this section. For instance: #
# #
    # Let f(x)=abs((x-5)/(2+sin(x))) be a function that   #
    # takes values in the range 0<=x<=15. In this range   #
    # f(x) has its maximum at x=11 (binary 1011).          #
#########################################################
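    # Worked check (our arithmetic, matching the code below): sin(11) is close to -1,
    # so f(11) = |(11-5)/(2+sin(11))| is roughly 6.0, the largest value for integer x
    # in 0..15; after the x100 scaling below the corresponding fitness is about 600.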
for i in range(1,popSize):
x=0;
for j in range(1,genomeLength):
# translate from binary to decimal value
x=x+chromosome[i,j]*pow(2,genomeLength-j-1)
# replaces the value of x in the function f(x)
y= np.fabs((x-5)/(2+np.sin(x)))
# the fitness value is calculated below:
# (Note that in this example is multiplied
# by a scale value, e.g. 100)
fitness[i]=y*100
#########################################################
print("fitness = ",i," ",fitness[i])
fitness_total=fitness_total+fitness[i]
fitness_average=fitness_total/N
i=1;
while i<=N:
sum_sqr=sum_sqr+pow(fitness[i]-fitness_average,2)
i=i+1
variance=sum_sqr/N
if variance<=1.0e-4:
variance=0.0
# Best chromosome selection
the_best_chrom=0;
fitness_max=fitness[1];
for i in range(1,popSize):
if fitness[i]>=fitness_max:
fitness_max=fitness[i]
the_best_chrom=i
best_chrom[generation]=the_best_chrom
# Statistical output
f = open("output.dat", "a")
f.write(str(generation)+" "+str(fitness_average)+"\n")
f.write(" \n")
f.close()
print("Population size = ", popSize - 1)
print("mean fitness = ",fitness_average)
print("variance = ",variance," Std. deviation = ",math.sqrt(variance))
print("fitness max = ",best_chrom[generation])
print("fitness sum = ",fitness_total)
#########################################################
# QUANTUM ROTATION GATE #
#########################################################
def rotation():
rot =np.empty([2,2])
# Lookup table of the rotation angle
for i in range(1,popSize):
for j in range(1,genomeLength):
if fitness[i]<fitness[best_chrom[generation]]:
# if chromosome[i,j]==0 and chromosome[best_chrom[generation],j]==0:
if chromosome[i,j]==0 and chromosome[best_chrom[generation],j]==1:
# Define the rotation angle: delta_theta (e.g. 0.0785398163)
delta_theta=0.0785398163
rot[0,0]=math.cos(delta_theta); rot[0,1]=-math.sin(delta_theta);
rot[1,0]=math.sin(delta_theta); rot[1,1]=math.cos(delta_theta);
nqpv[i,j,0]=(rot[0,0]*qpv[i,j,0])+(rot[0,1]*qpv[i,j,1])
nqpv[i,j,1]=(rot[1,0]*qpv[i,j,0])+(rot[1,1]*qpv[i,j,1])
qpv[i,j,0]=round(nqpv[i,j,0],2)
qpv[i,j,1]=round(1-nqpv[i,j,0],2)
if chromosome[i,j]==1 and chromosome[best_chrom[generation],j]==0:
# Define the rotation angle: delta_theta (e.g. -0.0785398163)
delta_theta=-0.0785398163
rot[0,0]=math.cos(delta_theta); rot[0,1]=-math.sin(delta_theta);
rot[1,0]=math.sin(delta_theta); rot[1,1]=math.cos(delta_theta);
nqpv[i,j,0]=(rot[0,0]*qpv[i,j,0])+(rot[0,1]*qpv[i,j,1])
nqpv[i,j,1]=(rot[1,0]*qpv[i,j,0])+(rot[1,1]*qpv[i,j,1])
qpv[i,j,0]=round(nqpv[i,j,0],2)
qpv[i,j,1]=round(1-nqpv[i,j,0],2)
# if chromosome[i,j]==1 and chromosome[best_chrom[generation],j]==1:
#########################################################
# X-PAULI QUANTUM MUTATION GATE #
#########################################################
# pop_mutation_rate: mutation rate in the population
# mutation_rate: probability of a mutation of a bit
def mutation(pop_mutation_rate, mutation_rate):
for i in range(1,popSize):
up=np.random.random_integers(100)
up=up/100
if up<=pop_mutation_rate:
for j in range(1,genomeLength):
um=np.random.random_integers(100)
um=um/100
if um<=mutation_rate:
nqpv[i,j,0]=qpv[i,j,1]
nqpv[i,j,1]=qpv[i,j,0]
else:
nqpv[i,j,0]=qpv[i,j,0]
nqpv[i,j,1]=qpv[i,j,1]
else:
for j in range(1,genomeLength):
nqpv[i,j,0]=qpv[i,j,0]
nqpv[i,j,1]=qpv[i,j,1]
for i in range(1,popSize):
for j in range(1,genomeLength):
qpv[i,j,0]=nqpv[i,j,0]
qpv[i,j,1]=nqpv[i,j,1]
#########################################################
# TOURNAMENT SELECTION OPERATOR #
#########################################################
def select_p_tournament():
u1=0; u2=0; parent=99;
while (u1==0 and u2==0):
u1=np.random.random_integers(popSize-1)
u2=np.random.random_integers(popSize-1)
if fitness[u1]<=fitness[u2]:
parent=u1
else:
parent=u2
return parent
#########################################################
# ONE-POINT CROSSOVER OPERATOR #
#########################################################
# crossover_rate: setup crossover rate
def mating(crossover_rate):
j=0;
crossover_point=0;
parent1=select_p_tournament()
parent2=select_p_tournament()
if random.random()<=crossover_rate:
crossover_point=np.random.random_integers(genomeLength-2)
j=1;
while (j<=genomeLength-2):
if j<=crossover_point:
child1[parent1,j,0]=round(qpv[parent1,j,0],2)
child1[parent1,j,1]=round(qpv[parent1,j,1],2)
child2[parent2,j,0]=round(qpv[parent2,j,0],2)
child2[parent2,j,1]=round(qpv[parent2,j,1],2)
else:
child1[parent1,j,0]=round(qpv[parent2,j,0],2)
child1[parent1,j,1]=round(qpv[parent2,j,1],2)
child2[parent2,j,0]=round(qpv[parent1,j,0],2)
child2[parent2,j,1]=round(qpv[parent1,j,1],2)
j=j+1
j=1
for j in range(1,genomeLength):
qpv[parent1,j,0]=child1[parent1,j,0]
qpv[parent1,j,1]=child1[parent1,j,1]
qpv[parent2,j,0]=child2[parent2,j,0]
qpv[parent2,j,1]=child2[parent2,j,1]
def crossover(crossover_rate):
c=1;
while (c<=N):
mating(crossover_rate)
c=c+1
#########################################################
# PERFORMANCE GRAPH #
#########################################################
# Read the Docs in http://matplotlib.org/1.4.1/index.html
def plot_Output():
data = np.loadtxt('output.dat')
# plot the first column as x, and second column as y
x=data[:,0]
y=data[:,1]
plt.plot(x,y)
plt.xlabel('Generation')
plt.ylabel('Fitness average')
plt.xlim(0.0, 550.0)
plt.show()
#########################################################
# #
# MAIN PROGRAM #
# #
#########################################################
def Q_Hybrid():
generation=0
print("============== GENERATION: ",generation," =========================== ")
print()
Init_population()
Show_population()
Measure(0.5)
Fitness_evaluation(generation)
while (generation<generation_max-1):
print("The best of generation [",generation,"] ", best_chrom[generation])
print()
print("============== GENERATION: ",generation+1," =========================== ")
print()
rotation()
crossover(0.75)
mutation(0.0,0.001)
generation=generation+1
Measure(0.5)
Fitness_evaluation(generation)
print ("""HYBRID GENETIC ALGORITHM""")
input("Press Enter to continue...")
Q_Hybrid()
plot_Output()
| mit |
fdft/ml | ch04/build_lda.py | 2 | 2444 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
from __future__ import print_function
try:
import nltk.corpus
except ImportError:
print("nltk not found")
print("please install it")
raise
from scipy.spatial import distance
import numpy as np
from gensim import corpora, models
import sklearn.datasets
import nltk.stem
from collections import defaultdict
english_stemmer = nltk.stem.SnowballStemmer('english')
stopwords = set(nltk.corpus.stopwords.words('english'))
stopwords.update(['from:', 'subject:', 'writes:', 'writes'])
class DirectText(corpora.textcorpus.TextCorpus):
def get_texts(self):
return self.input
def __len__(self):
return len(self.input)
try:
dataset = sklearn.datasets.load_mlcomp("20news-18828", "train",
mlcomp_root='./data')
except:
print("Newsgroup data not found.")
print("Please download from http://mlcomp.org/datasets/379")
print("And expand the zip into the subdirectory data/")
print()
print()
raise
otexts = dataset.data
texts = dataset.data
texts = [t.decode('utf-8', 'ignore') for t in texts]
texts = [t.split() for t in texts]
texts = [map(lambda w: w.lower(), t) for t in texts]
texts = [filter(lambda s: not len(set("+-.?!()>@012345689") & set(s)), t)
for t in texts]
texts = [filter(lambda s: (len(s) > 3) and (s not in stopwords), t)
for t in texts]
texts = [map(english_stemmer.stem, t) for t in texts]
usage = defaultdict(int)
for t in texts:
for w in set(t):
usage[w] += 1
limit = len(texts) / 10
too_common = [w for w in usage if usage[w] > limit]
too_common = set(too_common)
texts = [filter(lambda s: s not in too_common, t) for t in texts]
corpus = DirectText(texts)
dictionary = corpus.dictionary
try:
dictionary['computer']
except:
pass
model = models.ldamodel.LdaModel(
corpus, num_topics=100, id2word=dictionary.id2token)
thetas = np.zeros((len(texts), 100))
for i, c in enumerate(corpus):
for ti, v in model[c]:
thetas[i, ti] += v
distances = distance.squareform(distance.pdist(thetas))
large = distances.max() + 1
for i in xrange(len(distances)):
distances[i, i] = large
print(otexts[1])
print()
print()
print()
print(otexts[distances[1].argmin()])
| mit |
pablo-co/insight-jobs | flm.py | 1 | 5339 | __author__ = ''
from gurobipy import *
import pandas as pd
import numpy as np
import os
# additional stdlib imports required by main() below
import sys
import getopt
import random
import string
from pandas import concat
from filesearcher import filesearcher
def process(frequencies_file):
stdmean = pd.read_csv('./5StopsCentroids/Output/StdAndMean.csv')
os.chdir('./7FLM/')
TS = np.genfromtxt('./Input/demand.csv', dtype=None, delimiter=',')
costmatrix = np.genfromtxt('./Input/DistanceStopsToCustomers.csv', dtype=None, delimiter=',')
objectivemat = []
PPS = range(len(costmatrix))
Solution = pd.DataFrame(index=PPS, columns=range(len(costmatrix)))
Solution = Solution.fillna(0)
for k in range(1, len(costmatrix) + 1):
# Facility location model (FLM)
m = Model('FLM1.1')
# Parking spots (max)
PS = k
# initialize objective function
obj = 0
# Potential parking stops
# Actual stops
Potspot = []
# Create decision variables
for i in PPS:
Potspot.append(m.addVar(vtype=GRB.BINARY, name="Chosen_Spots%d" % i))
transport = []
for i in PPS:
transport.append([])
for j in range(len(TS)):
transport[i].append(m.addVar(vtype=GRB.INTEGER, name="Trans%d.%d" % (i, j)))
m.modelSense = GRB.MINIMIZE
m.update()
# Objective function
for i in PPS:
for j in range(len(TS)):
obj = TS[j] * costmatrix[i][j] * transport[i][j] + obj
m.setObjective(obj)
        # Constraints
for j in range(len(TS)):
m.addConstr(quicksum((transport[i][j] for i in PPS)) >= 1, name="Next_spot%d" % j)
for i in PPS:
for j in range(len(TS)):
m.addConstr((transport[i][j] - Potspot[i]) <= 0, "Link%d.%d" % (i, j))
for i in PPS:
m.addConstr((Potspot[i] - quicksum(transport[i][j] for j in range(len(TS)))) <= 0, "Link%d.%d" % (i, j))
m.addConstr(quicksum(Potspot[i] for i in PPS) == PS, "Max_spots%d")
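        # Summary of the model just assembled (our reading of the code above, not
        # additional Gurobi calls):
        #   minimize    sum_i sum_j TS[j] * costmatrix[i][j] * transport[i][j]
        #   subject to  sum_i transport[i][j] >= 1           for every customer j
        #               transport[i][j] <= Potspot[i]        (only chosen spots may serve)
        #               Potspot[i] <= sum_j transport[i][j]  (chosen spots must serve someone)
        #               sum_i Potspot[i] == k                (exactly k parking spots open)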
m.optimize()
m.getObjective()
objectivemat.append(m.objVal)
for i in PPS:
Solution[k - 1][i] = Potspot[i].x
print(k, i)
# m.write('FLM1.11.lp')
if k == len(costmatrix):
clients = []
dropsize = []
droppercl = []
durpercl = []
for i in PPS:
clients.append(0)
dropsize.append(0)
droppercl.append(0)
durpercl.append(0)
for j in range(len(TS)):
dropsize[i] = TS[j] * transport[i][j].x + dropsize[i]
clients[i] = clients[i] + transport[i][j].x
droppercl[i] = dropsize[i] / clients[i]
durpercl[i] = stdmean.Mean[i] / clients[i]
filesearcher()
codes = pd.read_csv(frequencies_file)
Solution.columns = ['p' + str(i) for i in range(len(costmatrix))]
os.chdir(os.path.dirname(os.getcwd()))
centr = pd.read_csv('./6DistanceMatrices/Input/Centroids.csv')
coords = centr[['latitud', 'longitud']]
result = concat([Solution, coords], axis=1)
dropsize = pd.DataFrame(dropsize)
droppercl = pd.DataFrame(droppercl)
clients = pd.DataFrame(clients)
durpercl = pd.DataFrame(durpercl)
stopsdataset = concat([coords, stdmean, dropsize, droppercl, clients, durpercl,], axis=1)
stopsdataset.columns = ['latitud', 'longitud', 'sigma', 'mean_duration', 'drop_size_per_number_of_clientes',
'dropsize_per_stop', 'clientes', 'duration_per_number_of_clients']
pd.DataFrame(result).to_csv(
"./8Optimization/Input/SolutionFLM2.csv") # , header=['p'+str(i) for i in range(len(costmatrix)), 'latitud', 'longitud'])
pd.DataFrame(result).to_csv("./Km2Datasets/Scenario/Km2Solution.csv")
pd.DataFrame(stopsdataset).to_csv('./Km2Datasets/Stops/Km2Stops.csv', index=False)
def main(argv):
input = "input.csv"
output = "output.csv"
polygon_file_name = "polygon.csv"
client_file_name = "clients.csv"
distance_stops_to_stops = "distance_stops_to_stops.csv"
time_stops_to_stops = "time_stops_to_stops.csv"
distance_stops_to_customers = "distance_stops_to_customers.csv"
time_stops_to_customer = "time_stops_to_customer.csv"
hash_name = ''.join(random.choice(string.ascii_uppercase) for i in range(24))
hash_name += ".csv"
try:
opts, args = getopt.getopt(argv, "c:o:p:t:v:x:y:z:",
["centroids=", "output=", "polygon=", "clients=", "distance_stops_to_stops=",
"time_stops_to_stops=", "distance_stops_to_customers=", "time_stops_to_customers="])
except getopt.GetoptError, e:
sys.exit(2)
for opt, arg in opts:
if opt in ("-c", "--centroids"):
input = arg
elif opt in ("-o", "--output"):
output = arg
elif opt in ("-p", "--polygon"):
polygon_file_name = arg
elif opt in ("-y", "--distance_stops_to_customers"):
distance_stops_to_customers = arg
time_stops_to_customer = arg
create_depot(input, polygon_file_name, hash_name)
process(distance_stops_to_customers)
os.remove(hash_name)
if __name__ == "__main__":
main(sys.argv[1:])
| mit |
mhvk/astropy | astropy/visualization/wcsaxes/formatter_locator.py | 5 | 21247 | # Licensed under a 3-clause BSD style license - see LICENSE.rst
# This file defines the AngleFormatterLocator class which is a class that
# provides both a method for a formatter and one for a locator, for a given
# label spacing. The advantage of keeping the two connected is that we need to
# make sure that the formatter can correctly represent the spacing requested and
# vice versa. For example, a format of dd:mm cannot work with a tick spacing
# that is not a multiple of one arcminute.
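# Illustrative example of that coupling (a sketch, not part of this module): with
# format 'dd:mm' the base spacing worked out below is 1 arcminute, so something like
#   AngleFormatterLocator(format='dd:mm', spacing=0.5 * u.arcmin)
# would, per the setters below, warn and reset the spacing to 1 arcminute.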
import re
import warnings
import numpy as np
from matplotlib import rcParams
from astropy import units as u
from astropy.units import UnitsError
from astropy.coordinates import Angle
DMS_RE = re.compile('^dd(:mm(:ss(.(s)+)?)?)?$')
HMS_RE = re.compile('^hh(:mm(:ss(.(s)+)?)?)?$')
DDEC_RE = re.compile('^d(.(d)+)?$')
DMIN_RE = re.compile('^m(.(m)+)?$')
DSEC_RE = re.compile('^s(.(s)+)?$')
SCAL_RE = re.compile('^x(.(x)+)?$')
# Units with custom representations - see the note where it is used inside
# AngleFormatterLocator.formatter for more details.
CUSTOM_UNITS = {
u.degree: u.def_unit('custom_degree', represents=u.degree,
format={'generic': '\xb0',
'latex': r'^\circ',
'unicode': '°'}),
u.arcmin: u.def_unit('custom_arcmin', represents=u.arcmin,
format={'generic': "'",
'latex': r'^\prime',
'unicode': '′'}),
u.arcsec: u.def_unit('custom_arcsec', represents=u.arcsec,
format={'generic': '"',
'latex': r'^{\prime\prime}',
'unicode': '″'}),
u.hourangle: u.def_unit('custom_hourangle', represents=u.hourangle,
format={'generic': 'h',
'latex': r'^{\mathrm{h}}',
'unicode': r'$\mathregular{^h}$'})}
class BaseFormatterLocator:
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None,
unit=None, format_unit=None):
if len([x for x in (values, number, spacing) if x is None]) < 2:
raise ValueError("At most one of values/number/spacing can be specifed")
self._unit = unit
self._format_unit = format_unit or unit
if values is not None:
self.values = values
elif number is not None:
self.number = number
elif spacing is not None:
self.spacing = spacing
else:
self.number = 5
self.format = format
@property
def values(self):
return self._values
@values.setter
def values(self, values):
if not isinstance(values, u.Quantity) or (not values.ndim == 1):
raise TypeError("values should be an astropy.units.Quantity array")
if not values.unit.is_equivalent(self._unit):
raise UnitsError("value should be in units compatible with "
"coordinate units ({}) but found {}".format(self._unit, values.unit))
self._number = None
self._spacing = None
self._values = values
@property
def number(self):
return self._number
@number.setter
def number(self, number):
self._number = number
self._spacing = None
self._values = None
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
self._number = None
self._spacing = spacing
self._values = None
def minor_locator(self, spacing, frequency, value_min, value_max):
if self.values is not None:
return [] * self._unit
minor_spacing = spacing.value / frequency
values = self._locate_values(value_min, value_max, minor_spacing)
index = np.where((values % frequency) == 0)
index = index[0][0]
values = np.delete(values, np.s_[index::frequency])
return values * minor_spacing * self._unit
@property
def format_unit(self):
return self._format_unit
@format_unit.setter
def format_unit(self, unit):
self._format_unit = u.Unit(unit)
@staticmethod
def _locate_values(value_min, value_max, spacing):
imin = np.ceil(value_min / spacing)
imax = np.floor(value_max / spacing)
values = np.arange(imin, imax + 1, dtype=int)
return values
class AngleFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None,
unit=None, decimal=None, format_unit=None, show_decimal_unit=True):
if unit is None:
unit = u.degree
if format_unit is None:
format_unit = unit
if format_unit not in (u.degree, u.hourangle, u.hour):
if decimal is False:
raise UnitsError("Units should be degrees or hours when using non-decimal (sexagesimal) mode")
self._decimal = decimal
self._sep = None
self.show_decimal_unit = show_decimal_unit
super().__init__(values=values, number=number, spacing=spacing,
format=format, unit=unit, format_unit=format_unit)
@property
def decimal(self):
decimal = self._decimal
if self.format_unit not in (u.degree, u.hourangle, u.hour):
if self._decimal is None:
decimal = True
elif self._decimal is False:
raise UnitsError("Units should be degrees or hours when using non-decimal (sexagesimal) mode")
elif self._decimal is None:
decimal = False
return decimal
@decimal.setter
def decimal(self, value):
self._decimal = value
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and (not isinstance(spacing, u.Quantity) or
spacing.unit.physical_type != 'angle'):
raise TypeError("spacing should be an astropy.units.Quantity "
"instance with units of angle")
self._number = None
self._spacing = spacing
self._values = None
@property
def sep(self):
return self._sep
@sep.setter
def sep(self, separator):
self._sep = separator
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if DMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.degree
if '.' in value:
self._precision = len(value) - value.index('.') - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(':') + 1
elif HMS_RE.match(value) is not None:
self._decimal = False
self._format_unit = u.hourangle
if '.' in value:
self._precision = len(value) - value.index('.') - 1
self._fields = 3
else:
self._precision = 0
self._fields = value.count(':') + 1
elif DDEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.degree
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
elif DMIN_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcmin
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
elif DSEC_RE.match(value) is not None:
self._decimal = True
self._format_unit = u.arcsec
self._fields = 1
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
else:
raise ValueError(f"Invalid format: {value}")
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.e-10:
warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
self.spacing = self.base_spacing * max(1, round(ratio))
@property
def base_spacing(self):
if self.decimal:
spacing = self._format_unit / (10. ** self._precision)
else:
if self._fields == 1:
spacing = 1. * u.degree
elif self._fields == 2:
spacing = 1. * u.arcmin
elif self._fields == 3:
if self._precision == 0:
spacing = 1. * u.arcsec
else:
spacing = u.arcsec / (10. ** self._precision)
if self._format_unit is u.hourangle:
spacing *= 15
return spacing
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * u.arcsec
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced). We return a
# non-zero spacing in case the caller needs to format a single
# coordinate, e.g. for mousover.
if value_min == value_max:
return [] * self._unit, 1 * u.arcsec
if self.spacing is not None:
# spacing was manually specified
spacing_value = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing_value = self.base_spacing.to_value(self._unit)
else:
# otherwise we clip to the nearest 'sensible' spacing
if self.decimal:
from .utils import select_step_scalar
spacing_value = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
else:
if self._format_unit is u.degree:
from .utils import select_step_degree
spacing_value = select_step_degree(dv).to_value(self._unit)
else:
from .utils import select_step_hour
spacing_value = select_step_hour(dv).to_value(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this.
values = self._locate_values(value_min, value_max, spacing_value)
return values * spacing_value * self._unit, spacing_value * self._unit
def formatter(self, values, spacing, format='auto'):
if not isinstance(values, u.Quantity) and values is not None:
raise TypeError("values should be a Quantities array")
if len(values) > 0:
decimal = self.decimal
unit = self._format_unit
if unit is u.hour:
unit = u.hourangle
if self.format is None:
if decimal:
# Here we assume the spacing can be arbitrary, so for example
# 1.000223 degrees, in which case we don't want to have a
# format that rounds to degrees. So we find the number of
# decimal places we get from representing the spacing as a
# string in the desired units. The easiest way to find
# the smallest number of decimal places required is to
# format the number as a decimal float and strip any zeros
# from the end. We do this rather than just trusting e.g.
# str() because str(15.) == 15.0. We format using 10 decimal
# places by default before stripping the zeros since this
# corresponds to a resolution of less than a microarcecond,
# which should be sufficient.
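                    # Worked example (our arithmetic): a spacing of 1.000223 deg formats
                    # as '1.0002230000'; replacing the zeros and stripping leaves 6
                    # characters after the decimal point, so 6 decimal places are used.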
spacing = spacing.to_value(unit)
fields = 0
precision = len(f"{spacing:.10f}".replace('0', ' ').strip().split('.', 1)[1])
else:
spacing = spacing.to_value(unit / 3600)
if spacing >= 3600:
fields = 1
precision = 0
elif spacing >= 60:
fields = 2
precision = 0
elif spacing >= 1:
fields = 3
precision = 0
else:
fields = 3
precision = -int(np.floor(np.log10(spacing)))
else:
fields = self._fields
precision = self._precision
is_latex = format == 'latex' or (format == 'auto' and rcParams['text.usetex'])
if decimal:
# At the moment, the Angle class doesn't have a consistent way
# to always convert angles to strings in decimal form with
# symbols for units (instead of e.g 3arcsec). So as a workaround
# we take advantage of the fact that Angle.to_string converts
# the unit to a string manually when decimal=False and the unit
# is not strictly u.degree or u.hourangle
if self.show_decimal_unit:
decimal = False
sep = 'fromunit'
if is_latex:
fmt = 'latex'
else:
if unit is u.hourangle:
fmt = 'unicode'
else:
fmt = None
unit = CUSTOM_UNITS.get(unit, unit)
else:
sep = None
fmt = None
elif self.sep is not None:
sep = self.sep
fmt = None
else:
sep = 'fromunit'
if unit == u.degree:
if is_latex:
fmt = 'latex'
else:
sep = ('\xb0', "'", '"')
fmt = None
else:
if format == 'ascii':
fmt = None
elif is_latex:
fmt = 'latex'
else:
# Here we still use LaTeX but this is for Matplotlib's
# LaTeX engine - we can't use fmt='latex' as this
# doesn't produce LaTeX output that respects the fonts.
sep = (r'$\mathregular{^h}$', r'$\mathregular{^m}$', r'$\mathregular{^s}$')
fmt = None
angles = Angle(values)
string = angles.to_string(unit=unit,
precision=precision,
decimal=decimal,
fields=fields,
sep=sep,
format=fmt).tolist()
return string
else:
return []
class ScalarFormatterLocator(BaseFormatterLocator):
"""
A joint formatter/locator
"""
def __init__(self, values=None, number=None, spacing=None, format=None,
unit=None, format_unit=None):
if unit is not None:
unit = unit
format_unit = format_unit or unit
elif spacing is not None:
unit = spacing.unit
format_unit = format_unit or spacing.unit
elif values is not None:
unit = values.unit
format_unit = format_unit or values.unit
super().__init__(values=values, number=number, spacing=spacing,
format=format, unit=unit, format_unit=format_unit)
@property
def spacing(self):
return self._spacing
@spacing.setter
def spacing(self, spacing):
if spacing is not None and not isinstance(spacing, u.Quantity):
raise TypeError("spacing should be an astropy.units.Quantity instance")
self._number = None
self._spacing = spacing
self._values = None
@property
def format(self):
return self._format
@format.setter
def format(self, value):
self._format = value
if value is None:
return
if SCAL_RE.match(value) is not None:
if '.' in value:
self._precision = len(value) - value.index('.') - 1
else:
self._precision = 0
if self.spacing is not None and self.spacing < self.base_spacing:
warnings.warn("Spacing is too small - resetting spacing to match format")
self.spacing = self.base_spacing
if self.spacing is not None:
ratio = (self.spacing / self.base_spacing).decompose().value
remainder = ratio - np.round(ratio)
if abs(remainder) > 1.e-10:
warnings.warn("Spacing is not a multiple of base spacing - resetting spacing to match format")
self.spacing = self.base_spacing * max(1, round(ratio))
elif not value.startswith('%'):
raise ValueError(f"Invalid format: {value}")
@property
def base_spacing(self):
return self._format_unit / (10. ** self._precision)
def locator(self, value_min, value_max):
if self.values is not None:
# values were manually specified
return self.values, 1.1 * self._unit
else:
# In the special case where value_min is the same as value_max, we
# don't locate any ticks. This can occur for example when taking a
# slice for a cube (along the dimension sliced).
if value_min == value_max:
return [] * self._unit, 0 * self._unit
if self.spacing is not None:
# spacing was manually specified
spacing = self.spacing.to_value(self._unit)
elif self.number is not None:
# number of ticks was specified, work out optimal spacing
# first compute the exact spacing
dv = abs(float(value_max - value_min)) / self.number * self._unit
if self.format is not None and (not self.format.startswith('%')) and dv < self.base_spacing:
# if the spacing is less than the minimum spacing allowed by the format, simply
# use the format precision instead.
spacing = self.base_spacing.to_value(self._unit)
else:
from .utils import select_step_scalar
spacing = select_step_scalar(dv.to_value(self._format_unit)) * self._format_unit.to(self._unit)
# We now find the interval values as multiples of the spacing and
# generate the tick positions from this
values = self._locate_values(value_min, value_max, spacing)
return values * spacing * self._unit, spacing * self._unit
def formatter(self, values, spacing, format='auto'):
if len(values) > 0:
if self.format is None:
if spacing.value < 1.:
precision = -int(np.floor(np.log10(spacing.value)))
else:
precision = 0
elif self.format.startswith('%'):
return [(self.format % x.value) for x in values]
else:
precision = self._precision
return [("{0:." + str(precision) + "f}").format(x.to_value(self._format_unit)) for x in values]
else:
return []
| bsd-3-clause |
parkerzf/kaggle-expedia | src/features/build_baseline_features.py | 1 | 7635 | import numpy as np
import pandas as pd
import sys
import os
from sklearn.externals import joblib
scriptpath = os.path.dirname(os.path.realpath(sys.argv[0])) + '/../'
sys.path.append(os.path.abspath(scriptpath))
import utils
def time_features_enricher(dataset):
"""
Feature engineering on time related fields
:param dataset: train/test dataset
"""
dataset['date_time_dt'] = pd.to_datetime(dataset.date_time, format = '%Y-%m-%d %H:%M:%S')
dataset['date_time_dow'] = dataset.date_time_dt.dt.dayofweek
dataset['date_time_hour'] = dataset.date_time_dt.dt.hour
dataset['date_time_month'] = dataset.date_time_dt.dt.month
dataset.loc[dataset.srch_ci == '2161-10-00', 'srch_ci'] = '2016-01-20' #handle one error format case in test set
dataset['srch_ci_dt'] = pd.to_datetime(dataset.srch_ci, format = '%Y-%m-%d')
dataset['srch_ci_dow'] = dataset.srch_ci_dt.dt.dayofweek
dataset['srch_ci_month'] = dataset.srch_ci_dt.dt.month
dataset['srch_co_dt'] = pd.to_datetime(dataset.srch_co, format = '%Y-%m-%d')
dataset['srch_co_dow'] = dataset.srch_co_dt.dt.dayofweek
dataset['srch_co_month'] = dataset.srch_co_dt.dt.month
dataset['booking_window'] = (dataset['srch_ci_dt'] - dataset['date_time_dt'])/np.timedelta64(1, 'D')
dataset['booking_window'].fillna(1000, inplace=True)
dataset['booking_window'] = map(int, dataset['booking_window'])
dataset['length_of_stay'] = (dataset['srch_co_dt'] - dataset['srch_ci_dt'])/np.timedelta64(1, 'D')
def gen_top_one_hot_encoding(row, field_name, top_vals):
"""
    Helper for gen_top_one_hot_encoding_column: encodes a single row
    :param row: a single row of the train/test dataset
    :param field_name: the categorical field name
    :param top_vals: array of the top categorical values to encode against
    :return: the one hot encoding of this row as a pandas Series
"""
encoding = np.empty(len(top_vals))
encoding.fill(0)
encoding[top_vals==row[field_name]] = 1
return pd.Series(encoding)
def gen_top_one_hot_encoding_column(dataset, field_name, top_vals):
"""
    Generate one hot encoding columns for the top (typically 10) values of one categorical field
:param dataset: train/test dataset
:param field_name: the categorical field name
:param top_vals: the top vals selected for one hot encoding
:return: the one hot encoding features for the field
"""
encoding = dataset.apply(lambda row: gen_top_one_hot_encoding(row, field_name, np.array(top_vals)), axis=1)
top_vals_str = map(str, top_vals)
encoding.columns = map('_'.join, zip([field_name] * len(top_vals), top_vals_str))
return encoding
def gen_all_top_one_hot_encoding_columns(dataset):
"""
Generate top 10 categorical one hot encoding columns, based on the analysis shown in the reports/figures/report.html
:param dataset: train/test dataset
:return: the one hot encoding features for all the categorical fields
"""
site_name_top_vals = [2, 11, 37, 24, 34, 8, 13, 23, 17, 28]
site_name_encoding = gen_top_one_hot_encoding_column(dataset, 'site_name', site_name_top_vals)
posa_continent_top_vals = [3, 1, 2, 4, 0]
posa_continent_encoding = gen_top_one_hot_encoding_column(dataset, 'posa_continent', posa_continent_top_vals)
user_location_country_top_vals = [66, 205, 69, 3, 77, 46, 1, 215, 133, 68]
user_location_country_encoding = gen_top_one_hot_encoding_column(dataset, 'user_location_country', user_location_country_top_vals)
user_location_region_top_vals = [174, 354, 348, 442, 220, 462, 155, 135, 50, 258]
user_location_region_encoding = gen_top_one_hot_encoding_column(dataset, 'user_location_region', user_location_region_top_vals)
channel_top_vals = [9, 10, 0, 1, 5, 2, 3, 4, 7, 8]
channel_encoding = gen_top_one_hot_encoding_column(dataset, 'channel', channel_top_vals)
srch_destination_type_id_top_vals = [1, 6, 3, 5, 4, 8, 7, 9, 0]
srch_destination_type_id_encoding = gen_top_one_hot_encoding_column(dataset, 'srch_destination_type_id', srch_destination_type_id_top_vals)
hotel_continent_top_vals = [2, 6, 3, 4, 0, 5, 1]
hotel_continent_encoding = gen_top_one_hot_encoding_column(dataset, 'hotel_continent', hotel_continent_top_vals)
hotel_country_top_vals = [50, 198, 70, 105, 8, 204, 77, 144, 106, 63]
hotel_country_encoding = gen_top_one_hot_encoding_column(dataset, 'hotel_country', hotel_country_top_vals)
return site_name_encoding, posa_continent_encoding, user_location_country_encoding, user_location_region_encoding, \
channel_encoding, srch_destination_type_id_encoding, hotel_continent_encoding, hotel_country_encoding
def fill_na_features(dataset):
"""
Fill the remaining missing values
:param dataset: train/test dataset
"""
dataset.fillna(-1, inplace=True)
#############################################################
#################### train dataset ####################
#############################################################
train = utils.load_train('baseline')
train_is_booking = train[train.is_booking == 1]
train_is_booking.reset_index(inplace = True)
train_is_booking.is_copy = False
del train
print 'generate train time features...'
time_features_enricher(train_is_booking)
print 'generate train one hot encoding features...'
site_name_encoding, posa_continent_encoding, user_location_country_encoding, user_location_region_encoding, \
channel_encoding, srch_destination_type_id_encoding, hotel_continent_encoding, hotel_country_encoding = \
gen_all_top_one_hot_encoding_columns(train_is_booking)
print 'fill train na features...'
fill_na_features(train_is_booking)
print 'concat all train baseline features...'
train_is_booking_features = pd.concat([train_is_booking[['hotel_cluster', 'date_time', 'orig_destination_distance', \
'is_mobile', 'is_package', 'srch_adults_cnt', 'srch_children_cnt', 'srch_rm_cnt', \
'date_time_dow', 'date_time_hour', 'date_time_month', 'srch_ci_dow', 'srch_ci_month', \
'srch_co_dow', 'srch_co_month', 'booking_window', 'length_of_stay']], \
site_name_encoding, posa_continent_encoding, user_location_country_encoding, user_location_region_encoding, \
channel_encoding, srch_destination_type_id_encoding, hotel_continent_encoding, hotel_country_encoding], axis=1)
train_is_booking_features.to_csv(utils.processed_data_path +
'_'.join(['train_is_booking_baseline', 'year', utils.train_year]) + '.csv',
header=True, index=False)
del train_is_booking
#############################################################
#################### test dataset ####################
#############################################################
test = utils.load_test('baseline')
print 'generate test time features...'
time_features_enricher(test)
print 'generate test one hot encoding features...'
site_name_encoding, posa_continent_encoding, user_location_country_encoding, user_location_region_encoding, \
channel_encoding, srch_destination_type_id_encoding, hotel_continent_encoding, hotel_country_encoding = \
gen_all_top_one_hot_encoding_columns(test)
print 'fill test na features...'
fill_na_features(test)
print 'concat all test baseline features...'
test_features = pd.concat([test[['date_time', 'orig_destination_distance', \
'is_mobile', 'is_package', 'srch_adults_cnt', 'srch_children_cnt', 'srch_rm_cnt', \
'date_time_dow', 'date_time_hour', 'date_time_month', 'srch_ci_dow', 'srch_ci_month', \
'srch_co_dow', 'srch_co_month', 'booking_window', 'length_of_stay']], \
site_name_encoding, posa_continent_encoding, user_location_country_encoding, user_location_region_encoding, \
channel_encoding, srch_destination_type_id_encoding, hotel_continent_encoding, hotel_country_encoding], axis=1)
test_features.to_csv(utils.processed_data_path +
'_'.join(['test_baseline', 'year', utils.train_year]) + '.csv',
header=True, index=False)
| bsd-3-clause |
abhishekkrthakur/scikit-learn | examples/ensemble/plot_forest_iris.py | 335 | 6271 | """
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and the sepal
length features only, on the second row using the petal length and sepal length
only, and on the third row using the petal width and the petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation, we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of the scores (but
the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn import clone
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.externals.six.moves import xrange
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
plot_colors = "ryb"
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
np.random.shuffle(idx)
X = X[idx]
y = y[idx]
# Standardize
mean = X.mean(axis=0)
std = X.std(axis=0)
X = (X - mean) / std
# Train
clf = clone(model)
clf = model.fit(X, y)
scores = clf.score(X, y)
# Create a title for each column and the console by using str() and
# slicing away useless parts of the string
model_title = str(type(model)).split(".")[-1][:-2][:-len("Classifier")]
model_details = model_title
if hasattr(model, "estimators_"):
model_details += " with {} estimators".format(len(model.estimators_))
        print(model_details + " with features", pair, "has a score of", scores)
plt.subplot(3, 4, plot_idx)
if plot_idx <= len(models):
# Add a title at the top of each column
plt.title(model_title)
# Now plot the decision boundary using a fine mesh as input to a
# filled contour plot
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
# Plot either a single DecisionTreeClassifier or alpha blend the
# decision surfaces of the ensemble of classifiers
if isinstance(model, DecisionTreeClassifier):
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=cmap)
else:
# Choose alpha blend level with respect to the number of estimators
# that are in use (noting that AdaBoost can use fewer estimators
# than its maximum if it achieves a good enough fit early on)
estimator_alpha = 1.0 / len(model.estimators_)
for tree in model.estimators_:
Z = tree.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, alpha=estimator_alpha, cmap=cmap)
# Build a coarser grid to plot a set of ensemble classifications
# to show how these are different to what we see in the decision
# surfaces. These points are regularly space and do not have a black outline
xx_coarser, yy_coarser = np.meshgrid(np.arange(x_min, x_max, plot_step_coarser),
np.arange(y_min, y_max, plot_step_coarser))
Z_points_coarser = model.predict(np.c_[xx_coarser.ravel(), yy_coarser.ravel()]).reshape(xx_coarser.shape)
cs_points = plt.scatter(xx_coarser, yy_coarser, s=15, c=Z_points_coarser, cmap=cmap, edgecolors="none")
# Plot the training points, these are clustered together and have a
# black outline
for i, c in zip(xrange(n_classes), plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1], c=c, label=iris.target_names[i],
cmap=cmap)
plot_idx += 1 # move on to the next plot in sequence
plt.suptitle("Classifiers on feature subsets of the Iris dataset")
plt.axis("tight")
plt.show()
| bsd-3-clause |
choldgraf/ecogtools | ecogtools/io/bci2000.py | 1 | 23379 | # -*- coding: utf-8 -*-
#
# $Id: FileReader.py 3326 2011-06-17 23:56:38Z jhill $
#
# This file is part of the BCPy2000 framework, a Python framework for
# implementing modules that run on top of the BCI2000 <http://bci2000.org/>
# platform, for the purpose of realtime biosignal processing.
# Copyright (C) 2007-11 Jeremy Hill, Thomas Schreiner,
# Christian Puzicha, Jason Farquhar
#
# bcpy2000@bci2000.org
#
# The BCPy2000 framework is free software: you can redistribute it
# and/or modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import os
import sys
import struct
import time
try:
import numpy
except:
pass
__all__ = ['ListDatFiles', 'bcistream', 'ParseState',
'ParseParam', 'ReadPrmFile', 'FormatPrmList', 'unescape']
class DatFileError(Exception):
pass
def nsorted(x, width=20):
import re
return zip(*sorted(zip([re.sub('[0-9]+', lambda m: m.group().rjust(width, '0'), xi) for xi in x], x)))[1]
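# Illustrative behaviour (our reading of nsorted): runs of digits are zero-padded
# before sorting, so e.g. ['run10.dat', 'run2.dat'] comes back ordered as
# ('run2.dat', 'run10.dat') instead of plain lexicographic order.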
def ListDatFiles(d='.'):
return nsorted([os.path.realpath(os.path.join(d, f)) for f in os.listdir(d) if f.lower().endswith('.dat')])
class bcistream(object):
def __init__(self, filename, ind=-1):
import numpy
if os.path.isdir(filename):
filename = ListDatFiles(filename)[ind]
sys.stderr.write("file at index %d is %s\n" % (ind, filename))
self.filename = filename
self.headerlen = 0
self.stateveclen = 0
self.nchan = 0
self.bytesperchannel = 0
self.bytesperframe = 0
self.framefmt = ''
self.unpacksig = ''
self.unpackstates = ''
self.paramdefs = {}
self.statedefs = {}
self.samplingfreq_hz = 0
self.gains = None
self.offsets = None
self.params = {}
        self.file = open(self.filename, 'rb')
self.readHeader()
self.file.close()
self.bytesperframe = self.nchan * self.bytesperchannel \
+ self.stateveclen
self.gains = self.params.get('SourceChGain')
if self.gains is not None:
self.gains = numpy.array([float(x) for x in self.gains],
dtype=numpy.float32)
self.gains.shape = (self.nchan, 1)
self.offsets = self.params.get('SourceChOffset')
if self.offsets is not None:
self.offsets = numpy.array([float(x) for x in self.offsets],
dtype=numpy.float32)
self.offsets.shape = (self.nchan, 1)
for k, v in self.statedefs.items():
startbyte = int(v['bytePos'])
startbit = int(v['bitPos'])
nbits = int(v['length'])
nbytes = (startbit+nbits) / 8
if (startbit+nbits) % 8:
nbytes += 1
extrabits = nbytes * 8 - nbits - startbit
startmask = 255 & (255 << startbit)
endmask = 255 & (255 >> extrabits)
div = (1 << startbit)
v['slice'] = slice(startbyte, startbyte + nbytes)
v['mask'] = numpy.array([255]*nbytes, dtype=numpy.uint8)
v['mask'][0] &= startmask
v['mask'][-1] &= endmask
v['mask'].shape = (nbytes, 1)
v['mult'] = numpy.asmatrix(
256.0 ** numpy.arange(nbytes, dtype=numpy.float64) /
float(div))
self.statedefs[k] = v
self.open()
def open(self):
if self.file.closed:
self.file = open(self.filename, 'rb')
self.file.seek(self.headerlen)
def close(self):
if not self.file.closed:
self.file.close()
def __str__(self):
nsamp = self.samples()
s = ["<%s.%s instance at 0x%08X>" % (
self.__class__.__module__, self.__class__.__name__, id(self))]
s.append('file ' + self.filename.replace('\\', '/'))
d = self.datestamp
if not isinstance(d, basestring):
d = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.datestamp))
s.append('recorded ' + d)
s.append('%d samples @ %gHz = %s' % (nsamp, self.samplingfreq_hz, self.sample2time(nsamp),))
s.append('%d channels, total %.3g MB' % (self.nchan, self.datasize()/1024.0**2,) )
if not self.file.closed:
s.append('open for reading at sample %d (%s)' % (self.tell(), self.sample2time(self.tell()),) )
return '\n '.join(s)
def __repr__(self):
return self.__str__()
def channels(self):
return self.nchan
def samplingrate(self):
return self.samplingfreq_hz
def datasize(self):
return os.stat(self.filename)[6] - self.headerlen
def samples(self):
return self.datasize() / self.bytesperframe
def readHeader(self):
line = self.file.readline().split()
k = [x.rstrip('=') for x in line[::2]]
v = line[1::2]
self.headline = dict(zip(k, v))
if len(self.headline) == 0:
raise ValueError('Empty header line, perhaps it has already been read?')
self.headerlen = int(self.headline['HeaderLen'])
self.nchan = int(self.headline['SourceCh'])
self.stateveclen = int(self.headline['StatevectorLen'])
fmtstr = self.headline.get('DataFormat', 'int16')
fmt = {'int16': 'h', 'int32': 'l', 'float32': 'f'}.get(fmtstr)
if fmt is None:
raise DatFileError('unrecognized DataFormat "%s"' % fmtstr)
self.bytesperchannel = struct.calcsize(fmt)
self.framefmt = fmt * self.nchan + 'B' * self.stateveclen
self.unpacksig = fmt * self.nchan + 'x' * self.stateveclen
self.unpackstates = 'x' * self.bytesperchannel * self.nchan + 'B' * self.stateveclen
line = self.file.readline()
if line.strip() != '[ State Vector Definition ]':
raise DatFileError('failed to find state vector definition section where expected')
while True:
line = self.file.readline()
if len(line) == 0 or line[0] == '[':
break
rec = ParseState(line)
name = rec.pop('name')
self.statedefs[name] = rec
if line.strip() != '[ Parameter Definition ]':
raise DatFileError('failed to find parameter definition section where expected')
while True:
line = self.file.readline()
if self.file.tell() >= self.headerlen:
break
rec = ParseParam(line)
name = rec.pop('name')
self.paramdefs[name] = rec
self.params[name] = rec.get('scaled', rec['val'])
self.samplingfreq_hz = float(str(self.params['SamplingRate']).rstrip('Hz'))
def fixdate(d):
try:
import datetime
d = time.mktime(time.strptime(d, '%a %b %d %H:%M:%S %Y'))
except:
pass
else:
return d
try:
import datetime
d = time.mktime(time.strptime(d, '%Y-%m-%dT%H:%M:%S'))
except:
pass
else:
return d
return d
self.datestamp = fixdate(self.params.get('StorageTime'))
def read(self, nsamp=1, apply_gains=True):
if nsamp == -1:
nsamp = self.samples() - self.tell()
if nsamp == 'all':
self.rewind()
nsamp = self.samples()
if isinstance(nsamp, str):
nsamp = self.time2sample(nsamp)
raw = self.file.read(self.bytesperframe*nsamp)
nsamp = len(raw) / self.bytesperframe
sig = numpy.zeros((self.nchan, nsamp), dtype=numpy.float32)
rawstates = numpy.zeros((self.stateveclen, nsamp), dtype=numpy.uint8)
S1 = struct.Struct('<' + self.framefmt[:self.nchan])
n1 = S1.size
S2 = struct.Struct('<' + self.framefmt[self.nchan:])
n2 = S2.size
xT = sig.T
rT = rawstates.T
start = 0
for i in range(nsamp):
mid = start + n1
end = mid + n2
# multiple calls to precompiled Struct.unpack seem to be better than
xT[i].flat = S1.unpack(raw[start:mid])
# a single call to struct.unpack('ffffff...bbb') on the whole data
rT[i].flat = S2.unpack(raw[mid:end])
            # since the single-call approach is no faster and caused *massive* memory leaks on Apple's Python 2.6.1 (Snow Leopard, 64-bit)
start = end
if apply_gains:
if self.gains is not None:
sig *= self.gains
if self.offsets is not None:
sig += self.offsets
sig = numpy.asmatrix(sig)
return sig, rawstates
def decode(self, nsamp=1, states='all', apply_gains=True):
sig, rawstates = self.read(nsamp, apply_gains=apply_gains)
states, statenames = {}, states
if statenames == 'all':
statenames = self.statedefs.keys()
for statename in statenames:
sd = self.statedefs[statename]
states[statename] = numpy.array(
sd['mult'] * numpy.asmatrix(rawstates[sd['slice'], :] & sd['mask']), dtype=numpy.int32)
return sig, states
def tell(self):
if self.file.closed:
raise IOError('dat file is closed')
return (self.file.tell() - self.headerlen) / self.bytesperframe
def seek(self, value, wrt='bof'):
if self.file.closed:
raise IOError('dat file is closed')
if isinstance(value, str):
value = self.time2sample(value)
if wrt in ('bof', -1):
wrt = 0
elif wrt in ('eof', +1):
wrt = self.samples()
elif wrt in ('cof', 0):
wrt = self.tell()
else:
raise IOError('unknown origin "%s"' % str(wrt))
value = min(self.samples(), max(0, value + wrt))
self.file.seek(value * self.bytesperframe + self.headerlen)
def rewind(self):
self.file.seek(self.headerlen)
def time2sample(self, value):
t = value.split(':')
if len(t) > 3:
raise DatFileError('too many colons in timestamp "%s"' % value)
t.reverse()
t = [float(x) for x in t] + [0]*(3-len(t))
t = t[0] + 60.0 * t[1] + 3600.0 * t[2]
return int(round(t * self.samplingfreq_hz))
def sample2time(self, value):
msecs = round(1000.0 * float(value) / self.samplingfreq_hz)
secs, msecs = divmod(int(msecs), 1000)
mins, secs = divmod(int(secs), 60)
hours, mins = divmod(int(mins), 60)
return '%02d:%02d:%02d.%03d' % (hours,mins,secs,msecs)
def msec2samples(self, msec):
if msec is None:
return None
return numpy.round(self.samplingfreq_hz * msec / 1000.0)
def samples2msec(self, samples):
if samples is None:
return None
return numpy.round(1000.0 * samples / self.samplingfreq_hz)
def plotstates(self, states): # TODO: choose which states to plot
labels = states.keys()
v = numpy.matrix(numpy.concatenate(states.values(), axis=0),
dtype=numpy.float32)
ntraces, nsamp = v.shape
# v = v - numpy.min(v,1)
sc = numpy.max(v, 1)
sc[numpy.where(sc == 0.0)] = 1.0
v = v / sc
offsets = numpy.asmatrix(numpy.arange(1.0, ntraces+1.0)).A
v = v.T.A * -0.7 + offsets
t = numpy.matrix(range(nsamp), dtype=numpy.float32).T.A / self.samplingfreq_hz
pylab = load_pylab()
pylab.cla()
ax = pylab.gca()
h = pylab.plot(t, v)
ax.set_xlim(0, nsamp/self.samplingfreq_hz)
ax.set_yticks(offsets.flatten())
ax.set_yticklabels(labels)
ax.set_ylim(ntraces+1, 0)
ax.grid(True)
pylab.draw()
return h
# TODO: plot subsets of channels which don't necessarily correspond to ChannelNames param
def plotsig(self, sig, fac=3.0):
ntraces, nsamp = sig.shape
labels = self.params.get('ChannelNames', '')
if len(labels) == 0:
labels = [str(x) for x in range(1, ntraces+1)]
v = numpy.asmatrix(sig).T
v = v - numpy.median(v, axis=0)
offsets = numpy.asmatrix(numpy.arange(-1.0, ntraces + 1.0))
offsets = offsets.A * max(v.A.std(axis=0)) * fac
v = v.A + offsets[:, 1:-1]
t = numpy.matrix(range(nsamp), dtype=numpy.float32).T.A / self.samplingfreq_hz
pylab = load_pylab()
pylab.cla()
ax = pylab.gca()
h = pylab.plot(t, v)
ax.set_xlim(0, nsamp/self.samplingfreq_hz)
ax.set_yticks(offsets.flatten()[1:-1])
ax.set_yticklabels(labels)
ax.set_ylim(offsets.flatten()[-1], offsets.flatten()[0])
ax.grid(True)
pylab.draw()
return h
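# Hedged usage sketch (not part of the original module; 'example.dat' is a
# hypothetical path): open a BCI2000 .dat file, print its header summary, and
# decode the full signal plus every state channel.
def _example_read_dat(filename='example.dat'):
    b = bcistream(filename)
    print(b)                       # header summary: channels, rate, duration
    sig, states = b.decode('all')  # signal matrix and dict of state arrays
    b.close()
    return sig, states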
def unescape(s):
    # unfortunately there are two slight differences between the BCI2000 standard and urllib.unquote
# here's one (empty string)
if s in ['%', '%0', '%00']:
return ''
out = ''
s = list(s)
while len(s):
c = s.pop(0)
if c == '%':
c = ''.join(s[:2])
if c.startswith('%'): # here's the other ('%%' maps to '%')
out += '%'
s = s[1:]
else:
try:
c = int(c, 16)
except:
pass
else:
out += chr(c)
s = s[2:]
else:
out += c
return out
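# Hedged illustration (example strings invented here): BCI2000 escapes a space
# as %20 and a literal percent sign as %%, while a bare '%' encodes the empty
# string.
def _example_unescape():
    return [unescape(s) for s in ('my%20label', '50%%', '%')]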
def ParseState(state):
state = state.split()
return {
'name': state[0],
'length': int(state[1]),
'startVal': int(state[2]),
'bytePos': int(state[3]),
'bitPos': int(state[4])
}
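# Hedged illustration (the state line below is made up, but follows the
# "name length startVal bytePos bitPos" layout that ParseState expects):
def _example_parse_state():
    return ParseState('TargetCode 8 0 1 3')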
def ReadPrmFile(f):
open_here = isinstance(f, str)
if open_here:
f = open(f)
f.seek(0)
p = [ParseParam(line) for line in f.readlines() if len(line.strip())]
if open_here:
f.close()
return p
def ParseParam(param):
param = param.strip().split('//', 1)
comment = ''
if len(param) > 1:
comment = param[1].strip()
param = param[0].split()
category = [unescape(x) for x in param.pop(0).split(':')]
param = [unescape(x) for x in param]
category += [''] * (3-len(category))
    # this shouldn't happen, but some modules seem to register parameters with
    # the string '::' inside one of the category elements; assume this only
    # happens in the third element
if len(category) > 3:
category = category[:2] + [':'.join(category[2:])]
datatype = param.pop(0)
name = param.pop(0).rstrip('=')
rec = {
'name': name, 'comment': comment, 'category': category, 'type': datatype,
'defaultVal': '', 'minVal': '', 'maxVal': '',
}
scaled = None
if datatype in ('int', 'float'):
datatypestr = datatype
datatype = {'float': float, 'int': int}.get(datatype)
val = param[0]
unscaled, units, scaled = DecodeUnits(val, datatype)
if isinstance(unscaled, (str, type(None))):
sys.stderr.write('WARNING: failed to interpret "%s" as type %s in parameter "%s"\n' % (val, datatypestr, name))
rec.update({
'valstr': val,
'val': unscaled,
'units': units,
})
elif datatype in ('string', 'variant'):
val = param.pop(0)
rec.update({
'valstr': val,
'val': val,
})
elif datatype.endswith('list'):
valtype = datatype[:-4]
valtypestr = valtype
valtype = {'float': float, 'int': int, '': str, 'string': str, 'variant': str}.get(valtype, valtype)
if isinstance(valtype, str):
raise DatFileError('Unknown list type "%s"' % valtype)
numel, labels, labelstr = ParseDim(param)
val = param[:numel]
valstr = ' '.join(filter(len, [labelstr]+val))
if valtype == str:
unscaled = val
units = [''] * len(val)
else:
val = [DecodeUnits(x, valtype) for x in val]
if len(val):
unscaled, units, scaled = zip(*val)
else:
unscaled, units, scaled = [], [], []
for u, v in zip(unscaled, val):
if isinstance(u, (str, type(None))):
print('WARNING: failed to interpret "%s" as type %s in parameter "%s"' % (v, valtypestr, name))
rec.update({
'valstr': valstr,
'valtype': valtype,
'len': numel,
'val': unscaled,
'units': units,
})
elif datatype.endswith('matrix'):
valtype = datatype[:-6]
valtype = {'float': float, 'int' :int, '': str, 'string': str, 'variant': str}.get(valtype, valtype)
if isinstance(valtype, str):
raise DatFileError('Unknown matrix type "%s"' % valtype)
nrows, rowlabels, rowlabelstr = ParseDim(param)
ncols, collabels, collabelstr = ParseDim(param)
valstr = ' '.join(filter(len, [rowlabelstr, collabelstr] + param[:nrows * ncols]))
val = []
for i in range(nrows):
val.append([])
for j in range(ncols):
val[-1].append(param.pop(0))
rec.update({
'valstr': valstr,
'valtype': valtype,
'val': val,
'shape': (nrows, ncols),
'dimlabels': (rowlabels, collabels),
})
else:
print("unsupported parameter type", datatype)
rec.update({
'valstr': ' '.join(param),
'val': param,
})
param.reverse()
if len(param):
rec['maxVal'] = param.pop(0)
if len(param):
rec['minVal'] = param.pop(0)
if len(param):
rec['defaultVal'] = param.pop(0)
if scaled is None:
rec['scaled'] = rec['val']
else:
rec['scaled'] = scaled
return rec
def ParseDim(param):
extent = param.pop(0)
labels = []
if extent == '{':
while True:
p = param.pop(0)
if p == '}':
break
labels.append(p)
extent = len(labels)
labelstr = ' '.join(['{'] + labels + ['}'])
else:
labelstr = extent
extent = int(extent)
labels = [str(x) for x in range(1,extent+1)]
return extent, labels, labelstr
def DecodeUnits(s, datatype=float):
if s.lower().startswith('0x'):
return int(s, 16), None, None
units = ''
while len(s) and not s[-1] in '0123456789.':
units = s[-1] + units
s = s[:-1]
if len(s) == 0:
return None, None, None
try:
unscaled = datatype(s)
except:
try:
unscaled = float(s)
except:
return s, None, None
scaled = unscaled * {
'hz': 1, 'khz': 1000, 'mhz': 1000000,
'muv': 1, 'mv': 1000, 'v': 1000000,
'musec': 0.001, 'msec': 1, 'sec': 1000, 'min': 60000,
'ms': 1, 's': 1000,
}.get(units.lower(), 1)
return unscaled, units, scaled
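# Hedged illustration (values chosen here for demonstration): DecodeUnits
# splits a BCI2000 value string into (unscaled, units, scaled-to-base-units).
def _example_decode_units():
    return [(s, DecodeUnits(s)) for s in ('256Hz', '2kHz', '500ms', '0x10')]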
def FormatPrmList(p, sort=False):
max_element_width = 6
max_value_width = 20
max_treat_string_as_number = 1
def escape(s):
s = s.replace('%', '%%')
s = s.replace(' ', '%20')
if len(s) == 0:
s = '%'
return s
def FormatDimLabels(p):
dl = p.get('dimlabels', None)
if dl is None:
if isinstance(p['val'], (tuple, list)):
return ['', str(len(p['val']))]
else:
return ['', '']
sh = p['shape']
dl = list(dl)
for i in range(len(dl)):
if len(dl[i]):
t = ' '.join([escape(x) for x in dl[i]])
td = ' '.join([str(j+1) for j in range(len(dl[i]))])
if t == td:
dl[i] = str(len(dl[i]))
else:
dl[i] = '{ ' + t + ' }'
else: dl[i] = str(sh[i])
return dl
def FormatVal(p):
if isinstance(p, dict):
p = p['val']
if isinstance(p, (tuple,list)):
return ' '.join([FormatVal(x) for x in p])
return escape(str(p))
vv = []
pp = [{} for i in range(len(p))]
for i in range(len(p)):
pp[i]['category'] = ':'.join([x for x in p[i]['category'] if len(x.strip())])
pp[i]['type'] = p[i]['type']
pp[i]['name'] = p[i]['name'] + '='
pp[i]['rows'], pp[i]['cols'] = FormatDimLabels(p[i])
v = FormatVal(p[i]).split()
if len(v) == 0:
v = ['%']
pp[i]['val'] = range(len(vv), len(vv)+len(v))
vv += v
pp[i]['comment'] = '// ' + p[i]['comment']
align = [len(v) <= max_element_width for v in vv]
numalign = [len(v) <= max_treat_string_as_number or v[0] in '+-.0123456789' for v in vv]
n = [(vv[i]+' ').replace('.', ' ').index(' ') * int(align[i] and numalign[i]) for i in range(len(vv))]
maxn = max(n)
for i in range(len(vv)):
if align[i] and numalign[i]:
vv[i] = ' ' * (maxn-n[i]) + vv[i]
if align[i]:
n[i] = len(vv[i])
maxn = max(n)
for i in range(len(vv)):
if align[i] and numalign[i]:
vv[i] = vv[i].ljust(maxn, ' ')
elif align[i]:
vv[i] = vv[i].rjust(maxn, ' ')
for i in range(len(pp)):
pp[i]['val'] = ' '.join([vv[j] for j in pp[i]['val']])
align = [len(pp[i]['val']) <= max_value_width for i in range(len(pp))]
maxn = max([0]+[len(pp[i]['val']) for i in range(len(pp)) if align[i]])
for i in range(len(pp)):
if align[i]:
pp[i]['val'] = pp[i]['val'].ljust(maxn, ' ')
for x in pp[0].keys():
n = [len(pp[i][x]) for i in range(len(pp))]
n = max(n)
for i in range(len(pp)):
if x in ['rows', 'cols']:
pp[i][x] = pp[i][x].rjust(n, ' ')
elif x not in ['comment', 'val']:
pp[i][x] = pp[i][x].ljust(n, ' ')
if x not in ['rows', 'cols', 'category']:
pp[i][x] = ' ' + pp[i][x]
if sort:
pp = sorted(pp, cmp=lambda x, y: cmp((x['category'], x['name']), (y['category'],y['name'])))
fields = 'category type name rows cols val comment'.split()
for i in range(len(pp)):
pp[i] = ' '.join([pp[i][x] for x in fields])
return pp
def load_pylab():
try:
import matplotlib
if 'matplotlib.backends' not in sys.modules:
matplotlib.interactive(True)
import pylab
return pylab
except:
print(__name__, "module failed to import pylab: plotting methods will not work")
| bsd-2-clause |
freeman-lab/altair | altair/mpl.py | 1 | 7416 | from matplotlib import pyplot as plt
import matplotlib.markers as mmarkers
import pandas as pd
import numpy as np
from cycler import cycler
from .spec import SPEC
def _determine_col_name(agg_shelf, shelf):
if 'bin' in agg_shelf and agg_shelf.bin:
agg_coll = '{}_{}_bin'.format(agg_shelf.name, shelf)
else:
agg_coll = agg_shelf.name
return agg_coll
def _vl_line(ax, encoding, data, pl_kw):
return ax.plot(encoding.x.name, encoding.y.name, data=data,
**pl_kw)
def _vl_area(ax, encoding, data, pl_kw):
ln, = ax.plot(encoding.x.name, encoding.y.name, data=data, **pl_kw)
area = ax.fill_between(encoding.x.name, encoding.y.name,
data=data, **pl_kw)
return ln, area
def _vl_point(ax, encoding, data, pl_kw):
pl_kw.setdefault('linestyle', 'none')
if 'shape' in encoding:
data_itr = _do_shape(encoding.shape, data)
else:
data_itr = [((None, data), {'marker': 'o'})]
lns = []
for (k, df), sty in data_itr:
sty_dict = {}
sty_dict.update(pl_kw)
sty_dict.update(sty)
ln = ax.plot(encoding.x.name, encoding.y.name, data=df, **sty_dict)
lns.extend(ln)
return lns
def _vl_bar(ax, encoding, data, pl_kw):
return ax.bar(encoding.x.name, encoding.y.name, data=data, **pl_kw)
def _do_shape(shape, data):
"""Sort out how to do shape
Given an encoding + a possibly reduced DataFrame, return
an iterator of (gb_key, DataFrame), style kwarg
"""
filled = shape.filled
shapes = mmarkers.MarkerStyle.filled_markers
if not filled:
shapes = shapes + ('x', '4', '3', '+', '2', '1')
cyl = cycler('marker', shapes)
if shape.type == 'Q':
dig_data, bin_edges = _digitize_col(shape, data)
data['{}_shape_bin'.format(shape.name)] = dig_data
fill = 'full' if filled else 'none'
cyl *= cycler('fillstyle', (fill, ))
gb = data.groupby(shape.name)
for df, sty in zip(gb, cyl):
yield df, sty
def _do_aggregate(encoding, data, agg_key, by_keys):
agg_shelf = getattr(encoding, agg_key)
agg_coll = _determine_col_name(agg_shelf, agg_key)
agg_method = getattr(encoding, agg_key).aggregate
binned = data[agg_coll].groupby(by_keys)
agg_func_name = _AGG_MAP[agg_method]
data = getattr(binned, agg_func_name)().reset_index()
return data
def _do_binning(vls, data, bin_key, plot_kwargs):
encoding = vls.encoding
shelf = getattr(encoding, bin_key)
dig, bin_edges = _digitize_col(shelf, data)
data['{}_{}_bin'.format(shelf.name, bin_key)] = dig
if vls.marktype == 'bar':
plot_kwargs['width'] = np.mean(np.diff(bin_edges)) * .9
plot_kwargs['align'] = 'center'
return data
def _digitize_col(bin_encoding, data):
x_name = bin_encoding.name
d_min, d_max = data[x_name].min(), data[x_name].max()
bin_count = bin_encoding.bin
if isinstance(bin_count, bool):
bin_count = 3
bin_count = int(bin_count)
# add one to bin count as we are generating edges here
bin_edges = np.linspace(d_min, d_max, bin_count + 1, endpoint=True)
centers = (bin_edges[1:] + bin_edges[:-1]) / 2
dig = np.digitize(data[x_name], bin_edges, right=True) - 1
valid_mask = (-1 < dig) * (dig < len(centers))
ret = np.zeros_like(dig, dtype='float')
ret[valid_mask] = centers[dig[valid_mask]]
ret[~valid_mask] = np.nan
return ret, bin_edges
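# Hedged sketch (uses a tiny stand-in for the vega-lite shelf object, since the
# real encoding classes live elsewhere in altair): _digitize_col maps raw
# values onto the centers of `bin` equal-width bins.
def _example_digitize():
    from collections import namedtuple
    Shelf = namedtuple('Shelf', ['name', 'bin'])
    df = pd.DataFrame({'x': np.linspace(0.0, 10.0, 11)})
    centers, edges = _digitize_col(Shelf(name='x', bin=5), df)
    return centers, edges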
def render(vls, data=None):
"""Render a vega-lite spec using matplotlib
Parameters
----------
    vls : spec object
        An object exposing the vega-lite spec as attributes (``encoding``,
        ``marktype``, ``data``)
    data : pandas.DataFrame, optional
        Data to plot; if None, ``vls.data`` is used
    Returns
    -------
    arts : dict
        Dictionary keyed on facet group of the artists created
    ax_map : dict
        Dictionary keyed on facet group of the Axes created
"""
plot_kwargs = {}
encoding = vls.encoding
if data is None:
data = vls.data
data = pd.DataFrame(data)
shelves = ('row', 'col', 'shape', 'size', 'color', 'y', 'x')
# TODO these seem to be missing from the api.py
# 'detail', 'text')
used_cols = list(set(getattr(encoding, k).name
for k in shelves if k in encoding))
data = data[used_cols]
x_binned = 'bin' in encoding.x and encoding.x.bin
y_binned = 'bin' in encoding.y and encoding.y.bin
if x_binned and y_binned:
raise NotImplementedError("Double binning not done yet")
for sh in shelves:
if sh not in encoding:
continue
shelf = getattr(encoding, sh)
if 'bin' in shelf and shelf.bin:
data = _do_binning(vls, data, sh, plot_kwargs)
data = data.dropna()
plot_func = _MARK_DISPATCHER[vls.marktype]
has_row = 'row' in encoding
has_col = 'col' in encoding
ax_map = {}
ax_list = None
if has_col and has_row:
# sort out the names with respect to binning
col_name = _determine_col_name(encoding.col, 'col')
row_name = _determine_col_name(encoding.row, 'row')
row_labels = data[row_name].unique()
col_labels = data[col_name].unique()
grid_keys = [(_['row'], _['col'])
for _ in (cycler('row', row_labels) *
cycler('col', col_labels))]
col_num, row_num = len(col_labels), len(row_labels)
facet_iter = data.groupby([row_name, col_name])
elif has_col:
col_name = _determine_col_name(encoding.col, 'col')
col_labels = data[col_name].unique()
col_num, row_num = len(col_labels), 1
grid_keys = list(col_labels)
facet_iter = data.groupby(col_name)
elif has_row:
row_name = _determine_col_name(encoding.row, 'row')
row_labels = data[row_name].unique()
col_num, row_num = 1, len(row_labels)
grid_keys = list(row_labels)
facet_iter = data.groupby(row_name)
else:
grid_keys = [None, ]
col_num, row_num = 1, 1
def _inner():
yield None, data
facet_iter = _inner()
fig, ax_list = plt.subplots(row_num, col_num,
sharex=True, sharey=True,
squeeze=False)
for k, ax in zip(grid_keys, ax_list.ravel()):
ax_map[k] = ax
ax.set_prop_cycle(cycler('color', 'k'))
if 'x' in encoding and ax.rowNum == row_num - 1:
ax.set_xlabel(encoding.x.name)
if 'y' in encoding and ax.colNum == 0:
ax.set_ylabel(encoding.y.name)
rets = {}
for k, df in facet_iter:
ax = ax_map[k]
_r = plot_func(ax, encoding, df, plot_kwargs)
rets[k] = _r
if k:
ax.set_title(repr(k))
return rets, ax_map
_MARK_DISPATCHER = {'area': _vl_area, # fill below line
'bar': _vl_bar, # bar
'circle': None, # ??
'line': _vl_line, # line
'point': _vl_point, # scatter
'square': None, # ??
'text': None, # ??
'tick': None} # ??
_AGG_MAP = {"avg": 'mean',
"sum": 'sum',
"min": 'min',
"max": 'max',
"count": 'count'}
| bsd-3-clause |
jmfranck/pyspecdata | docs/_downloads/54c91e6b35ea86f52bd26d115eb9b78b/fourier_aliasing.py | 1 | 4725 | """
Fourier Aliasing
================
Here, we show that we can view the Fourier transform as an infinitely repeated
set of replicates (aliases, *s.t.*
:math:`Ш(\nu/t_{dw})*\tilde{f}(\nu)`) and view any of those aliases
(of width :math:`SW=1/t_{dw}`)
that we choose.
"""
# from JF notebook sec:fourier_aliasing_test
from pylab import *
from pyspecdata import *
from pyspecdata.fourier.ft_shift import _get_ft_dt
fl = figlist_var()
t = r_[-10:10:512j]
t -= t[argmin(abs(t))] # to be sure that an index exactly equals zero
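# Hedged illustration (added here for clarity, not in the original notebook
# code): the dwell time of this grid sets the spectral width SW = 1/t_dw,
# i.e. the width of one alias that the rest of this example shifts between.
t_dw = t[1] - t[0]
SW = 1.0 / t_dw
print("dwell time", t_dw, "s --> alias width SW", SW, "Hz")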
data = nddata(empty_like(t,dtype = complex128),[-1],['t']).setaxis('t',t)
data.set_units('t','s') # set the units to s, which are automatically converted to Hz upon FT
sigma = 1.0
data = data.fromaxis('t',lambda x: complex128(exp(-x**2/2./sigma**2)))
test_non_integral = False
data.ft('t',shift = test_non_integral)# this is required for the non-integral shift!
print(data.other_info)
print("is it safe?",data.get_ft_prop('t',['freq','not','aliased']))
fl.next('ft')
fl.plot(data, alpha=0.5)
fl.plot(data.runcopy(imag), alpha=0.5)
expand_x()
expand_y()
print("what is the initial desired startpoint?",data.get_prop("FT_start_time"))
# https://matplotlib.org/3.2.1/api/_as_gen/matplotlib.pyplot.plot.html
default_plot_kwargs = dict(alpha=0.3, lw=2, mew=2, ms=8, marker='o', ls='none')
print("-----------------------")
print("starting standard")
forplot = data.copy() # keep and re-use the gaussian
print("what is the initial desired startpoint?",forplot.get_prop("FT_start_time"))
forplot.ift('t')
#forplot = forplot['t':(-2,2)]
t_start = forplot.getaxis('t')[0]
fl.next('ift')
fl.plot(forplot,label = '$t_{start}$: standard %0.2fs'%t_start,**default_plot_kwargs)
if test_non_integral:
fl.next('ift -- non-integral')
fl.plot(forplot,label = '$t_{start}$: standard %0.2fs'%t_start,**default_plot_kwargs)
#fl.plot(forplot.runcopy(imag),label = 'I: standard',**default_plot_kwargs)
dt = diff(forplot.getaxis('t')[r_[0,1]]).item()
print("and what is the actual first t index (t_start) after I ift?: ", end=' ')
print("t_start is",t_start,"and dt is",dt)
symbols = iter(['d','x','s','o'])
for this_integer in [2,-250,1000]:
print("-----------------------")
print("starting integral shift for",this_integer)
forplot = data.copy() # keep and re-use the gaussian
print("what is the initial desired startpoint?",forplot.get_ft_prop('t',"start_time"))
new_startpoint = t_start + this_integer * dt
print("now, I try to reset the startpoint to",new_startpoint)
print("my dt",dt,"_get_ft_dt",_get_ft_dt(data,'t'))
forplot.ft_clear_startpoints('t',t = new_startpoint,f = 'current')
print("is it safe?",data.get_ft_prop('t',['freq','not','aliased']))
fl.next('ift')
forplot.ift('t')
print("And the actual t startpoint after ift? ",forplot.getaxis('t')[0])
print("the difference between the two?",forplot.getaxis('t')[0] - forplot.get_ft_prop('t',"start_time"))
default_plot_kwargs['marker'] = next(symbols)
fl.plot(forplot,label = '$t_{start}$: shifted by %0.0fpts $\\rightarrow$ %0.2fs'%(this_integer,new_startpoint),**default_plot_kwargs)
print("-----------------------")
#fl.plot(forplot.runcopy(imag),label = 'I: integral shifted',**default_plot_kwargs)
expand_x()
expand_y()
if test_non_integral:
symbols = iter(['d','x','s','o'])
for this_float in [0.5,0.25,10.75]:
print("-----------------------")
print("starting non-integral shift for",this_float)
forplot = data.copy() # keep and re-use the gaussian
print("what is the initial desired startpoint?",forplot.get_ft_prop('t',"start_time"))
print("is it safe?",data.get_ft_prop('t',['freq','not','aliased']))
new_startpoint = t_start + this_float * dt
print("now, I try to reset the startpoint to",new_startpoint)
forplot.ft_clear_startpoints('t',t = new_startpoint,f = 'current')
fl.next('ift -- non-integral')
print("is it safe?",data.get_ft_prop('t',['freq','not','aliased']))
forplot.ift('t')
print("And the actual t startpoint after ift? ",forplot.getaxis('t')[0])
print("the difference between the two?",forplot.getaxis('t')[0] - forplot.get_ft_prop('t',"start_time"))
default_plot_kwargs['marker'] = next(symbols)
default_plot_kwargs['markersize'] = 10.0
fl.plot(forplot,label = '$t_{start}$: shifted by %0.0fpts $\\rightarrow$ %0.2fs'%(this_float,new_startpoint),**default_plot_kwargs)
#fl.plot(forplot.runcopy(imag),label = 'I: integral shifted',**default_plot_kwargs)
#{{{ these are manually set for a nice view of the peak of the gaussian
xlim(-1,1)
ylim(0.9,1.04)
#}}}
fl.show('interpolation_test_150824.pdf')
| bsd-3-clause |
caporaso-lab/short-read-tax-assignment | tax_credit/plotting_functions.py | 4 | 33330 | #!/usr/bin/env python
# ----------------------------------------------------------------------------
# Copyright (c) 2016--, tax-credit development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
import pandas as pd
import seaborn as sns
import numpy as np
from seaborn import violinplot, heatmap
import matplotlib.pyplot as plt
from scipy.stats import (kruskal,
linregress,
mannwhitneyu,
wilcoxon,
ttest_ind,
ttest_rel)
from statsmodels.sandbox.stats.multicomp import multipletests
from skbio.diversity import beta_diversity
from skbio.stats.ordination import pcoa
from skbio.stats.distance import anosim
from biom import load_table
from glob import glob
from os.path import join, split
from itertools import combinations
from IPython.display import display, Markdown
from bokeh.plotting import figure, show, output_file
from bokeh.models import (HoverTool,
WheelZoomTool,
PanTool,
ResetTool,
SaveTool,
ColumnDataSource)
from bokeh.io import output_notebook
def lmplot_from_data_frame(df, x, y, group_by=None, style_theme="whitegrid",
regress=False, hue=None, color_palette=None):
'''Make seaborn lmplot from pandas dataframe.
df: pandas.DataFrame
x: str
x axis variable
y: str
y axis variable
group_by: str
df variable to use for separating plot panels with FacetGrid
style_theme: str
seaborn plot style theme
'''
sns.set_style(style_theme)
lm = sns.lmplot(x, y, col=group_by, data=df, ci=None, size=5,
scatter_kws={"s": 50, "alpha": 1}, sharey=True, hue=hue,
palette=color_palette)
sns.plt.show()
if regress is True:
try:
reg = calculate_linear_regress(df, x, y, group_by)
except ValueError:
reg = calculate_linear_regress(df, x, y, hue)
else:
reg = None
return lm, reg
def pointplot_from_data_frame(df, x_axis, y_vars, group_by, color_by,
color_palette, style_theme="whitegrid",
plot_type=sns.pointplot):
'''Generate seaborn pointplot from pandas dataframe.
df = pandas.DataFrame
x_axis = x axis variable
y_vars = LIST of variables to use for plotting y axis
group_by = df variable to use for separating plot panels with FacetGrid
color_by = df variable on which to plot and color subgroups within data
color_palette = color palette to use for plotting. Either a dict mapping
color_by groups to colors, or a named seaborn palette.
style_theme = seaborn plot style theme
plot_type = allows switching to other plot types, but this is untested
'''
grid = dict()
sns.set_style(style_theme)
for y_var in y_vars:
grid[y_var] = sns.FacetGrid(df, col=group_by, hue=color_by,
palette=color_palette)
grid[y_var] = grid[y_var].map(
sns.pointplot, x_axis, y_var, marker="o", ms=4)
sns.plt.show()
return grid
def heatmap_from_data_frame(df, metric, rows=["Method", "Parameters"],
cols=["Dataset"], vmin=0, vmax=1, cmap='Reds'):
"""Generate heatmap of specified metric by (method, parameter) x dataset
df: pandas.DataFrame
rows: list
df column names to use for categorizing heatmap rows
cols: list
df column names to use for categorizing heatmap rows
metric: str
metric to plot in the heatmap
"""
df = df.pivot_table(index=rows, columns=cols, values=metric)
    df = df.sort_index()
height = len(df.index) * 0.35
width = len(df.columns) * 1
ax = plt.figure(figsize=(width, height))
ax = heatmap(df, cmap=cmap, linewidths=0, square=True, vmin=vmin,
vmax=vmax)
ax.set_title(metric, fontsize=20)
plt.show()
return ax
def boxplot_from_data_frame(df,
group_by="Method",
metric="Precision",
hue=None,
y_min=0.0,
y_max=1.0,
plotf=violinplot,
color='grey',
color_palette=None,
label_rotation=45):
"""Generate boxplot or violinplot of metric by group
To generate boxplots instead of violin plots, pass plotf=seaborn.boxplot
hue, color variables all pass directly to equivalently named
variables in seaborn.violinplot().
group_by = "x"
metric = "y"
"""
sns.set_style("whitegrid")
    ax = plotf(x=group_by, y=metric, hue=hue, data=df, color=color,
               palette=color_palette, order=sorted(df[group_by].unique()))
ax.set_ylim(bottom=y_min, top=y_max)
ax.set_ylabel(metric)
ax.set_xlabel(group_by)
for lab in ax.get_xticklabels():
lab.set_rotation(label_rotation)
plt.show()
return ax
def calculate_linear_regress(df, x, y, group_by):
    '''Calculate linear regression (slope, intercept, etc.) for each group
df: pandas.DataFrame
x: str
x axis variable
y: str
y axis variable
group_by: str
df variable to use for separating data subsets
'''
results = []
for group in df[group_by].unique():
df_mod = df[df[group_by] == group]
slope, intercept, r_value, p_value, std_err = linregress(df_mod[x],
df_mod[y])
results.append((group, slope, intercept, r_value, p_value, std_err))
result = pd.DataFrame(results, columns=[group_by, "Slope", "Intercept",
"R", "P-val", "Std Error"])
return result
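# Hedged usage sketch (toy numbers invented here): fit one regression line per
# method with calculate_linear_regress.
def _example_linear_regress():
    toy = pd.DataFrame({'Depth': [100, 1000, 10000] * 2,
                        'F-measure': [0.60, 0.70, 0.80, 0.50, 0.65, 0.75],
                        'Method': ['rdp'] * 3 + ['blast'] * 3})
    return calculate_linear_regress(toy, 'Depth', 'F-measure', 'Method')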
def per_level_kruskal_wallis(df,
y_vars,
group_by,
dataset_col='Dataset',
level_name="level",
levelrange=range(1, 7),
alpha=0.05,
pval_correction='fdr_bh'):
'''Test whether 2+ population medians are different.
Due to the assumption that H has a chi square distribution, the number of
samples in each group must not be too small. A typical rule is that each
sample must have at least 5 measurements.
df = pandas.DataFrame
y_vars = LIST of variables (df column names) to test
group_by = df variable to use for separating subgroups to compare
dataset_col = df variable to use for separating individual datasets to test
level_name = df variable name that specifies taxonomic level
levelrange = range of taxonomic levels to test.
alpha = level of alpha significance for test
pval_correction = type of p-value correction to use
'''
dataset_list = []
p_list = []
for dataset in df[dataset_col].unique():
df1 = df[df[dataset_col] == dataset]
for var in y_vars:
dataset_list.append((dataset, var))
for level in levelrange:
level_subset = df1[level_name] == level
# group data by groups
group_list = []
for group in df1[group_by].unique():
group_data = df1[group_by] == group
group_results = df1[level_subset & group_data][var]
group_list.append(group_results)
# kruskal-wallis tests
try:
h_stat, p_val = kruskal(*group_list, nan_policy='omit')
                # default to p=1.0 if all values are identical (kruskal raises
                # a ValueError); this is not technically correct: feeding the
                # p-value correction below a 1.0 makes the corrected p-values
                # very slightly less significant than they should be
except ValueError:
h_stat, p_val = ('na', 1) # noqa
p_list.append(p_val)
# correct p-values
rej, pval_corr, alphas, alphab = multipletests(np.array(p_list),
alpha=alpha,
method=pval_correction)
range_len = len([i for i in levelrange])
results = [(dataset_list[i][0], dataset_list[i][1],
*[pval_corr[i * range_len + n] for n in range(0, range_len)])
for i in range(0, len(dataset_list))]
result = pd.DataFrame(results, columns=[dataset_col, "Variable",
*[n for n in levelrange]])
return result
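# Hedged usage sketch (random toy scores; column names assumed to mirror the
# docstring above): compare two methods per taxonomic level.
def _example_kruskal_wallis():
    rng = np.random.RandomState(0)
    toy = pd.DataFrame({'Dataset': ['mock-1'] * 40,
                        'Method': ['rdp'] * 20 + ['blast'] * 20,
                        'level': list(range(1, 3)) * 20,
                        'F-measure': rng.rand(40)})
    return per_level_kruskal_wallis(toy, ['F-measure'], 'Method',
                                    levelrange=range(1, 3))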
def seek_tables(expected_results_dir, table_fn='merged_table.biom'):
'''Find and deliver merged biom tables'''
table_fps = glob(join(expected_results_dir, '*', '*', table_fn))
for table in table_fps:
reference_dir, _ = split(table)
dataset_dir, reference_id = split(reference_dir)
_, dataset_id = split(dataset_dir)
yield table, dataset_id, reference_id
def batch_beta_diversity(expected_results_dir, method="braycurtis",
permutations=99, col='method', dim=2,
colormap={'expected': 'red', 'rdp': 'seagreen',
'sortmerna': 'gray', 'uclust': 'blue',
'blast': 'purple'}):
    '''Find merged biom tables and run beta_diversity_pcoa on each'''
for table, dataset_id, reference_id in seek_tables(expected_results_dir):
display(Markdown('## {0} {1}'.format(dataset_id, reference_id)))
s, r, pc, dm = beta_diversity_pcoa(table, method=method, col=col,
permutations=permutations, dim=dim,
colormap=colormap)
sns.plt.show()
sns.plt.clf()
def make_distance_matrix(biom_fp, method="braycurtis"):
'''biom.Table --> skbio.DistanceMatrix'''
table = load_table(biom_fp)
# extract sample metadata from table, put in df
table_md = {s_id: dict(table.metadata(s_id)) for s_id in table.ids()}
s_md = pd.DataFrame.from_dict(table_md, orient='index')
# extract data from table and multiply, assuming that table contains
# relative abundances (which cause beta_diversity to fail)
table_data = [[int(num * 100000) for num in table.data(s_id)]
for s_id in table.ids()]
# beta diversity
dm = beta_diversity(method, table_data, table.ids())
return dm, s_md
def beta_diversity_pcoa(biom_fp, method="braycurtis", permutations=99, dim=2,
col='method', colormap={'expected': 'red',
'rdp': 'seagreen',
'sortmerna': 'gray',
'uclust': 'blue',
'blast': 'purple'}):
'''From biom table, compute Bray-Curtis distance; generate PCoA plot;
and calculate adonis differences.
biom_fp: path
Path to biom.Table containing sample metadata.
method: str
skbio.Diversity method to use for ordination.
permutations: int
Number of permutations to perform for anosim tests.
dim: int
Number of dimensions to plot. Currently supports only 2-3 dimensions.
col: str
metadata name to use for distinguishing groups for anosim tests and
pcoa plots.
colormap: dict
        map group names (must be group names in col) to colors used for plots.
'''
dm, s_md = make_distance_matrix(biom_fp, method=method)
# pcoa
pc = pcoa(dm)
# anosim tests
results = anosim(dm, s_md, column=col, permutations=permutations)
print('R = ', results['test statistic'], '; P = ', results['p-value'])
if dim == 2:
# bokeh pcoa plots
pc123 = pc.samples.ix[:, ["PC1", "PC2", "PC3"]]
smd_merge = s_md.merge(pc123, left_index=True, right_index=True)
smd_merge['Color'] = [colormap[x] for x in smd_merge['method']]
title = smd_merge['reference'][0]
labels = ['PC {0} ({1:.2f})'.format(d + 1, pc.proportion_explained[d])
for d in range(0, 2)]
circle_plot_from_dataframe(smd_merge, "PC1", "PC2", title,
columns=["method", "sample_id", "params"],
color="Color", labels=labels)
else:
# skbio pcoa plots
pcoa_plot_skbio(pc, s_md, col='method')
return s_md, results, pc, dm
def circle_plot_from_dataframe(df, x, y, title=None, color="Color",
columns=["method", "sample_id", "params"],
labels=None, plot_width=400, plot_height=400,
fill_alpha=0.2, size=10, output_fn=None):
'''Make bokeh circle plot from dataframe, use df columns for hover tool.
df: pandas.DataFrame
Containing all sample data, including color categories.
x: str
df category to use for x-axis coordinates.
y: str
df category to use for y-axis coordinates.
title: str
Title to print above plot.
color: str
df category to use for coloring data points.
columns: list
df categories to add as hovertool metadata.
labels: list
Axis labels for x and y axes. If none, default to column names.
output_fn: path
Filepath for output file. Defaults to None.
Other parameters feed directly to bokeh.plotting.
'''
if labels is None:
labels = [x, y]
source = ColumnDataSource(df)
hover = HoverTool(tooltips=[(c, '@' + c) for c in columns])
TOOLS = [hover, WheelZoomTool(), PanTool(), ResetTool(), SaveTool()]
fig = figure(title=title, tools=TOOLS, plot_width=plot_width,
plot_height=plot_height)
    # Set axis labels
fig.xaxis.axis_label = labels[0]
fig.yaxis.axis_label = labels[1]
# Plot x and y axes
fig.circle(x, y, source=source, color=color, fill_alpha=fill_alpha,
size=size)
if output_fn is not None:
output_file(output_fn)
output_notebook()
show(fig)
def pcoa_plot_skbio(pc, s_md, col='method'):
'''Input principal coordinates, display figure.
pc: skbio.OrdinationResults
Sample coordinates.
s_md: pandas.DataFrame
Sample metadata.
col: str
Category in s_md to use for coloring groups.
'''
# make labels for PCoA plot
pcl = ['PC {0} ({1:.2f})'.format(d + 1, pc.proportion_explained[d])
for d in range(0, 3)]
fig = pc.plot(s_md, col, axis_labels=(pcl[0], pcl[1], pcl[2]),
cmap='jet', s=50)
fig
def average_distance_boxplots(expected_results_dir, group_by="method",
standard='expected', metric="distance",
params='params', beta="braycurtis",
reference_filter=True, reference_col='reference',
references=['gg_13_8_otus',
'unite_20.11.2016_clean_fullITS'],
paired=True, use_best=True, parametric=True,
plotf=violinplot, label_rotation=45,
color_palette=None, y_min=0.0, y_max=1.0,
color=None, hue=None):
'''Distance boxplots that aggregate and average results across multiple
mock community datasets.
reference_filter: bool
Filter by reference dataset to only include specific references in
results?
reference_col: str
df column header containing reference set information.
references: list
List of strings containing names of reference datasets to include.
paired: bool
Perform paired or unpaired comparisons?
parametric: bool
Perform parametric or non-parametric statistical tests?
use_best: bool
Compare average distance distributions across all methods (False) or
only the best parameter configuration for each method? (True)
'''
box = dict()
best = dict()
# Aggregate all distance matrix data
archive = pd.DataFrame()
for table, dataset_id, reference_id in seek_tables(expected_results_dir):
dm, sample_md = make_distance_matrix(table, method=beta)
per_method = per_method_distance(dm, sample_md, group_by=group_by,
standard=standard, metric=metric)
archive = pd.concat([archive, per_method])
# filter out auxiliary reference database results
if reference_filter is True:
archive = archive[archive[reference_col].isin(references)]
# plot results for each reference db separately
for reference in archive[reference_col].unique():
display(Markdown('## {0}'.format(reference)))
archive_subset = archive[archive[reference_col] == reference]
# for each method find best average method/parameter config
if use_best:
best[reference], param_report = isolate_top_params(
archive_subset, group_by, params, metric)
# display(pd.DataFrame(param_report, columns=[group_by, params]))
method_rank = _show_method_rank(
best[reference], group_by, params, metric,
[group_by, params, metric], ascending=False)
else:
best[reference] = archive_subset
results = per_method_pairwise_tests(best[reference], group_by=group_by,
metric=metric, paired=paired,
parametric=parametric)
box[reference] = boxplot_from_data_frame(
best[reference], group_by=group_by, color=color, hue=hue,
y_min=None, y_max=None, plotf=plotf, label_rotation=label_rotation,
metric=metric, color_palette=color_palette)
if use_best:
box[reference] = _add_significance_to_boxplots(
results, method_rank, box[reference], method='method')
sns.plt.show()
sns.plt.clf()
display(results)
return box, best
def _add_significance_to_boxplots(pairwise, rankings, ax, method='Method'):
x_labels = [a.get_text() for a in ax.get_xticklabels()]
methods = [m for m in rankings[method]]
ranks = []
# iterate range instead of methods, so that we can use pop for comparisons
# against shrinking list of methods
methods_copy = methods.copy()
for n in range(len(methods_copy)):
method = methods_copy.pop(0)
inner_rank = {method}
for other_method in methods_copy:
if method in pairwise.index.levels[0] and \
other_method in pairwise.loc[method].index:
if pairwise.loc[method].loc[other_method]['FDR P'] > 0.05:
inner_rank.add(other_method)
elif other_method in pairwise.index.levels[0] and \
method in pairwise.loc[other_method].index:
if pairwise.loc[other_method].loc[method]['FDR P'] > 0.05:
inner_rank.add(other_method)
# only add new set of equalities if it contains unique items
if len(ranks) == 0 or not inner_rank.issubset(ranks[-1]):
ranks.append(inner_rank)
# provide unique letters for each significance group
letters = 'abcdefghijklmnopqrstuvwxyz'
sig_groups = {}
for method in methods:
sig_groups[method] = []
for rank, letter in zip(ranks, letters):
if method in rank:
sig_groups[method].append(letter)
sig_groups[method] = ''.join(sig_groups[method])
# add significance labels above plot
pos = range(len(x_labels))
for tick, label in zip(pos, x_labels):
ax.text(tick, ax.get_ybound()[1], sig_groups[label], size='medium',
horizontalalignment='center', color='k', weight='semibold')
return ax
def _show_method_rank(best, group_by, params, metric, display_fields,
ascending=False):
'''Find the best param configuration for each method and show those
configs, along with the parameters and metric scores.
'''
avg_best = best.groupby([group_by, params]).mean().reset_index()
avg_best_sorted = avg_best.sort_values(by=metric, ascending=ascending)
method_rank = avg_best_sorted.ix[:, display_fields]
display(method_rank)
return method_rank
def fastlane_boxplots(expected_results_dir, group_by="method",
standard='expected', metric="distance", hue=None,
plotf=violinplot, label_rotation=45,
y_min=0.0, y_max=1.0, color=None, beta="braycurtis"):
'''per_method_boxplots for those who don't have time to wait.'''
for table, dataset_id, reference_id in seek_tables(expected_results_dir):
display(Markdown('## {0} {1}'.format(dataset_id, reference_id)))
dm, sample_md = make_distance_matrix(table, method=beta)
per_method_boxplots(dm, sample_md, group_by=group_by, metric=metric,
standard=standard, hue=hue, y_min=y_min,
y_max=y_max, plotf=plotf, color=color,
label_rotation=label_rotation)
def per_method_boxplots(dm, sample_md, group_by="method", standard='expected',
metric="distance", hue=None, y_min=0.0, y_max=1.0,
plotf=violinplot, label_rotation=45, color=None,
color_palette=None):
'''Generate distance boxplots and Mann-Whitney U tests on distance matrix.
dm: skbio.DistanceMatrix
sample_md: pandas.DataFrame
containing sample metadata
group_by: str
df category to use for grouping samples
standard: str
group name in group_by category to which all other groups are compared.
metric: str
name of distance column in output.
To generate boxplots instead of violin plots, pass plotf=seaborn.boxplot
hue, color variables all pass directly to equivalently named variables in
seaborn.violinplot().
'''
box = dict()
within_between = within_between_category_distance(dm, sample_md, 'method')
per_method = per_method_distance(dm, sample_md, group_by=group_by,
standard=standard, metric=metric)
for d, g, s in [(within_between, 'Comparison', '1: Within- vs. Between-'),
(per_method, group_by, '2: Pairwise ')]:
display(Markdown('## Comparison {0} Distance'.format(s + group_by)))
box[g] = boxplot_from_data_frame(
d, group_by=g, color=color, metric=metric, y_min=None, y_max=None,
hue=hue, plotf=plotf, label_rotation=label_rotation,
color_palette=color_palette)
results = per_method_pairwise_tests(d, group_by=g, metric=metric)
sns.plt.show()
sns.plt.clf()
display(results)
return box
def per_method_distance(dm, md, group_by='method', standard='expected',
metric='distance', sample='sample_id'):
'''Compile list of distances between groups of samples in distance matrix.
returns dataframe of distances and group metadata.
dm: skbio.DistanceMatrix
md: pandas.DataFrame
containing sample metadata
group_by: str
df category to use for grouping samples
standard: str
group name in group_by category to which all other groups are compared.
metric: str
name of distance column in output.
sample: str
df category containing sample_id names.
'''
results = []
expected = md[md[group_by] == standard]
observed = md[md[group_by] != standard]
for group in observed[group_by].unique():
group_md = observed[observed[group_by] == group]
for i in list(expected.index.values):
for j in list(group_md.index.values):
if group_md.loc[j][sample] == expected.loc[i][sample]:
results.append((*[n for n in group_md.loc[j]], dm[i, j]))
return pd.DataFrame(results, columns=[*[n for n in md.columns.values],
metric])
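# Hedged usage sketch (a two-sample toy distance matrix invented here): compare
# one mock method's profile against the 'expected' profile of the same sample.
def _example_per_method_distance():
    from skbio.stats.distance import DistanceMatrix
    ids = ['s1.expected', 's1.rdp']
    dm = DistanceMatrix([[0.0, 0.3], [0.3, 0.0]], ids)
    md = pd.DataFrame({'method': ['expected', 'rdp'],
                       'sample_id': ['s1', 's1']}, index=ids)
    return per_method_distance(dm, md)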
def within_between_category_distance(dm, md, md_category, distance='distance'):
'''Compile list of distances between groups of samples and within groups
of samples.
dm: skbio.DistanceMatrix
md: pandas.DataFrame
containing sample metadata
md_category: str
df category to use for grouping samples
'''
distances = []
for i, sample_id1 in enumerate(dm.ids):
sample_md1 = md[md_category][sample_id1]
for sample_id2 in dm.ids[:i]:
sample_md2 = md[md_category][sample_id2]
if sample_md1 == sample_md2:
comp = 'within'
group = sample_md1
else:
comp = 'between'
group = sample_md1 + '_' + sample_md2
distances.append((comp, group, dm[sample_id1, sample_id2]))
return pd.DataFrame(distances, columns=["Comparison", md_category,
distance])
def per_method_pairwise_tests(df, group_by='method', metric='distance',
paired=False, parametric=True):
'''Perform mann whitney U tests between group distance distributions,
followed by FDR correction. Returns pandas dataframe of p-values.
df: pandas.DataFrame
results from per_method_distance()
group_by: str
df category to use for grouping samples
metric: str
df category to use as variable for comparison.
paired: bool
Perform Wilcoxon signed rank test instead of Mann Whitney U. df must be
ordered such that paired samples will appear in same order in subset
        dataframes when df is subset by df[df[group_by] == a[0]][metric].
'''
pvals = []
groups = [group for group in df[group_by].unique()]
combos = [a for a in combinations(groups, 2)]
for a in combos:
try:
if paired is False and parametric is False:
u, p = mannwhitneyu(df[df[group_by] == a[0]][metric],
df[df[group_by] == a[1]][metric],
alternative='two-sided')
elif paired is False and parametric is True:
u, p = ttest_ind(df[df[group_by] == a[0]][metric],
df[df[group_by] == a[1]][metric],
nan_policy='raise')
elif paired is True and parametric is False:
u, p = wilcoxon(df[df[group_by] == a[0]][metric],
df[df[group_by] == a[1]][metric])
else:
u, p = ttest_rel(df[df[group_by] == a[0]][metric],
df[df[group_by] == a[1]][metric],
nan_policy='raise')
except ValueError:
            # default to p=1.0 if all values are identical (the test raises a
            # ValueError); this is not technically correct: feeding the p-value
            # correction below a 1.0 makes the corrected p-values very slightly
            # less significant than they should be
u, p = 0.0, 1.0
pvals.append((a[0], a[1], u, p))
result = pd.DataFrame(pvals, columns=["Method A", "Method B", "stat", "P"])
result.set_index(['Method A', 'Method B'], inplace=True)
try:
result['FDR P'] = multipletests(result['P'], method='fdr_bh')[1]
except ZeroDivisionError:
pass
return result
def isolate_top_params(df, group_by="Method", params="Parameters",
metric="F-measure", ascending=True):
'''For each method in df, find top params for each method and filter df to
contain only those parameters.
df: pandas df
group_by: str
df category name to use for segregating groups from which top param is
chosen.
params: str
df category name indicating parameters column.
'''
best = pd.DataFrame()
param_report = []
for group in df[group_by].unique():
subset = df[df[group_by] == group]
avg = subset.groupby(params).mean().reset_index()
sorted_avg = avg.sort_values(by=metric, ascending=ascending)
top_param = sorted_avg.reset_index()[params][0]
param_report.append((group, top_param))
best = pd.concat([best, subset[subset[params] == top_param]])
return best, param_report
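# Hedged usage sketch (toy scores invented here): keep only each method's best
# parameter configuration, ranked by mean F-measure.
def _example_isolate_top_params():
    toy = pd.DataFrame({'Method': ['rdp', 'rdp', 'blast', 'blast'],
                        'Parameters': ['0.5', '0.8', 'e10', 'e1'],
                        'F-measure': [0.7, 0.9, 0.6, 0.8]})
    return isolate_top_params(toy, ascending=False)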
def rank_optimized_method_performance_by_dataset(df,
dataset="Dataset",
method="Method",
params="Parameters",
metric="F-measure",
level="Level",
level_range=range(5, 7),
display_fields=["Method",
"Parameters",
"Precision",
"Recall",
"F-measure"],
ascending=False,
paired=True,
parametric=True,
hue=None,
y_min=0.0,
y_max=1.0,
plotf=violinplot,
label_rotation=45,
color=None,
color_palette=None):
'''Rank the performance of methods using optimized parameter configuration
within each dataset in dataframe. Optimal methods are computed from the
mean performance of each method/param configuration across all datasets
in df.
df: pandas df
dataset: str
df category to use for grouping samples (by dataset)
method: str
df category to use for grouping samples (by method); these groups are
compared in plots and pairwise statistical testing.
params: str
df category containing parameter configurations for each method. Best
method configurations are computed by grouping method groups on this
category value, then finding the best average metric value.
metric: str
df category containing metric to use for ranking and statistical
comparisons between method groups.
level: str
df category containing taxonomic level information.
level_range: range
Perform plotting and testing at each level in range.
display_fields: list
List of columns in df to display in results.
ascending: bool
Rank methods my metric score in ascending or descending order?
paired: bool
Perform paired statistical test? See per_method_pairwise_tests()
parametric: bool
Perform parametric statistical test? See per_method_pairwise_tests()
To generate boxplots instead of violin plots, pass plotf=seaborn.boxplot
hue, color variables all pass directly to equivalently named variables in
seaborn.violinplot(). See boxplot_from_data_frame() for more
information.
'''
box = dict()
for d in df[dataset].unique():
for lv in level_range:
display(Markdown("## {0} level {1}".format(d, lv)))
df_l = df[df[level] == lv]
best, param_report = isolate_top_params(df_l[df_l[dataset] == d],
method, params, metric,
ascending=ascending)
method_rank = _show_method_rank(
best, method, params, metric, display_fields,
ascending=ascending)
results = per_method_pairwise_tests(best, group_by=method,
metric=metric, paired=paired,
parametric=parametric)
display(results)
box[d] = boxplot_from_data_frame(
best, group_by=method, color=color, metric=metric, y_min=y_min,
y_max=y_max, label_rotation=label_rotation, hue=hue,
plotf=plotf, color_palette=color_palette)
box[d] = _add_significance_to_boxplots(
results, method_rank, box[d])
sns.plt.show()
return box
| bsd-3-clause |
DouglasOrr/DeepLearnTute | dlt/sequence.py | 1 | 2510 | '''Adaptation of the UJI dataset for the "sequential" version of the problem,
rather than the rasterized Dataset from dlt.data.
'''
import numpy as np
import matplotlib.pyplot as plt
import json
class Dataset:
def __init__(self, vocab, points, breaks, masks, labels):
self.vocab = vocab
self.points = points
self.breaks = breaks
self.masks = masks
self.labels = labels
def find(self, char):
label = int(np.where(self.vocab == char)[0])
return np.where(self.labels == label)[0]
def show(self, indices=None, limit=64):
plt.figure(figsize=(16, 16))
indices = list(range(limit) if indices is None else indices)
dim = int(np.ceil(np.sqrt(len(indices))))
for plot_index, index in enumerate(indices):
plt.subplot(dim, dim, plot_index+1)
plt.plot(*zip(*self.points[index, self.masks[index]]))
ends = self.masks[index] & (
self.breaks[index] | np.roll(self.breaks[index], -1))
plt.plot(*zip(*self.points[index, ends]), '.')
plt.title('%d : %s' % (index, self.vocab[self.labels[index]]))
plt.gca().invert_yaxis()
plt.gca().set_aspect('equal')
plt.gca().axis('off')
@classmethod
def load(cls, path, max_length=200):
'''Read the dataset from a JSONlines file.'''
with open(path) as f:
data = [json.loads(line) for line in f]
vocab = np.array(sorted(set(d['target'] for d in data)))
char_to_index = {ch: n for n, ch in enumerate(vocab)}
labels = np.array([char_to_index[d['target']] for d in data],
dtype=np.int32)
nsamples = min(max_length, max(
sum(len(stroke) for stroke in d['strokes']) for d in data))
points = np.zeros((len(data), nsamples, 2), dtype=np.float32)
breaks = np.zeros((len(data), nsamples), dtype=np.bool)
masks = np.zeros((len(data), nsamples), dtype=np.bool)
for n, d in enumerate(data):
stroke = np.concatenate(d['strokes'])[:nsamples]
points[n, :len(stroke)] = stroke
masks[n, :len(stroke)] = True
all_breaks = np.cumsum([len(stroke) for stroke in d['strokes']])
breaks[n, all_breaks[all_breaks < nsamples]] = True
return cls(vocab=vocab,
points=points,
breaks=breaks,
masks=masks,
labels=labels)
| mit |
chris-ch/cointeg | setup.py | 1 | 1046 | import os
from setuptools import setup
def read(fname):
"""
Utility function to read the README file.
Used for the long_description. It's nice, because now 1) we have a top level README file and 2) it's easier to
type in the README file than to put a raw string in below ...
:param fname:
:return:
"""
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name='stat arb toolbox',
version='0.1',
author='Christophe',
author_email='chris.perso@gmail.com',
description='investigating mean reversion on ETFs.',
license='BSD',
keywords='mean reversion systematic trading',
packages=['statsmodelsext', 'mktdata', 'mktdatadb', 'statsext', 'bollinger'],
long_description=read('README.md'),
install_requires=[
'pandas', 'pytz', 'numpy', 'statsmodels', 'matplotlib', 'Quandl', 'scipy', 'xlsxwriter'],
classifiers=[
'Development Status :: 3 - Alpha',
'Topic :: Utilities',
'License :: OSI Approved :: BSD License',
],
) | gpl-3.0 |
asnorkin/sentiment_analysis | site/lib/python2.7/site-packages/sklearn/decomposition/tests/test_fastica.py | 70 | 7808 | """
Test the fastica algorithm.
"""
import itertools
import warnings
import numpy as np
from scipy import stats
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_warns
from sklearn.utils.testing import assert_raises
from sklearn.decomposition import FastICA, fastica, PCA
from sklearn.decomposition.fastica_ import _gs_decorrelation
from sklearn.externals.six import moves
def center_and_norm(x, axis=-1):
""" Centers and norms x **in place**
Parameters
-----------
x: ndarray
Array with an axis of observations (statistical units) measured on
random variables.
axis: int, optional
Axis along which the mean and variance are calculated.
"""
x = np.rollaxis(x, axis)
x -= x.mean(axis=0)
x /= x.std(axis=0)
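# Hedged illustration (not part of the original test suite): after
# center_and_norm, each variable has ~zero mean and ~unit std along the
# observation axis.
def _example_center_and_norm():
    rng = np.random.RandomState(0)
    x = 3.0 + 2.0 * rng.randn(2, 1000)
    center_and_norm(x)
    return x.mean(axis=1), x.std(axis=1)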
def test_gs():
# Test gram schmidt orthonormalization
# generate a random orthogonal matrix
rng = np.random.RandomState(0)
W, _, _ = np.linalg.svd(rng.randn(10, 10))
w = rng.randn(10)
_gs_decorrelation(w, W, 10)
assert_less((w ** 2).sum(), 1.e-10)
w = rng.randn(10)
u = _gs_decorrelation(w, W, 5)
tmp = np.dot(u, W.T)
assert_less((tmp[:5] ** 2).sum(), 1.e-10)
def test_fastica_simple(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
# scipy.stats uses the global RNG:
np.random.seed(0)
n_samples = 1000
# Generate two sources:
s1 = (2 * np.sin(np.linspace(0, 100, n_samples)) > 0) - 1
s2 = stats.t.rvs(1, size=n_samples)
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing angle
phi = 0.6
mixing = np.array([[np.cos(phi), np.sin(phi)],
[np.sin(phi), -np.cos(phi)]])
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(2, 1000)
center_and_norm(m)
# function as fun arg
def g_test(x):
return x ** 3, (3 * x ** 2).mean(axis=-1)
algos = ['parallel', 'deflation']
nls = ['logcosh', 'exp', 'cube', g_test]
whitening = [True, False]
for algo, nl, whiten in itertools.product(algos, nls, whitening):
if whiten:
k_, mixing_, s_ = fastica(m.T, fun=nl, algorithm=algo)
assert_raises(ValueError, fastica, m.T, fun=np.tanh,
algorithm=algo)
else:
X = PCA(n_components=2, whiten=True).fit_transform(m.T)
k_, mixing_, s_ = fastica(X, fun=nl, algorithm=algo, whiten=False)
assert_raises(ValueError, fastica, X, fun=np.tanh,
algorithm=algo)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
if whiten:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=2)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=2)
else:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=1)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=1)
# Test FastICA class
_, _, sources_fun = fastica(m.T, fun=nl, algorithm=algo, random_state=0)
ica = FastICA(fun=nl, algorithm=algo, random_state=0)
sources = ica.fit_transform(m.T)
assert_equal(ica.components_.shape, (2, 2))
assert_equal(sources.shape, (1000, 2))
assert_array_almost_equal(sources_fun, sources)
assert_array_almost_equal(sources, ica.transform(m.T))
assert_equal(ica.mixing_.shape, (2, 2))
for fn in [np.tanh, "exp(-.5(x^2))"]:
ica = FastICA(fun=fn, algorithm=algo, random_state=0)
assert_raises(ValueError, ica.fit, m.T)
assert_raises(TypeError, FastICA(fun=moves.xrange(10)).fit, m.T)
def test_fastica_nowhiten():
m = [[0, 1], [1, 0]]
# test for issue #697
ica = FastICA(n_components=1, whiten=False, random_state=0)
assert_warns(UserWarning, ica.fit, m)
assert_true(hasattr(ica, 'mixing_'))
def test_non_square_fastica(add_noise=False):
# Test the FastICA algorithm on very simple data.
rng = np.random.RandomState(0)
n_samples = 1000
# Generate two sources:
t = np.linspace(0, 100, n_samples)
s1 = np.sin(t)
s2 = np.ceil(np.sin(np.pi * t))
s = np.c_[s1, s2].T
center_and_norm(s)
s1, s2 = s
# Mixing matrix
mixing = rng.randn(6, 2)
m = np.dot(mixing, s)
if add_noise:
m += 0.1 * rng.randn(6, n_samples)
center_and_norm(m)
k_, mixing_, s_ = fastica(m.T, n_components=2, random_state=rng)
s_ = s_.T
# Check that the mixing model described in the docstring holds:
assert_almost_equal(s_, np.dot(np.dot(mixing_, k_), m))
center_and_norm(s_)
s1_, s2_ = s_
# Check to see if the sources have been estimated
# in the wrong order
if abs(np.dot(s1_, s2)) > abs(np.dot(s1_, s1)):
s2_, s1_ = s_
s1_ *= np.sign(np.dot(s1_, s1))
s2_ *= np.sign(np.dot(s2_, s2))
# Check that we have estimated the original sources
if not add_noise:
assert_almost_equal(np.dot(s1_, s1) / n_samples, 1, decimal=3)
assert_almost_equal(np.dot(s2_, s2) / n_samples, 1, decimal=3)
def test_fit_transform():
# Test FastICA.fit_transform
rng = np.random.RandomState(0)
X = rng.random_sample((100, 10))
for whiten, n_components in [[True, 5], [False, None]]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
Xt = ica.fit_transform(X)
assert_equal(ica.components_.shape, (n_components_, 10))
assert_equal(Xt.shape, (100, n_components_))
ica = FastICA(n_components=n_components, whiten=whiten, random_state=0)
ica.fit(X)
assert_equal(ica.components_.shape, (n_components_, 10))
Xt2 = ica.transform(X)
assert_array_almost_equal(Xt, Xt2)
def test_inverse_transform():
# Test FastICA.inverse_transform
n_features = 10
n_samples = 100
n1, n2 = 5, 10
rng = np.random.RandomState(0)
X = rng.random_sample((n_samples, n_features))
expected = {(True, n1): (n_features, n1),
(True, n2): (n_features, n2),
(False, n1): (n_features, n2),
(False, n2): (n_features, n2)}
for whiten in [True, False]:
for n_components in [n1, n2]:
n_components_ = (n_components if n_components is not None else
X.shape[1])
ica = FastICA(n_components=n_components, random_state=rng,
whiten=whiten)
with warnings.catch_warnings(record=True):
# catch "n_components ignored" warning
Xt = ica.fit_transform(X)
expected_shape = expected[(whiten, n_components_)]
assert_equal(ica.mixing_.shape, expected_shape)
X2 = ica.inverse_transform(Xt)
assert_equal(X.shape, X2.shape)
# reversibility test in non-reduction case
if n_components == X.shape[1]:
assert_array_almost_equal(X, X2)
| mit |
ekzhu/datasketch | benchmark/indexes/containment/lshensemble_benchmark_plot.py | 1 | 6598 | import json, sys, argparse
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from utils import get_precision_recall, fscore, average_fscore
def _parse_results(r):
r = r.strip().split(",")
return [x for x in r if len(x) > 0]
def _label(num_part):
if num_part == 1:
label = "MinHash LSH"
else:
label = "LSH Ensemble ({})".format(num_part)
return label
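# Behaviour of the two helpers above on concrete inputs, derived directly from
# their definitions (shown only as a sketch):
#
#     _parse_results("a,b,,c")  ->  ['a', 'b', 'c']     (empty fields dropped)
#     _label(1)                 ->  'MinHash LSH'
#     _label(8)                 ->  'LSH Ensemble (8)'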
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("query_results")
parser.add_argument("ground_truth_results")
parser.add_argument("--asym-query-results")
args = parser.parse_args(sys.argv[1:])
df = pd.read_csv(args.query_results,
converters={"results": _parse_results})
df_groundtruth = pd.read_csv(args.ground_truth_results,
converters={"results": _parse_results})
df_groundtruth["has_result"] = [len(r) > 0
for r in df_groundtruth["results"]]
df_groundtruth = df_groundtruth[df_groundtruth["has_result"]]
df = pd.merge(df, df_groundtruth, on=["query_key", "threshold"],
suffixes=("", "_ground_truth"))
prs = [get_precision_recall(result, ground_truth)
for result, ground_truth in \
zip(df["results"], df["results_ground_truth"])]
df["precision"] = [p for p, _ in prs]
df["recall"] = [r for _, r in prs]
df["fscore"] = [fscore(*pr) for pr in prs]
#df["query_time_lshensemble"] = df["probe_time"] + df["process_time"]
df["query_time_lshensemble"] = df["probe_time"]
if args.asym_query_results is not None:
df_asym = pd.read_csv(args.asym_query_results,
converters={"results": _parse_results})
df = pd.merge(df, df_asym, on=["query_key", "threshold"],
suffixes=("", "_asym"))
prs = [get_precision_recall(result, ground_truth)
for result, ground_truth in \
zip(df["results_asym"], df["results_ground_truth"])]
df["precision_asym"] = [p for p, _ in prs]
df["recall_asym"] = [r for _, r in prs]
df["fscore_asym"] = [fscore(*pr) for pr in prs]
#df["query_time_asym"] = df["probe_time_asym"] + df["process_time_asym"]
df["query_time_asym"] = df["probe_time_asym"]
thresholds = sorted(list(set(df["threshold"])))
num_perms = sorted(list(set(df["num_perm"])))
num_parts = sorted(list(set(df["num_part"])))
for i, num_perm in enumerate(num_perms):
# Plot precisions
for j, num_part in enumerate(num_parts):
sub = df[(df["num_part"] == num_part) & (df["num_perm"] == num_perm)].\
groupby("threshold")
precisions = sub["precision"].mean()
stds = sub["precision"].std()
plt.plot(thresholds, precisions, "^-", label=_label(num_part))
#plt.fill_between(thresholds, precisions-stds, precisions+stds,
# alpha=0.2)
if "precision_asym" in df:
sub = df[(df["num_part"] == 1) & (df["num_perm"] == num_perm)].\
groupby("threshold")
precisions = sub["precision_asym"].mean()
stds = sub["precision_asym"].std()
plt.plot(thresholds, precisions, "s-", label="Asym Minhash LSH")
#plt.fill_between(thresholds, precisions-stds, precisions+stds,
# alpha=0.2)
plt.ylim(0.0, 1.0)
plt.xlabel("Thresholds")
plt.ylabel("Average Precisions")
plt.grid()
plt.legend()
plt.savefig("lshensemble_num_perm_{}_precision.png".format(num_perm))
plt.close()
# Plot recalls
for j, num_part in enumerate(num_parts):
sub = df[(df["num_part"] == num_part) & (df["num_perm"] == num_perm)].\
groupby("threshold")
recalls = sub["recall"].mean()
stds = sub["recall"].std()
plt.plot(thresholds, recalls, "^-", label=_label(num_part))
#plt.fill_between(thresholds, recalls-stds, recalls+stds, alpha=0.2)
if "recall_asym" in df:
sub = df[(df["num_part"] == 1) & (df["num_perm"] == num_perm)].\
groupby("threshold")
recalls = sub["recall_asym"].mean()
stds = sub["recall_asym"].std()
plt.plot(thresholds, recalls, "s-", label="Asym Minhash LSH")
#plt.fill_between(thresholds, recalls-stds, recalls+stds, alpha=0.2)
plt.ylim(0.0, 1.0)
plt.xlabel("Thresholds")
plt.ylabel("Average Recalls")
plt.grid()
plt.legend()
plt.savefig("lshensemble_num_perm_{}_recall.png".format(num_perm))
plt.close()
# Plot fscores.
for j, num_part in enumerate(num_parts):
sub = df[(df["num_part"] == num_part) & (df["num_perm"] == num_perm)].\
groupby("threshold")
fscores = sub["fscore"].mean()
stds = sub["fscore"].std()
plt.plot(thresholds, fscores, "^-", label=_label(num_part))
#plt.fill_between(thresholds, fscores-stds, fscores+stds, alpha=0.2)
if "fscore_asym" in df:
sub = df[(df["num_part"] == 1) & (df["num_perm"] == num_perm)].\
groupby("threshold")
fscores = sub["fscore_asym"].mean()
stds = sub["fscore_asym"].std()
plt.plot(thresholds, fscores, "s-", label="Asym Minhash LSH")
#plt.fill_between(thresholds, fscores-stds, fscores+stds, alpha=0.2)
plt.ylim(0.0, 1.0)
plt.xlabel("Thresholds")
plt.ylabel("Average F-Scores")
plt.grid()
plt.legend()
plt.savefig("lshensemble_num_perm_{}_fscore.png".format(num_perm))
plt.close()
# Plot query time.
for num_part in num_parts:
sub = df[(df["num_part"] == num_part) & (df["num_perm"] == num_perm)].\
groupby("threshold")
t = sub["query_time_lshensemble"].quantile(0.9)
plt.plot(thresholds, t, "^-", label=_label(num_part))
t = df_groundtruth.groupby("threshold")["query_time"].quantile(0.9)
plt.plot(thresholds, t, "o-", label="Exact")
plt.xlabel("Thresholds")
plt.ylabel("90 Percentile Query Time (ms)")
plt.legend()
plt.grid()
plt.savefig("lshensemble_num_perm_{}_query_time.png".format(num_perm))
plt.close()
# Output results
# df = df.drop(columns=["results", "results_ground_truth", "results_asym"])
# df.to_csv("out.csv")
| mit |
enigmampc/catalyst | tests/exchange/test_suites/test_suite_algo.py | 1 | 3542 | import importlib
import pandas as pd
import os
from catalyst import run_algorithm
from catalyst.constants import ALPHA_WARNING_MESSAGE
from catalyst.exchange.utils.stats_utils import get_pretty_stats, \
extract_transactions, set_print_settings, extract_orders
from catalyst.exchange.utils.test_utils import clean_exchange_bundles, \
ingest_exchange_bundles
from catalyst.testing.fixtures import WithLogger, CatalystTestCase
from logbook import TestHandler, WARNING
filter_algos = [
# 'buy_and_hodl.py',
'buy_btc_simple.py',
'buy_low_sell_high.py',
# 'mean_reversion_simple.py',
# 'rsi_profit_target.py',
# 'simple_loop.py',
# 'simple_universe.py',
]
class TestSuiteAlgo(WithLogger, CatalystTestCase):
@staticmethod
def analyze(context, perf):
set_print_settings()
transaction_df = extract_transactions(perf)
print('the transactions:\n{}'.format(transaction_df))
orders_df = extract_orders(perf)
print('the orders:\n{}'.format(orders_df))
stats = get_pretty_stats(perf, show_tail=False, num_rows=5)
print('the stats:\n{}'.format(stats))
pass
def test_run_examples(self):
# folder = join('..', '..', '..', 'catalyst', 'examples')
HERE = os.path.dirname(os.path.abspath(__file__))
folder = os.path.join(HERE, '..', '..', '..', 'catalyst', 'examples')
files = [f for f in os.listdir(folder)
if os.path.isfile(os.path.join(folder, f))]
algo_list = []
for filename in files:
name = os.path.basename(filename)
if filter_algos and name not in filter_algos:
continue
module_name = 'catalyst.examples.{}'.format(
name.replace('.py', '')
)
algo_list.append(module_name)
exchanges = ['poloniex', 'bittrex', 'binance']
asset_name = 'btc_usdt'
quote_currency = 'usdt'
capital_base = 10000
data_freq = 'daily'
start_date = pd.to_datetime('2017-10-01', utc=True)
end_date = pd.to_datetime('2017-12-01', utc=True)
for exchange_name in exchanges:
ingest_exchange_bundles(exchange_name, data_freq, asset_name)
for module_name in algo_list:
algo = importlib.import_module(module_name)
# namespace = module_name.replace('.', '_')
log_catcher = TestHandler()
with log_catcher:
run_algorithm(
capital_base=capital_base,
data_frequency=data_freq,
initialize=algo.initialize,
handle_data=algo.handle_data,
analyze=TestSuiteAlgo.analyze,
exchange_name=exchange_name,
algo_namespace='test_{}'.format(exchange_name),
quote_currency=quote_currency,
start=start_date,
end=end_date,
# output=out
)
warnings = [record for record in log_catcher.records if
record.level == WARNING]
assert(len(warnings) == 1)
assert (warnings[0].message == ALPHA_WARNING_MESSAGE)
assert (not log_catcher.has_errors)
assert (not log_catcher.has_criticals)
clean_exchange_bundles(exchange_name, data_freq)
| apache-2.0 |
fmaschler/networkit | scripts/DynamicBetweennessExperiments.py | 3 | 4139 | from networkit import *
from networkit.dynamic import *
from networkit.centrality import *
import pandas as pd
import random
def removeAndAddEdges(G, nEdges, tabu=None):
if nEdges > G.numberOfEdges() - tabu.numberOfEdges():
		raise ValueError("G does not have enough edges")
# select random edges for removal
removed = set()
while len(removed) < nEdges:
(u, v) = G.randomEdge()
if not tabu.hasEdge(u, v) and not ((u,v) in removed or (v,u) in removed): # exclude all edges in the tabu graph
removed.add((u, v))
print (removed)
# build event streams
removeStream = []
for (u, v) in removed:
removeStream.append(GraphEvent(GraphEvent.EDGE_REMOVAL, u, v, 0))
addStream = []
for (u, v) in removed:
addStream.append(GraphEvent(GraphEvent.EDGE_ADDITION, u, v, 1.0))
return (removeStream, addStream)
def setRandomWeights(G, mu, sigma):
"""
Add random weights, normal distribution with mean mu and standard deviation sigma
"""
for (u, v) in G.edges():
w = random.normalvariate(mu, sigma)
G.setWeight(u, v, w)
return G
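# A hedged usage sketch for the helpers above (it mirrors what test() does
# below; the graph size and edge count are arbitrary illustrations):
#
#     G = generators.DorogovtsevMendesGenerator(1000).generate()
#     T = graph.SpanningForest(G).generate()           # edges that must stay
#     removeStream, addStream = removeAndAddEdges(G, 16, tabu=T)
#     updater = dynamic.GraphUpdater(G)
#     updater.update(removeStream)                     # shrink the graph ...
#     updater.update(addStream)                        # ... then restore it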
def test(G, nEdges, batchSize, epsilon, delta, size):
# find a set of nEdges to remove from G
T = graph.SpanningForest(G).generate()
(removeStream, addStream) = removeAndAddEdges(G, nEdges, tabu=T)
# remove the edges from G
updater = dynamic.GraphUpdater(G)
updater.update(removeStream)
# run the algorithms on the inital graph
bc = Betweenness(G)
print("Running bc")
bc.run()
dynBc = DynBetweenness(G, True)
print("Running dyn bc with predecessors")
dynBc.run()
apprBc = ApproxBetweenness(G, epsilon, delta)
print("Running approx bc")
apprBc.run()
dynApprBc = DynApproxBetweenness(G, epsilon, delta, True)
print("Running dyn approx bc with predecessors")
dynApprBc.run()
# apply the batches
nExperiments = nEdges // batchSize
timesBc = []
timesDynBc = []
timesApprBc = []
timesDynApprBc = []
scoresBc = []
scoresApprBc = []
for i in range(nExperiments):
batch = addStream[i*batchSize : (i+1)*batchSize]
# add the edges of batch to the graph
totalTime = 0.0
for j in range(0, batchSize):
updater.update([batch[j]])
# update the betweenness with the dynamic exact algorithm
t = stopwatch.Timer()
dynBc.update(batch[j])
totalTime += t.stop()
timesDynBc.append(totalTime)
# update the betweenness with the static exact algorithm
t = stopwatch.Timer()
bc.run()
x = t.stop()
timesBc.append(x)
print("Exact BC")
print(x)
print("Speedup Dyn BC (with preds)")
print(x/totalTime)
# update the betweenness with the static approximated algorithm
t = stopwatch.Timer()
apprBc.run()
x = t.stop()
timesApprBc.append(x)
print("ApprBC")
print(x)
# update the betweenness with the dynamic approximated algorithm
t = stopwatch.Timer()
dynApprBc.update(batch)
y = t.stop()
timesDynApprBc.append(y)
print("Speedup DynApprBC (with preds)")
print(x/y)
bcNormalized = [ k/(size*(size-1)) for k in bc.scores()]
scoresBc.append(bcNormalized)
scoresApprBc.append(dynApprBc.scores())
a = pd.Series(timesBc)
b = pd.Series(timesDynBc)
c = pd.Series(timesApprBc)
d = pd.Series(timesDynApprBc)
df1 = pd.DataFrame({"Static exact bc": a, "Dynamic exact bc" : b, "Static approx bc" : c, "Dynamic approx bc" : d})
dic2 = {}
for experiment in range(nExperiments):
a = pd.Series(scoresBc[experiment])
b = pd.Series(scoresApprBc[experiment])
dic2["Exact scores (exp. "+str(experiment)+")"] = a
dic2["Approx scores (exp. "+str(experiment)+")"] = b
df2 = pd.DataFrame(dic2)
return df1, df2
if __name__ == "__main__":
setNumberOfThreads(1)
size = 20000
for i in range(11):
batchSize = 2**i
G = generators.DorogovtsevMendesGenerator(size).generate()
cc = properties.ConnectedComponents(G)
cc.run()
if (cc.numberOfComponents() == 1) :
nEdges = batchSize * 10
epsilon = 0.05
delta = 0.1
(df1, df2) = test(G, nEdges, batchSize, epsilon, delta, size)
df1.to_csv("results/times_unweighted_size_"+str(size)+"_batch_"+str(batchSize)+".csv")
df2.to_csv("results/scores_unweighted_size_"+str(size)+"_batch_"+str(batchSize)+".csv")
else:
print("The generated graph is not connected.")
| mit |
josephcslater/scipy | scipy/interpolate/interpolate.py | 3 | 101080 | """ Classes for interpolating values.
"""
from __future__ import division, print_function, absolute_import
__all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp',
'ppform', 'lagrange', 'PPoly', 'BPoly', 'NdPPoly',
'RegularGridInterpolator', 'interpn']
import itertools
import warnings
import functools
import operator
import numpy as np
from numpy import (array, transpose, searchsorted, atleast_1d, atleast_2d,
dot, ravel, poly1d, asarray, intp)
import scipy.linalg
import scipy.special as spec
from scipy.special import comb
from scipy._lib.six import xrange, integer_types, string_types
from . import fitpack
from . import dfitpack
from . import _fitpack
from .polyint import _Interpolator1D
from . import _ppoly
from .fitpack2 import RectBivariateSpline
from .interpnd import _ndim_coords_from_arrays
from ._bsplines import make_interp_spline, BSpline
def prod(x):
"""Product of a list of numbers; ~40x faster vs np.prod for Python tuples"""
if len(x) == 0:
return 1
return functools.reduce(operator.mul, x)
def lagrange(x, w):
"""
Return a Lagrange interpolating polynomial.
Given two 1-D arrays `x` and `w,` returns the Lagrange interpolating
polynomial through the points ``(x, w)``.
Warning: This implementation is numerically unstable. Do not expect to
be able to use more than about 20 points even if they are chosen optimally.
Parameters
----------
x : array_like
`x` represents the x-coordinates of a set of datapoints.
w : array_like
`w` represents the y-coordinates of a set of datapoints, i.e. f(`x`).
Returns
-------
lagrange : numpy.poly1d instance
The Lagrange interpolating polynomial.
"""
M = len(x)
p = poly1d(0.0)
for j in xrange(M):
pt = poly1d(w[j])
for k in xrange(M):
if k == j:
continue
fac = x[j]-x[k]
pt *= poly1d([1.0, -x[k]])/fac
p += pt
return p
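# A minimal usage sketch for `lagrange` (illustrative only): interpolating
# three samples of y = x**2 reproduces the quadratic exactly, so the returned
# poly1d matches the underlying function away from the nodes as well.
#
#     x = np.array([0.0, 1.0, 2.0])
#     w = x ** 2
#     poly = lagrange(x, w)
#     poly(3.0)    # ~9.0, up to floating point rounding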
# !! Need to find argument for keeping initialize. If it isn't
# !! found, get rid of it!
class interp2d(object):
"""
interp2d(x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=nan)
Interpolate over a 2-D grid.
`x`, `y` and `z` are arrays of values used to approximate some function
f: ``z = f(x, y)``. This class returns a function whose call method uses
spline interpolation to find the value of new points.
If `x` and `y` represent a regular grid, consider using
RectBivariateSpline.
Note that calling `interp2d` with NaNs present in input values results in
undefined behaviour.
Methods
-------
__call__
Parameters
----------
x, y : array_like
Arrays defining the data point coordinates.
If the points lie on a regular grid, `x` can specify the column
coordinates and `y` the row coordinates, for example::
>>> x = [0,1,2]; y = [0,3]; z = [[1,2,3], [4,5,6]]
Otherwise, `x` and `y` must specify the full coordinates for each
point, for example::
>>> x = [0,1,2,0,1,2]; y = [0,0,0,3,3,3]; z = [1,2,3,4,5,6]
If `x` and `y` are multi-dimensional, they are flattened before use.
z : array_like
The values of the function to interpolate at the data points. If
`z` is a multi-dimensional array, it is flattened before use. The
length of a flattened `z` array is either
len(`x`)*len(`y`) if `x` and `y` specify the column and row coordinates
or ``len(z) == len(x) == len(y)`` if `x` and `y` specify coordinates
for each point.
kind : {'linear', 'cubic', 'quintic'}, optional
The kind of spline interpolation to use. Default is 'linear'.
copy : bool, optional
If True, the class makes internal copies of x, y and z.
If False, references may be used. The default is to copy.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data (x,y), a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If omitted (None), values outside
the domain are extrapolated.
See Also
--------
RectBivariateSpline :
Much faster 2D interpolation if your input data is on a grid
bisplrep, bisplev :
Spline interpolation based on FITPACK
BivariateSpline : a more recent wrapper of the FITPACK routines
interp1d : one dimension version of this function
Notes
-----
The minimum number of data points required along the interpolation
axis is ``(k+1)**2``, with k=1 for linear, k=3 for cubic and k=5 for
quintic interpolation.
The interpolator is constructed by `bisplrep`, with a smoothing factor
of 0. If more control over smoothing is needed, `bisplrep` should be
used directly.
Examples
--------
Construct a 2-D grid and interpolate on it:
>>> from scipy import interpolate
>>> x = np.arange(-5.01, 5.01, 0.25)
>>> y = np.arange(-5.01, 5.01, 0.25)
>>> xx, yy = np.meshgrid(x, y)
>>> z = np.sin(xx**2+yy**2)
>>> f = interpolate.interp2d(x, y, z, kind='cubic')
Now use the obtained interpolation function and plot the result:
>>> import matplotlib.pyplot as plt
>>> xnew = np.arange(-5.01, 5.01, 1e-2)
>>> ynew = np.arange(-5.01, 5.01, 1e-2)
>>> znew = f(xnew, ynew)
>>> plt.plot(x, z[0, :], 'ro-', xnew, znew[0, :], 'b-')
>>> plt.show()
"""
def __init__(self, x, y, z, kind='linear', copy=True, bounds_error=False,
fill_value=None):
x = ravel(x)
y = ravel(y)
z = asarray(z)
rectangular_grid = (z.size == len(x) * len(y))
if rectangular_grid:
if z.ndim == 2:
if z.shape != (len(y), len(x)):
raise ValueError("When on a regular grid with x.size = m "
"and y.size = n, if z.ndim == 2, then z "
"must have shape (n, m)")
if not np.all(x[1:] >= x[:-1]):
j = np.argsort(x)
x = x[j]
z = z[:, j]
if not np.all(y[1:] >= y[:-1]):
j = np.argsort(y)
y = y[j]
z = z[j, :]
z = ravel(z.T)
else:
z = ravel(z)
if len(x) != len(y):
raise ValueError(
"x and y must have equal lengths for non rectangular grid")
if len(z) != len(x):
raise ValueError(
"Invalid length for input z for non rectangular grid")
try:
kx = ky = {'linear': 1,
'cubic': 3,
'quintic': 5}[kind]
except KeyError:
raise ValueError("Unsupported interpolation type.")
if not rectangular_grid:
# TODO: surfit is really not meant for interpolation!
self.tck = fitpack.bisplrep(x, y, z, kx=kx, ky=ky, s=0.0)
else:
nx, tx, ny, ty, c, fp, ier = dfitpack.regrid_smth(
x, y, z, None, None, None, None,
kx=kx, ky=ky, s=0.0)
self.tck = (tx[:nx], ty[:ny], c[:(nx - kx - 1) * (ny - ky - 1)],
kx, ky)
self.bounds_error = bounds_error
self.fill_value = fill_value
self.x, self.y, self.z = [array(a, copy=copy) for a in (x, y, z)]
self.x_min, self.x_max = np.amin(x), np.amax(x)
self.y_min, self.y_max = np.amin(y), np.amax(y)
def __call__(self, x, y, dx=0, dy=0, assume_sorted=False):
"""Interpolate the function.
Parameters
----------
x : 1D array
x-coordinates of the mesh on which to interpolate.
y : 1D array
y-coordinates of the mesh on which to interpolate.
dx : int >= 0, < kx
Order of partial derivatives in x.
dy : int >= 0, < ky
Order of partial derivatives in y.
assume_sorted : bool, optional
If False, values of `x` and `y` can be in any order and they are
sorted first.
If True, `x` and `y` have to be arrays of monotonically
increasing values.
Returns
-------
z : 2D array with shape (len(y), len(x))
The interpolated values.
"""
x = atleast_1d(x)
y = atleast_1d(y)
if x.ndim != 1 or y.ndim != 1:
raise ValueError("x and y should both be 1-D arrays")
if not assume_sorted:
x = np.sort(x)
y = np.sort(y)
if self.bounds_error or self.fill_value is not None:
out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
any_out_of_bounds_x = np.any(out_of_bounds_x)
any_out_of_bounds_y = np.any(out_of_bounds_y)
if self.bounds_error and (any_out_of_bounds_x or any_out_of_bounds_y):
raise ValueError("Values out of range; x must be in %r, y in %r"
% ((self.x_min, self.x_max),
(self.y_min, self.y_max)))
z = fitpack.bisplev(x, y, self.tck, dx, dy)
z = atleast_2d(z)
z = transpose(z)
if self.fill_value is not None:
if any_out_of_bounds_x:
z[:, out_of_bounds_x] = self.fill_value
if any_out_of_bounds_y:
z[out_of_bounds_y, :] = self.fill_value
if len(z) == 1:
z = z[0]
return array(z)
def _check_broadcast_up_to(arr_from, shape_to, name):
"""Helper to check that arr_from broadcasts up to shape_to"""
shape_from = arr_from.shape
if len(shape_to) >= len(shape_from):
for t, f in zip(shape_to[::-1], shape_from[::-1]):
if f != 1 and f != t:
break
else: # all checks pass, do the upcasting that we need later
if arr_from.size != 1 and arr_from.shape != shape_to:
arr_from = np.ones(shape_to, arr_from.dtype) * arr_from
return arr_from.ravel()
# at least one check failed
raise ValueError('%s argument must be able to broadcast up '
'to shape %s but had shape %s'
% (name, shape_to, shape_from))
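# A small behavioural sketch of _check_broadcast_up_to, derived from the code
# above (the shapes are arbitrary illustrations):
#
#     _check_broadcast_up_to(np.asarray(5.0), (3, 2), 'fill_value')
#     # -> array([5.0]); a size-1 value always broadcasts, returned ravelled
#     _check_broadcast_up_to(np.ones(2), (3, 2), 'fill_value')
#     # -> shape (6,); upcast to (3, 2) first, then ravelled
#     _check_broadcast_up_to(np.ones(4), (3, 2), 'fill_value')
#     # -> raises ValueError; 4 does not match the trailing dimension 2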
def _do_extrapolate(fill_value):
"""Helper to check if fill_value == "extrapolate" without warnings"""
return (isinstance(fill_value, string_types) and
fill_value == 'extrapolate')
class interp1d(_Interpolator1D):
"""
Interpolate a 1-D function.
`x` and `y` are arrays of values used to approximate some function f:
``y = f(x)``. This class returns a function whose call method uses
interpolation to find the value of new points.
Note that calling `interp1d` with NaNs present in input values results in
undefined behaviour.
Parameters
----------
x : (N,) array_like
A 1-D array of real values.
y : (...,N,...) array_like
A N-D array of real values. The length of `y` along the interpolation
axis must be equal to the length of `x`.
kind : str or int, optional
Specifies the kind of interpolation as a string
('linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'
where 'zero', 'slinear', 'quadratic' and 'cubic' refer to a spline
interpolation of zeroth, first, second or third order) or as an
integer specifying the order of the spline interpolator to use.
Default is 'linear'.
axis : int, optional
Specifies the axis of `y` along which to interpolate.
Interpolation defaults to the last axis of `y`.
copy : bool, optional
If True, the class makes internal copies of x and y.
If False, references to `x` and `y` are used. The default is to copy.
bounds_error : bool, optional
If True, a ValueError is raised any time interpolation is attempted on
a value outside of the range of x (where extrapolation is
necessary). If False, out of bounds values are assigned `fill_value`.
By default, an error is raised unless `fill_value="extrapolate"`.
fill_value : array-like or (array-like, array_like) or "extrapolate", optional
- if a ndarray (or float), this value will be used to fill in for
requested points outside of the data range. If not provided, then
the default is NaN. The array-like must broadcast properly to the
dimensions of the non-interpolation axes.
- If a two-element tuple, then the first element is used as a
fill value for ``x_new < x[0]`` and the second element is used for
``x_new > x[-1]``. Anything that is not a 2-element tuple (e.g.,
list or ndarray, regardless of shape) is taken to be a single
array-like argument meant to be used for both bounds as
``below, above = fill_value, fill_value``.
.. versionadded:: 0.17.0
- If "extrapolate", then points outside the data range will be
extrapolated.
.. versionadded:: 0.17.0
assume_sorted : bool, optional
If False, values of `x` can be in any order and they are sorted first.
If True, `x` has to be an array of monotonically increasing values.
Methods
-------
__call__
See Also
--------
splrep, splev
Spline interpolation/smoothing based on FITPACK.
UnivariateSpline : An object-oriented wrapper of the FITPACK routines.
interp2d : 2-D interpolation
Examples
--------
>>> import matplotlib.pyplot as plt
>>> from scipy import interpolate
>>> x = np.arange(0, 10)
>>> y = np.exp(-x/3.0)
>>> f = interpolate.interp1d(x, y)
>>> xnew = np.arange(0, 9, 0.1)
>>> ynew = f(xnew) # use interpolation function returned by `interp1d`
>>> plt.plot(x, y, 'o', xnew, ynew, '-')
>>> plt.show()
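    The bounds handling described in the Parameters section can be selected in
    the same way; this is only a sketch of the call signatures:

    >>> f_extrap = interpolate.interp1d(x, y, fill_value="extrapolate")
    >>> f_clamped = interpolate.interp1d(x, y, bounds_error=False,
    ...                                  fill_value=(y[0], y[-1]))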
"""
def __init__(self, x, y, kind='linear', axis=-1,
copy=True, bounds_error=None, fill_value=np.nan,
assume_sorted=False):
""" Initialize a 1D linear interpolation class."""
_Interpolator1D.__init__(self, x, y, axis=axis)
self.bounds_error = bounds_error # used by fill_value setter
self.copy = copy
if kind in ['zero', 'slinear', 'quadratic', 'cubic']:
order = {'nearest': 0, 'zero': 0, 'slinear': 1,
'quadratic': 2, 'cubic': 3}[kind]
kind = 'spline'
elif isinstance(kind, int):
order = kind
kind = 'spline'
elif kind not in ('linear', 'nearest'):
raise NotImplementedError("%s is unsupported: Use fitpack "
"routines for other types." % kind)
x = array(x, copy=self.copy)
y = array(y, copy=self.copy)
if not assume_sorted:
ind = np.argsort(x)
x = x[ind]
y = np.take(y, ind, axis=axis)
if x.ndim != 1:
raise ValueError("the x array must have exactly one dimension.")
if y.ndim == 0:
raise ValueError("the y array must have at least one dimension.")
# Force-cast y to a floating-point type, if it's not yet one
if not issubclass(y.dtype.type, np.inexact):
y = y.astype(np.float_)
# Backward compatibility
self.axis = axis % y.ndim
# Interpolation goes internally along the first axis
self.y = y
self._y = self._reshape_yi(self.y)
self.x = x
del y, x # clean up namespace to prevent misuse; use attributes
self._kind = kind
self.fill_value = fill_value # calls the setter, can modify bounds_err
# Adjust to interpolation kind; store reference to *unbound*
# interpolation methods, in order to avoid circular references to self
# stored in the bound instance methods, and therefore delayed garbage
# collection. See: http://docs.python.org/2/reference/datamodel.html
if kind in ('linear', 'nearest'):
# Make a "view" of the y array that is rotated to the interpolation
# axis.
minval = 2
if kind == 'nearest':
# Do division before addition to prevent possible integer
# overflow
self.x_bds = self.x / 2.0
self.x_bds = self.x_bds[1:] + self.x_bds[:-1]
self._call = self.__class__._call_nearest
else:
# Check if we can delegate to numpy.interp (2x-10x faster).
cond = self.x.dtype == np.float_ and self.y.dtype == np.float_
cond = cond and self.y.ndim == 1
cond = cond and not _do_extrapolate(fill_value)
if cond:
self._call = self.__class__._call_linear_np
else:
self._call = self.__class__._call_linear
else:
minval = order + 1
rewrite_nan = False
xx, yy = self.x, self._y
if order > 1:
# Quadratic or cubic spline. If input contains even a single
# nan, then the output is all nans. We cannot just feed data
# with nans to make_interp_spline because it calls LAPACK.
# So, we make up a bogus x and y with no nans and use it
# to get the correct shape of the output, which we then fill
# with nans.
# For slinear or zero order spline, we just pass nans through.
if np.isnan(self.x).any():
xx = np.linspace(min(self.x), max(self.x), len(self.x))
rewrite_nan = True
if np.isnan(self._y).any():
yy = np.ones_like(self._y)
rewrite_nan = True
self._spline = make_interp_spline(xx, yy, k=order,
check_finite=False)
if rewrite_nan:
self._call = self.__class__._call_nan_spline
else:
self._call = self.__class__._call_spline
if len(self.x) < minval:
raise ValueError("x and y arrays must have at "
"least %d entries" % minval)
@property
def fill_value(self):
# backwards compat: mimic a public attribute
return self._fill_value_orig
@fill_value.setter
def fill_value(self, fill_value):
# extrapolation only works for nearest neighbor and linear methods
if _do_extrapolate(fill_value):
if self.bounds_error:
raise ValueError("Cannot extrapolate and raise "
"at the same time.")
self.bounds_error = False
self._extrapolate = True
else:
broadcast_shape = (self.y.shape[:self.axis] +
self.y.shape[self.axis + 1:])
if len(broadcast_shape) == 0:
broadcast_shape = (1,)
# it's either a pair (_below_range, _above_range) or a single value
# for both above and below range
if isinstance(fill_value, tuple) and len(fill_value) == 2:
below_above = [np.asarray(fill_value[0]),
np.asarray(fill_value[1])]
names = ('fill_value (below)', 'fill_value (above)')
for ii in range(2):
below_above[ii] = _check_broadcast_up_to(
below_above[ii], broadcast_shape, names[ii])
else:
fill_value = np.asarray(fill_value)
below_above = [_check_broadcast_up_to(
fill_value, broadcast_shape, 'fill_value')] * 2
self._fill_value_below, self._fill_value_above = below_above
self._extrapolate = False
if self.bounds_error is None:
self.bounds_error = True
# backwards compat: fill_value was a public attr; make it writeable
self._fill_value_orig = fill_value
def _call_linear_np(self, x_new):
# Note that out-of-bounds values are taken care of in self._evaluate
return np.interp(x_new, self.x, self.y)
def _call_linear(self, x_new):
        # 2. Find where in the original data the values to interpolate
# would be inserted.
# Note: If x_new[n] == x[m], then m is returned by searchsorted.
x_new_indices = searchsorted(self.x, x_new)
# 3. Clip x_new_indices so that they are within the range of
# self.x indices and at least 1. Removes mis-interpolation
# of x_new[n] = x[0]
x_new_indices = x_new_indices.clip(1, len(self.x)-1).astype(int)
# 4. Calculate the slope of regions that each x_new value falls in.
lo = x_new_indices - 1
hi = x_new_indices
x_lo = self.x[lo]
x_hi = self.x[hi]
y_lo = self._y[lo]
y_hi = self._y[hi]
# Note that the following two expressions rely on the specifics of the
# broadcasting semantics.
slope = (y_hi - y_lo) / (x_hi - x_lo)[:, None]
# 5. Calculate the actual value for each entry in x_new.
y_new = slope*(x_new - x_lo)[:, None] + y_lo
return y_new
def _call_nearest(self, x_new):
""" Find nearest neighbour interpolated y_new = f(x_new)."""
# 2. Find where in the averaged data the values to interpolate
# would be inserted.
# Note: use side='left' (right) to searchsorted() to define the
# halfway point to be nearest to the left (right) neighbour
x_new_indices = searchsorted(self.x_bds, x_new, side='left')
# 3. Clip x_new_indices so that they are within the range of x indices.
x_new_indices = x_new_indices.clip(0, len(self.x)-1).astype(intp)
# 4. Calculate the actual value for each entry in x_new.
y_new = self._y[x_new_indices]
return y_new
def _call_spline(self, x_new):
return self._spline(x_new)
def _call_nan_spline(self, x_new):
out = self._spline(x_new)
out[...] = np.nan
return out
def _evaluate(self, x_new):
# 1. Handle values in x_new that are outside of x. Throw error,
        # or return a list of mask arrays indicating the out-of-bounds values.
# The behavior is set by the bounds_error variable.
x_new = asarray(x_new)
y_new = self._call(self, x_new)
if not self._extrapolate:
below_bounds, above_bounds = self._check_bounds(x_new)
if len(y_new) > 0:
# Note fill_value must be broadcast up to the proper size
# and flattened to work here
y_new[below_bounds] = self._fill_value_below
y_new[above_bounds] = self._fill_value_above
return y_new
def _check_bounds(self, x_new):
"""Check the inputs for being in the bounds of the interpolated data.
Parameters
----------
x_new : array
Returns
-------
out_of_bounds : bool array
The mask on x_new of values that are out of the bounds.
"""
# If self.bounds_error is True, we raise an error if any x_new values
# fall outside the range of x. Otherwise, we return an array indicating
# which values are outside the boundary region.
below_bounds = x_new < self.x[0]
above_bounds = x_new > self.x[-1]
# !! Could provide more information about which values are out of bounds
if self.bounds_error and below_bounds.any():
raise ValueError("A value in x_new is below the interpolation "
"range.")
if self.bounds_error and above_bounds.any():
raise ValueError("A value in x_new is above the interpolation "
"range.")
# !! Should we emit a warning if some values are out of bounds?
# !! matlab does not.
return below_bounds, above_bounds
class _PPolyBase(object):
"""Base class for piecewise polynomials."""
__slots__ = ('c', 'x', 'extrapolate', 'axis')
def __init__(self, c, x, extrapolate=None, axis=0):
self.c = np.asarray(c)
self.x = np.ascontiguousarray(x, dtype=np.float64)
if extrapolate is None:
extrapolate = True
elif extrapolate != 'periodic':
extrapolate = bool(extrapolate)
self.extrapolate = extrapolate
if self.c.ndim < 2:
raise ValueError("Coefficients array must be at least "
"2-dimensional.")
if not (0 <= axis < self.c.ndim - 1):
raise ValueError("axis=%s must be between 0 and %s" %
(axis, self.c.ndim-1))
self.axis = axis
if axis != 0:
# roll the interpolation axis to be the first one in self.c
# More specifically, the target shape for self.c is (k, m, ...),
# and axis !=0 means that we have c.shape (..., k, m, ...)
# ^
# axis
# So we roll two of them.
self.c = np.rollaxis(self.c, axis+1)
self.c = np.rollaxis(self.c, axis+1)
if self.x.ndim != 1:
raise ValueError("x must be 1-dimensional")
if self.x.size < 2:
raise ValueError("at least 2 breakpoints are needed")
if self.c.ndim < 2:
raise ValueError("c must have at least 2 dimensions")
if self.c.shape[0] == 0:
raise ValueError("polynomial must be at least of order 0")
if self.c.shape[1] != self.x.size-1:
raise ValueError("number of coefficients != len(x)-1")
dx = np.diff(self.x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` must be strictly increasing or decreasing.")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
@classmethod
def construct_fast(cls, c, x, extrapolate=None, axis=0):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
self.axis = axis
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _ensure_c_contiguous(self):
"""
c and x may be modified by the user. The Cython code expects
that they are C contiguous.
"""
if not self.x.flags.c_contiguous:
self.x = self.x.copy()
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
def extend(self, c, x, right=None):
"""
Add additional breakpoints and coefficients to the polynomial.
Parameters
----------
c : ndarray, size (k, m, ...)
Additional coefficients for polynomials in intervals. Note that
the first additional interval will be formed using one of the
`self.x` end points.
x : ndarray, size (m,)
Additional breakpoints. Must be sorted in the same order as
`self.x` and either to the right or to the left of the current
breakpoints.
right
Deprecated argument. Has no effect.
.. deprecated:: 0.19
"""
if right is not None:
warnings.warn("`right` is deprecated and will be removed.")
c = np.asarray(c)
x = np.asarray(x)
if c.ndim < 2:
raise ValueError("invalid dimensions for c")
if x.ndim != 1:
raise ValueError("invalid dimensions for x")
if x.shape[0] != c.shape[1]:
raise ValueError("x and c have incompatible sizes")
if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim:
raise ValueError("c and self.c have incompatible shapes")
if c.size == 0:
return
dx = np.diff(x)
if not (np.all(dx >= 0) or np.all(dx <= 0)):
raise ValueError("`x` is not sorted.")
if self.x[-1] >= self.x[0]:
if not x[-1] >= x[0]:
                raise ValueError("`x` is in a different order "
                                 "than `self.x`.")
if x[0] >= self.x[-1]:
action = 'append'
elif x[-1] <= self.x[0]:
action = 'prepend'
else:
                raise ValueError("`x` is neither to the left nor to the right "
                                 "of `self.x`.")
else:
if not x[-1] <= x[0]:
                raise ValueError("`x` is in a different order "
                                 "than `self.x`.")
if x[0] <= self.x[-1]:
action = 'append'
elif x[-1] >= self.x[0]:
action = 'prepend'
else:
                raise ValueError("`x` is neither to the left nor to the right "
                                 "of `self.x`.")
dtype = self._get_dtype(c.dtype)
k2 = max(c.shape[0], self.c.shape[0])
c2 = np.zeros((k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:],
dtype=dtype)
if action == 'append':
c2[k2-self.c.shape[0]:, :self.c.shape[1]] = self.c
c2[k2-c.shape[0]:, self.c.shape[1]:] = c
self.x = np.r_[self.x, x]
elif action == 'prepend':
c2[k2-self.c.shape[0]:, :c.shape[1]] = c
c2[k2-c.shape[0]:, c.shape[1]:] = self.c
self.x = np.r_[x, self.x]
self.c = c2
def __call__(self, x, nu=0, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative.
Parameters
----------
x : array_like
Points to evaluate the interpolant at.
nu : int, optional
Order of derivative to evaluate. Must be non-negative.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
y : array_like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
x = np.asarray(x)
x_shape, x_ndim = x.shape, x.ndim
x = np.ascontiguousarray(x.ravel(), dtype=np.float_)
# With periodic extrapolation we map x to the segment
# [self.x[0], self.x[-1]].
if extrapolate == 'periodic':
x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0])
extrapolate = False
out = np.empty((len(x), prod(self.c.shape[2:])), dtype=self.c.dtype)
self._ensure_c_contiguous()
self._evaluate(x, nu, extrapolate, out)
out = out.reshape(x_shape + self.c.shape[2:])
if self.axis != 0:
# transpose to move the calculated values to the interpolation axis
l = list(range(out.ndim))
l = l[x_ndim:x_ndim+self.axis] + l[:x_ndim] + l[x_ndim+self.axis:]
out = out.transpose(l)
return out
class PPoly(_PPolyBase):
"""
Piecewise polynomial in terms of coefficients and breakpoints
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
local power basis::
S = sum(c[m, i] * (xp - x[i])**(k-m) for m in range(k+1))
where ``k`` is the degree of the polynomial.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
derivative
antiderivative
integrate
solve
roots
extend
from_spline
from_bernstein_basis
construct_fast
See also
--------
BPoly : piecewise polynomials in the Bernstein basis
Notes
-----
High-order polynomials in the power basis can be numerically
unstable. Precision problems can start to appear for orders
larger than 20-30.
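    Examples
    --------
    A minimal construction sketch (illustrative only): a single interval
    ``[0, 1]`` with local power-basis coefficients ``[1, 0, 0]`` represents
    ``(x - 0)**2``:

    >>> from scipy.interpolate import PPoly
    >>> pp = PPoly(np.array([[1.0], [0.0], [0.0]]), [0, 1])
    >>> y = pp(0.5)   # y == 0.25 for this polynomial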
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k - n representing the derivative
of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if nu < 0:
return self.antiderivative(-nu)
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
c2 = self.c[:-nu, :].copy()
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[0], 0, -1), nu)
c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)]
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
c = np.zeros((self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:],
dtype=self.c.dtype)
c[:-nu] = self.c
# divide by the correct rising factorials
factor = spec.poch(np.arange(self.c.shape[0], 0, -1), nu)
c[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
self._ensure_c_contiguous()
_ppoly.fix_continuity(c.reshape(c.shape[0], c.shape[1], -1),
self.x, nu - 1)
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
# construct a compatible polynomial
return self.construct_fast(c, self.x, extrapolate, self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used.
If None (default), use `self.extrapolate`.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over [a, b]
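        Examples
        --------
        A small sketch (illustrative only), integrating the single-interval
        power-basis polynomial ``x**2`` over ``[0, 1]``:

        >>> from scipy.interpolate import PPoly
        >>> pp = PPoly(np.array([[1.0], [0.0], [0.0]]), [0, 1])
        >>> ig = pp.integrate(0, 1)   # exact value is 1/3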
"""
if extrapolate is None:
extrapolate = self.extrapolate
# Swap integration bounds if needed
sign = 1
if b < a:
a, b = b, a
sign = -1
range_int = np.empty((prod(self.c.shape[2:]),), dtype=self.c.dtype)
self._ensure_c_contiguous()
# Compute the integral.
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
if n_periods > 0:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xe, False, out=range_int)
range_int *= n_periods
else:
range_int.fill(0)
# Map a to [xs, xe], b is always a + left.
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
# over [a, xe] and from xs to what is remained.
remainder_int = np.empty_like(range_int)
if b <= xe:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, xe, False, out=remainder_int)
range_int += remainder_int
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, xs, xs + left + a - xe, False, out=remainder_int)
range_int += remainder_int
else:
_ppoly.integrate(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, a, b, bool(extrapolate), out=range_int)
# Return
range_int *= sign
return range_int.reshape(self.c.shape[2:])
def solve(self, y=0., discontinuity=True, extrapolate=None):
"""
        Find real solutions of the equation ``pp(x) == y``.
Parameters
----------
y : float, optional
Right-hand side. Default is zero.
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
Notes
-----
This routine works only on real-valued polynomials.
If the piecewise polynomial contains sections that are
identically zero, the root list will contain the start point
of the corresponding interval, followed by a ``nan`` value.
If the polynomial is discontinuous across a breakpoint, and
there is a sign change across the breakpoint, this is reported
        if the `discontinuity` parameter is True.
Examples
--------
Finding roots of ``[x**2 - 1, (x - 1)**2]`` defined on intervals
``[-2, 1], [1, 2]``:
>>> from scipy.interpolate import PPoly
>>> pp = PPoly(np.array([[1, -4, 3], [1, 0, 0]]).T, [-2, 1, 2])
>>> pp.roots()
array([-1., 1.])
"""
if extrapolate is None:
extrapolate = self.extrapolate
self._ensure_c_contiguous()
if np.issubdtype(self.c.dtype, np.complexfloating):
raise ValueError("Root finding is only for "
"real-valued polynomials")
y = float(y)
r = _ppoly.real_roots(self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, y, bool(discontinuity),
bool(extrapolate))
if self.c.ndim == 2:
return r[0]
else:
r2 = np.empty(prod(self.c.shape[2:]), dtype=object)
# this for-loop is equivalent to ``r2[...] = r``, but that's broken
# in numpy 1.6.0
for ii, root in enumerate(r):
r2[ii] = root
return r2.reshape(self.c.shape[2:])
def roots(self, discontinuity=True, extrapolate=None):
"""
        Find real roots of the piecewise polynomial.
Parameters
----------
discontinuity : bool, optional
Whether to report sign changes across discontinuities at
breakpoints as roots.
extrapolate : {bool, 'periodic', None}, optional
If bool, determines whether to return roots from the polynomial
extrapolated based on first and last intervals, 'periodic' works
the same as False. If None (default), use `self.extrapolate`.
Returns
-------
roots : ndarray
Roots of the polynomial(s).
If the PPoly object describes multiple polynomials, the
return value is an object array whose each element is an
ndarray containing the roots.
See Also
--------
PPoly.solve
"""
return self.solve(0, discontinuity, extrapolate)
@classmethod
def from_spline(cls, tck, extrapolate=None):
"""
Construct a piecewise polynomial from a spline
Parameters
----------
tck
A spline, as returned by `splrep` or a BSpline object.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
if isinstance(tck, BSpline):
t, c, k = tck.tck
if extrapolate is None:
extrapolate = tck.extrapolate
else:
t, c, k = tck
cvals = np.empty((k + 1, len(t)-1), dtype=c.dtype)
for m in xrange(k, -1, -1):
y = fitpack.splev(t[:-1], tck, der=m)
cvals[k - m, :] = y/spec.gamma(m+1)
return cls.construct_fast(cvals, t, extrapolate)
@classmethod
def from_bernstein_basis(cls, bp, extrapolate=None):
"""
Construct a piecewise polynomial in the power basis
from a polynomial in Bernstein basis.
Parameters
----------
bp : BPoly
A Bernstein basis polynomial, as created by BPoly
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
dx = np.diff(bp.x)
k = bp.c.shape[0] - 1 # polynomial order
rest = (None,)*(bp.c.ndim-2)
c = np.zeros_like(bp.c)
for a in range(k+1):
factor = (-1)**a * comb(k, a) * bp.c[a]
for s in range(a, k+1):
val = comb(k-a, s-a) * (-1)**s
c[k-s] += factor * val / dx[(slice(None),)+rest]**s
if extrapolate is None:
extrapolate = bp.extrapolate
return cls.construct_fast(c, bp.x, extrapolate, bp.axis)
class BPoly(_PPolyBase):
"""Piecewise polynomial in terms of coefficients and breakpoints.
The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the
Bernstein polynomial basis::
S = sum(c[a, i] * b(a, k; x) for a in range(k+1)),
where ``k`` is the degree of the polynomial, and::
b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a),
with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial
coefficient.
Parameters
----------
c : ndarray, shape (k, m, ...)
Polynomial coefficients, order `k` and `m` intervals
x : ndarray, shape (m+1,)
Polynomial breakpoints. Must be sorted in either increasing or
decreasing order.
extrapolate : bool, optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs. If 'periodic',
periodic extrapolation is used. Default is True.
axis : int, optional
Interpolation axis. Default is zero.
Attributes
----------
x : ndarray
Breakpoints.
c : ndarray
Coefficients of the polynomials. They are reshaped
to a 3-dimensional array with the last dimension representing
the trailing dimensions of the original coefficient array.
axis : int
Interpolation axis.
Methods
-------
__call__
extend
derivative
antiderivative
integrate
construct_fast
from_power_basis
from_derivatives
See also
--------
PPoly : piecewise polynomials in the power basis
Notes
-----
Properties of Bernstein polynomials are well documented in the literature.
Here's a non-exhaustive list:
.. [1] http://en.wikipedia.org/wiki/Bernstein_polynomial
.. [2] Kenneth I. Joy, Bernstein polynomials,
http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf
.. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems,
vol 2011, article ID 829546, :doi:`10.1155/2011/829543`.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> x = [0, 1]
>>> c = [[1], [2], [3]]
>>> bp = BPoly(c, x)
This creates a 2nd order polynomial
.. math::
B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + 3 \\times b_{2, 2}(x) \\\\
= 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2
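    Evaluating the polynomial above at ``x = 0.5`` (only a sketch; the exact
    printed form depends on NumPy's formatting):

    >>> y = bp(0.5)   # 1*0.25 + 2*(2*0.5*0.5) + 3*0.25 == 2.0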
"""
def _evaluate(self, x, nu, extrapolate, out):
_ppoly.evaluate_bernstein(
self.c.reshape(self.c.shape[0], self.c.shape[1], -1),
self.x, x, nu, bool(extrapolate), out)
def derivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : int, optional
Order of derivative to evaluate. Default is 1, i.e. compute the
first derivative. If negative, the antiderivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k - nu representing the derivative of
this polynomial.
"""
if nu < 0:
return self.antiderivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.derivative()
return bp
# reduce order
if nu == 0:
c2 = self.c.copy()
else:
# For a polynomial
# B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x),
# we use the fact that
# b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ),
# which leads to
# B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1}
#
# finally, for an interval [y, y + dy] with dy != 1,
# we need to correct for an extra power of dy
rest = (None,)*(self.c.ndim-2)
k = self.c.shape[0] - 1
dx = np.diff(self.x)[(None, slice(None))+rest]
c2 = k * np.diff(self.c, axis=0) / dx
if c2.shape[0] == 0:
# derivative of order 0 is zero
c2 = np.zeros((1,) + c2.shape[1:], dtype=c2.dtype)
# construct a compatible polynomial
return self.construct_fast(c2, self.x, self.extrapolate, self.axis)
def antiderivative(self, nu=1):
"""
Construct a new piecewise polynomial representing the antiderivative.
Parameters
----------
nu : int, optional
Order of antiderivative to evaluate. Default is 1, i.e. compute
the first integral. If negative, the derivative is returned.
Returns
-------
bp : BPoly
Piecewise polynomial of order k + nu representing the
antiderivative of this polynomial.
Notes
-----
If antiderivative is computed and ``self.extrapolate='periodic'``,
it will be set to False for the returned instance. This is done because
the antiderivative is no longer periodic and its correct evaluation
outside of the initially given x interval is difficult.
"""
if nu <= 0:
return self.derivative(-nu)
if nu > 1:
bp = self
for k in range(nu):
bp = bp.antiderivative()
return bp
# Construct the indefinite integrals on individual intervals
c, x = self.c, self.x
k = c.shape[0]
c2 = np.zeros((k+1,) + c.shape[1:], dtype=c.dtype)
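        # Bernstein antiderivative: the new coefficients are cumulative sums of
        # the old ones divided by (degree + 1) (= k here), rescaled below by the
        # interval lengths.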
c2[1:, ...] = np.cumsum(c, axis=0) / k
delta = x[1:] - x[:-1]
c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)]
# Now fix continuity: on the very first interval, take the integration
# constant to be zero; on an interval [x_j, x_{j+1}) with j>0,
# the integration constant is then equal to the jump of the `bp` at x_j.
# The latter is given by the coefficient of B_{n+1, n+1}
# *on the previous interval* (other B. polynomials are zero at the
# breakpoint). Finally, use the fact that BPs form a partition of unity.
c2[:,1:] += np.cumsum(c2[k, :], axis=0)[:-1]
if self.extrapolate == 'periodic':
extrapolate = False
else:
extrapolate = self.extrapolate
return self.construct_fast(c2, x, extrapolate, axis=self.axis)
def integrate(self, a, b, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
a : float
Lower integration bound
b : float
Upper integration bound
extrapolate : {bool, 'periodic', None}, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. If 'periodic', periodic
extrapolation is used. If None (default), use `self.extrapolate`.
Returns
-------
array_like
Definite integral of the piecewise polynomial over [a, b]
"""
# XXX: can probably use instead the fact that
# \int_0^{1} B_{j, n}(x) \dx = 1/(n+1)
ib = self.antiderivative()
if extrapolate is None:
extrapolate = self.extrapolate
        # ib.extrapolate shouldn't be 'periodic'; it is converted to
        # False for 'periodic' in the antiderivative() call.
if extrapolate != 'periodic':
ib.extrapolate = extrapolate
if extrapolate == 'periodic':
# Split the integral into the part over period (can be several
# of them) and the remaining part.
# For simplicity and clarity convert to a <= b case.
if a <= b:
sign = 1
else:
a, b = b, a
sign = -1
xs, xe = self.x[0], self.x[-1]
period = xe - xs
interval = b - a
n_periods, left = divmod(interval, period)
res = n_periods * (ib(xe) - ib(xs))
# Map a and b to [xs, xe].
a = xs + (a - xs) % period
b = a + left
# If b <= xe then we need to integrate over [a, b], otherwise
            # over [a, xe] and then from xs over the remaining part.
if b <= xe:
res += ib(b) - ib(a)
else:
res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs)
return sign * res
else:
return ib(b) - ib(a)
def extend(self, c, x, right=None):
k = max(self.c.shape[0], c.shape[0])
self.c = self._raise_degree(self.c, k - self.c.shape[0])
c = self._raise_degree(c, k - c.shape[0])
return _PPolyBase.extend(self, c, x, right)
extend.__doc__ = _PPolyBase.extend.__doc__
@classmethod
def from_power_basis(cls, pp, extrapolate=None):
"""
Construct a piecewise polynomial in Bernstein basis
from a power basis polynomial.
Parameters
----------
pp : PPoly
A piecewise polynomial in the power basis
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
"""
dx = np.diff(pp.x)
k = pp.c.shape[0] - 1 # polynomial order
rest = (None,)*(pp.c.ndim-2)
c = np.zeros_like(pp.c)
for a in range(k+1):
factor = pp.c[a] / comb(k, k-a) * dx[(slice(None),)+rest]**(k-a)
for j in range(k-a, k+1):
c[j] += factor * comb(j, k-a)
if extrapolate is None:
extrapolate = pp.extrapolate
return cls.construct_fast(c, pp.x, extrapolate, pp.axis)
@classmethod
def from_derivatives(cls, xi, yi, orders=None, extrapolate=None):
"""Construct a piecewise polynomial in the Bernstein basis,
compatible with the specified values and derivatives at breakpoints.
Parameters
----------
xi : array_like
sorted 1D array of x-coordinates
yi : array_like or list of array_likes
``yi[i][j]`` is the ``j``-th derivative known at ``xi[i]``
orders : None or int or array_like of ints. Default: None.
Specifies the degree of local polynomials. If not None, some
derivatives are ignored.
extrapolate : bool or 'periodic', optional
If bool, determines whether to extrapolate to out-of-bounds points
based on first and last intervals, or to return NaNs.
If 'periodic', periodic extrapolation is used. Default is True.
Notes
-----
If ``k`` derivatives are specified at a breakpoint ``x``, the
constructed polynomial is exactly ``k`` times continuously
differentiable at ``x``, unless the ``order`` is provided explicitly.
In the latter case, the smoothness of the polynomial at
the breakpoint is controlled by the ``order``.
Deduces the number of derivatives to match at each end
from ``order`` and the number of derivatives available. If
possible it uses the same number of derivatives from
each end; if the number is odd it tries to take the
extra one from y2. In any case if not enough derivatives
are available at one end or another it draws enough to
make up the total from the other end.
If the order is too high and not enough derivatives are available,
an exception is raised.
Examples
--------
>>> from scipy.interpolate import BPoly
>>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]])
Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]`
such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4`
>>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]])
Creates a piecewise polynomial `f(x)`, such that
`f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`.
Based on the number of derivatives provided, the order of the
local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`.
Notice that no restriction is imposed on the derivatives at
`x = 1` and `x = 2`.
Indeed, the explicit form of the polynomial is::
f(x) = | x * (1 - x), 0 <= x < 1
| 2 * (x - 1), 1 <= x <= 2
So that f'(1-0) = -1 and f'(1+0) = 2
"""
xi = np.asarray(xi)
if len(xi) != len(yi):
raise ValueError("xi and yi need to have the same length")
        if np.any(xi[1:] - xi[:-1] <= 0):
raise ValueError("x coordinates are not in increasing order")
# number of intervals
m = len(xi) - 1
# global poly order is k-1, local orders are <=k and can vary
try:
k = max(len(yi[i]) + len(yi[i+1]) for i in range(m))
except TypeError:
raise ValueError("Using a 1D array for y? Please .reshape(-1, 1).")
if orders is None:
orders = [None] * m
else:
if isinstance(orders, (integer_types, np.integer)):
orders = [orders] * m
k = max(k, max(orders))
if any(o <= 0 for o in orders):
raise ValueError("Orders must be positive.")
c = []
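        # For each interval, build the lowest-degree Bernstein polynomial that
        # matches the requested derivatives at both endpoints, then raise every
        # piece to the common degree k so the coefficient array is rectangular.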
for i in range(m):
y1, y2 = yi[i], yi[i+1]
if orders[i] is None:
n1, n2 = len(y1), len(y2)
else:
n = orders[i]+1
n1 = min(n//2, len(y1))
n2 = min(n - n1, len(y2))
                n1 = min(n - n2, len(y1))
if n1+n2 != n:
mesg = ("Point %g has %d derivatives, point %g"
" has %d derivatives, but order %d requested" % (
xi[i], len(y1), xi[i+1], len(y2), orders[i]))
raise ValueError(mesg)
if not (n1 <= len(y1) and n2 <= len(y2)):
raise ValueError("`order` input incompatible with"
" length y1 or y2.")
b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
y1[:n1], y2[:n2])
if len(b) < k:
b = BPoly._raise_degree(b, k - len(b))
c.append(b)
c = np.asarray(c)
return cls(c.swapaxes(0, 1), xi, extrapolate)
@staticmethod
def _construct_from_derivatives(xa, xb, ya, yb):
r"""Compute the coefficients of a polynomial in the Bernstein basis
given the values and derivatives at the edges.
Return the coefficients of a polynomial in the Bernstein basis
defined on `[xa, xb]` and having the values and derivatives at the
endpoints ``xa`` and ``xb`` as specified by ``ya`` and ``yb``.
The polynomial constructed is of the minimal possible degree, i.e.,
if the lengths of ``ya`` and ``yb`` are ``na`` and ``nb``, the degree
of the polynomial is ``na + nb - 1``.
Parameters
----------
xa : float
Left-hand end point of the interval
xb : float
Right-hand end point of the interval
ya : array_like
Derivatives at ``xa``. ``ya[0]`` is the value of the function, and
``ya[i]`` for ``i > 0`` is the value of the ``i``-th derivative.
yb : array_like
Derivatives at ``xb``.
Returns
-------
array
coefficient array of a polynomial having specified derivatives
Notes
-----
        This uses several facts about Bernstein basis functions.
First of all,
.. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1})
If B(x) is a linear combination of the form
.. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n},
        then :math:`B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}`.
Iterating the latter one, one finds for the q-th derivative
.. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q},
with
        .. math:: Q_a = \sum_{j=0}^{q} (-1)^{j+q} comb(q, j) c_{j+a}
        This way, only `a=0` contributes to :math:`B^{q}(x = xa)`, and
`c_q` are found one by one by iterating `q = 0, ..., na`.
At `x = xb` it's the same with `a = n - q`.
"""
ya, yb = np.asarray(ya), np.asarray(yb)
if ya.shape[1:] != yb.shape[1:]:
raise ValueError('ya and yb have incompatible dimensions.')
dta, dtb = ya.dtype, yb.dtype
if (np.issubdtype(dta, np.complexfloating) or
np.issubdtype(dtb, np.complexfloating)):
dt = np.complex_
else:
dt = np.float_
na, nb = len(ya), len(yb)
n = na + nb
c = np.empty((na+nb,) + ya.shape[1:], dtype=dt)
# compute coefficients of a polynomial degree na+nb-1
# walk left-to-right
for q in range(0, na):
c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q
for j in range(0, q):
c[q] -= (-1)**(j+q) * comb(q, j) * c[j]
# now walk right-to-left
for q in range(0, nb):
c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q
for j in range(0, q):
c[-q-1] -= (-1)**(j+1) * comb(q, j+1) * c[-q+j]
return c
@staticmethod
def _raise_degree(c, d):
r"""Raise a degree of a polynomial in the Bernstein basis.
Given the coefficients of a polynomial degree `k`, return (the
coefficients of) the equivalent polynomial of degree `k+d`.
Parameters
----------
c : array_like
coefficient array, 1D
d : integer
Returns
-------
array
coefficient array, 1D array of length `c.shape[0] + d`
Notes
-----
This uses the fact that a Bernstein polynomial `b_{a, k}` can be
identically represented as a linear combination of polynomials of
a higher degree `k+d`:
.. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \
comb(d, j) / comb(k+d, a+j)
"""
if d == 0:
return c
k = c.shape[0] - 1
out = np.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype)
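        # Apply the degree-elevation identity from the Notes: each b_{a, k} is
        # rewritten as a combination of b_{a+j, k+d} for j = 0, ..., d.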
for a in range(c.shape[0]):
f = c[a] * comb(k, a)
for j in range(d+1):
out[a+j] += f * comb(d, j) / comb(k+d, a+j)
return out
class NdPPoly(object):
"""
Piecewise tensor product polynomial
The value at point `xp = (x', y', z', ...)` is evaluated by first
computing the interval indices `i` such that::
x[0][i[0]] <= x' < x[0][i[0]+1]
x[1][i[1]] <= y' < x[1][i[1]+1]
...
and then computing::
S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]]
* (xp[0] - x[0][i[0]])**m0
* ...
* (xp[n] - x[n][i[n]])**mn
for m0 in range(k[0]+1)
...
for mn in range(k[n]+1))
where ``k[j]`` is the degree of the polynomial in dimension j. This
representation is the piecewise multivariate power basis.
Parameters
----------
c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...)
Polynomial coefficients, with polynomial order `kj` and
`mj+1` intervals for each dimension `j`.
x : ndim-tuple of ndarrays, shapes (mj+1,)
Polynomial breakpoints for each dimension. These must be
sorted in increasing order.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs. Default: True.
Attributes
----------
x : tuple of ndarrays
Breakpoints.
c : ndarray
Coefficients of the polynomials.
Methods
-------
__call__
construct_fast
See also
--------
PPoly : piecewise polynomials in 1D
Notes
-----
High-order polynomials in the power basis can be numerically
unstable.
"""
def __init__(self, c, x, extrapolate=None):
self.x = tuple(np.ascontiguousarray(v, dtype=np.float64) for v in x)
self.c = np.asarray(c)
if extrapolate is None:
extrapolate = True
self.extrapolate = bool(extrapolate)
ndim = len(self.x)
if any(v.ndim != 1 for v in self.x):
raise ValueError("x arrays must all be 1-dimensional")
if any(v.size < 2 for v in self.x):
raise ValueError("x arrays must all contain at least 2 points")
if c.ndim < 2*ndim:
raise ValueError("c must have at least 2*len(x) dimensions")
if any(np.any(v[1:] - v[:-1] < 0) for v in self.x):
raise ValueError("x-coordinates are not in increasing order")
if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)):
raise ValueError("x and c do not agree on the number of intervals")
dtype = self._get_dtype(self.c.dtype)
self.c = np.ascontiguousarray(self.c, dtype=dtype)
@classmethod
def construct_fast(cls, c, x, extrapolate=None):
"""
Construct the piecewise polynomial without making checks.
Takes the same parameters as the constructor. Input arguments
`c` and `x` must be arrays of the correct shape and type. The
`c` array can only be of dtypes float and complex, and `x`
array must have dtype float.
"""
self = object.__new__(cls)
self.c = c
self.x = x
if extrapolate is None:
extrapolate = True
self.extrapolate = extrapolate
return self
def _get_dtype(self, dtype):
if np.issubdtype(dtype, np.complexfloating) \
or np.issubdtype(self.c.dtype, np.complexfloating):
return np.complex_
else:
return np.float_
def _ensure_c_contiguous(self):
if not self.c.flags.c_contiguous:
self.c = self.c.copy()
if not isinstance(self.x, tuple):
self.x = tuple(self.x)
def __call__(self, x, nu=None, extrapolate=None):
"""
Evaluate the piecewise polynomial or its derivative
Parameters
----------
x : array-like
Points to evaluate the interpolant at.
nu : tuple, optional
Orders of derivatives to evaluate. Each must be non-negative.
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
y : array-like
Interpolated values. Shape is determined by replacing
the interpolation axis in the original array with the shape of x.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals are considered half-open,
``[a, b)``, except for the last interval which is closed
``[a, b]``.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
x = _ndim_coords_from_arrays(x)
x_shape = x.shape
x = np.ascontiguousarray(x.reshape(-1, x.shape[-1]), dtype=np.float_)
if nu is None:
nu = np.zeros((ndim,), dtype=np.intc)
else:
nu = np.asarray(nu, dtype=np.intc)
if nu.ndim != 1 or nu.shape[0] != ndim:
raise ValueError("invalid number of derivative orders nu")
dim1 = prod(self.c.shape[:ndim])
dim2 = prod(self.c.shape[ndim:2*ndim])
dim3 = prod(self.c.shape[2*ndim:])
ks = np.array(self.c.shape[:ndim], dtype=np.intc)
out = np.empty((x.shape[0], dim3), dtype=self.c.dtype)
self._ensure_c_contiguous()
_ppoly.evaluate_nd(self.c.reshape(dim1, dim2, dim3),
self.x,
ks,
x,
nu,
bool(extrapolate),
out)
return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:])
def _derivative_inplace(self, nu, axis):
"""
Compute 1D derivative along a selected dimension in-place
        May result in a non-contiguous c array.
"""
if nu < 0:
return self._antiderivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
# reduce order
if nu == 0:
# noop
return
else:
sl = [slice(None)]*ndim
sl[axis] = slice(None, -nu, None)
c2 = self.c[sl]
if c2.shape[axis] == 0:
# derivative of order 0 is zero
shp = list(c2.shape)
shp[axis] = 1
c2 = np.zeros(shp, dtype=c2.dtype)
# multiply by the correct rising factorials
factor = spec.poch(np.arange(c2.shape[axis], 0, -1), nu)
sl = [None]*c2.ndim
sl[axis] = slice(None)
c2 *= factor[sl]
self.c = c2
def _antiderivative_inplace(self, nu, axis):
"""
Compute 1D antiderivative along a selected dimension
        May result in a non-contiguous c array.
"""
if nu <= 0:
return self._derivative_inplace(-nu, axis)
ndim = len(self.x)
axis = axis % ndim
perm = list(range(ndim))
perm[0], perm[axis] = perm[axis], perm[0]
perm = perm + list(range(ndim, self.c.ndim))
c = self.c.transpose(perm)
c2 = np.zeros((c.shape[0] + nu,) + c.shape[1:],
dtype=c.dtype)
c2[:-nu] = c
# divide by the correct rising factorials
factor = spec.poch(np.arange(c.shape[0], 0, -1), nu)
c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)]
# fix continuity of added degrees of freedom
perm2 = list(range(c2.ndim))
perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1]
c2 = c2.transpose(perm2)
c2 = c2.copy()
_ppoly.fix_continuity(c2.reshape(c2.shape[0], c2.shape[1], -1),
self.x[axis], nu-1)
c2 = c2.transpose(perm2)
c2 = c2.transpose(perm)
# Done
self.c = c2
def derivative(self, nu):
"""
Construct a new piecewise polynomial representing the derivative.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the antiderivative is returned.
Returns
-------
pp : NdPPoly
Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n])
representing the derivative of this polynomial.
Notes
-----
Derivatives are evaluated piecewise for each polynomial
segment, even if the polynomial is not differentiable at the
breakpoints. The polynomial intervals in each dimension are
considered half-open, ``[a, b)``, except for the last interval
which is closed ``[a, b]``.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._derivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def antiderivative(self, nu):
"""
Construct a new piecewise polynomial representing the antiderivative.
Antiderivative is also the indefinite integral of the function,
and derivative is its inverse operation.
Parameters
----------
nu : ndim-tuple of int
Order of derivatives to evaluate for each dimension.
If negative, the derivative is returned.
Returns
-------
pp : PPoly
Piecewise polynomial of order k2 = k + n representing
the antiderivative of this polynomial.
Notes
-----
The antiderivative returned by this function is continuous and
continuously differentiable to order n-1, up to floating point
rounding error.
"""
p = self.construct_fast(self.c.copy(), self.x, self.extrapolate)
for axis, n in enumerate(nu):
p._antiderivative_inplace(n, axis)
p._ensure_c_contiguous()
return p
def integrate_1d(self, a, b, axis, extrapolate=None):
r"""
Compute NdPPoly representation for one dimensional definite integral
The result is a piecewise polynomial representing the integral:
.. math::
p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...)
where the dimension integrated over is specified with the
`axis` parameter.
Parameters
----------
a, b : float
Lower and upper bound for integration.
axis : int
Dimension over which to compute the 1D integrals
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : NdPPoly or array-like
Definite integral of the piecewise polynomial over [a, b].
If the polynomial was 1-dimensional, an array is returned,
otherwise, an NdPPoly object.
"""
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
ndim = len(self.x)
axis = int(axis) % ndim
# reuse 1D integration routines
c = self.c
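        # Move the coefficient and breakpoint dimensions belonging to `axis` to
        # the front so the data can be viewed as a stack of 1-D piecewise
        # polynomials and handed to PPoly.integrate.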
swap = list(range(c.ndim))
swap.insert(0, swap[axis])
del swap[axis + 1]
swap.insert(1, swap[ndim + axis])
del swap[ndim + axis + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1),
self.x[axis],
extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
# Construct result
if ndim == 1:
return out.reshape(c.shape[2:])
else:
c = out.reshape(c.shape[2:])
x = self.x[:axis] + self.x[axis+1:]
return self.construct_fast(c, x, extrapolate=extrapolate)
def integrate(self, ranges, extrapolate=None):
"""
Compute a definite integral over a piecewise polynomial.
Parameters
----------
ranges : ndim-tuple of 2-tuples float
Sequence of lower and upper bounds for each dimension,
``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]``
extrapolate : bool, optional
Whether to extrapolate to out-of-bounds points based on first
and last intervals, or to return NaNs.
Returns
-------
ig : array_like
Definite integral of the piecewise polynomial over
[a[0], b[0]] x ... x [a[ndim-1], b[ndim-1]]
"""
ndim = len(self.x)
if extrapolate is None:
extrapolate = self.extrapolate
else:
extrapolate = bool(extrapolate)
if not hasattr(ranges, '__len__') or len(ranges) != ndim:
raise ValueError("Range not a sequence of correct length")
self._ensure_c_contiguous()
# Reuse 1D integration routine
c = self.c
for n, (a, b) in enumerate(ranges):
swap = list(range(c.ndim))
swap.insert(1, swap[ndim - n])
del swap[ndim - n + 1]
c = c.transpose(swap)
p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate)
out = p.integrate(a, b, extrapolate=extrapolate)
c = out.reshape(c.shape[2:])
return c
class RegularGridInterpolator(object):
"""
Interpolation on a regular grid in arbitrary dimensions
The data must be defined on a regular grid; the grid spacing however may be
uneven. Linear and nearest-neighbour interpolation are supported. After
setting up the interpolator object, the interpolation method (*linear* or
*nearest*) may be chosen at each evaluation.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest". This parameter will become the default for the object's
``__call__`` method. Default is "linear".
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated.
Methods
-------
__call__
Notes
-----
Contrary to LinearNDInterpolator and NearestNDInterpolator, this class
avoids expensive triangulation of the input data by taking advantage of the
regular grid structure.
.. versionadded:: 0.14
Examples
--------
Evaluate a simple example function on the points of a 3D grid:
>>> from scipy.interpolate import RegularGridInterpolator
>>> def f(x, y, z):
... return 2 * x**3 + 3 * y**2 - z
>>> x = np.linspace(1, 4, 11)
>>> y = np.linspace(4, 7, 22)
>>> z = np.linspace(7, 9, 33)
>>> data = f(*np.meshgrid(x, y, z, indexing='ij', sparse=True))
``data`` is now a 3D array with ``data[i,j,k] = f(x[i], y[j], z[k])``.
Next, define an interpolating function from this data:
>>> my_interpolating_function = RegularGridInterpolator((x, y, z), data)
Evaluate the interpolating function at the two points
``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``:
>>> pts = np.array([[2.1, 6.2, 8.3], [3.3, 5.2, 7.1]])
>>> my_interpolating_function(pts)
array([ 125.80469388, 146.30069388])
which is indeed a close approximation to
``[f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1)]``.
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
References
----------
.. [1] Python package *regulargrid* by Johannes Buchner, see
https://pypi.python.org/pypi/regulargrid/
.. [2] Trilinear interpolation. (2013, January 17). In Wikipedia, The Free
Encyclopedia. Retrieved 27 Feb 2013 01:28.
http://en.wikipedia.org/w/index.php?title=Trilinear_interpolation&oldid=533448871
.. [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise linear
and multilinear table interpolation in many dimensions." MATH.
COMPUT. 50.181 (1988): 189-196.
http://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf
"""
# this class is based on code originally programmed by Johannes Buchner,
# see https://github.com/JohannesBuchner/regulargrid
def __init__(self, points, values, method="linear", bounds_error=True,
fill_value=np.nan):
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
self.method = method
self.bounds_error = bounds_error
if not hasattr(values, 'ndim'):
# allow reasonable duck-typed values
values = np.asarray(values)
if len(points) > values.ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), values.ndim))
if hasattr(values, 'dtype') and hasattr(values, 'astype'):
if not np.issubdtype(values.dtype, np.inexact):
values = values.astype(float)
self.fill_value = fill_value
if fill_value is not None:
fill_value_dtype = np.asarray(fill_value).dtype
if (hasattr(values, 'dtype') and not
np.can_cast(fill_value_dtype, values.dtype,
casting='same_kind')):
raise ValueError("fill_value must be either 'None' or "
"of a type compatible with values")
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
self.grid = tuple([np.asarray(p) for p in points])
self.values = values
def __call__(self, xi, method=None):
"""
Interpolation at coordinates
Parameters
----------
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str
The method of interpolation to perform. Supported are "linear" and
"nearest".
"""
method = self.method if method is None else method
if method not in ["linear", "nearest"]:
raise ValueError("Method '%s' is not defined" % method)
ndim = len(self.grid)
xi = _ndim_coords_from_arrays(xi, ndim=ndim)
if xi.shape[-1] != len(self.grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], ndim))
xi_shape = xi.shape
xi = xi.reshape(-1, xi_shape[-1])
if self.bounds_error:
for i, p in enumerate(xi.T):
if not np.logical_and(np.all(self.grid[i][0] <= p),
np.all(p <= self.grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
indices, norm_distances, out_of_bounds = self._find_indices(xi.T)
if method == "linear":
result = self._evaluate_linear(indices,
norm_distances,
out_of_bounds)
elif method == "nearest":
result = self._evaluate_nearest(indices,
norm_distances,
out_of_bounds)
if not self.bounds_error and self.fill_value is not None:
result[out_of_bounds] = self.fill_value
return result.reshape(xi_shape[:-1] + self.values.shape[ndim:])
def _evaluate_linear(self, indices, norm_distances, out_of_bounds):
# slice for broadcasting over trailing dimensions in self.values
vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices))
# find relevant values
        # each i and i+1 represents an edge
edges = itertools.product(*[[i, i + 1] for i in indices])
values = 0.
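        # Multilinear interpolation: each corner of the enclosing hypercube
        # contributes its value weighted by the product over axes of (1 - t) for
        # the lower edge or t for the upper edge, with t the normalized distance.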
for edge_indices in edges:
weight = 1.
for ei, i, yi in zip(edge_indices, indices, norm_distances):
weight *= np.where(ei == i, 1 - yi, yi)
values += np.asarray(self.values[edge_indices]) * weight[vslice]
return values
def _evaluate_nearest(self, indices, norm_distances, out_of_bounds):
idx_res = []
for i, yi in zip(indices, norm_distances):
idx_res.append(np.where(yi <= .5, i, i + 1))
return self.values[idx_res]
def _find_indices(self, xi):
# find relevant edges between which xi are situated
indices = []
# compute distance to lower edge in unity units
norm_distances = []
# check for out of bounds xi
out_of_bounds = np.zeros((xi.shape[1]), dtype=bool)
# iterate through dimensions
for x, grid in zip(xi, self.grid):
i = np.searchsorted(grid, x) - 1
i[i < 0] = 0
i[i > grid.size - 2] = grid.size - 2
indices.append(i)
norm_distances.append((x - grid[i]) /
(grid[i + 1] - grid[i]))
if not self.bounds_error:
out_of_bounds += x < grid[0]
out_of_bounds += x > grid[-1]
return indices, norm_distances, out_of_bounds
def interpn(points, values, xi, method="linear", bounds_error=True,
fill_value=np.nan):
"""
Multidimensional interpolation on regular grids.
Parameters
----------
points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, )
The points defining the regular grid in n dimensions.
values : array_like, shape (m1, ..., mn, ...)
The data on the regular grid in n dimensions.
xi : ndarray of shape (..., ndim)
The coordinates to sample the gridded data at
method : str, optional
The method of interpolation to perform. Supported are "linear" and
"nearest", and "splinef2d". "splinef2d" is only supported for
2-dimensional data.
bounds_error : bool, optional
If True, when interpolated values are requested outside of the
domain of the input data, a ValueError is raised.
If False, then `fill_value` is used.
fill_value : number, optional
If provided, the value to use for points outside of the
interpolation domain. If None, values outside
the domain are extrapolated. Extrapolation is not supported by method
"splinef2d".
Returns
-------
values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:]
Interpolated values at input coordinates.
Notes
-----
.. versionadded:: 0.14
See also
--------
NearestNDInterpolator : Nearest neighbour interpolation on unstructured
data in N dimensions
LinearNDInterpolator : Piecewise linear interpolant on unstructured data
in N dimensions
RegularGridInterpolator : Linear and nearest-neighbor Interpolation on a
regular grid in arbitrary dimensions
RectBivariateSpline : Bivariate spline approximation over a rectangular mesh
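    Examples
    --------
    A minimal sketch of linear interpolation on a 2-D grid (the grid and
    values below are illustrative only):
    >>> from scipy.interpolate import interpn
    >>> points = (np.array([0., 1., 2.]), np.array([0., 1., 2.]))
    >>> values = np.arange(9.).reshape(3, 3)
    >>> vals = interpn(points, values, [[0.5, 0.5]])
    >>> float(vals[0])
    2.0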
"""
# sanity check 'method' kwarg
if method not in ["linear", "nearest", "splinef2d"]:
raise ValueError("interpn only understands the methods 'linear', "
"'nearest', and 'splinef2d'. You provided %s." %
method)
if not hasattr(values, 'ndim'):
values = np.asarray(values)
ndim = values.ndim
if ndim > 2 and method == "splinef2d":
raise ValueError("The method spline2fd can only be used for "
"2-dimensional input data")
if not bounds_error and fill_value is None and method == "splinef2d":
raise ValueError("The method spline2fd does not support extrapolation.")
# sanity check consistency of input dimensions
if len(points) > ndim:
raise ValueError("There are %d point arrays, but values has %d "
"dimensions" % (len(points), ndim))
if len(points) != ndim and method == 'splinef2d':
raise ValueError("The method spline2fd can only be used for "
"scalar data with one point per coordinate")
# sanity check input grid
for i, p in enumerate(points):
if not np.all(np.diff(p) > 0.):
raise ValueError("The points in dimension %d must be strictly "
"ascending" % i)
if not np.asarray(p).ndim == 1:
raise ValueError("The points in dimension %d must be "
"1-dimensional" % i)
if not values.shape[i] == len(p):
raise ValueError("There are %d points and %d values in "
"dimension %d" % (len(p), values.shape[i], i))
grid = tuple([np.asarray(p) for p in points])
# sanity check requested xi
xi = _ndim_coords_from_arrays(xi, ndim=len(grid))
if xi.shape[-1] != len(grid):
raise ValueError("The requested sample points xi have dimension "
"%d, but this RegularGridInterpolator has "
"dimension %d" % (xi.shape[1], len(grid)))
for i, p in enumerate(xi.T):
if bounds_error and not np.logical_and(np.all(grid[i][0] <= p),
np.all(p <= grid[i][-1])):
raise ValueError("One of the requested xi is out of bounds "
"in dimension %d" % i)
# perform interpolation
if method == "linear":
interp = RegularGridInterpolator(points, values, method="linear",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "nearest":
interp = RegularGridInterpolator(points, values, method="nearest",
bounds_error=bounds_error,
fill_value=fill_value)
return interp(xi)
elif method == "splinef2d":
xi_shape = xi.shape
xi = xi.reshape(-1, xi.shape[-1])
# RectBivariateSpline doesn't support fill_value; we need to wrap here
idx_valid = np.all((grid[0][0] <= xi[:, 0], xi[:, 0] <= grid[0][-1],
grid[1][0] <= xi[:, 1], xi[:, 1] <= grid[1][-1]),
axis=0)
result = np.empty_like(xi[:, 0])
# make a copy of values for RectBivariateSpline
interp = RectBivariateSpline(points[0], points[1], values[:])
result[idx_valid] = interp.ev(xi[idx_valid, 0], xi[idx_valid, 1])
result[np.logical_not(idx_valid)] = fill_value
return result.reshape(xi_shape[:-1])
# backward compatibility wrapper
class ppform(PPoly):
"""
Deprecated piecewise polynomial class.
New code should use the `PPoly` class instead.
"""
def __init__(self, coeffs, breaks, fill=0.0, sort=False):
warnings.warn("ppform is deprecated -- use PPoly instead",
category=DeprecationWarning)
if sort:
breaks = np.sort(breaks)
else:
breaks = np.asarray(breaks)
PPoly.__init__(self, coeffs, breaks)
self.coeffs = self.c
self.breaks = self.x
self.K = self.coeffs.shape[0]
self.fill = fill
self.a = self.breaks[0]
self.b = self.breaks[-1]
def __call__(self, x):
return PPoly.__call__(self, x, 0, False)
def _evaluate(self, x, nu, extrapolate, out):
PPoly._evaluate(self, x, nu, extrapolate, out)
out[~((x >= self.a) & (x <= self.b))] = self.fill
return out
@classmethod
def fromspline(cls, xk, cvals, order, fill=0.0):
# Note: this spline representation is incompatible with FITPACK
N = len(xk)-1
sivals = np.empty((order+1, N), dtype=float)
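        # The power-basis coefficient of (x - xk[i])**m is the m-th derivative
        # of the spline at xk[i] divided by m!; _bspleval evaluates those
        # derivatives at the left endpoint of each interval.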
for m in xrange(order, -1, -1):
fact = spec.gamma(m+1)
res = _fitpack._bspleval(xk[:-1], xk, cvals, order, m)
res /= fact
sivals[order-m, :] = res
return cls(sivals, xk, fill=fill)
# The 3 private functions below can be called by splmake().
def _dot0(a, b):
"""Similar to numpy.dot, but sum over last axis of a and 1st axis of b"""
if b.ndim <= 2:
return dot(a, b)
else:
axes = list(range(b.ndim))
axes.insert(-1, 0)
axes.pop(0)
return dot(a, b.transpose(axes))
def _find_smoothest(xk, yk, order, conds=None, B=None):
# construct Bmatrix, and Jmatrix
# e = J*c
# minimize norm(e,2) given B*c=yk
# if desired B can be given
# conds is ignored
N = len(xk)-1
K = order
if B is None:
B = _fitpack._bsplmat(order, xk)
J = _fitpack._bspldismat(order, xk)
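    # Minimize the roughness measure ||J*c|| subject to the interpolation
    # constraints B*c = yk; the SVD of B splits coefficient space into the
    # constrained directions (V1) and the free ones (V2), and the free part is
    # chosen to minimize the roughness.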
u, s, vh = scipy.linalg.svd(B)
ind = K-1
V2 = vh[-ind:,:].T
V1 = vh[:-ind,:].T
A = dot(J.T,J)
tmp = dot(V2.T,A)
Q = dot(tmp,V2)
p = scipy.linalg.solve(Q, tmp)
tmp = dot(V2,p)
tmp = np.eye(N+K) - tmp
tmp = dot(tmp,V1)
tmp = dot(tmp,np.diag(1.0/s))
tmp = dot(tmp,u.T)
return _dot0(tmp, yk)
# conds is a tuple of an array and a vector
# giving the left-hand and the right-hand side
# of the additional equations to add to B
def _find_user(xk, yk, order, conds, B):
lh = conds[0]
rh = conds[1]
B = np.concatenate((B, lh), axis=0)
w = np.concatenate((yk, rh), axis=0)
M, N = B.shape
if (M > N):
raise ValueError("over-specification of conditions")
elif (M < N):
return _find_smoothest(xk, yk, order, None, B)
else:
return scipy.linalg.solve(B, w)
# Remove the 3 private functions above as well when removing splmake
@np.deprecate(message="splmake is deprecated in scipy 0.19.0, "
"use make_interp_spline instead.")
def splmake(xk, yk, order=3, kind='smoothest', conds=None):
"""
Return a representation of a spline given data-points at internal knots
Parameters
----------
xk : array_like
The input array of x values of rank 1
yk : array_like
The input array of y values of rank N. `yk` can be an N-d array to
represent more than one curve, through the same `xk` points. The first
        dimension is assumed to be the interpolating dimension and is the same
        length as `xk`.
order : int, optional
Order of the spline
kind : str, optional
Can be 'smoothest', 'not_a_knot', 'fixed', 'clamped', 'natural',
'periodic', 'symmetric', 'user', 'mixed' and it is ignored if order < 2
conds : optional
Conds
Returns
-------
splmake : tuple
Return a (`xk`, `cvals`, `k`) representation of a spline given
data-points where the (internal) knots are at the data-points.
"""
yk = np.asanyarray(yk)
order = int(order)
if order < 0:
raise ValueError("order must not be negative")
if order == 0:
return xk, yk[:-1], order
elif order == 1:
return xk, yk, order
try:
func = eval('_find_%s' % kind)
except:
raise NotImplementedError
# the constraint matrix
B = _fitpack._bsplmat(order, xk)
coefs = func(xk, yk, order, conds, B)
return xk, coefs, order
@np.deprecate(message="spleval is deprecated in scipy 0.19.0, "
"use BSpline instead.")
def spleval(xck, xnew, deriv=0):
"""
Evaluate a fixed spline represented by the given tuple at the new x-values
The `xj` values are the interior knot points. The approximation
region is `xj[0]` to `xj[-1]`. If N+1 is the length of `xj`, then `cvals`
should have length N+k where `k` is the order of the spline.
Parameters
----------
(xj, cvals, k) : tuple
Parameters that define the fixed spline
xj : array_like
Interior knot points
cvals : array_like
Curvature
k : int
Order of the spline
xnew : array_like
Locations to calculate spline
deriv : int
Deriv
Returns
-------
spleval : ndarray
If `cvals` represents more than one curve (`cvals.ndim` > 1) and/or
`xnew` is N-d, then the result is `xnew.shape` + `cvals.shape[1:]`
providing the interpolation of multiple curves.
Notes
-----
Internally, an additional `k`-1 knot points are added on either side of
the spline.
"""
(xj, cvals, k) = xck
oldshape = np.shape(xnew)
xx = np.ravel(xnew)
sh = cvals.shape[1:]
res = np.empty(xx.shape + sh, dtype=cvals.dtype)
for index in np.ndindex(*sh):
sl = (slice(None),) + index
if issubclass(cvals.dtype.type, np.complexfloating):
res[sl].real = _fitpack._bspleval(xx,xj, cvals.real[sl], k, deriv)
res[sl].imag = _fitpack._bspleval(xx,xj, cvals.imag[sl], k, deriv)
else:
res[sl] = _fitpack._bspleval(xx, xj, cvals[sl], k, deriv)
res.shape = oldshape + sh
return res
@np.deprecate(message="spltopp is deprecated in scipy 0.19.0, "
"use PPoly.from_spline instead.")
def spltopp(xk, cvals, k):
"""Return a piece-wise polynomial object from a fixed-spline tuple."""
return ppform.fromspline(xk, cvals, k)
@np.deprecate(message="spline is deprecated in scipy 0.19.0, "
"use Bspline class instead.")
def spline(xk, yk, xnew, order=3, kind='smoothest', conds=None):
"""
Interpolate a curve at new points using a spline fit
Parameters
----------
xk, yk : array_like
The x and y values that define the curve.
xnew : array_like
The x values where spline should estimate the y values.
order : int
Default is 3.
kind : string
One of {'smoothest'}
conds : Don't know
Don't know
Returns
-------
spline : ndarray
An array of y values; the spline evaluated at the positions `xnew`.
"""
return spleval(splmake(xk, yk, order=order, kind=kind, conds=conds), xnew)
| bsd-3-clause |
kruegg21/casino_analytics | src/main.py | 1 | 14236 | import helper
import json
import pandas as pd
import mpld3
import numpy as np
import requests
import factor_analysis
import visualizations
from datetime import timedelta, datetime
from netwin_analysis import netwin_analysis
from sqlalchemy import create_engine
from generateresponsefromrequest import get_intent_entity_from_watson
from query_parameters import query_parameters
from translation_dictionaries import *
# Read password from external file
with open('passwords.json') as data_file:
data = json.load(data_file)
DATABASE_HOST = 'soft-feijoa.db.elephantsql.com'
DATABASE_PORT = '5432'
DATABASE_NAME = 'ohdimqey'
DATABASE_USER = 'ohdimqey'
DATABASE_PASSWORD = data['DATABASE_PASSWORD']
# Connect to database
database_string = 'postgres://{}:{}@{}:{}/{}'.format(DATABASE_USER,
DATABASE_PASSWORD,
DATABASE_HOST,
DATABASE_PORT,
DATABASE_NAME)
engine = create_engine(database_string)
main_factors = ['bank', 'zone', 'clublevel', 'area']
specific_factors = ['club_level', 'area', 'game_title', 'manufacturer',
'stand', 'zone', 'bank']
# dataframes = {}
def impute_period(query_params, error_checking = False):
'''
Checks to see if a period is specified in query parameters object. If none
is specified, this function imputes a period by looking at the range in the
query parameters object. The imputed range is then put into the sql_period
attribute of the query params object.
Input:
query_params -- query parameters object
error_checking (bool) -- whether to print to console
Output:
query_params with imputed period
'''
# Check to see if a period is specified, if not impute period based on range
if not query_params.period:
period = None
time_range = query_params.stop - query_params.start
if time_range > timedelta(hours = 23, minutes = 59, seconds = 59):
# Range is greater than a day
if time_range > timedelta(days = 6, hours = 23, minutes = 59, seconds = 59):
# Range is greater than a week
if time_range > timedelta(days = 31, hours = 23, minutes = 59, seconds = 59):
# Range is greater than a month
if time_range > timedelta(days = 364, hours = 23, minutes = 59, seconds = 59):
# Range is greater than a year
# Segment by months
period = 'monthly'
else:
# Range is less than a year
# Segment by weeks
period = 'weekly'
else:
# Range is less than a month
# Segment by days
period = 'daily'
else:
# Range is less than week
# Segment by hour
period = 'hourly'
else:
# Segment by minute
period = 'by_minute'
# Add imputed period
query_params.sql_period = translation_dictionary[period]
# Check to see if we need more granularity for time factor
if query_params.time_factor:
if query_params.time_factor == 'top minute':
if query_params.sql_period in ['year', 'month', 'week', 'day', 'hour']:
query_params.sql_period = 'minute'
if query_params.time_factor == 'top hour':
if query_params.sql_period in ['year', 'month', 'week', 'day']:
query_params.sql_period = 'hour'
if query_params.time_factor == 'top day':
if query_params.sql_period in ['year', 'month', 'week']:
query_params.sql_period = 'day'
if query_params.time_factor == 'top week':
if query_params.sql_period in ['year', 'month']:
query_params.sql_period = 'week'
if query_params.time_factor == 'top month':
if query_params.sql_period in ['year']:
query_params.sql_period = 'month'
return query_params
def get_data_from_nl_query(nl_query, error_checking = False):
'''
Input:
nl_query (str) -- this is a natural language query
i.e. what is my revenue today
Returns
df (dataframe) -- this is a pandas dataframe that contains a table
which will be used for visualization
query_params (query_parameters object) -- this is an object holding
everything we need to know
about the query
'''
# Get JSON Watson conversations response to natual language query
response = get_intent_entity_from_watson(nl_query, error_checking = False)
# Transform JSON Watson conversations response to query parameters object
query_params = query_parameters()
query_params.generate_query_params_from_response(nl_query, response, error_checking = error_checking)
# Add main factors
if query_params.intent == 'machine_performance':
pass
else:
query_params.sql_factors += main_factors
# Impute period if needed
query_params = impute_period(query_params)
# Generate SQL query
query_params.generate_sql_query(error_checking = error_checking)
# Get SQL query string from query parameters object
sql_query = query_params.sql_string
if error_checking:
print query_params
# Place SQL results into DataFrame
df = helper.get_sql_data(sql_query, engine)
if error_checking:
print df.head()
return df, query_params
def main(query, error_checking = False):
'''
Args:
query (str): this is the natural language input string
Returns:
plot1 (unicode): this is the html, css, javascript to render the
mpld3 plot
mainfactors (list): this is a list of tuples where each element
is three items - the metric, the direction, and the percent change
plot2 (unicode): this is the html, css, javascript to render the
mpld3 plot
derivedmetrics (list): this is a list of tuples where each element
is three items - the metric, the direction, and the percent change
aggregate_statistics (dict) -- dictionary of aggregate statistics to
display on dashboard
'''
# Pull down data from database
df, query_params = get_data_from_nl_query(query, error_checking = error_checking)
# Decide what to do based on query parameters
"""
# Metric
self.metric = None
# Factor(s)
self.factors = []
# Range
self.start = datetime.strptime('2015-01-01', '%Y-%m-%d')
self.stop = datetime.strptime('2015-01-02', '%Y-%m-%d')
# Period
self.period = None
# Ordering
self.ordering = 'date'
# Aggregate Statistic
self.statistic = None
# Specific Factors
self.club_level = None
self.area = None
self.game_title = None
self.manufacturer = None
self.stand = None
self.zone = None
self.bank = None
"""
# All queries will have a metric and range (if none provided we will infer)
# M: always included (if not infer)
# F: indicates multi-line graph or multi-dimensional histogram
# SF: indicates filtering
# R: always included (if not infer)
# P: indicates which materialized view to pull from, if missing indicates a
# single value answer should be provided
# O: indicates histogram
# S:
# Dictionary to hold calculated metrics
metrics = {}
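    # "PUPD" metrics below are normalized per machine per day: totals are
    # divided by (num_days * num_machines).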
print query_params
# Check if we want to do net win analysis
if query_params.intent == 'netwin_analysis':
return netwin_analysis(df, query_params, engine)
# Determine metrics and graph type to build
if query_params.ordering == 'date' and query_params.intent != 'machine_performance':
# Line graph
# Find factor we need to aggregate on (currently supports only single factor)
if query_params.factors:
factor = translation_dictionary.get(query_params.factors[0], query_params.factors[0])
else:
factor = None
df_1 = helper.sum_by_time(df, factor)
# Calculate number of days
query_params.num_days = len(df_1.tmstmp.unique()) * query_params.days_per_interval
# Calculate metric total we are interested in
if factor:
# Multiple factor
total_metric_for_specific_factors = df_1.groupby(['factor'], as_index = False).sum()
for index, row in total_metric_for_specific_factors.iterrows():
# Calculate metric PUPD
metric_per_day_name = "{} for {}".format(human_readable_translation[query_params.sql_metric],
human_readable_translation[row['factor']])
metrics[metric_per_day_name] = round(row.metric / (query_params.num_days * query_params.num_machines), 3)
else:
# Single total
total_metric = df_1['metric'].sum()
# Calculate metric PUPD
metric_per_day_name = "{}".format(human_readable_translation[query_params.sql_metric])
metrics[metric_per_day_name] = round(total_metric / (query_params.num_days * query_params.num_machines), 3)
# Calculate PUPD for each metric
df_1 = helper.calculate_pupd(df_1, query_params)
# Round to 3 decimal places
df_1 = df_1.round(3)
# Make Plot
text = 'hello'
plot1 = visualizations.makeplot('line', df_1, query_params, metrics)
else:
# Bar plot
# Find factor (currently supports one factor)
if query_params.factors:
factor = translation_dictionary.get(query_params.factors[0], query_params.factors[0])
else:
# Defaults to clublevel
factor = 'clublevel'
if query_params.time_factor:
factor = query_params.time_factor
# Find top specific factors for given factor
df_1 = helper.find_top_specific_factors(df, factor, query_params)
# Calculate PUPD for each metric
if query_params.show_as_pupd:
df_1 = helper.calculate_pupd(df_1, query_params)
# Find metrics to display
if query_params.ordering == 'best' or query_params.intent == 'machine_performance':
best = df_1.iloc[-1]['factor']
metric_for_best = df_1.iloc[-1]['metric']
metric_string = 'Best {} is {} with {}'.format(human_readable_translation.get(factor, factor),
human_readable_translation.get(best, best),
human_readable_translation.get(query_params.sql_metric, query_params.sql_metric))
metrics[metric_string] = round(metric_for_best, 3)
else:
worst = df_1.iloc[0]['factor']
metric_for_worst = df_1.iloc[0]['metric']
metric_string = 'Worst {} is {} with {}'.format(human_readable_translation.get(factor),
human_readable_translation.get(worst, worst),
human_readable_translation.get(query_params.sql_metric, query_params.sql_metric))
metrics[metric_string] = round(metric_for_worst, 3)
# Round decimals to 3 places
df_1 = df_1.round(3)
# Filter most important
df_1 = df_1.iloc[-15:,:]
df_1 = df_1.reset_index(drop = True)
# Make plot
text = 'hello'
plot1 = visualizations.makeplot('hbar', df_1, query_params, metrics)
'''
Upper right chart
'''
if query_params.metric == 'netwins' or query_params.intent == 'machine_performance':
if query_params.ordering != 'date' or query_params.intent == 'machine_performance':
print df_1
print len(df_1)
mainfactors = []
if len(df_1) <= 15:
for i in xrange(1, len(df_1) + 1):
mainfactors.append((df_1.iloc[-i]['factor'], '', helper.convert_money_to_string(df_1.iloc[-i]['metric'])))
else:
for i in xrange(1,16):
mainfactors.append((df_1.iloc[-i]['factor'], '', helper.convert_money_to_string(df_1.iloc[-i]['metric'])))
table_metrics = [('Net Win PUPD')]
else:
# Calculate the main factors driving change in metric
mainfactors_df = factor_analysis.get_main_factors(df)
mainfactors = factor_analysis.translate_mainfactors_df_into_list(mainfactors_df)
table_metrics = [('Total Net Win')]
else:
table_metrics = None
mainfactors = None
'''
Bottom left plot
'''
# Find the top factor from mainfactors
if query_params.ordering != 'date' or query_params.intent == 'machine_performance':
plot2 = ''
else:
# Make plot 2
# specific_factor = mainfactors[0][0]
# if specific_factor[:4] == 'AREA':
# factor = 'area'
# elif specific_factor[:4] == 'BANK':
# factor = 'bank'
# elif specific_factor[:4] == 'ZONE':
# factor = 'zone'
# else:
# factor = 'clublevel'
#
# df_1 = helper.filter_by_specific_factor(df, factor, specific_factor)
# print df_1.head()
# text = 'hello'
# plot2 = visualizations.makeplot('line', df_1, query_params, text)
plot2 = ''
'''
Bottom right chart
'''
derivedmetrics = factor_analysis.create_derivedmetrics()
# derivedmetrics = None
return plot1, None, mainfactors, derivedmetrics, None, metrics, table_metrics, None, None, None, None
if __name__ == "__main__":
query = 'how are my machines doing january'
query = 'how are my machines doing january'
query = 'what is my net win'
main(query, error_checking = False)
| apache-2.0 |
bharcode/Kaggle | commons_ml/Logistic_Regression/Logistic_Binary_Classification/Scripts/logistic_regression.py | 2 | 5789 | #!/usr/bin/env python
# logistic_regression.py
# Author : Saimadhu
# Date: 19-March-2017
# About: Implementing Logistic Regression Classifier to predict to whom the voter will vote.
# Required Python Packages
import pandas as pd
import numpy as np
import pdb
import plotly.plotly as py
import plotly.graph_objs as go
# import plotly.plotly as py
# from plotly.graph_objs import *
py.sign_in('dataaspirant', 'RhJdlA1OsXsTjcRA0Kka')
from sklearn.cross_validation import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn import metrics
# Files
DATA_SET_PATH = "../Inputs/anes_dataset.csv"
def dataset_headers(dataset):
"""
To get the dataset header names
:param dataset: loaded dataset into pandas DataFrame
:return: list of header names
"""
return list(dataset.columns.values)
def unique_observations(dataset, header, method=1):
"""
To get unique observations in the loaded pandas DataFrame column
:param dataset:
:param header:
:param method: Method to perform the unique (default method=1 for pandas and method=0 for numpy )
:return:
"""
try:
if method == 0:
# With Numpy
observations = np.unique(dataset[[header]])
elif method == 1:
# With Pandas
observations = pd.unique(dataset[header].values.ravel())
else:
observations = None
print "Wrong method type, Use 1 for pandas and 0 for numpy"
except Exception as e:
observations = None
print "Error: {error_msg} /n Please check the inputs once..!".format(error_msg=e.message)
return observations
def feature_target_frequency_relation(dataset, f_t_headers):
"""
To get the frequency relation between targets and the unique feature observations
:param dataset:
:param f_t_headers: feature and target header
:return: feature unique observations dictionary of frequency count dictionary
"""
feature_unique_observations = unique_observations(dataset, f_t_headers[0])
unique_targets = unique_observations(dataset, f_t_headers[1])
frequencies = {}
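    # For each unique value of the feature, count how many observations fall
    # into each of the two target classes (candidate votes).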
for feature in feature_unique_observations:
frequencies[feature] = {unique_targets[0]: len(
dataset[(dataset[f_t_headers[0]] == feature) & (dataset[f_t_headers[1]] == unique_targets[0])]),
unique_targets[1]: len(
dataset[(dataset[f_t_headers[0]] == feature) & (dataset[f_t_headers[1]] == unique_targets[1])])}
return frequencies
def feature_target_histogram(feature_target_frequencies, feature_header):
"""
:param feature_target_frequencies:
:param feature_header:
:return:
"""
keys = feature_target_frequencies.keys()
y0 = [feature_target_frequencies[key][0] for key in keys]
y1 = [feature_target_frequencies[key][1] for key in keys]
trace1 = go.Bar(
x=keys,
y=y0,
name='Clinton'
)
trace2 = go.Bar(
x=keys,
y=y1,
name='Dole'
)
data = [trace1, trace2]
layout = go.Layout(
barmode='group',
title='Feature :: ' + feature_header + ' Clinton Vs Dole votes Frequency',
xaxis=dict(title="Feature :: " + feature_header + " classes"),
yaxis=dict(title="Votes Frequency")
)
fig = go.Figure(data=data, layout=layout)
# plot_url = py.plot(fig, filename=feature_header + ' - Target - Histogram')
py.image.save_as(fig, filename=feature_header + '_Target_Histogram.png')
def train_logistic_regression(train_x, train_y):
"""
Training logistic regression model with train dataset features(train_x) and target(train_y)
:param train_x:
:param train_y:
:return:
"""
logistic_regression_model = LogisticRegression()
logistic_regression_model.fit(train_x, train_y)
return logistic_regression_model
def model_accuracy(trained_model, features, targets):
"""
Get the accuracy score of the model
:param trained_model:
:param features:
:param targets:
:return:
"""
accuracy_score = trained_model.score(features, targets)
return accuracy_score
def main():
"""
Logistic Regression classifier main
:return:
"""
# Load the data set for training and testing the logistic regression classifier
dataset = pd.read_csv(DATA_SET_PATH)
print "Number of Observations :: ", len(dataset)
# Get the first observation
print dataset.head()
headers = dataset_headers(dataset)
print "Data set headers :: {headers}".format(headers=headers)
training_features = ['TVnews', 'PID', 'age', 'educ', 'income']
target = 'vote'
# Train , Test data split
train_x, test_x, train_y, test_y = train_test_split(dataset[training_features], dataset[target], train_size=0.7)
print "train_x size :: ", train_x.shape
print "train_y size :: ", train_y.shape
print "test_x size :: ", test_x.shape
print "test_y size :: ", test_y.shape
print "edu_target_frequencies :: ", feature_target_frequency_relation(dataset, [training_features[3], target])
for feature in training_features:
feature_target_frequencies = feature_target_frequency_relation(dataset, [feature, target])
feature_target_histogram(feature_target_frequencies, feature)
# Training Logistic regression model
trained_logistic_regression_model = train_logistic_regression(train_x, train_y)
train_accuracy = model_accuracy(trained_logistic_regression_model, train_x, train_y)
# Testing the logistic regression model
test_accuracy = model_accuracy(trained_logistic_regression_model, test_x, test_y)
print "Train Accuracy :: ", train_accuracy
print "Test Accuracy :: ", test_accuracy
if __name__ == "__main__":
main() | gpl-2.0 |
olinguyen/shogun | applications/tapkee/swissroll_embedding.py | 12 | 2600 | import numpy
numpy.random.seed(40)
tt = numpy.genfromtxt('../../data/toy/swissroll_color.dat',unpack=True).T
X = numpy.genfromtxt('../../data/toy/swissroll.dat',unpack=True).T
N = X.shape[1]
converters = []
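# Each entry pairs a configured Shogun embedding converter with the label used
# as its subplot title in the figure below.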
from shogun import LocallyLinearEmbedding
lle = LocallyLinearEmbedding()
lle.set_k(9)
converters.append((lle, "LLE with k=%d" % lle.get_k()))
from shogun import MultidimensionalScaling
mds = MultidimensionalScaling()
converters.append((mds, "Classic MDS"))
lmds = MultidimensionalScaling()
lmds.set_landmark(True)
lmds.set_landmark_number(20)
converters.append((lmds,"Landmark MDS with %d landmarks" % lmds.get_landmark_number()))
from shogun import Isomap
cisomap = Isomap()
cisomap.set_k(9)
converters.append((cisomap,"Isomap with k=%d" % cisomap.get_k()))
from shogun import DiffusionMaps
from shogun import GaussianKernel
dm = DiffusionMaps()
dm.set_t(2)
dm.set_width(1000.0)
converters.append((dm,"Diffusion Maps with t=%d, sigma=%.1f" % (dm.get_t(),dm.get_width())))
from shogun import HessianLocallyLinearEmbedding
hlle = HessianLocallyLinearEmbedding()
hlle.set_k(6)
converters.append((hlle,"Hessian LLE with k=%d" % (hlle.get_k())))
from shogun import LocalTangentSpaceAlignment
ltsa = LocalTangentSpaceAlignment()
ltsa.set_k(6)
converters.append((ltsa,"LTSA with k=%d" % (ltsa.get_k())))
from shogun import LaplacianEigenmaps
le = LaplacianEigenmaps()
le.set_k(20)
le.set_tau(100.0)
converters.append((le,"Laplacian Eigenmaps with k=%d, tau=%d" % (le.get_k(),le.get_tau())))
import matplotlib
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
new_mpl = False
try:
swiss_roll_fig = fig.add_subplot(3,3,1, projection='3d')
new_mpl = True
except:
figure = plt.figure()
swiss_roll_fig = Axes3D(figure)
swiss_roll_fig.scatter(X[0], X[1], X[2], s=10, c=tt, cmap=plt.cm.Spectral)
swiss_roll_fig._axis3don = False
plt.suptitle('Swissroll embedding',fontsize=9)
plt.subplots_adjust(hspace=0.4)
from shogun import RealFeatures
for (i, (converter, label)) in enumerate(converters):
X = numpy.genfromtxt('../../data/toy/swissroll.dat',unpack=True).T
features = RealFeatures(X)
converter.set_target_dim(2)
converter.parallel.set_num_threads(1)
new_feats = converter.embed(features).get_feature_matrix()
if not new_mpl:
embedding_subplot = fig.add_subplot(4,2,i+1)
else:
embedding_subplot = fig.add_subplot(3,3,i+2)
embedding_subplot.scatter(new_feats[0],new_feats[1], c=tt, cmap=plt.cm.Spectral)
plt.axis('tight')
plt.xticks([]), plt.yticks([])
plt.title(label,fontsize=9)
print converter.get_name(), 'done'
plt.show()
| gpl-3.0 |
harisbal/pandas | pandas/tests/io/parser/test_read_fwf.py | 1 | 16039 | # -*- coding: utf-8 -*-
"""
Tests the 'read_fwf' function in parsers.py. This
test suite is independent of the others because the
engine is set to 'python-fwf' internally.
"""
from datetime import datetime
import numpy as np
import pytest
import pandas.compat as compat
from pandas.compat import BytesIO, StringIO
import pandas as pd
from pandas import DataFrame
import pandas.util.testing as tm
from pandas.io.parsers import EmptyDataError, read_csv, read_fwf
class TestFwfParsing(object):
def test_fwf(self):
data_expected = """\
2011,58,360.242940,149.910199,11950.7
2011,59,444.953632,166.985655,11788.4
2011,60,364.136849,183.628767,11806.2
2011,61,413.836124,184.375703,11916.8
2011,62,502.953953,173.237159,12468.3
"""
expected = read_csv(StringIO(data_expected),
engine='python', header=None)
data1 = """\
201158 360.242940 149.910199 11950.7
201159 444.953632 166.985655 11788.4
201160 364.136849 183.628767 11806.2
201161 413.836124 184.375703 11916.8
201162 502.953953 173.237159 12468.3
"""
colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]
df = read_fwf(StringIO(data1), colspecs=colspecs, header=None)
tm.assert_frame_equal(df, expected)
data2 = """\
2011 58 360.242940 149.910199 11950.7
2011 59 444.953632 166.985655 11788.4
2011 60 364.136849 183.628767 11806.2
2011 61 413.836124 184.375703 11916.8
2011 62 502.953953 173.237159 12468.3
"""
df = read_fwf(StringIO(data2), widths=[5, 5, 13, 13, 7], header=None)
tm.assert_frame_equal(df, expected)
# From Thomas Kluyver: apparently some non-space filler characters can
# be seen, this is supported by specifying the 'delimiter' character:
# http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html
data3 = """\
201158~~~~360.242940~~~149.910199~~~11950.7
201159~~~~444.953632~~~166.985655~~~11788.4
201160~~~~364.136849~~~183.628767~~~11806.2
201161~~~~413.836124~~~184.375703~~~11916.8
201162~~~~502.953953~~~173.237159~~~12468.3
"""
df = read_fwf(
StringIO(data3), colspecs=colspecs, delimiter='~', header=None)
tm.assert_frame_equal(df, expected)
with tm.assert_raises_regex(ValueError,
"must specify only one of"):
read_fwf(StringIO(data3), colspecs=colspecs, widths=[6, 10, 10, 7])
with tm.assert_raises_regex(ValueError, "Must specify either"):
read_fwf(StringIO(data3), colspecs=None, widths=None)
def test_BytesIO_input(self):
if not compat.PY3:
pytest.skip(
"Bytes-related test - only needs to work on Python 3")
result = read_fwf(BytesIO("שלום\nשלום".encode('utf8')), widths=[
2, 2], encoding='utf8')
expected = DataFrame([["של", "ום"]], columns=["של", "ום"])
tm.assert_frame_equal(result, expected)
def test_fwf_colspecs_is_list_or_tuple(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'column specifications must '
'be a list or tuple.+'):
pd.io.parsers.FixedWidthReader(StringIO(data),
{'a': 1}, ',', '#')
def test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples(self):
data = """index,A,B,C,D
foo,2,3,4,5
bar,7,8,9,10
baz,12,13,14,15
qux,12,13,14,15
foo2,12,13,14,15
bar2,12,13,14,15
"""
with tm.assert_raises_regex(TypeError,
'Each column specification '
'must be.+'):
read_fwf(StringIO(data), [('a', 1)])
def test_fwf_colspecs_None(self):
# GH 7079
data = """\
123456
456789
"""
colspecs = [(0, 3), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, 3), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123, 456], [456, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(0, None), (3, None)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
colspecs = [(None, None), (3, 6)]
result = read_fwf(StringIO(data), colspecs=colspecs, header=None)
expected = DataFrame([[123456, 456], [456789, 789]])
tm.assert_frame_equal(result, expected)
def test_fwf_regression(self):
# GH 3594
# turns out 'T060' is parsable as a datetime slice!
tzlist = [1, 10, 20, 30, 60, 80, 100]
ntz = len(tzlist)
tcolspecs = [16] + [8] * ntz
tcolnames = ['SST'] + ["T%03d" % z for z in tzlist[1:]]
data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192
2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869
2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657
2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379
2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039
"""
df = read_fwf(StringIO(data),
index_col=0,
header=None,
names=tcolnames,
widths=tcolspecs,
parse_dates=True,
date_parser=lambda s: datetime.strptime(s, '%Y%j%H%M%S'))
for c in df.columns:
res = df.loc[:, c]
assert len(res)
def test_fwf_for_uint8(self):
data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127
1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa
df = read_fwf(StringIO(data),
colspecs=[(0, 17), (25, 26), (33, 37),
(49, 51), (58, 62), (63, 1000)],
names=['time', 'pri', 'pgn', 'dst', 'src', 'data'],
converters={
'pgn': lambda x: int(x, 16),
'src': lambda x: int(x, 16),
'dst': lambda x: int(x, 16),
'data': lambda x: len(x.split(' '))})
expected = DataFrame([[1421302965.213420, 3, 61184, 23, 40, 8],
[1421302964.226776, 6, 61442, None, 71, 8]],
columns=["time", "pri", "pgn",
"dst", "src", "data"])
expected["dst"] = expected["dst"].astype(object)
tm.assert_frame_equal(df, expected)
def test_fwf_compression(self):
try:
import gzip
import bz2
except ImportError:
pytest.skip("Need gzip and bz2 to run this test")
data = """1111111111
2222222222
3333333333""".strip()
widths = [5, 5]
names = ['one', 'two']
expected = read_fwf(StringIO(data), widths=widths, names=names)
if compat.PY3:
data = bytes(data, encoding='utf-8')
comps = [('gzip', gzip.GzipFile), ('bz2', bz2.BZ2File)]
for comp_name, compresser in comps:
with tm.ensure_clean() as path:
tmp = compresser(path, mode='wb')
tmp.write(data)
tmp.close()
result = read_fwf(path, widths=widths, names=names,
compression=comp_name)
tm.assert_frame_equal(result, expected)
def test_comment_fwf(self):
data = """
1 2. 4 #hello world
5 NaN 10.0
"""
expected = np.array([[1, 2., 4],
[5, np.nan, 10.]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (4, 9), (9, 25)],
comment='#')
tm.assert_almost_equal(df.values, expected)
def test_1000_fwf(self):
data = """
1 2,334.0 5
10 13 10.
"""
expected = np.array([[1, 2334., 5],
[10, 13, 10]])
df = read_fwf(StringIO(data), colspecs=[(0, 3), (3, 11), (12, 16)],
thousands=',')
tm.assert_almost_equal(df.values, expected)
def test_bool_header_arg(self):
# see gh-6114
data = """\
MyColumn
a
b
a
b"""
for arg in [True, False]:
with pytest.raises(TypeError):
read_fwf(StringIO(data), header=arg)
def test_full_file(self):
# File with all values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
2000-01-05T00:00:00 0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0.487094399463 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
2000-01-11T00:00:00 0.157160753327 34 foo"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_missing(self):
# File with missing values
test = """index A B C
2000-01-03T00:00:00 0.980268513777 3 foo
2000-01-04T00:00:00 1.04791624281 -4 bar
0.498580885705 73 baz
2000-01-06T00:00:00 1.12020151869 1 foo
2000-01-07T00:00:00 0 bar
2000-01-10T00:00:00 0.836648671666 2 baz
34"""
colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces(self):
# File with spaces in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 Keanu Reeves 9315.45 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 Jennifer Love Hewitt 0 17000.00 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65 5000.00 2/5/2007
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_full_file_with_spaces_and_missing(self):
# File with spaces and missing values in columns
test = """
Account Name Balance CreditLimit AccountCreated
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00 8/6/2003
868 5/25/1985
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_messed_up_data(self):
# Completely messed up file
test = """
Account Name Balance Credit Limit Account Created
101 10000.00 1/17/1998
312 Gerard Butler 90.00 1000.00
761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006
317 Bill Murray 789.65
""".strip('\r\n')
colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))
expected = read_fwf(StringIO(test), colspecs=colspecs)
tm.assert_frame_equal(expected, read_fwf(StringIO(test)))
def test_multiple_delimiters(self):
test = r"""
col1~~~~~col2 col3++++++++++++++++++col4
~~22.....11.0+++foo~~~~~~~~~~Keanu Reeves
33+++122.33\\\bar.........Gerard Butler
++44~~~~12.01 baz~~Jennifer Love Hewitt
~~55 11+++foo++++Jada Pinkett-Smith
..66++++++.03~~~bar Bill Murray
""".strip('\r\n')
colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))
expected = read_fwf(StringIO(test), colspecs=colspecs,
delimiter=' +~.\\')
tm.assert_frame_equal(expected, read_fwf(StringIO(test),
delimiter=' +~.\\'))
def test_variable_width_unicode(self):
if not compat.PY3:
pytest.skip(
'Bytes-related test - only needs to work on Python 3')
test = """
שלום שלום
ום שלל
של ום
""".strip('\r\n')
expected = read_fwf(BytesIO(test.encode('utf8')),
colspecs=[(0, 4), (5, 9)],
header=None, encoding='utf8')
tm.assert_frame_equal(expected, read_fwf(
BytesIO(test.encode('utf8')), header=None, encoding='utf8'))
def test_dtype(self):
data = """ a b c
1 2 3.2
3 4 5.2
"""
colspecs = [(0, 5), (5, 10), (10, None)]
result = pd.read_fwf(StringIO(data), colspecs=colspecs)
expected = pd.DataFrame({
'a': [1, 3],
'b': [2, 4],
'c': [3.2, 5.2]}, columns=['a', 'b', 'c'])
tm.assert_frame_equal(result, expected)
expected['a'] = expected['a'].astype('float64')
expected['b'] = expected['b'].astype(str)
expected['c'] = expected['c'].astype('int32')
result = pd.read_fwf(StringIO(data), colspecs=colspecs,
dtype={'a': 'float64', 'b': str, 'c': 'int32'})
tm.assert_frame_equal(result, expected)
def test_skiprows_inference(self):
# GH11256
test = """
Text contained in the file header
DataCol1 DataCol2
0.0 1.0
101.6 956.1
""".strip()
expected = read_csv(StringIO(test), skiprows=2,
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=2))
def test_skiprows_by_index_inference(self):
test = """
To be skipped
Not To Be Skipped
Once more to be skipped
123 34 8 123
456 78 9 456
""".strip()
expected = read_csv(StringIO(test), skiprows=[0, 2],
delim_whitespace=True)
tm.assert_frame_equal(expected, read_fwf(
StringIO(test), skiprows=[0, 2]))
def test_skiprows_inference_empty(self):
test = """
AA BBB C
12 345 6
78 901 2
""".strip()
with pytest.raises(EmptyDataError):
read_fwf(StringIO(test), skiprows=3)
def test_whitespace_preservation(self):
# Addresses Issue #16772
data_expected = """
a ,bbb
cc,dd """
expected = read_csv(StringIO(data_expected), header=None)
test_data = """
a bbb
ccdd """
result = read_fwf(StringIO(test_data), widths=[3, 3],
header=None, skiprows=[0], delimiter="\n\t")
tm.assert_frame_equal(result, expected)
def test_default_delimiter(self):
data_expected = """
a,bbb
cc,dd"""
expected = read_csv(StringIO(data_expected), header=None)
test_data = """
a \tbbb
cc\tdd """
result = read_fwf(StringIO(test_data), widths=[3, 3],
header=None, skiprows=[0])
tm.assert_frame_equal(result, expected)
| bsd-3-clause |
esa/pykep | tools/wheel_setup.py | 2 | 3039 | from setuptools import setup
from setuptools.dist import Distribution
from distutils import util
import sys
NAME = 'pykep'
VERSION = '@pykep_VERSION@'
DESCRIPTION = 'Basic space flight mechanics computations mostly based on perturbed Keplerian dynamics'
LONG_DESCRIPTION = 'pykep is a scientific library providing basic space flight mechanics computations mostly based on perturbed Keplerian dynamics.'
URL = 'https://github.com/esa/pykep'
AUTHOR = 'Dario Izzo'
AUTHOR_EMAIL = 'dario.izzo@gmail.com'
LICENSE = 'GPLv3+/LGPL3+'
INSTALL_REQUIRES = [
'numba',
'numpy',
'matplotlib',
'pygmo',
'pygmo_plugins_nonfree',
'scipy',
'sklearn',
]
CLASSIFIERS = [
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 4 - Beta',
'Operating System :: OS Independent',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Astronomy',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Physics',
'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
'License :: OSI Approved :: GNU Lesser General Public License v3 or later (LGPLv3+)',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3'
]
KEYWORDS = 'space keplerian math physics interplanetary'
PLATFORMS = ['Unix', 'Windows', 'OSX']
class BinaryDistribution(Distribution):
    def has_ext_modules(self):
return True
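# Note: returning True from has_ext_modules marks the distribution as non-pure,
# so bdist_wheel produces a platform-specific wheel for the bundled binary
# extensions instead of a universal/pure-Python one.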
# Setup the list of external dlls and other data files.
import os.path
PYKEP_UTIL_FILES = ['gravity_models/*/*txt']
if os.name == 'nt':
mingw_wheel_libs = 'mingw_wheel_libs_python{}{}.txt'.format(
sys.version_info[0], sys.version_info[1])
l = open(mingw_wheel_libs, 'r').readlines()
DLL_LIST = [os.path.basename(_[:-1]) for _ in l]
PACKAGE_DATA = {
'pykep.core': ['core.pyd'] + DLL_LIST,
'pykep.planet': ['planet.pyd'],
'pykep.sims_flanagan': ['sims_flanagan.pyd'],
'pykep.util': ['util.pyd'] + PYKEP_UTIL_FILES
}
else:
PACKAGE_DATA = {
'pykep.core': ['core.so'],
'pykep.planet': ['planet.so'],
'pykep.sims_flanagan': ['sims_flanagan.so'],
'pykep.util': ['util.so'] + PYKEP_UTIL_FILES
}
setup(name=NAME,
version=VERSION,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=URL,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
classifiers=CLASSIFIERS,
keywords=KEYWORDS,
platforms=PLATFORMS,
install_requires=INSTALL_REQUIRES,
packages=['pykep', 'pykep.core', 'pykep.examples', 'pykep.orbit_plots', 'pykep.phasing',
'pykep.planet', 'pykep.sims_flanagan', 'pykep.pontryagin', 'pykep.trajopt', 'pykep.trajopt.gym', 'pykep.util'],
# Include pre-compiled extension
package_data=PACKAGE_DATA,
distclass=BinaryDistribution)
| gpl-3.0 |
jonaslandsgesell/espresso | samples/python/electrophoresis.py | 4 | 8573 | #
# Copyright (C) 2013,2014 The ESPResSo project
#
# This file is part of ESPResSo.
#
# ESPResSo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function
import espressomd
from espressomd import code_info
from espressomd import thermostat
from espressomd import interactions
from espressomd import electrostatics
import sys
import numpy as np
try:
import cPickle as pickle
except ImportError:
import pickle
import os
print(code_info.features())
# Seed
#############################################################
np.random.seed(42)
# System parameters
#############################################################
system = espressomd.System()
system.time_step = 0.01
system.cell_system.skin = 0.4
system.box_l = [100, 100, 100]
system.periodicity = [1,1,1]
system.thermostat.set_langevin(kT=1.0, gamma=1.0)
# system.cell_system.set_n_square(use_verlet_lists=False)
system.cell_system.max_num_cells = 2744
# Non-bonded interactions
###############################################################
# WCA between monomers
system.non_bonded_inter[0, 0].lennard_jones.set_params(
epsilon=1, sigma=1,
cutoff=2**(1. / 6), shift="auto")
# WCA counterions - polymer
system.non_bonded_inter[0, 1].lennard_jones.set_params(
epsilon=1, sigma=1,
cutoff=2**(1. / 6), shift="auto")
# WCA coions - polymer
system.non_bonded_inter[0, 2].lennard_jones.set_params(
epsilon=1, sigma=1,
cutoff=2**(1. / 6), shift="auto")
# WCA between ions
system.non_bonded_inter[1, 2].lennard_jones.set_params(
epsilon=1, sigma=1,
cutoff=2**(1. / 6), shift="auto")
# Bonded interactions
################################################################
# fene = interactions.FeneBond(k=10, d_r_max=2)
# system.bonded_inter.add(fene)
harmonic = interactions.HarmonicBond(k=10, r_0=2)
harmonicangle = interactions.Angle_Harmonic(bend=10, phi0=np.pi)
system.bonded_inter.add(harmonic)
system.bonded_inter.add(harmonicangle)
# Create Monomer beads and bonds
#########################################################################################
n_monomers = 20
init_polymer_pos=np.dstack((np.arange(n_monomers),np.zeros(n_monomers),np.zeros(n_monomers)))[0]+np.array([system.box_l[0]/2-n_monomers/2, system.box_l[1]/2, system.box_l[2]/2])
system.part.add(pos=init_polymer_pos)
system.part[:-1].add_bond((harmonic, np.arange(n_monomers)[1:]))
system.part[1:-1].add_bond((harmonicangle, np.arange(n_monomers)[:-2], np.arange(n_monomers)[2:]))
# Particle creation with loops:
# for i in range(n_monomers):
# if i > 0:
# system.part[i].add_bond((harmonic, i - 1))
# for i in range(1,n_monomers-1):
# system.part[i].add_bond((harmonicangle,i - 1, i + 1))
system.part[:n_monomers].q = -np.ones(n_monomers)
# Create counterions
###################################################################
system.part.add(pos=np.random.random((n_monomers,3)) * system.box_l,
q=1,
type=1)
# Create ions
###############################################################
n_ions = 100
system.part.add(pos=np.random.random((n_ions,3)) * system.box_l,
q=np.hstack((np.ones(n_ions/2),-np.ones(n_ions/2))),
type=np.array(np.hstack((np.ones(n_ions/2),2*np.ones(n_ions/2))),dtype=int))
# Sign charges to particles after the particle creation:
# system.part[2*n_monomers:2*n_monomers+n_ions/2] = np.ones(n_ions/2)
# system.part[2*n_monomers+n_ions/2:] = -np.ones(n_ions/2)
print("types:", system.part[:].type)
print("")
print("Q_tot:", np.sum(system.part[:].q))
#############################################################
# Warmup #
#############################################################
system.non_bonded_inter.set_force_cap(10)
for i in range(1000):
sys.stdout.write("\rWarmup: %03i"%i)
sys.stdout.flush()
system.integrator.run(steps=1)
system.non_bonded_inter.set_force_cap(10*i)
system.non_bonded_inter.set_force_cap(0)
print("\nWarmup finished!\n")
#############################################################
# Sampling #
#############################################################
#
# Activate electrostatics with checkpoint example
#############################################################
read_checkpoint = False
# Load checkpointed p3m class
if os.path.isfile("p3m_checkpoint") and read_checkpoint == True:
print("reading p3m from file")
p3m = pickle.load(open("p3m_checkpoint","r"))
else:
p3m = electrostatics.P3M(bjerrum_length=1.0, accuracy=1e-2)
print("Tuning P3M")
system.actors.add(p3m)
# Checkpoint AFTER tuning (adding method to actors)
pickle.dump(p3m,open("p3m_checkpoint","w"),-1)
print("P3M parameter:\n")
p3m_params = p3m.get_params()
for key in list(p3m_params.keys()):
print("{} = {}".format(key, p3m_params[key]))
print(system.actors)
# Apply external Force
#############################################################
n_part = len(system.part)
system.part[:].ext_force = np.dstack((system.part[:].q * np.ones(n_part), np.zeros(n_part), np.zeros(n_part)))[0]
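# The applied "field" is E = 1.0 (reduced units) along x, so each particle feels
# F = q * E; the mobility computed later as COM_v.mean() / 1.0 assumes this same E.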
# print(system.part[:].ext_force)
# Activate LB
############################################################
# lbf = lb.LBF(dens=1, tau=0.01, visc=1, fric=1, agrid=1)
# system.actors.add(lbf)
# Data arrays
v_list = []
pos_list = []
# Sampling Loop
for i in range(4000):
sys.stdout.write("\rSampling: %04i"%i)
sys.stdout.flush()
system.integrator.run(steps=1)
v_list.append(system.part[:n_monomers].v)
pos_list.append(system.part[:n_monomers].pos)
    # other observables:
print("\nSampling finished!\n")
# Data evaluation
############################################################
# Convert data to numpy arrays
# shape = [time_step, monomer, coordinate]!
v_list = np.array(v_list)
pos_list = np.array(pos_list)
# Calculate COM and COM velocity
COM = pos_list.sum(axis=1)/n_monomers
COM_v = (COM[1:] - COM[:-1])/system.time_step
# Calculate the Mobility mu = v/E
##################################
mu = COM_v.mean()/1.0
print("MOBILITY", mu)
# Calculate the Persistence length
# fits better for longer sampling
##################################
# this calculation method requires
# numpy 1.10 or higher
if float(np.version.version.split(".")[1]) >= 10:
from scipy.optimize import curve_fit
from numpy.linalg import norm
# First get bond vectors
bond_vec = pos_list[:,1:,:] - pos_list[:,:-1,:]
bond_abs = norm(bond_vec, axis=2, keepdims=True)
bond_abs_avg = bond_abs.mean(axis=0)[:,0]
c_length = bond_abs_avg
for i in range(1,len(bond_abs_avg)):
c_length[i] += c_length[i-1]
bv_norm = bond_vec / bond_abs
bv_zero = np.empty_like(bv_norm)
for i in range(bv_zero.shape[1]):
bv_zero[:,i,:] = bv_norm[:,0,:]
# Calculate <cos(theta)>
cos_theta = (bv_zero*bv_norm).sum(axis=2).mean(axis=0)
def decay(x,lp):
return np.exp(-x/lp)
fit,_ = curve_fit(decay, c_length, cos_theta)
print(c_length.shape, cos_theta.shape)
print("PERSISTENCE LENGTH", fit[0])
# Plot Results
############################################################
import matplotlib.pyplot as pp
direction = ["x", "y", "z"]
fig1=pp.figure()
ax=fig1.add_subplot(111)
for i in range(3):
ax.plot(COM[:-500,i], label="COM pos %s" %direction[i])
ax.legend(loc="best")
ax.set_xlabel("time_step")
ax.set_ylabel("r")
fig2=pp.figure()
ax=fig2.add_subplot(111)
for i in range(3):
ax.plot(COM_v[:-500,i], label="COM v %s" %direction[i])
ax.legend(loc="best")
ax.set_xlabel("time_step")
ax.set_ylabel("v")
if float(np.version.version.split(".")[1]) >= 10:
fig3=pp.figure()
ax=fig3.add_subplot(111)
ax.plot(c_length, cos_theta, label="sim data")
ax.plot(c_length, decay(c_length, fit[0]), label="fit")
ax.legend(loc="best")
ax.set_xlabel("contour length")
ax.set_ylabel("<cos(theta)>")
pp.show()
print("\nJob finished!\n")
| gpl-3.0 |
rohanp/scikit-learn | examples/semi_supervised/plot_label_propagation_versus_svm_iris.py | 286 | 2378 | """
=====================================================================
Decision boundary of label propagation versus SVM on the Iris dataset
=====================================================================
Comparison of the decision boundaries generated on the iris dataset
by Label Propagation and SVM.
This demonstrates Label Propagation learning a good boundary
even with a small amount of labeled data.
"""
print(__doc__)
# Authors: Clay Woolam <clay@woolam.org>
# Licence: BSD
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn import svm
from sklearn.semi_supervised import label_propagation
rng = np.random.RandomState(0)
iris = datasets.load_iris()
X = iris.data[:, :2]
y = iris.target
# step size in the mesh
h = .02
y_30 = np.copy(y)
y_30[rng.rand(len(y)) < 0.3] = -1
y_50 = np.copy(y)
y_50[rng.rand(len(y)) < 0.5] = -1
# we create an instance of SVM and fit our data. We do not scale our
# data since we want to plot the support vectors
ls30 = (label_propagation.LabelSpreading().fit(X, y_30),
y_30)
ls50 = (label_propagation.LabelSpreading().fit(X, y_50),
y_50)
ls100 = (label_propagation.LabelSpreading().fit(X, y), y)
rbf_svc = (svm.SVC(kernel='rbf').fit(X, y), y)
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# title for the plots
titles = ['Label Spreading 30% data',
'Label Spreading 50% data',
'Label Spreading 100% data',
'SVC with rbf kernel']
color_map = {-1: (1, 1, 1), 0: (0, 0, .9), 1: (1, 0, 0), 2: (.8, .6, 0)}
for i, (clf, y_train) in enumerate((ls30, ls50, ls100, rbf_svc)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(2, 2, i + 1)
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
colors = [color_map[y] for y in y_train]
plt.scatter(X[:, 0], X[:, 1], c=colors, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.text(.90, 0, "Unlabeled points are colored white")
plt.show()
| bsd-3-clause |
myfleetingtime/spark2.11_bingo | python/pyspark/sql/context.py | 11 | 23630 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import print_function
import sys
import warnings
if sys.version >= '3':
basestring = unicode = str
from pyspark import since
from pyspark.rdd import ignore_unicode_prefix
from pyspark.sql.session import _monkey_patch_RDD, SparkSession
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.readwriter import DataFrameReader
from pyspark.sql.streaming import DataStreamReader
from pyspark.sql.types import IntegerType, Row, StringType
from pyspark.sql.utils import install_exception_handler
__all__ = ["SQLContext", "HiveContext", "UDFRegistration"]
class SQLContext(object):
"""The entry point for working with structured data (rows and columns) in Spark, in Spark 1.x.
As of Spark 2.0, this is replaced by :class:`SparkSession`. However, we are keeping the class
here for backward compatibility.
    A SQLContext can be used to create :class:`DataFrame`, register :class:`DataFrame` as
tables, execute SQL over tables, cache tables, and read parquet files.
:param sparkContext: The :class:`SparkContext` backing this SQLContext.
:param sparkSession: The :class:`SparkSession` around which this SQLContext wraps.
:param jsqlContext: An optional JVM Scala SQLContext. If set, we do not instantiate a new
SQLContext in the JVM, instead we make all calls to this object.
"""
_instantiatedContext = None
@ignore_unicode_prefix
def __init__(self, sparkContext, sparkSession=None, jsqlContext=None):
"""Creates a new SQLContext.
>>> from datetime import datetime
>>> sqlContext = SQLContext(sc)
>>> allTypes = sc.parallelize([Row(i=1, s="string", d=1.0, l=1,
... b=True, list=[1, 2, 3], dict={"s": 0}, row=Row(a=1),
... time=datetime(2014, 8, 1, 14, 1, 5))])
>>> df = allTypes.toDF()
>>> df.createOrReplaceTempView("allTypes")
>>> sqlContext.sql('select i+1, d+1, not b, list[1], dict["s"], time, row.a '
... 'from allTypes where b and i > 0').collect()
[Row((i + CAST(1 AS BIGINT))=2, (d + CAST(1 AS DOUBLE))=2.0, (NOT b)=False, list[1]=2, \
dict[s]=0, time=datetime.datetime(2014, 8, 1, 14, 1, 5), a=1)]
>>> df.rdd.map(lambda x: (x.i, x.s, x.d, x.l, x.b, x.time, x.row.a, x.list)).collect()
[(1, u'string', 1.0, 1, True, datetime.datetime(2014, 8, 1, 14, 1, 5), 1, [1, 2, 3])]
"""
self._sc = sparkContext
self._jsc = self._sc._jsc
self._jvm = self._sc._jvm
if sparkSession is None:
sparkSession = SparkSession(sparkContext)
if jsqlContext is None:
jsqlContext = sparkSession._jwrapped
self.sparkSession = sparkSession
self._jsqlContext = jsqlContext
_monkey_patch_RDD(self.sparkSession)
install_exception_handler()
if SQLContext._instantiatedContext is None:
SQLContext._instantiatedContext = self
@property
def _ssql_ctx(self):
"""Accessor for the JVM Spark SQL context.
Subclasses can override this property to provide their own
JVM Contexts.
"""
return self._jsqlContext
@classmethod
@since(1.6)
def getOrCreate(cls, sc):
"""
Get the existing SQLContext or create a new one with given SparkContext.
:param sc: SparkContext
"""
if cls._instantiatedContext is None:
jsqlContext = sc._jvm.SQLContext.getOrCreate(sc._jsc.sc())
sparkSession = SparkSession(sc, jsqlContext.sparkSession())
cls(sc, sparkSession, jsqlContext)
return cls._instantiatedContext
@since(1.6)
def newSession(self):
"""
Returns a new SQLContext as new session, that has separate SQLConf,
registered temporary views and UDFs, but shared SparkContext and
table cache.
"""
return self.__class__(self._sc, self.sparkSession.newSession())
@since(1.3)
def setConf(self, key, value):
"""Sets the given Spark SQL configuration property.
"""
self.sparkSession.conf.set(key, value)
@ignore_unicode_prefix
@since(1.3)
def getConf(self, key, defaultValue=None):
"""Returns the value of Spark SQL configuration property for the given key.
If the key is not set and defaultValue is not None, return
defaultValue. If the key is not set and defaultValue is None, return
the system default value.
>>> sqlContext.getConf("spark.sql.shuffle.partitions")
u'200'
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'10'
>>> sqlContext.setConf("spark.sql.shuffle.partitions", u"50")
>>> sqlContext.getConf("spark.sql.shuffle.partitions", u"10")
u'50'
"""
return self.sparkSession.conf.get(key, defaultValue)
@property
@since("1.3.1")
def udf(self):
"""Returns a :class:`UDFRegistration` for UDF registration.
:return: :class:`UDFRegistration`
"""
return UDFRegistration(self)
@since(1.4)
def range(self, start, end=None, step=1, numPartitions=None):
"""
Create a :class:`DataFrame` with single :class:`pyspark.sql.types.LongType` column named
``id``, containing elements in a range from ``start`` to ``end`` (exclusive) with
step value ``step``.
:param start: the start value
:param end: the end value (exclusive)
:param step: the incremental step (default: 1)
:param numPartitions: the number of partitions of the DataFrame
:return: :class:`DataFrame`
>>> sqlContext.range(1, 7, 2).collect()
[Row(id=1), Row(id=3), Row(id=5)]
If only one argument is specified, it will be used as the end value.
>>> sqlContext.range(3).collect()
[Row(id=0), Row(id=1), Row(id=2)]
"""
return self.sparkSession.range(start, end, step, numPartitions)
@ignore_unicode_prefix
@since(1.2)
def registerFunction(self, name, f, returnType=StringType()):
"""Registers a python function (including lambda function) as a UDF
so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
        When the return type is not given it defaults to a string and conversion will automatically
be done. For any other return type, the produced object must match the specified type.
:param name: name of the UDF
:param f: python function
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerFunction("stringLengthString", lambda x: len(x))
>>> sqlContext.sql("SELECT stringLengthString('test')").collect()
[Row(stringLengthString(test)=u'4')]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.registerFunction("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
>>> from pyspark.sql.types import IntegerType
>>> sqlContext.udf.register("stringLengthInt", lambda x: len(x), IntegerType())
>>> sqlContext.sql("SELECT stringLengthInt('test')").collect()
[Row(stringLengthInt(test)=4)]
"""
self.sparkSession.catalog.registerFunction(name, f, returnType)
@ignore_unicode_prefix
@since(2.1)
def registerJavaFunction(self, name, javaClassName, returnType=None):
"""Register a java UDF so it can be used in SQL statements.
In addition to a name and the function itself, the return type can be optionally specified.
When the return type is not specified we would infer it via reflection.
:param name: name of the UDF
:param javaClassName: fully qualified name of java class
:param returnType: a :class:`pyspark.sql.types.DataType` object
>>> sqlContext.registerJavaFunction("javaStringLength",
... "test.org.apache.spark.sql.JavaStringLength", IntegerType())
>>> sqlContext.sql("SELECT javaStringLength('test')").collect()
[Row(UDF(test)=4)]
>>> sqlContext.registerJavaFunction("javaStringLength2",
... "test.org.apache.spark.sql.JavaStringLength")
>>> sqlContext.sql("SELECT javaStringLength2('test')").collect()
[Row(UDF(test)=4)]
"""
jdt = None
if returnType is not None:
jdt = self.sparkSession._jsparkSession.parseDataType(returnType.json())
self.sparkSession._jsparkSession.udf().registerJava(name, javaClassName, jdt)
# TODO(andrew): delete this once we refactor things to take in SparkSession
def _inferSchema(self, rdd, samplingRatio=None):
"""
Infer schema from an RDD of Row or tuple.
:param rdd: an RDD of Row or tuple
:param samplingRatio: sampling ratio, or no sampling (default)
:return: :class:`pyspark.sql.types.StructType`
"""
return self.sparkSession._inferSchema(rdd, samplingRatio)
@since(1.3)
@ignore_unicode_prefix
def createDataFrame(self, data, schema=None, samplingRatio=None, verifySchema=True):
"""
Creates a :class:`DataFrame` from an :class:`RDD`, a list or a :class:`pandas.DataFrame`.
When ``schema`` is a list of column names, the type of each column
will be inferred from ``data``.
When ``schema`` is ``None``, it will try to infer the schema (column names and types)
from ``data``, which should be an RDD of :class:`Row`,
or :class:`namedtuple`, or :class:`dict`.
When ``schema`` is :class:`pyspark.sql.types.DataType` or a datatype string it must match
the real data, or an exception will be thrown at runtime. If the given schema is not
:class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` as its only field, and the field name will be "value",
each record will also be wrapped into a tuple, which can be converted to row later.
        If schema inference is needed, ``samplingRatio`` is used to determine the ratio of
rows used for schema inference. The first row will be used if ``samplingRatio`` is ``None``.
:param data: an RDD of any kind of SQL data representation(e.g. :class:`Row`,
:class:`tuple`, ``int``, ``boolean``, etc.), or :class:`list`, or
:class:`pandas.DataFrame`.
:param schema: a :class:`pyspark.sql.types.DataType` or a datatype string or a list of
column names, default is None. The data type string format equals to
:class:`pyspark.sql.types.DataType.simpleString`, except that top level struct type can
omit the ``struct<>`` and atomic types use ``typeName()`` as their format, e.g. use
``byte`` instead of ``tinyint`` for :class:`pyspark.sql.types.ByteType`.
We can also use ``int`` as a short name for :class:`pyspark.sql.types.IntegerType`.
:param samplingRatio: the sample ratio of rows used for inferring
:param verifySchema: verify data types of every row against schema.
:return: :class:`DataFrame`
.. versionchanged:: 2.0
The ``schema`` parameter can be a :class:`pyspark.sql.types.DataType` or a
datatype string after 2.0.
If it's not a :class:`pyspark.sql.types.StructType`, it will be wrapped into a
:class:`pyspark.sql.types.StructType` and each record will also be wrapped into a tuple.
.. versionchanged:: 2.1
Added verifySchema.
>>> l = [('Alice', 1)]
>>> sqlContext.createDataFrame(l).collect()
[Row(_1=u'Alice', _2=1)]
>>> sqlContext.createDataFrame(l, ['name', 'age']).collect()
[Row(name=u'Alice', age=1)]
>>> d = [{'name': 'Alice', 'age': 1}]
>>> sqlContext.createDataFrame(d).collect()
[Row(age=1, name=u'Alice')]
>>> rdd = sc.parallelize(l)
>>> sqlContext.createDataFrame(rdd).collect()
[Row(_1=u'Alice', _2=1)]
>>> df = sqlContext.createDataFrame(rdd, ['name', 'age'])
>>> df.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql import Row
>>> Person = Row('name', 'age')
>>> person = rdd.map(lambda r: Person(*r))
>>> df2 = sqlContext.createDataFrame(person)
>>> df2.collect()
[Row(name=u'Alice', age=1)]
>>> from pyspark.sql.types import *
>>> schema = StructType([
... StructField("name", StringType(), True),
... StructField("age", IntegerType(), True)])
>>> df3 = sqlContext.createDataFrame(rdd, schema)
>>> df3.collect()
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(df.toPandas()).collect() # doctest: +SKIP
[Row(name=u'Alice', age=1)]
>>> sqlContext.createDataFrame(pandas.DataFrame([[1, 2]])).collect() # doctest: +SKIP
[Row(0=1, 1=2)]
>>> sqlContext.createDataFrame(rdd, "a: string, b: int").collect()
[Row(a=u'Alice', b=1)]
>>> rdd = rdd.map(lambda row: row[1])
>>> sqlContext.createDataFrame(rdd, "int").collect()
[Row(value=1)]
>>> sqlContext.createDataFrame(rdd, "boolean").collect() # doctest: +IGNORE_EXCEPTION_DETAIL
Traceback (most recent call last):
...
Py4JJavaError: ...
"""
return self.sparkSession.createDataFrame(data, schema, samplingRatio, verifySchema)
@since(1.3)
def registerDataFrameAsTable(self, df, tableName):
"""Registers the given :class:`DataFrame` as a temporary table in the catalog.
Temporary tables exist only during the lifetime of this instance of :class:`SQLContext`.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
"""
df.createOrReplaceTempView(tableName)
@since(1.6)
def dropTempTable(self, tableName):
""" Remove the temp table from catalog.
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> sqlContext.dropTempTable("table1")
"""
self.sparkSession.catalog.dropTempView(tableName)
@since(1.3)
def createExternalTable(self, tableName, path=None, source=None, schema=None, **options):
"""Creates an external table based on the dataset in a data source.
It returns the DataFrame associated with the external table.
The data source is specified by the ``source`` and a set of ``options``.
If ``source`` is not specified, the default data source configured by
``spark.sql.sources.default`` will be used.
Optionally, a schema can be provided as the schema of the returned :class:`DataFrame` and
created external table.
:return: :class:`DataFrame`
"""
return self.sparkSession.catalog.createExternalTable(
tableName, path, source, schema, **options)
@ignore_unicode_prefix
@since(1.0)
def sql(self, sqlQuery):
"""Returns a :class:`DataFrame` representing the result of the given query.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.sql("SELECT field1 AS f1, field2 as f2 from table1")
>>> df2.collect()
[Row(f1=1, f2=u'row1'), Row(f1=2, f2=u'row2'), Row(f1=3, f2=u'row3')]
"""
return self.sparkSession.sql(sqlQuery)
@since(1.0)
def table(self, tableName):
"""Returns the specified table as a :class:`DataFrame`.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.table("table1")
>>> sorted(df.collect()) == sorted(df2.collect())
True
"""
return self.sparkSession.table(tableName)
@ignore_unicode_prefix
@since(1.3)
def tables(self, dbName=None):
"""Returns a :class:`DataFrame` containing names of tables in the given database.
If ``dbName`` is not specified, the current database will be used.
        The returned DataFrame has three columns: ``database``, ``tableName`` and ``isTemporary``
(a column with :class:`BooleanType` indicating if a table is a temporary one or not).
:param dbName: string, name of the database to use.
:return: :class:`DataFrame`
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> df2 = sqlContext.tables()
>>> df2.filter("tableName = 'table1'").first()
Row(database=u'', tableName=u'table1', isTemporary=True)
"""
if dbName is None:
return DataFrame(self._ssql_ctx.tables(), self)
else:
return DataFrame(self._ssql_ctx.tables(dbName), self)
@since(1.3)
def tableNames(self, dbName=None):
"""Returns a list of names of tables in the database ``dbName``.
:param dbName: string, name of the database to use. Default to the current database.
:return: list of table names, in string
>>> sqlContext.registerDataFrameAsTable(df, "table1")
>>> "table1" in sqlContext.tableNames()
True
>>> "table1" in sqlContext.tableNames("default")
True
"""
if dbName is None:
return [name for name in self._ssql_ctx.tableNames()]
else:
return [name for name in self._ssql_ctx.tableNames(dbName)]
@since(1.0)
def cacheTable(self, tableName):
"""Caches the specified table in-memory."""
self._ssql_ctx.cacheTable(tableName)
@since(1.0)
def uncacheTable(self, tableName):
"""Removes the specified table from the in-memory cache."""
self._ssql_ctx.uncacheTable(tableName)
@since(1.3)
def clearCache(self):
"""Removes all cached tables from the in-memory cache. """
self._ssql_ctx.clearCache()
@property
@since(1.4)
def read(self):
"""
Returns a :class:`DataFrameReader` that can be used to read data
in as a :class:`DataFrame`.
:return: :class:`DataFrameReader`
"""
return DataFrameReader(self)
@property
@since(2.0)
def readStream(self):
"""
Returns a :class:`DataStreamReader` that can be used to read data streams
as a streaming :class:`DataFrame`.
.. note:: Experimental.
:return: :class:`DataStreamReader`
>>> text_sdf = sqlContext.readStream.text(tempfile.mkdtemp())
>>> text_sdf.isStreaming
True
"""
return DataStreamReader(self)
@property
@since(2.0)
def streams(self):
"""Returns a :class:`StreamingQueryManager` that allows managing all the
        :class:`StreamingQuery` instances active on `this` context.
.. note:: Experimental.
"""
from pyspark.sql.streaming import StreamingQueryManager
return StreamingQueryManager(self._ssql_ctx.streams())
class HiveContext(SQLContext):
"""A variant of Spark SQL that integrates with data stored in Hive.
Configuration for Hive is read from ``hive-site.xml`` on the classpath.
It supports running both SQL and HiveQL commands.
:param sparkContext: The SparkContext to wrap.
:param jhiveContext: An optional JVM Scala HiveContext. If set, we do not instantiate a new
:class:`HiveContext` in the JVM, instead we make all calls to this object.
.. note:: Deprecated in 2.0.0. Use SparkSession.builder.enableHiveSupport().getOrCreate().
"""
def __init__(self, sparkContext, jhiveContext=None):
warnings.warn(
"HiveContext is deprecated in Spark 2.0.0. Please use " +
"SparkSession.builder.enableHiveSupport().getOrCreate() instead.",
DeprecationWarning)
if jhiveContext is None:
sparkSession = SparkSession.builder.enableHiveSupport().getOrCreate()
else:
sparkSession = SparkSession(sparkContext, jhiveContext.sparkSession())
SQLContext.__init__(self, sparkContext, sparkSession, jhiveContext)
@classmethod
def _createForTesting(cls, sparkContext):
"""(Internal use only) Create a new HiveContext for testing.
All test code that touches HiveContext *must* go through this method. Otherwise,
you may end up launching multiple derby instances and encounter with incredibly
confusing error messages.
"""
jsc = sparkContext._jsc.sc()
jtestHive = sparkContext._jvm.org.apache.spark.sql.hive.test.TestHiveContext(jsc, False)
return cls(sparkContext, jtestHive)
def refreshTable(self, tableName):
"""Invalidate and refresh all the cached the metadata of the given
table. For performance reasons, Spark SQL or the external data source
library it uses might cache certain metadata about a table, such as the
location of blocks. When those change outside of Spark SQL, users should
call this function to invalidate the cache.
"""
self._ssql_ctx.refreshTable(tableName)
class UDFRegistration(object):
"""Wrapper for user-defined function registration."""
def __init__(self, sqlContext):
self.sqlContext = sqlContext
def register(self, name, f, returnType=StringType()):
return self.sqlContext.registerFunction(name, f, returnType)
register.__doc__ = SQLContext.registerFunction.__doc__
def _test():
import os
import doctest
import tempfile
from pyspark.context import SparkContext
from pyspark.sql import Row, SQLContext
import pyspark.sql.context
os.chdir(os.environ["SPARK_HOME"])
globs = pyspark.sql.context.__dict__.copy()
sc = SparkContext('local[4]', 'PythonTest')
globs['tempfile'] = tempfile
globs['os'] = os
globs['sc'] = sc
globs['sqlContext'] = SQLContext(sc)
globs['rdd'] = rdd = sc.parallelize(
[Row(field1=1, field2="row1"),
Row(field1=2, field2="row2"),
Row(field1=3, field2="row3")]
)
globs['df'] = rdd.toDF()
jsonStrings = [
'{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
'{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
'"field6":[{"field7": "row2"}]}',
'{"field1" : null, "field2": "row3", '
'"field3":{"field4":33, "field5": []}}'
]
globs['jsonStrings'] = jsonStrings
globs['json'] = sc.parallelize(jsonStrings)
(failure_count, test_count) = doctest.testmod(
pyspark.sql.context, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
globs['sc'].stop()
if failure_count:
exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
quheng/scikit-learn | examples/plot_johnson_lindenstrauss_bound.py | 127 | 7477 | r"""
=====================================================================
The Johnson-Lindenstrauss bound for embedding with random projections
=====================================================================
The `Johnson-Lindenstrauss lemma`_ states that any high dimensional
dataset can be randomly projected into a lower dimensional Euclidean
space while controlling the distortion in the pairwise distances.
.. _`Johnson-Lindenstrauss lemma`: http://en.wikipedia.org/wiki/Johnson%E2%80%93Lindenstrauss_lemma
Theoretical bounds
==================
The distortion introduced by a random projection `p` is asserted by
the fact that `p` defines an eps-embedding with good probability
as defined by:
.. math::
(1 - eps) \|u - v\|^2 < \|p(u) - p(v)\|^2 < (1 + eps) \|u - v\|^2
Where u and v are any rows taken from a dataset of shape [n_samples,
n_features] and p is a projection by a random Gaussian N(0, 1) matrix
with shape [n_components, n_features] (or a sparse Achlioptas matrix).
The minimum number of components that guarantees the eps-embedding is
given by:
.. math::
n\_components >= 4 log(n\_samples) / (eps^2 / 2 - eps^3 / 3)
The first plot shows that with an increasing number of samples ``n_samples``,
the minimal number of dimensions ``n_components`` increases logarithmically
in order to guarantee an ``eps``-embedding.
The second plot shows that increasing the admissible
distortion ``eps`` drastically reduces the minimal number of
dimensions ``n_components`` for a given number of samples ``n_samples``.
Empirical validation
====================
We validate the above bounds on the digits dataset or on the 20 newsgroups
text document (TF-IDF word frequencies) dataset:
- for the digits dataset, some 8x8 gray level pixels data for 500
handwritten digits pictures are randomly projected to spaces for various
larger number of dimensions ``n_components``.
- for the 20 newsgroups dataset some 500 documents with 100k
features in total are projected using a sparse random matrix to smaller
euclidean spaces with various values for the target number of dimensions
``n_components``.
The default dataset is the digits dataset. To run the example on the twenty
newsgroups dataset, pass the --twenty-newsgroups command line argument to this
script.
For each value of ``n_components``, we plot:
- 2D distribution of sample pairs with pairwise distances in original
and projected spaces as x and y axis respectively.
- 1D histogram of the ratio of those distances (projected / original).
We can see that for low values of ``n_components`` the distribution is wide
with many distorted pairs and a skewed distribution (due to the hard
limit of zero ratio on the left as distances are always positives)
while for larger values of n_components the distortion is controlled
and the distances are well preserved by the random projection.
Remarks
=======
According to the JL lemma, projecting 500 samples without too much distortion
will require at least several thousand dimensions, irrespective of the
number of features of the original dataset.
Hence using random projections on the digits dataset which only has 64 features
in the input space does not make sense: it does not allow for dimensionality
reduction in this case.
On the twenty newsgroups on the other hand the dimensionality can be decreased
from 56436 down to 10000 while reasonably preserving pairwise distances.
"""
print(__doc__)
import sys
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.random_projection import johnson_lindenstrauss_min_dim
from sklearn.random_projection import SparseRandomProjection
from sklearn.datasets import fetch_20newsgroups_vectorized
from sklearn.datasets import load_digits
from sklearn.metrics.pairwise import euclidean_distances
# Part 1: plot the theoretical dependency between n_components_min and
# n_samples
# range of admissible distortions
eps_range = np.linspace(0.1, 0.99, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(eps_range)))
# range of number of samples (observation) to embed
n_samples_range = np.logspace(1, 9, 9)
plt.figure()
for eps, color in zip(eps_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples_range, eps=eps)
plt.loglog(n_samples_range, min_n_components, color=color)
plt.legend(["eps = %0.1f" % eps for eps in eps_range], loc="lower right")
plt.xlabel("Number of observations to eps-embed")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_samples vs n_components")
# range of admissible distortions
eps_range = np.linspace(0.01, 0.99, 100)
# range of number of samples (observation) to embed
n_samples_range = np.logspace(2, 6, 5)
colors = plt.cm.Blues(np.linspace(0.3, 1.0, len(n_samples_range)))
plt.figure()
for n_samples, color in zip(n_samples_range, colors):
min_n_components = johnson_lindenstrauss_min_dim(n_samples, eps=eps_range)
plt.semilogy(eps_range, min_n_components, color=color)
plt.legend(["n_samples = %d" % n for n in n_samples_range], loc="upper right")
plt.xlabel("Distortion eps")
plt.ylabel("Minimum number of dimensions")
plt.title("Johnson-Lindenstrauss bounds:\nn_components vs eps")
# Part 2: perform sparse random projection of some digits images which are
# quite low dimensional and dense or documents of the 20 newsgroups dataset
# which is both high dimensional and sparse
if '--twenty-newsgroups' in sys.argv:
# Need an internet connection hence not enabled by default
data = fetch_20newsgroups_vectorized().data[:500]
else:
data = load_digits().data[:500]
n_samples, n_features = data.shape
print("Embedding %d samples with dim %d using various random projections"
% (n_samples, n_features))
n_components_range = np.array([300, 1000, 10000])
dists = euclidean_distances(data, squared=True).ravel()
# select only non-identical samples pairs
nonzero = dists != 0
dists = dists[nonzero]
for n_components in n_components_range:
t0 = time()
rp = SparseRandomProjection(n_components=n_components)
projected_data = rp.fit_transform(data)
print("Projected %d samples from %d to %d in %0.3fs"
% (n_samples, n_features, n_components, time() - t0))
if hasattr(rp, 'components_'):
n_bytes = rp.components_.data.nbytes
n_bytes += rp.components_.indices.nbytes
print("Random matrix with size: %0.3fMB" % (n_bytes / 1e6))
projected_dists = euclidean_distances(
projected_data, squared=True).ravel()[nonzero]
plt.figure()
plt.hexbin(dists, projected_dists, gridsize=100, cmap=plt.cm.PuBu)
plt.xlabel("Pairwise squared distances in original space")
plt.ylabel("Pairwise squared distances in projected space")
plt.title("Pairwise distances distribution for n_components=%d" %
n_components)
cb = plt.colorbar()
cb.set_label('Sample pairs counts')
rates = projected_dists / dists
print("Mean distances rate: %0.2f (%0.2f)"
% (np.mean(rates), np.std(rates)))
plt.figure()
plt.hist(rates, bins=50, normed=True, range=(0., 2.))
plt.xlabel("Squared distances rate: projected / original")
plt.ylabel("Distribution of samples pairs")
plt.title("Histogram of pairwise distance rates for n_components=%d" %
n_components)
# TODO: compute the expected value of eps and add them to the previous plot
# as vertical lines / region
plt.show()
| bsd-3-clause |
Chasego/kafka | system_test/utils/metrics.py | 89 | 13937 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#!/usr/bin/env python
# ===================================
# file: metrics.py
# ===================================
import inspect
import json
import logging
import os
import signal
import subprocess
import sys
import traceback
import csv
import time
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from collections import namedtuple
import numpy
from pyh import *
import kafka_system_test_utils
import system_test_utils
logger = logging.getLogger("namedLogger")
thisClassName = '(metrics)'
d = {'name_of_class': thisClassName}
attributeNameToNameInReportedFileMap = {
'Min': 'min',
'Max': 'max',
'Mean': 'mean',
'50thPercentile': 'median',
'StdDev': 'stddev',
'95thPercentile': '95%',
'99thPercentile': '99%',
'999thPercentile': '99.9%',
'Count': 'count',
'OneMinuteRate': '1 min rate',
'MeanRate': 'mean rate',
'FiveMinuteRate': '5 min rate',
'FifteenMinuteRate': '15 min rate',
'Value': 'value'
}
def getCSVFileNameFromMetricsMbeanName(mbeanName):
return mbeanName.replace(":type=", ".").replace(",name=", ".") + ".csv"
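# Illustrative example (hypothetical mbean name):
#   getCSVFileNameFromMetricsMbeanName("kafka.server:type=BrokerTopicMetrics,name=MessagesInPerSec")
#   returns "kafka.server.BrokerTopicMetrics.MessagesInPerSec.csv"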
def read_metrics_definition(metricsFile):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
allGraphs = []
for dashboard in allDashboards:
dashboardName = dashboard['name']
graphs = dashboard['graphs']
for graph in graphs:
bean = graph['bean_name']
allGraphs.append(graph)
attributes = graph['attributes']
#print "Filtering on attributes " + attributes
return allGraphs
def get_dashboard_definition(metricsFile, role):
metricsFileData = open(metricsFile, "r").read()
metricsJsonData = json.loads(metricsFileData)
allDashboards = metricsJsonData['dashboards']
dashboardsForRole = []
for dashboard in allDashboards:
if dashboard['role'] == role:
dashboardsForRole.append(dashboard)
return dashboardsForRole
def ensure_valid_headers(headers, attributes):
if headers[0] != "# time":
raise Exception("First column should be time")
for header in headers:
logger.debug(header, extra=d)
# there should be exactly one column with a name that matches attributes
try:
attributeColumnIndex = headers.index(attributes)
return attributeColumnIndex
except ValueError as ve:
#print "#### attributes : ", attributes
#print "#### headers : ", headers
raise Exception("There should be exactly one column that matches attribute: {0} in".format(attributes) +
" headers: {0}".format(",".join(headers)))
def plot_graphs(inputCsvFiles, labels, title, xLabel, yLabel, attribute, outputGraphFile):
if not inputCsvFiles: return
# create empty plot
fig=plt.figure()
fig.subplots_adjust(bottom=0.2)
ax=fig.add_subplot(111)
labelx = -0.3 # axes coords
ax.set_xlabel(xLabel)
ax.set_ylabel(yLabel)
ax.grid()
#ax.yaxis.set_label_coords(labelx, 0.5)
Coordinates = namedtuple("Coordinates", 'x y')
plots = []
coordinates = []
# read data for all files, organize by label in a dict
for fileAndLabel in zip(inputCsvFiles, labels):
inputCsvFile = fileAndLabel[0]
label = fileAndLabel[1]
csv_reader = list(csv.reader(open(inputCsvFile, "rb")))
x,y = [],[]
xticks_labels = []
try:
# read first line as the headers
headers = csv_reader.pop(0)
attributeColumnIndex = ensure_valid_headers(headers, attributeNameToNameInReportedFileMap[attribute])
logger.debug("Column index for attribute {0} is {1}".format(attribute, attributeColumnIndex), extra=d)
start_time = (int)(os.path.getctime(inputCsvFile) * 1000)
int(csv_reader[0][0])
for line in csv_reader:
if(len(line) == 0):
continue
yVal = float(line[attributeColumnIndex])
xVal = int(line[0])
y.append(yVal)
epoch= start_time + int(line[0])
x.append(xVal)
xticks_labels.append(time.strftime("%H:%M:%S", time.localtime(epoch)))
coordinates.append(Coordinates(xVal, yVal))
p1 = ax.plot(x,y)
plots.append(p1)
except Exception as e:
logger.error("ERROR while plotting data for {0}: {1}".format(inputCsvFile, e), extra=d)
traceback.print_exc()
# find xmin, xmax, ymin, ymax from all csv files
xmin = min(map(lambda coord: coord.x, coordinates))
xmax = max(map(lambda coord: coord.x, coordinates))
ymin = min(map(lambda coord: coord.y, coordinates))
ymax = max(map(lambda coord: coord.y, coordinates))
# set x and y axes limits
plt.xlim(xmin, xmax)
plt.ylim(ymin, ymax)
# set ticks accordingly
xticks = numpy.arange(xmin, xmax, 0.2*xmax)
# yticks = numpy.arange(ymin, ymax)
plt.xticks(xticks,xticks_labels,rotation=17)
# plt.yticks(yticks)
plt.legend(plots,labels, loc=2)
plt.title(title)
plt.savefig(outputGraphFile)
def draw_all_graphs(metricsDescriptionFile, testcaseEnv, clusterConfig):
# go through each role and plot graphs for the role's metrics
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
dashboards = get_dashboard_definition(metricsDescriptionFile, role)
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
for dashboard in dashboards:
graphs = dashboard['graphs']
# draw each graph for all entities
draw_graph_for_role(graphs, entities, role, testcaseEnv)
def draw_graph_for_role(graphs, entities, role, testcaseEnv):
for graph in graphs:
graphName = graph['graph_name']
yLabel = graph['y_label']
inputCsvFiles = []
graphLegendLabels = []
for entity in entities:
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entity['entity_id'], "metrics")
entityMetricCsvFile = entityMetricsDir + "/" + getCSVFileNameFromMetricsMbeanName(graph['bean_name'])
if(not os.path.exists(entityMetricCsvFile)):
logger.warn("The file {0} does not exist for plotting".format(entityMetricCsvFile), extra=d)
else:
inputCsvFiles.append(entityMetricCsvFile)
graphLegendLabels.append(role + "-" + entity['entity_id'])
# print "Plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
try:
# plot one graph per mbean attribute
labels = graph['y_label'].split(',')
fullyQualifiedAttributeNames = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
attributes = graph['attributes'].split(',')
for labelAndAttribute in zip(labels, fullyQualifiedAttributeNames, attributes):
outputGraphFile = testcaseEnv.testCaseDashboardsDir + "/" + role + "/" + labelAndAttribute[1] + ".svg"
plot_graphs(inputCsvFiles, graphLegendLabels, graph['graph_name'] + '-' + labelAndAttribute[2],
"time", labelAndAttribute[0], labelAndAttribute[2], outputGraphFile)
# print "Finished plotting graph for metric {0} on entity {1}".format(graph['graph_name'], entity['entity_id'])
except Exception as e:
logger.error("ERROR while plotting graph {0}: {1}".format(outputGraphFile, e), extra=d)
traceback.print_exc()
def build_all_dashboards(metricsDefinitionFile, testcaseDashboardsDir, clusterConfig):
metricsHtmlFile = testcaseDashboardsDir + "/metrics.html"
centralDashboard = PyH('Kafka Metrics Dashboard')
centralDashboard << h1('Kafka Metrics Dashboard', cl='center')
roles = set(map(lambda config: config['role'], clusterConfig))
for role in roles:
entities = kafka_system_test_utils.get_entities_for_role(clusterConfig, role)
dashboardPagePath = build_dashboard_for_role(metricsDefinitionFile, role,
entities, testcaseDashboardsDir)
centralDashboard << a(role, href = dashboardPagePath)
centralDashboard << br()
centralDashboard.printOut(metricsHtmlFile)
def build_dashboard_for_role(metricsDefinitionFile, role, entities, testcaseDashboardsDir):
    # build all dashboards for the input entities based on their role. The role can be one of
    # kafka, zookeeper, producer or consumer
dashboards = get_dashboard_definition(metricsDefinitionFile, role)
entityDashboard = PyH('Kafka Metrics Dashboard for ' + role)
entityDashboard << h1('Kafka Metrics Dashboard for ' + role, cl='center')
entityDashboardHtml = testcaseDashboardsDir + "/" + role + "-dashboards.html"
for dashboard in dashboards:
# place the graph svg files in this dashboard
allGraphs = dashboard['graphs']
for graph in allGraphs:
attributes = map(lambda attribute: graph['bean_name'] + ':' + attribute,
graph['attributes'].split(','))
for attribute in attributes:
graphFileLocation = testcaseDashboardsDir + "/" + role + "/" + attribute + ".svg"
entityDashboard << embed(src = graphFileLocation, type = "image/svg+xml")
entityDashboard.printOut(entityDashboardHtml)
return entityDashboardHtml
def start_metrics_collection(jmxHost, jmxPort, role, entityId, systemTestEnv, testcaseEnv):
logger.info("starting metrics collection on jmx port : " + jmxPort, extra=d)
jmxUrl = "service:jmx:rmi:///jndi/rmi://" + jmxHost + ":" + jmxPort + "/jmxrmi"
clusterConfig = systemTestEnv.clusterEntityConfigDictList
metricsDefinitionFile = systemTestEnv.METRICS_PATHNAME
entityMetricsDir = kafka_system_test_utils.get_testcase_config_log_dir_pathname(testcaseEnv, role, entityId, "metrics")
dashboardsForRole = get_dashboard_definition(metricsDefinitionFile, role)
mbeansForRole = get_mbeans_for_role(dashboardsForRole)
kafkaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "kafka_home")
javaHome = system_test_utils.get_data_by_lookup_keyval(clusterConfig, "entity_id", entityId, "java_home")
for mbean in mbeansForRole:
outputCsvFile = entityMetricsDir + "/" + mbean + ".csv"
startMetricsCmdList = ["ssh " + jmxHost,
"'JAVA_HOME=" + javaHome,
"JMX_PORT= " + kafkaHome + "/bin/kafka-run-class.sh kafka.tools.JmxTool",
"--jmx-url " + jmxUrl,
"--object-name " + mbean + " 1> ",
outputCsvFile + " & echo pid:$! > ",
entityMetricsDir + "/entity_pid'"]
startMetricsCommand = " ".join(startMetricsCmdList)
logger.debug("executing command: [" + startMetricsCommand + "]", extra=d)
system_test_utils.async_sys_call(startMetricsCommand)
time.sleep(1)
pidCmdStr = "ssh " + jmxHost + " 'cat " + entityMetricsDir + "/entity_pid' 2> /dev/null"
logger.debug("executing command: [" + pidCmdStr + "]", extra=d)
subproc = system_test_utils.sys_call_return_subproc(pidCmdStr)
# keep track of JMX ppid in a dictionary of entity_id to list of JMX ppid
# testcaseEnv.entityJmxParentPidDict:
# key: entity_id
# val: list of JMX ppid associated to that entity_id
# { 1: [1234, 1235, 1236], 2: [2234, 2235, 2236], ... }
for line in subproc.stdout.readlines():
line = line.rstrip('\n')
logger.debug("line: [" + line + "]", extra=d)
if line.startswith("pid"):
logger.debug("found pid line: [" + line + "]", extra=d)
tokens = line.split(':')
thisPid = tokens[1]
if entityId not in testcaseEnv.entityJmxParentPidDict:
testcaseEnv.entityJmxParentPidDict[entityId] = []
testcaseEnv.entityJmxParentPidDict[entityId].append(thisPid)
#print "\n#### testcaseEnv.entityJmxParentPidDict ", testcaseEnv.entityJmxParentPidDict, "\n"
def stop_metrics_collection(jmxHost, jmxPort):
logger.info("stopping metrics collection on " + jmxHost + ":" + jmxPort, extra=d)
system_test_utils.sys_call("ps -ef | grep JmxTool | grep -v grep | grep " + jmxPort + " | awk '{print $2}' | xargs kill -9")
def get_mbeans_for_role(dashboardsForRole):
graphs = reduce(lambda x,y: x+y, map(lambda dashboard: dashboard['graphs'], dashboardsForRole))
return set(map(lambda metric: metric['bean_name'], graphs))
| apache-2.0 |
WeKeyPedia/toolkit-python | examples/diff.py | 1 | 4083 | import wekeypedia
import nltk
import pandas as pd
from bs4 import BeautifulSoup
from collections import defaultdict
from multiprocessing import Pool as ThreadPool
import codecs
import json
ignore_list = "{}()[]<>./,;\"':!?&#"
lemmatizer = nltk.WordNetLemmatizer()
stemmer = nltk.stem.porter.PorterStemmer()
# page = "Michel Maffesoli"
# page = "Love"
# page = "War"
page = "Wisdom"
p = wekeypedia.WikipediaPage()
p.fetch_from_api_title(page)
added = defaultdict(dict)
deleted = defaultdict(dict)
inflections = {}
inflections["added"] = defaultdict(set)
inflections["deleted"] = defaultdict(set)
def get_revs():
revisions = p.get_revisions_list()
return revisions
def red(r):
a = r[0]
d = r[1]
i = r[2]
for s in ["added" , "deleted"]:
for w in i[s]:
inflections[s][w] |= i[s][w]
for w, c in a:
if not("count" in added[w]):
added[w]["count"] = 0
added[w]["count"] += c
added[w]["inflections"] = ", ".join(list(inflections["added"][w]))
for w, c in d:
if not("count" in deleted[w]):
deleted[w]["count"] = 0
deleted[w]["count"] += c
deleted[w]["inflections"] = ", ".join(list(inflections["deleted"][w]))
def normalize(word):
# old = word
word = word.lower()
word = stemmer.stem_word(word)
word = lemmatizer.lemmatize(word)
# print word
return word
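# Illustrative example (exact output depends on the installed NLTK version):
#   normalize("Wars") -> "wars" (lowercased) -> "war" (Porter stem) -> "war" (lemma)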
def count(a, sentence, which):
for w in nltk.word_tokenize(sentence):
old = w
w = normalize(w)
if not(w in ignore_list):
inflections[which][w] = inflections[which][w] | set([ old ])
a[w] += 1
def rev_diff(revid):
ad = defaultdict(int)
de = defaultdict(int)
d = p.get_diff(revid)
# bug with Ethics#462124891
if d == False:
return ({},{}, { "added": [], "deleted": [] })
d = BeautifulSoup(d, 'html.parser')
# check additions of block
addedlines = d.find_all("td", "diff-addedline")
for tag in addedlines:
inner = tag.find_all("ins")
# check that there is no inner addition
if len(inner) == 0:
count(ad, tag.get_text(), "added")
ins_tags = d.find_all("ins")
for txt in map(lambda x: x.get_text(), ins_tags):
count(ad, txt, "added")
deletedline = d.find_all("td", "diff-deletedline")
for tag in deletedline:
inner = tag.find_all("del")
if len(inner) == 0:
count(de, tag.get_text(), "deleted")
del_tags = d.find_all("del")
for txt in map(lambda x: x.get_text(), del_tags):
count(de, txt, "deleted")
print("%s %s|%s" % (revid, "+"*len(ad), "-"*len(de)))
return (ad.items(),de.items(), { "added":inflections["added"], "deleted":inflections["deleted"] })
def write_revdif(revid):
data = p.get_diff_full(revid)
name = "data/%s/%s.json" % (page, revid)
data = data["query"]["pages"][list(data["query"]["pages"].keys())[0]]
if "diff" in data["revisions"][0]:
data = data["revisions"][0]
print revid
with codecs.open(name, "w", "utf-8-sig") as f:
json.dump(data, f, ensure_ascii=False, indent=2, separators=(',', ': '))
# print len(rev_list)
# print p.get_diff(rev_list[0]["revid"])
# print [ x["revid"] for x in rev_list ]
# print rev_diff(462124891)
# exit()
rev_list = get_revs()
pool = ThreadPool(4)
#result = pool.map(rev_diff, [ x["revid"] for x in rev_list ])
pool.map(write_revdif, [ x["revid"] for x in rev_list ])
# for r in rev_list:
# rev_id = r["revid"]
# # pool.apply_async( rev_diff, args=(rev_id, ), callback=red )
# red(rev_diff(rev_id))
pool.close()
pool.join()
exit()
print "## reduce results"
for r in result:
red(r)
# print added
print "## save consolidated data"
#df1 = pd.DataFrame.from_dict(added, orient="index")
df1 = pd.DataFrame([ [ x[1]["count"], x[1]["inflections"] ] for x in added.iteritems() ], index=added.keys())
df1.columns = [ 'count', 'inflections']
df1.to_csv("added.csv", encoding="utf8")
#df2 = pd.DataFrame.from_dict(deleted, orient="index")
df2 = pd.DataFrame([ [ x[1]["count"], x[1]["inflections"] ] for x in deleted.iteritems() ], index=deleted.keys())
df2.columns = ['count', 'inflections']
df2.to_csv("deleted.csv", encoding="utf8") | mit |
yandex/rep | rep/estimators/utils.py | 1 | 5468 | from __future__ import division, print_function, absolute_import
import numpy
import pandas
import warnings
from scipy.special import expit, logit
from sklearn.base import BaseEstimator, TransformerMixin, clone
from sklearn.utils.validation import column_or_1d
from ..utils import check_sample_weight, get_columns_in_df
__author__ = 'Alex Rogozhnikov'
def check_inputs(X, y, sample_weight, allow_none_weights=True, allow_multiple_targets=False):
if allow_multiple_targets:
y = numpy.array(y)
else:
y = column_or_1d(y)
if allow_none_weights and sample_weight is None:
# checking only X, y
if len(X) != len(y):
raise ValueError('Different size of X: {} and y: {}'.format(X.shape, y.shape))
return X, y, None
if sample_weight is None:
sample_weight = numpy.ones(len(y), dtype=float)
sample_weight = column_or_1d(sample_weight)
assert sum(numpy.isnan(sample_weight)) == 0, "Weight contains nan, this format isn't supported"
if not (len(X) == len(y) == len(sample_weight)):
message = 'Different sizes of X: {}, y: {} and sample_weight: {}'
raise ValueError(message.format(X.shape, y.shape, sample_weight.shape))
return X, y, sample_weight
def score_to_proba(score):
proba = numpy.zeros([len(score), 2])
proba[:, 1] = expit(score)
proba[:, 0] = 1 - proba[:, 1]
return proba
def proba_to_two_dimensions(probability):
proba = numpy.zeros([len(probability), 2])
proba[:, 1] = probability
proba[:, 0] = 1 - proba[:, 1]
return proba
def proba_to_score(proba):
assert proba.shape[1] == 2, 'Converting proba to score is possible only for two-class classification'
proba = proba / proba.sum(axis=1, keepdims=True)
score = logit(proba[:, 1])
return score
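# Note (illustrative): for two-class problems score_to_proba and proba_to_score are
# inverses of each other, since logit is the inverse of expit, e.g.
#   proba_to_score(score_to_proba(numpy.array([-1.0, 0.0, 2.0])))
# recovers the original scores up to floating point error.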
def normalize_weights(y, sample_weight, per_class=True):
"""Returns normalized weights with average = 1.
:param y: answers
:param sample_weight: original weights (can not be None)
    :param per_class: if True, weights are normalized to mean 1 within each class separately;
        otherwise they are normalized over the whole sample
"""
sample_weight = check_sample_weight(y, sample_weight=sample_weight)
if per_class:
sample_weight = sample_weight.copy()
for label in numpy.unique(y):
sample_weight[y == label] /= numpy.mean(sample_weight[y == label])
return sample_weight
else:
return sample_weight / numpy.mean(sample_weight)
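# Illustrative example (assuming check_sample_weight simply validates and returns
# the given weights):
#   normalize_weights(numpy.array([0, 0, 1]), numpy.array([1., 3., 5.]))
#   -> array([0.5, 1.5, 1.0]), i.e. each class now has mean weight 1.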
def _get_features(features, X, allow_nans=False):
"""
Get data with necessary features
    :param list[str] features: features to select (None means use all columns of X)
    :param pandas.DataFrame X: train dataset
    :return: (pandas.DataFrame with the selected features, list of feature names)
"""
new_features = features
if isinstance(X, numpy.ndarray):
X = pandas.DataFrame(X, columns=['Feature_%d' % index for index in range(X.shape[1])])
else:
assert isinstance(X, pandas.DataFrame), 'Support only numpy.ndarray and pandas.DataFrame'
if features is None:
new_features = list(X.columns)
X_features = X
elif list(X.columns) == list(features):
X_features = X
else:
# assert set(self.features).issubset(set(X.columns)), "Data doesn't contain all training features"
# X_features = X.ix[:, self.features]
X_features = get_columns_in_df(X, features)
if not allow_nans:
# check column-by-column in order not to create copy of whole DataFrame
for column in X_features.columns:
assert numpy.all(numpy.isfinite(X_features[column])), "Does not support NaN: " + str(column)
return X_features, new_features
class IdentityTransformer(BaseEstimator, TransformerMixin):
"""
    Identity transformer: in a constant, reproducible manner it leaves the
    input unchanged, though it may convert it to the provided dtype.
"""
def __init__(self, dtype='float32'):
self.dtype = dtype
def fit(self, X, y, **kwargs):
return self
def transform(self, X):
if self.dtype is None:
return X
else:
return numpy.array(X, dtype=self.dtype)
def check_scaler(scaler):
"""
    Used in neural networks to unify how scalers are specified across the different networks.
:param scaler: scaler
:type scaler: str or False or TransformerMixin
:return: TransformerMixin, scaler
"""
from sklearn.preprocessing import StandardScaler, MinMaxScaler
transformers = {
'standard': StandardScaler(),
'minmax': MinMaxScaler(),
'identity': IdentityTransformer(),
False: IdentityTransformer()
}
if scaler in transformers.keys():
return transformers[scaler]
else:
if not isinstance(scaler, TransformerMixin):
warnings.warn("Passed scaler wasn't derived from TransformerMixin.")
return clone(scaler)
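# Illustrative usage: check_scaler('standard') returns a StandardScaler(),
# check_scaler(False) returns an IdentityTransformer(), and a custom
# TransformerMixin instance is returned as a clone of itself.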
def one_hot_transform(y, n_classes=None, dtype='float32'):
"""
    For neural networks, this function is needed only during training.
    Classes in 'y' should be [0, 1, 2, ..., n_classes - 1]
"""
if n_classes is None:
n_classes = numpy.max(y) + 1
target = numpy.zeros([len(y), n_classes], dtype=dtype)
target[numpy.arange(len(y)), y] = 1
return target
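# Illustrative example:
#   one_hot_transform(numpy.array([0, 2, 1]), n_classes=3)
#   -> [[1, 0, 0], [0, 0, 1], [0, 1, 0]] as float32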
def remove_first_line(string):
"""
    Returns a copy of the string without its first line (needed for descriptions which differ only in the first line)
:param string: initial string
:return: copy of string without first line
"""
return '\n'.join(string.split('\n')[1:])
| apache-2.0 |
amolkahat/pandas | pandas/core/sparse/frame.py | 1 | 37149 | """
Data structures for sparse float data. Life is made simpler by dealing only
with float64 data
"""
from __future__ import division
# pylint: disable=E1101,E1103,W0231,E0202
import warnings
from pandas.compat import lmap
from pandas import compat
import numpy as np
from pandas.core.dtypes.missing import isna, notna
from pandas.core.dtypes.cast import maybe_upcast, find_common_type
from pandas.core.dtypes.common import ensure_platform_int, is_scipy_sparse
from pandas.compat.numpy import function as nv
from pandas.core.index import Index, MultiIndex, ensure_index
from pandas.core.series import Series
from pandas.core.frame import DataFrame, extract_index, _prep_ndarray
import pandas.core.algorithms as algos
from pandas.core.internals import (BlockManager,
create_block_manager_from_arrays)
import pandas.core.generic as generic
from pandas.core.arrays.sparse import SparseArray, SparseDtype
from pandas.core.sparse.series import SparseSeries
from pandas._libs.sparse import BlockIndex, get_blocks
from pandas.util._decorators import Appender
import pandas.core.ops as ops
import pandas.core.common as com
import pandas.core.indexes.base as ibase
_shared_doc_kwargs = dict(klass='SparseDataFrame')
class SparseDataFrame(DataFrame):
"""
DataFrame containing sparse floating point data in the form of SparseSeries
objects
Parameters
----------
data : same types as can be passed to DataFrame or scipy.sparse.spmatrix
.. versionchanged :: 0.23.0
If data is a dict, argument order is maintained for Python 3.6
and later.
index : array-like, optional
    columns : array-like, optional
default_kind : {'block', 'integer'}, default 'block'
Default sparse kind for converting Series to SparseSeries. Will not
override SparseSeries passed into constructor
default_fill_value : float
Default fill_value for converting Series to SparseSeries
(default: nan). Will not override SparseSeries passed in.
"""
_subtyp = 'sparse_frame'
def __init__(self, data=None, index=None, columns=None, default_kind=None,
default_fill_value=None, dtype=None, copy=False):
# pick up the defaults from the Sparse structures
if isinstance(data, SparseDataFrame):
if index is None:
index = data.index
if columns is None:
columns = data.columns
if default_fill_value is None:
default_fill_value = data.default_fill_value
if default_kind is None:
default_kind = data.default_kind
elif isinstance(data, (SparseSeries, SparseArray)):
if index is None:
index = data.index
if default_fill_value is None:
default_fill_value = data.fill_value
if columns is None and hasattr(data, 'name'):
columns = [data.name]
if columns is None:
raise Exception("cannot pass a series w/o a name or columns")
data = {columns[0]: data}
if default_fill_value is None:
default_fill_value = np.nan
if default_kind is None:
default_kind = 'block'
self._default_kind = default_kind
self._default_fill_value = default_fill_value
if is_scipy_sparse(data):
mgr = self._init_spmatrix(data, index, columns, dtype=dtype,
fill_value=default_fill_value)
elif isinstance(data, dict):
mgr = self._init_dict(data, index, columns, dtype=dtype)
elif isinstance(data, (np.ndarray, list)):
mgr = self._init_matrix(data, index, columns, dtype=dtype)
elif isinstance(data, SparseDataFrame):
mgr = self._init_mgr(data._data,
dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif isinstance(data, DataFrame):
mgr = self._init_dict(data, data.index, data.columns, dtype=dtype)
elif isinstance(data, Series):
mgr = self._init_dict(data.to_frame(), data.index,
columns=None, dtype=dtype)
elif isinstance(data, BlockManager):
mgr = self._init_mgr(data, axes=dict(index=index, columns=columns),
dtype=dtype, copy=copy)
elif data is None:
data = DataFrame()
if index is None:
index = Index([])
else:
index = ensure_index(index)
if columns is None:
columns = Index([])
else:
for c in columns:
data[c] = SparseArray(np.nan, index=index,
kind=self._default_kind,
fill_value=self._default_fill_value)
mgr = to_manager(data, columns, index)
if dtype is not None:
mgr = mgr.astype(dtype)
else:
msg = ('SparseDataFrame called with unknown type "{data_type}" '
'for data argument')
raise TypeError(msg.format(data_type=type(data).__name__))
generic.NDFrame.__init__(self, mgr)
@property
def _constructor(self):
return SparseDataFrame
_constructor_sliced = SparseSeries
def _init_dict(self, data, index, columns, dtype=None):
# pre-filter out columns if we passed it
if columns is not None:
columns = ensure_index(columns)
data = {k: v for k, v in compat.iteritems(data) if k in columns}
else:
keys = com.dict_keys_to_ordered_list(data)
columns = Index(keys)
if index is None:
index = extract_index(list(data.values()))
def sp_maker(x):
return SparseArray(x, kind=self._default_kind,
fill_value=self._default_fill_value,
copy=True, dtype=dtype)
sdict = {}
for k, v in compat.iteritems(data):
if isinstance(v, Series):
# Force alignment, no copy necessary
if not v.index.equals(index):
v = v.reindex(index)
if not isinstance(v, SparseSeries):
v = sp_maker(v.values)
elif isinstance(v, SparseArray):
v = v.copy()
else:
if isinstance(v, dict):
v = [v.get(i, np.nan) for i in index]
v = sp_maker(v)
if index is not None and len(v) != len(index):
msg = "Length of passed values is {}, index implies {}"
raise ValueError(msg.format(len(v), len(index)))
sdict[k] = v
if len(columns.difference(sdict)):
# TODO: figure out how to handle this case, all nan's?
# add in any other columns we want to have (completeness)
nan_arr = np.empty(len(index), dtype='float64')
nan_arr.fill(np.nan)
nan_arr = SparseArray(nan_arr, kind=self._default_kind,
fill_value=self._default_fill_value,
copy=False)
sdict.update((c, nan_arr) for c in columns if c not in sdict)
return to_manager(sdict, columns, index)
def _init_matrix(self, data, index, columns, dtype=None):
""" Init self from ndarray or list of lists """
data = _prep_ndarray(data, copy=False)
index, columns = self._prep_index(data, index, columns)
data = {idx: data[:, i] for i, idx in enumerate(columns)}
return self._init_dict(data, index, columns, dtype)
def _init_spmatrix(self, data, index, columns, dtype=None,
fill_value=None):
""" Init self from scipy.sparse matrix """
index, columns = self._prep_index(data, index, columns)
data = data.tocoo()
N = len(index)
# Construct a dict of SparseSeries
sdict = {}
values = Series(data.data, index=data.row, copy=False)
for col, rowvals in values.groupby(data.col):
# get_blocks expects int32 row indices in sorted order
rowvals = rowvals.sort_index()
rows = rowvals.index.values.astype(np.int32)
blocs, blens = get_blocks(rows)
sdict[columns[col]] = SparseSeries(
rowvals.values, index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, blocs, blens))
# Add any columns that were empty and thus not grouped on above
sdict.update({column: SparseSeries(index=index,
fill_value=fill_value,
sparse_index=BlockIndex(N, [], []))
for column in columns
if column not in sdict})
return self._init_dict(sdict, index, columns, dtype)
def _prep_index(self, data, index, columns):
N, K = data.shape
if index is None:
index = ibase.default_index(N)
if columns is None:
columns = ibase.default_index(K)
if len(columns) != K:
raise ValueError('Column length mismatch: {columns} vs. {K}'
.format(columns=len(columns), K=K))
if len(index) != N:
raise ValueError('Index length mismatch: {index} vs. {N}'
.format(index=len(index), N=N))
return index, columns
def to_coo(self):
"""
Return the contents of the frame as a sparse SciPy COO matrix.
.. versionadded:: 0.20.0
Returns
-------
coo_matrix : scipy.sparse.spmatrix
If the caller is heterogeneous and contains booleans or objects,
the result will be of dtype=object. See Notes.
Notes
-----
The dtype will be the lowest-common-denominator type (implicit
upcasting); that is to say if the dtypes (even of numeric types)
are mixed, the one that accommodates all will be chosen.
e.g. If the dtypes are float16 and float32, dtype will be upcast to
        float32. By numpy.find_common_type convention, mixing int64
        and uint64 will result in a float64 dtype.
"""
try:
from scipy.sparse import coo_matrix
except ImportError:
raise ImportError('Scipy is not installed')
dtype = find_common_type(self.dtypes)
if isinstance(dtype, SparseDtype):
dtype = dtype.subtype
cols, rows, datas = [], [], []
for col, name in enumerate(self):
s = self[name]
row = s.sp_index.to_int_index().indices
cols.append(np.repeat(col, len(row)))
rows.append(row)
datas.append(s.sp_values.astype(dtype, copy=False))
cols = np.concatenate(cols)
rows = np.concatenate(rows)
datas = np.concatenate(datas)
return coo_matrix((datas, (rows, cols)), shape=self.shape)
def __array_wrap__(self, result):
return self._constructor(
result, index=self.index, columns=self.columns,
default_kind=self._default_kind,
default_fill_value=self._default_fill_value).__finalize__(self)
def __getstate__(self):
# pickling
return dict(_typ=self._typ, _subtyp=self._subtyp, _data=self._data,
_default_fill_value=self._default_fill_value,
_default_kind=self._default_kind)
def _unpickle_sparse_frame_compat(self, state):
""" original pickle format """
series, cols, idx, fv, kind = state
if not isinstance(cols, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
columns = _unpickle_array(cols)
else:
columns = cols
if not isinstance(idx, Index): # pragma: no cover
from pandas.io.pickle import _unpickle_array
index = _unpickle_array(idx)
else:
index = idx
series_dict = DataFrame()
for col, (sp_index, sp_values) in compat.iteritems(series):
series_dict[col] = SparseSeries(sp_values, sparse_index=sp_index,
fill_value=fv)
self._data = to_manager(series_dict, columns, index)
self._default_fill_value = fv
self._default_kind = kind
def to_dense(self):
"""
Convert to dense DataFrame
Returns
-------
df : DataFrame
"""
data = {k: v.to_dense() for k, v in compat.iteritems(self)}
return DataFrame(data, index=self.index, columns=self.columns)
def _apply_columns(self, func):
""" get new SparseDataFrame applying func to each columns """
new_data = {}
for col, series in compat.iteritems(self):
new_data[col] = func(series)
return self._constructor(
data=new_data, index=self.index, columns=self.columns,
default_fill_value=self.default_fill_value).__finalize__(self)
def astype(self, dtype):
return self._apply_columns(lambda x: x.astype(dtype))
def copy(self, deep=True):
"""
Make a copy of this SparseDataFrame
"""
result = super(SparseDataFrame, self).copy(deep=deep)
result._default_fill_value = self._default_fill_value
result._default_kind = self._default_kind
return result
@property
def default_fill_value(self):
return self._default_fill_value
@property
def default_kind(self):
return self._default_kind
@property
def density(self):
"""
Ratio of non-sparse points to total (dense) data points
represented in the frame
"""
tot_nonsparse = sum(ser.sp_index.npoints
for _, ser in compat.iteritems(self))
tot = len(self.index) * len(self.columns)
return tot_nonsparse / float(tot)
def fillna(self, value=None, method=None, axis=0, inplace=False,
limit=None, downcast=None):
new_self = super(SparseDataFrame,
self).fillna(value=value, method=method, axis=axis,
inplace=inplace, limit=limit,
downcast=downcast)
if not inplace:
self = new_self
# set the fill value if we are filling as a scalar with nothing special
# going on
if (value is not None and value == value and method is None and
limit is None):
self._default_fill_value = value
if not inplace:
return self
# ----------------------------------------------------------------------
# Support different internal representation of SparseDataFrame
def _sanitize_column(self, key, value, **kwargs):
"""
Creates a new SparseArray from the input value.
Parameters
----------
key : object
value : scalar, Series, or array-like
kwargs : dict
Returns
-------
sanitized_column : SparseArray
"""
def sp_maker(x, index=None):
return SparseArray(x, index=index,
fill_value=self._default_fill_value,
kind=self._default_kind)
if isinstance(value, SparseSeries):
clean = value.reindex(self.index).as_sparse_array(
fill_value=self._default_fill_value, kind=self._default_kind)
elif isinstance(value, SparseArray):
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = value
elif hasattr(value, '__iter__'):
if isinstance(value, Series):
clean = value.reindex(self.index)
if not isinstance(value, SparseSeries):
clean = sp_maker(clean)
else:
if len(value) != len(self.index):
raise AssertionError('Length of values does not match '
'length of index')
clean = sp_maker(value)
# Scalar
else:
clean = sp_maker(value, self.index)
# always return a SparseArray!
return clean
def get_value(self, index, col, takeable=False):
"""
Quickly retrieve single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
takeable : interpret the index/col as indexers, default False
Returns
-------
value : scalar value
"""
warnings.warn("get_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._get_value(index, col, takeable=takeable)
def _get_value(self, index, col, takeable=False):
if takeable is True:
series = self._iget_item_cache(col)
else:
series = self._get_item_cache(col)
return series._get_value(index, takeable=takeable)
_get_value.__doc__ = get_value.__doc__
def set_value(self, index, col, value, takeable=False):
"""
Put single value at passed column and index
.. deprecated:: 0.21.0
Please use .at[] or .iat[] accessors.
Parameters
----------
index : row label
col : column label
value : scalar value
takeable : interpret the index/col as indexers, default False
Notes
-----
This method *always* returns a new object. It is currently not
particularly efficient (and potentially very expensive) but is provided
for API compatibility with DataFrame
Returns
-------
frame : DataFrame
"""
warnings.warn("set_value is deprecated and will be removed "
"in a future release. Please use "
".at[] or .iat[] accessors instead", FutureWarning,
stacklevel=2)
return self._set_value(index, col, value, takeable=takeable)
def _set_value(self, index, col, value, takeable=False):
dense = self.to_dense()._set_value(
index, col, value, takeable=takeable)
return dense.to_sparse(kind=self._default_kind,
fill_value=self._default_fill_value)
_set_value.__doc__ = set_value.__doc__
def _slice(self, slobj, axis=0, kind=None):
if axis == 0:
new_index = self.index[slobj]
new_columns = self.columns
else:
new_index = self.index
new_columns = self.columns[slobj]
return self.reindex(index=new_index, columns=new_columns)
def xs(self, key, axis=0, copy=False):
"""
Returns a row (cross-section) from the SparseDataFrame as a Series
object.
Parameters
----------
key : some index contained in the index
Returns
-------
xs : Series
"""
if axis == 1:
data = self[key]
return data
i = self.index.get_loc(key)
data = self.take([i]).get_values()[0]
return Series(data, index=self.columns)
# ----------------------------------------------------------------------
# Arithmetic-related methods
def _combine_frame(self, other, func, fill_value=None, level=None):
this, other = self.align(other, join='outer', level=level, copy=False)
new_index, new_columns = this.index, this.columns
if level is not None:
raise NotImplementedError("'level' argument is not supported")
if self.empty and other.empty:
return self._constructor(index=new_index).__finalize__(self)
new_data = {}
if fill_value is not None:
# TODO: be a bit more intelligent here
for col in new_columns:
if col in this and col in other:
dleft = this[col].to_dense()
dright = other[col].to_dense()
result = dleft._binop(dright, func, fill_value=fill_value)
result = result.to_sparse(fill_value=this[col].fill_value)
new_data[col] = result
else:
for col in new_columns:
if col in this and col in other:
new_data[col] = func(this[col], other[col])
# if the fill values are the same use them? or use a valid one
new_fill_value = None
other_fill_value = getattr(other, 'default_fill_value', np.nan)
if self.default_fill_value == other_fill_value:
new_fill_value = self.default_fill_value
elif np.isnan(self.default_fill_value) and not np.isnan(
other_fill_value):
new_fill_value = other_fill_value
elif not np.isnan(self.default_fill_value) and np.isnan(
other_fill_value):
new_fill_value = self.default_fill_value
return self._constructor(data=new_data, index=new_index,
columns=new_columns,
default_fill_value=new_fill_value
).__finalize__(self)
def _combine_match_index(self, other, func, level=None):
new_data = {}
if level is not None:
raise NotImplementedError("'level' argument is not supported")
new_index = self.index.union(other.index)
this = self
if self.index is not new_index:
this = self.reindex(new_index)
if other.index is not new_index:
other = other.reindex(new_index)
for col, series in compat.iteritems(this):
new_data[col] = func(series.values, other.values)
# fill_value is a function of our operator
if isna(other.fill_value) or isna(self.default_fill_value):
fill_value = np.nan
else:
fill_value = func(np.float64(self.default_fill_value),
np.float64(other.fill_value))
return self._constructor(
new_data, index=new_index, columns=self.columns,
default_fill_value=fill_value).__finalize__(self)
def _combine_match_columns(self, other, func, level=None):
# patched version of DataFrame._combine_match_columns to account for
# NumPy circumventing __rsub__ with float64 types, e.g.: 3.0 - series,
# where 3.0 is numpy.float64 and series is a SparseSeries. Still
# possible for this to happen, which is bothersome
if level is not None:
raise NotImplementedError("'level' argument is not supported")
new_data = {}
union = intersection = self.columns
if not union.equals(other.index):
union = other.index.union(self.columns)
intersection = other.index.intersection(self.columns)
for col in intersection:
new_data[col] = func(self[col], float(other[col]))
return self._constructor(
new_data, index=self.index, columns=union,
default_fill_value=self.default_fill_value).__finalize__(self)
def _combine_const(self, other, func, errors='raise'):
return self._apply_columns(lambda x: func(x, other))
def _reindex_index(self, index, method, copy, level, fill_value=np.nan,
limit=None, takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if self.index.equals(index):
if copy:
return self.copy()
else:
return self
if len(self.index) == 0:
return self._constructor(
index=index, columns=self.columns).__finalize__(self)
indexer = self.index.get_indexer(index, method, limit=limit)
indexer = ensure_platform_int(indexer)
mask = indexer == -1
need_mask = mask.any()
new_series = {}
for col, series in self.iteritems():
if mask.all():
continue
values = series.values
# .take returns SparseArray
new = values.take(indexer)
if need_mask:
new = new.values
# convert integer to float if necessary. need to do a lot
# more than that, handle boolean etc also
new, fill_value = maybe_upcast(new, fill_value=fill_value)
np.putmask(new, mask, fill_value)
new_series[col] = new
return self._constructor(
new_series, index=index, columns=self.columns,
default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_columns(self, columns, method, copy, level, fill_value=None,
limit=None, takeable=False):
if level is not None:
raise TypeError('Reindex by level not supported for sparse')
if notna(fill_value):
raise NotImplementedError("'fill_value' argument is not supported")
if limit:
raise NotImplementedError("'limit' argument is not supported")
if method is not None:
raise NotImplementedError("'method' argument is not supported")
# TODO: fill value handling
sdict = {k: v for k, v in compat.iteritems(self) if k in columns}
return self._constructor(
sdict, index=self.index, columns=columns,
default_fill_value=self._default_fill_value).__finalize__(self)
def _reindex_with_indexers(self, reindexers, method=None, fill_value=None,
limit=None, copy=False, allow_dups=False):
if method is not None or limit is not None:
raise NotImplementedError("cannot reindex with a method or limit "
"with sparse")
if fill_value is None:
fill_value = np.nan
reindexers = {self._get_axis_number(a): val
for (a, val) in compat.iteritems(reindexers)}
index, row_indexer = reindexers.get(0, (None, None))
columns, col_indexer = reindexers.get(1, (None, None))
if columns is None:
columns = self.columns
new_arrays = {}
for col in columns:
if col not in self:
continue
if row_indexer is not None:
new_arrays[col] = algos.take_1d(self[col].get_values(),
row_indexer,
fill_value=fill_value)
else:
new_arrays[col] = self[col]
return self._constructor(new_arrays, index=index,
columns=columns).__finalize__(self)
def _join_compat(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
if on is not None:
raise NotImplementedError("'on' keyword parameter is not yet "
"implemented")
return self._join_index(other, how, lsuffix, rsuffix)
def _join_index(self, other, how, lsuffix, rsuffix):
if isinstance(other, Series):
if other.name is None:
raise ValueError('Other Series must have a name')
other = SparseDataFrame(
{other.name: other},
default_fill_value=self._default_fill_value)
join_index = self.index.join(other.index, how=how)
this = self.reindex(join_index)
other = other.reindex(join_index)
this, other = this._maybe_rename_join(other, lsuffix, rsuffix)
from pandas import concat
return concat([this, other], axis=1, verify_integrity=True)
def _maybe_rename_join(self, other, lsuffix, rsuffix):
to_rename = self.columns.intersection(other.columns)
if len(to_rename) > 0:
if not lsuffix and not rsuffix:
raise ValueError('columns overlap but no suffix specified: '
'{to_rename}'.format(to_rename=to_rename))
def lrenamer(x):
if x in to_rename:
return '{x}{lsuffix}'.format(x=x, lsuffix=lsuffix)
return x
def rrenamer(x):
if x in to_rename:
return '{x}{rsuffix}'.format(x=x, rsuffix=rsuffix)
return x
this = self.rename(columns=lrenamer)
other = other.rename(columns=rrenamer)
else:
this = self
return this, other
def transpose(self, *args, **kwargs):
"""
Returns a DataFrame with the rows/columns switched.
"""
nv.validate_transpose(args, kwargs)
return self._constructor(
self.values.T, index=self.columns, columns=self.index,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
T = property(transpose)
@Appender(DataFrame.count.__doc__)
def count(self, axis=0, **kwds):
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.count(), axis=axis)
def cumsum(self, axis=0, *args, **kwargs):
"""
Return SparseDataFrame of cumulative sums over requested axis.
Parameters
----------
axis : {0, 1}
0 for row-wise, 1 for column-wise
Returns
-------
y : SparseDataFrame
"""
nv.validate_cumsum(args, kwargs)
if axis is None:
axis = self._stat_axis_number
return self.apply(lambda x: x.cumsum(), axis=axis)
@Appender(generic._shared_docs['isna'] % _shared_doc_kwargs)
def isna(self):
return self._apply_columns(lambda x: x.isna())
isnull = isna
@Appender(generic._shared_docs['notna'] % _shared_doc_kwargs)
def notna(self):
return self._apply_columns(lambda x: x.notna())
notnull = notna
def apply(self, func, axis=0, broadcast=None, reduce=None,
result_type=None):
"""
Analogous to DataFrame.apply, for SparseDataFrame
Parameters
----------
func : function
Function to apply to each column
axis : {0, 1, 'index', 'columns'}
broadcast : bool, default False
For aggregation functions, return object of same size with values
propagated
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='broadcast'.
reduce : boolean or None, default None
Try to apply reduction procedures. If the DataFrame is empty,
apply will use reduce to determine whether the result should be a
Series or a DataFrame. If reduce is None (the default), apply's
            return value will be guessed by calling func on an empty Series (note:
while guessing, exceptions raised by func will be ignored). If
reduce is True a Series will always be returned, and if False a
DataFrame will always be returned.
.. deprecated:: 0.23.0
This argument will be removed in a future version, replaced
by result_type='reduce'.
        result_type : {'expand', 'reduce', 'broadcast', None}
These only act when axis=1 {columns}:
* 'expand' : list-like results will be turned into columns.
* 'reduce' : return a Series if possible rather than expanding
list-like results. This is the opposite to 'expand'.
* 'broadcast' : results will be broadcast to the original shape
of the frame, the original index & columns will be retained.
The default behaviour (None) depends on the return value of the
applied function: list-like results will be returned as a Series
of those. However if the apply function returns a Series these
are expanded to columns.
.. versionadded:: 0.23.0
Returns
-------
applied : Series or SparseDataFrame
"""
if not len(self.columns):
return self
axis = self._get_axis_number(axis)
if isinstance(func, np.ufunc):
new_series = {}
for k, v in compat.iteritems(self):
applied = func(v)
applied.fill_value = func(v.fill_value)
new_series[k] = applied
return self._constructor(
new_series, index=self.index, columns=self.columns,
default_fill_value=self._default_fill_value,
default_kind=self._default_kind).__finalize__(self)
from pandas.core.apply import frame_apply
op = frame_apply(self,
func=func,
axis=axis,
reduce=reduce,
broadcast=broadcast,
result_type=result_type)
return op.get_result()
def applymap(self, func):
"""
Apply a function to a DataFrame that is intended to operate
elementwise, i.e. like doing map(func, series) for each series in the
DataFrame
Parameters
----------
func : function
Python function, returns a single value from a single value
Returns
-------
applied : DataFrame
"""
return self.apply(lambda x: lmap(func, x))
def to_manager(sdf, columns, index):
""" create and return the block manager from a dataframe of series,
columns, index
"""
# from BlockManager perspective
axes = [ensure_index(columns), ensure_index(index)]
return create_block_manager_from_arrays(
[sdf[c] for c in columns], columns, axes)
def stack_sparse_frame(frame):
"""
Only makes sense when fill_value is NaN
"""
lengths = [s.sp_index.npoints for _, s in compat.iteritems(frame)]
nobs = sum(lengths)
# this is pretty fast
minor_labels = np.repeat(np.arange(len(frame.columns)), lengths)
inds_to_concat = []
vals_to_concat = []
# TODO: Figure out whether this can be reached.
# I think this currently can't be reached because you can't build a
# SparseDataFrame with a non-np.NaN fill value (fails earlier).
for _, series in compat.iteritems(frame):
if not np.isnan(series.fill_value):
raise TypeError('This routine assumes NaN fill value')
int_index = series.sp_index.to_int_index()
inds_to_concat.append(int_index.indices)
vals_to_concat.append(series.sp_values)
major_labels = np.concatenate(inds_to_concat)
stacked_values = np.concatenate(vals_to_concat)
index = MultiIndex(levels=[frame.index, frame.columns],
labels=[major_labels, minor_labels],
verify_integrity=False)
lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index,
columns=['foo'])
return lp.sort_index(level=0)
def homogenize(series_dict):
"""
Conform a set of SparseSeries (with NaN fill_value) to a common SparseIndex
corresponding to the locations where they all have data
Parameters
----------
series_dict : dict or DataFrame
Notes
-----
Using the dumbest algorithm I could think of. Should put some more thought
into this
Returns
-------
homogenized : dict of SparseSeries
"""
index = None
need_reindex = False
for _, series in compat.iteritems(series_dict):
if not np.isnan(series.fill_value):
raise TypeError('this method is only valid with NaN fill values')
if index is None:
index = series.sp_index
elif not series.sp_index.equals(index):
need_reindex = True
index = index.intersect(series.sp_index)
if need_reindex:
output = {}
for name, series in compat.iteritems(series_dict):
if not series.sp_index.equals(index):
series = series.sparse_reindex(index)
output[name] = series
else:
output = series_dict
return output
# use unaccelerated ops for sparse objects
ops.add_flex_arithmetic_methods(SparseDataFrame)
ops.add_special_arithmetic_methods(SparseDataFrame)
| bsd-3-clause |
profxj/old_xastropy | xastropy/xguis/spec_widgets.py | 1 | 60410 | """
#;+
#; NAME:
#; spec_widgets
#; Version 1.0
#;
#; PURPOSE:
#; Module for Spectroscopy widgets with QT
#; 12-Dec-2014 by JXP
#;-
#;------------------------------------------------------------------------------
"""
from __future__ import print_function, absolute_import, division, unicode_literals
# Import libraries
import numpy as np
import os, sys, imp
import matplotlib.pyplot as plt
from PyQt4 import QtGui
from PyQt4 import QtCore
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
# Matplotlib Figure object
from matplotlib.figure import Figure
from astropy.table.table import Table
from astropy import constants as const
from astropy import units as u
from astropy.units import Quantity
u.def_unit(['mAA', 'milliAngstrom'], 0.001 * u.AA, namespace=globals()) # mA
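# The call above registers a milli-Angstrom unit in this module's namespace,
# e.g. (1 * u.AA).to(mAA) evaluates to 1000 mAA (illustrative).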
from astropy.nddata import StdDevUncertainty
from specutils.spectrum1d import Spectrum1D
from linetools.spectra import io as lsi
from linetools.spectralline import AbsLine
from linetools.lists.linelist import LineList
from xastropy import stats as xstats
from xastropy.xutils import xdebug as xdb
from xastropy import xutils
from xastropy.plotting import utils as xputils
from xastropy.igm.abs_sys import abssys_utils as xiaa
from xastropy.igm.abs_sys.lls_utils import LLSSystem
from xastropy.xguis import utils as xguiu
xa_path = imp.find_module('xastropy')[1]
# class ExamineSpecWidget
# class PlotLinesWidget
class ExamineSpecWidget(QtGui.QWidget):
''' Widget to plot a spectrum and interactively
fiddle about. Akin to XIDL/x_specplot.pro
12-Dec-2014 by JXP
'''
def __init__(self, ispec, parent=None, status=None, llist=None,
abs_sys=None, norm=True, second_file=None, zsys=None):
'''
spec = Spectrum1D
'''
super(ExamineSpecWidget, self).__init__(parent)
# Spectrum
spec, spec_fil = read_spec(ispec, second_file=second_file)
self.orig_spec = spec # For smoothing
self.spec = self.orig_spec
# Abs Systems
if abs_sys is None:
self.abs_sys = []
else:
self.abs_sys = abs_sys
self.norm = norm
self.psdict = {} # Dict for spectra plotting
self.adict = {} # Dict for analysis
self.init_spec()
self.xval = None # Used with velplt
# Status Bar?
if not status is None:
self.statusBar = status
# Line List?
if llist is None:
self.llist = {'Plot': False, 'List': 'None', 'z': 0.}
else:
self.llist = llist
# zsys
if not zsys is None:
self.llist['z'] = zsys
# Create the mpl Figure and FigCanvas objects.
# 5x4 inches, 100 dots-per-inch
#
self.dpi = 150 # 150
self.fig = Figure((8.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
self.canvas.mpl_connect('key_press_event', self.on_key)
self.canvas.mpl_connect('button_press_event', self.on_click)
# Make two plots
self.ax = self.fig.add_subplot(1,1,1)
self.fig.subplots_adjust(hspace=0.1, wspace=0.1)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
#
# Draw on init
self.on_draw()
# Setup the spectrum plotting info
def init_spec(self):
#xy min/max
xmin = np.min(self.spec.dispersion).value
xmax = np.max(self.spec.dispersion).value
ymed = np.median(self.spec.flux).value
ymin = 0. - 0.1*ymed
ymax = ymed * 1.5
#
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.psdict['xmnx'] = np.array([xmin,xmax])
self.psdict['ymnx'] = [ymin,ymax]
self.psdict['sv_xy'] = [ [xmin,xmax], [ymin,ymax] ]
self.psdict['nav'] = navigate(0,0,init=True)
# Analysis dict
self.adict['flg'] = 0 # Column density flag
# Main Driver
def on_key(self,event):
flg = -1
## NAVIGATING
if event.key in self.psdict['nav']:
flg = navigate(self.psdict,event)
## DOUBLETS
if event.key in ['C','M','X','4','8','B']: # Set left
wave = set_doublet(self, event)
#print('wave = {:g},{:g}'.format(wave[0], wave[1]))
self.ax.plot( [wave[0],wave[0]], self.psdict['ymnx'], '--', color='red')
self.ax.plot( [wave[1],wave[1]], self.psdict['ymnx'], '--', color='red')
flg = 2 # Layer
## SMOOTH
if event.key == 'S':
self.spec = self.spec.box_smooth(2)
flg = 1
if event.key == 'U':
self.spec = self.orig_spec
flg = 1
## Lya Profiles
if event.key in ['D', 'R']:
# Set NHI
if event.key == 'D':
NHI = 20.3
elif event.key == 'R':
NHI = 19.0
zlya = event.xdata/1215.6701 - 1.
self.llist['z'] = zlya
# Generate Lya profile
lya_line = AbsLine(1215.6701*u.AA)
lya_line.z = zlya
lya_line.attrib['N'] = NHI
lya_line.attrib['b'] = 30.
self.lya_line = xspec.voigt.voigt_model(self.spec.dispersion, lya_line, Npix=3.)
self.adict['flg'] = 4
flg = 1
## ANALYSIS: EW, AODM column density
if event.key in ['N', 'E', '$']:
# If column check for line list
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
if (event.key in ['N','E']) & (self.llist['List'] == 'None'):
print('xspec: Choose a Line list first!')
try:
self.statusBar().showMessage('Choose a Line list first!')
except AttributeError:
pass
self.adict['flg'] = 0
return
flg = 1
if self.adict['flg'] == 0:
self.adict['wv_1'] = event.xdata # wavelength
self.adict['C_1'] = event.ydata # continuum
self.adict['flg'] = 1 # Plot dot
else:
self.adict['wv_2'] = event.xdata # wavelength
self.adict['C_2'] = event.ydata # continuum
self.adict['flg'] = 2 # Ready to plot + print
# Sort em + make arrays
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
iwv = np.array(sorted([self.adict['wv_1'], self.adict['wv_2']])) * self.spec.wcs.unit
ic = np.array(sorted([self.adict['C_1'], self.adict['C_2']]))
# Calculate the continuum (linear fit)
param = np.polyfit(iwv, ic, 1)
cfunc = np.poly1d(param)
self.spec.conti = cfunc(self.spec.dispersion)
if event.key == '$': # Simple stats
pix = self.spec.pix_minmax(iwv)[0]
mean = np.mean(self.spec.flux[pix])
median = np.median(self.spec.flux[pix])
stdv = np.std(self.spec.flux[pix]-self.spec.conti[pix])
S2N = median / stdv
mssg = 'Mean={:g}, Median={:g}, S/N={:g}'.format(mean,median,S2N)
else:
# Find the spectral line (or request it!)
rng_wrest = iwv / (self.llist['z']+1)
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
gdl = np.where( (self.llist[self.llist['List']].wrest-rng_wrest[0]) *
(self.llist[self.llist['List']].wrest-rng_wrest[1]) < 0.)[0]
if len(gdl) == 1:
wrest = self.llist[self.llist['List']].wrest[gdl[0]]
else:
if len(gdl) == 0: # Search through them all
gdl = np.arange(len(self.llist[self.llist['List']]))
sel_widg = SelectLineWidget(self.llist[self.llist['List']]._data[gdl])
sel_widg.exec_()
line = sel_widg.line
#wrest = float(line.split('::')[1].lstrip())
quant = line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
# Units
if not hasattr(wrest,'unit'):
# Assume Ang
wrest = wrest * u.AA
# Generate the Spectral Line
aline = AbsLine(wrest,linelist=self.llist[self.llist['List']])
aline.attrib['z'] = self.llist['z']
aline.analy['spec'] = self.spec
# AODM
if event.key == 'N':
# Calculate the velocity limits and load-up
aline.analy['vlim'] = const.c.to('km/s') * (
( iwv/(1+self.llist['z']) - wrest) / wrest )
# AODM
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
aline.measure_aodm()
mssg = 'Using '+ aline.__repr__()
mssg = mssg + ' :: logN = {:g} +/- {:g}'.format(
aline.attrib['logN'], aline.attrib['sig_logN'])
elif event.key == 'E': #EW
aline.analy['wvlim'] = iwv
aline.measure_restew()
mssg = 'Using '+ aline.__repr__()
mssg = mssg + ' :: Rest EW = {:g} +/- {:g}'.format(
aline.attrib['EW'].to(mAA), aline.attrib['sigEW'].to(mAA))
# Display values
try:
self.statusBar().showMessage(mssg)
except AttributeError:
pass
print(mssg)
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
## Velocity plot
if event.key == 'v':
flg = 0
from xastropy.xguis import spec_guis as xsgui
z=self.llist['z']
# Check for a match in existing list and use it if so
if len(self.abs_sys) > 0:
zabs = np.array([abs_sys.zabs for abs_sys in self.abs_sys])
mt = np.where( np.abs(zabs-z) < 1e-4)[0]
else:
mt = []
if len(mt) == 1:
ini_abs_sys = self.abs_sys[mt[0]]
outfil = ini_abs_sys.absid_file
self.vplt_flg = 0 # Old one
print('Using existing ID file {:s}'.format(outfil))
else:
ini_abs_sys = None
outfil = None
self.vplt_flg = 1 # New one
# Outfil
if outfil is None:
i0 = self.spec.filename.rfind('/')
i1 = self.spec.filename.rfind('.')
if i0 < 0:
path = './ID_LINES/'
else:
path = self.spec.filename[0:i0]+'/ID_LINES/'
outfil = path + self.spec.filename[i0+1:i1]+'_z'+'{:.4f}'.format(z)+'_id.fits'
xutils.files.ensure_dir(outfil)
self.outfil = outfil
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Launch
gui = xsgui.XVelPltGui(self.spec, z=z, outfil=outfil, llist=self.llist,
abs_sys=ini_abs_sys, norm=self.norm,
sel_wv=self.xval*self.spec.wcs.unit)
gui.exec_()
if gui.flg_quit == 0: # Quit without saving (i.e. discarded)
self.vplt_flg = 0
else:
# Push to Abs_Sys
if len(mt) == 1:
self.abs_sys[mt[0]] = gui.abs_sys
else:
self.abs_sys.append(gui.abs_sys)
print('Adding new abs system')
# Redraw
flg=1
# Dummy keys
if event.key in ['shift', 'control', 'shift+super', 'super+shift']:
flg = 0
# Draw
if flg==1: # Default is not to redraw
self.on_draw()
elif flg==2: # Layer (no clear)
self.on_draw(replot=False)
elif flg==-1: # Layer (no clear)
try:
self.statusBar().showMessage('Not a valid key! {:s}'.format(event.key))
except AttributeError:
pass
# Click of main mouse button
def on_click(self,event):
try:
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
except ValueError:
print('Out of bounds')
return
if event.button == 1: # Draw line
self.xval = event.xdata
self.ax.plot( [event.xdata,event.xdata], self.psdict['ymnx'], ':', color='green')
self.on_draw(replot=False)
# Print values
try:
self.statusBar().showMessage('x,y = {:f}, {:f}'.format(event.xdata,event.ydata))
except AttributeError:
return
# ######
def on_draw(self, replot=True):
""" Redraws the spectrum
"""
#
if replot is True:
self.ax.clear()
self.ax.plot(self.spec.dispersion, self.spec.flux, 'k-',drawstyle='steps-mid')
try:
self.ax.plot(self.spec.dispersion, self.spec.sig, 'r:')
except ValueError:
pass
self.ax.set_xlabel('Wavelength')
self.ax.set_ylabel('Flux')
# Spectral lines?
if self.llist['Plot'] is True:
ylbl = self.psdict['ymnx'][1]-0.2*(self.psdict['ymnx'][1]-self.psdict['ymnx'][0])
z = self.llist['z']
wvobs = np.array((1+z) * self.llist[self.llist['List']].wrest)
gdwv = np.where( (wvobs > self.psdict['xmnx'][0]) &
(wvobs < self.psdict['xmnx'][1]))[0]
for kk in range(len(gdwv)):
jj = gdwv[kk]
wrest = self.llist[self.llist['List']].wrest[jj].value
lbl = self.llist[self.llist['List']].name[jj]
# Plot
self.ax.plot(wrest*np.array([z+1,z+1]), self.psdict['ymnx'], 'b--')
# Label
self.ax.text(wrest*(z+1), ylbl, lbl, color='blue', rotation=90., size='small')
# Abs Sys?
if not self.abs_sys is None:
ylbl = self.psdict['ymnx'][0]+0.2*(self.psdict['ymnx'][1]-self.psdict['ymnx'][0])
clrs = ['red', 'green', 'cyan', 'orange', 'gray', 'purple']*10
for abs_sys in self.abs_sys:
ii = self.abs_sys.index(abs_sys)
kwrest = np.array(abs_sys.lines.keys())
wvobs = kwrest * (abs_sys.zabs+1) * u.AA
gdwv = np.where( ((wvobs.value+5) > self.psdict['xmnx'][0]) & # Buffer for region
((wvobs.value-5) < self.psdict['xmnx'][1]))[0]
for kk in range(len(gdwv)):
jj = gdwv[kk]
if abs_sys.lines[kwrest[jj]].analy['do_analysis'] == 0:
continue
# Paint spectrum red
wvlim = wvobs[jj]*(1 + abs_sys.lines[kwrest[jj]].analy['vlim']/const.c.to('km/s'))
pix = np.where( (self.spec.dispersion > wvlim[0]) & (self.spec.dispersion < wvlim[1]))[0]
self.ax.plot(self.spec.dispersion[pix], self.spec.flux[pix], '-',drawstyle='steps-mid',
color=clrs[ii])
# Label
lbl = abs_sys.lines[kwrest[jj]].analy['IONNM']+' z={:g}'.format(abs_sys.zabs)
self.ax.text(wvobs[jj].value, ylbl, lbl, color=clrs[ii], rotation=90., size='x-small')
# Analysis? EW, Column
if self.adict['flg'] == 1:
self.ax.plot(self.adict['wv_1'], self.adict['C_1'], 'go')
elif self.adict['flg'] == 2:
self.ax.plot([self.adict['wv_1'], self.adict['wv_2']],
[self.adict['C_1'], self.adict['C_2']], 'g--', marker='o')
self.adict['flg'] = 0
# Lya line?
if self.adict['flg'] == 4:
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.ax.plot(self.spec.dispersion, self.lya_line.flux, color='green')
# Reset window limits
self.ax.set_xlim(self.psdict['xmnx'])
self.ax.set_ylim(self.psdict['ymnx'])
# Draw
self.canvas.draw()
# Notes on usage
def help_notes():
doublets = [ 'Doublets --------',
'C: CIV',
'M: MgII',
'O: OVI',
'8: NeVIII',
'B: Lyb/Lya'
]
analysis = [ 'Analysis --------',
'N/N: Column density (AODM)',
'E/E: EW (boxcar)',
'$/$: stats on spectrum'
                     ]
        return doublets + analysis
# #####
class PlotLinesWidget(QtGui.QWidget):
''' Widget to set up spectral lines for plotting
13-Dec-2014 by JXP
'''
def __init__(self, parent=None, status=None, init_llist=None, init_z=None):
'''
'''
super(PlotLinesWidget, self).__init__(parent)
# Initialize
if not status is None:
self.statusBar = status
if init_z is None:
init_z = 0.
# Create a dialog window for redshift
z_label = QtGui.QLabel('z=')
self.zbox = QtGui.QLineEdit()
self.zbox.z_frmt = '{:.7f}'
self.zbox.setText(self.zbox.z_frmt.format(init_z))
self.zbox.setMinimumWidth(50)
self.connect(self.zbox, QtCore.SIGNAL('editingFinished ()'), self.setz)
# Create the line list
self.lists = ['None', 'ISM', 'Strong', 'H2']
#'grb.lst', 'dla.lst', 'lls.lst', 'subLLS.lst',
# 'lyman.lst', 'Dlyman.lst', 'gal_vac.lst', 'ne8.lst',
# 'lowz_ovi.lst', 'casbah.lst', 'H2.lst']
list_label = QtGui.QLabel('Line Lists:')
self.llist_widget = QtGui.QListWidget(self)
for ilist in self.lists:
self.llist_widget.addItem(ilist)
self.llist_widget.setCurrentRow(0)
self.llist_widget.currentItemChanged.connect(self.on_list_change)
self.llist_widget.setMaximumHeight(100)
# Input line list?
if init_llist is None:
self.llist = {} # Dict for the line lists
self.llist['Plot'] = False
self.llist['z'] = 0.
self.llist['List'] = 'None'
else: # Fill it all up and select
self.llist = init_llist
if not init_llist['List'] in self.lists:
self.lists.append(init_llist['List'])
self.llist_widget.addItem(init_llist['List'])
self.llist_widget.setCurrentRow(len(self.lists)-1)
else:
idx = self.lists.index(init_llist['List'])
self.llist_widget.setCurrentRow(idx)
try:
self.zbox.setText(self.zbox.z_frmt.format(init_llist['z']))
except KeyError:
pass
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(z_label)
vbox.addWidget(self.zbox)
vbox.addWidget(list_label)
vbox.addWidget(self.llist_widget)
self.setLayout(vbox)
self.setMaximumHeight(200)
def on_list_change(self,curr,prev):
llist = str(curr.text())
# Print
try:
self.statusBar().showMessage('You chose: {:s}'.format(llist))
except AttributeError:
print('You chose: {:s}'.format(curr.text()))
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.llist = set_llist(llist,in_dict=self.llist)
# Try to draw
if self.llist['Plot'] is True:
try:
self.spec_widg.on_draw()
except AttributeError:
return
def setz(self):
sstr = unicode(self.zbox.text())
try:
self.llist['z'] = float(sstr)
except ValueError:
try:
self.statusBar().showMessage('ERROR: z Input must be a float! Try again..')
except AttributeError:
print('ERROR: z Input must be a float! Try again..')
self.zbox.setText(self.zbox.z_frmt.format(self.llist['z']))
return
# Report
try:
self.statusBar().showMessage('z = {:g}'.format(self.llist['z']))
except AttributeError:
print('z = {:g}'.format(self.llist['z']))
# Try to draw
try:
self.spec_widg.on_draw()
except AttributeError:
return
# #####
class SelectLineWidget(QtGui.QDialog):
''' Widget to select a spectral line
    inp: Table
Input line list
15-Dec-2014 by JXP
'''
def __init__(self, inp, parent=None):
'''
'''
super(SelectLineWidget, self).__init__(parent)
# Line list Table
if isinstance(inp,Table):
lines = inp
else:
raise ValueError('SelectLineWidget: Wrong type of input')
self.resize(250, 800)
# Create the line list
line_label = QtGui.QLabel('Lines:')
self.lines_widget = QtGui.QListWidget(self)
self.lines_widget.addItem('None')
self.lines_widget.setCurrentRow(0)
#xdb.set_trace()
# Loop on lines (could put a preferred list first)
# Sort
srt = np.argsort(lines['wrest'])
for ii in srt:
self.lines_widget.addItem('{:s} :: {:.4f}'.format(lines['name'][ii],
lines['wrest'][ii]))
self.lines_widget.currentItemChanged.connect(self.on_list_change)
#self.scrollArea = QtGui.QScrollArea()
# Quit
qbtn = QtGui.QPushButton('Quit', self)
qbtn.clicked.connect(self.close)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(line_label)
vbox.addWidget(self.lines_widget)
vbox.addWidget(qbtn)
self.setLayout(vbox)
def on_list_change(self,curr,prev):
self.line = str(curr.text())
# Print
print('You chose: {:s}'.format(curr.text()))
# #####
class SelectedLinesWidget(QtGui.QWidget):
''' Widget to show and enable lines to be selected
inp: LineList
Input LineList
24-Dec-2014 by JXP
'''
def __init__(self, inp, parent=None, init_select=None, plot_widget=None):
'''
'''
super(SelectedLinesWidget, self).__init__(parent)
# Line list Table
if isinstance(inp,LineList):
self.lines = inp._data
self.llst = inp
elif isinstance(inp,Table):
raise ValueError('SelectedLineWidget: DEPRECATED')
else:
raise ValueError('SelectedLineWidget: Wrong type of input')
self.plot_widget = plot_widget
# Create the line list
line_label = QtGui.QLabel('Lines:')
self.lines_widget = QtGui.QListWidget(self)
self.lines_widget.setSelectionMode(QtGui.QAbstractItemView.MultiSelection)
# Initialize list
self.item_flg = 0
self.init_list()
# Initial selection
if init_select is None:
self.selected = [0]
else:
self.selected = init_select
for iselect in self.selected:
self.lines_widget.item(iselect).setSelected(True)
self.lines_widget.scrollToItem( self.lines_widget.item( self.selected[0] ) )
# Events
#self.lines_widget.itemClicked.connect(self.on_list_change)
self.lines_widget.itemSelectionChanged.connect(self.on_item_change)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(line_label)
vbox.addWidget(self.lines_widget)
self.setLayout(vbox)
def init_list(self):
nlin = len(self.lines['wrest'])
for ii in range(nlin):
self.lines_widget.addItem('{:s} :: {:.3f}'.format(self.lines['name'][ii],
self.lines['wrest'][ii].value))
def on_item_change(self): #,item):
# For big changes
if self.item_flg == 1:
return
all_items = [self.lines_widget.item(ii) for ii in range(self.lines_widget.count())]
sel_items = self.lines_widget.selectedItems()
self.selected = [all_items.index(isel) for isel in sel_items]
self.selected.sort()
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Update llist
try:
self.plot_widget.llist['show_line'] = self.selected
except AttributeError:
return
else:
self.plot_widget.on_draw()
def on_list_change(self,lines):
# Clear
self.item_flg = 1
self.lines_widget.clear()
# Initialize
self.lines = lines
self.init_list()
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Set selected
for iselect in self.selected:
self.lines_widget.item(iselect).setSelected(True)
self.lines_widget.scrollToItem( self.lines_widget.item( self.selected[0] ) )
self.item_flg = 0
# #####
class AbsSysWidget(QtGui.QWidget):
''' Widget to organize AbsSys along a given sightline
Parameters:
-----------
abssys_list: List
String list of abssys files
16-Dec-2014 by JXP
'''
def __init__(self, abssys_list, parent=None):
'''
'''
super(AbsSysWidget, self).__init__(parent)
#if not status is None:
# self.statusBar = status
self.abssys_list = abssys_list
# Create the line list
list_label = QtGui.QLabel('Abs Systems:')
self.abslist_widget = QtGui.QListWidget(self)
self.abslist_widget.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
self.abslist_widget.addItem('None')
#self.abslist_widget.addItem('Test')
# Lists
self.abs_sys = []
self.items = []
self.all_items = []
self.all_abssys = []
for abssys_fil in self.abssys_list:
self.all_abssys.append(LLS_System.from_absid_fil(abssys_fil))
self.add_item(abssys_fil)
self.abslist_widget.setCurrentRow(0)
self.abslist_widget.itemSelectionChanged.connect(self.on_list_change)
# Layout
vbox = QtGui.QVBoxLayout()
vbox.addWidget(list_label)
# Buttons
buttons = QtGui.QWidget()
self.refine_button = QtGui.QPushButton('Refine', self)
#self.refine_button.clicked.connect(self.refine) # CONNECTS TO A PARENT
reload_btn = QtGui.QPushButton('Reload', self)
reload_btn.clicked.connect(self.reload)
hbox1 = QtGui.QHBoxLayout()
hbox1.addWidget(self.refine_button)
hbox1.addWidget(reload_btn)
buttons.setLayout(hbox1)
vbox.addWidget(buttons)
vbox.addWidget(self.abslist_widget)
self.setLayout(vbox)
# ##
def on_list_change(self):
items = self.abslist_widget.selectedItems()
# Empty the list
#self.abs_sys = []
if len(self.abs_sys) > 0:
for ii in range(len(self.abs_sys)-1,-1,-1):
self.abs_sys.pop(ii)
# Load up abs_sys (as need be)
new_items = []
for item in items:
txt = item.text()
# Dummy
if txt == 'None':
continue
print('Including {:s} in the list'.format(txt))
# Using LLS for now. Might change to generic
new_items.append(txt)
ii = self.all_items.index(txt)
self.abs_sys.append(self.all_abssys[ii])
# Pass back
self.items = new_items
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
def add_fil(self,abssys_fil):
self.abssys_list.append( abssys_fil )
self.add_item(abssys_fil)
def add_item(self,abssys_fil):
ipos0 = abssys_fil.rfind('/') + 1
ipos1 = abssys_fil.rfind('.fits')
self.all_items.append( abssys_fil[ipos0:ipos1] )
self.abslist_widget.addItem(abssys_fil[ipos0:ipos1] )
def reload(self):
print('AbsSysWidget: Reloading systems..')
self.all_abssys = []
for abssys_fil in self.abssys_list:
self.all_abssys.append(LLS_System.from_absid_fil(abssys_fil))
#self.add_item(abssys_fil)
self.on_list_change()
# ######################
class VelPlotWidget(QtGui.QWidget):
''' Widget for a velocity plot with interaction.
19-Dec-2014 by JXP
'''
def __init__(self, ispec, z=None, parent=None, llist=None, norm=True,
vmnx=[-300., 300.]*u.km/u.s, abs_sys=None):
'''
        ispec: Spectrum1D or spectrum filename
Norm: Bool (False)
Normalized spectrum?
abs_sys: AbsSystem
Absorption system class
'''
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
super(VelPlotWidget, self).__init__(parent)
# Initialize
spec, spec_fil = read_spec(ispec)
self.spec = spec
self.spec_fil = spec_fil
self.z = z
self.vmnx = vmnx
self.norm = norm
# Abs_System
self.abs_sys = abs_sys
if self.abs_sys is None:
self.abs_sys = xiaa.GenericAbsSystem()
self.abs_sys.zabs = self.z
else:
self.z = self.abs_sys.zabs
# Line list
if llist is None:
try:
lwrest = [iline.wrest for iline in self.abs_sys.lines]
except AttributeError:
lwrest = None
if not lwrest is None:
llist = set_llist(lwrest) # Not sure this is working..
self.psdict = {} # Dict for spectra plotting
self.psdict['xmnx'] = self.vmnx.value
self.psdict['ymnx'] = [-0.1, 1.1]
self.psdict['nav'] = navigate(0,0,init=True)
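        # navigate(..., init=True) simply returns the list of recognized navigation keystrokes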
# Status Bar?
#if not status is None:
# self.statusBar = status
# Line List
if llist is None:
self.llist = set_llist('Strong')
else:
self.llist = llist
self.llist['z'] = self.z
# Indexing for line plotting
self.idx_line = 0
self.init_lines()
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 150
self.fig = Figure((8.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
self.canvas.mpl_connect('key_press_event', self.on_key)
self.canvas.mpl_connect('button_press_event', self.on_click)
# Sub_plots
self.sub_xy = [3,4]
self.fig.subplots_adjust(hspace=0.0, wspace=0.1)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
# Draw on init
self.on_draw()
# Load them up for display
def init_lines(self):
wvmin = np.min(self.spec.dispersion)
wvmax = np.max(self.spec.dispersion)
#
wrest = self.llist[self.llist['List']].wrest
wvobs = (1+self.z) * wrest
gdlin = np.where( (wvobs > wvmin) & (wvobs < wvmax) )[0]
self.llist['show_line'] = gdlin
# Update/generate lines [will not update]
for idx in gdlin:
self.generate_line((self.z,wrest[idx]))
def generate_line(self,inp):
''' Generate a new line, if it doesn't exist
Parameters:
----------
inp: tuple
(z,wrest)
'''
# Generate?
if self.abs_sys.grab_line(inp) is None:
newline = AbsLine(inp[1],linelist=self.llist[self.llist['List']])
print('VelPlot: Generating line {:g}'.format(inp[1]))
newline.analy['vlim'] = self.vmnx/2.
newline.analy['do_analysis'] = 2 # Init to ok
# Spec file
if self.spec_fil is not None:
newline.analy['datafile'] = self.spec_fil
# Append
self.abs_sys.lines.append(newline)
# Key stroke
def on_key(self,event):
# Init
rescale = True
fig_clear = False
wrest = None
flg = 0
sv_idx = self.idx_line
## Change rows/columns
        if event.key == 'k':  # fewer rows (keep at least one)
            self.sub_xy[0] = max(1, self.sub_xy[0]-1)
        if event.key == 'K':
            self.sub_xy[0] = self.sub_xy[0]+1
        if event.key == 'c':  # fewer columns (keep at least one)
            self.sub_xy[1] = max(1, self.sub_xy[1]-1)
        if event.key == 'C':
            self.sub_xy[1] = self.sub_xy[1]+1
## NAVIGATING
if event.key in self.psdict['nav']:
flg = navigate(self.psdict,event)
if event.key == '-':
self.idx_line = max(0, self.idx_line-self.sub_xy[0]*self.sub_xy[1]) # Min=0
if self.idx_line == sv_idx:
print('Edge of list')
if event.key == '=':
self.idx_line = min(len(self.llist['show_line'])-self.sub_xy[0]*self.sub_xy[1],
self.idx_line + self.sub_xy[0]*self.sub_xy[1])
if self.idx_line == sv_idx:
print('Edge of list')
## Reset z
if event.key == 'z':
            # Assuming xastropy's own relativity helper here (astropy itself has no 'relativity' subpackage)
            from xastropy.relativity import velocities
            newz = velocities.z_from_v(self.z, event.xdata)
self.z = newz
self.abs_sys.zabs = newz
# Drawing
            self.psdict['xmnx'] = self.vmnx.value  # keep plain km/s floats in psdict, as elsewhere
# Single line command
if event.key in ['1','2','B','U','L','N','V','A', 'x', 'X']:
try:
wrest = event.inaxes.get_gid()
except AttributeError:
return
else:
absline = self.abs_sys.grab_line((self.z,wrest))
kwrest = wrest.value
## Velocity limits
unit = u.km/u.s
if event.key == '1':
absline.analy['vlim'][0] = event.xdata*unit
if event.key == '2':
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
absline.analy['vlim'][1] = event.xdata*unit
if event.key == '!':
for iline in self.abs_sys.lines:
iline.analy['vlim'][0] = event.xdata*unit
if event.key == '@':
for iline in self.abs_sys.lines:
iline.analy['vlim'][1] = event.xdata*unit
## Line type
if event.key == 'A': # Add to lines
self.generate_line((self.z,wrest))
if event.key == 'x': # Remove line
if self.abs_sys.remove_line((self.z,wrest)):
print('VelPlot: Removed line {:g}'.format(wrest))
if event.key == 'X': # Remove all lines
# Double check
gui = xguiu.WarningWidg('About to remove all lines. \n Continue??')
gui.exec_()
if gui.ans is False:
return
#
self.abs_sys.lines = [] # Flush??
if event.key == 'B': # Toggle blend
try:
feye = absline.analy['flg_eye']
except KeyError:
feye = 0
feye = (feye + 1) % 2
absline.analy['flg_eye'] = feye
if event.key == 'N': # Toggle NG
try:
fanly = absline.analy['do_analysis']
except KeyError:
fanly = 2
if fanly == 0:
fanly = 2 # Not using 1 anymore..
else:
fanly = 0
absline.analy['do_analysis'] = fanly
if event.key == 'V': # Normal
absline.analy['flg_limit'] = 1
if event.key == 'L': # Lower limit
absline.analy['flg_limit'] = 2
if event.key == 'U': # Upper limit
absline.analy['flg_limit'] = 3
# AODM plot
if event.key == ':': #
# Grab good lines
from xastropy.xguis import spec_guis as xsgui
gdl = [iline.wrest for iline in self.abs_sys.lines
if iline.analy['do_analysis'] > 0]
# Launch AODM
if len(gdl) > 0:
gui = xsgui.XAODMGui(self.spec, self.z, gdl, vmnx=self.vmnx, norm=self.norm)
gui.exec_()
else:
print('VelPlot.AODM: No good lines to plot')
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
if not wrest is None: # Single window
flg = 3
if event.key in ['c','C','k','K','W','!', '@', '=', '-', 'X', 'z','R']: # Redraw all
flg = 1
if event.key in ['Y']:
rescale = False
if event.key in ['k','c','C','K', 'R']:
fig_clear = True
if flg==1: # Default is not to redraw
self.on_draw(rescale=rescale, fig_clear=fig_clear)
elif flg==2: # Layer (no clear)
self.on_draw(replot=False, rescale=rescale)
elif flg==3: # Layer (no clear)
self.on_draw(in_wrest=wrest, rescale=rescale)
# Click of main mouse button
def on_click(self,event):
try:
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
except ValueError:
return
if event.button == 1: # Draw line
self.ax.plot( [event.xdata,event.xdata], self.psdict['ymnx'], ':', color='green')
self.on_draw(replot=False)
# Print values
try:
self.statusBar().showMessage('x,y = {:f}, {:f}'.format(event.xdata,event.ydata))
except AttributeError:
return
def on_draw(self, replot=True, in_wrest=None, rescale=True, fig_clear=False):
""" Redraws the figure
"""
#
if replot is True:
if fig_clear:
self.fig.clf()
# Loop on windows
all_idx = self.llist['show_line']
nplt = self.sub_xy[0]*self.sub_xy[1]
if len(all_idx) <= nplt:
self.idx_line = 0
subp = np.arange(nplt) + 1
subp_idx = np.hstack(subp.reshape(self.sub_xy[0],self.sub_xy[1]).T)
for jj in range(min(nplt, len(all_idx))):
try:
idx = all_idx[jj+self.idx_line]
except IndexError:
continue # Likely too few lines
# Grab line
#wvobs = np.array((1+self.z) * self.llist[self.llist['List']]['wrest'][idx])
wrest = self.llist[self.llist['List']].wrest[idx] # *
# self.llist[self.llist['List']].wrest.unit)
kwrest = wrest.value # For the Dict
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Single window?
if not in_wrest is None:
if np.abs(wrest-in_wrest) > (1e-3*u.AA):
continue
# Generate plot
self.ax = self.fig.add_subplot(self.sub_xy[0],self.sub_xy[1], subp_idx[jj])
self.ax.clear()
#print('Plotting {:g}, {:d}'.format(wrest,subp_idx[jj]))
# Zero line
self.ax.plot( [0., 0.], [-1e9, 1e9], ':', color='gray')
# Velocity
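                # Non-relativistic Doppler offset from the redshifted line center: v = c * (lambda/wvobs - 1), in km/s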
wvobs = (1+self.z) * wrest
velo = (self.spec.dispersion/wvobs - 1.)*const.c.to('km/s').value
# Plot
self.ax.plot(velo, self.spec.flux, 'k-',drawstyle='steps-mid')
# GID for referencing
self.ax.set_gid(wrest)
# Labels
#if jj >= (self.sub_xy[0]-1)*(self.sub_xy[1]):
if ((jj+1) % self.sub_xy[0]) == 0:
self.ax.set_xlabel('Relative Velocity (km/s)')
else:
self.ax.get_xaxis().set_ticks([])
#if ((jj+1) // 2 == 0) & (jj < self.sub_xy[0]):
# self.ax.set_ylabel('Relative Flux')
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
lbl = self.llist[self.llist['List']].name[idx]
self.ax.text(0.1, 0.05, lbl, color='blue', transform=self.ax.transAxes,
size='x-small', ha='left')
# Reset window limits
self.ax.set_xlim(self.psdict['xmnx'])
# Rescale?
if (rescale is True) & (self.norm is False):
gdp = np.where( (velo > self.psdict['xmnx'][0]) &
(velo < self.psdict['xmnx'][1]))[0]
if len(gdp) > 5:
per = xstats.basic.perc(self.spec.flux[gdp])
self.ax.set_ylim((0., 1.1*per[1]))
else:
self.ax.set_ylim(self.psdict['ymnx'])
else:
self.ax.set_ylim(self.psdict['ymnx'])
# Fonts
xputils.set_fontsize(self.ax,6.)
# Abs_Sys: Color the lines
if not self.abs_sys is None:
absline = self.abs_sys.grab_line((self.z,wrest))
if absline is None:
break
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
try:
vlim = absline.analy['vlim'].value
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
except KeyError:
continue
# Color coding
clr = 'black'
try: # .clm style
flag = absline.analy['FLAGS'][0]
except KeyError:
flag = None
else:
if flag <= 1: # Standard detection
clr = 'green'
elif flag in [2,3]:
clr = 'blue'
elif flag in [4,5]:
clr = 'purple'
# ABS ID
try: # NG?
flagA = absline.analy['do_analysis']
except KeyError:
flagA = None
else:
if (flagA>0) & (clr == 'black'):
clr = 'green'
try: # Limit?
flagL = absline.analy['flg_limit']
except KeyError:
flagL = None
else:
if flagL == 2:
clr = 'blue'
if flagL == 3:
clr = 'purple'
try: # Blends?
flagE = absline.analy['flg_eye']
except KeyError:
flagE = None
else:
if flagE == 1:
clr = 'orange'
if flagA == 0:
clr = 'red'
pix = np.where( (velo > vlim[0]) & (velo < vlim[1]))[0]
self.ax.plot(velo[pix], self.spec.flux[pix], '-',
drawstyle='steps-mid', color=clr)
# Draw
self.canvas.draw()
# ######################
class AODMWidget(QtGui.QWidget):
''' Widget for comparing tau_AODM profiles
19-Dec-2014 by JXP
'''
def __init__(self, spec, z, wrest, parent=None, vmnx=[-300., 300.]*u.km/u.s,
norm=True, linelist=None):
'''
spec = Spectrum1D
'''
super(AODMWidget, self).__init__(parent)
# Initialize
self.spec = spec
self.norm = norm
self.z = z
self.vmnx = vmnx
self.wrest = wrest # Expecting (requires) units
self.lines = []
        if linelist is None:
            self.linelist = LineList('ISM')
        else:
            self.linelist = linelist
        for iwrest in self.wrest:
            self.lines.append(AbsLine(iwrest, linelist=self.linelist))
self.psdict = {} # Dict for spectra plotting
        self.psdict['xmnx'] = self.vmnx.value  # store plain km/s floats, as in VelPlotWidget
self.psdict['ymnx'] = [-0.1, 1.1]
self.psdict['nav'] = navigate(0,0,init=True)
# Create the mpl Figure and FigCanvas objects.
#
self.dpi = 150
self.fig = Figure((8.0, 4.0), dpi=self.dpi)
self.canvas = FigureCanvas(self.fig)
self.canvas.setParent(self)
self.canvas.setFocusPolicy( QtCore.Qt.ClickFocus )
self.canvas.setFocus()
self.canvas.mpl_connect('key_press_event', self.on_key)
self.canvas.mpl_connect('button_press_event', self.on_click)
vbox = QtGui.QVBoxLayout()
vbox.addWidget(self.canvas)
self.setLayout(vbox)
# Draw on init
self.on_draw()
# Key stroke
def on_key(self,event):
# Init
rescale = True
flg = 0
## NAVIGATING
if event.key in self.psdict['nav']:
flg = navigate(self.psdict,event)
if event.key in ['b','t','W','Z','Y','l','r']:
rescale = False
self.on_draw(rescale=rescale)
# Click of main mouse button
def on_click(self,event):
return # DO NOTHING FOR NOW
try:
print('button={:d}, x={:f}, y={:f}, xdata={:f}, ydata={:f}'.format(
event.button, event.x, event.y, event.xdata, event.ydata))
except ValueError:
return
if event.button == 1: # Draw line
self.ax.plot( [event.xdata,event.xdata], self.psdict['ymnx'], ':', color='green')
self.on_draw()
# Print values
try:
self.statusBar().showMessage('x,y = {:f}, {:f}'.format(event.xdata,event.ydata))
except AttributeError:
return
def on_draw(self, rescale=True):
""" Redraws the figure
"""
#
self.ax = self.fig.add_subplot(1,1,1)
self.ax.clear()
ymx = 0.
for ii,iwrest in enumerate(self.wrest):
# Velocity
wvobs = (1+self.z) * iwrest
velo = (self.spec.dispersion/wvobs - 1.)*const.c.to('km/s').value
gdp = np.where((velo > self.psdict['xmnx'][0]) &
(velo < self.psdict['xmnx'][1]))[0]
# Normalize?
if self.norm is False:
per = xstats.basic.perc(self.spec.flux[gdp])
fsplice = per[1] / self.spec.flux[gdp]
else:
fsplice = 1./ self.spec.flux[gdp]
# AODM
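            # Apparent optical depth method (Savage & Sembach 1991):
            # N_a(v) = 3.768e14 * tau_a(v) / (f * lambda[Ang])  [atoms cm^-2 (km/s)^-1]; 10**14.5761 ~ 3.768e14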
cst = (10.**14.5761)/(self.lines[ii].atomic['fval']*iwrest.value)
Naodm = np.log(fsplice)*cst
ymx = max(ymx,np.max(Naodm))
# Plot
line, = self.ax.plot(velo[gdp], Naodm, '-', drawstyle='steps-mid')
# Labels
lbl = '{:g}'.format(iwrest)
clr = plt.getp(line, 'color')
self.ax.text(0.1, 1.-(0.05+0.05*ii), lbl, color=clr,
transform=self.ax.transAxes, size='small', ha='left')
self.ax.set_xlabel('Relative Velocity (km/s)')
self.ax.set_ylabel('N(AODM)')
# Zero line
self.ax.plot( [0., 0.], [-1e29, 1e29], ':', color='gray')
# Reset window limits
self.ax.set_xlim(self.psdict['xmnx'])
if rescale:
self.psdict['ymnx'] = [0.05*ymx, ymx*1.1]
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
self.ax.set_ylim(self.psdict['ymnx'])
# Draw
self.canvas.draw()
# ######
# Plot Doublet
def set_doublet(iself,event):
''' Set z and plot doublet
'''
wv_dict = {'C': (1548.195, 1550.770, 'CIV'), 'M': (2796.352, 2803.531, 'MgII'),
'4': (1393.755, 1402.770, 'SiIV'),
'X': (1031.9261, 1037.6167, 'OVI'), '8': (770.409, 780.324, 'NeVIII'),
'B': (1025.4433, 1215.6701, 'Lyba')}
wrest = wv_dict[event.key]
# Set z
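    # Observed wavelength = rest wavelength * (1+z), so z = lambda_obs / lambda_rest - 1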
iself.zabs = event.xdata/wrest[0] - 1.
try:
iself.statusBar().showMessage('z = {:g} for {:s}'.format(iself.zabs, wrest[2]))
except AttributeError:
print('z = {:g} for {:s}'.format(iself.zabs, wrest[2]))
return np.array(wrest[0:2])*(1.+iself.zabs)
# ######
# Navigate
def navigate(psdict,event,init=False):
    ''' Method to navigate the spectrum plot with keystrokes
    init: bool (False)
      If True, simply return the list of valid navigation keystrokes
    '''
    # Initialize
if init is True:
return ['l','r','b','t','T','i','I', 'o','O', '[',']','W','Z', 'Y', '{', '}']
#
if (not isinstance(event.xdata,float)) or (not isinstance(event.ydata,float)):
print('Navigate: You entered the {:s} key out of bounds'.format(event.key))
return 0
if event.key == 'l': # Set left
psdict['xmnx'][0] = event.xdata
elif event.key == 'r': # Set Right
psdict['xmnx'][1] = event.xdata
elif event.key == 'b': # Set Bottom
psdict['ymnx'][0] = event.ydata
elif event.key == 't': # Set Top
psdict['ymnx'][1] = event.ydata
elif event.key == 'T': # Set Top to 1.1
psdict['ymnx'][1] = 1.1
elif event.key == 'i': # Zoom in (and center)
deltx = (psdict['xmnx'][1]-psdict['xmnx'][0])/4.
psdict['xmnx'] = [event.xdata-deltx, event.xdata+deltx]
elif event.key == 'I': # Zoom in (and center)
deltx = (psdict['xmnx'][1]-psdict['xmnx'][0])/16.
psdict['xmnx'] = [event.xdata-deltx, event.xdata+deltx]
    elif event.key == 'o': # Zoom out x2 (and center)
        deltx = psdict['xmnx'][1]-psdict['xmnx'][0]
        psdict['xmnx'] = [event.xdata-deltx, event.xdata+deltx]
    elif event.key == 'O': # Zoom out x4 (and center)
        deltx = psdict['xmnx'][1]-psdict['xmnx'][0]
        psdict['xmnx'] = [event.xdata-2*deltx, event.xdata+2*deltx]
    elif event.key == 'Y': # Zoom out x2 in y (and center)
        delty = psdict['ymnx'][1]-psdict['ymnx'][0]
        psdict['ymnx'] = [event.ydata-delty, event.ydata+delty]
elif event.key in ['[',']','{','}']: # Pan
center = (psdict['xmnx'][1]+psdict['xmnx'][0])/2.
deltx = (psdict['xmnx'][1]-psdict['xmnx'][0])/2.
if event.key == '[':
new_center = center - deltx
elif event.key == ']':
new_center = center + deltx
elif event.key == '{':
new_center = center - 4*deltx
elif event.key == '}':
new_center = center + 4*deltx
psdict['xmnx'] = [new_center-deltx, new_center+deltx]
elif event.key == 'W': # Reset the Window
psdict['xmnx'] = psdict['sv_xy'][0]
psdict['ymnx'] = psdict['sv_xy'][1]
elif event.key == 'Z': # Zero
psdict['ymnx'][0] = 0.
else:
if not (event.key in ['shift']):
rstr = 'Key {:s} not supported.'.format(event.key)
print(rstr)
return 0
return 1
# ######
#
def set_llist(llist,in_dict=None):
    ''' Method to set a line list dict for the Widgets
    llist: str or list
      Name of a LineList (e.g. 'ISM') or a list of rest wavelengths
    in_dict: dict, optional
      Existing dict to update and return
    '''
from linetools.lists.linelist import LineList
if in_dict is None:
in_dict = {}
if type(llist) in [str,unicode]: # Set line list from a file
in_dict['List'] = llist
if llist == 'None':
in_dict['Plot'] = False
else:
in_dict['Plot'] = True
# Load?
if not (llist in in_dict):
#line_file = xa_path+'/data/spec_lines/'+llist
#llist_cls = xspec.abs_line.Abs_Line_List(llist)
llist_cls = LineList(llist)
in_dict[llist] = llist_cls
elif isinstance(llist,list): # Set from a list of wrest
from astropy.table import Column
in_dict['List'] = 'input.lst'
in_dict['Plot'] = True
# Fill
llist.sort()
tmp_dict = {}
# Parse from grb.lst
llist_cls = LineList('ISM', gd_lines=llist)
in_dict['input.lst'] = llist_cls
'''
line_file = xa_path+'/data/spec_lines/grb.lst'
llist_cls = xspec.abs_line.Abs_Line_List(line_file)
adict = llist_cls.data
# Fill
names = []
fval = []
for wrest in llist:
mt = np.where(np.abs(wrest-adict['wrest']) < 1e-3)[0]
if len(mt) != 1:
raise ValueError('Problem!')
names.append(adict['name'][mt][0])
fval.append(adict['fval'][mt][0])
# Set
#QtCore.pyqtRemoveInputHook()
#xdb.set_trace()
#QtCore.pyqtRestoreInputHook()
# Generate a Table
col0 = Column(np.array(llist), name='wrest', unit=u.AA) # Assumed Angstroms
col1 = Column(np.array(names), name='name')
col2 = Column(np.array(fval), name='fval')
in_dict['input.lst'] = Table( (col0,col1,col2) )
'''
# Return
return in_dict
# Read spectrum, pass back it and spec_file name
def read_spec(ispec, second_file=None):
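    ''' Read a spectrum from a filename or pass through a Spectrum1D.
    If second_file is given, it is read, scaled to the first spectrum
    (matching 90th-percentile flux levels) and spliced onto it.
    Returns (spec, spec_fil).
    '''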
#
if isinstance(ispec,str) or isinstance(ispec,unicode):
spec_fil = ispec
spec = lsi.readspec(spec_fil)
# Second file?
if not second_file is None:
spec2 = lsi.readspec(second_file)
if spec2.sig is None:
                spec2.sig = np.zeros(spec2.flux.size)
# Scale for convenience of plotting
xper1 = xstats.basic.perc(spec.flux, per=0.9)
xper2 = xstats.basic.perc(spec2.flux, per=0.9)
scl = xper1[1]/xper2[1]
# Stitch together
wave3 = np.append(spec.dispersion, spec2.dispersion)
flux3 = np.append(spec.flux, spec2.flux*scl)
sig3 = np.append(spec.sig, spec2.sig*scl)
spec3 = Spectrum1D.from_array(wave3, flux3, uncertainty=StdDevUncertainty(sig3))
# Overwrite
spec = spec3
spec.filename = spec_fil
else:
spec = ispec # Assuming Spectrum1D
spec_fil = spec.filename # Grab from Spectrum1D
# Return
return spec, spec_fil
# ################
# TESTING
if __name__ == "__main__":
from xastropy import spec as xspec
if len(sys.argv) == 1: #
flg_tst = 0
flg_tst += 2**0 # ExamineSpecWidget
#flg_tst += 2**1 # PlotLinesWidget
#flg_tst += 2**2 # SelectLineWidget
#flg_tst += 2**3 # AbsSysWidget
#flg_tst += 2**4 # VelPltWidget
#flg_tst += 2**5 # SelectedLinesWidget
#flg_tst += 2**6 # AODMWidget
else:
flg_tst = int(sys.argv[1])
# ExamineSpec
if (flg_tst % 2) == 1:
app = QtGui.QApplication(sys.argv)
spec_fil = '/u/xavier/Keck/HIRES/RedData/PH957/PH957_f.fits'
spec = lsi.readspec(spec_fil)
app.setApplicationName('XSpec')
main = ExamineSpecWidget(spec)
main.show()
sys.exit(app.exec_())
# PltLineWidget
if (flg_tst % 2**2) >= 2**1:
app = QtGui.QApplication(sys.argv)
app.setApplicationName('PltLine')
main = PlotLinesWidget()
main.show()
sys.exit(app.exec_())
# SelectLineWidget
if (flg_tst % 2**3) >= 2**2:
orig = False
llist_cls = LineList('ISM')
app = QtGui.QApplication(sys.argv)
app.setApplicationName('SelectLine')
main = SelectLineWidget(llist_cls._data)
main.show()
app.exec_()
print(main.line)
# Another test
quant = main.line.split('::')[1].lstrip()
spltw = quant.split(' ')
wrest = Quantity(float(spltw[0]), unit=spltw[1])
print(wrest)
sys.exit()
# AbsSys Widget
if (flg_tst % 2**4) >= 2**3:
abs_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ1004+0018_z2.746_id.fits'
abs_fil2 = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/SDSSJ2319-1040_z2.675_id.fits'
app = QtGui.QApplication(sys.argv)
app.setApplicationName('AbsSys')
main = AbsSysWidget([abs_fil,abs_fil2])
main.show()
sys.exit(app.exec_())
# VelPlt Widget
if (flg_tst % 2**5) >= 2**4:
specf = 0
if specf == 0: # PH957 DLA
# Spectrum
spec_fil = '/u/xavier/Keck/HIRES/RedData/PH957/PH957_f.fits'
spec = lsi.readspec(spec_fil)
# Abs_sys
abs_sys = xiaa.GenericAbsSystem()
abs_sys.clm_fil = '/Users/xavier/DLA/Abund/PH957.z2309.clm'
abs_sys.get_ions(skip_ions=True, fill_lines=True)
abs_sys.zabs = abs_sys.clm_analy.zsys
elif specf == 1: # UM184 LLS
# Spectrum
spec_fil = '/Users/xavier/PROGETTI/LLSZ3/data/normalize/UM184_nF.fits'
spec = lsi.readspec(spec_fil)
# Abs_sys
abs_fil = '/Users/xavier/paper/LLS/Optical/Data/Analysis/MAGE/UM184_z2.930_id.fits'
abs_sys = xiaa.GenericAbsSystem()
abs_sys.parse_absid_file(abs_fil)
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('VelPlot')
main = VelPlotWidget(spec, abs_sys=abs_sys)
main.show()
sys.exit(app.exec_())
# SelectedLines Widget
if (flg_tst % 2**6) >= 2**5:
print('Test: SelectedLines Widget')
llist = set_llist('ISM')
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('SelectedLines')
main = SelectedLinesWidget(llist['ISM'])#._data)
main.show()
sys.exit(app.exec_())
# AODM Widget
if (flg_tst % 2**7) >= 2**6:
spec_fil = '/Users/xavier/PROGETTI/LLSZ3/data/normalize/UM184_nF.fits'
spec = lsi.readspec(spec_fil)
z=2.96916
lines = np.array([1548.195, 1550.770]) * u.AA
# Launch
app = QtGui.QApplication(sys.argv)
app.setApplicationName('AODM')
main = AODMWidget(spec, z, lines)
main.show()
sys.exit(app.exec_())
| bsd-3-clause |
mjudsp/Tsallis | sklearn/svm/classes.py | 34 | 40599 | import warnings
import numpy as np
from .base import _fit_liblinear, BaseSVC, BaseLibSVM
from ..base import BaseEstimator, RegressorMixin
from ..linear_model.base import LinearClassifierMixin, SparseCoefMixin, \
LinearModel
from ..feature_selection.from_model import _LearntSelectorMixin
from ..utils import check_X_y
from ..utils.validation import _num_samples
from ..utils.multiclass import check_classification_targets
class LinearSVC(BaseEstimator, LinearClassifierMixin,
_LearntSelectorMixin, SparseCoefMixin):
"""Linear Support Vector Classification.
Similar to SVC with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input and the multiclass support
is handled according to a one-vs-the-rest scheme.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
loss : string, 'hinge' or 'squared_hinge' (default='squared_hinge')
Specifies the loss function. 'hinge' is the standard SVM loss
(used e.g. by the SVC class) while 'squared_hinge' is the
square of the hinge loss.
penalty : string, 'l1' or 'l2' (default='l2')
Specifies the norm used in the penalization. The 'l2'
penalty is the standard used in SVC. The 'l1' leads to ``coef_``
vectors that are sparse.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
    multi_class : string, 'ovr' or 'crammer_singer' (default='ovr')
Determines the multi-class strategy if `y` contains more than
two classes.
``"ovr"`` trains n_classes one-vs-rest classifiers, while ``"crammer_singer"``
optimizes a joint objective over all classes.
While `crammer_singer` is interesting from a theoretical perspective
as it is consistent, it is seldom used in practice as it rarely leads
to better accuracy and is more expensive to compute.
If ``"crammer_singer"`` is chosen, the options loss, penalty and dual will
be ignored.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
``[x, self.intercept_scaling]``,
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to ``class_weight[i]*C`` for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
``coef_`` is a readonly property derived from ``raw_coef_`` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
Notes
-----
The underlying C implementation uses a random number generator to
select features when fitting the model. It is thus not uncommon
to have slightly different results for the same input data. If
that happens, try with a smaller ``tol`` parameter.
The underlying implementation, liblinear, uses a sparse internal
representation for the data that will incur a memory copy.
Predict output may not match that of standalone liblinear in certain
cases. See :ref:`differences from liblinear <liblinear_differences>`
in the narrative documentation.
References
----------
`LIBLINEAR: A Library for Large Linear Classification
<http://www.csie.ntu.edu.tw/~cjlin/liblinear/>`__
See also
--------
SVC
Implementation of Support Vector Machine classifier using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
Furthermore SVC multi-class mode is implemented using one
vs one scheme while LinearSVC uses one vs the rest. It is
possible to implement one vs the rest with SVC by using the
:class:`sklearn.multiclass.OneVsRestClassifier` wrapper.
Finally SVC can fit dense data without memory copy if the input
is C-contiguous. Sparse data will still incur memory copy though.
sklearn.linear_model.SGDClassifier
SGDClassifier can optimize the same cost function as LinearSVC
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
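    Examples
    --------
    A minimal, illustrative usage sketch (not a verified doctest; it simply
    mirrors the SVC example elsewhere in this module):
    >>> import numpy as np
    >>> from sklearn.svm import LinearSVC
    >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
    >>> y = np.array([1, 1, 2, 2])
    >>> clf = LinearSVC().fit(X, y)
    >>> print(clf.predict([[-0.8, -1]]))
    [1]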
"""
def __init__(self, penalty='l2', loss='squared_hinge', dual=True, tol=1e-4,
C=1.0, multi_class='ovr', fit_intercept=True,
intercept_scaling=1, class_weight=None, verbose=0,
random_state=None, max_iter=1000):
self.dual = dual
self.tol = tol
self.C = C
self.multi_class = multi_class
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.class_weight = class_weight
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.penalty = penalty
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'hinge', 'l2': 'squared_hinge'}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
check_classification_targets(y)
self.classes_ = np.unique(y)
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
self.class_weight, self.penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, self.multi_class,
self.loss)
if self.multi_class == "crammer_singer" and len(self.classes_) == 2:
self.coef_ = (self.coef_[1] - self.coef_[0]).reshape(1, -1)
if self.fit_intercept:
intercept = self.intercept_[1] - self.intercept_[0]
self.intercept_ = np.array([intercept])
return self
class LinearSVR(LinearModel, RegressorMixin):
"""Linear Support Vector Regression.
Similar to SVR with parameter kernel='linear', but implemented in terms of
liblinear rather than libsvm, so it has more flexibility in the choice of
penalties and loss functions and should scale better to large numbers of
samples.
This class supports both dense and sparse input.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term. The penalty is a squared
l2 penalty. The bigger this parameter, the less regularization is used.
loss : string, 'epsilon_insensitive' or 'squared_epsilon_insensitive' (default='epsilon_insensitive')
        Specifies the loss function. 'epsilon_insensitive' is the standard
        SVR loss, while 'squared_epsilon_insensitive' is its square.
epsilon : float, optional (default=0.1)
Epsilon parameter in the epsilon-insensitive loss function. Note
that the value of this parameter depends on the scale of the target
variable y. If unsure, set ``epsilon=0``.
dual : bool, (default=True)
Select the algorithm to either solve the dual or primal
optimization problem. Prefer dual=False when n_samples > n_features.
tol : float, optional (default=1e-4)
Tolerance for stopping criteria.
fit_intercept : boolean, optional (default=True)
Whether to calculate the intercept for this model. If set
to false, no intercept will be used in calculations
(i.e. data is expected to be already centered).
intercept_scaling : float, optional (default=1)
When self.fit_intercept is True, instance vector x becomes
[x, self.intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
The intercept becomes intercept_scaling * synthetic feature weight
Note! the synthetic feature weight is subject to l1/l2 regularization
as all other features.
To lessen the effect of regularization on synthetic feature weight
(and therefore on the intercept) intercept_scaling has to be increased.
verbose : int, (default=0)
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in liblinear that, if enabled, may not work
properly in a multithreaded context.
random_state : int seed, RandomState instance, or None (default=None)
The seed of the pseudo random number generator to use when
shuffling the data.
max_iter : int, (default=1000)
The maximum number of iterations to be run.
Attributes
----------
coef_ : array, shape = [n_features] if n_classes == 2 else [n_classes, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `raw_coef_` that
follows the internal memory layout of liblinear.
intercept_ : array, shape = [1] if n_classes == 2 else [n_classes]
Constants in decision function.
See also
--------
LinearSVC
Implementation of Support Vector Machine classifier using the
same library as this class (liblinear).
SVR
Implementation of Support Vector Machine regression using libsvm:
the kernel can be non-linear but its SMO algorithm does not
scale to large number of samples as LinearSVC does.
sklearn.linear_model.SGDRegressor
SGDRegressor can optimize the same cost function as LinearSVR
by adjusting the penalty and loss parameters. In addition it requires
less memory, allows incremental (online) learning, and implements
various loss functions and regularization regimes.
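    Examples
    --------
    A minimal, illustrative usage sketch (not a verified doctest):
    >>> import numpy as np
    >>> from sklearn.svm import LinearSVR
    >>> X = np.arange(10, dtype=np.float64).reshape(-1, 1)
    >>> y = 2.0 * X.ravel() + 1.0
    >>> reg = LinearSVR(C=10.0, epsilon=0.0).fit(X, y)
    >>> y_pred = reg.predict([[5.0]])   # expected to be close to array([ 11.])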
"""
def __init__(self, epsilon=0.0, tol=1e-4, C=1.0,
loss='epsilon_insensitive', fit_intercept=True,
intercept_scaling=1., dual=True, verbose=0,
random_state=None, max_iter=1000):
self.tol = tol
self.C = C
self.epsilon = epsilon
self.fit_intercept = fit_intercept
self.intercept_scaling = intercept_scaling
self.verbose = verbose
self.random_state = random_state
self.max_iter = max_iter
self.dual = dual
self.loss = loss
def fit(self, X, y):
"""Fit the model according to the given training data.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples]
Target vector relative to X
Returns
-------
self : object
Returns self.
"""
# FIXME Remove l1/l2 support in 1.0 -----------------------------------
loss_l = self.loss.lower()
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the loss='%s' will be removed in %s")
# FIXME change loss_l --> self.loss after 0.18
if loss_l in ('l1', 'l2'):
old_loss = self.loss
self.loss = {'l1': 'epsilon_insensitive',
'l2': 'squared_epsilon_insensitive'
}.get(loss_l)
warnings.warn(msg % (old_loss, self.loss, old_loss, '1.0'),
DeprecationWarning)
# ---------------------------------------------------------------------
if self.C < 0:
raise ValueError("Penalty term must be positive; got (C=%r)"
% self.C)
X, y = check_X_y(X, y, accept_sparse='csr',
dtype=np.float64, order="C")
penalty = 'l2' # SVR only accepts l2 penalty
self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear(
X, y, self.C, self.fit_intercept, self.intercept_scaling,
None, penalty, self.dual, self.verbose,
self.max_iter, self.tol, self.random_state, loss=self.loss,
epsilon=self.epsilon)
self.coef_ = self.coef_.ravel()
return self
class SVC(BaseSVC):
"""C-Support Vector Classification.
The implementation is based on libsvm. The fit time complexity
is more than quadratic with the number of samples which makes it hard
to scale to dataset with more than a couple of 10000 samples.
The multiclass support is handled according to a one-vs-one scheme.
For details on the precise mathematical formulation of the provided
kernel functions and how `gamma`, `coef0` and `degree` affect each
other, see the corresponding section in the narrative documentation:
:ref:`svm_kernels`.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to pre-compute the kernel matrix from data matrices; that matrix
should be an array of shape ``(n_samples, n_samples)``.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'balanced'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one.
The "balanced" mode uses the values of y to automatically adjust
weights inversely proportional to class frequencies in the input data
as ``n_samples / (n_classes * np.bincount(y))``
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in the
SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is a readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import SVC
>>> clf = SVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVC(C=1.0, cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, probability=False, random_state=None, shrinking=True,
tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVR
Support Vector Machine for Regression implemented using libsvm.
LinearSVC
Scalable Linear Support Vector Machine for classification
implemented using liblinear. Check the See also section of
LinearSVC for more comparison element.
"""
def __init__(self, C=1.0, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None,
verbose=False, max_iter=-1, decision_function_shape=None,
random_state=None):
super(SVC, self).__init__(
impl='c_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class NuSVC(BaseSVC):
"""Nu-Support Vector Classification.
Similar to SVC but uses a parameter to control the number of support
vectors.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_classification>`.
Parameters
----------
nu : float, optional (default=0.5)
An upper bound on the fraction of training errors and a lower
bound of the fraction of support vectors. Should be in the
interval (0, 1].
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
probability : boolean, optional (default=False)
Whether to enable probability estimates. This must be enabled prior
to calling `fit`, and will slow down that method.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
class_weight : {dict, 'auto'}, optional
Set the parameter C of class i to class_weight[i]*C for
SVC. If not given, all classes are supposed to have
weight one. The 'auto' mode uses the values of y to
automatically adjust weights inversely proportional to
class frequencies.
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
decision_function_shape : 'ovo', 'ovr' or None, default=None
Whether to return a one-vs-rest ('ovr') decision function of shape
(n_samples, n_classes) as all other classifiers, or the original
one-vs-one ('ovo') decision function of libsvm which has shape
(n_samples, n_classes * (n_classes - 1) / 2).
The default of None will currently behave as 'ovo' for backward
        compatibility and raise a deprecation warning, but will change to
        'ovr' in 0.18.
.. versionadded:: 0.17
*decision_function_shape='ovr'* is recommended.
.. versionchanged:: 0.17
Deprecated *decision_function_shape='ovo' and None*.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
n_support_ : array-like, dtype=int32, shape = [n_class]
Number of support vectors for each class.
dual_coef_ : array, shape = [n_class-1, n_SV]
Coefficients of the support vector in the decision function.
For multiclass, coefficient for all 1-vs-1 classifiers.
The layout of the coefficients in the multiclass case is somewhat
non-trivial. See the section about multi-class classification in
the SVM section of the User Guide for details.
coef_ : array, shape = [n_class-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [n_class * (n_class-1) / 2]
Constants in decision function.
Examples
--------
>>> import numpy as np
>>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]])
>>> y = np.array([1, 1, 2, 2])
>>> from sklearn.svm import NuSVC
>>> clf = NuSVC()
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVC(cache_size=200, class_weight=None, coef0=0.0,
decision_function_shape=None, degree=3, gamma='auto', kernel='rbf',
max_iter=-1, nu=0.5, probability=False, random_state=None,
shrinking=True, tol=0.001, verbose=False)
>>> print(clf.predict([[-0.8, -1]]))
[1]
See also
--------
SVC
Support Vector Machine for classification using libsvm.
LinearSVC
Scalable linear Support Vector Machine for classification using
liblinear.
"""
def __init__(self, nu=0.5, kernel='rbf', degree=3, gamma='auto',
coef0=0.0, shrinking=True, probability=False,
tol=1e-3, cache_size=200, class_weight=None, verbose=False,
max_iter=-1, decision_function_shape=None, random_state=None):
super(NuSVC, self).__init__(
impl='nu_svc', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=0., nu=nu, shrinking=shrinking,
probability=probability, cache_size=cache_size,
class_weight=class_weight, verbose=verbose, max_iter=max_iter,
decision_function_shape=decision_function_shape,
random_state=random_state)
class SVR(BaseLibSVM, RegressorMixin):
"""Epsilon-Support Vector Regression.
The free parameters in the model are C and epsilon.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
epsilon : float, optional (default=0.1)
Epsilon in the epsilon-SVR model. It specifies the epsilon-tube
within which no penalty is associated in the training loss function
        for points predicted within a distance epsilon of the actual
value.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import SVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = SVR(C=1.0, epsilon=0.2)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
SVR(C=1.0, cache_size=200, coef0=0.0, degree=3, epsilon=0.2, gamma='auto',
kernel='rbf', max_iter=-1, shrinking=True, tol=0.001, verbose=False)
See also
--------
NuSVR
Support Vector Machine for regression implemented using libsvm
using a parameter to control the number of support vectors.
LinearSVR
Scalable Linear Support Vector Machine for regression
implemented using liblinear.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, C=1.0, epsilon=0.1, shrinking=True,
cache_size=200, verbose=False, max_iter=-1):
super(SVR, self).__init__(
'epsilon_svr', kernel=kernel, degree=degree, gamma=gamma,
coef0=coef0, tol=tol, C=C, nu=0., epsilon=epsilon, verbose=verbose,
shrinking=shrinking, probability=False, cache_size=cache_size,
class_weight=None, max_iter=max_iter, random_state=None)
class NuSVR(BaseLibSVM, RegressorMixin):
"""Nu Support Vector Regression.
Similar to NuSVC, for regression, uses a parameter nu to control
the number of support vectors. However, unlike NuSVC, where nu
replaces C, here nu replaces the parameter epsilon of epsilon-SVR.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_regression>`.
Parameters
----------
C : float, optional (default=1.0)
Penalty parameter C of the error term.
nu : float, optional
An upper bound on the fraction of training errors and a lower bound of
the fraction of support vectors. Should be in the interval (0, 1]. By
default 0.5 will be taken.
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
shrinking : boolean, optional (default=True)
Whether to use the shrinking heuristic.
tol : float, optional (default=1e-3)
Tolerance for stopping criterion.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [1, n_SV]
Coefficients of the support vector in the decision function.
coef_ : array, shape = [1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`.
intercept_ : array, shape = [1]
Constants in decision function.
Examples
--------
>>> from sklearn.svm import NuSVR
>>> import numpy as np
>>> n_samples, n_features = 10, 5
>>> np.random.seed(0)
>>> y = np.random.randn(n_samples)
>>> X = np.random.randn(n_samples, n_features)
>>> clf = NuSVR(C=1.0, nu=0.1)
>>> clf.fit(X, y) #doctest: +NORMALIZE_WHITESPACE
NuSVR(C=1.0, cache_size=200, coef0=0.0, degree=3, gamma='auto',
kernel='rbf', max_iter=-1, nu=0.1, shrinking=True, tol=0.001,
verbose=False)
See also
--------
NuSVC
Support Vector Machine for classification implemented with libsvm
with a parameter to control the number of support vectors.
SVR
epsilon Support Vector Machine for regression implemented with libsvm.
"""
def __init__(self, nu=0.5, C=1.0, kernel='rbf', degree=3,
gamma='auto', coef0=0.0, shrinking=True, tol=1e-3,
cache_size=200, verbose=False, max_iter=-1):
super(NuSVR, self).__init__(
'nu_svr', kernel=kernel, degree=degree, gamma=gamma, coef0=coef0,
tol=tol, C=C, nu=nu, epsilon=0., shrinking=shrinking,
probability=False, cache_size=cache_size, class_weight=None,
verbose=verbose, max_iter=max_iter, random_state=None)
class OneClassSVM(BaseLibSVM):
"""Unsupervised Outlier Detection.
Estimate the support of a high-dimensional distribution.
The implementation is based on libsvm.
Read more in the :ref:`User Guide <svm_outlier_detection>`.
Parameters
----------
kernel : string, optional (default='rbf')
Specifies the kernel type to be used in the algorithm.
It must be one of 'linear', 'poly', 'rbf', 'sigmoid', 'precomputed' or
a callable.
If none is given, 'rbf' will be used. If a callable is given it is
used to precompute the kernel matrix.
nu : float, optional
An upper bound on the fraction of training
errors and a lower bound of the fraction of support
vectors. Should be in the interval (0, 1]. By default 0.5
will be taken.
degree : int, optional (default=3)
Degree of the polynomial kernel function ('poly').
Ignored by all other kernels.
gamma : float, optional (default='auto')
Kernel coefficient for 'rbf', 'poly' and 'sigmoid'.
If gamma is 'auto' then 1/n_features will be used instead.
coef0 : float, optional (default=0.0)
Independent term in kernel function.
It is only significant in 'poly' and 'sigmoid'.
tol : float, optional
Tolerance for stopping criterion.
shrinking : boolean, optional
Whether to use the shrinking heuristic.
cache_size : float, optional
Specify the size of the kernel cache (in MB).
verbose : bool, default: False
Enable verbose output. Note that this setting takes advantage of a
per-process runtime setting in libsvm that, if enabled, may not work
properly in a multithreaded context.
max_iter : int, optional (default=-1)
Hard limit on iterations within solver, or -1 for no limit.
random_state : int seed, RandomState instance, or None (default)
The seed of the pseudo random number generator to use when
shuffling the data for probability estimation.
Attributes
----------
support_ : array-like, shape = [n_SV]
Indices of support vectors.
    support_vectors_ : array-like, shape = [n_SV, n_features]
Support vectors.
dual_coef_ : array, shape = [n_classes-1, n_SV]
Coefficients of the support vectors in the decision function.
coef_ : array, shape = [n_classes-1, n_features]
Weights assigned to the features (coefficients in the primal
problem). This is only available in the case of a linear kernel.
`coef_` is readonly property derived from `dual_coef_` and
`support_vectors_`
intercept_ : array, shape = [n_classes-1]
Constants in decision function.
"""
def __init__(self, kernel='rbf', degree=3, gamma='auto', coef0=0.0,
tol=1e-3, nu=0.5, shrinking=True, cache_size=200,
verbose=False, max_iter=-1, random_state=None):
super(OneClassSVM, self).__init__(
'one_class', kernel, degree, gamma, coef0, tol, 0., nu, 0.,
shrinking, False, cache_size, None, verbose, max_iter,
random_state)
def fit(self, X, y=None, sample_weight=None, **params):
"""
Detects the soft boundary of the set of samples X.
Parameters
----------
X : {array-like, sparse matrix}, shape (n_samples, n_features)
Set of samples, where n_samples is the number of samples and
n_features is the number of features.
sample_weight : array-like, shape (n_samples,)
Per-sample weights. Rescale C per sample. Higher weights
force the classifier to put more emphasis on these points.
Returns
-------
self : object
Returns self.
Notes
-----
If X is not a C-ordered contiguous array it is copied.
"""
super(OneClassSVM, self).fit(X, np.ones(_num_samples(X)), sample_weight=sample_weight,
**params)
return self
def decision_function(self, X):
"""Distance of the samples X to the separating hyperplane.
Parameters
----------
X : array-like, shape (n_samples, n_features)
Returns
-------
        dec : array-like, shape (n_samples,)
Returns the decision function of the samples.
"""
dec = self._decision_function(X)
return dec
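# Illustrative sketch, not part of the library API: a minimal OneClassSVM
# workflow; the data and parameter values below are assumptions chosen only to
# show how fit() and decision_function() are used together.
def _example_one_class_svm_usage():
    import numpy as np
    rng = np.random.RandomState(0)
    X = 0.3 * rng.randn(100, 2)                # inliers clustered near the origin
    clf = OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
    clf.fit(X)
    # positive decision values fall inside the learned support, negative outside
    return clf.decision_function(np.array([[0.0, 0.0], [4.0, 4.0]]))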
| bsd-3-clause |
stpsomad/thesis | pointcloud/duplicates/duplicate.py | 2 | 2699 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 03 20:20 2016
@author: Stella Psomadaki
Command line executable to remove duplicates. Only one folder per run.
This module takes a laz, las file and removes duplicate points in the
(x,y) dimensions. This is an important preprocessing step because the IOT
does not allow duplicates in the index.
"""
import numpy as np
from pandas import DataFrame
import time
from laspy.file import File
import os
from pointcloud.reader import readFileLaspy
from pointcloud.utils import getFiles
import sys, getopt
def removeDuplicate(file):
"""Removes duplicate points based on X, Y coordinates
Returns a numpy array"""
df = DataFrame(np.vstack((file.x, file.y, file.z)).transpose(), columns=['X', 'Y', 'Z'])
df.drop_duplicates(subset=['X','Y'], inplace=True)
return df.values
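# Illustrative sketch with assumed toy values (not used by the command-line
# tool): points sharing the same X,Y are collapsed to a single row, whatever
# their Z value, which is exactly what removeDuplicate does on real files.
def _exampleRemoveDuplicate():
    pts = np.array([[0.0, 0.0, 1.0],
                    [0.0, 0.0, 2.0],   # duplicate of the first point in X,Y
                    [1.0, 1.0, 3.0]])
    df = DataFrame(pts, columns=['X', 'Y', 'Z'])
    df.drop_duplicates(subset=['X', 'Y'], inplace=True)
    return df.values               # two rows remain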
def writeFile(directory,name,header,coords):
"""Write a laz file using laspy and numpy arrays"""
output = File(directory + name, mode = "w", header=header)
output.x = coords[0]
output.y = coords[1]
output.z = coords[2]
output.close()
def checkDirectory(directory):
""" Checks if the specified directory exists, and otherwise it creates it"""
try:
os.makedirs(directory)
except OSError:
if not os.path.isdir(directory):
raise
def lasDuplicateFree(directory, output):
""" Takes a directory with las [laz] files and an output directory
Returns las, [laz] files free from duplicates"""
files = getFiles(directory,['laz'],True)
checkDirectory(output)
for file in files:
print file
fh = readFileLaspy(file)
writeFile(output,file[file.rfind('\\')+1:],fh.header,removeDuplicate(fh).transpose())
def main(argv):
inputdir = ''
outputdir = ''
try:
opts, args = getopt.getopt(argv, "hi:o:", ["help", "input=", "output="])
except getopt.GetoptError:
print 'lasduplicate.py -i <inputDirectory> -o <outputDirectory>'
sys.exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
print argv[0] +' -i <inputDirectory> -o <outputDirectory>'
sys.exit()
elif opt in ("-i", "--input"):
inputdir = arg
elif opt in ("-o", "--output"):
outputdir = arg
lasDuplicateFree(inputdir, outputdir)
if __name__ =="__main__":
start = time.time()
main(sys.argv[1:])
end = time.time()
print "Finished in ", end - start
#Example run: python duplicate.py -i D:\ -o D:\output\
| isc |
pandas-ml/pandas-ml | pandas_ml/skaccessors/test/test_manifold.py | 2 | 3754 | #!/usr/bin/env python
import pytest
import numpy as np
import sklearn.datasets as datasets
import sklearn.manifold as manifold
import pandas_ml as pdml
import pandas_ml.util.testing as tm
class TestManifold(tm.TestCase):
def test_objectmapper(self):
df = pdml.ModelFrame([])
self.assertIs(df.manifold.LocallyLinearEmbedding,
manifold.LocallyLinearEmbedding)
self.assertIs(df.manifold.Isomap, manifold.Isomap)
self.assertIs(df.manifold.MDS, manifold.MDS)
self.assertIs(df.manifold.SpectralEmbedding, manifold.SpectralEmbedding)
self.assertIs(df.manifold.TSNE, manifold.TSNE)
def test_locally_linear_embedding(self):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
result = df.manifold.locally_linear_embedding(3, 3)
expected = manifold.locally_linear_embedding(iris.data, 3, 3)
self.assertEqual(len(result), 2)
self.assertIsInstance(result[0], pdml.ModelFrame)
tm.assert_index_equal(result[0].index, df.index)
tm.assert_numpy_array_equal(result[0].values, expected[0])
self.assertEqual(result[1], expected[1])
def test_spectral_embedding(self):
N = 10
m = np.random.random_integers(50, 200, size=(N, N))
m = (m + m.T) / 2
df = pdml.ModelFrame(m)
self.assert_numpy_array_almost_equal(df.data.values, m)
result = df.manifold.spectral_embedding(random_state=self.random_state)
expected = manifold.spectral_embedding(m, random_state=self.random_state)
self.assertIsInstance(result, pdml.ModelFrame)
tm.assert_index_equal(result.index, df.index)
# signs can be inversed
self.assert_numpy_array_almost_equal(np.abs(result.data.values),
np.abs(expected))
@pytest.mark.parametrize("algo", ['Isomap'])
def test_Isomap(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.manifold, algo)()
mod2 = getattr(manifold, algo)()
df.fit(mod1)
mod2.fit(iris.data)
result = df.transform(mod1)
expected = mod2.transform(iris.data)
self.assertIsInstance(result, pdml.ModelFrame)
tm.assert_index_equal(result.index, df.index)
self.assert_numpy_array_almost_equal(result.data.values, expected)
@pytest.mark.parametrize("algo", ['MDS'])
def test_MDS(self, algo):
iris = datasets.load_iris()
df = pdml.ModelFrame(iris)
mod1 = getattr(df.manifold, algo)(random_state=self.random_state)
mod2 = getattr(manifold, algo)(random_state=self.random_state)
result = df.fit_transform(mod1)
expected = mod2.fit_transform(iris.data)
self.assertIsInstance(result, pdml.ModelFrame)
tm.assert_index_equal(result.index, df.index)
self.assert_numpy_array_almost_equal(result.data.values, expected)
@pytest.mark.parametrize("algo", ['TSNE'])
def test_TSNE(self, algo):
digits = datasets.load_digits()
df = pdml.ModelFrame(digits)
mod1 = getattr(df.manifold, algo)(n_components=2, random_state=self.random_state)
mod2 = getattr(manifold, algo)(n_components=2, random_state=self.random_state)
# np.random.seed(1)
result = df.fit_transform(mod1)
# np.random.seed(1)
expected = mod2.fit_transform(digits.data)
self.assertIsInstance(result, pdml.ModelFrame)
tm.assert_index_equal(result.index, df.index)
self.assert_numpy_array_almost_equal(result.data.shape, expected.shape)
| bsd-3-clause |
joshrobo/aubio | python/demos/demo_mel-energy.py | 9 | 2203 | #! /usr/bin/env python
import sys
from aubio import fvec, source, pvoc, filterbank
from numpy import vstack, zeros
win_s = 512 # fft size
hop_s = win_s / 4 # hop size
if len(sys.argv) < 2:
print "Usage: %s <filename> [samplerate]" % sys.argv[0]
sys.exit(1)
filename = sys.argv[1]
samplerate = 0
if len( sys.argv ) > 2: samplerate = int(sys.argv[2])
s = source(filename, samplerate, hop_s)
samplerate = s.samplerate
pv = pvoc(win_s, hop_s)
f = filterbank(40, win_s)
f.set_mel_coeffs_slaney(samplerate)
energies = zeros((40,))
o = {}
total_frames = 0
downsample = 2
while True:
samples, read = s()
fftgrain = pv(samples)
new_energies = f(fftgrain)
print '%f' % (total_frames / float(samplerate) ),
print ' '.join(['%f' % b for b in new_energies])
energies = vstack( [energies, new_energies] )
total_frames += read
if read < hop_s: break
if 1:
print "done computing, now plotting"
import matplotlib.pyplot as plt
from demo_waveform_plot import get_waveform_plot
from demo_waveform_plot import set_xlabels_sample2time
fig = plt.figure()
plt.rc('lines',linewidth='.8')
wave = plt.axes([0.1, 0.75, 0.8, 0.19])
get_waveform_plot(filename, samplerate, block_size = hop_s, ax = wave )
wave.yaxis.set_visible(False)
wave.xaxis.set_visible(False)
n_plots = len(energies.T)
all_desc_times = [ x * hop_s for x in range(len(energies)) ]
for i, band in enumerate(energies.T):
ax = plt.axes ( [0.1, 0.75 - ((i+1) * 0.65 / n_plots), 0.8, 0.65 / n_plots], sharex = wave )
ax.plot(all_desc_times, band, '-', label = 'band %d' % i)
#ax.set_ylabel(method, rotation = 0)
ax.xaxis.set_visible(False)
ax.yaxis.set_visible(False)
ax.axis(xmax = all_desc_times[-1], xmin = all_desc_times[0])
ax.annotate('band %d' % i, xy=(-10, 0), xycoords='axes points',
horizontalalignment='right', verticalalignment='bottom',
size = 'xx-small',
)
set_xlabels_sample2time( ax, all_desc_times[-1], samplerate)
#plt.ylabel('spectral descriptor value')
ax.xaxis.set_visible(True)
plt.show()
| gpl-3.0 |
dyf/primopt | curveopt.py | 1 | 6605 | import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import spline
from scipy.optimize import minimize
import numpy as np
from scipy.misc import imread,imsave
from skimage.draw import line
from primitive import image_error
import os
import skimage.feature
import skimage.color
import scipy.ndimage.morphology
def line_clipped(r0, c0, r1, c1, shape):
ln = line(r0, c0, r1, c1)
lngood = [ (ln[i] >= 0) & (ln[i] < shape[i]) for i in range(len(shape)) ]
lngood = lngood[0] & lngood[1]
return [ v[lngood] for v in ln ]
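# Illustrative sketch (assumed 5x5 image, not called elsewhere): only the
# pixels of the rasterised line that fall inside the image bounds are kept,
# so an endpoint may safely lie outside the image.
def _example_line_clipped():
    shape = (5, 5)
    rr, cc = line_clipped(0, 0, 8, 8, shape)   # second endpoint is out of bounds
    canvas = np.zeros(shape, dtype=int)
    canvas[rr, cc] = 1                         # marks the in-bounds diagonal only
    return canvas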
class SplinePrimitive(object):
def __init__(self, dims=2):
self.ps = []
self.vs = []
self.coeffs = []
        self.dims = dims  # honour the requested dimensionality (was hard-coded to 2)
def add_point(self, p, v):
self.ps.append(p)
self.vs.append(v)
        return  # early exit: the coefficient code below is unused; render() interpolates with scipy instead
if len(self.ps) > 1:
self.coeffs.append(spline.cubic_spline_coeffs(self.ps[-2], self.vs[-2],
self.ps[-1], self.vs[-1]))
def set_point(self, i, p, v):
self.ps[i] = p
self.vs[i] = v
return
if i < 0:
i += len(self.ps)
if i > 0:
self.coeffs[i-1] = spline.cubic_spline_coeffs(self.ps[i-1], self.vs[i-1],
self.ps[i], self.vs[i])
if i < (len(self.ps) - 1):
self.coeffs[i] = spline.cubic_spline_coeffs(self.ps[i], self.vs[i],
self.ps[i+1], self.vs[i+1])
def add_random_point(self):
s = np.random.choice([-1,1], size=(self.dims,))
self.add_point(np.random.random(self.dims),
s*np.sqrt(np.random.random(self.dims)))
def randomize_endpoint(self):
s = np.random.choice([-1,1], size=(self.dims,))
self.set_point(-1,
np.random.random(self.dims),
s*np.sqrt(np.random.random(self.dims)))
def mutate_endpoint(self, d):
self.set_point(-1,
np.random.randn(self.dims)*d + 1,
np.random.randn(self.dims)*d + 1)
def remove_endpoint(self):
del self.ps[-1]
del self.vs[-1]
return
if len(self.coeffs):
del self.coeffs[-1]
def render(self, im, segs=20):
import scipy.interpolate
t0 = np.linspace(0, 1, len(self.ps))
t1 = np.linspace(0, 1, len(self.ps)*segs)
try:
xx = np.array([ scipy.interpolate.interp1d(t0, [ p[0] for p in self.ps ], kind='cubic')(t1),
scipy.interpolate.interp1d(t0, [ p[1] for p in self.ps ], kind='cubic')(t1) ])
except Exception as e:
print(self.ps)
raise
# xx = spline.cubic_spline(segs, coeffs_list=self.coeffs)
for i in range(self.dims):
xx[i] *= im.shape[i]
xx = xx.astype(int)
im.fill(0)
for i in range(xx.shape[1]-1):
ln = line_clipped(xx[0,i],xx[1,i],xx[0,i+1],xx[1,i+1],im.shape)
im[ln] = 1
return im
def mutate(self, d):
spl = SplinePrimitive()
for i in range(len(self.ps)):
spl.add_point(self.ps[i] * (np.random.randn(self.dims)*d + 1),
self.vs[i] * (np.random.randn(self.dims)*d + 1))
return spl
@classmethod
    def random(cls, dims=2):
spl = SplinePrimitive(dims)
spl.add_random_point()
spl.add_random_point()
spl.add_random_point()
spl.add_random_point()
return spl
def gradient_image(im, sqr=False, norm_pct=95):
im = im.astype(float)
if len(im.shape) == 2:
gx, gy = np.gradient(im)
else:
gx, gy = np.gradient(im, axis=[0,1])
gx = gx.max(axis=2)
gy = gy.max(axis=2)
gmag = gx*gx + gy*gy
if sqr:
gmag = np.sqrt(gmag)
if norm_pct is not None:
        v = np.percentile(gmag[:], norm_pct)
return gmag / v
else:
return gmag
def canny_dist_image(im, sigma):
gim = skimage.feature.canny(skimage.color.rgb2gray(im).astype(float), sigma=sigma).astype(float)
gim = scipy.ndimage.morphology.distance_transform_edt(1.0-gim)
return 1.0 / (gim + 1.0)
def curveopt(im, N_pts=100, N_init=1000, N_rand=1000):
#gim = gradient_image(im)
gim = canny_dist_image(im, 2)
buf = np.zeros_like(gim)
yield gim
best_spl = None
best_error = float("inf")#image_error(buf,gim)
# pick a good starting point
for i in range(N_init):
spl = SplinePrimitive.random()
spl.render(buf)
err = image_error(buf,gim)
if err < best_error:
best_error = err
best_spl = spl
for i in range(N_rand):
new_spl = best_spl.mutate(.1)
new_spl.render(buf)
err = image_error(buf,gim)
if err < best_error:
best_error = err
best_spl = new_spl
best_spl.render(buf)
best_error = image_error(buf,gim)
yield buf
# add new points
for i in range(N_pts):
best_p = None
best_v = None
best_spl.add_random_point()
for j in range(N_init):
best_spl.randomize_endpoint()
best_spl.render(buf)
err = image_error(buf,gim)
if err < best_error:
best_error = err
best_p = best_spl.ps[-1].copy()
best_v = best_spl.vs[-1].copy()
for j in range(N_rand):
best_spl.mutate_endpoint(.1)
best_spl.render(buf)
err = image_error(buf,gim)
if err < best_error:
best_error = err
best_p = best_spl.ps[-1].copy()
best_v = best_spl.vs[-1].copy()
if best_p is not None:
best_spl.set_point(-1, best_p, best_v)
best_spl.render(buf)
yield buf
def main():
im = imread("kermit.jpg")
savedir = "/mnt/c/Users/davidf/workspace/curveopt/"
for i, cim in enumerate(curveopt(im, N_pts=200, N_init=5000, N_rand=5000)):
print(i)
savepath = os.path.join(savedir, "test_%05d.jpg" % i)
imsave(savepath, 1.0 - cim.clip(0,1))
if __name__ == "__main__": main()
| bsd-2-clause |
chichilalescu/bfps | tests/test_time_step.py | 1 | 4891 | #######################################################################
# #
# Copyright 2015 Max Planck Institute #
# for Dynamics and Self-Organization #
# #
# This file is part of bfps. #
# #
# bfps is free software: you can redistribute it and/or modify #
# it under the terms of the GNU General Public License as published #
# by the Free Software Foundation, either version 3 of the License, #
# or (at your option) any later version. #
# #
# bfps is distributed in the hope that it will be useful, #
# but WITHOUT ANY WARRANTY; without even the implied warranty of #
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the #
# GNU General Public License for more details. #
# #
# You should have received a copy of the GNU General Public License #
# along with bfps. If not, see <http://www.gnu.org/licenses/> #
# #
# Contact: Cristian.Lalescu@ds.mpg.de #
# #
#######################################################################
import numpy as np
import h5py
from mpl_toolkits.mplot3d import axes3d
import matplotlib.pyplot as plt
from base import *
def convergence_test(
opt,
code_launch_routine,
init_vorticity = None,
code_class = bfps.NavierStokes):
opt.simname = 'N{0:0>4}_0'.format(opt.n)
clist = []
clist.append(code_launch_routine(
opt,
vorticity_field = init_vorticity,
dt = 0.04,
code_class = code_class))
clist[0].compute_statistics()
opt.initialize = True
dtlist = []
errlist = []
for i in range(1, 5):
dtlist.append(clist[-1].parameters['dt']*clist[-1].statistics['vel_max'] / (2*np.pi / clist[-1].parameters['nx']))
opt.simname = 'N{0:0>4}_{1}'.format(opt.n, i)
init_vorticity = np.fromfile(
os.path.join(clist[0].work_dir, clist[0].simname + '_cvorticity_i00000'),
dtype = clist[0].dtype)
opt.niter_todo *= 2
opt.niter_stat *= 2
clist.append(code_launch_routine(
opt,
dt = clist[0].parameters['dt']/(2**i),
vorticity_field = init_vorticity,
code_class = code_class,
tracer_state_file = h5py.File(os.path.join(clist[0].work_dir, clist[0].simname + '.h5'), 'r')))
clist[-1].compute_statistics()
converter = bfps.fluid_converter(fluid_precision = opt.precision)
converter.write_src()
converter.set_host_info({'type' : 'pc'})
for c in clist:
converter.work_dir = c.work_dir
converter.simname = c.simname + '_converter'
for key in converter.parameters.keys():
if key in c.parameters.keys():
converter.parameters[key] = c.parameters[key]
converter.parameters['fluid_name'] = c.simname
converter.write_par()
converter.run(
ncpu = 2)
f1 = np.fromfile(os.path.join(clist[0].work_dir,
clist[0].simname + '_rvelocity_i{0:0>5x}'.format(clist[0].parameters['niter_todo'])),
dtype = clist[0].dtype)
for i in range(1, len(clist)):
f2 = np.fromfile(os.path.join(clist[i].work_dir,
clist[i].simname + '_rvelocity_i{0:0>5x}'.format(clist[i].parameters['niter_todo'])),
dtype = clist[i].dtype)
errlist.append(np.max(np.abs(f1 - f2)) / np.max(f1))
f1 = f2
fig = plt.figure()
a = fig.add_subplot(111)
a.plot(dtlist, errlist, marker = '.')
a.plot(dtlist, np.array(dtlist), dashes = (1, 1))
a.plot(dtlist, np.array(dtlist)**2, dashes = (2, 2))
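    # the dashed lines are first-order (dt) and second-order (dt**2) reference
    # slopes for judging the convergence rate on the log-log axes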
a.set_xscale('log')
a.set_yscale('log')
a.set_xlabel('$\\|u\\|_\\infty \\frac{\\Delta t}{\\Delta x}$')
fig.savefig('vel_err_vs_dt_{0}.pdf'.format(opt.precision))
return None
if __name__ == '__main__':
opt = parser.parse_args(
['-n', '32',
'--run',
'--initialize',
'--ncpu', '2',
'--nparticles', '1000',
'--niter_todo', '16',
'--precision', 'single',
'--wd', 'data/single'] +
sys.argv[1:])
convergence_test(opt, launch)
| gpl-3.0 |
wangz19/TEST | Fracture_Mechanics/Mode_ii.py | 1 | 1720 | import numpy as np
import matplotlib.pyplot as plt
theta = np.arange(-np.pi,np.pi,0.01)
S_rr = -5./4.*np.sin(theta/2)+3./4.*np.sin(3*theta/2)
S_ss = -3./4.*np.sin(theta/2)-3./4.*np.sin(3*theta/2)
S_sr = 1./4.*np.cos(theta/2)+3./4.*np.cos(3*theta/2)
pressure = -2.*np.sin(theta/2)/3
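# sanity check of the normalisation: at theta = 0 these expressions give
# S_rr = S_ss = 0, S_sr = 1/4 + 3/4 = 1 and pressure = 0, as expected for a
# pure Mode II near-tip field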
sigma_rr, = plt.plot(theta,S_rr,"b",label='$\sigma_{rr}$',linestyle='-')
sigma_tt, = plt.plot(theta,S_ss,"ro",label='$\sigma_{\\theta\\theta}$',linestyle='-')
sigma_rt, = plt.plot(theta,S_sr,"g",label='$\sigma_{r\\theta}$',linestyle='-')
sigma_pressure, = plt.plot(theta,pressure,"k",label='pressure',linestyle='--')
plt.legend()
plt.axis([-np.pi,np.pi,-2.5,2.5])
plt.xlabel('theta (rad)')
plt.ylabel('theta dependent part')
plt.title('Mode II crack tip field')
plt.grid(True)
# find the local maxima and minima of the hoop stress
m = (np.diff(np.sign(np.diff(S_ss)))<0).nonzero()[0]+1
n = (np.diff(np.sign(np.diff(S_ss)))>0).nonzero()[0]+1
max_x_norm = theta[m]
max_y_norm = S_ss[m]
min_x_norm = theta[n]
min_y_norm= S_ss[n]
# Adding anotations
plt.annotate('local maximum of normal stress \napproximately %d degrees'%(max_x_norm/np.pi*180), xy =(max_x_norm, max_y_norm),
xytext=(max_x_norm+0.5, max_y_norm+0.7),
arrowprops=dict(facecolor='black', shrink=0.1),
)
plt.annotate('local minimum of normal stress \napproximately %d degrees'%(min_x_norm/np.pi*180-1), xy =(min_x_norm, min_y_norm),
xytext=(min_x_norm-0.5, min_y_norm-0.7),
arrowprops=dict(facecolor='black', shrink=0.1),
)
# plt.annotate('max pressure', xy =(max_x_pressure, max_y_pressure), xytext=(max_x_pressure+1, max_y_pressure+0.5),
# arrowprops=dict(facecolor='black', shrink=0.1),
# )
plt.savefig("test.png")
plt.show()
| gpl-2.0 |
Averroes/statsmodels | statsmodels/sandbox/examples/try_quantile_regression.py | 33 | 1302 | '''Example to illustrate Quantile Regression
Author: Josef Perktold
'''
import numpy as np
from statsmodels.compat.python import zip
import statsmodels.api as sm
from statsmodels.regression.quantile_regression import QuantReg
sige = 5
nobs, k_vars = 500, 5
x = np.random.randn(nobs, k_vars)
#x[:,0] = 1
y = x.sum(1) + sige * (np.random.randn(nobs)/2 + 1)**3
p = 0.5
exog = np.column_stack((np.ones(nobs), x))
res_qr = QuantReg(y, exog).fit(p)
res_qr2 = QuantReg(y, exog).fit(0.25)
res_qr3 = QuantReg(y, exog).fit(0.75)
res_ols = sm.OLS(y, exog).fit()
##print 'ols ', res_ols.params
##print '0.25', res_qr2
##print '0.5 ', res_qr
##print '0.75', res_qr3
params = [res_ols.params, res_qr2.params, res_qr.params, res_qr3.params]
labels = ['ols', 'qr 0.25', 'qr 0.5', 'qr 0.75']
import matplotlib.pyplot as plt
#sortidx = np.argsort(y)
fitted_ols = np.dot(res_ols.model.exog, params[0])
sortidx = np.argsort(fitted_ols)
x_sorted = res_ols.model.exog[sortidx]
fitted_ols = np.dot(x_sorted, params[0])
plt.figure()
plt.plot(y[sortidx], 'o', alpha=0.75)
for lab, beta in zip(['ols', 'qr 0.25', 'qr 0.5', 'qr 0.75'], params):
print('%-8s'%lab, np.round(beta, 4))
fitted = np.dot(x_sorted, beta)
lw = 2 if lab == 'ols' else 1
plt.plot(fitted, lw=lw, label=lab)
plt.legend()
plt.show()
| bsd-3-clause |
BioMedIA/IRTK | wrapping/cython/scripts/learn_mser.py | 5 | 8173 | #!/usr/bin/python
import cv2
import sys
import csv
import numpy as np
from math import cos,sin
import math
import SimpleITK as sitk
import argparse
from glob import glob
from sklearn import cluster
from sklearn import neighbors
from sklearn import svm
from sklearn.externals import joblib
from joblib import Parallel, delayed
from scipy.stats.mstats import mquantiles
######################################################################
def is_in_ellipse( (x,y), ((xe,ye),(we,he),theta)):
theta = theta / 180 * np.pi
u = cos(theta)*(x-xe)+ sin(theta)*(y-ye)
v = -sin(theta)*(x-xe)+cos(theta)*(y-ye)
# http://answers.opencv.org/question/497/extract-a-rotatedrect-area/
# http://felix.abecassis.me/2011/10/opencv-rotation-deskewing/
# if theta < -45:
# tmp = we
# we = he
# he = we
a = we/2
b = he/2
return (u/a)**2 + (v/b)**2 <= 1
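# Illustrative sketch with hypothetical values: for theta = 0 the test reduces
# to the axis-aligned ellipse equation, so the centre passes and a distant
# point fails.
def _example_is_in_ellipse():
    ellipse = ((10.0, 10.0), (8.0, 4.0), 0.0)   # (centre, (width, height), angle)
    inside = is_in_ellipse((10.0, 10.0), ellipse)    # True
    outside = is_in_ellipse((30.0, 30.0), ellipse)   # False
    return inside, outside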
def process_file( raw_file, ga, coordinates, size, classifier, N, NEW_SAMPLING, DEBUG=False ):
X = []
Y = []
ofd_model = np.array([ -4.97315445e-03,
3.19846853e-01,
-2.60839214e+00,
2.62679565e+01])
OFD = 0
for k in range(4):
OFD += ofd_model[3-k]*ga**k
OFD /= NEW_SAMPLING
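    # the loop above evaluates the cubic highest order first, so OFD is
    # equivalent to np.polyval(ofd_model, ga) / NEW_SAMPLING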
mser = cv2.MSER( _delta=5,
_min_area=60,
_max_area=14400,
_max_variation=0.15,
_min_diversity=.1,
_max_evolution=200,
_area_threshold=1.01,
_min_margin=0.003,
_edge_blur_size=5)
sift = cv2.SIFT( nfeatures=0,
nOctaveLayers=3,
contrastThreshold=0.04,
edgeThreshold=10,
sigma=0.8)
siftExtractor = cv2.DescriptorExtractor_create("SIFT")
coordinates = np.array(map(float,coordinates.split(',')),dtype='float')
size = np.array(map(float,size.split(',')),dtype='float')
sitk_img = sitk.ReadImage( raw_file )
raw_spacing = sitk_img.GetSpacing()
## Resample
resample = sitk.ResampleImageFilter()
resample.SetOutputDirection(sitk_img.GetDirection())
resample.SetOutputOrigin(sitk_img.GetOrigin())
resample.SetOutputSpacing([NEW_SAMPLING,NEW_SAMPLING,raw_spacing[2]])
resample.SetSize([int(sitk_img.GetSize()[0]*raw_spacing[0]/NEW_SAMPLING),
int(sitk_img.GetSize()[1]*raw_spacing[1]/NEW_SAMPLING),
sitk_img.GetSize()[2]])
sitk_img = resample.Execute(sitk_img)
# Adjust coordinates and size
box = coordinates * np.array([1.0,raw_spacing[1]/NEW_SAMPLING,raw_spacing[0]/NEW_SAMPLING],dtype='float')
box_size = size * np.array([1.0,raw_spacing[1]/NEW_SAMPLING,raw_spacing[0]/NEW_SAMPLING],dtype='float')
z0,y0,x0 = box.astype('int')
d0,h0,w0 = box_size.astype('int')
brain_center = (x0 + w0/2, y0 + h0/2)
data = sitk.GetArrayFromImage( sitk_img ).astype("float")
## Contrast-stretch with saturation
q = mquantiles(data.flatten(),[0.01,0.99])
data[data<q[0]] = q[0]
data[data>q[1]] = q[1]
data -= data.min()
data /= data.max()
data *= 255
data = data.astype('uint8')
for z in range(data.shape[0]):
contours = mser.detect(data[z,:,:])
keypoints = sift.detect(data[z,:,:])
if keypoints is None or len(keypoints) == 0:
continue
(keypoints, descriptors) = siftExtractor.compute(data[z,:,:],keypoints)
for i,c in enumerate(contours):
hist = np.zeros(N, dtype='float')
ellipse = cv2.fitEllipse(np.array(map(lambda x:[x],
c),dtype='int32'))
# filter by size
if ( ellipse[1][0] > OFD
or ellipse[1][1] > OFD
or ellipse[1][0] < 0.5*OFD
or ellipse[1][1] < 0.5*OFD ) :
continue
# filter by eccentricity
# if math.sqrt(1-(np.min(ellipse[1])/np.max(ellipse[1]))**2) > 0.75:
# continue
distance = math.sqrt((ellipse[0][0]-brain_center[0])**2
+(ellipse[0][1]-brain_center[1])**2)
if max(w0,h0)/2 >= distance >= min(w0,h0)/8:
continue
for k,d in zip(keypoints,descriptors):
if is_in_ellipse(k.pt,ellipse):
c = classifier.kneighbors(d, return_distance=False)
hist[c] += 1
# Normalize histogram
norm = np.linalg.norm(hist)
if norm > 0:
hist /= norm
if distance > max(w0,h0)/4:
if DEBUG: print 0
X.append(hist)
Y.append(0)
else:
if distance < min(w0,h0)/8 and z0 + d0/8 <= z <= z0+7*d0/8:
if DEBUG: print 1
X.append(hist)
Y.append(1)
else:
continue
if DEBUG:
img_color = cv2.cvtColor( data[z,:,:], cv2.cv.CV_GRAY2RGB )
cv2.ellipse( img_color, (ellipse[0],
(ellipse[1][0],ellipse[1][1]),
ellipse[2]) , (0,0,255))
for k_id,k in enumerate(keypoints):
if is_in_ellipse(k.pt,ellipse):
if Y[-1] == 1:
cv2.circle( img_color,
(int(k.pt[0]),int(k.pt[1])),
2,
(0,255,0),
-1)
else:
cv2.circle( img_color,
(int(k.pt[0]),int(k.pt[1])),
2,
(0,0,255),
-1)
cv2.imwrite("/tmp/"+str(z) + '_' +str(i) +'_'+str(k_id)+".png",img_color)
# cv2.imshow("show",img_color)
# cv2.waitKey(0)
return X,Y
######################################################################
parser = argparse.ArgumentParser(
description='Learn MSER classifier using SIFT BOW.' )
parser.add_argument( '--training_patients' )
parser.add_argument( '--original_folder' )
parser.add_argument( '--ga_file' )
parser.add_argument( '--clean_brainboxes' )
parser.add_argument( '--new_sampling', type=float )
parser.add_argument( '--vocabulary' )
parser.add_argument( '--output' )
parser.add_argument( '--debug', action="store_true", default=False )
args = parser.parse_args()
vocabulary = open(args.vocabulary, 'rb')
voca = np.load(vocabulary)
classifier = neighbors.NearestNeighbors(1)
N = voca.shape[0]
classifier.fit(voca)
f = open( args.training_patients, "r" )
patients = []
for p in f:
patients.append(p.rstrip())
f.close()
reader = csv.reader( open( args.ga_file, "rb"), delimiter=" " )
all_ga = {}
for patient_id, ga in reader:
all_ga[patient_id] = float(ga)
reader = csv.reader( open( args.clean_brainboxes, "r" ),
delimiter='\t' )
training_patients = []
for patient_id, raw_file, cl, coordinates, size in reader:
if patient_id not in patients:
print "Skipping testing patient: " + patient_id
continue
training_patients.append((args.original_folder + '/' + raw_file,all_ga[patient_id],coordinates, size))
XY = Parallel(n_jobs=-1)(delayed(process_file)(raw_file,ga,coordinates, size, classifier,N,args.new_sampling,args.debug)
for raw_file,ga,coordinates, size in training_patients )
print len(XY)
X = []
Y = []
for x,y in XY:
X.extend(x)
Y.extend(y)
print "RATIO = ", np.sum(Y), len(Y)
X = np.array(X,dtype='float')
Y = np.array(Y,dtype='float')
# svc = svm.SVC()
svc = svm.LinearSVC(dual=False,)
svc.fit(X, Y)
print svc.score(X, Y)
joblib.dump(svc, args.output)
| apache-2.0 |
charterscruz/auto-encoder-tests | example.py | 1 | 4089 | #!/usr/bin/env python
from keras.layers import Input, Dense
from keras.models import Model
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" # see issue #152
os.environ["CUDA_VISIBLE_DEVICES"] = "1"
# this is the size of our encoded representations
encoding_dim = 32 # 32 floats -> compression of factor 24.5, assuming the input is 784 floats
# this is our input placeholder
input_img = Input(shape=(784,))
# "encoded" is the encoded representation of the input
encoded = Dense(encoding_dim, activation='relu')(input_img)
# "decoded" is the lossy reconstruction of the input
decoded = Dense(784, activation='sigmoid')(encoded)
# this model maps an input to its reconstruction
autoencoder = Model(input_img, decoded)
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
# create a placeholder for an encoded (32-dimensional) input
encoded_input = Input(shape=(encoding_dim,))
# retrieve the last layer of the autoencoder model
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
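# note: with the three models above, autoencoder.predict(x) matches
# decoder.predict(encoder.predict(x)), since encoder and decoder reuse the
# same layers (and therefore the same weights) as the autoencoder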
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
# load data
from keras.datasets import mnist
import numpy as np
(x_train, _), (x_test, _) = mnist.load_data()
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))
print x_train.shape
print x_test.shape
autoencoder.fit(x_train, x_train,
epochs=50,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))
# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
# use Matplotlib (don't ask)
import matplotlib.pyplot as plt
n = 10 # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
plt.savefig('fig1.png')
## ------------ Sparsity constraints ----------- ##
from keras import regularizers
encoding_dim = 32
input_img = Input(shape=(784,))
# add a Dense layer with a L1 activity regularizer
encoded = Dense(encoding_dim, activation='relu',
activity_regularizer=regularizers.l1(10e-5))(input_img)
decoded = Dense(784, activation='sigmoid')(encoded)
autoencoder = Model(input_img, decoded)
autoencoder.compile(optimizer='adadelta', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train,
epochs=50,
batch_size=256,
shuffle=True,
validation_data=(x_test, x_test))
# this model maps an input to its encoded representation
encoder = Model(input_img, encoded)
decoder_layer = autoencoder.layers[-1]
# create the decoder model
decoder = Model(encoded_input, decoder_layer(encoded_input))
# encode and decode some digits
# note that we take them from the *test* set
encoded_imgs = encoder.predict(x_test)
decoded_imgs = decoder.predict(encoded_imgs)
# use Matplotlib (don't ask)
import matplotlib.pyplot as plt
n = 10 # how many digits we will display
plt.figure(figsize=(20, 4))
for i in range(n):
# display original
ax = plt.subplot(2, n, i + 1)
plt.imshow(x_test[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
# display reconstruction
ax = plt.subplot(2, n, i + 1 + n)
plt.imshow(decoded_imgs[i].reshape(28, 28))
plt.gray()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
plt.show()
plt.savefig('fig2.png')
| mit |
kaichogami/scikit-learn | examples/covariance/plot_robust_vs_empirical_covariance.py | 73 | 6451 | r"""
=======================================
Robust vs Empirical covariance estimate
=======================================
The usual covariance maximum likelihood estimate is very sensitive to the
presence of outliers in the data set. In such a case, it would be better to
use a robust estimator of covariance to guarantee that the estimation is
resistant to "erroneous" observations in the data set.
Minimum Covariance Determinant Estimator
----------------------------------------
The Minimum Covariance Determinant estimator is a robust, high-breakdown point
(i.e. it can be used to estimate the covariance matrix of highly contaminated
datasets, up to
:math:`\frac{n_\text{samples} - n_\text{features}-1}{2}` outliers) estimator of
covariance. The idea is to find
:math:`\frac{n_\text{samples} + n_\text{features}+1}{2}`
observations whose empirical covariance has the smallest determinant, yielding
a "pure" subset of observations from which to compute standards estimates of
location and covariance. After a correction step aiming at compensating the
fact that the estimates were learned from only a portion of the initial data,
we end up with robust estimates of the data set location and covariance.
The Minimum Covariance Determinant estimator (MCD) has been introduced by
P.J.Rousseuw in [1]_.
Evaluation
----------
In this example, we compare the estimation errors that are made when using
various types of location and covariance estimates on contaminated Gaussian
distributed data sets:
- The mean and the empirical covariance of the full dataset, which break
down as soon as there are outliers in the data set
- The robust MCD, that has a low error provided
:math:`n_\text{samples} > 5n_\text{features}`
- The mean and the empirical covariance of the observations that are known
to be good ones. This can be considered as a "perfect" MCD estimation,
so one can trust our implementation by comparing to this case.
References
----------
.. [1] P. J. Rousseeuw. Least median of squares regression. Journal of the
    American Statistical Association, 79:871, 1984.
.. [2] Johanna Hardin, David M Rocke. The distribution of robust distances.
Journal of Computational and Graphical Statistics. December 1, 2005,
14(4): 928-946.
.. [3] Zoubir A., Koivunen V., Chakhchoukh Y. and Muma M. (2012). Robust
estimation in signal processing: A tutorial-style treatment of
fundamental concepts. IEEE Signal Processing Magazine 29(4), 61-80.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.font_manager
from sklearn.covariance import EmpiricalCovariance, MinCovDet
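# Illustrative sketch, not part of the original example (data and settings
# below are assumptions): with 10% of the samples shifted far away, the MCD
# location stays near the true mean while the empirical mean is dragged
# towards the outliers.
def _mcd_vs_empirical_sketch():
    rng = np.random.RandomState(42)
    X = rng.randn(100, 2)
    X[:10] += 10.                              # contaminate 10% of the samples
    robust = MinCovDet(random_state=42).fit(X)
    naive = EmpiricalCovariance().fit(X)
    return robust.location_, naive.location_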
# example settings
n_samples = 80
n_features = 5
repeat = 10
range_n_outliers = np.concatenate(
(np.linspace(0, n_samples / 8, 5),
np.linspace(n_samples / 8, n_samples / 2, 5)[1:-1]))
# definition of arrays to store results
err_loc_mcd = np.zeros((range_n_outliers.size, repeat))
err_cov_mcd = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_full = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_full = np.zeros((range_n_outliers.size, repeat))
err_loc_emp_pure = np.zeros((range_n_outliers.size, repeat))
err_cov_emp_pure = np.zeros((range_n_outliers.size, repeat))
# computation
for i, n_outliers in enumerate(range_n_outliers):
for j in range(repeat):
rng = np.random.RandomState(i * j)
# generate data
X = rng.randn(n_samples, n_features)
# add some outliers
outliers_index = rng.permutation(n_samples)[:n_outliers]
outliers_offset = 10. * \
(np.random.randint(2, size=(n_outliers, n_features)) - 0.5)
X[outliers_index] += outliers_offset
inliers_mask = np.ones(n_samples).astype(bool)
inliers_mask[outliers_index] = False
# fit a Minimum Covariance Determinant (MCD) robust estimator to data
mcd = MinCovDet().fit(X)
# compare raw robust estimates with the true location and covariance
err_loc_mcd[i, j] = np.sum(mcd.location_ ** 2)
err_cov_mcd[i, j] = mcd.error_norm(np.eye(n_features))
# compare estimators learned from the full data set with true
# parameters
err_loc_emp_full[i, j] = np.sum(X.mean(0) ** 2)
err_cov_emp_full[i, j] = EmpiricalCovariance().fit(X).error_norm(
np.eye(n_features))
# compare with an empirical covariance learned from a pure data set
# (i.e. "perfect" mcd)
pure_X = X[inliers_mask]
pure_location = pure_X.mean(0)
pure_emp_cov = EmpiricalCovariance().fit(pure_X)
err_loc_emp_pure[i, j] = np.sum(pure_location ** 2)
err_cov_emp_pure[i, j] = pure_emp_cov.error_norm(np.eye(n_features))
# Display results
font_prop = matplotlib.font_manager.FontProperties(size=11)
plt.subplot(2, 1, 1)
lw = 2
plt.errorbar(range_n_outliers, err_loc_mcd.mean(1),
yerr=err_loc_mcd.std(1) / np.sqrt(repeat),
label="Robust location", lw=lw, color='m')
plt.errorbar(range_n_outliers, err_loc_emp_full.mean(1),
yerr=err_loc_emp_full.std(1) / np.sqrt(repeat),
label="Full data set mean", lw=lw, color='green')
plt.errorbar(range_n_outliers, err_loc_emp_pure.mean(1),
yerr=err_loc_emp_pure.std(1) / np.sqrt(repeat),
label="Pure data set mean", lw=lw, color='black')
plt.title("Influence of outliers on the location estimation")
plt.ylabel(r"Error ($||\mu - \hat{\mu}||_2^2$)")
plt.legend(loc="upper left", prop=font_prop)
plt.subplot(2, 1, 2)
x_size = range_n_outliers.size
plt.errorbar(range_n_outliers, err_cov_mcd.mean(1),
yerr=err_cov_mcd.std(1),
label="Robust covariance (mcd)", color='m')
plt.errorbar(range_n_outliers[:(x_size / 5 + 1)],
err_cov_emp_full.mean(1)[:(x_size / 5 + 1)],
yerr=err_cov_emp_full.std(1)[:(x_size / 5 + 1)],
label="Full data set empirical covariance", color='green')
plt.plot(range_n_outliers[(x_size / 5):(x_size / 2 - 1)],
err_cov_emp_full.mean(1)[(x_size / 5):(x_size / 2 - 1)], color='green',
ls='--')
plt.errorbar(range_n_outliers, err_cov_emp_pure.mean(1),
yerr=err_cov_emp_pure.std(1),
label="Pure data set empirical covariance", color='black')
plt.title("Influence of outliers on the covariance estimation")
plt.xlabel("Amount of contamination (%)")
plt.ylabel("RMSE")
plt.legend(loc="upper center", prop=font_prop)
plt.show()
| bsd-3-clause |
jdavidrcamacho/Tests_GP | 07 - MCMC results/kernel_qp_all.py | 1 | 37708 | # -*- coding: utf-8 -*-
import Gedi as gedi
import numpy as np; #np.random.seed(13042017)
import matplotlib.pylab as pl; pl.close("all")
from matplotlib.ticker import MaxNLocator
import astropy.table as Table
from time import time
import sys
#sys.path.append(...\Emcee)
import emcee
print
print 'It has began.'
print
###############################################################################
#number of spots
ijk=21
#spread used to initialise the emcee walkers around the starting parameters
percentage=0.01
#Defining what's supose to run: 1 runs/0 doesn't
day_1=1
daydecay_1=1
daydecaygap_1=1
day_4=1
daydecay_4=1
daydecaygap_4=1
###############################################################################
for ijk in range(1,21):
#file to use
soap_file='output_spots{0}'.format(ijk)
#data of spots on the 2 hemispheres
samples_amp=[[0,0], \
[30,40],[30,40],[10,20],[65,75],[80,90], \
[10,20],[15,25],[50,60],[55,65],[75,85], \
[30,40],[40,50],[55,65],[45,55],[60,70], \
[45,55],[60,70],[65,75],[30,40],[75,85], \
[80,90],[115,125],[45,55],[65,75],[65,75], \
[55,65],[75,85],[75,85],[105,115],[55,165], \
[95,105],[75,85],[165,175],[95,105],[120,130], \
[75,85],[85,95],[170,180],[145,155],[125,135]]
#kernel data
amplitude= np.random.uniform(samples_amp[ijk][0],samples_amp[ijk][1])#amplitude
l1= np.random.uniform(0.5,1.0)#lenght scale
l2= np.random.uniform(0.5,1.0)#lenght scale
period= np.random.uniform(24,26)#period
whitenoise= np.random.uniform(0.1,1.0)#amplitude of the white noise
##### FILE ####################################################################
if day_1 ==1:
f=open("{0}_1day_normal.txt".format(soap_file),"w")
sys.stdout = f
start=time()
###############################################################################
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y= np.array(spots_info+yerr)
t= np.array(range(1,101))
#pl.figure('data')
#pl.plot(t,y,'*')
#pl.close('data')
print "Done."
###############################################################################
print
print '> Preparing kernel.'
print
kernel=gedi.kernel.QuasiPeriodic(amplitude,l1,l2,period) +\
gedi.kernel.WhiteNoise(whitenoise)
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Gradients =', gedi.kernel_likelihood.gradient_likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
###############################################################################
print '> Preparing mcmc.'
print
def lnprob(p):
global kernel
# Trivial improper prior: uniform in the log.
if np.any((-10 > p) + (p > 10)):
return -np.inf
lnprior = 0.0
# Update the kernel and compute the lnlikelihood.
params=np.exp(p)
kernel=gedi.kernel_optimization.new_kernel(kernel,params)
new_likelihood=gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
return lnprior + new_likelihood
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Initialize the walkers.
initial_walk=[percentage*np.log(n) for n in kernel.pars]
#initial_walk=1e-4
p0 = [np.log(kernel.pars) + initial_walk * np.random.randn(ndim)
for i in range(nwalkers)]
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, 2000)
print "Running production chain"
print
sampler.run_mcmc(p0, 2000)
print 'Done.'
print
###############################################################################
print '> Preparing graphics.'
print
fig, axes = pl.subplots(5, 1, sharex=True, figsize=(8, 9))
axes[0].plot(np.exp(sampler.chain[:, :, 0]).T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l1$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$l2$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$P$")
axes[4].plot(np.exp(sampler.chain[:, :, 4]).T, color="k", alpha=0.4)
axes[4].yaxis.set_major_locator(MaxNLocator(5))
axes[4].set_ylabel("$WN$")
axes[4].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_1day_normal.png'.format(soap_file))
pl.close('all')
print 'Done.'
print
###############################################################################
print '> Preparing solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale 1
samples[:, 2] = np.exp(samples[:, 2]) #lenght scale 2
samples[:, 3] = np.exp(samples[:, 3]) #period
samples[:, 4] = np.exp(samples[:, 4]) #white noise
theta_mcmc,l_mcmc,l2_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l1 = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'l2 = {0[0]} +{0[1]} -{0[2]}'.format(l2_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
else:
pass
##### FILE ####################################################################
if daydecay_1==1:
f=open("{0}_1day_decay.txt".format(soap_file),"w")
sys.stdout = f
start=time()
###############################################################################
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y= np.array(spots_info+yerr)
decay=np.linspace(1,0.5,len(y))
y=[n*m for n,m in zip(y,decay)]
y=np.array(y)
t= np.array(range(1,101))
#pl.figure('data')
#pl.plot(t,y,'*')
#pl.close('data')
print "Done."
print
###############################################################################
print '> Preparing kernel.'
print
kernel=gedi.kernel.QuasiPeriodic(amplitude,l1,l2,period) +\
gedi.kernel.WhiteNoise(whitenoise)
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Gradients =', gedi.kernel_likelihood.gradient_likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
###############################################################################
print '> Preparing mcmc.'
print
def lnprob(p):
global kernel
# Trivial improper prior: uniform in the log.
if np.any((-10 > p) + (p > 10)):
return -np.inf
lnprior = 0.0
# Update the kernel and compute the lnlikelihood.
params=np.exp(p)
kernel=gedi.kernel_optimization.new_kernel(kernel,params)
new_likelihood=gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
return lnprior + new_likelihood
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Initialize the walkers.
initial_walk=[percentage*np.log(n) for n in kernel.pars]
#initial_walk=1e-4
p0 = [np.log(kernel.pars) + initial_walk * np.random.randn(ndim)
for i in range(nwalkers)]
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, 2000)
print "Running production chain"
print
sampler.run_mcmc(p0, 2000)
print 'Done.'
print
###############################################################################
print '> Preparing graphics.'
print
fig, axes = pl.subplots(5, 1, sharex=True, figsize=(8, 9))
axes[0].plot(np.exp(sampler.chain[:, :, 0]).T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l1$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$l2$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$P$")
axes[4].plot(np.exp(sampler.chain[:, :, 4]).T, color="k", alpha=0.4)
axes[4].yaxis.set_major_locator(MaxNLocator(5))
axes[4].set_ylabel("$WN$")
axes[4].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_1day_decay.png'.format(soap_file))
pl.close('all')
print 'Done.'
print
###############################################################################
print '> Preparing solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #lenght scale 1
samples[:, 2] = np.exp(samples[:, 2]) #lenght scale 2
samples[:, 3] = np.exp(samples[:, 3]) #period
samples[:, 4] = np.exp(samples[:, 4]) #white noise
theta_mcmc,l_mcmc,l2_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l1 = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'l2 = {0[0]} +{0[1]} -{0[2]}'.format(l2_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
else:
pass
##### FILE ####################################################################
if daydecaygap_1==1:
f=open("{0}_1day_decaygap.txt".format(soap_file),"w")
sys.stdout = f
start=time()
###############################################################################
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot= rdb_data['RV_tot'][1:101]
spot= np.array(spot)
spot= spot.astype('Float64')
spotfinal= np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into a measurement per day
spots_info= []
for i in np.arange(0,399,4):
spots_info.append(spotfinal[i]*1000)
yerr= np.array(0.5*np.random.randn(len(spots_info)))
y0= np.array(spots_info+yerr)
decay=np.linspace(1,0.5,len(y0))
y0=[n*m for n,m in zip(y0,decay)]
#new t and y
t1= range(1,30)
t2= range(60,101)
t=np.array(t1+t2)
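# Days 30-59 are dropped here, so this "gap" case keeps epochs 1-29 and 60-100,
# emulating an interruption (e.g. a seasonal gap) in the observations.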
y=[]
yerr1=[]
for i,e in enumerate(t):
y.append(y0[e-1])
yerr1.append(yerr[e-1])
yerr=np.array(yerr1)
y=np.array(y)
#pl.figure('data')
#pl.plot(t,y,'*')
#pl.close('data')
print "Done."
print
###############################################################################
print '> Preparing kernel.'
print
kernel=gedi.kernel.QuasiPeriodic(amplitude,l1,l2,period) +\
gedi.kernel.WhiteNoise(whitenoise)
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Gradients =', gedi.kernel_likelihood.gradient_likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
###############################################################################
print '> Preparing mcmc.'
print
def lnprob(p):
global kernel
# Trivial improper prior: uniform in the log.
if np.any((-10 > p) + (p > 10)):
return -np.inf
lnprior = 0.0
# Update the kernel and compute the lnlikelihood.
params=np.exp(p)
kernel=gedi.kernel_optimization.new_kernel(kernel,params)
new_likelihood=gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
return lnprior + new_likelihood
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Initialize the walkers.
initial_walk=[percentage*np.log(n) for n in kernel.pars]
#initial_walk=1e-4
p0 = [np.log(kernel.pars) + initial_walk * np.random.randn(ndim)
for i in range(nwalkers)]
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, 2000)
print "Running production chain"
print
sampler.run_mcmc(p0, 2000)
print 'Done.'
print
###############################################################################
print '> Preparing graphics.'
print
fig, axes = pl.subplots(5, 1, sharex=True, figsize=(8, 9))
axes[0].plot(np.exp(sampler.chain[:, :, 0]).T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l1$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$l2$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$P$")
axes[4].plot(np.exp(sampler.chain[:, :, 4]).T, color="k", alpha=0.4)
axes[4].yaxis.set_major_locator(MaxNLocator(5))
axes[4].set_ylabel("$WN$")
axes[4].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_1day_decaygap.png'.format(soap_file))
pl.close('all')
print 'Done.'
print
###############################################################################
print '> Preparing solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #length scale 1
samples[:, 2] = np.exp(samples[:, 2]) #length scale 2
samples[:, 3] = np.exp(samples[:, 3]) #period
samples[:, 4] = np.exp(samples[:, 4]) #white noise
theta_mcmc,l_mcmc,l2_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l1 = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'l2 = {0[0]} +{0[1]} -{0[2]}'.format(l2_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
print 'It finished for 1 measurement a day.'
print
else:
pass
###############################################################################
###############################################################################
##### FILE ####################################################################
if day_4==1:
f=open("{0}_4days_normal.txt".format(soap_file),"w")
sys.stdout = f
start=time()
###############################################################################
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot=rdb_data['RV_tot'][1:101]
spot=np.array(spot)
spot=spot.astype('Float64')
spotfinal=np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into just 30 measurements
spots_yy=[]
for ii in np.arange(4,401,4):
a=(spotfinal[ii-4]+spotfinal[ii-3]+spotfinal[ii-2]+spotfinal[ii-1])*1000/4.
spots_yy.append(a)
spots_data=[]
for j in np.arange(1,100,3.3):
spots_data.append(spots_yy[int(round(j))])
yerr= np.array(0.5*np.random.randn(len(spots_data)))
y=np.array(spots_data + yerr)
t=range(1,31)
t=np.array([4*n for n in t])
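# Every 4 consecutive daily values were averaged into one point and roughly
# every third average is kept, leaving 30 measurements time-stamped at
# t = 4, 8, ..., 120 days.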
#pl.figure('data')
#pl.plot(t,y,'*')
#pl.close('data')
print "Done."
###############################################################################
print
print '> Preparing kernel.'
print
kernel=gedi.kernel.QuasiPeriodic(amplitude,l1,l2,period) +\
gedi.kernel.WhiteNoise(whitenoise)
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Gradients =', gedi.kernel_likelihood.gradient_likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
###############################################################################
print '> Preparing mcmc.'
print
def lnprob(p):
global kernel
# Trivial improper prior: uniform in the log.
if np.any((-10 > p) + (p > 10)):
return -np.inf
lnprior = 0.0
# Update the kernel and compute the lnlikelihood.
params=np.exp(p)
kernel=gedi.kernel_optimization.new_kernel(kernel,params)
new_likelihood=gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
return lnprior + new_likelihood
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Initialize the walkers.
initial_walk=[percentage*np.log(n) for n in kernel.pars]
#initial_walk=1e-4
p0 = [np.log(kernel.pars) + initial_walk * np.random.randn(ndim)
for i in range(nwalkers)]
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, 2000)
print "Running production chain"
print
sampler.run_mcmc(p0, 2000)
print 'Done.'
print
###############################################################################
print '> Preparing graphics.'
print
fig, axes = pl.subplots(5, 1, sharex=True, figsize=(8, 9))
axes[0].plot(np.exp(sampler.chain[:, :, 0]).T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l1$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$l2$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$P$")
axes[4].plot(np.exp(sampler.chain[:, :, 4]).T, color="k", alpha=0.4)
axes[4].yaxis.set_major_locator(MaxNLocator(5))
axes[4].set_ylabel("$WN$")
axes[4].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_4days_normal.png'.format(soap_file))
pl.close('all')
print 'Done.'
print
###############################################################################
print '> Preparing solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #length scale 1
samples[:, 2] = np.exp(samples[:, 2]) #length scale 2
samples[:, 3] = np.exp(samples[:, 3]) #period
samples[:, 4] = np.exp(samples[:, 4]) #white noise
theta_mcmc,l_mcmc,l2_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l1 = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'l2 = {0[0]} +{0[1]} -{0[2]}'.format(l2_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
else:
pass
##### FILE ####################################################################
if daydecay_4==1:
f=open("{0}_4days_decay.txt".format(soap_file),"w")
sys.stdout = f
start=time()
###############################################################################
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot=rdb_data['RV_tot'][1:101]
spot=np.array(spot)
spot=spot.astype('Float64')
spotfinal=np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into just 30 measurements
spots_yy=[]
for ii in np.arange(4,401,4):
a=(spotfinal[ii-4]+spotfinal[ii-3]+spotfinal[ii-2]+spotfinal[ii-1])*1000/4.
spots_yy.append(a)
spots_data=[]
for j in np.arange(1,100,3.3):
spots_data.append(spots_yy[int(round(j))])
yerr= np.array(0.5*np.random.randn(len(spots_data)))
y=np.array(spots_data + yerr)
decay=np.linspace(1,0.5,len(y))
y0=[n*m for n,m in zip(y,decay)]
t=range(1,31)
t=np.array([4*n for n in t])
y=np.array(y0)
#pl.figure('data')
#pl.plot(t,y,'*')
#pl.close('data')
print "Done."
print
###############################################################################
print '> Preparing kernel.'
print
kernel=gedi.kernel.QuasiPeriodic(amplitude,l1,l2,period) +\
gedi.kernel.WhiteNoise(whitenoise)
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Gradients =', gedi.kernel_likelihood.gradient_likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
###############################################################################
print '> Preparing mcmc.'
print
def lnprob(p):
global kernel
# Trivial improper prior: uniform in the log.
if np.any((-10 > p) + (p > 10)):
return -np.inf
lnprior = 0.0
# Update the kernel and compute the lnlikelihood.
params=np.exp(p)
kernel=gedi.kernel_optimization.new_kernel(kernel,params)
new_likelihood=gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
return lnprior + new_likelihood
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Initialize the walkers.
initial_walk=[percentage*np.log(n) for n in kernel.pars]
#initial_walk=1e-4
p0 = [np.log(kernel.pars) + initial_walk * np.random.randn(ndim)
for i in range(nwalkers)]
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, 2000)
print "Running production chain"
print
sampler.run_mcmc(p0, 2000)
print 'Done.'
print
###############################################################################
print '> Preparing graphics.'
print
fig, axes = pl.subplots(5, 1, sharex=True, figsize=(8, 9))
axes[0].plot(np.exp(sampler.chain[:, :, 0]).T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l1$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$l2$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$P$")
axes[4].plot(np.exp(sampler.chain[:, :, 4]).T, color="k", alpha=0.4)
axes[4].yaxis.set_major_locator(MaxNLocator(5))
axes[4].set_ylabel("$WN$")
axes[4].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_4days_decay.png'.format(soap_file))
pl.close('all')
print 'Done.'
print
###############################################################################
print '> Preparing solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #length scale 1
samples[:, 2] = np.exp(samples[:, 2]) #length scale 2
samples[:, 3] = np.exp(samples[:, 3]) #period
samples[:, 4] = np.exp(samples[:, 4]) #white noise
theta_mcmc,l_mcmc,l2_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l1 = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'l2 = {0[0]} +{0[1]} -{0[2]}'.format(l2_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
else:
pass
##### FILE ####################################################################
if daydecaygap_4==1:
f=open("{0}_4days_decaygap.txt".format(soap_file),"w")
sys.stdout = f
start=time()
###############################################################################
print
print '> Preparing data.'
print
print 'Loading {0}.rdb file.'.format(soap_file)
print
#data from .rdb file
rdb_data= Table.Table.read('{0}.rdb'.format(soap_file),format='ascii')
spot=rdb_data['RV_tot'][1:101]
spot=np.array(spot)
spot=spot.astype('Float64')
spotfinal=np.concatenate((spot,spot,spot,spot),axis=0)
#to organize the data into just 30 measurements
spots_yy=[]
for ii in np.arange(4,401,4):
a=(spotfinal[ii-4]+spotfinal[ii-3]+spotfinal[ii-2]+spotfinal[ii-1])*1000/4.
spots_yy.append(a)
spots_data=[]
for j in np.arange(1,100,3.3):
spots_data.append(spots_yy[int(round(j))])
yerr= np.array(0.5*np.random.randn(len(spots_data)))
y=np.array(spots_data + yerr)
decay=np.linspace(1,0.5,len(y))
y0=[n*m for n,m in zip(y,decay)]
t0=range(1,31)
t0=np.array([4*n for n in t0])
y0=np.array(y0)
#new t and y
y=[]
yerr1=[]
t=[]
for i in range(0,10):
t.append(t0[i])
y.append(y0[i])
yerr1.append(yerr[i])
for i in range(18,30):
t.append(t0[i])
y.append(y0[i])
yerr1.append(yerr[i])
yerr=np.array(yerr1)
y=np.array(y)
t=np.array(t)
#pl.figure('data')
#pl.plot(t,y,'*')
#pl.close('data')
print "Done."
print
###############################################################################
print '> Preparing kernel.'
print
kernel=gedi.kernel.QuasiPeriodic(amplitude,l1,l2,period) +\
gedi.kernel.WhiteNoise(whitenoise)
print 'Kernel =', kernel
print
print 'Likelihood =', gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
print
print 'Gradients =', gedi.kernel_likelihood.gradient_likelihood(kernel,t,y,yerr)
print
print 'Done.'
print
###############################################################################
print '> Preparing mcmc.'
print
def lnprob(p):
global kernel
# Trivial improper prior: uniform in the log.
if np.any((-10 > p) + (p > 10)):
return -np.inf
lnprior = 0.0
# Update the kernel and compute the lnlikelihood.
params=np.exp(p)
kernel=gedi.kernel_optimization.new_kernel(kernel,params)
new_likelihood=gedi.kernel_likelihood.likelihood(kernel,t,y,yerr)
return lnprior + new_likelihood
# Set up the sampler.
nwalkers, ndim = 10, len(kernel.pars)
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
# Initialize the walkers.
initial_walk=[percentage*np.log(n) for n in kernel.pars]
#initial_walk=1e-4
p0 = [np.log(kernel.pars) + initial_walk * np.random.randn(ndim)
for i in range(nwalkers)]
print "Running burn-in"
print
p0, _, _ = sampler.run_mcmc(p0, 2000)
print "Running production chain"
print
sampler.run_mcmc(p0, 2000)
print 'Done.'
print
###############################################################################
print '> Preparing graphics.'
print
fig, axes = pl.subplots(5, 1, sharex=True, figsize=(8, 9))
axes[0].plot(np.exp(sampler.chain[:, :, 0]).T, color="k", alpha=0.4)
axes[0].yaxis.set_major_locator(MaxNLocator(5))
axes[0].set_ylabel("$theta$")
axes[1].plot(np.exp(sampler.chain[:, :, 1]).T, color="k", alpha=0.4)
axes[1].yaxis.set_major_locator(MaxNLocator(5))
axes[1].set_ylabel("$l1$")
axes[2].plot(np.exp(sampler.chain[:, :, 2]).T, color="k", alpha=0.4)
axes[2].yaxis.set_major_locator(MaxNLocator(5))
axes[2].set_ylabel("$l2$")
axes[3].plot(np.exp(sampler.chain[:, :, 3]).T, color="k", alpha=0.4)
axes[3].yaxis.set_major_locator(MaxNLocator(5))
axes[3].set_ylabel("$P$")
axes[4].plot(np.exp(sampler.chain[:, :, 4]).T, color="k", alpha=0.4)
axes[4].yaxis.set_major_locator(MaxNLocator(5))
axes[4].set_ylabel("$WN$")
axes[4].set_xlabel("step number")
fig.tight_layout(h_pad=0.0)
fig.savefig('{0}_4days_decaygap.png'.format(soap_file))
pl.close('all')
print 'Done.'
print
###############################################################################
print '> Preparing solution.'
print
# Compute the quantiles.
burnin = 50
samples = sampler.chain[:, burnin:, :].reshape((-1, ndim))
samples[:, 0] = np.exp(samples[:, 0]) #amplitude
samples[:, 1] = np.exp(samples[:, 1]) #length scale 1
samples[:, 2] = np.exp(samples[:, 2]) #length scale 2
samples[:, 3] = np.exp(samples[:, 3]) #period
samples[:, 4] = np.exp(samples[:, 4]) #white noise
theta_mcmc,l_mcmc,l2_mcmc,p_mcmc,wn_mcmc = map(lambda v: (v[1], v[2]-v[1], v[1]-v[0]),
zip(*np.percentile(samples, [16, 50, 84],
axis=0)))
print 'theta = {0[0]} +{0[1]} -{0[2]}'.format(theta_mcmc)
print 'l1 = {0[0]} +{0[1]} -{0[2]}'.format(l_mcmc)
print 'l2 = {0[0]} +{0[1]} -{0[2]}'.format(l2_mcmc)
print 'period = {0[0]} +{0[1]} -{0[2]}'.format(p_mcmc)
print 'white noise = {0[0]} +{0[1]} -{0[2]}'.format(wn_mcmc)
print
tempo=(time() - start)
print 'Everything took', tempo,'s'
print
print 'Done.'
print
sys.stdout = sys.__stdout__
f.close()
else:
pass
print 'It is over.'
print | mit |
zhenv5/scikit-learn | examples/cluster/plot_birch_vs_minibatchkmeans.py | 333 | 3694 | """
=================================
Compare BIRCH and MiniBatchKMeans
=================================
This example compares the timing of Birch (with and without the global
clustering step) and MiniBatchKMeans on a synthetic dataset having
100,000 samples and 2 features generated using make_blobs.
If ``n_clusters`` is set to None, the data is reduced from 100,000
samples to a set of 158 clusters. This can be viewed as a preprocessing
step before the final (global) clustering step that further reduces these
158 clusters to 100 clusters.
"""
# Authors: Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# License: BSD 3 clause
print(__doc__)
from itertools import cycle
from time import time
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import Birch, MiniBatchKMeans
from sklearn.datasets.samples_generator import make_blobs
# Generate centers for the blobs so that it forms a 10 X 10 grid.
xx = np.linspace(-22, 22, 10)
yy = np.linspace(-22, 22, 10)
xx, yy = np.meshgrid(xx, yy)
n_centres = np.hstack((np.ravel(xx)[:, np.newaxis],
np.ravel(yy)[:, np.newaxis]))
# Generate blobs to do a comparison between MiniBatchKMeans and Birch.
X, y = make_blobs(n_samples=100000, centers=n_centres, random_state=0)
# Use all colors that matplotlib provides by default.
colors_ = cycle(colors.cnames.keys())
fig = plt.figure(figsize=(12, 4))
fig.subplots_adjust(left=0.04, right=0.98, bottom=0.1, top=0.9)
# Compute clustering with Birch with and without the final clustering step
# and plot.
birch_models = [Birch(threshold=1.7, n_clusters=None),
Birch(threshold=1.7, n_clusters=100)]
final_step = ['without global clustering', 'with global clustering']
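# Two Birch configurations are compared: with n_clusters=None the subclusters
# produced by the CF-tree are returned as-is, whereas an integer n_clusters
# adds a global clustering step that merges those subclusters into 100 final
# clusters.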
for ind, (birch_model, info) in enumerate(zip(birch_models, final_step)):
t = time()
birch_model.fit(X)
time_ = time() - t
print("Birch %s as the final step took %0.2f seconds" % (
info, (time() - t)))
# Plot result
labels = birch_model.labels_
centroids = birch_model.subcluster_centers_
n_clusters = np.unique(labels).size
print("n_clusters : %d" % n_clusters)
ax = fig.add_subplot(1, 3, ind + 1)
for this_centroid, k, col in zip(centroids, range(n_clusters), colors_):
mask = labels == k
ax.plot(X[mask, 0], X[mask, 1], 'w',
markerfacecolor=col, marker='.')
if birch_model.n_clusters is None:
ax.plot(this_centroid[0], this_centroid[1], '+', markerfacecolor=col,
markeredgecolor='k', markersize=5)
ax.set_ylim([-25, 25])
ax.set_xlim([-25, 25])
ax.set_autoscaley_on(False)
ax.set_title('Birch %s' % info)
# Compute clustering with MiniBatchKMeans.
mbk = MiniBatchKMeans(init='k-means++', n_clusters=100, batch_size=100,
n_init=10, max_no_improvement=10, verbose=0,
random_state=0)
t0 = time()
mbk.fit(X)
t_mini_batch = time() - t0
print("Time taken to run MiniBatchKMeans %0.2f seconds" % t_mini_batch)
mbk_means_labels_unique = np.unique(mbk.labels_)
ax = fig.add_subplot(1, 3, 3)
for this_centroid, k, col in zip(mbk.cluster_centers_,
range(n_clusters), colors_):
mask = mbk.labels_ == k
ax.plot(X[mask, 0], X[mask, 1], 'w', markerfacecolor=col, marker='.')
ax.plot(this_centroid[0], this_centroid[1], '+', markeredgecolor='k',
markersize=5)
ax.set_xlim([-25, 25])
ax.set_ylim([-25, 25])
ax.set_title("MiniBatchKMeans")
ax.set_autoscaley_on(False)
plt.show()
| bsd-3-clause |
lin-credible/scikit-learn | sklearn/svm/tests/test_svm.py | 116 | 31653 | """
Testing for Support Vector Machine module (sklearn.svm)
TODO: remove hard coded numerical results when possible
"""
import numpy as np
import itertools
from numpy.testing import assert_array_equal, assert_array_almost_equal
from numpy.testing import assert_almost_equal
from scipy import sparse
from nose.tools import assert_raises, assert_true, assert_equal, assert_false
from sklearn.base import ChangedBehaviorWarning
from sklearn import svm, linear_model, datasets, metrics, base
from sklearn.cross_validation import train_test_split
from sklearn.datasets import make_classification, make_blobs
from sklearn.metrics import f1_score
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_random_state
from sklearn.utils import ConvergenceWarning
from sklearn.utils.validation import NotFittedError
from sklearn.utils.testing import assert_greater, assert_in, assert_less
from sklearn.utils.testing import assert_raises_regexp, assert_warns
from sklearn.utils.testing import assert_warns_message, assert_raise_message
from sklearn.utils.testing import ignore_warnings
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
Y = [1, 1, 1, 2, 2, 2]
T = [[-1, -1], [2, 2], [3, 2]]
true_result = [1, 2, 2]
# also load the iris dataset
iris = datasets.load_iris()
rng = check_random_state(42)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_libsvm_parameters():
# Test parameters on classes that make use of libsvm.
clf = svm.SVC(kernel='linear').fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.support_vectors_, (X[1], X[3]))
assert_array_equal(clf.intercept_, [0.])
assert_array_equal(clf.predict(X), Y)
def test_libsvm_iris():
# Check consistency on dataset iris.
# shuffle the dataset so that labels are not ordered
for k in ('linear', 'rbf'):
clf = svm.SVC(kernel=k).fit(iris.data, iris.target)
assert_greater(np.mean(clf.predict(iris.data) == iris.target), 0.9)
assert_array_equal(clf.classes_, np.sort(clf.classes_))
# check also the low-level API
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64))
pred = svm.libsvm.predict(iris.data, *model)
assert_greater(np.mean(pred == iris.target), .95)
model = svm.libsvm.fit(iris.data, iris.target.astype(np.float64),
kernel='linear')
pred = svm.libsvm.predict(iris.data, *model, kernel='linear')
assert_greater(np.mean(pred == iris.target), .95)
pred = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_greater(np.mean(pred == iris.target), .95)
# If random_seed >= 0, the libsvm rng is seeded (by calling `srand`), hence
    # we should get deterministic results (assuming that no other thread
    # calls `srand` through this wrapper concurrently).
pred2 = svm.libsvm.cross_validation(iris.data,
iris.target.astype(np.float64), 5,
kernel='linear',
random_seed=0)
assert_array_equal(pred, pred2)
def test_single_sample_1d():
# Test whether SVCs work on a single sample given as a 1-d array
clf = svm.SVC().fit(X, Y)
clf.predict(X[0])
clf = svm.LinearSVC(random_state=0).fit(X, Y)
clf.predict(X[0])
def test_precomputed():
# SVC with a precomputed kernel.
# We test it with a toy dataset and with iris.
clf = svm.SVC(kernel='precomputed')
# Gram matrix for train data (square matrix)
# (we use just a linear kernel)
K = np.dot(X, np.array(X).T)
clf.fit(K, Y)
# Gram matrix for test data (rectangular matrix)
KT = np.dot(T, np.array(X).T)
pred = clf.predict(KT)
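    # A precomputed kernel passed to predict must have shape
    # (n_test_samples, n_train_samples); KT.T is transposed, hence the
    # ValueError below.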
assert_raises(ValueError, clf.predict, KT.T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.support_, [1, 3])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
KT = np.zeros_like(KT)
for i in range(len(T)):
for j in clf.support_:
KT[i, j] = np.dot(T[i], X[j])
pred = clf.predict(KT)
assert_array_equal(pred, true_result)
# same as before, but using a callable function instead of the kernel
# matrix. kernel is just a linear kernel
kfunc = lambda x, y: np.dot(x, y.T)
clf = svm.SVC(kernel=kfunc)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_equal(clf.dual_coef_, [[-0.25, .25]])
assert_array_equal(clf.intercept_, [0])
assert_array_almost_equal(clf.support_, [1, 3])
assert_array_equal(pred, true_result)
# test a precomputed kernel with the iris dataset
# and check parameters against a linear SVC
clf = svm.SVC(kernel='precomputed')
clf2 = svm.SVC(kernel='linear')
K = np.dot(iris.data, iris.data.T)
clf.fit(K, iris.target)
clf2.fit(iris.data, iris.target)
pred = clf.predict(K)
assert_array_almost_equal(clf.support_, clf2.support_)
assert_array_almost_equal(clf.dual_coef_, clf2.dual_coef_)
assert_array_almost_equal(clf.intercept_, clf2.intercept_)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
# Gram matrix for test data but compute KT[i,j]
# for support vectors j only.
K = np.zeros_like(K)
for i in range(len(iris.data)):
for j in clf.support_:
K[i, j] = np.dot(iris.data[i], iris.data[j])
pred = clf.predict(K)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
clf = svm.SVC(kernel=kfunc)
clf.fit(iris.data, iris.target)
assert_almost_equal(np.mean(pred == iris.target), .99, decimal=2)
def test_svr():
# Test Support Vector Regression
diabetes = datasets.load_diabetes()
for clf in (svm.NuSVR(kernel='linear', nu=.4, C=1.0),
svm.NuSVR(kernel='linear', nu=.4, C=10.),
svm.SVR(kernel='linear', C=10.),
svm.LinearSVR(C=10.),
svm.LinearSVR(C=10.),
):
clf.fit(diabetes.data, diabetes.target)
assert_greater(clf.score(diabetes.data, diabetes.target), 0.02)
# non-regression test; previously, BaseLibSVM would check that
# len(np.unique(y)) < 2, which must only be done for SVC
svm.SVR().fit(diabetes.data, np.ones(len(diabetes.data)))
svm.LinearSVR().fit(diabetes.data, np.ones(len(diabetes.data)))
def test_linearsvr():
# check that SVR(kernel='linear') and LinearSVC() give
# comparable results
diabetes = datasets.load_diabetes()
lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
score1 = lsvr.score(diabetes.data, diabetes.target)
svr = svm.SVR(kernel='linear', C=1e3).fit(diabetes.data, diabetes.target)
score2 = svr.score(diabetes.data, diabetes.target)
assert np.linalg.norm(lsvr.coef_ - svr.coef_) / np.linalg.norm(svr.coef_) < .1
assert np.abs(score1 - score2) < 0.1
def test_svr_errors():
X = [[0.0], [1.0]]
y = [0.0, 0.5]
# Bad kernel
clf = svm.SVR(kernel=lambda x, y: np.array([[1.0]]))
clf.fit(X, y)
assert_raises(ValueError, clf.predict, X)
def test_oneclass():
# Test OneClassSVM
clf = svm.OneClassSVM()
clf.fit(X)
pred = clf.predict(T)
assert_array_almost_equal(pred, [-1, -1, -1])
assert_array_almost_equal(clf.intercept_, [-1.008], decimal=3)
assert_array_almost_equal(clf.dual_coef_,
[[0.632, 0.233, 0.633, 0.234, 0.632, 0.633]],
decimal=3)
assert_raises(ValueError, lambda: clf.coef_)
def test_oneclass_decision_function():
# Test OneClassSVM decision function
clf = svm.OneClassSVM()
rnd = check_random_state(2)
# Generate train data
X = 0.3 * rnd.randn(100, 2)
X_train = np.r_[X + 2, X - 2]
# Generate some regular novel observations
X = 0.3 * rnd.randn(20, 2)
X_test = np.r_[X + 2, X - 2]
# Generate some abnormal novel observations
X_outliers = rnd.uniform(low=-4, high=4, size=(20, 2))
# fit the model
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.1)
clf.fit(X_train)
# predict things
y_pred_test = clf.predict(X_test)
assert_greater(np.mean(y_pred_test == 1), .9)
y_pred_outliers = clf.predict(X_outliers)
assert_greater(np.mean(y_pred_outliers == -1), .9)
dec_func_test = clf.decision_function(X_test)
assert_array_equal((dec_func_test > 0).ravel(), y_pred_test == 1)
dec_func_outliers = clf.decision_function(X_outliers)
assert_array_equal((dec_func_outliers > 0).ravel(), y_pred_outliers == 1)
def test_tweak_params():
# Make sure some tweaking of parameters works.
# We change clf.dual_coef_ at run time and expect .predict() to change
# accordingly. Notice that this is not trivial since it involves a lot
# of C/Python copying in the libsvm bindings.
# The success of this test ensures that the mapping between libsvm and
# the python classifier is complete.
clf = svm.SVC(kernel='linear', C=1.0)
clf.fit(X, Y)
assert_array_equal(clf.dual_coef_, [[-.25, .25]])
assert_array_equal(clf.predict([[-.1, -.1]]), [1])
clf._dual_coef_ = np.array([[.0, 1.]])
assert_array_equal(clf.predict([[-.1, -.1]]), [2])
def test_probability():
# Predict probabilities using SVC
# This uses cross validation, so we use a slightly bigger testing set.
for clf in (svm.SVC(probability=True, random_state=0, C=1.0),
svm.NuSVC(probability=True, random_state=0)):
clf.fit(iris.data, iris.target)
prob_predict = clf.predict_proba(iris.data)
assert_array_almost_equal(
np.sum(prob_predict, 1), np.ones(iris.data.shape[0]))
assert_true(np.mean(np.argmax(prob_predict, 1)
== clf.predict(iris.data)) > 0.9)
assert_almost_equal(clf.predict_proba(iris.data),
np.exp(clf.predict_log_proba(iris.data)), 8)
def test_decision_function():
# Test decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
# multi class:
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(iris.data, iris.target)
dec = np.dot(iris.data, clf.coef_.T) + clf.intercept_
assert_array_almost_equal(dec, clf.decision_function(iris.data))
# binary:
clf.fit(X, Y)
dec = np.dot(X, clf.coef_.T) + clf.intercept_
prediction = clf.predict(X)
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
assert_array_almost_equal(
prediction,
clf.classes_[(clf.decision_function(X) > 0).astype(np.int)])
expected = np.array([-1., -0.66, -1., 0.66, 1., 1.])
assert_array_almost_equal(clf.decision_function(X), expected, 2)
# kernel binary:
clf = svm.SVC(kernel='rbf', gamma=1, decision_function_shape='ovo')
clf.fit(X, Y)
rbfs = rbf_kernel(X, clf.support_vectors_, gamma=clf.gamma)
dec = np.dot(rbfs, clf.dual_coef_.T) + clf.intercept_
assert_array_almost_equal(dec.ravel(), clf.decision_function(X))
def test_decision_function_shape():
# check that decision_function_shape='ovr' gives
# correct shape and is consistent with predict
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(iris.data, iris.target)
dec = clf.decision_function(iris.data)
assert_equal(dec.shape, (len(iris.data), 3))
assert_array_equal(clf.predict(iris.data), np.argmax(dec, axis=1))
# with five classes:
X, y = make_blobs(n_samples=80, centers=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovr').fit(X_train, y_train)
dec = clf.decision_function(X_test)
assert_equal(dec.shape, (len(X_test), 5))
assert_array_equal(clf.predict(X_test), np.argmax(dec, axis=1))
    # check the shape of decision_function when decision_function_shape='ovo'
clf = svm.SVC(kernel='linear', C=0.1,
decision_function_shape='ovo').fit(X_train, y_train)
dec = clf.decision_function(X_train)
assert_equal(dec.shape, (len(X_train), 10))
# check deprecation warning
clf.decision_function_shape = None
msg = "change the shape of the decision function"
dec = assert_warns_message(ChangedBehaviorWarning, msg,
clf.decision_function, X_train)
assert_equal(dec.shape, (len(X_train), 10))
def test_svr_decision_function():
# Test SVR's decision_function
# Sanity check, test that decision_function implemented in python
# returns the same as the one in libsvm
X = iris.data
y = iris.target
# linear kernel
reg = svm.SVR(kernel='linear', C=0.1).fit(X, y)
dec = np.dot(X, reg.coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
# rbf kernel
reg = svm.SVR(kernel='rbf', gamma=1).fit(X, y)
rbfs = rbf_kernel(X, reg.support_vectors_, gamma=reg.gamma)
dec = np.dot(rbfs, reg.dual_coef_.T) + reg.intercept_
assert_array_almost_equal(dec.ravel(), reg.decision_function(X).ravel())
def test_weight():
# Test class weights
clf = svm.SVC(class_weight={1: 0.1})
# we give a small weights to class 1
clf.fit(X, Y)
# so all predicted values belong to class 2
assert_array_almost_equal(clf.predict(X), [2] * 6)
X_, y_ = make_classification(n_samples=200, n_features=10,
weights=[0.833, 0.167], random_state=2)
for clf in (linear_model.LogisticRegression(),
svm.LinearSVC(random_state=0), svm.SVC()):
clf.set_params(class_weight={0: .1, 1: 10})
clf.fit(X_[:100], y_[:100])
y_pred = clf.predict(X_[100:])
assert_true(f1_score(y_[100:], y_pred) > .3)
def test_sample_weights():
# Test weights on individual samples
# TODO: check on NuSVR, OneClass, etc.
clf = svm.SVC()
clf.fit(X, Y)
assert_array_equal(clf.predict(X[2]), [1.])
sample_weight = [.1] * 3 + [10] * 3
clf.fit(X, Y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X[2]), [2.])
# test that rescaling all samples is the same as changing C
clf = svm.SVC()
clf.fit(X, Y)
dual_coef_no_weight = clf.dual_coef_
clf.set_params(C=100)
clf.fit(X, Y, sample_weight=np.repeat(0.01, len(X)))
assert_array_almost_equal(dual_coef_no_weight, clf.dual_coef_)
def test_auto_weight():
# Test class weights for imbalanced data
from sklearn.linear_model import LogisticRegression
# We take as dataset the two-dimensional projection of iris so
# that it is not separable and remove half of predictors from
# class 1.
# We add one to the targets as a non-regression test: class_weight="balanced"
    # used to work only when the labels were a range [0..K).
from sklearn.utils import compute_class_weight
X, y = iris.data[:, :2], iris.target + 1
unbalanced = np.delete(np.arange(y.size), np.where(y > 2)[0][::2])
classes = np.unique(y[unbalanced])
class_weights = compute_class_weight('balanced', classes, y[unbalanced])
assert_true(np.argmax(class_weights) == 2)
for clf in (svm.SVC(kernel='linear'), svm.LinearSVC(random_state=0),
LogisticRegression()):
# check that score is better when class='balanced' is set.
y_pred = clf.fit(X[unbalanced], y[unbalanced]).predict(X)
clf.set_params(class_weight='balanced')
y_pred_balanced = clf.fit(X[unbalanced], y[unbalanced],).predict(X)
assert_true(metrics.f1_score(y, y_pred, average='weighted')
<= metrics.f1_score(y, y_pred_balanced,
average='weighted'))
def test_bad_input():
# Test that it gives proper exception on deficient input
# impossible value of C
assert_raises(ValueError, svm.SVC(C=-1).fit, X, Y)
# impossible value of nu
clf = svm.NuSVC(nu=0.0)
assert_raises(ValueError, clf.fit, X, Y)
Y2 = Y[:-1] # wrong dimensions for labels
assert_raises(ValueError, clf.fit, X, Y2)
# Test with arrays that are non-contiguous.
for clf in (svm.SVC(), svm.LinearSVC(random_state=0)):
Xf = np.asfortranarray(X)
assert_false(Xf.flags['C_CONTIGUOUS'])
yf = np.ascontiguousarray(np.tile(Y, (2, 1)).T)
yf = yf[:, -1]
assert_false(yf.flags['F_CONTIGUOUS'])
assert_false(yf.flags['C_CONTIGUOUS'])
clf.fit(Xf, yf)
assert_array_equal(clf.predict(T), true_result)
    # error for precomputed kernels
clf = svm.SVC(kernel='precomputed')
assert_raises(ValueError, clf.fit, X, Y)
# sample_weight bad dimensions
clf = svm.SVC()
assert_raises(ValueError, clf.fit, X, Y, sample_weight=range(len(X) - 1))
# predict with sparse input when trained with dense
clf = svm.SVC().fit(X, Y)
assert_raises(ValueError, clf.predict, sparse.lil_matrix(X))
Xt = np.array(X).T
clf.fit(np.dot(X, Xt), Y)
assert_raises(ValueError, clf.predict, X)
clf = svm.SVC()
clf.fit(X, Y)
assert_raises(ValueError, clf.predict, Xt)
def test_sparse_precomputed():
clf = svm.SVC(kernel='precomputed')
sparse_gram = sparse.csr_matrix([[1, 0], [0, 1]])
try:
clf.fit(sparse_gram, [0, 1])
assert not "reached"
except TypeError as e:
assert_in("Sparse precomputed", str(e))
def test_linearsvc_parameters():
# Test possible parameter combinations in LinearSVC
# Generate list of possible parameter combinations
losses = ['hinge', 'squared_hinge', 'logistic_regression', 'foo']
penalties, duals = ['l1', 'l2', 'bar'], [True, False]
X, y = make_classification(n_samples=5, n_features=5)
for loss, penalty, dual in itertools.product(losses, penalties, duals):
clf = svm.LinearSVC(penalty=penalty, loss=loss, dual=dual)
if ((loss, penalty) == ('hinge', 'l1') or
(loss, penalty, dual) == ('hinge', 'l2', False) or
(penalty, dual) == ('l1', True) or
loss == 'foo' or penalty == 'bar'):
assert_raises_regexp(ValueError,
"Unsupported set of arguments.*penalty='%s.*"
"loss='%s.*dual=%s"
% (penalty, loss, dual),
clf.fit, X, y)
else:
clf.fit(X, y)
# Incorrect loss value - test if explicit error message is raised
assert_raises_regexp(ValueError, ".*loss='l3' is not supported.*",
svm.LinearSVC(loss="l3").fit, X, y)
# FIXME remove in 1.0
def test_linearsvx_loss_penalty_deprecations():
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the %s will be removed in %s")
# LinearSVC
# loss l1/L1 --> hinge
assert_warns_message(DeprecationWarning,
msg % ("l1", "hinge", "loss='l1'", "1.0"),
svm.LinearSVC(loss="l1").fit, X, y)
# loss l2/L2 --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("L2", "squared_hinge", "loss='L2'", "1.0"),
svm.LinearSVC(loss="L2").fit, X, y)
# LinearSVR
# loss l1/L1 --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("L1", "epsilon_insensitive", "loss='L1'",
"1.0"),
svm.LinearSVR(loss="L1").fit, X, y)
# loss l2/L2 --> squared_epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("l2", "squared_epsilon_insensitive",
"loss='l2'", "1.0"),
svm.LinearSVR(loss="l2").fit, X, y)
# FIXME remove in 0.18
def test_linear_svx_uppercase_loss_penalty():
# Check if Upper case notation is supported by _fit_liblinear
# which is called by fit
X, y = [[0.0], [1.0]], [0, 1]
msg = ("loss='%s' has been deprecated in favor of "
"loss='%s' as of 0.16. Backward compatibility"
" for the uppercase notation will be removed in %s")
# loss SQUARED_hinge --> squared_hinge
assert_warns_message(DeprecationWarning,
msg % ("SQUARED_hinge", "squared_hinge", "0.18"),
svm.LinearSVC(loss="SQUARED_hinge").fit, X, y)
# penalty L2 --> l2
assert_warns_message(DeprecationWarning,
msg.replace("loss", "penalty")
% ("L2", "l2", "0.18"),
svm.LinearSVC(penalty="L2").fit, X, y)
# loss EPSILON_INSENSITIVE --> epsilon_insensitive
assert_warns_message(DeprecationWarning,
msg % ("EPSILON_INSENSITIVE", "epsilon_insensitive",
"0.18"),
svm.LinearSVR(loss="EPSILON_INSENSITIVE").fit, X, y)
def test_linearsvc():
# Test basic routines using LinearSVC
clf = svm.LinearSVC(random_state=0).fit(X, Y)
# by default should have intercept
assert_true(clf.fit_intercept)
assert_array_equal(clf.predict(T), true_result)
assert_array_almost_equal(clf.intercept_, [0], decimal=3)
# the same with l1 penalty
    clf = svm.LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
                        random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty with dual formulation
clf = svm.LinearSVC(penalty='l2', dual=True, random_state=0).fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# l2 penalty, l1 loss
clf = svm.LinearSVC(penalty='l2', loss='hinge', dual=True, random_state=0)
clf.fit(X, Y)
assert_array_equal(clf.predict(T), true_result)
# test also decision function
dec = clf.decision_function(T)
res = (dec > 0).astype(np.int) + 1
assert_array_equal(res, true_result)
def test_linearsvc_crammer_singer():
# Test LinearSVC with crammer_singer multi-class svm
ovr_clf = svm.LinearSVC(random_state=0).fit(iris.data, iris.target)
cs_clf = svm.LinearSVC(multi_class='crammer_singer', random_state=0)
cs_clf.fit(iris.data, iris.target)
# similar prediction for ovr and crammer-singer:
assert_true((ovr_clf.predict(iris.data) ==
cs_clf.predict(iris.data)).mean() > .9)
# classifiers shouldn't be the same
assert_true((ovr_clf.coef_ != cs_clf.coef_).all())
# test decision function
assert_array_equal(cs_clf.predict(iris.data),
np.argmax(cs_clf.decision_function(iris.data), axis=1))
dec_func = np.dot(iris.data, cs_clf.coef_.T) + cs_clf.intercept_
assert_array_almost_equal(dec_func, cs_clf.decision_function(iris.data))
def test_crammer_singer_binary():
# Test Crammer-Singer formulation in the binary case
X, y = make_classification(n_classes=2, random_state=0)
for fit_intercept in (True, False):
acc = svm.LinearSVC(fit_intercept=fit_intercept,
multi_class="crammer_singer",
random_state=0).fit(X, y).score(X, y)
assert_greater(acc, 0.9)
def test_linearsvc_iris():
# Test that LinearSVC gives plausible predictions on the iris dataset
# Also, test symbolic class names (classes_).
target = iris.target_names[iris.target]
clf = svm.LinearSVC(random_state=0).fit(iris.data, target)
assert_equal(set(clf.classes_), set(iris.target_names))
assert_greater(np.mean(clf.predict(iris.data) == target), 0.8)
dec = clf.decision_function(iris.data)
pred = iris.target_names[np.argmax(dec, 1)]
assert_array_equal(pred, clf.predict(iris.data))
def test_dense_liblinear_intercept_handling(classifier=svm.LinearSVC):
# Test that dense liblinear honours intercept_scaling param
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = classifier(fit_intercept=True, penalty='l1', loss='squared_hinge',
dual=False, C=4, tol=1e-7, random_state=0)
assert_true(clf.intercept_scaling == 1, clf.intercept_scaling)
assert_true(clf.fit_intercept)
# when intercept_scaling is low the intercept value is highly "penalized"
# by regularization
clf.intercept_scaling = 1
clf.fit(X, y)
assert_almost_equal(clf.intercept_, 0, decimal=5)
# when intercept_scaling is sufficiently high, the intercept value
# is not affected by regularization
clf.intercept_scaling = 100
clf.fit(X, y)
intercept1 = clf.intercept_
assert_less(intercept1, -1)
# when intercept_scaling is sufficiently high, the intercept value
# doesn't depend on intercept_scaling value
clf.intercept_scaling = 1000
clf.fit(X, y)
intercept2 = clf.intercept_
assert_array_almost_equal(intercept1, intercept2, decimal=2)
def test_liblinear_set_coef():
# multi-class case
clf = svm.LinearSVC().fit(iris.data, iris.target)
values = clf.decision_function(iris.data)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(iris.data)
assert_array_almost_equal(values, values2)
# binary-class case
X = [[2, 1],
[3, 1],
[1, 3],
[2, 3]]
y = [0, 0, 1, 1]
clf = svm.LinearSVC().fit(X, y)
values = clf.decision_function(X)
clf.coef_ = clf.coef_.copy()
clf.intercept_ = clf.intercept_.copy()
values2 = clf.decision_function(X)
assert_array_equal(values, values2)
def test_immutable_coef_property():
# Check that primal coef modification are not silently ignored
svms = [
svm.SVC(kernel='linear').fit(iris.data, iris.target),
svm.NuSVC(kernel='linear').fit(iris.data, iris.target),
svm.SVR(kernel='linear').fit(iris.data, iris.target),
svm.NuSVR(kernel='linear').fit(iris.data, iris.target),
svm.OneClassSVM(kernel='linear').fit(iris.data),
]
for clf in svms:
assert_raises(AttributeError, clf.__setattr__, 'coef_', np.arange(3))
assert_raises((RuntimeError, ValueError),
clf.coef_.__setitem__, (0, 0), 0)
def test_linearsvc_verbose():
# stdout: redirect
import os
stdout = os.dup(1) # save original stdout
os.dup2(os.pipe()[1], 1) # replace it
# actual call
clf = svm.LinearSVC(verbose=1)
clf.fit(X, Y)
# stdout: restore
os.dup2(stdout, 1) # restore original stdout
def test_svc_clone_with_callable_kernel():
# create SVM with callable linear kernel, check that results are the same
# as with built-in linear kernel
svm_callable = svm.SVC(kernel=lambda x, y: np.dot(x, y.T),
probability=True, random_state=0,
decision_function_shape='ovr')
# clone for checking clonability with lambda functions..
svm_cloned = base.clone(svm_callable)
svm_cloned.fit(iris.data, iris.target)
svm_builtin = svm.SVC(kernel='linear', probability=True, random_state=0,
decision_function_shape='ovr')
svm_builtin.fit(iris.data, iris.target)
assert_array_almost_equal(svm_cloned.dual_coef_,
svm_builtin.dual_coef_)
assert_array_almost_equal(svm_cloned.intercept_,
svm_builtin.intercept_)
assert_array_equal(svm_cloned.predict(iris.data),
svm_builtin.predict(iris.data))
assert_array_almost_equal(svm_cloned.predict_proba(iris.data),
svm_builtin.predict_proba(iris.data),
decimal=4)
assert_array_almost_equal(svm_cloned.decision_function(iris.data),
svm_builtin.decision_function(iris.data))
def test_svc_bad_kernel():
svc = svm.SVC(kernel=lambda x, y: x)
assert_raises(ValueError, svc.fit, X, Y)
def test_timeout():
a = svm.SVC(kernel=lambda x, y: np.dot(x, y.T), probability=True,
random_state=0, max_iter=1)
assert_warns(ConvergenceWarning, a.fit, X, Y)
def test_unfitted():
X = "foo!" # input validation not required when SVM not fitted
clf = svm.SVC()
assert_raises_regexp(Exception, r".*\bSVC\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
clf = svm.NuSVR()
assert_raises_regexp(Exception, r".*\bNuSVR\b.*\bnot\b.*\bfitted\b",
clf.predict, X)
# ignore convergence warnings from max_iter=1
@ignore_warnings
def test_consistent_proba():
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_1 = a.fit(X, Y).predict_proba(X)
a = svm.SVC(probability=True, max_iter=1, random_state=0)
proba_2 = a.fit(X, Y).predict_proba(X)
assert_array_almost_equal(proba_1, proba_2)
def test_linear_svc_convergence_warnings():
# Test that warnings are raised if model does not converge
lsvc = svm.LinearSVC(max_iter=2, verbose=1)
assert_warns(ConvergenceWarning, lsvc.fit, X, Y)
assert_equal(lsvc.n_iter_, 2)
def test_svr_coef_sign():
# Test that SVR(kernel="linear") has coef_ with the right sign.
# Non-regression test for #2933.
X = np.random.RandomState(21).randn(10, 3)
y = np.random.RandomState(12).randn(10)
for svr in [svm.SVR(kernel='linear'), svm.NuSVR(kernel='linear'),
svm.LinearSVR()]:
svr.fit(X, y)
assert_array_almost_equal(svr.predict(X),
np.dot(X, svr.coef_.ravel()) + svr.intercept_)
def test_linear_svc_intercept_scaling():
# Test that the right error message is thrown when intercept_scaling <= 0
for i in [-1, 0]:
lsvc = svm.LinearSVC(intercept_scaling=i)
msg = ('Intercept scaling is %r but needs to be greater than 0.'
' To disable fitting an intercept,'
' set fit_intercept=False.' % lsvc.intercept_scaling)
assert_raise_message(ValueError, msg, lsvc.fit, X, Y)
def test_lsvc_intercept_scaling_zero():
# Test that intercept_scaling is ignored when fit_intercept is False
lsvc = svm.LinearSVC(fit_intercept=False)
lsvc.fit(X, Y)
assert_equal(lsvc.intercept_, 0.)
def test_hasattr_predict_proba():
# Method must be (un)available before or after fit, switched by
# `probability` param
G = svm.SVC(probability=True)
assert_true(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_true(hasattr(G, 'predict_proba'))
G = svm.SVC(probability=False)
assert_false(hasattr(G, 'predict_proba'))
G.fit(iris.data, iris.target)
assert_false(hasattr(G, 'predict_proba'))
# Switching to `probability=True` after fitting should make
# predict_proba available, but calling it must not work:
G.probability = True
assert_true(hasattr(G, 'predict_proba'))
msg = "predict_proba is not available when fitted with probability=False"
assert_raise_message(NotFittedError, msg, G.predict_proba, iris.data)
| bsd-3-clause |
gimli-org/gimli | pygimli/viewer/mpl/matrixview.py | 1 | 3685 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Functions to draw various pygimli matrices with matplotlib."""
import numpy as np
import matplotlib.pyplot as plt
import pygimli as pg
def drawSparseMatrix(ax, mat, **kwargs):
"""Draw a view of a matrix into the axes.
Parameters
----------
ax : mpl axis instance, optional
Axis instance where the matrix will be plotted.
mat: pg.matrix.SparseMatrix or pg.matrix.SparseMapMatrix
Returns
-------
mpl.lines.line2d
Examples
--------
>>> import numpy as np
>>> import pygimli as pg
>>> from pygimli.viewer.mpl import drawSparseMatrix
>>> A = pg.randn((10,10), seed=0)
>>> SM = pg.core.SparseMapMatrix()
>>> for i in range(10):
... SM.setVal(i, i, 5.0)
>>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True, sharex=True)
>>> _ = drawSparseMatrix(ax1, A, colOffset=5, rowOffset=5, color='blue')
>>> _ = drawSparseMatrix(ax2, SM, color='green')
"""
row = kwargs.pop('rowOffset', 0)
col = kwargs.pop('colOffset', 0)
color = kwargs.pop('color', None)
mat = pg.utils.sparseMatrix2coo(mat)
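    # Assumption: sparseMatrix2coo returns a scipy.sparse COO matrix, which is
    # why the row/column offsets can be added in place before calling spy().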
mat.row += row
mat.col += col
gci = ax.spy(mat, color=color)
ax.autoscale(enable=True, axis='both', tight=True)
return gci
def drawBlockMatrix(ax, mat, **kwargs):
"""Draw a view of a matrix into the axes.
Arguments
---------
ax : mpl axis instance, optional
Axis instance where the matrix will be plotted.
mat: pg.Matrix.BlockMatrix
Keyword Arguments
-----------------
spy: bool [False]
Draw all matrix entries instead of colored blocks
Returns
-------
ax:
Examples
--------
>>> import numpy as np
>>> import pygimli as pg
>>> I = pg.matrix.IdentityMatrix(10)
>>> SM = pg.matrix.SparseMapMatrix()
>>> for i in range(10):
... SM.setVal(i, 10 - i, 5.0)
... SM.setVal(i, i, 5.0)
>>> B = pg.matrix.BlockMatrix()
>>> B.add(I, 0, 0)
0
>>> B.add(SM, 10, 10)
1
>>> print(B)
pg.matrix.BlockMatrix of size 20 x 21 consisting of 2 submatrices.
>>> fig, (ax1, ax2) = pg.plt.subplots(1, 2, sharey=True)
>>> _ = pg.show(B, ax=ax1)
>>> _ = pg.show(B, spy=True, ax=ax2)
"""
if kwargs.pop('spy', False):
gci = []
ids = pg.unique([e.matrixID for e in mat.entries()])
cMap = pg.plt.cm.get_cmap("Set3", len(ids))
for e in mat.entries():
mid = e.matrixID
mati = mat.mat(mid)
if isinstance(mati, pg.core.IdentityMatrix):
mati = np.eye(mati.size())
gci.append(drawSparseMatrix(ax, mati,
rowOffset=e.rowStart,
colOffset=e.colStart,
color=cMap(mid)))
return gci, None
else:
plcs = []
for e in mat.entries():
mid = e.matrixID
widthy = mat.mat(mid).rows() - 0.1 # to make sure non-matrix regions are not connected in the plot
widthx = mat.mat(mid).cols() - 0.1
plc = pg.meshtools.createRectangle([e.colStart, e.rowStart],
[e.colStart + widthx, e.rowStart + widthy],
marker=mid)
plcs.append(plc)
bm = pg.meshtools.mergePLC(plcs)
gci, cBar = pg.viewer.mpl.drawPLC(ax, bm, fitView=False)
ax.invert_yaxis()
ax.xaxis.tick_top()
cBar.set_label("Matrix ID")
if len(mat.entries()) > 10:
gci.set_cmap("viridis")
return gci, cBar
| apache-2.0 |
NoahZinsmeister/sf_crime_classification_kaggle | main.py | 1 | 6199 | import os
import pickle
import csv
import re
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import make_scorer, log_loss
from sklearn.ensemble import RandomForestClassifier
from sklearn.grid_search import GridSearchCV
if __name__ == "__main__":
# create a matrix of features (X) and a vector of class labels (y)
X = []
y = []
with open(os.getcwd() + '/train.csv', 'r') as csvfile:
reader = csv.reader(csvfile)
for i, row in enumerate(reader):
if i == 0:
pass
else:
date = re.search("([0-9]{4})-([0-9]{2})-([0-9]{2})",
row[0]).groups()
date = [int(x) for x in date]
time = re.search("([0-9]{2}):([0-9]{2}):([0-9]{2})",
row[0]).groups()
time = [int(x) for x in time]
category_string = row[1]
dayofweek_string = row[3]
pddistrict_string = row[4]
longitude = float(row[7])
latitude = float(row[8])
X_row = date + time + [longitude, latitude, \
dayofweek_string, pddistrict_string]
y_label = category_string
X.append(X_row)
y.append(y_label)
# one-hot encoding for dayofweek and pddistrict vars
dayofweek_set = set()
pddistrict_set = set()
for row in X:
dayofweek_set.add(row[-2])
pddistrict_set.add(row[-1])
dayofweek_dict = {item: i for i, item in enumerate(dayofweek_set)}
pddistrict_dict = {item: i for i, item in enumerate(pddistrict_set)}
num_unique_dayofweek = len(dayofweek_dict)
num_unique_pddistrict = len(pddistrict_dict)
for i, row in enumerate(X):
encoded_dayofweek = [0]*num_unique_dayofweek
encoded_pddistrict = [0]*num_unique_pddistrict
current_dayofweek = row[-2]
current_pddistrict = row[-1]
encoded_dayofweek[dayofweek_dict[current_dayofweek]] = 1
encoded_pddistrict[pddistrict_dict[current_pddistrict]] = 1
X[i] = row[:-2] + encoded_dayofweek + encoded_pddistrict
    # label encoding: map each crime category string to an integer index
category_set = set()
for label in y:
category_set.add(label)
category_dict = {item: i for i, item in enumerate(sorted(category_set))}
num_unique_category = len(category_dict)
for i, label in enumerate(y):
y[i] = category_dict[label]
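    # Note: the integer labels follow sorted(category_set), e.g. something like
    # {'ARSON': 0, 'ASSAULT': 1, ...} (illustrative), so the columns of
    # predict_proba line up with the submission header written at the end.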
    # ranges for the cross-validation parameters (I know this is only 1
    # combination; checking more would require more than my 8GB of RAM :/ )
n_estimators_range = [i for i in range(20,22,40)]
max_features_range = [i for i in range(3,5,2)]
# does CV and fits the best model
param_grid = {'n_estimators': n_estimators_range, 'max_features':
max_features_range}
rfc = RandomForestClassifier(random_state = 2, n_jobs = -1)
clf = GridSearchCV(rfc, param_grid = param_grid, scoring = make_scorer(log_loss, greater_is_better = False, needs_proba = True),
refit = True, cv = 4)
trained_clf = clf.fit(X, y)
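    # Because make_scorer is built with greater_is_better=False, the scores
    # stored in trained_clf.grid_scores_ are *negative* log losses; the
    # plotting code below multiplies by -1 to recover the log loss itself.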
# you can pickle the best CV estimator if you want
#pickle.dump(trained_clf.best_estimator_, open("trainedclassifier.p", "wb"))
    # plot the CV log-loss grid
scores = [-1*x[1] for x in trained_clf.grid_scores_]
scores = np.array(scores).reshape(len(max_features_range),
len(n_estimators_range))
plt.figure(figsize=(12, 12), dpi = 400)
plt.imshow(scores, interpolation='nearest', cmap=plt.cm.Blues)
plt.xlabel('n_estimators')
plt.ylabel('max_features')
plt.colorbar()
plt.xticks(np.arange(len(n_estimators_range)), n_estimators_range, rotation=0)
plt.yticks(np.arange(len(max_features_range)), max_features_range)
    plt.figtext(.5,.96,'4-Fold CV Log Loss', fontsize = 25, ha = 'center')
plt.figtext(.5,.94, "Best Performance:" + str(-1*trained_clf.best_score_), fontsize = 15, ha = 'center')
plt.figtext(.5,.92, "Best Parameters:" + str(trained_clf.best_params_), fontsize = 15, ha = 'center')
plt.savefig("CV_plot.png")
# create a matrix of features (X_test) for unlabeled test data
X_test = []
with open(os.getcwd() + '/test.csv', 'r') as csvfile:
reader = csv.reader(csvfile)
for i, row in enumerate(reader):
if i == 0:
pass
else:
date = re.search("([0-9]{4})-([0-9]{2})-([0-9]{2})", row[1]).groups()
# date is of the form [year, month, day]
date = [int(x) for x in date]
time = re.search("([0-9]{2}):([0-9]{2}):([0-9]{2})", row[1]).groups()
# time is of the form [hour, minute, second]
time = [int(x) for x in time]
dayofweek_string = row[2]
pddistrict_string = row[3]
longitude = float(row[5])
latitude = float(row[6])
X_row = date + time + [longitude, latitude, \
dayofweek_string, pddistrict_string]
X_test.append(X_row)
# one-hot encoding from existing dicts
for i, row in enumerate(X_test):
encoded_dayofweek = [0]*num_unique_dayofweek
encoded_pddistrict = [0]*num_unique_pddistrict
current_dayofweek = row[-2]
current_pddistrict = row[-1]
try:
encoded_dayofweek[dayofweek_dict[current_dayofweek]] = 1
except KeyError:
encoded_dayofweek[0] = 1
try:
encoded_pddistrict[pddistrict_dict[current_pddistrict]] = 1
except KeyError:
encoded_pddistrict[0] = 1
X_test[i] = row[:-2] + encoded_dayofweek + encoded_pddistrict
# write predicted probabilities to file
num_classes = trained_clf.best_estimator_.n_classes_
predicted_probas = trained_clf.predict_proba(X_test)
with open(os.getcwd() + '/submit.csv', 'w') as csvfile:
writer = csv.writer(csvfile)
writer.writerow(["Id"] + sorted(category_set))
for i, prediction in enumerate(predicted_probas):
writer.writerow([i] + prediction.tolist())
| gpl-2.0 |
boada/vpCluster | data/boada/scripts/plot_cluster_regions.py | 1 | 1555 | import matplotlib
matplotlib.use('Agg')
import aplpy
import numpy as np
from astLib import astCoords
files='./../august_2012/new_astrometry/coords/'
gc = aplpy.FITSFigure(
'./../august_2012/sdss_imaging/c354p41+0p27/fpC-002662-r4-0245.fit',
north=True, figsize=(5,5))
gc.show_grayscale(stretch='log')
gc.recenter(354.4155423, 0.2713716, radius=3/60.)
gc.show_circles(354.4155423, 0.2713716, radius=2.3/60.)
gc.set_auto_refresh(False)
fibers = np.loadtxt(files+'c354p41+0p27_NE_D1_coords.txt', dtype='str')
for ra, dec in zip(fibers[:,1], fibers[:,2]):
x=astCoords.hms2decimal(ra,':')
y=astCoords.dms2decimal(dec,':')
gc.show_circles(x,y,2/3600.,color='black')
fibers = np.loadtxt(files+'c354p41+0p27_NW_D1_coords.txt', dtype='str')
for ra, dec in zip(fibers[:,1], fibers[:,2]):
x=astCoords.hms2decimal(ra,':')
y=astCoords.dms2decimal(dec,':')
gc.show_circles(x,y,2/3600.,color='#348abd')
fibers = np.loadtxt(files+'c354p41+0p27_SW_D1_coords.txt', dtype='str')
for ra, dec in zip(fibers[:,1], fibers[:,2]):
x=astCoords.hms2decimal(ra,':')
y=astCoords.dms2decimal(dec,':')
gc.show_circles(x,y,2/3600.,color='#467821')
fibers = np.loadtxt(files+'c354p41+0p27_SE_D1_coords.txt', dtype='str')
for ra, dec in zip(fibers[:,1], fibers[:,2]):
x=astCoords.hms2decimal(ra,':')
y=astCoords.dms2decimal(dec,':')
gc.show_circles(x,y,2/3600.,color='#7a68a6')
gc.hide_tick_labels()
gc.hide_axis_labels()
gc.ticks.hide()
gc.set_theme('publication')
gc.refresh()
gc.save('pointing.pdf')
| mit |
carrillo/scikit-learn | sklearn/pipeline.py | 162 | 21103 | """
The :mod:`sklearn.pipeline` module implements utilities to build a composite
estimator, as a chain of transforms and estimators.
"""
# Author: Edouard Duchesnay
# Gael Varoquaux
# Virgile Fritsch
# Alexandre Gramfort
# Lars Buitinck
# Licence: BSD
from collections import defaultdict
import numpy as np
from scipy import sparse
from .base import BaseEstimator, TransformerMixin
from .externals.joblib import Parallel, delayed
from .externals import six
from .utils import tosequence
from .utils.metaestimators import if_delegate_has_method
from .externals.six import iteritems
__all__ = ['Pipeline', 'FeatureUnion']
class Pipeline(BaseEstimator):
"""Pipeline of transforms with a final estimator.
Sequentially apply a list of transforms and a final estimator.
Intermediate steps of the pipeline must be 'transforms', that is, they
must implement fit and transform methods.
The final estimator only needs to implement fit.
The purpose of the pipeline is to assemble several steps that can be
cross-validated together while setting different parameters.
For this, it enables setting parameters of the various steps using their
names and the parameter name separated by a '__', as in the example below.
Read more in the :ref:`User Guide <pipeline>`.
Parameters
----------
steps : list
List of (name, transform) tuples (implementing fit/transform) that are
chained, in the order in which they are chained, with the last object
an estimator.
Attributes
----------
named_steps : dict
Read-only attribute to access any step parameter by user given name.
Keys are step names and values are steps parameters.
Examples
--------
>>> from sklearn import svm
>>> from sklearn.datasets import samples_generator
>>> from sklearn.feature_selection import SelectKBest
>>> from sklearn.feature_selection import f_regression
>>> from sklearn.pipeline import Pipeline
>>> # generate some data to play with
>>> X, y = samples_generator.make_classification(
... n_informative=5, n_redundant=0, random_state=42)
>>> # ANOVA SVM-C
>>> anova_filter = SelectKBest(f_regression, k=5)
>>> clf = svm.SVC(kernel='linear')
>>> anova_svm = Pipeline([('anova', anova_filter), ('svc', clf)])
>>> # You can set the parameters using the names issued
>>> # For instance, fit using a k of 10 in the SelectKBest
>>> # and a parameter 'C' of the svm
>>> anova_svm.set_params(anova__k=10, svc__C=.1).fit(X, y)
... # doctest: +ELLIPSIS
Pipeline(steps=[...])
>>> prediction = anova_svm.predict(X)
>>> anova_svm.score(X, y) # doctest: +ELLIPSIS
0.77...
>>> # getting the selected features chosen by anova_filter
>>> anova_svm.named_steps['anova'].get_support()
... # doctest: +NORMALIZE_WHITESPACE
array([ True, True, True, False, False, True, False, True, True, True,
False, False, True, False, True, False, False, False, False,
True], dtype=bool)
"""
# BaseEstimator interface
def __init__(self, steps):
names, estimators = zip(*steps)
if len(dict(steps)) != len(steps):
raise ValueError("Provided step names are not unique: %s" % (names,))
# shallow copy of steps
self.steps = tosequence(steps)
transforms = estimators[:-1]
estimator = estimators[-1]
for t in transforms:
if (not (hasattr(t, "fit") or hasattr(t, "fit_transform")) or not
hasattr(t, "transform")):
raise TypeError("All intermediate steps of the chain should "
"be transforms and implement fit and transform"
" '%s' (type %s) doesn't)" % (t, type(t)))
if not hasattr(estimator, "fit"):
raise TypeError("Last step of chain should implement fit "
"'%s' (type %s) doesn't)"
% (estimator, type(estimator)))
@property
def _estimator_type(self):
return self.steps[-1][1]._estimator_type
def get_params(self, deep=True):
if not deep:
return super(Pipeline, self).get_params(deep=False)
else:
out = self.named_steps
for name, step in six.iteritems(self.named_steps):
for key, value in six.iteritems(step.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(Pipeline, self).get_params(deep=False))
return out
@property
def named_steps(self):
return dict(self.steps)
@property
def _final_estimator(self):
return self.steps[-1][1]
# Estimator interface
def _pre_transform(self, X, y=None, **fit_params):
fit_params_steps = dict((step, {}) for step, _ in self.steps)
for pname, pval in six.iteritems(fit_params):
step, param = pname.split('__', 1)
fit_params_steps[step][param] = pval
Xt = X
for name, transform in self.steps[:-1]:
if hasattr(transform, "fit_transform"):
Xt = transform.fit_transform(Xt, y, **fit_params_steps[name])
else:
Xt = transform.fit(Xt, y, **fit_params_steps[name]) \
.transform(Xt)
return Xt, fit_params_steps[self.steps[-1][0]]
def fit(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then fit the transformed data using the final estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
self.steps[-1][-1].fit(Xt, y, **fit_params)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all the transforms one after the other and transform the
data, then use fit_transform on transformed data using the final
estimator.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
if hasattr(self.steps[-1][-1], 'fit_transform'):
return self.steps[-1][-1].fit_transform(Xt, y, **fit_params)
else:
return self.steps[-1][-1].fit(Xt, y, **fit_params).transform(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict(self, X):
"""Applies transforms to the data, and the predict method of the
final estimator. Valid only if the final estimator implements
predict.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def fit_predict(self, X, y=None, **fit_params):
"""Applies fit_predict of last step in pipeline after transforms.
Applies fit_transforms of a pipeline to the data, followed by the
fit_predict method of the final estimator in the pipeline. Valid
only if the final estimator implements fit_predict.
Parameters
----------
X : iterable
Training data. Must fulfill input requirements of first step of
the pipeline.
y : iterable, default=None
Training targets. Must fulfill label requirements for all steps
of the pipeline.
"""
Xt, fit_params = self._pre_transform(X, y, **fit_params)
return self.steps[-1][-1].fit_predict(Xt, y, **fit_params)
@if_delegate_has_method(delegate='_final_estimator')
def predict_proba(self, X):
"""Applies transforms to the data, and the predict_proba method of the
final estimator. Valid only if the final estimator implements
predict_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def decision_function(self, X):
"""Applies transforms to the data, and the decision_function method of
the final estimator. Valid only if the final estimator implements
decision_function.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].decision_function(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def predict_log_proba(self, X):
"""Applies transforms to the data, and the predict_log_proba method of
the final estimator. Valid only if the final estimator implements
predict_log_proba.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].predict_log_proba(Xt)
@if_delegate_has_method(delegate='_final_estimator')
def transform(self, X):
"""Applies transforms to the data, and the transform method of the
final estimator. Valid only if the final estimator implements
transform.
Parameters
----------
X : iterable
Data to predict on. Must fulfill input requirements of first step of
the pipeline.
"""
Xt = X
for name, transform in self.steps:
Xt = transform.transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def inverse_transform(self, X):
"""Applies inverse transform to the data.
Starts with the last step of the pipeline and applies ``inverse_transform`` in
inverse order of the pipeline steps.
Valid only if all steps of the pipeline implement inverse_transform.
Parameters
----------
X : iterable
Data to inverse transform. Must fulfill output requirements of the
last step of the pipeline.
"""
if X.ndim == 1:
X = X[None, :]
Xt = X
for name, step in self.steps[::-1]:
Xt = step.inverse_transform(Xt)
return Xt
@if_delegate_has_method(delegate='_final_estimator')
def score(self, X, y=None):
"""Applies transforms to the data, and the score method of the
final estimator. Valid only if the final estimator implements
score.
Parameters
----------
X : iterable
Data to score. Must fulfill input requirements of first step of the
pipeline.
y : iterable, default=None
Targets used for scoring. Must fulfill label requirements for all steps of
the pipeline.
"""
Xt = X
for name, transform in self.steps[:-1]:
Xt = transform.transform(Xt)
return self.steps[-1][-1].score(Xt, y)
@property
def classes_(self):
return self.steps[-1][-1].classes_
@property
def _pairwise(self):
# check if first estimator expects pairwise input
return getattr(self.steps[0][1], '_pairwise', False)
def _name_estimators(estimators):
"""Generate names for estimators."""
names = [type(estimator).__name__.lower() for estimator in estimators]
namecount = defaultdict(int)
for est, name in zip(estimators, names):
namecount[name] += 1
for k, v in list(six.iteritems(namecount)):
if v == 1:
del namecount[k]
for i in reversed(range(len(estimators))):
name = names[i]
if name in namecount:
names[i] += "-%d" % namecount[name]
namecount[name] -= 1
return list(zip(names, estimators))
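# For example, _name_estimators([PCA(), PCA(), GaussianNB()]) yields
# [('pca-1', ...), ('pca-2', ...), ('gaussiannb', ...)]: duplicated type names
# receive a numeric suffix, unique ones are used as-is.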
def make_pipeline(*steps):
"""Construct a Pipeline from the given estimators.
This is a shorthand for the Pipeline constructor; it does not require, and
does not permit, naming the estimators. Instead, they will be given names
automatically based on their types.
Examples
--------
>>> from sklearn.naive_bayes import GaussianNB
>>> from sklearn.preprocessing import StandardScaler
>>> make_pipeline(StandardScaler(), GaussianNB()) # doctest: +NORMALIZE_WHITESPACE
Pipeline(steps=[('standardscaler',
StandardScaler(copy=True, with_mean=True, with_std=True)),
('gaussiannb', GaussianNB())])
Returns
-------
p : Pipeline
"""
return Pipeline(_name_estimators(steps))
def _fit_one_transformer(transformer, X, y):
return transformer.fit(X, y)
def _transform_one(transformer, name, X, transformer_weights):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
return transformer.transform(X) * transformer_weights[name]
return transformer.transform(X)
def _fit_transform_one(transformer, name, X, y, transformer_weights,
**fit_params):
if transformer_weights is not None and name in transformer_weights:
        # if we have a weight for this transformer, multiply output
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed * transformer_weights[name], transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed * transformer_weights[name], transformer
if hasattr(transformer, 'fit_transform'):
X_transformed = transformer.fit_transform(X, y, **fit_params)
return X_transformed, transformer
else:
X_transformed = transformer.fit(X, y, **fit_params).transform(X)
return X_transformed, transformer
class FeatureUnion(BaseEstimator, TransformerMixin):
"""Concatenates results of multiple transformer objects.
This estimator applies a list of transformer objects in parallel to the
input data, then concatenates the results. This is useful to combine
several feature extraction mechanisms into a single transformer.
Read more in the :ref:`User Guide <feature_union>`.
Parameters
----------
transformer_list: list of (string, transformer) tuples
List of transformer objects to be applied to the data. The first
half of each tuple is the name of the transformer.
n_jobs: int, optional
Number of jobs to run in parallel (default 1).
transformer_weights: dict, optional
Multiplicative weights for features per transformer.
Keys are transformer names, values the weights.
"""
def __init__(self, transformer_list, n_jobs=1, transformer_weights=None):
self.transformer_list = transformer_list
self.n_jobs = n_jobs
self.transformer_weights = transformer_weights
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
feature_names = []
for name, trans in self.transformer_list:
if not hasattr(trans, 'get_feature_names'):
raise AttributeError("Transformer %s does not provide"
" get_feature_names." % str(name))
feature_names.extend([name + "__" + f for f in
trans.get_feature_names()])
return feature_names
def fit(self, X, y=None):
"""Fit all transformers using X.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data, used to fit transformers.
"""
transformers = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_one_transformer)(trans, X, y)
for name, trans in self.transformer_list)
self._update_transformer_list(transformers)
return self
def fit_transform(self, X, y=None, **fit_params):
"""Fit all transformers using X, transform the data and concatenate
results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
result = Parallel(n_jobs=self.n_jobs)(
delayed(_fit_transform_one)(trans, name, X, y,
self.transformer_weights, **fit_params)
for name, trans in self.transformer_list)
Xs, transformers = zip(*result)
self._update_transformer_list(transformers)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def transform(self, X):
"""Transform X separately by each transformer, concatenate results.
Parameters
----------
X : array-like or sparse matrix, shape (n_samples, n_features)
Input data to be transformed.
Returns
-------
X_t : array-like or sparse matrix, shape (n_samples, sum_n_components)
hstack of results of transformers. sum_n_components is the
sum of n_components (output dimension) over transformers.
"""
Xs = Parallel(n_jobs=self.n_jobs)(
delayed(_transform_one)(trans, name, X, self.transformer_weights)
for name, trans in self.transformer_list)
if any(sparse.issparse(f) for f in Xs):
Xs = sparse.hstack(Xs).tocsr()
else:
Xs = np.hstack(Xs)
return Xs
def get_params(self, deep=True):
if not deep:
return super(FeatureUnion, self).get_params(deep=False)
else:
out = dict(self.transformer_list)
for name, trans in self.transformer_list:
for key, value in iteritems(trans.get_params(deep=True)):
out['%s__%s' % (name, key)] = value
out.update(super(FeatureUnion, self).get_params(deep=False))
return out
def _update_transformer_list(self, transformers):
self.transformer_list[:] = [
(name, new)
for ((name, old), new) in zip(self.transformer_list, transformers)
]
# XXX it would be nice to have a keyword-only n_jobs argument to this function,
# but that's not allowed in Python 2.x.
def make_union(*transformers):
"""Construct a FeatureUnion from the given transformers.
This is a shorthand for the FeatureUnion constructor; it does not require,
and does not permit, naming the transformers. Instead, they will be given
names automatically based on their types. It also does not allow weighting.
Examples
--------
>>> from sklearn.decomposition import PCA, TruncatedSVD
>>> make_union(PCA(), TruncatedSVD()) # doctest: +NORMALIZE_WHITESPACE
FeatureUnion(n_jobs=1,
transformer_list=[('pca', PCA(copy=True, n_components=None,
whiten=False)),
('truncatedsvd',
TruncatedSVD(algorithm='randomized',
n_components=2, n_iter=5,
random_state=None, tol=0.0))],
transformer_weights=None)
Returns
-------
f : FeatureUnion
"""
return FeatureUnion(_name_estimators(transformers))
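# Illustrative usage sketch (not part of the original module): FeatureUnion fits
# its transformers in parallel and hstacks their outputs; transformer_weights
# scales each block before concatenation.
if __name__ == '__main__':
    from sklearn.decomposition import PCA, TruncatedSVD
    X_demo = np.random.RandomState(0).rand(10, 5)
    union = FeatureUnion([('pca', PCA(n_components=2)),
                          ('svd', TruncatedSVD(n_components=2))],
                         transformer_weights={'pca': 10.0})
    X_combined = union.fit_transform(X_demo)
    print(X_combined.shape)  # expected: (10, 4) -- 2 PCA + 2 SVD components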
| bsd-3-clause |
hellodmp/segmentnet | preprocess/Image.py | 1 | 4261 | from os import listdir
from os.path import isfile, join
import numpy as np
import matplotlib.pyplot as plt
import SimpleITK as sitk
from skimage import io
from skimage import data
from preprocess import dicomparser
#http://insightsoftwareconsortium.github.io/SimpleITK-Notebooks/03_Image_Details.html
def get_imageData(ct_file):
ct = dicomparser.DicomParser(filename=ct_file)
print ct.GetSeriesInfo()
imageData = ct.GetImageData()
return imageData
def read_images(fileList):
image_dict = dict()
rescalFilt = sitk.RescaleIntensityImageFilter()
rescalFilt.SetOutputMaximum(1)
rescalFilt.SetOutputMinimum(0)
for path in fileList:
info = get_imageData(path)
image = rescalFilt.Execute(sitk.Cast(sitk.ReadImage(path), sitk.sitkFloat32))
image_dict[info["position"][2]] = sitk.GetArrayFromImage(image)
return image_dict
def read_series(dir):
reader = sitk.ImageSeriesReader()
series_list = reader.GetGDCMSeriesIDs(dir)
for series_id in series_list:
dicom_names = reader.GetGDCMSeriesFileNames(dir, series_id)
if len(dicom_names) > 1:
break
reader.SetFileNames(dicom_names)
image = reader.Execute()
return image
def read_image(path):
#read dicom
data = sitk.ReadImage(path, sitk.sitkFloat32)
filter = sitk.RescaleIntensityImageFilter()
data = filter.Execute(data,0,1)
nda = sitk.GetArrayFromImage(data)
(d, w, h)=nda.shape
image = nda.reshape((d,w,h)).transpose(1, 2, 0)
return image
def dict2vol(imageDict):
list = sorted(imageDict.iteritems(), key=lambda d: d[0])
(d, w, h) = list[0][1].shape
data = np.zeros((len(list), w, h))
for i in range(len(list)):
data[i,:,:] = list[i][1]
image = sitk.GetImageFromArray(data)
return image
def convertNumpyData(img, volSize, dstRes, method=sitk.sitkLinear):
# we rotate the image according to its transformation using the direction and according to the final spacing we want
factor = np.asarray(img.GetSpacing()) / [dstRes[0], dstRes[1], dstRes[2]]
factorSize = np.asarray(img.GetSize() * factor, dtype=float)
newSize = np.max([factorSize, volSize], axis=0)
newSize = newSize.astype(dtype=int)
T = sitk.AffineTransform(3)
T.SetMatrix(img.GetDirection())
resampler = sitk.ResampleImageFilter()
resampler.SetReferenceImage(img)
resampler.SetOutputSpacing([dstRes[0], dstRes[1], dstRes[2]])
resampler.SetSize(newSize)
resampler.SetInterpolator(method)
'''
if params['normDir']:
resampler.SetTransform(T.GetInverse())
'''
imgResampled = resampler.Execute(img)
imgCentroid = np.asarray(newSize, dtype=float) / 2.0
imgStartPx = (imgCentroid - volSize / 2.0).astype(dtype=int)
regionExtractor = sitk.RegionOfInterestImageFilter()
regionExtractor.SetSize(list(volSize.astype(dtype=int)))
regionExtractor.SetIndex(list(imgStartPx))
imgResampledCropped = regionExtractor.Execute(imgResampled)
ret = np.transpose(sitk.GetArrayFromImage(imgResampledCropped).astype(dtype=float), [2, 1, 0])
return ret
def sitk_show(nda, title=None, margin=0.0, dpi=40):
figsize = (1 + margin) * nda.shape[0] / dpi, (1 + margin) * nda.shape[1] / dpi
extent = (0, nda.shape[1], nda.shape[0], 0)
fig = plt.figure(figsize=figsize, dpi=dpi)
ax = fig.add_axes([margin, margin, 1 - 2 * margin, 1 - 2 * margin])
for k in range(0, nda.shape[2]):
print "printing slice " + str(k)
ax.imshow(np.squeeze(nda[:, :, k]),cmap ='gray', extent=extent, interpolation=None)
plt.draw()
#plt.pause(1)
plt.waitforbuttonpress()
if __name__ == "__main__":
path = "../Dataset/V13265"
ct_list = [path +"/"+ f for f in listdir(path) if isfile(join(path, f)) and f.startswith('CT')]
'''
for file in ct_list:
get_imageData(file)
'''
read_series(path)
'''
ct_list = [path + f for f in listdir(path) if isfile(join(path, f)) and f.startswith('CT')]
images = read_images(ct_list)
image = dict2vol(images)
volSize = np.asarray([128,128,64],dtype=int)
dstRes = np.asarray([1,1,5],dtype=float)
data = convertNumpyData(image,volSize,dstRes)
print data.shape
'''
| gpl-3.0 |
lenovor/scikit-learn | examples/applications/plot_species_distribution_modeling.py | 254 | 7434 | """
=============================
Species distribution modeling
=============================
Modeling species' geographic distributions is an important
problem in conservation biology. In this example we
model the geographic distribution of two south american
mammals given past observations and 14 environmental
variables. Since we have only positive examples (there are
no unsuccessful observations), we cast this problem as a
density estimation problem and use the `OneClassSVM` provided
by the package `sklearn.svm` as our modeling tool.
The dataset is provided by Phillips et. al. (2006).
If available, the example uses
`basemap <http://matplotlib.sourceforge.net/basemap/doc/html/>`_
to plot the coast lines and national boundaries of South America.
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
   Colombia, Ecuador, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from __future__ import print_function
from time import time
import numpy as np
import matplotlib.pyplot as plt
from sklearn.datasets.base import Bunch
from sklearn.datasets import fetch_species_distributions
from sklearn.datasets.species_distributions import construct_grids
from sklearn import svm, metrics
# if basemap is available, we'll use it.
# otherwise, we'll improvise later...
try:
from mpl_toolkits.basemap import Basemap
basemap = True
except ImportError:
basemap = False
print(__doc__)
def create_species_bunch(species_name, train, test, coverages, xgrid, ygrid):
"""Create a bunch with information about a particular organism
This will use the test/train record arrays to extract the
data specific to the given species name.
"""
bunch = Bunch(name=' '.join(species_name.split("_")[:2]))
species_name = species_name.encode('ascii')
points = dict(test=test, train=train)
for label, pts in points.items():
# choose points associated with the desired species
pts = pts[pts['species'] == species_name]
bunch['pts_%s' % label] = pts
# determine coverage values for each of the training & testing points
ix = np.searchsorted(xgrid, pts['dd long'])
iy = np.searchsorted(ygrid, pts['dd lat'])
bunch['cov_%s' % label] = coverages[:, -iy, ix].T
return bunch
def plot_species_distribution(species=("bradypus_variegatus_0",
"microryzomys_minutus_0")):
"""
Plot the species distribution.
"""
if len(species) > 2:
print("Note: when more than two species are provided,"
" only the first two will be used")
t0 = time()
# Load the compressed data
data = fetch_species_distributions()
# Set up the data grid
xgrid, ygrid = construct_grids(data)
# The grid in x,y coordinates
X, Y = np.meshgrid(xgrid, ygrid[::-1])
# create a bunch for each species
BV_bunch = create_species_bunch(species[0],
data.train, data.test,
data.coverages, xgrid, ygrid)
MM_bunch = create_species_bunch(species[1],
data.train, data.test,
data.coverages, xgrid, ygrid)
# background points (grid coordinates) for evaluation
np.random.seed(13)
background_points = np.c_[np.random.randint(low=0, high=data.Ny,
size=10000),
np.random.randint(low=0, high=data.Nx,
size=10000)].T
# We'll make use of the fact that coverages[6] has measurements at all
# land points. This will help us decide between land and water.
land_reference = data.coverages[6]
# Fit, predict, and plot for each species.
for i, species in enumerate([BV_bunch, MM_bunch]):
print("_" * 80)
print("Modeling distribution of species '%s'" % species.name)
# Standardize features
mean = species.cov_train.mean(axis=0)
std = species.cov_train.std(axis=0)
train_cover_std = (species.cov_train - mean) / std
# Fit OneClassSVM
print(" - fit OneClassSVM ... ", end='')
clf = svm.OneClassSVM(nu=0.1, kernel="rbf", gamma=0.5)
clf.fit(train_cover_std)
print("done.")
# Plot map of South America
plt.subplot(1, 2, i + 1)
if basemap:
print(" - plot coastlines using basemap")
m = Basemap(projection='cyl', llcrnrlat=Y.min(),
urcrnrlat=Y.max(), llcrnrlon=X.min(),
urcrnrlon=X.max(), resolution='c')
m.drawcoastlines()
m.drawcountries()
else:
print(" - plot coastlines from coverage")
plt.contour(X, Y, land_reference,
levels=[-9999], colors="k",
linestyles="solid")
plt.xticks([])
plt.yticks([])
print(" - predict species distribution")
# Predict species distribution using the training data
Z = np.ones((data.Ny, data.Nx), dtype=np.float64)
# We'll predict only for the land points.
idx = np.where(land_reference > -9999)
coverages_land = data.coverages[:, idx[0], idx[1]].T
pred = clf.decision_function((coverages_land - mean) / std)[:, 0]
Z *= pred.min()
Z[idx[0], idx[1]] = pred
levels = np.linspace(Z.min(), Z.max(), 25)
Z[land_reference == -9999] = -9999
# plot contours of the prediction
plt.contourf(X, Y, Z, levels=levels, cmap=plt.cm.Reds)
plt.colorbar(format='%.2f')
# scatter training/testing points
plt.scatter(species.pts_train['dd long'], species.pts_train['dd lat'],
s=2 ** 2, c='black',
marker='^', label='train')
plt.scatter(species.pts_test['dd long'], species.pts_test['dd lat'],
s=2 ** 2, c='black',
marker='x', label='test')
plt.legend()
plt.title(species.name)
plt.axis('equal')
# Compute AUC with regards to background points
pred_background = Z[background_points[0], background_points[1]]
pred_test = clf.decision_function((species.cov_test - mean)
/ std)[:, 0]
scores = np.r_[pred_test, pred_background]
y = np.r_[np.ones(pred_test.shape), np.zeros(pred_background.shape)]
fpr, tpr, thresholds = metrics.roc_curve(y, scores)
roc_auc = metrics.auc(fpr, tpr)
plt.text(-35, -70, "AUC: %.3f" % roc_auc, ha="right")
print("\n Area under the ROC curve : %f" % roc_auc)
print("\ntime elapsed: %.2fs" % (time() - t0))
plot_species_distribution()
plt.show()
| bsd-3-clause |
lthurlow/Network-Grapher | proj/external/matplotlib-1.2.1/examples/pylab_examples/transoffset.py | 13 | 1666 | #!/usr/bin/env python
'''
This illustrates the use of transforms.offset_copy to
make a transform that positions a drawing element such as
a text string at a specified offset in screen coordinates
(dots or inches) relative to a location given in any
coordinates.
Every Artist--the mpl class from which classes such as
Text and Line are derived--has a transform that can be
set when the Artist is created, such as by the corresponding
pylab command. By default this is usually the Axes.transData
transform, going from data units to screen dots. We can
use the offset_copy function to make a modified copy of
this transform, where the modification consists of an
offset.
'''
import pylab as P
from matplotlib.transforms import offset_copy
X = P.arange(7)
Y = X**2
fig = P.figure(figsize=(5,10))
ax = P.subplot(2,1,1)
# If we want the same offset for each text instance,
# we only need to make one transform. To get the
# transform argument to offset_copy, we need to make the axes
# first; the subplot command above is one way to do this.
transOffset = offset_copy(ax.transData, fig=fig,
x = 0.05, y=0.10, units='inches')
for x, y in zip(X, Y):
P.plot((x,),(y,), 'ro')
P.text(x, y, '%d, %d' % (int(x),int(y)), transform=transOffset)
# offset_copy works for polar plots also.
ax = P.subplot(2,1,2, polar=True)
transOffset = offset_copy(ax.transData, fig=fig, y = 6, units='dots')
for x, y in zip(X, Y):
P.polar((x,),(y,), 'ro')
P.text(x, y, '%d, %d' % (int(x),int(y)),
transform=transOffset,
horizontalalignment='center',
verticalalignment='bottom')
P.show()
| mit |
keflavich/pyspeckit-obsolete | pyspeckit/spectrum/models/radex_modelgrid.py | 3 | 4499 | """
Fit a line based on parameters output from a grid of RADEX models
"""
import numpy as np
from pyspeckit.mpfit import mpfit
from .. import units
from . import fitter,model
import matplotlib.cbook as mpcb
import copy
try:
import astropy.io.fits as pyfits
except ImportError:
    import pyfits
# scipy is needed for the grid interpolation in radex_model.__call__
try:
    import scipy.ndimage
    scipyOK = True
except ImportError:
    scipyOK = False
class radex_model(object):
def __init__(self, xarr,
grid_vwidth=1.0,
grid_vwidth_scale=False,
texgrid=None,
taugrid=None,
hdr=None,
path_to_texgrid='',
path_to_taugrid='',
temperature_gridnumber=3,
debug=False,
verbose=False,
modelfunc=None,
**kwargs):
"""
Use a grid of RADEX-computed models to make a model line spectrum
The RADEX models have to be available somewhere.
OR they can be passed as arrays. If as arrays, the form should be:
texgrid = ((minfreq1,maxfreq1,texgrid1),(minfreq2,maxfreq2,texgrid2))
xarr must be a SpectroscopicAxis instance
        xoff_v, width are both in km/s. Width is the Gaussian 'sigma'.
grid_vwidth is the velocity assumed when computing the grid in km/s
this is important because tau = modeltau / width (see, e.g.,
Draine 2011 textbook pgs 219-230)
grid_vwidth_scale is True or False: False for LVG, True for Sphere
A modelfunc must be specified. Model functions should take an xarr and
a series of keyword arguments corresponding to the line parameters
(Tex, tau, xoff_v, and width (gaussian sigma, not FWHM))
"""
        self.modelfunc = modelfunc
        self.temperature_gridnumber = temperature_gridnumber
        self.verbose = verbose
        self.debug = debug
if self.modelfunc is None:
raise ValueError("Must specify a spectral model function. See class help for form.")
if texgrid is None and taugrid is None:
if path_to_texgrid == '' or path_to_taugrid=='':
raise IOError("Must specify model grids to use.")
else:
self.taugrid = [pyfits.getdata(path_to_taugrid)]
self.texgrid = [pyfits.getdata(path_to_texgrid)]
hdr = pyfits.getheader(path_to_taugrid)
self.yinds,self.xinds = np.indices(self.taugrid[0].shape[1:])
                self.densityarr = (self.xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
                self.columnarr = (self.yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
self.minfreq = (4.8,)
self.maxfreq = (5.0,)
elif len(taugrid)==len(texgrid) and hdr is not None:
self.minfreq,self.maxfreq,self.texgrid = zip(*texgrid)
self.minfreq,self.maxfreq,self.taugrid = zip(*taugrid)
self.yinds,self.xinds = np.indices(self.taugrid[0].shape[1:])
            self.densityarr = (self.xinds+hdr['CRPIX1']-1)*hdr['CD1_1']+hdr['CRVAL1'] # log density
            self.columnarr = (self.yinds+hdr['CRPIX2']-1)*hdr['CD2_2']+hdr['CRVAL2'] # log column
else:
raise Exception
# Convert X-units to frequency in GHz
self.xarr = copy.copy(xarr)
self.xarr.convert_to_unit('Hz', quiet=True)
#tau = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,taugrid[temperature_gridnumber,:,:])
#tex = modelgrid.line_params_2D(gridval1,gridval2,densityarr,columnarr,texgrid[temperature_gridnumber,:,:])
if debug:
import pdb; pdb.set_trace()
    def __call__(self, density=4, column=13, xoff_v=0.0, width=1.0, **kwargs):
        self.gridval1 = np.interp(density, self.densityarr[0,:], self.xinds[0,:])
        self.gridval2 = np.interp(column, self.columnarr[:,0], self.yinds[:,0])
        if np.isnan(self.gridval1) or np.isnan(self.gridval2):
            raise ValueError("Invalid column/density")
        if scipyOK:
            tau = [scipy.ndimage.map_coordinates(tg[self.temperature_gridnumber,:,:],np.array([[self.gridval2],[self.gridval1]]),order=1) for tg in self.taugrid]
            tex = [scipy.ndimage.map_coordinates(tg[self.temperature_gridnumber,:,:],np.array([[self.gridval2],[self.gridval1]]),order=1) for tg in self.texgrid]
        else:
            raise ImportError("Couldn't import scipy, therefore cannot interpolate")
        if self.verbose:
            print "density %20.12g column %20.12g: tau %20.12g tex %20.12g" % (density, column, tau[0], tex[0])
        if self.debug:
            import pdb; pdb.set_trace()
        return self.modelfunc(self.xarr, Tex=tex, tau=tau, xoff_v=xoff_v, width=width, **kwargs)
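# Illustrative sketch (not part of the original module): a minimal stand-in with
# the call signature radex_model expects from `modelfunc`. It treats xarr as a
# plain array of velocities and returns a Gaussian line profile scaled by the
# excitation temperature and opacity -- a real model function (e.g. one of
# pyspeckit's line models) would do the proper radiative-transfer calculation.
def example_modelfunc(xarr, Tex=10.0, tau=1.0, xoff_v=0.0, width=1.0, **kwargs):
    velo = np.asarray(xarr, dtype=float)
    profile = np.exp(-(velo - xoff_v)**2 / (2.0 * width**2))
    # Tex/tau may arrive as 1-element lists from radex_model.__call__
    return np.asarray(Tex).ravel()[0] * (1.0 - np.exp(-np.asarray(tau).ravel()[0] * profile))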
| mit |
BigTone2009/sms-tools | lectures/03-Fourier-properties/plots-code/symmetry.py | 26 | 1178 | import matplotlib.pyplot as plt
import numpy as np
import sys
from scipy.fftpack import fft, ifft, fftshift
import math
sys.path.append('../../../software/models/')
import utilFunctions as UF
import dftModel as DF
(fs, x) = UF.wavread('../../../sounds/soprano-E4.wav')
w = np.hamming(511)
N = 512
pin = 5000
hM1 = int(math.floor((w.size+1)/2))
hM2 = int(math.floor(w.size/2))
fftbuffer = np.zeros(N)
x1 = x[pin-hM1:pin+hM2]
xw = x1*w
fftbuffer[:hM1] = xw[hM2:]
fftbuffer[N-hM2:] = xw[:hM2]
X = fftshift(fft(fftbuffer))
mX = 20 * np.log10(abs(X))
pX = np.unwrap(np.angle(X))
plt.figure(1, figsize=(9.5, 7))
plt.subplot(311)
plt.plot(np.arange(-hM1, hM2), x1, lw=1.5)
plt.axis([-hM1, hM2, min(x1), max(x1)])
plt.ylabel('amplitude')
plt.title('x (soprano-E4.wav)')
plt.subplot(3,1,2)
plt.plot(np.arange(-N/2,N/2), mX, 'r', lw=1.5)
plt.axis([-N/2,N/2,-48,max(mX)])
plt.title ('mX = 20*log10(abs(X))')
plt.ylabel('amplitude (dB)')
plt.subplot(3,1,3)
plt.plot(np.arange(-N/2,N/2), pX, 'c', lw=1.5)
plt.axis([-N/2,N/2,min(pX),max(pX)])
plt.title ('pX = unwrap(angle(X))')
plt.ylabel('phase (radians)')
plt.tight_layout()
plt.savefig('symmetry.png')
plt.show()
| agpl-3.0 |
EFord36/normalise | normalise/class_ALPHA.py | 1 | 9314 | # -*- coding: utf-8 -*-
from __future__ import division, print_function, unicode_literals
import sys
import re
import pickle
from io import open
import numpy as np
from sklearn.semi_supervised import LabelPropagation as lp
from roman import romanNumeralPattern
from normalise.detect import mod_path
from normalise.tagger import tagify, is_digbased, acr_pattern
from normalise.class_NUMB import gen_frame
from normalise.splitter import split, retagify
from normalise.data.measurements import meas_dict, meas_dict_pl
from normalise.data.abbrev_dict import abbrev_dict
from normalise.data.element_dict import element_dict
with open('{}/data/wordlist.pickle'.format(mod_path), mode='rb') as file:
wordlist = pickle.load(file)
with open('{}/data/NSW_dict.pickle'.format(mod_path), mode='rb') as file:
NSWs = pickle.load(file)
with open('{}/data/word_tokenized.pickle'.format(mod_path), mode='rb') as file:
word_tokenized = pickle.load(file)
with open('{}/data/word_tokenized_lowered.pickle'.format(mod_path), mode='rb') as f:
word_tokenized_lowered = pickle.load(f)
with open('{}/data/clf_ALPHA.pickle'.format(mod_path), mode='rb') as file:
clf_ALPHA = pickle.load(file)
with open('{}/data/names.pickle'.format(mod_path), mode='rb') as file:
names_lower = pickle.load(file)
if __name__ == "__main__":
tagged = tagify(NSWs, verbose=False)
ALPHA_dict = {ind: (nsw, tag) for ind, (nsw, tag) in tagged.items()
if tag == 'ALPHA'}
SPLT_dict = {ind: (nsw, tag) for ind, (nsw, tag) in tagged.items()
if tag == 'SPLT'}
splitted = split(SPLT_dict, verbose=False)
retagged = retagify(splitted, verbose=False)
retagged_ALPHA_dict = {ind: (nsw, tag)
for ind, (nsw, tag) in retagged.items()
if tag == 'SPLT-ALPHA'}
ALPHA_dict.update(retagged_ALPHA_dict)
ALPHAs_context = []
for item in ALPHA_dict.items():
if item[0] < 1206200:
for word in gen_frame(item, word_tokenized):
ALPHAs_context.append(' {} '.format(word))
third_ALPHA_dict = {}
count = 0
for item in ALPHA_dict.items():
count += 1
if count % 3 == 0:
third_ALPHA_dict.update((item,))
ampm = ['am', 'pm', 'AM', 'PM', 'a.m.', 'p.m.', 'A.M.', 'P.M.', 'pm.', 'am.']
adbc = ['AD', 'A.D.', 'ad', 'a.d.', 'BC', 'B.C.', 'bc', 'B.C.']
def run_clfALPHA(dic, text, verbose=True, user_abbrevs={}):
"""Train classifier on training data, return dictionary with added tag.
    dic: dictionary where each key is the index of a word in the original text
    and each value is a tuple of the NSW and its tag (ALPHA, NUMB, SPLT-, MISC).
The dictionary returned has the same entries with the tuple extended with
a more specific number tag assigned to it by the classifier.
"""
clf = clf_ALPHA
int_tag_dict = {
1: 'EXPN',
2: 'LSEQ',
3: 'WDLK',
}
out = {}
for (ind, (nsw, tag)) in dic.items():
if verbose:
sys.stdout.write("\r{} of {} classified".format(len(out), len(dic)))
sys.stdout.flush()
if romanNumeralPattern.match(nsw) and gen_frame((ind, (nsw, tag)), text)[1].lower() in names_lower:
out.update({ind: (nsw, 'NUMB', 'NORD')})
elif nsw in user_abbrevs:
out.update({ind: (nsw, 'ALPHA', 'EXPN')})
else:
pred_int = int(clf.predict(gen_featuresetsALPHA({ind: (nsw, tag)}, text)))
ntag = int_tag_dict[pred_int]
out.update({ind: (nsw, tag, ntag)})
if verbose:
sys.stdout.write("\r{} of {} classified".format(len(out), len(dic)))
sys.stdout.flush()
print("\n")
return out
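# The returned mapping keeps the original keys (token indices) and extends each
# value tuple with the predicted subtag, i.e. {ind: (nsw, 'ALPHA')} becomes
# {ind: (nsw, 'ALPHA', 'EXPN' | 'LSEQ' | 'WDLK')}.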
def gen_featuresetsALPHA(tagged_dict, text):
"""Return an array for features for each item in the input dict."""
return np.array([give_featuresALPHA(item, text)
for item in tagged_dict.items()],
dtype=float)
def give_featuresALPHA(item, text):
"""Return a list of features for a dictionary item."""
ind, nsw, tag = item[0], item[1][0], item[1][1]
context = gen_frame(item, text)
out = [
]
out.extend(seed_features(item, context))
"""out.extend(in_features(nsw))"""
return out
def seed_features(item, context):
"""Return a list of features equivalent to those used in the seedset."""
ind, nsw, tag = item[0], item[1][0], item[1][1]
out = [
nsw in ['Mr.', 'Mrs.', 'Mr', 'Mrs'],
nsw in ['i.e.', 'ie.', 'e.g.', 'eg.'],
nsw.endswith('.') and nsw.istitle() and not acr_pattern.match(nsw),
(nsw.isupper() and is_cons(nsw) and not (nsw in meas_dict
and is_digbased(context[1])) and not acr_pattern.match(nsw)),
(nsw in meas_dict or nsw in meas_dict_pl) and is_digbased(context[1]),
(nsw in ampm or nsw in adbc) and is_digbased(context[1]),
(nsw.istitle() and nsw.isalpha() and len(nsw) > 3 and not is_cons(nsw)),
(((nsw.startswith("O'") or nsw.startswith("D'")) and nsw[2:].istitle())
or (nsw.endswith("s'") and nsw[:-2].istitle())
or (nsw.endswith("'s") and nsw[:-2].istitle())),
(not (nsw.isupper() or nsw.endswith('s') and nsw[:-1].isupper())
and (nsw.lower() in wordlist
or (nsw[:-1].lower() in wordlist and nsw.endswith('s')))
and nsw not in ampm),
triple_rep(nsw) and len(nsw) > 3,
bool(acr_pattern.match(nsw) and nsw not in meas_dict),
nsw.isalpha() and nsw.islower() and len(nsw) > 3,
nsw.endswith('s') and nsw[:-1].isupper(),
nsw in element_dict,
nsw.isalpha and nsw.islower() and len(nsw) > 2,
nsw.lower() in abbrev_dict or nsw in ['St.', 'st.', 'St']
]
return out
def gen_seed(dic, text):
"""Return a list of the (integer) labels assigned to the seedset."""
seedset = []
for ind, (nsw, tag) in dic.items():
seedset.append(seed((ind, (nsw, tag)), text))
return seedset
def gen_feats_and_seed(dic, text):
"""Return a tuple of arrays - the first of features, the second, labels."""
seedset = []
featset = []
for ind, (nsw, tag) in dic.items():
seedset.append(seed((ind, (nsw, tag)), text))
featset.append(give_featuresALPHA((ind, (nsw, tag)), text))
return (np.array(featset, dtype=int), np.array(seedset))
def fit_clf(dic, text):
    """Fit a Label Propagation classifier to the input dictionary."""
model = lp(tol=0.01)
X, y = gen_feats_and_seed(dic, text)
model.fit(X, y)
return model
def fit_and_store_clf(dic, text):
    """Fit a Label Propagation classifier and store it in clf_ALPHA.pickle."""
clf = fit_clf(dic, text)
with open('{}/data/clf_ALPHA.pickle'.format(mod_path), 'wb') as file:
pickle.dump(clf, file, protocol=2)
def seed(dict_tup, text):
"""Assign a seedset label to the input tuple.
Generate seeds for the seedset by assigning integer labels to obvious
cases. Where there is no obvious case, '-1' is returned.
"""
ind, nsw, tag = dict_tup[0], dict_tup[1][0], dict_tup[1][1]
context = gen_frame((ind, (nsw, tag)), text)
if nsw in ['Mr.', 'Mrs.', 'Mr', 'Mrs']:
return 3
elif nsw in ['i.e.', 'ie.', 'e.g.', 'eg.']:
return 2
elif nsw.endswith('.') and nsw.istitle() and not acr_pattern.match(nsw):
return 1
elif nsw.lower() in abbrev_dict or nsw in ['St.', 'st.', 'St']:
return 1
elif (nsw.isupper() and is_cons(nsw) and not (nsw in meas_dict
and is_digbased(context[1]))):
return 2
elif nsw.endswith('s') and nsw[:-1].isupper():
return 2
elif (nsw in meas_dict or nsw in meas_dict_pl) and is_digbased(context[1]):
return 1
elif (nsw in ampm or nsw in adbc) and is_digbased(context[1]):
return 2
elif nsw.istitle() and nsw.isalpha() and len(nsw) > 3 and not is_cons(nsw):
return 3
elif (((nsw.startswith("O'") or nsw.startswith("D'")) and nsw[2:].istitle())
or (nsw.endswith("s'") and nsw[:-2].istitle())):
return 3
elif nsw in element_dict:
return 1
elif (not (nsw.isupper() or nsw.endswith('s') and nsw[:-1].isupper())
and (nsw.lower() in wordlist
or (nsw[:-1].lower() in wordlist and nsw.endswith('s')))
and nsw not in ampm):
return 3
elif triple_rep(nsw) and len(nsw) > 3:
return 3
elif nsw.isalpha() and nsw.islower() and len(nsw) > 3:
return 3
elif acr_pattern.match(nsw) and nsw not in meas_dict:
return 2
elif len(nsw) == 1:
return 2
elif nsw.isalpha and nsw.islower() and len(nsw) > 2:
return 3
else:
return -1
def is_cons(w):
"""Return True if no vowels in w."""
for lt in w:
if lt in ['A', 'E', 'I', 'O', 'U', 'a', 'e', 'i', 'o', 'u']:
return False
return True
def triple_rep(w):
"""Return 'True' if w has a letter repeated 3 times consecutively."""
for i in range(len(w) - 2):
if w[i] == w[i + 1] and w[i] == w[i + 2] and w[i].isalpha():
return True
return False
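# e.g. triple_rep('grrreat') is True, triple_rep('committee') is False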
| gpl-3.0 |
lin-credible/scikit-learn | benchmarks/bench_plot_lasso_path.py | 301 | 4003 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but is a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
samples_range = np.linspace(10, 2000, 5).astype(np.int)
features_range = np.linspace(10, 2000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
# dummy point plot to stick the legend to since surface plot do not
# support legends (yet?)
#ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
#ax.legend()
i += 1
plt.show()
| bsd-3-clause |
forever342/naarad | src/naarad/graphing/matplotlib_naarad.py | 4 | 9100 | # coding=utf-8
"""
Copyright 2013 LinkedIn Corp. All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy
import os
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from mpl_toolkits.axes_grid1 import host_subplot
import mpl_toolkits.axisartist as AA
import logging
import naarad.naarad_constants as CONSTANTS
logger = logging.getLogger('naarad.graphing.matplotlib')
def convert_to_mdate(date_str):
mdate = mdates.epoch2num(int(date_str) / 1000)
return mdate
# MPL-WA-07
# matplotlib does not rotate colors correctly when using multiple y axes. This method fills in that gap.
def get_current_color(index):
return CONSTANTS.COLOR_PALETTE[index % len(CONSTANTS.COLOR_PALETTE)]
def get_graph_metadata(plots):
height = 0
width = 0
title = ''
for plot in plots:
if plot.graph_height > height:
height = plot.graph_height
if plot.graph_width > width:
width = plot.graph_width
if title == '':
title = plot.graph_title
elif title != plot.graph_title:
title = title + ',' + plot.graph_title
return height / 80, width / 80, title
def curate_plot_list(plots):
delete_nodes = []
for plot in plots:
if os.path.exists(plot.input_csv):
if not os.path.getsize(plot.input_csv):
logger.warning("%s file is empty. No plot corresponding to this file will be generated", plot.input_csv)
delete_nodes.append(plot)
else:
logger.warning("%s file does not exist. No plot corresponding to this file will be generated", plot.input_csv)
delete_nodes.append(plot)
for node in delete_nodes:
plots.remove(node)
return plots
def highlight_region(plt, start_x, end_x):
"""
Highlight a region on the chart between the specified start and end x-co-ordinates.
  param pyplot plt: matplotlib pyplot which contains the charts to be highlighted
param string start_x : epoch time millis
param string end_x : epoch time millis
"""
start_x = convert_to_mdate(start_x)
end_x = convert_to_mdate(end_x)
plt.axvspan(start_x, end_x, color=CONSTANTS.HIGHLIGHT_COLOR, alpha=CONSTANTS.HIGHLIGHT_ALPHA)
def graph_data(list_of_plots, output_directory, resource_path, output_filename):
plots = curate_plot_list(list_of_plots)
plot_count = len(plots)
if plot_count == 0:
return False, None
graph_height, graph_width, graph_title = get_graph_metadata(list_of_plots)
current_plot_count = 0
fig, axis = plt.subplots()
fig.set_size_inches(graph_width, graph_height)
if plot_count < 2:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET)
else:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET,
right=CONSTANTS.SUBPLOT_RIGHT_OFFSET - CONSTANTS.Y_AXIS_OFFSET * (plot_count - 2))
current_axis = axis
for plot in plots:
current_plot_count += 1
logger.info('Processing: ' + plot.input_csv + ' [ ' + output_filename + ' ]')
timestamp, yval = numpy.loadtxt(plot.input_csv, unpack=True, delimiter=',', converters={0: convert_to_mdate})
maximum_yvalue = numpy.amax(yval) * (1.0 + CONSTANTS.ZOOM_FACTOR * current_plot_count)
minimum_yvalue = numpy.amin(yval) * (1.0 - CONSTANTS.ZOOM_FACTOR * current_plot_count)
    if current_plot_count == 1:
      current_axis.yaxis.set_ticks_position('left')
if current_plot_count > 1:
current_axis = axis.twinx()
current_axis.yaxis.grid(False)
# Set right y-axis for additional plots
current_axis.yaxis.set_ticks_position('right')
# Offset the right y axis to avoid overlap
current_axis.spines['right'].set_position(('axes', 1 + CONSTANTS.Y_AXIS_OFFSET * (current_plot_count - 2)))
current_axis.spines['right'].set_smart_bounds(False)
current_axis.spines['right'].set_color(get_current_color(current_plot_count))
current_axis.set_frame_on(True)
current_axis.patch.set_visible(False)
current_axis.set_ylabel(plot.y_label, color=get_current_color(current_plot_count), fontsize=CONSTANTS.Y_LABEL_FONTSIZE)
current_axis.set_ylim([minimum_yvalue, maximum_yvalue])
if plot.graph_type == 'line':
current_axis.plot_date(x=timestamp, y=yval, linestyle='-', marker=None, color=get_current_color(current_plot_count))
else:
current_axis.plot_date(x=timestamp, y=yval, marker='.', color=get_current_color(current_plot_count))
y_ticks = current_axis.get_yticklabels()
for y_tick in y_ticks:
y_tick.set_color(get_current_color(current_plot_count))
y_tick.set_fontsize(CONSTANTS.Y_TICKS_FONTSIZE)
for x_tick in current_axis.get_xticklabels():
x_tick.set_fontsize(CONSTANTS.X_TICKS_FONTSIZE)
if plot.highlight_regions is not None:
for region in plot.highlight_regions:
highlight_region(plt, str(region.start_timestamp), str(region.end_timestamp))
axis.yaxis.grid(True)
axis.xaxis.grid(True)
axis.set_title(graph_title)
axis.set_xlabel('Time')
x_date_format = mdates.DateFormatter(CONSTANTS.X_TICKS_DATEFORMAT)
axis.xaxis.set_major_formatter(x_date_format)
plot_file_name = os.path.join(output_directory, output_filename + ".png")
fig.savefig(plot_file_name)
plt.close()
# Create html fragment to be used for creation of the report
with open(os.path.join(output_directory, output_filename + '.div'), 'w') as div_file:
div_file.write('<a name="' + os.path.basename(plot_file_name).replace(".png", "").replace(".diff", "") + '"></a><div class="col-md-12"><img src="' +
resource_path + '/' + os.path.basename(plot_file_name) + '" id="' + os.path.basename(plot_file_name) +
'" width="100%" height="auto"/></div><div class="col-md-12"><p align="center"><strong>' + os.path.basename(plot_file_name) +
'</strong></p></div><hr />')
return True, os.path.join(output_directory, output_filename + '.div')
def graph_data_on_the_same_graph(list_of_plots, output_directory, resource_path, output_filename):
"""
  graph_data_on_the_same_graph: plot a list of plots on the same graph; currently used for CDF plots
"""
maximum_yvalue = -float('inf')
minimum_yvalue = float('inf')
plots = curate_plot_list(list_of_plots)
plot_count = len(plots)
if plot_count == 0:
return False, None
graph_height, graph_width, graph_title = get_graph_metadata(plots)
current_plot_count = 0
fig, axis = plt.subplots()
fig.set_size_inches(graph_width, graph_height)
if plot_count < 2:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET, right=CONSTANTS.SUBPLOT_RIGHT_OFFSET)
else:
fig.subplots_adjust(left=CONSTANTS.SUBPLOT_LEFT_OFFSET, bottom=CONSTANTS.SUBPLOT_BOTTOM_OFFSET,
right=CONSTANTS.SUBPLOT_RIGHT_OFFSET - CONSTANTS.Y_AXIS_OFFSET * (plot_count - 2))
# Generate each plot on the graph
for plot in plots:
current_plot_count += 1
logger.info('Processing: ' + plot.input_csv + ' [ ' + output_filename + ' ]')
xval, yval = numpy.loadtxt(plot.input_csv, unpack=True, delimiter=',')
axis.plot(xval, yval, linestyle='-', marker=None, color=get_current_color(current_plot_count), label=plot.plot_label)
axis.legend()
maximum_yvalue = max(maximum_yvalue, numpy.amax(yval) * (1.0 + CONSTANTS.ZOOM_FACTOR * current_plot_count))
minimum_yvalue = min(minimum_yvalue, numpy.amin(yval) * (1.0 - CONSTANTS.ZOOM_FACTOR * current_plot_count))
# Set properties of the plots
axis.yaxis.set_ticks_position('left')
axis.set_xlabel(plots[0].x_label)
axis.set_ylabel(plots[0].y_label, fontsize=CONSTANTS.Y_LABEL_FONTSIZE)
axis.set_ylim([minimum_yvalue, maximum_yvalue])
axis.yaxis.grid(True)
axis.xaxis.grid(True)
axis.set_title(graph_title)
plot_file_name = os.path.join(output_directory, output_filename + ".png")
fig.savefig(plot_file_name)
plt.close()
# Create html fragment to be used for creation of the report
with open(os.path.join(output_directory, output_filename + '.div'), 'w') as div_file:
div_file.write('<a name="' + os.path.basename(plot_file_name).replace(".png", "").replace(".diff", "") + '"></a><div class="col-md-12"><img src="' +
resource_path + '/' + os.path.basename(plot_file_name) + '" id="' + os.path.basename(plot_file_name) +
'" width="100%" height="auto"/></div><div class="col-md-12"><p align=center>' + os.path.basename(plot_file_name) + '<br/></p></div>')
return True, os.path.join(output_directory, output_filename + '.div')
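# Illustrative sketch (not part of the original module): graph_data() only relies
# on a handful of attributes of each plot object (naarad defines the real Plot
# class elsewhere); a minimal stand-in, following the module's Python 2 style:
if __name__ == '__main__':
  import collections
  import tempfile
  DemoPlot = collections.namedtuple('DemoPlot', ['input_csv', 'graph_height', 'graph_width',
                                                 'graph_title', 'y_label', 'graph_type',
                                                 'highlight_regions'])
  demo_dir = tempfile.mkdtemp()
  demo_csv = os.path.join(demo_dir, 'demo_metric.csv')
  with open(demo_csv, 'w') as handle:
    handle.write('1388534400000,1.0\n1388534460000,2.0\n')
  demo_plot = DemoPlot(demo_csv, 600, 1200, 'demo metric', 'value', 'line', None)
  print(graph_data([demo_plot], demo_dir, '.', 'demo_graph'))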
| apache-2.0 |