repo_name (stringlengths 7-92) | path (stringlengths 5-149) | copies (stringlengths 1-3) | size (stringlengths 4-6) | content (stringlengths 911-693k) | license (stringclasses, 15 values) |
---|---|---|---|---|---|
tmhm/scikit-learn | examples/plot_kernel_approximation.py | 262 | 8004 | """
==================================================
Explicit feature map approximation for RBF kernels
==================================================
An example illustrating the approximation of the feature map
of an RBF kernel.
.. currentmodule:: sklearn.kernel_approximation
It shows how to use :class:`RBFSampler` and :class:`Nystroem` to
approximate the feature map of an RBF kernel for classification with an SVM on
the digits dataset. Results using a linear SVM in the original space, a linear
SVM using the approximate mappings, and a kernelized SVM are compared.
Timings and accuracy for varying amounts of Monte Carlo samplings (in the case
of :class:`RBFSampler`, which uses random Fourier features) and different sized
subsets of the training set (for :class:`Nystroem`) for the approximate mapping
are shown.
Please note that the dataset here is not large enough to show the benefits
of kernel approximation, as the exact SVM is still reasonably fast.
Sampling more dimensions clearly leads to better classification results, but
comes at a greater cost. This means there is a tradeoff between runtime and
accuracy, given by the parameter n_components. Note that solving the Linear
SVM and also the approximate kernel SVM could be greatly accelerated by using
stochastic gradient descent via :class:`sklearn.linear_model.SGDClassifier`.
This is not easily possible for the case of the kernelized SVM.
The second plot visualizes the decision surfaces of the RBF kernel SVM and
the linear SVM with approximate kernel maps.
The plot shows decision surfaces of the classifiers projected onto
the first two principal components of the data. This visualization should
be taken with a grain of salt since it is just an interesting slice through
the decision surface in 64 dimensions. In particular, note that
a datapoint (represented as a dot) will not necessarily be classified
into the region it appears to lie in, since it does not lie exactly on the
plane that the first two principal components span.
The usage of :class:`RBFSampler` and :class:`Nystroem` is described in detail
in :ref:`kernel_approximation`.
"""
print(__doc__)
# Author: Gael Varoquaux <gael dot varoquaux at normalesup dot org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# License: BSD 3 clause
# Standard scientific Python imports
import matplotlib.pyplot as plt
import numpy as np
from time import time
# Import datasets, classifiers and performance metrics
from sklearn import datasets, svm, pipeline
from sklearn.kernel_approximation import (RBFSampler,
Nystroem)
from sklearn.decomposition import PCA
# The digits dataset
digits = datasets.load_digits(n_class=9)
# To apply a classifier on this data, we need to flatten the images, turning
# the data into a (samples, features) matrix:
n_samples = len(digits.data)
data = digits.data / 16.
data -= data.mean(axis=0)
# We learn the digits on the first half of the digits
data_train, targets_train = data[:n_samples // 2], digits.target[:n_samples // 2]
# Now predict the value of the digit on the second half:
data_test, targets_test = data[n_samples // 2:], digits.target[n_samples // 2:]
#data_test = scaler.transform(data_test)
# Create a classifier: a support vector classifier
kernel_svm = svm.SVC(gamma=.2)
linear_svm = svm.LinearSVC()
# create pipeline from kernel approximation
# and linear svm
feature_map_fourier = RBFSampler(gamma=.2, random_state=1)
feature_map_nystroem = Nystroem(gamma=.2, random_state=1)
fourier_approx_svm = pipeline.Pipeline([("feature_map", feature_map_fourier),
("svm", svm.LinearSVC())])
nystroem_approx_svm = pipeline.Pipeline([("feature_map", feature_map_nystroem),
("svm", svm.LinearSVC())])
# fit and predict using linear and kernel svm:
kernel_svm_time = time()
kernel_svm.fit(data_train, targets_train)
kernel_svm_score = kernel_svm.score(data_test, targets_test)
kernel_svm_time = time() - kernel_svm_time
linear_svm_time = time()
linear_svm.fit(data_train, targets_train)
linear_svm_score = linear_svm.score(data_test, targets_test)
linear_svm_time = time() - linear_svm_time
sample_sizes = 30 * np.arange(1, 10)
fourier_scores = []
nystroem_scores = []
fourier_times = []
nystroem_times = []
for D in sample_sizes:
fourier_approx_svm.set_params(feature_map__n_components=D)
nystroem_approx_svm.set_params(feature_map__n_components=D)
start = time()
nystroem_approx_svm.fit(data_train, targets_train)
nystroem_times.append(time() - start)
start = time()
fourier_approx_svm.fit(data_train, targets_train)
fourier_times.append(time() - start)
fourier_score = fourier_approx_svm.score(data_test, targets_test)
nystroem_score = nystroem_approx_svm.score(data_test, targets_test)
nystroem_scores.append(nystroem_score)
fourier_scores.append(fourier_score)
# plot the results:
plt.figure(figsize=(8, 8))
accuracy = plt.subplot(211)
# second subplot for timings
timescale = plt.subplot(212)
accuracy.plot(sample_sizes, nystroem_scores, label="Nystroem approx. kernel")
timescale.plot(sample_sizes, nystroem_times, '--',
label='Nystroem approx. kernel')
accuracy.plot(sample_sizes, fourier_scores, label="Fourier approx. kernel")
timescale.plot(sample_sizes, fourier_times, '--',
label='Fourier approx. kernel')
# horizontal lines for exact rbf and linear kernels:
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_score, linear_svm_score], label="linear svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[linear_svm_time, linear_svm_time], '--', label='linear svm')
accuracy.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_score, kernel_svm_score], label="rbf svm")
timescale.plot([sample_sizes[0], sample_sizes[-1]],
[kernel_svm_time, kernel_svm_time], '--', label='rbf svm')
# vertical line for dataset dimensionality = 64
accuracy.plot([64, 64], [0.7, 1], label="n_features")
# legends and labels
accuracy.set_title("Classification accuracy")
timescale.set_title("Training times")
accuracy.set_xlim(sample_sizes[0], sample_sizes[-1])
accuracy.set_xticks(())
accuracy.set_ylim(np.min(fourier_scores), 1)
timescale.set_xlabel("Sampling steps = transformed feature dimension")
accuracy.set_ylabel("Classification accuracy")
timescale.set_ylabel("Training time in seconds")
accuracy.legend(loc='best')
timescale.legend(loc='best')
# visualize the decision surface, projected down to the first
# two principal components of the dataset
pca = PCA(n_components=8).fit(data_train)
X = pca.transform(data_train)
# Generate grid along first two principal components
multiples = np.arange(-2, 2, 0.1)
# steps along first component
first = multiples[:, np.newaxis] * pca.components_[0, :]
# steps along second component
second = multiples[:, np.newaxis] * pca.components_[1, :]
# combine
grid = first[np.newaxis, :, :] + second[:, np.newaxis, :]
flat_grid = grid.reshape(-1, data.shape[1])
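# grid has shape (40, 40, 64): a 40 x 40 mesh of points in the plane spanned by
# the first two principal components, mapped back into the 64-dimensional input
# space; flat_grid flattens it to (1600, 64) so the classifiers can predict on it.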
# title for the plots
titles = ['SVC with rbf kernel',
          'SVC (linear kernel)\n with Nystroem rbf feature map\n'
          'n_components=100',
          'SVC (linear kernel)\n with Fourier rbf feature map\n'
          'n_components=100']
plt.tight_layout()
plt.figure(figsize=(12, 5))
# predict and plot
for i, clf in enumerate((kernel_svm, nystroem_approx_svm,
fourier_approx_svm)):
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
plt.subplot(1, 3, i + 1)
Z = clf.predict(flat_grid)
# Put the result into a color plot
Z = Z.reshape(grid.shape[:-1])
plt.contourf(multiples, multiples, Z, cmap=plt.cm.Paired)
plt.axis('off')
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=targets_train, cmap=plt.cm.Paired)
plt.title(titles[i])
plt.tight_layout()
plt.show()
| bsd-3-clause |
ahaberlie/MetPy | examples/plots/Hodograph_Inset.py | 8 | 2367 | # Copyright (c) 2016 MetPy Developers.
# Distributed under the terms of the BSD 3-Clause License.
# SPDX-License-Identifier: BSD-3-Clause
"""
Hodograph Inset
===============
Layout a Skew-T plot with a hodograph inset into the plot.
"""
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
import pandas as pd
import metpy.calc as mpcalc
from metpy.cbook import get_test_data
from metpy.plots import add_metpy_logo, Hodograph, SkewT
from metpy.units import units
###########################################
# Upper air data can be obtained using the siphon package, but for this example we will use
# some of MetPy's sample data.
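# A hedged sketch of the siphon route (the station and date below are assumptions):
# from datetime import datetime
# from siphon.simplewebservice.wyoming import WyomingUpperAir
# df = WyomingUpperAir.request_data(datetime(1999, 5, 4, 0), 'OUN')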
col_names = ['pressure', 'height', 'temperature', 'dewpoint', 'direction', 'speed']
df = pd.read_fwf(get_test_data('may4_sounding.txt', as_file_obj=False),
skiprows=5, usecols=[0, 1, 2, 3, 6, 7], names=col_names)
# Drop any rows with all NaN values for T, Td, winds
df = df.dropna(subset=('temperature', 'dewpoint', 'direction', 'speed'
), how='all').reset_index(drop=True)
###########################################
# We will pull the data out of the example dataset into individual variables and
# assign units.
hght = df['height'].values * units.meter
p = df['pressure'].values * units.hPa
T = df['temperature'].values * units.degC
Td = df['dewpoint'].values * units.degC
wind_speed = df['speed'].values * units.knots
wind_dir = df['direction'].values * units.degrees
u, v = mpcalc.wind_components(wind_speed, wind_dir)
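# wind_components follows the meteorological convention: with direction given as
# the bearing the wind blows from, u = -speed * sin(direction) and
# v = -speed * cos(direction).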
###########################################
# Create a new figure. The dimensions here give a good aspect ratio
fig = plt.figure(figsize=(9, 9))
add_metpy_logo(fig, 115, 100)
# Grid for plots
skew = SkewT(fig, rotation=45)
# Plot the data using normal plotting functions, in this case using
# log scaling in Y, as dictated by the typical meteorological plot
skew.plot(p, T, 'r')
skew.plot(p, Td, 'g')
skew.plot_barbs(p, u, v)
skew.ax.set_ylim(1000, 100)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
# Good bounds for aspect ratio
skew.ax.set_xlim(-50, 60)
# Create a hodograph
ax_hod = inset_axes(skew.ax, '40%', '40%', loc=1)
h = Hodograph(ax_hod, component_range=80.)
h.add_grid(increment=20)
h.plot_colormapped(u, v, hght)
# Show the plot
plt.show()
| bsd-3-clause |
shahankhatch/scikit-learn | examples/cluster/plot_agglomerative_clustering.py | 343 | 2931 | """
Agglomerative clustering with and without structure
===================================================
This example shows the effect of imposing a connectivity graph to capture
local structure in the data. The graph is simply the graph of the 30 nearest
neighbors.
Two consequences of imposing a connectivity can be seen. First, clustering
with a connectivity matrix is much faster.
Second, when using a connectivity matrix, average and complete linkage are
unstable and tend to create a few clusters that grow very quickly. Indeed,
average and complete linkage fight this percolation behavior by considering all
the distances between two clusters when merging them. The connectivity
graph breaks this mechanism. This effect is more pronounced for very
sparse graphs (try decreasing the number of neighbors in
kneighbors_graph) and with complete linkage. In particular, having a very
small number of neighbors in the graph, imposes a geometry that is
close to that of single linkage, which is well known to have this
percolation instability.
"""
# Authors: Gael Varoquaux, Nelle Varoquaux
# License: BSD 3 clause
import time
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.neighbors import kneighbors_graph
# Generate sample data
n_samples = 1500
np.random.seed(0)
t = 1.5 * np.pi * (1 + 3 * np.random.rand(1, n_samples))
x = t * np.cos(t)
y = t * np.sin(t)
X = np.concatenate((x, y))
X += .7 * np.random.randn(2, n_samples)
X = X.T
# Create a graph capturing local connectivity. Larger number of neighbors
# will give more homogeneous clusters to the cost of computation
# time. A very large number of neighbors gives more evenly distributed
# cluster sizes, but may not impose the local manifold structure of
# the data
knn_graph = kneighbors_graph(X, 30, include_self=False)
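# knn_graph is a sparse (n_samples, n_samples) connectivity matrix with 30
# nonzero entries per row, one per neighbor.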
for connectivity in (None, knn_graph):
for n_clusters in (30, 3):
plt.figure(figsize=(10, 4))
for index, linkage in enumerate(('average', 'complete', 'ward')):
plt.subplot(1, 3, index + 1)
model = AgglomerativeClustering(linkage=linkage,
connectivity=connectivity,
n_clusters=n_clusters)
t0 = time.time()
model.fit(X)
elapsed_time = time.time() - t0
plt.scatter(X[:, 0], X[:, 1], c=model.labels_,
cmap=plt.cm.spectral)
plt.title('linkage=%s (time %.2fs)' % (linkage, elapsed_time),
fontdict=dict(verticalalignment='top'))
plt.axis('equal')
plt.axis('off')
plt.subplots_adjust(bottom=0, top=.89, wspace=0,
left=0, right=1)
plt.suptitle('n_cluster=%i, connectivity=%r' %
(n_clusters, connectivity is not None), size=17)
plt.show()
| bsd-3-clause |
MartinSavc/scikit-learn | examples/decomposition/plot_pca_3d.py | 354 | 2432 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Principal components analysis (PCA)
=========================================================
These figures aid in illustrating how a point cloud
can be very flat in one direction--which is where PCA
comes in to choose a direction that is not flat.
"""
print(__doc__)
# Authors: Gael Varoquaux
# Jaques Grobler
# Kevin Hughes
# License: BSD 3 clause
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
###############################################################################
# Create the data
e = np.exp(1)
np.random.seed(4)
def pdf(x):
return 0.5 * (stats.norm(scale=0.25 / e).pdf(x)
+ stats.norm(scale=4 / e).pdf(x))
y = np.random.normal(scale=0.5, size=(30000))
x = np.random.normal(scale=0.5, size=(30000))
z = np.random.normal(scale=0.1, size=len(x))
density = pdf(x) * pdf(y)
pdf_z = pdf(5 * z)
density *= pdf_z
a = x + y
b = 2 * y
c = a - b + z
norm = np.sqrt(a.var() + b.var())
a /= norm
b /= norm
###############################################################################
# Plot the figures
def plot_figs(fig_num, elev, azim):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, rect=[0, 0, .95, 1], elev=elev, azim=azim)
ax.scatter(a[::10], b[::10], c[::10], c=density[::10], marker='+', alpha=.4)
Y = np.c_[a, b, c]
# Using SciPy's SVD, this would be:
# _, pca_score, V = scipy.linalg.svd(Y, full_matrices=False)
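    # (a sketch, assuming Y is mean-centered: the explained-variance ratios used
    #  below would then equal pca_score ** 2 / np.sum(pca_score ** 2))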
pca = PCA(n_components=3)
pca.fit(Y)
pca_score = pca.explained_variance_ratio_
V = pca.components_
x_pca_axis, y_pca_axis, z_pca_axis = V.T * pca_score / pca_score.min()
x_pca_axis, y_pca_axis, z_pca_axis = 3 * V.T
x_pca_plane = np.r_[x_pca_axis[:2], - x_pca_axis[1::-1]]
y_pca_plane = np.r_[y_pca_axis[:2], - y_pca_axis[1::-1]]
z_pca_plane = np.r_[z_pca_axis[:2], - z_pca_axis[1::-1]]
x_pca_plane.shape = (2, 2)
y_pca_plane.shape = (2, 2)
z_pca_plane.shape = (2, 2)
ax.plot_surface(x_pca_plane, y_pca_plane, z_pca_plane)
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
elev = -40
azim = -80
plot_figs(1, elev, azim)
elev = 30
azim = 20
plot_figs(2, elev, azim)
plt.show()
| bsd-3-clause |
jblackburne/scikit-learn | doc/tutorial/text_analytics/solutions/exercise_02_sentiment.py | 104 | 3139 | """Build a sentiment analysis / polarity model
Sentiment analysis can be cast as a binary text classification problem,
that is, fitting a linear classifier on features extracted from the text
of the user messages so as to guess whether the opinion of the author is
positive or negative.
In this example we will use a movie review dataset.
"""
# Author: Olivier Grisel <olivier.grisel@ensta.org>
# License: Simplified BSD
import sys
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.svm import LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.datasets import load_files
from sklearn.model_selection import train_test_split
from sklearn import metrics
if __name__ == "__main__":
# NOTE: we put the following in a 'if __name__ == "__main__"' protected
# block to be able to use a multi-core grid search that also works under
# Windows, see: http://docs.python.org/library/multiprocessing.html#windows
# The multiprocessing module is used as the backend of joblib.Parallel
# that is used when n_jobs != 1 in GridSearchCV
# the training data folder must be passed as first argument
movie_reviews_data_folder = sys.argv[1]
dataset = load_files(movie_reviews_data_folder, shuffle=False)
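    # load_files expects one sub-folder per class; for the movie review data the
    # layout is assumed to look like:
    #   txt_sentoken/pos/*.txt   (positive reviews)
    #   txt_sentoken/neg/*.txt   (negative reviews)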
print("n_samples: %d" % len(dataset.data))
# split the dataset in training and test set:
docs_train, docs_test, y_train, y_test = train_test_split(
dataset.data, dataset.target, test_size=0.25, random_state=None)
# TASK: Build a vectorizer / classifier pipeline that filters out tokens
# that are too rare or too frequent
pipeline = Pipeline([
('vect', TfidfVectorizer(min_df=3, max_df=0.95)),
('clf', LinearSVC(C=1000)),
])
# TASK: Build a grid search to find out whether unigrams or bigrams are
# more useful.
# Fit the pipeline on the training set using grid search for the parameters
parameters = {
'vect__ngram_range': [(1, 1), (1, 2)],
}
grid_search = GridSearchCV(pipeline, parameters, n_jobs=-1)
grid_search.fit(docs_train, y_train)
# TASK: print the mean and std for each candidate along with the parameter
# settings for all the candidates explored by grid search.
n_candidates = len(grid_search.cv_results_['params'])
for i in range(n_candidates):
print(i, 'params - %s; mean - %0.2f; std - %0.2f'
% (grid_search.cv_results_['params'][i],
grid_search.cv_results_['mean_test_score'][i],
grid_search.cv_results_['std_test_score'][i]))
# TASK: Predict the outcome on the testing set and store it in a variable
# named y_predicted
y_predicted = grid_search.predict(docs_test)
# Print the classification report
print(metrics.classification_report(y_test, y_predicted,
target_names=dataset.target_names))
# Print and plot the confusion matrix
cm = metrics.confusion_matrix(y_test, y_predicted)
print(cm)
# import matplotlib.pyplot as plt
# plt.matshow(cm)
# plt.show()
| bsd-3-clause |
lht142934/vnpy | vn.datayes/storage.py | 29 | 18623 | import os
import json
import pymongo
import pandas as pd
from datetime import datetime, timedelta
from api import Config, PyApi
from api import BaseDataContainer, History, Bar
from errors import (VNPAST_ConfigError, VNPAST_RequestError,
VNPAST_DataConstructorError, VNPAST_DatabaseError)
class DBConfig(Config):
"""
Json-like config object; inherits from Config()
Contains all kinds of settings relating to database settings.
privates
--------
Inherited from api.Config, plus:
* client: pymongo.MongoClient object, the connection
that is to be used for this session.
* body: dictionary; the main content of config.
- client: pymongo.MongoClient(), refers to self.client.
- dbs: dictionary, is a mapping from database alias
to another dictionary, which includes configurations
and the databases themselves (i.e. pymongo.database entities).
Concretely, dbs has the structure like:
{
alias1 : {
'self': client[dbName1],
'index': dbIndex1,
'collNames': collectionNameType1
},
alias2 : {
'self': client[dbName2],
'index': dbIndex2,
'collNames': collectionNameType2
}, ...
}
where alias#: string;
dbs.alias#.self: pymongo.database;
dbs.alias#.index: string;
dbs.alias#.collNames: string;
- dbNames: list; a list of database alias.
"""
head = 'DB config'
client = pymongo.MongoClient()
body = {
'client': client,
'dbs': {
'EQU_M1': {
'self': client['DATAYES_EQUITY_M1'],
'index': 'dateTime',
'collNames': 'secID'
},
'EQU_D1': {
'self': client['DATAYES_EQUITY_D1'],
'index': 'date',
'collNames': 'equTicker'
},
'FUT_D1': {
'self': client['DATAYES_FUTURE_D1'],
'index': 'date',
'collNames': 'futTicker'
},
'OPT_D1': {
'self': client['DATAYES_OPTION_D1'],
'index': 'date',
'collNames': 'optTicker'
},
'FUD_D1': {
'self': client['DATAYES_FUND_D1'],
'index': 'date',
'collNames': 'fudTicker'
},
'IDX_D1': {
'self': client['DATAYES_INDEX_D1'],
'index': 'date',
'collNames': 'idxTicker'
}
},
'dbNames': ['EQU_M1', 'EQU_D1', 'FUT_D1',
'OPT_D1', 'FUD_D1', 'IDX_D1']
}
def __init__(self, head=None, token=None, body=None):
"""
Inherited constructor.
parameters
----------
* head: string; the name of config file. Default is None.
* token: string; user's token.
* body: dictionary; the main content of config
"""
super(DBConfig, self).__init__(head, token, body)
def view(self):
""" Reloaded Prettify printing method. """
config_view = {
'dbConfig_head' : self.head,
'dbConfig_body' : str(self.body),
}
print json.dumps(config_view,
indent=4,
sort_keys=True)
#----------------------------------------------------------------------
# MongoDB Controller class
class MongodController(object):
"""
The MongoDB controller interface.
MongodController is initialized with a DBConfig configuration
object and a PyApi object, which has already been constructed with
its own Config json. The default version of constructor actually
does nothing special about the database. Yet if user executes shell
script prepare.sh to prepare the connection, MongodController will
firstly gather symbols that are going to become collection names
in corresponding databases. This process is done one database by another,
user can skip useless databases by editing the scripts.
Then, it ensures the index of each collection due to the 'index' value
in DBConfig.body.dbs. Concretely, for D1 bars, the index will be 'date',
and for intraday bars, it will be 'dateTime'; both take the form of
datetime.datetime timestamp.
download() and update() methods of controller dynamically construct
and maintain the databases, requesting data via PyApi. Once the database
is constructed, MongodController can access required data via its fetch()
method.
privates
--------
* _config: DBConfig object; a container of all useful settings for the
databases.
* _api: PyApi object; is responsible for making requests.
* _client: pymongo.MongoClient object; the connection to MongoDB.
* _dbs: dictionary; a mapping from database names to another dictionary,
which includes configurations of the database and the pymongo.database
entity. Inherited from _config.body.['dbs']. Note that keys
self._dbs are mere strings, only self._dbs[key]['self'] refers to the
pymongo.Database object.
* _dbNames: list; a list of names of databases.
* _collNames: dictionary; mapping from self._db[key]['collNames'] attribute
to the names of collections(i.e. tickers) within.
- example: _collNames['equTicker'] = ['000001', '000002', ...]
* _connected: boolean; whether the MongoClient was connected to or not.
* _mapTickersToSecIDs: dictionary; mapping from stock tickers to
its security ID.
example
-------
>> myApi = PyApi(Config())
>> mydbs = DBConfig()
>> controller = MongodController(mydbs, myApi)
>> controller._get_coll_names()
>> controller._ensure_index()
>> controller.download_equity_D1(20130101, 20150801)
>> controller.update_equity_D1()
"""
_config = DBConfig()
_api = None
_client = None
_dbs = None
_dbNames = []
_collNames = dict()
_connected = False
_mapTickersToSecIDs = dict()
def __init__(self, config, api):
"""
Constructor.
parameters
----------
* config: DBConfig object; specifies database configs.
* api: PyApi object.
"""
self._api = api # Set Datayes PyApi.
if config.body:
try:
self._config = config.body
self._client = config.body['client']
self._dbs = config.body['dbs']
self._dbNames = config.body['dbNames']
self._connected = True
except KeyError:
msg = '[MONGOD]: Unable to configure database; ' + \
'config file is incomplete.'
raise VNPAST_ConfigError(msg)
except Exception,e:
msg = '[MONGOD]: Unable to configure database; ' + str(e)
raise VNPAST_ConfigError(msg)
if self._connected:
#self._get_coll_names()
#self._ensure_index()
pass
def view(self):
"""
NOT IMPLEMENTED
"""
return
#----------------------------------------------------------------------
# Get collection names methods.
"""
Decorator;
Targeting at path dName, if exists, read data from this file;
if not, execute handle() which returns a json-like data and
stores the data at dName path.
parameters
----------
* dName: string; the specific path of file that __md looks at.
"""
def __md(dName):
def _md(get):
def handle(*args, **kwargs):
try:
if os.path.isfile(dName):
# if directory exists, read from it.
jsonFile = open(dName,'r')
data = json.loads(jsonFile.read())
jsonFile.close()
else:
# if not, get data via *get method,
# then write to the file.
data = get(*args, **kwargs)
jsonFile = open(dName, 'w+')
jsonFile.write(json.dumps(data))
jsonFile.close()
#print data
return data
except Exception,e:
raise e
return handle
return _md
@__md('names/equTicker.json')
def _allEquTickers(self):
"""get all equity tickers, decorated by @__md()."""
data = self._api.get_equity_D1()
allEquTickers = list(data.body['ticker'])
return allEquTickers
@__md('names/secID.json')
def _allSecIds(self):
"""get all security IDs, decorated by @__md()."""
data = self._api.get_equity_D1()
allTickers = list(data.body['ticker'])
exchangeCDs = list(data.body['exchangeCD'])
allSecIds = [allTickers[k]+'.'+exchangeCDs[k] for k in range(
len(allTickers))]
return allSecIds
@__md('names/futTicker.json')
def _allFutTickers(self):
"""get all future tickers, decorated by @__md()."""
data = self._api.get_future_D1()
allFutTickers = list(data.body['ticker'])
return allFutTickers
@__md('names/optTicker.json')
def _allOptTickers(self):
"""get all option tickers, decorated by @__md()."""
data = self._api.get_option_D1()
allOptTickers = list(data.body['ticker'])
return allOptTickers
@__md('names/fudTicker.json')
def _allFudTickers(self):
"""get all fund tickers, decorated by @__md()."""
data = self._api.get_fund_D1()
allFudTickers = list(data.body['ticker'])
return allFudTickers
@__md('names/idxTicker.json')
def _allIdxTickers(self):
"""get all index tickers, decorated by @__md()."""
data = self._api.get_index_D1()
allIdxTickers = list(data.body['ticker'])
return allIdxTickers
@__md('names/bndTicker.json')
def _allBndTickers(self):
"""get all bond tickers, decorated by @__md()."""
data = self._api.get_bond_D1()
allBndTickers = list(data.body['ticker'])
return allBndTickers
def _get_coll_names(self):
"""
get all instruments' names and store them in self._collNames.
"""
try:
if not os.path.exists('names'):
os.makedirs('names')
self._collNames['equTicker'] = self._allEquTickers()
self._collNames['fudTicker'] = self._allFudTickers()
self._collNames['secID'] = self._allSecIds()
self._collNames['futTicker'] = self._allFutTickers()
self._collNames['optTicker'] = self._allOptTickers()
self._collNames['idxTicker'] = self._allIdxTickers()
print '[MONGOD]: Collection names gotten.'
return 1
except AssertionError:
warning = '[MONGOD]: Warning, collection names ' + \
'is an empty list.'
print warning
except Exception, e:
msg = '[MONGOD]: Unable to set collection names; ' + \
str(e)
raise VNPAST_DatabaseError(msg)
#----------------------------------------------------------------------
# Ensure collection index method.
def _ensure_index(self):
"""
Ensure indices for all databases and collections.
first access self._dbs config to get index column names;
then get collection names from self._collNames and loop
over all collections.
"""
if self._collNames and self._dbs:
try:
for dbName in self._dbs:
# Iterate over database configurations.
db = self._dbs[dbName]
dbSelf = db['self']
index = db['index']
collNames = self._collNames[db['collNames']]
# db['self'] is the pymongo.Database object.
for name in collNames:
coll = dbSelf[name]
coll.ensure_index([(index,
pymongo.DESCENDING)], unique=True)
print '[MONGOD]: MongoDB index set.'
return 1
except KeyError:
msg = '[MONGOD]: Unable to set collection indices; ' + \
'information in Config.body["dbs"] is incomplete.'
raise VNPAST_DatabaseError(msg)
except Exception, e:
msg = '[MONGOD]: Unable to set collection indices; ' + str(e)
raise VNPAST_DatabaseError(msg)
#----------------------------------------------------------------------
# Download method.
def download_equity_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['EQU_D1']['self']
self._api.get_equity_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_equity_M1(self, tasks, startYr=2012, endYr=2015):
"""
"""
try:
# map equity tickers to security IDs.
if self._mapTickersToSecIDs:
maps = self._mapTickersToSecIDs
else:
                assert os.path.isfile('./names/secID.json')
                jsonFile = open('./names/secID.json', 'r')
allSecIds = json.loads(jsonFile.read())
jsonFile.close()
allTickers = [s.split('.')[0] for s in allSecIds]
maps = dict(zip(allTickers, allSecIds))
self._mapTickersToSecIDs = maps
tasks_ = [maps[task] for task in tasks]
db = self._dbs['EQU_M1']['self']
self._api.get_equity_M1_interMonth(db, id=1,
startYr = startYr,
endYr = endYr,
tasks = tasks_)
except AssertionError:
msg = '[MONGOD]: Cannot map tickers to secIDs; ' + \
'secID.json does not exist.'
raise VNPAST_DatabaseError(msg)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_bond_D1(self, start, end, sessionNum=30):
"""
"""
pass
def download_future_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['FUT_D1']['self']
self._api.get_future_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_option_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['OPT_D1']['self']
self._api.get_option_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_index_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['IDX_D1']['self']
self._api.get_index_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def download_fund_D1(self, start, end, sessionNum=30):
"""
"""
try:
db = self._dbs['FUD_D1']['self']
self._api.get_fund_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to download data; ' + str(e)
raise VNPAST_DatabaseError(msg)
#----------------------------------------------------------------------
# Update methods.
def __update(self, key, target1, target2, sessionNum):
"""
Basic update method.
Looks into the database specified by 'key', find the latest
record in the collection of it. Then update the collections
till last trading date.
parameters
----------
* key: string; a database alias (refer to the database config)
e.g., 'EQU_D1'.
* target1: method; pointer to the function with which controller
obtain all tickers in the database. Concretely, target1 are
self._all#Tickers methods.
* target2: method; pointer to the api overlord requesting functions
i.e. self._api.get_###_mongod methods.
* sessionNum: integer; the number of threads.
"""
try:
# get databases and tickers
db = self._dbs[key]['self']
index = self._dbs[key]['index']
allTickers = target1()
coll = db[allTickers[0]]
# find the latest timestamp in collection.
latest = coll.find_one(
sort=[(index, pymongo.DESCENDING)])[index]
start = datetime.strftime(
latest + timedelta(days=1),'%Y%m%d')
end = datetime.strftime(datetime.now(), '%Y%m%d')
# then download.
target2(db, start, end, sessionNum)
return db
except Exception, e:
msg = '[MONGOD]: Unable to update data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def update_equity_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'EQU_D1',
target1 = self._allEquTickers,
target2 = self._api.get_equity_D1_mongod,
sessionNum = sessionNum)
return db
def update_future_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'FUT_D1',
target1 = self._allFutTickers,
target2 = self._api.get_future_D1_mongod,
sessionNum = sessionNum)
return db
def update_option_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'OPT_D1',
target1 = self._allOptTickers,
target2 = self._api.get_option_D1_mongod,
sessionNum = sessionNum)
return db
def update_index_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'IDX_D1',
target1 = self._allIdxTickers,
target2 = self._api.get_index_D1_mongod,
sessionNum = sessionNum)
return db
def update_fund_D1(self, sessionNum=30):
"""
"""
db = self.__update(key = 'FUD_D1',
target1 = self._allFudTickers,
target2 = self._api.get_fund_D1_mongod,
sessionNum = sessionNum)
return db
#----------------------------------------------------------------------#
# stuff that will be deprecated
def update_equity_D1_(self, sessionNum=30):
"""
"""
try:
# set databases and tickers
db = self._dbs['EQU_D1']['self']
index = self._dbs['EQU_D1']['index']
allEquTickers = self._allEquTickers()
coll = db[allEquTickers[0]]
# find the latest timestamp in collection.
latest = coll.find_one(
sort=[(index, pymongo.DESCENDING)])[index]
start = datetime.strftime(latest + timedelta(days=1),'%Y%m%d')
end = datetime.strftime(datetime.now(), '%Y%m%d')
# then download.
self._api.get_equity_D1_mongod(db, start, end, sessionNum)
except Exception, e:
msg = '[MONGOD]: Unable to update data; ' + str(e)
raise VNPAST_DatabaseError(msg)
def update_equity_M1(self):
"""
"""
pass
#----------------------------------------------------------------------
# Fetch method.
def fetch(self, dbName, ticker, start, end, output='list'):
"""
"""
# check inputs' validity.
if output not in ['df', 'list', 'json']:
raise ValueError('[MONGOD]: Unsupported output type.')
if dbName not in self._dbNames:
raise ValueError('[MONGOD]: Unable to locate database name.')
db = self._dbs[dbName]
dbSelf = db['self']
dbIndex = db['index']
try:
            coll = dbSelf[ticker]
if len(start)==8 and len(end)==8:
# yyyymmdd, len()=8
start = datetime.strptime(start, '%Y%m%d')
end = datetime.strptime(end, '%Y%m%d')
elif len(start)==14 and len(end)==14:
# yyyymmdd HH:MM, len()=14
start = datetime.strptime(start, '%Y%m%d %H:%M')
end = datetime.strptime(end, '%Y%m%d %H:%M')
else:
pass
docs = []
# find in MongoDB.
for doc in coll.find(filter={dbIndex: {'$lte': end,
'$gte': start}}, projection={'_id': False}):
docs.append(doc)
if output == 'list':
return docs[::-1]
except Exception, e:
msg = '[MONGOD]: Error encountered when fetching data' + \
'from MongoDB; '+ str(e)
return -1
if __name__ == '__main__':
dc = DBConfig()
api = PyApi(Config())
mc = MongodController(dc, api)
mc.update_index_D1()
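    # A hedged usage sketch of fetch(); the ticker and date range are assumptions:
    # docs = mc.fetch('EQU_D1', '000001', '20150101', '20150801', output='list')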
| mit |
petosegan/scikit-learn | examples/ensemble/plot_adaboost_multiclass.py | 354 | 4124 | """
=====================================
Multi-class AdaBoosted Decision Trees
=====================================
This example reproduces Figure 1 of Zhu et al [1] and shows how boosting can
improve prediction accuracy on a multi-class problem. The classification
dataset is constructed by taking a ten-dimensional standard normal distribution
and defining three classes separated by nested concentric ten-dimensional
spheres such that roughly equal numbers of samples are in each class (quantiles
of the :math:`\chi^2` distribution).
The performance of the SAMME and SAMME.R [1] algorithms is compared. SAMME.R
uses the probability estimates to update the additive model, while SAMME uses
the classifications only. As the example illustrates, the SAMME.R algorithm
typically converges faster than SAMME, achieving a lower test error with fewer
boosting iterations. The error of each algorithm on the test set after each
boosting iteration is shown on the left, the classification error on the test
set of each tree is shown in the middle, and the boost weight of each tree is
shown on the right. All trees have a weight of one in the SAMME.R algorithm and
therefore are not shown.
.. [1] J. Zhu, H. Zou, S. Rosset, T. Hastie, "Multi-class AdaBoost", 2009.
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
from sklearn.externals.six.moves import zip
import matplotlib.pyplot as plt
from sklearn.datasets import make_gaussian_quantiles
from sklearn.ensemble import AdaBoostClassifier
from sklearn.metrics import accuracy_score
from sklearn.tree import DecisionTreeClassifier
X, y = make_gaussian_quantiles(n_samples=13000, n_features=10,
n_classes=3, random_state=1)
n_split = 3000
X_train, X_test = X[:n_split], X[n_split:]
y_train, y_test = y[:n_split], y[n_split:]
bdt_real = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1)
bdt_discrete = AdaBoostClassifier(
DecisionTreeClassifier(max_depth=2),
n_estimators=600,
learning_rate=1.5,
algorithm="SAMME")
bdt_real.fit(X_train, y_train)
bdt_discrete.fit(X_train, y_train)
real_test_errors = []
discrete_test_errors = []
for real_test_predict, discrete_test_predict in zip(
        bdt_real.staged_predict(X_test), bdt_discrete.staged_predict(X_test)):
    real_test_errors.append(
        1. - accuracy_score(real_test_predict, y_test))
    discrete_test_errors.append(
        1. - accuracy_score(discrete_test_predict, y_test))
n_trees_discrete = len(bdt_discrete)
n_trees_real = len(bdt_real)
# Boosting might terminate early, but the following arrays are always
# n_estimators long. We crop them to the actual number of trees here:
discrete_estimator_errors = bdt_discrete.estimator_errors_[:n_trees_discrete]
real_estimator_errors = bdt_real.estimator_errors_[:n_trees_real]
discrete_estimator_weights = bdt_discrete.estimator_weights_[:n_trees_discrete]
plt.figure(figsize=(15, 5))
plt.subplot(131)
plt.plot(range(1, n_trees_discrete + 1),
discrete_test_errors, c='black', label='SAMME')
plt.plot(range(1, n_trees_real + 1),
real_test_errors, c='black',
linestyle='dashed', label='SAMME.R')
plt.legend()
plt.ylim(0.18, 0.62)
plt.ylabel('Test Error')
plt.xlabel('Number of Trees')
plt.subplot(132)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_errors,
"b", label='SAMME', alpha=.5)
plt.plot(range(1, n_trees_real + 1), real_estimator_errors,
"r", label='SAMME.R', alpha=.5)
plt.legend()
plt.ylabel('Error')
plt.xlabel('Number of Trees')
plt.ylim((.2,
max(real_estimator_errors.max(),
discrete_estimator_errors.max()) * 1.2))
plt.xlim((-20, len(bdt_discrete) + 20))
plt.subplot(133)
plt.plot(range(1, n_trees_discrete + 1), discrete_estimator_weights,
"b", label='SAMME')
plt.legend()
plt.ylabel('Weight')
plt.xlabel('Number of Trees')
plt.ylim((0, discrete_estimator_weights.max() * 1.2))
plt.xlim((-20, n_trees_discrete + 20))
# prevent overlapping y-axis labels
plt.subplots_adjust(wspace=0.25)
plt.show()
| bsd-3-clause |
akrherz/iem | htdocs/plotting/auto/scripts100/p153.py | 1 | 6880 | """Highest hourly values"""
from collections import OrderedDict
import datetime
import pandas as pd
from pandas.io.sql import read_sql
from matplotlib.font_manager import FontProperties
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.plot.use_agg import plt
from pyiem.exceptions import NoDataFound
PDICT = OrderedDict(
[
("max_dwpf", "Highest Dew Point Temperature"),
("min_dwpf", "Lowest Dew Point Temperature"),
("max_tmpf", "Highest Air Temperature"),
("min_tmpf", "Lowest Air Temperature"),
("max_feel", "Highest Feels Like Temperature"),
("min_feel", "Lowest Feels Like Temperature"),
("max_mslp", "Maximum Sea Level Pressure"),
("min_mslp", "Minimum Sea Level Pressure"),
("max_alti", "Maximum Pressure Altimeter"),
("min_alti", "Minimum Pressure Altimeter"),
]
)
UNITS = {
"max_dwpf": "F",
"max_tmpf": "F",
"min_dwpf": "F",
"min_tmpf": "F",
"min_feel": "F",
"max_feel": "F",
"max_mslp": "mb",
"min_mslp": "mb",
"max_alti": "in",
"min_alti": "in",
}
MDICT = OrderedDict(
[
("all", "No Month Limit"),
("spring", "Spring (MAM)"),
("fall", "Fall (SON)"),
("winter", "Winter (DJF)"),
("summer", "Summer (JJA)"),
("gs", "1 May to 30 Sep"),
("jan", "January"),
("feb", "February"),
("mar", "March"),
("apr", "April"),
("may", "May"),
("jun", "June"),
("jul", "July"),
("aug", "August"),
("sep", "September"),
("oct", "October"),
("nov", "November"),
("dec", "December"),
]
)
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc[
"description"
] = """This table presents the extreme hourly value of
some variable of your choice based on available observations maintained
by the IEM. Sadly, this app will likely point out some bad data points
as such points tend to be obvious at extremes. If you contact us to
point out troubles, we'll certainly attempt to fix the archive to
    remove the bad data points. Observations are arbitrarily bumped 10
minutes into the future to place the near to top of the hour obs on
that hour. For example, a 9:53 AM observation becomes the ob for 10 AM.
"""
desc["arguments"] = [
dict(
type="zstation",
name="zstation",
default="AMW",
network="IA_ASOS",
label="Select Station:",
),
dict(
type="select",
name="month",
default="all",
options=MDICT,
label="Select Month/Season/All",
),
dict(
type="select",
name="var",
options=PDICT,
default="max_dwpf",
label="Which Variable to Plot",
),
]
return desc
def plotter(fdict):
""" Go """
font0 = FontProperties()
font0.set_family("monospace")
font0.set_size(16)
font1 = FontProperties()
font1.set_size(16)
pgconn = get_dbconn("asos")
ctx = get_autoplot_context(fdict, get_description())
varname = ctx["var"]
varname2 = varname.split("_")[1]
if varname2 in ["dwpf", "tmpf", "feel"]:
varname2 = "i" + varname2
month = ctx["month"]
station = ctx["zstation"]
if month == "all":
months = range(1, 13)
elif month == "fall":
months = [9, 10, 11]
elif month == "winter":
months = [12, 1, 2]
elif month == "spring":
months = [3, 4, 5]
elif month == "summer":
months = [6, 7, 8]
elif month == "gs":
months = [5, 6, 7, 8, 9]
else:
ts = datetime.datetime.strptime("2000-" + month + "-01", "%Y-%b-%d")
        # tuple(months) below is passed directly into the SQL IN clause
months = [ts.month]
df = read_sql(
f"""
WITH obs as (
SELECT (valid + '10 minutes'::interval) at time zone %s as ts,
tmpf::int as itmpf, dwpf::int as idwpf,
feel::int as ifeel, mslp, alti from alldata
where station = %s and
extract(month from valid at time zone %s) in %s),
agg1 as (
SELECT extract(hour from ts) as hr,
max(idwpf) as max_dwpf,
max(itmpf) as max_tmpf,
min(idwpf) as min_dwpf,
min(itmpf) as min_tmpf,
min(ifeel) as min_feel,
max(ifeel) as max_feel,
max(alti) as max_alti,
min(alti) as min_alti,
max(mslp) as max_mslp,
min(mslp) as min_mslp
from obs GROUP by hr)
SELECT o.ts, a.hr::int as hr,
a.{varname} from agg1 a JOIN obs o on
(a.hr = extract(hour from o.ts)
and a.{varname} = o.{varname2})
ORDER by a.hr ASC, o.ts DESC
""",
pgconn,
params=(
ctx["_nt"].sts[station]["tzname"],
station,
ctx["_nt"].sts[station]["tzname"],
tuple(months),
),
index_col=None,
)
if df.empty:
raise NoDataFound("No Data was found.")
y0 = 0.1
yheight = 0.8
dy = yheight / 24.0
(fig, ax) = plt.subplots(1, 1, figsize=(8, 8))
ax.set_position([0.12, y0, 0.57, yheight])
ax.barh(df["hr"], df[varname], align="center")
ax.set_ylim(-0.5, 23.5)
ax.set_yticks([0, 4, 8, 12, 16, 20])
ax.set_yticklabels(["Mid", "4 AM", "8 AM", "Noon", "4 PM", "8 PM"])
ax.grid(True)
ax.set_xlim([df[varname].min() - 5, df[varname].max() + 5])
ax.set_ylabel(
"Local Time %s" % (ctx["_nt"].sts[station]["tzname"],),
fontproperties=font1,
)
ab = ctx["_nt"].sts[station]["archive_begin"]
if ab is None:
raise NoDataFound("Unknown station metadata")
fig.text(
0.5,
0.93,
("%s [%s] %s-%s\n" "%s [%s]")
% (
ctx["_nt"].sts[station]["name"],
station,
ab.year,
datetime.date.today().year,
PDICT[varname],
MDICT[month],
),
ha="center",
fontproperties=font1,
)
ypos = y0 + (dy / 2.0)
for hr in range(24):
sdf = df[df["hr"] == hr]
if sdf.empty:
continue
row = sdf.iloc[0]
fig.text(
0.7,
ypos,
"%3.0f: %s%s"
% (
row[varname],
pd.Timestamp(row["ts"]).strftime("%d %b %Y"),
("*" if len(sdf.index) > 1 else ""),
),
fontproperties=font0,
va="center",
)
ypos += dy
ax.set_xlabel(
"%s %s, * denotes ties" % (PDICT[varname], UNITS[varname]),
fontproperties=font1,
)
return plt.gcf(), df
if __name__ == "__main__":
plotter(dict())
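    # For example, with assumed values for the arguments described in
    # get_description():
    # plotter(dict(zstation="DSM", network="IA_ASOS", var="min_tmpf", month="jul"))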
| mit |
ComputoCienciasUniandes/MetodosComputacionalesLaboratorio | 2017-1/lab8_EJ3/lab8SOL_eJ3/spring_mass.py | 1 | 1084 | import numpy as np
import matplotlib.pyplot as plt
N = 5000 #number of steps to take
xo = 0.2 #initial position in m
vo = 0.0 #initial velocity
tau = 4.0 #total time for the simulation in s .
dt = tau/float(N) # time step
k = 42.0 #spring constant in N/m
m = 0.25 #mass in kg
g = 9.8 #in m/ s ^2
mu = 0.15 #friction coefficient
y = np.zeros([N,2])
#y is the vector of positions and velocities.
y[0,0] = xo #initial position
y[0,1] = vo #initial velocity
#This function defines the derivatives of the system.
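#The system being integrated is m*x'' = -k*x - mu*m*g*sign(x'); as a first-order
#system y = [x, v], this becomes y' = [v, -k/m*x - mu*g*sign(v)].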
def SpringMass(state,time) :
g0=state[1]
if g0 > 0 :
g1=-k/m*state[0]-g*mu
else:
g1=-k/m*state[0]+g*mu
return np.array([g0,g1])
#This is the basic step in the Euler Method for solving ODEs.
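#A single forward-Euler step: y_{n+1} = y_n + dt*f(y_n, t_n), first-order accurate in dt.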
def euler (y,time,dt,derivs) :
k0 = dt*derivs(y,time)
ynext = y + k0
return ynext
for j in range (N-1):
y[j+1] = euler(y[j],0,dt,SpringMass)
#Just to plot
time = np.linspace(0,tau,N)
plt.plot(time, y[:,0],'b',label="position")
plt.xlabel( "time" )
plt.ylabel( "position" )
plt.savefig('spring_mass.png')
| mit |
hughdbrown/QSTK-nohist | src/qstkfeat/featutil.py | 1 | 18051 | '''
(c) 2011, 2012 Georgia Tech Research Corporation
This source code is released under the New BSD license. Please see
http://wiki.quantsoftware.org/index.php?title=QSTK_License
for license details.
Created on Nov 7, 2011
@author: John Cornwell
@contact: JohnWCornwellV@gmail.com
@summary: Contains utility functions to interact with feature functions in features.py
'''
''' Python imports '''
import math
import pickle
import datetime as dt
from dateutil.relativedelta import relativedelta
''' 3rd Party Imports '''
import numpy as np
import matplotlib.pyplot as plt
''' Our Imports '''
import qstklearn.kdtknn as kdt
from qstkutil import DataAccess as da
from qstkutil import qsdateutil as du
from qstkutil import tsutil as tsu
from qstkfeat.features import *
from qstkfeat.classes import class_fut_ret
def getMarketRel(dData, sRel='$SPX'):
'''
@summary: Calculates market relative data.
@param dData - Dictionary containing data to be used, requires specific naming: open/high/low/close/volume
@param sRel - Stock ticker to make the data relative to, $SPX is default.
@return: Dictionary of market relative values
'''
if sRel not in dData['close'].columns:
raise KeyError('Market relative stock %s not found in getMR()' % sRel)
dRet = {}
''' Make all data market relative, except for volume '''
for sKey in dData.keys():
''' Don't calculate market relative volume, but still copy it over '''
if sKey == 'volume':
dRet['volume'] = dData['volume']
continue
dfAbsolute = dData[sKey]
dfRelative = pand.DataFrame(index=dfAbsolute.index, columns=dfAbsolute.columns, data=np.zeros(dfAbsolute.shape))
''' Get returns and strip off the market returns '''
naRets = dfAbsolute.values.copy()
tsu.returnize0(naRets)
naMarkRets = naRets[:, list(dfAbsolute.columns).index(sRel)]
for i, sStock in enumerate(dfAbsolute.columns):
''' Don't change the 'market' stock '''
if sStock == sRel:
dfRelative.values[:, i] = dfAbsolute.values[:, i]
continue
naMarkRel = (naRets[:, i] - naMarkRets) + 1.0
''' Find the first non-nan value and start the price at 100 '''
for j in range(0, dfAbsolute.values.shape[0]):
if pand.isnull(dfAbsolute.values[j][i]):
dfRelative.values[j][i] = float('nan')
continue
dfRelative.values[j][i] = 100
break
''' Now fill prices out using market relative returns '''
for j in range(j + 1, dfAbsolute.values.shape[0]):
dfRelative.values[j][i] = dfRelative.values[j - 1][i] * naMarkRel[j]
''' Add dataFrame to dictionary to return, move to next key '''
dRet[sKey] = dfRelative
return dRet
def applyFeatures(dData, lfcFeatures, ldArgs, sMarketRel=None, sLog=None):
'''
@summary: Calculates the feature values using a list of feature functions and arguments.
@param dData - Dictionary containing data to be used, requires specific naming: open/high/low/close/volume
@param lfcFeatures: List of feature functions, most likely coming from features.py
@param ldArgs: List of dictionaries containing arguments, passed as **kwargs
There is a special argument 'MR', if it exists, the data will be made market relative
@param sMarketRel: If not none, the data will all be made relative to the symbol provided
@param sLog: If not None, will be filename to log all of the features to
@return: list of dataframes containing values
'''
ldfRet = []
''' Calculate market relative data '''
if sMarketRel is not None:
dDataRelative = getMarketRel(dData, sRel=sMarketRel)
''' Loop though feature functions, pass each data dictionary and arguments '''
for i, fcFeature in enumerate(lfcFeatures):
''' Check for special arguments '''
if 'MR' in ldArgs[i]:
if not ldArgs[i]['MR']:
print 'Warning, setting MR to false will still be Market Relative',\
'simply do not include MR key in args'
if sMarketRel is None:
raise AssertionError('Functions require market relative stock but sMarketRel=None')
del ldArgs[i]['MR']
ldfRet.append(fcFeature(dDataRelative, **ldArgs[i]))
else:
ldfRet.append(fcFeature(dData, **ldArgs[i]))
if not sLog is None:
with open(sLog, 'wb') as fFile:
pickle.dump(ldfRet, fFile, -1)
return ldfRet
def loadFeatures(sLog):
'''
@summary: Loads cached features.
@param sLog: Filename of features.
@return: Numpy array containing values
'''
ldfRet = []
if not sLog is None:
with open(sLog, 'rb') as fFile:
ldfRet = pickle.load(fFile)
return ldfRet
def stackSyms(ldfFeatures, dtStart=None, dtEnd=None, lsSym=None, sDelNan='ALL', bShowRemoved=False):
'''
@summary: Remove symbols from the dataframes, effectively stacking all stocks on top of each other.
@param ldfFeatures: List of data frames of features.
@param dtStart: Start time, if None, uses all
@param dtEnd: End time, if None uses all
@param lsSym: List of symbols to use, if None, all are used.
@param sDelNan: Optional, default is ALL: delete any rows with a NaN in it
FEAT: Delete if any of the feature points are NaN, allow NaN classification
None: Do not delete any NaN rows
@return: Numpy array containing all features as columns and all
'''
if dtStart is None:
dtStart = ldfFeatures[0].index[0]
if dtEnd is None:
dtEnd = ldfFeatures[0].index[-1]
naRet = None
''' Stack stocks vertically '''
for sStock in ldfFeatures[0].columns:
if lsSym is not None and sStock not in lsSym:
continue
naStkData = None
''' Loop through all features, stacking columns horizontally '''
for dfFeat in ldfFeatures:
dfFeat = dfFeat.ix[dtStart:dtEnd]
if naStkData is None:
naStkData = np.array(dfFeat[sStock].values.reshape(-1, 1))
else:
naStkData = np.hstack((naStkData, dfFeat[sStock].values.reshape(-1, 1)))
''' Remove nan rows possibly'''
if 'ALL' == sDelNan or 'FEAT' == sDelNan:
llValidRows = []
for i in range(naStkData.shape[0]):
if 'ALL' == sDelNan and not math.isnan(np.sum(naStkData[i, :])) or \
'FEAT' == sDelNan and not math.isnan(np.sum(naStkData[i, :-1])):
llValidRows.append(i)
elif bShowRemoved:
print 'Removed', sStock, naStkData[i, :]
naStkData = naStkData[llValidRows, :]
''' Now stack each block of stock data vertically '''
if naRet is None:
naRet = naStkData
else:
naRet = np.vstack((naRet, naStkData))
return naRet
def normFeatures(naFeatures, fMin, fMax, bAbsolute, bIgnoreLast=True):
'''
@summary: Normalizes the featurespace.
@param naFeatures: Numpy array of features,
    @param fMin: Minimum value of the normalized feature range.
    @param fMax: Maximum value of the normalized feature range.
@param bAbsolute: If true, min value will be scaled to fMin, max to fMax, if false,
+-1 standard deviations will be scaled to fit between fMin and fMax, i.e. ~69% of the values
@param bIgnoreLast: If true, last column is ignored (assumed to be classification)
@return: list of (weights, shifts) to be used to normalize the query points
'''
fNewRange = fMax - fMin
lUseCols = naFeatures.shape[1]
if bIgnoreLast:
lUseCols -= 1
ltRet = []
''' Loop through all features '''
for i in range(lUseCols):
''' If absolutely scaled use exact min and max '''
if bAbsolute:
fFeatMin = np.min(naFeatures[:, i])
fFeatMax = np.max(naFeatures[:, i])
else:
            ''' Otherwise use mean +-1 std deviations for min/max (~68% of data) '''
fMean = np.average(naFeatures[:, i])
fStd = np.std(naFeatures[:, i])
fFeatMin = fMean - fStd
fFeatMax = fMean + fStd
''' Calculate multiplier and shift variable so that new data fits in specified range '''
fRange = fFeatMax - fFeatMin
fMult = fNewRange / fRange
fShift = fMin - (fFeatMin * fMult)
''' scale and shift, save in return array '''
naFeatures[:, i] *= fMult
naFeatures[:, i] += fShift
ltRet.append((fMult, fShift))
return ltRet
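# A hedged usage sketch pairing normFeatures with normQuery (the range values and
# array names are illustrative assumptions; query points exclude the class column):
#   ltWeightShift = normFeatures(naTrainFeatures, -1.0, 1.0, bAbsolute=False)
#   normQuery(naQueryPoints, ltWeightShift)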
def normQuery(naQueries, ltWeightShift):
'''
@summary: Normalizes the queries using the given normalization parameters generated from training data.
@param naQueries: Numpy array of queries
@param ltWeightShift: List of weights and shift amounts to be applied to each query.
@return: None, modifies naQueries
'''
assert naQueries.shape[1] == len(ltWeightShift)
for i in range(naQueries.shape[1]):
''' scale and shift, save in return array '''
naQueries[:, i] *= ltWeightShift[i][0]
naQueries[:, i] += ltWeightShift[i][1]
def createKnnLearner(naFeatures, lKnn=30, leafsize=10, method='mean'):
'''
@summary: Creates a quick KNN learner
    @param naFeatures: Numpy array of features to train on.
    @param lKnn: Number of nearest neighbors to use.
    @param leafsize: Leaf size for the underlying kd-tree.
    @param method: How the k neighbors are combined, e.g. 'mean'.
    @return: Trained kdtknn learner.
'''
cLearner = kdt.kdtknn(k=lKnn, method=method, leafsize=leafsize)
cLearner.addEvidence(naFeatures)
return cLearner
def log500(sLog):
'''
    @summary: Computes features for the S&P 500 symbol list and logs them to a file.
@param sLog: Filename of features.
@return: Nothing, logs features to desired location
'''
lsSym = ['A', 'AA', 'AAPL', 'ABC', 'ABT', 'ACE', 'ACN', 'ADBE', 'ADI', 'ADM', 'ADP', 'ADSK', 'AEE', 'AEP', 'AES', 'AET', 'AFL', 'AGN', 'AIG', 'AIV', 'AIZ', 'AKAM', 'AKS', 'ALL', 'ALTR', 'AMAT', 'AMD', 'AMGN', 'AMP', 'AMT', 'AMZN', 'AN', 'ANF', 'ANR', 'AON', 'APA', 'APC', 'APD', 'APH', 'APOL', 'ARG', 'ATI', 'AVB', 'AVP', 'AVY', 'AXP', 'AZO', 'BA', 'BAC', 'BAX', 'BBBY', 'BBT', 'BBY', 'BCR', 'BDX', 'BEN', 'BF.B', 'BHI', 'BIG', 'BIIB', 'BK', 'BLK', 'BLL', 'BMC', 'BMS', 'BMY', 'BRCM', 'BRK.B', 'BSX', 'BTU', 'BXP', 'C', 'CA', 'CAG', 'CAH', 'CAM', 'CAT', 'CB', 'CBG', 'CBS', 'CCE', 'CCL', 'CEG', 'CELG', 'CERN', 'CF', 'CFN', 'CHK', 'CHRW', 'CI', 'CINF', 'CL', 'CLF', 'CLX', 'CMA', 'CMCSA', 'CME', 'CMG', 'CMI', 'CMS', 'CNP', 'CNX', 'COF', 'COG', 'COH', 'COL', 'COP', 'COST', 'COV', 'CPB', 'CPWR', 'CRM', 'CSC', 'CSCO', 'CSX', 'CTAS', 'CTL', 'CTSH', 'CTXS', 'CVC', 'CVH', 'CVS', 'CVX', 'D', 'DD', 'DE', 'DELL', 'DF', 'DFS', 'DGX', 'DHI', 'DHR', 'DIS', 'DISCA', 'DNB', 'DNR', 'DO', 'DOV', 'DOW', 'DPS', 'DRI', 'DTE', 'DTV', 'DUK', 'DV', 'DVA', 'DVN', 'EBAY', 'ECL', 'ED', 'EFX', 'EIX', 'EL', 'EMC', 'EMN', 'EMR', 'EOG', 'EP', 'EQR', 'EQT', 'ERTS', 'ESRX', 'ETFC', 'ETN', 'ETR', 'EW', 'EXC', 'EXPD', 'EXPE', 'F', 'FAST', 'FCX', 'FDO', 'FDX', 'FE', 'FFIV', 'FHN', 'FII', 'FIS', 'FISV', 'FITB', 'FLIR', 'FLR', 'FLS', 'FMC', 'FO', 'FRX', 'FSLR', 'FTI', 'FTR', 'GAS', 'GCI', 'GD', 'GE', 'GILD', 'GIS', 'GLW', 'GME', 'GNW', 'GOOG', 'GPC', 'GPS', 'GR', 'GS', 'GT', 'GWW', 'HAL', 'HAR', 'HAS', 'HBAN', 'HCBK', 'HCN', 'HCP', 'HD', 'HES', 'HIG', 'HNZ', 'HOG', 'HON', 'HOT', 'HP', 'HPQ', 'HRB', 'HRL', 'HRS', 'HSP', 'HST', 'HSY', 'HUM', 'IBM', 'ICE', 'IFF', 'IGT', 'INTC', 'INTU', 'IP', 'IPG', 'IR', 'IRM', 'ISRG', 'ITT', 'ITW', 'IVZ', 'JBL', 'JCI', 'JCP', 'JDSU', 'JEC', 'JNJ', 'JNPR', 'JNS', 'JOYG', 'JPM', 'JWN', 'K', 'KEY', 'KFT', 'KIM', 'KLAC', 'KMB', 'KMX', 'KO', 'KR', 'KSS', 'L', 'LEG', 'LEN', 'LH', 'LIFE', 'LLL', 'LLTC', 'LLY', 'LM', 'LMT', 'LNC', 'LO', 'LOW', 'LSI', 'LTD', 'LUK', 'LUV', 'LXK', 'M', 'MA', 'MAR', 'MAS', 'MAT', 'MCD', 'MCHP', 'MCK', 'MCO', 'MDT', 'MET', 'MHP', 'MHS', 'MJN', 'MKC', 'MMC', 'MMI', 'MMM', 'MO', 'MOLX', 'MON', 'MOS', 'MPC', 'MRK', 'MRO', 'MS', 'MSFT', 'MSI', 'MTB', 'MU', 'MUR', 'MWV', 'MWW', 'MYL', 'NBL', 'NBR', 'NDAQ', 'NE', 'NEE', 'NEM', 'NFLX', 'NFX', 'NI', 'NKE', 'NOC', 'NOV', 'NRG', 'NSC', 'NTAP', 'NTRS', 'NU', 'NUE', 'NVDA', 'NVLS', 'NWL', 'NWSA', 'NYX', 'OI', 'OKE', 'OMC', 'ORCL', 'ORLY', 'OXY', 'PAYX', 'PBCT', 'PBI', 'PCAR', 'PCG', 'PCL', 'PCLN', 'PCP', 'PCS', 'PDCO', 'PEG', 'PEP', 'PFE', 'PFG', 'PG', 'PGN', 'PGR', 'PH', 'PHM', 'PKI', 'PLD', 'PLL', 'PM', 'PNC', 'PNW', 'POM', 'PPG', 'PPL', 'PRU', 'PSA', 'PWR', 'PX', 'PXD', 'QCOM', 'QEP', 'R', 'RAI', 'RDC', 'RF', 'RHI', 'RHT', 'RL', 'ROK', 'ROP', 'ROST', 'RRC', 'RRD', 'RSG', 'RTN', 'S', 'SAI', 'SBUX', 'SCG', 'SCHW', 'SE', 'SEE', 'SHLD', 'SHW', 'SIAL', 'SJM', 'SLB', 'SLE', 'SLM', 'SNA', 'SNDK', 'SNI', 'SO', 'SPG', 'SPLS', 'SRCL', 'SRE', 'STI', 'STJ', 'STT', 'STZ', 'SUN', 'SVU', 'SWK', 'SWN', 'SWY', 'SYK', 'SYMC', 'SYY', 'T', 'TAP', 'TDC', 'TE', 'TEG', 'TEL', 'TER', 'TGT', 'THC', 'TIE', 'TIF', 'TJX', 'TLAB', 'TMK', 'TMO', 'TROW', 'TRV', 'TSN', 'TSO', 'TSS', 'TWC', 'TWX', 'TXN', 'TXT', 'TYC', 'UNH', 'UNM', 'UNP', 'UPS', 'URBN', 'USB', 'UTX', 'V', 'VAR', 'VFC', 'VIA.B', 'VLO', 'VMC', 'VNO', 'VRSN', 'VTR', 'VZ', 'WAG', 'WAT', 'WDC', 'WEC', 'WFC', 'WFM', 'WFR', 'WHR', 'WIN', 'WLP', 'WM', 'WMB', 'WMT', 'WPI', 'WPO', 'WU', 'WY', 'WYN', 'WYNN', 'X', 'XEL', 'XL', 'XLNX', 'XOM', 'XRAY', 'XRX', 'YHOO', 'YUM', 'ZION', 'ZMH']
lsSym.append('$SPX')
lsSym.sort()
''' Max lookback is 6 months '''
dtEnd = dt.datetime.now()
dtEnd = dtEnd.replace(hour=16, minute=0, second=0, microsecond=0)
dtStart = dtEnd - relativedelta(months=6)
''' Pull in current data '''
norObj = da.DataAccess('Norgate')
''' Get 2 extra months for moving averages and future returns '''
ldtTimestamps = du.getNYSEdays(dtStart - relativedelta(months=2),
dtEnd + relativedelta(months=2), dt.timedelta(hours=16))
dfPrice = norObj.get_data(ldtTimestamps, lsSym, 'close')
dfVolume = norObj.get_data(ldtTimestamps, lsSym, 'volume')
''' Imported functions from qstkfeat.features, NOTE: last function is classification '''
lfcFeatures, ldArgs, lsNames = getFeatureFuncs()
''' Generate a list of DataFrames, one for each feature, with the same index/column structure as price data '''
applyFeatures(dfPrice, dfVolume, lfcFeatures, ldArgs, sLog=sLog)
def getFeatureFuncs():
'''
@summary: Gets feature functions supported by the website.
@return: Tuple containing (list of functions, list of arguments, list of names)
'''
lfcFeatures = [featMA, featMA, featRSI, featDrawDown, featRunUp, featVolumeDelta, featAroon, featAroon, featStochastic, featBeta, featBollinger, featCorrelation, featPrice, class_fut_ret]
lsNames = ['MovingAverage', 'RelativeMovingAverage', 'RSI', 'DrawDown', 'RunUp', 'VolumeDelta', 'AroonUp', 'AroonLow', 'Stochastic', 'Beta', 'Bollinger', 'Correlation', 'Price', 'FutureReturn']
''' Custom Arguments '''
ldArgs = [
{'lLookback':30, 'bRel':False},
{'lLookback':30, 'bRel':True},
{'lLookback':14},
{'lLookback':30},
{'lLookback':30},
{'lLookback':30},
{'bDown':False, 'lLookback':25},
{'bDown':True, 'lLookback':25},
{'lLookback':14},
{'lLookback':14, 'sMarket':'SPY'},
{'lLookback':20},
{'lLookback':20, 'sRel':'SPY'},
{},
{'lLookforward':5, 'sRel':None, 'bUseOpen':False}
]
return lfcFeatures, ldArgs, lsNames
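# A minimal sketch of how the parallel lists above are meant to be consumed.
# It assumes applyFeatures returns one DataFrame per feature when sLog is
# None (the call signature mirrors the one used above); names are illustrative:
#
#     lfcFeatures, ldArgs, lsNames = getFeatureFuncs()
#     ldfFeatures = applyFeatures(dfPrice, dfVolume, lfcFeatures, ldArgs)
#     for sName, dfFeat in zip(lsNames, ldfFeatures):
#         print sName, dfFeat.shape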
def testFeature(fcFeature, dArgs):
'''
@summary: Quick function to run a feature on some data and plot it to see if it works.
@param fcFeature: Feature function to test
@param dArgs: Arguments to pass into feature function
@return: Void
'''
''' Get train data for Jan-May 2009 '''
dtStart = dt.datetime(2009, 1, 1)
dtEnd = dt.datetime(2009, 5, 1)
''' Pull in current training data and test data '''
norObj = da.DataAccess('Norgate')
''' Get 2 extra months for moving averages and future returns '''
ldtTimestamps = du.getNYSEdays(dtStart, dtEnd, dt.timedelta(hours=16))
lsSym = ['GOOG']
lsSym.append('WMT')
lsSym.append('$SPX')
lsSym.append('$VIX')
lsSym.sort()
lsKeys = ['open', 'high', 'low', 'close', 'volume']
ldfData = norObj.get_data(ldtTimestamps, lsSym, lsKeys)
dData = dict(zip(lsKeys, ldfData))
dfPrice = dData['close']
#print dfPrice.values
''' Generate a list of DataFrames, one for each feature, with the same index/column structure as price data '''
dtStart = dt.datetime.now()
ldfFeatures = applyFeatures(dData, [fcFeature], [dArgs], sMarketRel='$SPX')
print 'Runtime:', dt.datetime.now() - dtStart
''' Use last 3 months of index, to avoid lookback nans '''
dfPrint = ldfFeatures[0]['GOOG']
print 'GOOG values:', dfPrint.values
print 'GOOG Sum:', dfPrint.ix[dfPrint.notnull()].sum()
for sSym in lsSym:
plt.subplot(211)
plt.plot(ldfFeatures[0].index[-60:], dfPrice[sSym].values[-60:])
plt.plot(ldfFeatures[0].index[-60:], dfPrice['$SPX'].values[-60:] * dfPrice[sSym].values[-60] / dfPrice['$SPX'].values[-60])
plt.legend((sSym, '$SPX'))
plt.title(sSym)
plt.subplot(212)
plt.plot(ldfFeatures[0].index[-60:], ldfFeatures[0][sSym].values[-60:])
plt.title('%s-%s' % (fcFeature.__name__, str(dArgs)))
plt.show()
if __name__ == '__main__':
pass
| bsd-3-clause |
tombstone/models | research/skip_thoughts/skip_thoughts/vocabulary_expansion.py | 1 | 7375 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Compute an expanded vocabulary of embeddings using a word2vec model.
This script loads the word embeddings from a trained skip-thoughts model and
from a trained word2vec model (typically with a larger vocabulary). It trains a
linear regression model without regularization to learn a linear mapping from
the word2vec embedding space to the skip-thoughts embedding space. The model is
then applied to all words in the word2vec vocabulary, yielding vectors in the
skip-thoughts word embedding space for the union of the two vocabularies.
The linear regression task is to learn a parameter matrix W to minimize
|| X - Y * W ||^2,
where X is a matrix of skip-thoughts embeddings of shape [num_words, dim1],
Y is a matrix of word2vec embeddings of shape [num_words, dim2], and W is a
matrix of shape [dim2, dim1].
This is based on the "Translation Matrix" method from the paper:
"Exploiting Similarities among Languages for Machine Translation"
Tomas Mikolov, Quoc V. Le, Ilya Sutskever
https://arxiv.org/abs/1309.4168
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os.path
import gensim.models
import numpy as np
import sklearn.linear_model
import tensorflow as tf
FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_string("skip_thoughts_model", None,
"Checkpoint file or directory containing a checkpoint "
"file.")
tf.flags.DEFINE_string("skip_thoughts_vocab", None,
"Path to vocabulary file containing a list of newline-"
"separated words where the word id is the "
"corresponding 0-based index in the file.")
tf.flags.DEFINE_string("word2vec_model", None,
"File containing a word2vec model in binary format.")
tf.flags.DEFINE_string("output_dir", None, "Output directory.")
tf.logging.set_verbosity(tf.logging.INFO)
def _load_skip_thoughts_embeddings(checkpoint_path):
"""Loads the embedding matrix from a skip-thoughts model checkpoint.
Args:
checkpoint_path: Model checkpoint file or directory containing a checkpoint
file.
Returns:
word_embedding: A numpy array of shape [vocab_size, embedding_dim].
Raises:
ValueError: If no checkpoint file matches checkpoint_path.
"""
if tf.gfile.IsDirectory(checkpoint_path):
checkpoint_file = tf.train.latest_checkpoint(checkpoint_path)
if not checkpoint_file:
raise ValueError("No checkpoint file found in %s" % checkpoint_path)
else:
checkpoint_file = checkpoint_path
tf.logging.info("Loading skip-thoughts embedding matrix from %s",
checkpoint_file)
reader = tf.train.NewCheckpointReader(checkpoint_file)
word_embedding = reader.get_tensor("word_embedding")
tf.logging.info("Loaded skip-thoughts embedding matrix of shape %s",
word_embedding.shape)
return word_embedding
def _load_vocabulary(filename):
"""Loads a vocabulary file.
Args:
filename: Path to text file containing newline-separated words.
Returns:
vocab: A dictionary mapping word to word id.
"""
tf.logging.info("Reading vocabulary from %s", filename)
vocab = collections.OrderedDict()
with tf.gfile.GFile(filename, mode="rb") as f:
for i, line in enumerate(f):
word = line.decode("utf-8").strip()
assert word not in vocab, "Attempting to add word twice: %s" % word
vocab[word] = i
tf.logging.info("Read vocabulary of size %d", len(vocab))
return vocab
def _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab, word2vec):
"""Runs vocabulary expansion on a skip-thoughts model using a word2vec model.
Args:
skip_thoughts_emb: A numpy array of shape [skip_thoughts_vocab_size,
skip_thoughts_embedding_dim].
skip_thoughts_vocab: A dictionary of word to id.
word2vec: An instance of gensim.models.Word2Vec.
Returns:
combined_emb: A dictionary mapping words to embedding vectors.
"""
# Find words shared between the two vocabularies.
tf.logging.info("Finding shared words")
shared_words = [w for w in word2vec.vocab if w in skip_thoughts_vocab]
# Select embedding vectors for shared words.
tf.logging.info("Selecting embeddings for %d shared words", len(shared_words))
shared_st_emb = skip_thoughts_emb[[
skip_thoughts_vocab[w] for w in shared_words
]]
shared_w2v_emb = word2vec[shared_words]
# Train a linear regression model on the shared embedding vectors.
tf.logging.info("Training linear regression model")
model = sklearn.linear_model.LinearRegression()
model.fit(shared_w2v_emb, shared_st_emb)
# Create the expanded vocabulary.
tf.logging.info("Creating embeddings for expanded vocabuary")
combined_emb = collections.OrderedDict()
for w in word2vec.vocab:
# Ignore words with underscores (spaces).
if "_" not in w:
w_emb = model.predict(word2vec[w].reshape(1, -1))
combined_emb[w] = w_emb.reshape(-1)
for w in skip_thoughts_vocab:
combined_emb[w] = skip_thoughts_emb[skip_thoughts_vocab[w]]
tf.logging.info("Created expanded vocabulary of %d words", len(combined_emb))
return combined_emb
def main(unused_argv):
if not FLAGS.skip_thoughts_model:
raise ValueError("--skip_thoughts_model is required.")
if not FLAGS.skip_thoughts_vocab:
raise ValueError("--skip_thoughts_vocab is required.")
if not FLAGS.word2vec_model:
raise ValueError("--word2vec_model is required.")
if not FLAGS.output_dir:
raise ValueError("--output_dir is required.")
if not tf.gfile.IsDirectory(FLAGS.output_dir):
tf.gfile.MakeDirs(FLAGS.output_dir)
# Load the skip-thoughts embeddings and vocabulary.
skip_thoughts_emb = _load_skip_thoughts_embeddings(FLAGS.skip_thoughts_model)
skip_thoughts_vocab = _load_vocabulary(FLAGS.skip_thoughts_vocab)
# Load the Word2Vec model.
word2vec = gensim.models.KeyedVectors.load_word2vec_format(
FLAGS.word2vec_model, binary=True)
# Run vocabulary expansion.
embedding_map = _expand_vocabulary(skip_thoughts_emb, skip_thoughts_vocab,
word2vec)
# Save the output.
vocab = list(embedding_map.keys())
vocab_file = os.path.join(FLAGS.output_dir, "vocab.txt")
with tf.gfile.GFile(vocab_file, "w") as f:
f.write("\n".join(vocab))
tf.logging.info("Wrote vocabulary file to %s", vocab_file)
embeddings = np.array(list(embedding_map.values()))
embeddings_file = os.path.join(FLAGS.output_dir, "embeddings.npy")
np.save(embeddings_file, embeddings)
tf.logging.info("Wrote embeddings file to %s", embeddings_file)
if __name__ == "__main__":
tf.app.run()
| apache-2.0 |
shoyer/xray | xarray/core/utils.py | 1 | 18865 | """Internal utilties; not for external use
"""
import contextlib
import functools
import itertools
import os.path
import re
import warnings
from collections import OrderedDict
from typing import (
AbstractSet, Any, Callable, Container, Dict, Hashable, Iterable, Iterator,
Mapping, MutableMapping, MutableSet, Optional, Sequence, Tuple, TypeVar,
cast)
import numpy as np
import pandas as pd
from .pycompat import dask_array_type
try: # Fix typed collections in Python 3.5.0~3.5.2
from .pycompat import Mapping, MutableMapping, MutableSet # noqa: F811
except ImportError:
pass
K = TypeVar('K')
V = TypeVar('V')
T = TypeVar('T')
def _check_inplace(inplace: Optional[bool], default: bool = False) -> bool:
if inplace is None:
inplace = default
else:
warnings.warn('The inplace argument has been deprecated and will be '
'removed in a future version of xarray.',
FutureWarning, stacklevel=3)
return inplace
def alias_message(old_name: str, new_name: str) -> str:
return '%s has been deprecated. Use %s instead.' % (old_name, new_name)
def alias_warning(old_name: str, new_name: str, stacklevel: int = 3) -> None:
warnings.warn(alias_message(old_name, new_name), FutureWarning,
stacklevel=stacklevel)
def alias(obj: Callable[..., T], old_name: str) -> Callable[..., T]:
assert isinstance(old_name, str)
@functools.wraps(obj)
def wrapper(*args, **kwargs):
alias_warning(old_name, obj.__name__)
return obj(*args, **kwargs)
wrapper.__doc__ = alias_message(old_name, obj.__name__)
return wrapper
def _maybe_cast_to_cftimeindex(index: pd.Index) -> pd.Index:
from ..coding.cftimeindex import CFTimeIndex
if len(index) > 0 and index.dtype == 'O':
try:
return CFTimeIndex(index)
except (ImportError, TypeError):
return index
else:
return index
def safe_cast_to_index(array: Any) -> pd.Index:
"""Given an array, safely cast it to a pandas.Index.
If it is already a pandas.Index, return it unchanged.
Unlike pandas.Index, if the array has dtype=object or dtype=timedelta64,
this function will not attempt to do automatic type conversion but will
always return an index with dtype=object.
"""
if isinstance(array, pd.Index):
index = array
elif hasattr(array, 'to_index'):
index = array.to_index()
else:
kwargs = {}
if hasattr(array, 'dtype') and array.dtype.kind == 'O':
kwargs['dtype'] = object
index = pd.Index(np.asarray(array), **kwargs)
return _maybe_cast_to_cftimeindex(index)
def multiindex_from_product_levels(levels: Sequence[pd.Index],
names: Optional[Sequence[str]] = None
) -> pd.MultiIndex:
"""Creating a MultiIndex from a product without refactorizing levels.
Keeping levels the same gives back the original labels when we unstack.
Parameters
----------
levels : sequence of pd.Index
Values for each MultiIndex level.
names : optional sequence of objects
Names for each level.
Returns
-------
pandas.MultiIndex
"""
if any(not isinstance(lev, pd.Index) for lev in levels):
raise TypeError('levels must be a list of pd.Index objects')
split_labels, levels = zip(*[lev.factorize() for lev in levels])
labels_mesh = np.meshgrid(*split_labels, indexing='ij')
labels = [x.ravel() for x in labels_mesh]
return pd.MultiIndex(levels, labels, sortorder=0, names=names)
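# A small usage sketch (values are illustrative only):
#
#     idx = multiindex_from_product_levels(
#         [pd.Index(['a', 'b']), pd.Index([1, 2, 3])],
#         names=['letters', 'numbers'])
#     # equivalent ordering to pd.MultiIndex.from_product, but the original
#     # level objects are preserved so unstacking restores them exactly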
def maybe_wrap_array(original, new_array):
"""Wrap a transformed array with __array_wrap__ is it can be done safely.
This lets us treat arbitrary functions that take and return ndarray objects
like ufuncs, as long as they return an array with the same shape.
"""
# in case func lost array's metadata
if isinstance(new_array, np.ndarray) and new_array.shape == original.shape:
return original.__array_wrap__(new_array)
else:
return new_array
def equivalent(first: T, second: T) -> bool:
"""Compare two objects for equivalence (identity or equality), using
array_equiv if either object is an ndarray
"""
# TODO: refactor to avoid circular import
from . import duck_array_ops
if isinstance(first, np.ndarray) or isinstance(second, np.ndarray):
return duck_array_ops.array_equiv(first, second)
else:
return ((first is second) or
(first == second) or
(pd.isnull(first) and pd.isnull(second)))
def peek_at(iterable: Iterable[T]) -> Tuple[T, Iterator[T]]:
"""Returns the first value from iterable, as well as a new iterator with
the same content as the original iterable
"""
gen = iter(iterable)
peek = next(gen)
return peek, itertools.chain([peek], gen)
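# Sketch of the peek_at pattern used by the groupby/concat code:
#
#     first, rest = peek_at(x ** 2 for x in range(5))
#     # first == 0, and iterating over `rest` still yields 0, 1, 4, 9, 16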
def update_safety_check(first_dict: MutableMapping[K, V],
second_dict: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent) -> None:
"""Check the safety of updating one dictionary with another.
Raises ValueError if dictionaries have non-compatible values for any key,
where compatibility is determined by identity (they are the same item) or
the `compat` function.
Parameters
----------
first_dict, second_dict : dict-like
All items in the second dictionary are checked against for conflicts
against items in the first dictionary.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
"""
for k, v in second_dict.items():
if k in first_dict and not compat(v, first_dict[k]):
raise ValueError('unsafe to merge dictionaries without '
'overriding values; conflicting key %r' % k)
def remove_incompatible_items(first_dict: MutableMapping[K, V],
second_dict: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent
) -> None:
"""Remove incompatible items from the first dictionary in-place.
Items are retained if their keys are found in both dictionaries and the
values are compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
"""
for k in list(first_dict):
if k not in second_dict or not compat(first_dict[k], second_dict[k]):
del first_dict[k]
def is_dict_like(value: Any) -> bool:
return hasattr(value, 'keys') and hasattr(value, '__getitem__')
def is_full_slice(value: Any) -> bool:
return isinstance(value, slice) and value == slice(None)
def either_dict_or_kwargs(pos_kwargs: Optional[Mapping[Hashable, T]],
kw_kwargs: Mapping[str, T],
func_name: str
) -> Mapping[Hashable, T]:
if pos_kwargs is not None:
if not is_dict_like(pos_kwargs):
raise ValueError('the first argument to .%s must be a dictionary'
% func_name)
if kw_kwargs:
raise ValueError('cannot specify both keyword and positional '
'arguments to .%s' % func_name)
return pos_kwargs
else:
# Need an explicit cast to appease mypy due to invariance; see
# https://github.com/python/mypy/issues/6228
return cast(Mapping[Hashable, T], kw_kwargs)
def is_scalar(value: Any) -> bool:
"""Whether to treat a value as a scalar.
Any non-iterable, string, or 0-D array
"""
return (
getattr(value, 'ndim', None) == 0 or
isinstance(value, (str, bytes)) or not
isinstance(value, (Iterable, ) + dask_array_type))
def is_valid_numpy_dtype(dtype: Any) -> bool:
try:
np.dtype(dtype)
except (TypeError, ValueError):
return False
else:
return True
def to_0d_object_array(value: Any) -> np.ndarray:
"""Given a value, wrap it in a 0-D numpy.ndarray with dtype=object.
"""
result = np.empty((), dtype=object)
result[()] = value
return result
def to_0d_array(value: Any) -> np.ndarray:
"""Given a value, wrap it in a 0-D numpy.ndarray.
"""
if np.isscalar(value) or (isinstance(value, np.ndarray) and
value.ndim == 0):
return np.array(value)
else:
return to_0d_object_array(value)
def dict_equiv(first: Mapping[K, V], second: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent) -> bool:
"""Test equivalence of two dict-like objects. If any of the values are
numpy arrays, compare them correctly.
Parameters
----------
first, second : dict-like
Dictionaries to compare for equality
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
Returns
-------
equals : bool
True if the dictionaries are equal
"""
for k in first:
if k not in second or not compat(first[k], second[k]):
return False
for k in second:
if k not in first:
return False
return True
def ordered_dict_intersection(first_dict: Mapping[K, V],
second_dict: Mapping[K, V],
compat: Callable[[V, V], bool] = equivalent
) -> MutableMapping[K, V]:
"""Return the intersection of two dictionaries as a new OrderedDict.
Items are retained if their keys are found in both dictionaries and the
values are compatible.
Parameters
----------
first_dict, second_dict : dict-like
Mappings to merge.
compat : function, optional
Binary operator to determine if two values are compatible. By default,
checks for equivalence.
Returns
-------
intersection : OrderedDict
Intersection of the contents.
"""
new_dict = OrderedDict(first_dict)
remove_incompatible_items(new_dict, second_dict, compat)
return new_dict
class SingleSlotPickleMixin:
"""Mixin class to add the ability to pickle objects whose state is defined
by a single __slots__ attribute. Only necessary under Python 2.
"""
def __getstate__(self):
return getattr(self, self.__slots__[0])
def __setstate__(self, state):
setattr(self, self.__slots__[0], state)
class Frozen(Mapping[K, V], SingleSlotPickleMixin):
"""Wrapper around an object implementing the mapping interface to make it
immutable. If you really want to modify the mapping, the mutable version is
saved under the `mapping` attribute.
"""
__slots__ = ['mapping']
def __init__(self, mapping: Mapping[K, V]):
self.mapping = mapping
def __getitem__(self, key: K) -> V:
return self.mapping[key]
def __iter__(self) -> Iterator[K]:
return iter(self.mapping)
def __len__(self) -> int:
return len(self.mapping)
def __contains__(self, key: object) -> bool:
return key in self.mapping
def __repr__(self) -> str:
return '%s(%r)' % (type(self).__name__, self.mapping)
def FrozenOrderedDict(*args, **kwargs) -> Frozen:
return Frozen(OrderedDict(*args, **kwargs))
class SortedKeysDict(MutableMapping[K, V], SingleSlotPickleMixin):
"""An wrapper for dictionary-like objects that always iterates over its
items in sorted order by key but is otherwise equivalent to the underlying
mapping.
"""
__slots__ = ['mapping']
def __init__(self, mapping: Optional[MutableMapping[K, V]] = None):
self.mapping = {} if mapping is None else mapping
def __getitem__(self, key: K) -> V:
return self.mapping[key]
def __setitem__(self, key: K, value: V) -> None:
self.mapping[key] = value
def __delitem__(self, key: K) -> None:
del self.mapping[key]
def __iter__(self) -> Iterator[K]:
return iter(sorted(self.mapping))
def __len__(self) -> int:
return len(self.mapping)
def __contains__(self, key: object) -> bool:
return key in self.mapping
def __repr__(self) -> str:
return '%s(%r)' % (type(self).__name__, self.mapping)
class OrderedSet(MutableSet[T]):
"""A simple ordered set.
The API matches the builtin set, but it preserves insertion order of
elements, like an OrderedDict.
"""
def __init__(self, values: Optional[AbstractSet[T]] = None):
self._ordered_dict = OrderedDict() # type: MutableMapping[T, None]
if values is not None:
# Disable type checking - both mypy and PyCharm believes that
# we're altering the type of self in place (see signature of
# MutableSet.__ior__)
self |= values # type: ignore
# Required methods for MutableSet
def __contains__(self, value: object) -> bool:
return value in self._ordered_dict
def __iter__(self) -> Iterator[T]:
return iter(self._ordered_dict)
def __len__(self) -> int:
return len(self._ordered_dict)
def add(self, value: T) -> None:
self._ordered_dict[value] = None
def discard(self, value: T) -> None:
del self._ordered_dict[value]
# Additional methods
def update(self, values: AbstractSet[T]) -> None:
# See comment on __init__ re. type checking
self |= values # type: ignore
def __repr__(self) -> str:
return '%s(%r)' % (type(self).__name__, list(self))
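# Usage sketch: insertion order is preserved, unlike the builtin set.
#
#     s = OrderedSet()
#     s.add('b'); s.add('a'); s.add('b')
#     list(s)  # -> ['b', 'a']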
class NdimSizeLenMixin:
"""Mixin class that extends a class that defines a ``shape`` property to
one that also defines ``ndim``, ``size`` and ``__len__``.
"""
@property
def ndim(self: Any) -> int:
return len(self.shape)
@property
def size(self: Any) -> int:
# cast to int so that shape = () gives size = 1
return int(np.prod(self.shape))
def __len__(self: Any) -> int:
try:
return self.shape[0]
except IndexError:
raise TypeError('len() of unsized object')
class NDArrayMixin(NdimSizeLenMixin):
"""Mixin class for making wrappers of N-dimensional arrays that conform to
the ndarray interface required for the data argument to Variable objects.
A subclass should set the `array` property and override one or more of
`dtype`, `shape` and `__getitem__`.
"""
@property
def dtype(self: Any) -> np.dtype:
return self.array.dtype
@property
def shape(self: Any) -> Tuple[int]:
return self.array.shape
def __getitem__(self: Any, key):
return self.array[key]
def __repr__(self: Any) -> str:
return '%s(array=%r)' % (type(self).__name__, self.array)
class ReprObject:
"""Object that prints as the given value, for use with sentinel values.
"""
def __init__(self, value: str):
self._value = value
def __repr__(self) -> str:
return self._value
@contextlib.contextmanager
def close_on_error(f):
"""Context manager to ensure that a file opened by xarray is closed if an
exception is raised before the user sees the file object.
"""
try:
yield
except Exception:
f.close()
raise
def is_remote_uri(path: str) -> bool:
return bool(re.search(r'^https?\://', path))
def is_grib_path(path: str) -> bool:
_, ext = os.path.splitext(path)
return ext in ['.grib', '.grb', '.grib2', '.grb2']
def is_uniform_spaced(arr, **kwargs) -> bool:
"""Return True if values of an array are uniformly spaced and sorted.
>>> is_uniform_spaced(range(5))
True
>>> is_uniform_spaced([-4, 0, 100])
False
kwargs are additional arguments to ``np.isclose``
"""
arr = np.array(arr, dtype=float)
diffs = np.diff(arr)
return bool(np.isclose(diffs.min(), diffs.max(), **kwargs))
def hashable(v: Any) -> bool:
"""Determine whether `v` can be hashed.
"""
try:
hash(v)
except TypeError:
return False
return True
def not_implemented(*args, **kwargs):
return NotImplemented
def decode_numpy_dict_values(attrs: Mapping[K, V]) -> Dict[K, V]:
"""Convert attribute values from numpy objects to native Python objects,
for use in to_dict
"""
attrs = dict(attrs)
for k, v in attrs.items():
if isinstance(v, np.ndarray):
attrs[k] = v.tolist()
elif isinstance(v, np.generic):
attrs[k] = v.item()
return attrs
def ensure_us_time_resolution(val):
"""Convert val out of numpy time, for use in to_dict.
Needed because of numpy bug GH#7619"""
if np.issubdtype(val.dtype, np.datetime64):
val = val.astype('datetime64[us]')
elif np.issubdtype(val.dtype, np.timedelta64):
val = val.astype('timedelta64[us]')
return val
class HiddenKeyDict(MutableMapping[K, V]):
"""Acts like a normal dictionary, but hides certain keys.
"""
# ``__init__`` method required to create instance from class.
def __init__(self, data: MutableMapping[K, V], hidden_keys: Iterable[K]):
self._data = data
self._hidden_keys = frozenset(hidden_keys)
def _raise_if_hidden(self, key: K) -> None:
if key in self._hidden_keys:
raise KeyError('Key `%r` is hidden.' % key)
# The next five methods are requirements of the ABC.
def __setitem__(self, key: K, value: V) -> None:
self._raise_if_hidden(key)
self._data[key] = value
def __getitem__(self, key: K) -> V:
self._raise_if_hidden(key)
return self._data[key]
def __delitem__(self, key: K) -> None:
self._raise_if_hidden(key)
del self._data[key]
def __iter__(self) -> Iterator[K]:
for k in self._data:
if k not in self._hidden_keys:
yield k
def __len__(self) -> int:
num_hidden = len(self._hidden_keys & self._data.keys())
return len(self._data) - num_hidden
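# Usage sketch: hidden keys are invisible to iteration and len() and raise
# KeyError on access, while the underlying mapping is left untouched.
#
#     d = HiddenKeyDict({'a': 1, '_private': 2}, ['_private'])
#     list(d)        # -> ['a']
#     len(d)         # -> 1
#     d['_private']  # raises KeyError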
def get_temp_dimname(dims: Container[Hashable], new_dim: Hashable) -> Hashable:
""" Get an new dimension name based on new_dim, that is not used in dims.
If the same name exists, we add an underscore(s) in the head.
Example1:
dims: ['a', 'b', 'c']
new_dim: ['_rolling']
-> ['_rolling']
Example2:
dims: ['a', 'b', 'c', '_rolling']
new_dim: ['_rolling']
-> ['__rolling']
"""
while new_dim in dims:
new_dim = '_' + str(new_dim)
return new_dim
| apache-2.0 |
atsao72/sympy | sympy/physics/quantum/tensorproduct.py | 64 | 13572 | """Abstract tensor product."""
from __future__ import print_function, division
from sympy import Expr, Add, Mul, Matrix, Pow, sympify
from sympy.core.compatibility import u, range
from sympy.core.trace import Tr
from sympy.printing.pretty.stringpict import prettyForm
from sympy.physics.quantum.qexpr import QuantumError
from sympy.physics.quantum.dagger import Dagger
from sympy.physics.quantum.commutator import Commutator
from sympy.physics.quantum.anticommutator import AntiCommutator
from sympy.physics.quantum.state import Ket, Bra
from sympy.physics.quantum.matrixutils import (
numpy_ndarray,
scipy_sparse_matrix,
matrix_tensor_product
)
__all__ = [
'TensorProduct',
'tensor_product_simp'
]
#-----------------------------------------------------------------------------
# Tensor product
#-----------------------------------------------------------------------------
_combined_printing = False
def combined_tensor_printing(combined):
"""Set flag controlling whether tensor products of states should be
printed as a combined bra/ket or as an explicit tensor product of different
bra/kets. This is a global setting for all TensorProduct class instances.
Parameters
----------
combined : bool
When true, tensor product states are combined into one ket/bra, and
when false explicit tensor product notation is used between each
ket/bra.
"""
global _combined_printing
_combined_printing = combined
class TensorProduct(Expr):
"""The tensor product of two or more arguments.
For matrices, this uses ``matrix_tensor_product`` to compute the Kronecker
or tensor product matrix. For other objects a symbolic ``TensorProduct``
instance is returned. The tensor product is a non-commutative
multiplication that is used primarily with operators and states in quantum
mechanics.
Currently, the tensor product distinguishes between commutative and non-
commutative arguments. Commutative arguments are assumed to be scalars and
are pulled out in front of the ``TensorProduct``. Non-commutative arguments
remain in the resulting ``TensorProduct``.
Parameters
==========
args : tuple
A sequence of the objects to take the tensor product of.
Examples
========
Start with a simple tensor product of sympy matrices::
>>> from sympy import I, Matrix, symbols
>>> from sympy.physics.quantum import TensorProduct
>>> m1 = Matrix([[1,2],[3,4]])
>>> m2 = Matrix([[1,0],[0,1]])
>>> TensorProduct(m1, m2)
Matrix([
[1, 0, 2, 0],
[0, 1, 0, 2],
[3, 0, 4, 0],
[0, 3, 0, 4]])
>>> TensorProduct(m2, m1)
Matrix([
[1, 2, 0, 0],
[3, 4, 0, 0],
[0, 0, 1, 2],
[0, 0, 3, 4]])
We can also construct tensor products of non-commutative symbols:
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> tp = TensorProduct(A, B)
>>> tp
AxB
We can take the dagger of a tensor product (note the order does NOT reverse
like the dagger of a normal product):
>>> from sympy.physics.quantum import Dagger
>>> Dagger(tp)
Dagger(A)xDagger(B)
Expand can be used to distribute a tensor product across addition:
>>> C = Symbol('C',commutative=False)
>>> tp = TensorProduct(A+B,C)
>>> tp
(A + B)xC
>>> tp.expand(tensorproduct=True)
AxC + BxC
"""
is_commutative = False
def __new__(cls, *args):
if isinstance(args[0], (Matrix, numpy_ndarray, scipy_sparse_matrix)):
return matrix_tensor_product(*args)
c_part, new_args = cls.flatten(sympify(args))
c_part = Mul(*c_part)
if len(new_args) == 0:
return c_part
elif len(new_args) == 1:
return c_part * new_args[0]
else:
tp = Expr.__new__(cls, *new_args)
return c_part * tp
@classmethod
def flatten(cls, args):
# TODO: disallow nested TensorProducts.
c_part = []
nc_parts = []
for arg in args:
cp, ncp = arg.args_cnc()
c_part.extend(list(cp))
nc_parts.append(Mul._from_args(ncp))
return c_part, nc_parts
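# Sketch of what flatten accomplishes (A, B non-commutative Symbols as in
# the class docstring): commutative factors are collected into c_part and
# pulled out in front, e.g.
#
#     TensorProduct(2*A, 3*B)   # -> 6*AxB
#
# while the non-commutative remainders become the TensorProduct arguments.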
def _eval_adjoint(self):
return TensorProduct(*[Dagger(i) for i in self.args])
def _eval_rewrite(self, pattern, rule, **hints):
sargs = self.args
terms = [t._eval_rewrite(pattern, rule, **hints) for t in sargs]
return TensorProduct(*terms).expand(tensorproduct=True)
def _sympystr(self, printer, *args):
from sympy.printing.str import sstr
length = len(self.args)
s = ''
for i in range(length):
if isinstance(self.args[i], (Add, Pow, Mul)):
s = s + '('
s = s + sstr(self.args[i])
if isinstance(self.args[i], (Add, Pow, Mul)):
s = s + ')'
if i != length - 1:
s = s + 'x'
return s
def _pretty(self, printer, *args):
if (_combined_printing and
(all([isinstance(arg, Ket) for arg in self.args]) or
all([isinstance(arg, Bra) for arg in self.args]))):
length = len(self.args)
pform = printer._print('', *args)
for i in range(length):
next_pform = printer._print('', *args)
length_i = len(self.args[i].args)
for j in range(length_i):
part_pform = printer._print(self.args[i].args[j], *args)
next_pform = prettyForm(*next_pform.right(part_pform))
if j != length_i - 1:
next_pform = prettyForm(*next_pform.right(', '))
if len(self.args[i].args) > 1:
next_pform = prettyForm(
*next_pform.parens(left='{', right='}'))
pform = prettyForm(*pform.right(next_pform))
if i != length - 1:
pform = prettyForm(*pform.right(',' + ' '))
pform = prettyForm(*pform.left(self.args[0].lbracket))
pform = prettyForm(*pform.right(self.args[0].rbracket))
return pform
length = len(self.args)
pform = printer._print('', *args)
for i in range(length):
next_pform = printer._print(self.args[i], *args)
if isinstance(self.args[i], (Add, Mul)):
next_pform = prettyForm(
*next_pform.parens(left='(', right=')')
)
pform = prettyForm(*pform.right(next_pform))
if i != length - 1:
if printer._use_unicode:
pform = prettyForm(*pform.right(u('\N{N-ARY CIRCLED TIMES OPERATOR}') + u(' ')))
else:
pform = prettyForm(*pform.right('x' + ' '))
return pform
def _latex(self, printer, *args):
if (_combined_printing and
(all([isinstance(arg, Ket) for arg in self.args]) or
all([isinstance(arg, Bra) for arg in self.args]))):
def _label_wrap(label, nlabels):
return label if nlabels == 1 else r"\left\{%s\right\}" % label
s = r", ".join([_label_wrap(arg._print_label_latex(printer, *args),
len(arg.args)) for arg in self.args])
return r"{%s%s%s}" % (self.args[0].lbracket_latex, s,
self.args[0].rbracket_latex)
length = len(self.args)
s = ''
for i in range(length):
if isinstance(self.args[i], (Add, Mul)):
s = s + '\\left('
# The extra {} brackets are needed to get matplotlib's latex
# rendered to render this properly.
s = s + '{' + printer._print(self.args[i], *args) + '}'
if isinstance(self.args[i], (Add, Mul)):
s = s + '\\right)'
if i != length - 1:
s = s + '\\otimes '
return s
def doit(self, **hints):
return TensorProduct(*[item.doit(**hints) for item in self.args])
def _eval_expand_tensorproduct(self, **hints):
"""Distribute TensorProducts across addition."""
args = self.args
add_args = []
stop = False
for i in range(len(args)):
if isinstance(args[i], Add):
for aa in args[i].args:
tp = TensorProduct(*args[:i] + (aa,) + args[i + 1:])
if isinstance(tp, TensorProduct):
tp = tp._eval_expand_tensorproduct()
add_args.append(tp)
break
if add_args:
return Add(*add_args)
else:
return self
def _eval_trace(self, **kwargs):
indices = kwargs.get('indices', None)
exp = tensor_product_simp(self)
if indices is None or len(indices) == 0:
return Mul(*[Tr(arg).doit() for arg in exp.args])
else:
return Mul(*[Tr(value).doit() if idx in indices else value
for idx, value in enumerate(exp.args)])
def tensor_product_simp_Mul(e):
"""Simplify a Mul with TensorProducts.
Currently the main use of this is to simplify a ``Mul`` of ``TensorProduct``s
to a ``TensorProduct`` of ``Muls``. It currently only works for relatively
simple cases where the initial ``Mul`` only has scalars and raw
``TensorProduct``s, not ``Add``, ``Pow``, ``Commutator``s of
``TensorProduct``s.
Parameters
==========
e : Expr
A ``Mul`` of ``TensorProduct``s to be simplified.
Returns
=======
e : Expr
A ``TensorProduct`` of ``Mul``s.
Examples
========
This is an example of the type of simplification that this function
performs::
>>> from sympy.physics.quantum.tensorproduct import \
tensor_product_simp_Mul, TensorProduct
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> C = Symbol('C',commutative=False)
>>> D = Symbol('D',commutative=False)
>>> e = TensorProduct(A,B)*TensorProduct(C,D)
>>> e
AxB*CxD
>>> tensor_product_simp_Mul(e)
(A*C)x(B*D)
"""
# TODO: This won't work with Muls that have other composites of
# TensorProducts, like an Add, Pow, Commutator, etc.
# TODO: This only works for the equivalent of single Qbit gates.
if not isinstance(e, Mul):
return e
c_part, nc_part = e.args_cnc()
n_nc = len(nc_part)
if n_nc == 0 or n_nc == 1:
return e
elif e.has(TensorProduct):
current = nc_part[0]
if not isinstance(current, TensorProduct):
raise TypeError('TensorProduct expected, got: %r' % current)
n_terms = len(current.args)
new_args = list(current.args)
for next in nc_part[1:]:
# TODO: check the hilbert spaces of next and current here.
if isinstance(next, TensorProduct):
if n_terms != len(next.args):
raise QuantumError(
'TensorProducts of different lengths: %r and %r' %
(current, next)
)
for i in range(len(new_args)):
new_args[i] = new_args[i] * next.args[i]
else:
# this won't quite work as we don't want next in the
# TensorProduct
for i in range(len(new_args)):
new_args[i] = new_args[i] * next
current = next
return Mul(*c_part) * TensorProduct(*new_args)
else:
return e
def tensor_product_simp(e, **hints):
"""Try to simplify and combine TensorProducts.
In general this will try to pull expressions inside of ``TensorProducts``.
It currently only works for relatively simple cases where the products have
only scalars, raw ``TensorProducts``, not ``Add``, ``Pow``, ``Commutators``
of ``TensorProducts``. It is best to see what it does by showing examples.
Examples
========
>>> from sympy.physics.quantum import tensor_product_simp
>>> from sympy.physics.quantum import TensorProduct
>>> from sympy import Symbol
>>> A = Symbol('A',commutative=False)
>>> B = Symbol('B',commutative=False)
>>> C = Symbol('C',commutative=False)
>>> D = Symbol('D',commutative=False)
First see what happens to products of tensor products:
>>> e = TensorProduct(A,B)*TensorProduct(C,D)
>>> e
AxB*CxD
>>> tensor_product_simp(e)
(A*C)x(B*D)
This is the core logic of this function, and it works inside powers, sums,
commutators and anticommutators as well:
>>> tensor_product_simp(e**2)
(A*C)x(B*D)**2
"""
if isinstance(e, Add):
return Add(*[tensor_product_simp(arg) for arg in e.args])
elif isinstance(e, Pow):
return tensor_product_simp(e.base) ** e.exp
elif isinstance(e, Mul):
return tensor_product_simp_Mul(e)
elif isinstance(e, Commutator):
return Commutator(*[tensor_product_simp(arg) for arg in e.args])
elif isinstance(e, AntiCommutator):
return AntiCommutator(*[tensor_product_simp(arg) for arg in e.args])
else:
return e
| bsd-3-clause |
vortex-ape/scikit-learn | conftest.py | 2 | 2347 | # Even if empty this file is useful so that when running from the root folder
# ./sklearn is added to sys.path by pytest. See
# https://docs.pytest.org/en/latest/pythonpath.html for more details. For
# example, this allows to build extensions in place and run pytest
# doc/modules/clustering.rst and use sklearn from the local folder rather than
# the one from site-packages.
import platform
from distutils.version import LooseVersion
import pytest
from _pytest.doctest import DoctestItem
from sklearn.utils.fixes import PY3_OR_LATER
PYTEST_MIN_VERSION = '3.3.0'
if LooseVersion(pytest.__version__) < PYTEST_MIN_VERSION:
raise ImportError('Your version of pytest is too old, you should have at least '
'pytest >= {} installed.'.format(PYTEST_MIN_VERSION))
def pytest_addoption(parser):
parser.addoption("--skip-network", action="store_true", default=False,
help="skip network tests")
def pytest_collection_modifyitems(config, items):
# FeatureHasher is not compatible with PyPy
if platform.python_implementation() == 'PyPy':
skip_marker = pytest.mark.skip(
reason='FeatureHasher is not compatible with PyPy')
for item in items:
if item.name == 'sklearn.feature_extraction.hashing.FeatureHasher':
item.add_marker(skip_marker)
# Skip tests which require internet if the flag is provided
if config.getoption("--skip-network"):
skip_network = pytest.mark.skip(
reason="test requires internet connectivity")
for item in items:
if "network" in item.keywords:
item.add_marker(skip_network)
# numpy changed the str/repr formatting of numpy arrays in 1.14. We want to
# run doctests only for numpy >= 1.14. We want to skip the doctest for
# python 2 due to unicode.
skip_doctests = False
if not PY3_OR_LATER:
skip_doctests = True
try:
import numpy as np
if LooseVersion(np.__version__) < LooseVersion('1.14'):
skip_doctests = True
except ImportError:
pass
if skip_doctests:
skip_marker = pytest.mark.skip(
reason='doctests are only run for numpy >= 1.14 and python >= 3')
for item in items:
if isinstance(item, DoctestItem):
item.add_marker(skip_marker)
| bsd-3-clause |
drewokane/xray | xarray/core/groupby.py | 1 | 20086 | import functools
import numpy as np
import pandas as pd
from . import ops
from .combine import concat
from .common import (
ImplementsArrayReduce, ImplementsDatasetReduce, _maybe_promote,
)
from .pycompat import zip
from .utils import peek_at, maybe_wrap_array, safe_cast_to_index
from .variable import as_variable, Variable, Coordinate
def unique_value_groups(ar):
"""Group an array by its unique values.
Parameters
----------
ar : array-like
Input array. This will be flattened if it is not already 1-D.
Returns
-------
values : np.ndarray
Sorted, unique values as returned by `np.unique`.
indices : list of lists of int
Each element provides the integer indices in `ar` with values given by
the corresponding value in `unique_values`.
"""
inverse, values = pd.factorize(ar, sort=True)
groups = [[] for _ in range(len(values))]
for n, g in enumerate(inverse):
if g >= 0:
# pandas uses -1 to mark NaN, but doesn't include them in values
groups[g].append(n)
return values, groups
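# Sketch of the behaviour GroupBy relies on below (values illustrative):
#
#     unique_value_groups(['b', 'a', 'b', 'a'])
#     # -> (array(['a', 'b'], dtype=object), [[1, 3], [0, 2]])
#     # i.e. sorted unique values plus, for each value, the integer positions
#     # in the input where that value occurs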
def _get_fill_value(dtype):
"""Return a fill value that appropriately promotes types when used with
np.concatenate
"""
dtype, fill_value = _maybe_promote(dtype)
return fill_value
def _dummy_copy(xarray_obj):
from .dataset import Dataset
from .dataarray import DataArray
if isinstance(xarray_obj, Dataset):
res = Dataset(dict((k, _get_fill_value(v.dtype))
for k, v in xarray_obj.data_vars.items()),
dict((k, _get_fill_value(v.dtype))
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims),
xarray_obj.attrs)
elif isinstance(xarray_obj, DataArray):
res = DataArray(_get_fill_value(xarray_obj.dtype),
dict((k, _get_fill_value(v.dtype))
for k, v in xarray_obj.coords.items()
if k not in xarray_obj.dims),
name=xarray_obj.name,
attrs=xarray_obj.attrs)
else: # pragma: no cover
raise AssertionError
return res
class GroupBy(object):
"""A object that implements the split-apply-combine pattern.
Modeled after `pandas.GroupBy`. The `GroupBy` object can be iterated over
(unique_value, grouped_array) pairs, but the main way to interact with a
groupby object are with the `apply` or `reduce` methods. You can also
directly call numpy methods like `mean` or `std`.
You should create a GroupBy object by using the `DataArray.groupby` or
`Dataset.groupby` methods.
See Also
--------
Dataset.groupby
DataArray.groupby
"""
def __init__(self, obj, group, squeeze=False, grouper=None):
"""Create a GroupBy object
Parameters
----------
obj : Dataset or DataArray
Object to group.
group : DataArray or Coordinate
1-dimensional array with the group values.
squeeze : boolean, optional
If "group" is a coordinate of object, `squeeze` controls whether
the subarrays have a dimension of length 1 along that coordinate or
if the dimension is squeezed out.
grouper : pd.Grouper, optional
Used for grouping values along the `group` array.
"""
from .dataset import as_dataset
if group.ndim != 1:
# TODO: remove this limitation?
raise ValueError('`group` must be 1 dimensional')
if getattr(group, 'name', None) is None:
raise ValueError('`group` must have a name')
if not hasattr(group, 'dims'):
raise ValueError("`group` must have a 'dims' attribute")
group_dim, = group.dims
try:
expected_size = obj.dims[group_dim]
except TypeError:
expected_size = obj.shape[obj.get_axis_num(group_dim)]
if group.size != expected_size:
raise ValueError('the group variable\'s length does not '
'match the length of this variable along its '
'dimension')
full_index = None
if grouper is not None:
# time-series resampling
index = safe_cast_to_index(group)
if not index.is_monotonic:
# TODO: sort instead of raising an error
raise ValueError('index must be monotonic for resampling')
s = pd.Series(np.arange(index.size), index)
first_items = s.groupby(grouper).first()
if first_items.isnull().any():
full_index = first_items.index
first_items = first_items.dropna()
bins = first_items.values.astype(np.int64)
group_indices = ([slice(i, j) for i, j in zip(bins[:-1], bins[1:])] +
[slice(bins[-1], None)])
unique_coord = Coordinate(group.name, first_items.index)
elif group.name in obj.dims:
# assume that group already has sorted, unique values
if group.dims != (group.name,):
raise ValueError('`group` is required to be a coordinate if '
'`group.name` is a dimension in `obj`')
group_indices = np.arange(group.size)
if not squeeze:
# group_indices = group_indices.reshape(-1, 1)
# use slices to do views instead of fancy indexing
group_indices = [slice(i, i + 1) for i in group_indices]
unique_coord = group
else:
# look through group to find the unique values
unique_values, group_indices = unique_value_groups(group)
unique_coord = Coordinate(group.name, unique_values)
self.obj = obj
self.group = group
self.group_dim = group_dim
self.group_indices = group_indices
self.unique_coord = unique_coord
self._groups = None
self._full_index = full_index
@property
def groups(self):
# provided to mimic pandas.groupby
if self._groups is None:
self._groups = dict(zip(self.unique_coord.values,
self.group_indices))
return self._groups
def __len__(self):
return self.unique_coord.size
def __iter__(self):
return zip(self.unique_coord.values, self._iter_grouped())
def _iter_grouped(self):
"""Iterate over each element in this group"""
for indices in self.group_indices:
yield self.obj.isel(**{self.group_dim: indices})
def _infer_concat_args(self, applied_example):
if self.group_dim in applied_example.dims:
concat_dim = self.group
positions = self.group_indices
else:
concat_dim = self.unique_coord
positions = None
return concat_dim, positions
@staticmethod
def _binary_op(f, reflexive=False, **ignored_kwargs):
@functools.wraps(f)
def func(self, other):
g = f if not reflexive else lambda x, y: f(y, x)
applied = self._yield_binary_applied(g, other)
combined = self._concat(applied)
return combined
return func
def _yield_binary_applied(self, func, other):
dummy = None
for group_value, obj in self:
try:
other_sel = other.sel(**{self.group.name: group_value})
except AttributeError:
raise TypeError('GroupBy objects only support binary ops '
'when the other argument is a Dataset or '
'DataArray')
except KeyError:
if self.group.name not in other.dims:
raise ValueError('incompatible dimensions for a grouped '
'binary operation: the group variable %r '
'is not a dimension on the other argument'
% self.group.name)
if dummy is None:
dummy = _dummy_copy(other)
other_sel = dummy
result = func(obj, other_sel)
yield result
def _maybe_restore_empty_groups(self, combined):
"""Our index contained empty groups (e.g., from a resampling). If we
reduced on that dimension, we want to restore the full index.
"""
if (self._full_index is not None and self.group.name in combined.dims):
indexers = {self.group.name: self._full_index}
combined = combined.reindex(**indexers)
return combined
def fillna(self, value):
"""Fill missing values in this object by group.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic, except the result is aligned to this
object (``join='left'``) instead of aligned to the intersection of
index coordinates (``join='inner'``).
Parameters
----------
value : valid type for the grouped object's fillna method
Used to fill all matching missing values by group.
Returns
-------
same type as the grouped object
See also
--------
Dataset.fillna
DataArray.fillna
"""
return self._fillna(value)
def where(self, cond):
"""Return an object of the same shape with all entries where cond is
True and all other entries masked.
This operation follows the normal broadcasting and alignment rules that
xarray uses for binary arithmetic.
Parameters
----------
cond : DataArray or Dataset
Returns
-------
same type as the grouped object
See also
--------
Dataset.where
"""
return self._where(cond)
def _first_or_last(self, op, skipna, keep_attrs):
if isinstance(self.group_indices[0], (int, np.integer)):
# NB. this is currently only used for reductions along an existing
# dimension
return self.obj
return self.reduce(op, self.group_dim, skipna=skipna,
keep_attrs=keep_attrs, allow_lazy=True)
def first(self, skipna=None, keep_attrs=True):
"""Return the first element of each group along the group dimension
"""
return self._first_or_last(ops.first, skipna, keep_attrs)
def last(self, skipna=None, keep_attrs=True):
"""Return the last element of each group along the group dimension
"""
return self._first_or_last(ops.last, skipna, keep_attrs)
def assign_coords(self, **kwargs):
"""Assign coordinates by group.
See also
--------
Dataset.assign_coords
"""
return self.apply(lambda ds: ds.assign_coords(**kwargs))
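# Typical split-apply-combine usage that these classes enable (a sketch; the
# dimension names and computations are illustrative, not part of this module):
#
#     ds.groupby('x').apply(lambda grp: grp - grp.mean())   # per-group anomalies
#     ds.groupby('time.month').mean('time')                 # monthly climatology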
class DataArrayGroupBy(GroupBy, ImplementsArrayReduce):
"""GroupBy object specialized to grouping DataArray objects
"""
def _iter_grouped_shortcut(self):
"""Fast version of `_iter_grouped` that yields Variables without
metadata
"""
var = self.obj.variable
for indices in self.group_indices:
yield var[{self.group_dim: indices}]
def _concat_shortcut(self, applied, concat_dim, positions):
# nb. don't worry too much about maintaining this method -- it does
# speed things up, but it's not very interpretable and there are much
# faster alternatives (e.g., doing the grouped aggregation in a
# compiled language)
stacked = Variable.concat(
applied, concat_dim, positions, shortcut=True)
stacked.attrs.update(self.obj.attrs)
result = self.obj._replace_maybe_drop_dims(stacked)
result._coords[concat_dim.name] = as_variable(concat_dim, copy=True)
return result
def _restore_dim_order(self, stacked):
def lookup_order(dimension):
if dimension == self.group.name:
dimension, = self.group.dims
if dimension in self.obj.dims:
axis = self.obj.get_axis_num(dimension)
else:
axis = 1e6 # some arbitrarily high value
return axis
new_order = sorted(stacked.dims, key=lookup_order)
return stacked.transpose(*new_order)
def apply(self, func, shortcut=False, **kwargs):
"""Apply a function over each array in the group and concatenate them
together into a new array.
`func` is called like `func(ar, *args, **kwargs)` for each array `ar`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the array. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped array after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each array.
shortcut : bool, optional
Whether or not to shortcut evaluation under the assumptions that:
(1) The action of `func` does not depend on any of the array
metadata (attributes or coordinates) but only on the data and
dimensions.
(2) The action of `func` creates arrays with homogeneous metadata,
that is, with the same dimensions and attributes.
If these conditions are satisfied `shortcut` provides significant
speedup. This should be the case for many common groupby operations
(e.g., applying numpy ufuncs).
**kwargs
Used to call `func(ar, **kwargs)` for each array `ar`.
Returns
-------
applied : DataArray
The result of splitting, applying and combining this array.
"""
if shortcut:
grouped = self._iter_grouped_shortcut()
else:
grouped = self._iter_grouped()
applied = (maybe_wrap_array(arr, func(arr, **kwargs)) for arr in grouped)
combined = self._concat(applied, shortcut=shortcut)
result = self._maybe_restore_empty_groups(combined)
return result
def _concat(self, applied, shortcut=False):
# peek at applied to determine which coordinate to stack over
applied_example, applied = peek_at(applied)
concat_dim, positions = self._infer_concat_args(applied_example)
if shortcut:
combined = self._concat_shortcut(applied, concat_dim, positions)
else:
combined = concat(applied, concat_dim, positions=positions)
if isinstance(combined, type(self.obj)):
combined = self._restore_dim_order(combined)
return combined
def reduce(self, func, dim=None, axis=None, keep_attrs=False,
shortcut=True, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dimension'
and 'axis' arguments can be supplied. If neither are supplied, then
`func` is calculated over all dimensions for each group item.
keep_attrs : bool, optional
If True, the array's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
def reduce_array(ar):
return ar.reduce(func, dim, axis, keep_attrs=keep_attrs, **kwargs)
return self.apply(reduce_array, shortcut=shortcut)
ops.inject_reduce_methods(DataArrayGroupBy)
ops.inject_binary_ops(DataArrayGroupBy)
class DatasetGroupBy(GroupBy, ImplementsDatasetReduce):
def apply(self, func, **kwargs):
"""Apply a function over each Dataset in the group and concatenate them
together into a new Dataset.
`func` is called like `func(ds, *args, **kwargs)` for each dataset `ds`
in this group.
Apply uses heuristics (like `pandas.GroupBy.apply`) to figure out how
to stack together the datasets. The rule is:
1. If the dimension along which the group coordinate is defined is
still in the first grouped item after applying `func`, then stack
over this dimension.
2. Otherwise, stack over the new dimension given by name of this
grouping (the argument to the `groupby` function).
Parameters
----------
func : function
Callable to apply to each sub-dataset.
**kwargs
Used to call `func(ds, **kwargs)` for each sub-dataset `ds`.
Returns
-------
applied : Dataset
The result of splitting, applying and combining this dataset.
"""
kwargs.pop('shortcut', None) # ignore shortcut if set (for now)
applied = (func(ds, **kwargs) for ds in self._iter_grouped())
combined = self._concat(applied)
result = self._maybe_restore_empty_groups(combined)
return result
def _concat(self, applied):
applied_example, applied = peek_at(applied)
concat_dim, positions = self._infer_concat_args(applied_example)
combined = concat(applied, concat_dim, positions=positions)
return combined
def reduce(self, func, dim=None, keep_attrs=False, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, axis=axis, **kwargs)` to return the result of collapsing an
np.ndarray over an integer valued axis.
dim : str or sequence of str, optional
Dimension(s) over which to apply `func`.
axis : int or sequence of int, optional
Axis(es) over which to apply `func`. Only one of the 'dimension'
and 'axis' arguments can be supplied. If neither are supplied, then
`func` is calculated over all dimensions for each group item.
keep_attrs : bool, optional
If True, the dataset's attributes (`attrs`) will be copied from
the original object to the new one. If False (default), the new
object will be returned without attributes.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : Array
Array with summarized data and the indicated dimension(s)
removed.
"""
def reduce_dataset(ds):
return ds.reduce(func, dim, keep_attrs, **kwargs)
return self.apply(reduce_dataset)
def assign(self, **kwargs):
"""Assign data variables by group.
See also
--------
Dataset.assign
"""
return self.apply(lambda ds: ds.assign(**kwargs))
ops.inject_reduce_methods(DatasetGroupBy)
ops.inject_binary_ops(DatasetGroupBy)
| apache-2.0 |
panda4life/idpserver | mysite/idp/plotting.py | 1 | 3702 | # -*- coding: utf-8 -*-
"""
Created on Wed Apr 30 16:43:00 2014
@author: jahad
"""
import matplotlib.pyplot as plt
from matplotlib.font_manager import FontProperties
import os
def phasePlot(fp,fm,seqname,saveAs):
if(os.path.exists(saveAs)):
os.remove(saveAs)
for x,y,label in zip(fp,fm,seqname):
plt.scatter(x,y,marker='.',color='Black')
plt.annotate(label,xy=(x+.01,y+.01))
reg1, = plt.fill([0,0,.25],[0,.25,0],color = 'Chartreuse',alpha=.75)
reg2, = plt.fill([0,0,.35,.25],[.25,.35,0,0],color = 'MediumSeaGreen',alpha=.75)
reg3, = plt.fill([0,.35,.65,.35],[.35,.65,.35,0],color = 'DarkGreen',alpha=.75)
reg4, = plt.fill([0,0,.35],[.35,1,.65],color = 'Red',alpha=.75)
reg5, = plt.fill([.35,.65,1],[0,.35,0],color = 'Blue',alpha=.75)
plt.ylim([0,1])
plt.xlim([0,1])
plt.xlabel('f+')
plt.ylabel('f-')
plt.title('Phase Diagram')
fontP = FontProperties()
fontP.set_size('x-small')
plt.legend([reg1,reg2,reg3,reg4,reg5],
['Weak Polyampholytes & Polyelectrolytes:\nGlobules & Tadpoles',
'Boundary Region',
'Strong Polyampholytes:\nCoils, Hairpins, Chimeras',
'Negatively Charged Strong Polyelectrolytes:\nSwollen Coils',
'Positively Charged Strong Polyelectrolytes:\nSwollen Coils'],
prop = fontP)
plt.savefig(saveAs,dpi=200)
plt.close()
return plt
def testPhasePlot():
graph = phasePlot([.65,.32,.15],[.34,.21,.42],['derp1','harro','nyan'],'C:\\Users\\James Ahad\\Documents\\GitHub\\idpserver\\mysite\\output\\test.png')
def testPhasePlotNull():
graph = phasePlot([],[],[],'/work/jahad/IDP_patterning/idpserver/mysite/output/test.png')
import computation as comp
def NCPRPlot(sequence, bloblen, saveAs):
if(not sequence is None):
data = sequence.NCPRdist(bloblen)
plt.plot(data[0,:], data[1,:])
else:
plt.plot([],[])
plt.xlim([0,50])
plt.title('NCPR Distribution')
plt.xlabel('Blob Index')
plt.ylabel('NCPR')
plt.ylim([-1.1,1.1])
plt.savefig(saveAs, dpi=200)
plt.close()
return plt
def testNCPRPlot():
graph = NCPRPlot(comp.Sequence('EEEEEEKKKKEKEKEKEKEKEEEEEEEKKKKKKEKEKEKEKEKEKEKGGGGGGKEKEKE'),5, 'C:\\Users\\James Ahad\\Documents\\GitHub\\idpserver\\mysite\\output\\testNCPR.png')
def SigmaPlot(sequence, bloblen, saveAs):
if(not sequence is None):
data = sequence.Sigmadist(bloblen)
plt.plot(data[0,:], data[1,:])
else:
plt.plot([],[])
plt.xlim([0,50])
plt.title('Sigma Distribution')
plt.xlabel('Blob Index')
plt.ylabel('Sigma')
plt.ylim([-.1,1.1])
plt.savefig(saveAs, dpi=200)
plt.close()
return plt
def testSigmaPlot():
graph = SigmaPlot(comp.Sequence('EEEEEEKKKKEKEKEKEKEKEEEEEEEKKKKKKEKEKEKEKEKEKEKGGGGGGKEKEKE'),5, 'C:\\Users\\James Ahad\\Documents\\GitHub\\idpserver\\mysite\\output\\testSigma.png')
def HydroPlot(sequence, bloblen, saveAs):
if(not sequence is None):
data = sequence.Hydrodist(bloblen)
plt.plot(data[0,:], data[1,:])
else:
plt.plot([],[])
plt.xlim([0,50])
plt.title('Hydropathy Distribution')
plt.xlabel('Blob Index')
plt.ylabel('Hydropathy')
plt.savefig(saveAs, dpi=200)
plt.close()
return plt
def testHydroPlot():
graph = HydroPlot(comp.Sequence('EEEEEEKKKKEKEKEKEKEKEEEEEEEKKKKKKEKEKEKEKEKEKEKGGGGGGKEKEKE'),5, 'C:\\Users\\James Ahad\\Documents\\GitHub\\idpserver\\mysite\\output\\testHydro.png')
testNCPRPlot()
testSigmaPlot()
testHydroPlot() | gpl-3.0 |
olologin/scikit-learn | examples/ensemble/plot_adaboost_twoclass.py | 347 | 3268 | """
==================
Two-class AdaBoost
==================
This example fits an AdaBoosted decision stump on a non-linearly separable
classification dataset composed of two "Gaussian quantiles" clusters
(see :func:`sklearn.datasets.make_gaussian_quantiles`) and plots the decision
boundary and decision scores. The distributions of decision scores are shown
separately for samples of class A and B. The predicted class label for each
sample is determined by the sign of the decision score. Samples with decision
scores greater than zero are classified as B, and are otherwise classified
as A. The magnitude of a decision score reflects the degree of similarity to
the predicted class label. Additionally, a new dataset could be constructed
containing a desired purity of class B, for example, by only selecting samples
with a decision score above some value (illustrated briefly at the end of this
script).
"""
print(__doc__)
# Author: Noel Dawe <noel.dawe@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.datasets import make_gaussian_quantiles
# Construct dataset
X1, y1 = make_gaussian_quantiles(cov=2.,
n_samples=200, n_features=2,
n_classes=2, random_state=1)
X2, y2 = make_gaussian_quantiles(mean=(3, 3), cov=1.5,
n_samples=300, n_features=2,
n_classes=2, random_state=1)
X = np.concatenate((X1, X2))
y = np.concatenate((y1, - y2 + 1))
# Create and fit an AdaBoosted decision tree
bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=1),
algorithm="SAMME",
n_estimators=200)
bdt.fit(X, y)
plot_colors = "br"
plot_step = 0.02
class_names = "AB"
plt.figure(figsize=(10, 5))
# Plot the decision boundaries
plt.subplot(121)
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),
np.arange(y_min, y_max, plot_step))
Z = bdt.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
cs = plt.contourf(xx, yy, Z, cmap=plt.cm.Paired)
plt.axis("tight")
# Plot the training points
for i, n, c in zip(range(2), class_names, plot_colors):
idx = np.where(y == i)
plt.scatter(X[idx, 0], X[idx, 1],
c=c, cmap=plt.cm.Paired,
label="Class %s" % n)
plt.xlim(x_min, x_max)
plt.ylim(y_min, y_max)
plt.legend(loc='upper right')
plt.xlabel('x')
plt.ylabel('y')
plt.title('Decision Boundary')
# Plot the two-class decision scores
twoclass_output = bdt.decision_function(X)
plot_range = (twoclass_output.min(), twoclass_output.max())
plt.subplot(122)
for i, n, c in zip(range(2), class_names, plot_colors):
plt.hist(twoclass_output[y == i],
bins=10,
range=plot_range,
facecolor=c,
label='Class %s' % n,
alpha=.5)
x1, x2, y1, y2 = plt.axis()
plt.axis((x1, x2, y1, y2 * 1.2))
plt.legend(loc='upper right')
plt.ylabel('Samples')
plt.xlabel('Score')
plt.title('Decision Scores')
plt.tight_layout()
plt.subplots_adjust(wspace=0.35)
plt.show()
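# A minimal illustration of the purity idea from the docstring above: keep only
# samples whose decision score exceeds a threshold to obtain a subset enriched
# in class B. The threshold and variable names here are arbitrary, chosen for
# illustration only.
purity_threshold = 0.5
high_purity_b = X[twoclass_output > purity_threshold]
print("Samples retained for a purer class-B subset: %d" % len(high_purity_b))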
| bsd-3-clause |
amchagas/python-neo | examples/generated_data.py | 7 | 4873 | # -*- coding: utf-8 -*-
"""
This is an example for creating simple plots from various Neo structures.
It includes a function that generates toy data.
"""
from __future__ import division # Use same division in Python 2 and 3
import numpy as np
import quantities as pq
from matplotlib import pyplot as plt
import neo
def generate_block(n_segments=3, n_channels=8, n_units=3,
data_samples=1000, feature_samples=100):
"""
Generate a block with a single recording channel group and a number of
segments, recording channels and units with associated analog signals
and spike trains.
"""
feature_len = feature_samples / data_samples
# Create container and grouping objects
segments = [neo.Segment(index=i) for i in range(n_segments)]
rcg = neo.RecordingChannelGroup(name='T0')
for i in range(n_channels):
rc = neo.RecordingChannel(name='C%d' % i, index=i)
rc.recordingchannelgroups = [rcg]
rcg.recordingchannels.append(rc)
units = [neo.Unit('U%d' % i) for i in range(n_units)]
rcg.units = units
block = neo.Block()
block.segments = segments
block.recordingchannelgroups = [rcg]
# Create synthetic data
for seg in segments:
feature_pos = np.random.randint(0, data_samples - feature_samples)
# Analog signals: Noise with a single sinewave feature
wave = 3 * np.sin(np.linspace(0, 2 * np.pi, feature_samples))
for rc in rcg.recordingchannels:
sig = np.random.randn(data_samples)
sig[feature_pos:feature_pos + feature_samples] += wave
signal = neo.AnalogSignal(sig * pq.mV, sampling_rate=1 * pq.kHz)
seg.analogsignals.append(signal)
rc.analogsignals.append(signal)
# Spike trains: Random spike times with elevated rate in short period
feature_time = feature_pos / data_samples
for u in units:
random_spikes = np.random.rand(20)
feature_spikes = np.random.rand(5) * feature_len + feature_time
spikes = np.hstack([random_spikes, feature_spikes])
train = neo.SpikeTrain(spikes * pq.s, 1 * pq.s)
seg.spiketrains.append(train)
u.spiketrains.append(train)
block.create_many_to_one_relationship()
return block
block = generate_block()
# In this example, we treat each segment in turn, averaging over the channels
# in each:
for seg in block.segments:
print("Analysing segment %d" % seg.index)
siglist = seg.analogsignals
time_points = siglist[0].times
avg = np.mean(siglist, axis=0) # Average over signals of Segment
plt.figure()
plt.plot(time_points, avg)
plt.title("Peak response in segment %d: %f" % (seg.index, avg.max()))
# The second alternative is spatial traversal of the data (by channel), with
# averaging over trials. For example, perhaps you wish to see which physical
# location produces the strongest response, and each stimulus was the same:
# We assume that our block has only 1 RecordingChannelGroup and each
# RecordingChannel only has 1 AnalogSignal.
rcg = block.recordingchannelgroups[0]
for rc in rcg.recordingchannels:
print("Analysing channel %d: %s" % (rc.index, rc.name))
siglist = rc.analogsignals
time_points = siglist[0].times
avg = np.mean(siglist, axis=0) # Average over signals of RecordingChannel
plt.figure()
plt.plot(time_points, avg)
plt.title("Average response on channel %d" % rc.index)
# There are three ways to access the spike train data: by Segment,
# by RecordingChannel or by Unit.
# By Segment. In this example, each Segment represents data from one trial,
# and we want a peristimulus time histogram (PSTH) for each trial from all
# Units combined:
for seg in block.segments:
print("Analysing segment %d" % seg.index)
stlist = [st - st.t_start for st in seg.spiketrains]
count, bins = np.histogram(np.hstack(stlist))
plt.figure()
plt.bar(bins[:-1], count, width=bins[1] - bins[0])
plt.title("PSTH in segment %d" % seg.index)
# By Unit. Now we can calculate the PSTH averaged over trials for each Unit:
for unit in block.list_units:
stlist = [st - st.t_start for st in unit.spiketrains]
count, bins = np.histogram(np.hstack(stlist))
plt.figure()
plt.bar(bins[:-1], count, width=bins[1] - bins[0])
plt.title("PSTH of unit %s" % unit.name)
# By RecordingChannelGroup. Here we calculate a PSTH averaged over trials by
# channel location, blending all Units:
for rcg in block.recordingchannelgroups:
stlist = []
for unit in rcg.units:
stlist.extend([st - st.t_start for st in unit.spiketrains])
count, bins = np.histogram(np.hstack(stlist))
plt.figure()
plt.bar(bins[:-1], count, width=bins[1] - bins[0])
plt.title("PSTH blend of recording channel group %s" % rcg.name)
plt.show()
| bsd-3-clause |
cpcloud/ibis | ibis/pandas/tests/test_core.py | 1 | 4872 | from typing import Any
import pandas as pd
import pandas.util.testing as tm
import pytest
from multipledispatch.conflict import ambiguities
import ibis
import ibis.common.exceptions as com
import ibis.expr.datatypes as dt
import ibis.expr.operations as ops
from ibis.pandas.client import PandasClient
from ibis.pandas.core import is_computable_input
from ibis.pandas.dispatch import execute_node, post_execute, pre_execute
pytestmark = pytest.mark.pandas
@pytest.fixture
def dataframe():
return pd.DataFrame(
{
'plain_int64': list(range(1, 4)),
'plain_strings': list('abc'),
'dup_strings': list('dad'),
}
)
@pytest.fixture
def core_client(dataframe):
return ibis.pandas.connect({'df': dataframe})
@pytest.fixture
def ibis_table(core_client):
return core_client.table('df')
@pytest.mark.parametrize('func', [execute_node, pre_execute, post_execute])
def test_no_execute_ambiguities(func):
assert not ambiguities(func.funcs)
def test_from_dataframe(dataframe, ibis_table, core_client):
t = ibis.pandas.from_dataframe(dataframe)
result = t.execute()
expected = ibis_table.execute()
tm.assert_frame_equal(result, expected)
t = ibis.pandas.from_dataframe(dataframe, name='foo')
expected = ibis_table.execute()
tm.assert_frame_equal(result, expected)
client = core_client
t = ibis.pandas.from_dataframe(dataframe, name='foo', client=client)
expected = ibis_table.execute()
tm.assert_frame_equal(result, expected)
def test_pre_execute_basic():
"""
Test that pre_execute has intercepted execution and provided its own
scope dict
"""
@pre_execute.register(ops.Add)
def pre_execute_test(op, *clients, scope=None, **kwargs):
return {op: 4}
one = ibis.literal(1)
expr = one + one
result = ibis.pandas.execute(expr)
assert result == 4
del pre_execute.funcs[(ops.Add,)]
pre_execute.reorder()
pre_execute._cache.clear()
def test_execute_parameter_only():
param = ibis.param('int64')
result = ibis.pandas.execute(param, params={param: 42})
assert result == 42
def test_missing_data_sources():
t = ibis.table([('a', 'string')])
expr = t.a.length()
with pytest.raises(com.UnboundExpressionError):
ibis.pandas.execute(expr)
def test_missing_data_on_custom_client():
class MyClient(PandasClient):
def table(self, name):
return ops.DatabaseTable(
name, ibis.schema([('a', 'int64')]), self
).to_expr()
con = MyClient({})
t = con.table('t')
with pytest.raises(
NotImplementedError,
match=(
'Could not find signature for execute_node: '
'<DatabaseTable, MyClient>'
),
):
con.execute(t)
def test_post_execute_called_on_joins(dataframe, core_client, ibis_table):
count = [0]
@post_execute.register(ops.InnerJoin, pd.DataFrame)
def tmp_left_join_exe(op, lhs, **kwargs):
count[0] += 1
return lhs
left = ibis_table
right = left.view()
join = left.join(right, 'plain_strings')[left.plain_int64]
result = join.execute()
assert result is not None
assert not result.empty
assert count[0] == 1
def test_is_computable_input():
class MyObject:
def __init__(self, value: float) -> None:
self.value = value
def __getattr__(self, name: str) -> Any:
return getattr(self.value, name)
def __hash__(self) -> int:
return hash((type(self), self.value))
def __eq__(self, other):
return (
isinstance(other, type(self))
and isinstance(self, type(other))
and self.value == other.value
)
def __float__(self) -> float:
return self.value
@execute_node.register(ops.Add, int, MyObject)
def add_int_my_object(op, left, right, **kwargs):
return left + right.value
# This multimethod must be implemented to play nicely with other value
# types like columns and literals. In other words, for a custom
# non-expression object to play nicely it must somehow map to one of the
# types in ibis/expr/datatypes.py
@dt.infer.register(MyObject)
def infer_my_object(_, **kwargs):
return dt.float64
@is_computable_input.register(MyObject)
def is_computable_input_my_object(_):
return True
one = ibis.literal(1)
two = MyObject(2.0)
assert is_computable_input(two)
three = one + two
four = three + 1
result = ibis.pandas.execute(four)
assert result == 4.0
del execute_node.funcs[ops.Add, int, MyObject]
execute_node.reorder()
execute_node._cache.clear()
del dt.infer.funcs[(MyObject,)]
dt.infer.reorder()
dt.infer._cache.clear()
| apache-2.0 |
NZRS/content-analysis | netflix.py | 2 | 3126 | from bs4 import BeautifulSoup
from urllib2 import quote
import unicodedata
import requests
import json
import glob
import pandas as pd
from collections import Counter  # used for the language tallies below
movie_list = []
for page in glob.glob('*.html'):
with open(page, 'r+') as f:
my_page = f.read()
my_soup = BeautifulSoup(my_page)
for div in my_soup.find_all('div', class_='lockup'):
try:
movie_list.append(div.img.get('alt'))
except:
movie_list.append('movie could not be extracted from page')
movie_list = ['movie could not be extracted from page' if movie is None else movie for movie in movie_list]
movie_list2 = []
for movie in movie_list:
try:
movie = quote(movie)
movie_list2.append(movie)
except:
try:
movie = unicodedata.normalize('NFKC', movie).encode('ascii','ignore')
movie = quote(movie)
movie_list2.append(movie)
except:
print movie
movie_list2.append('movie could not be processed')
all_movies_us = {}
for movie in movie_list2:
try:
query_url = 'http://www.omdbapi.com/?t=' + movie + '&y=&plot=full&r=json'
response = requests.get(query_url)
my_dict = json.loads(response.text)
all_movies_us[movie] = my_dict
except:
all_movies_us[movie] = 'No response'
print movie
# the summaries below refer to `all_movies`; point it at the catalogue built above
all_movies = all_movies_us
# movies/single year shows
years_dict = {}
counter = 0
for k,v in all_movies.iteritems():
try:
if len(v['Year']) == 4:
try:
years_dict[v['Year']] += 1
except:
years_dict[v['Year']] = 1
continue
except:
counter += 1
continue
print counter
my_frame = pd.DataFrame.from_dict(years_dict, orient = 'index')
my_frame.to_csv('single_years.csv')
counter=0
score_dict = {}
for k,v in all_movies.iteritems():
try:
if v['imdbRating'] != 'N/A':
score_dict[v['Title']] = v['imdbRating']
except:
counter +=1
continue
print counter
score_dict2 ={}
for title, score in score_dict.iteritems():
try:
score_dict2[title] = float(score)
except:
print score
score_dict = score_dict2
average_score = (sum(score_dict.values()))/len(score_dict)
top_25 = sorted(score_dict.items(), key=lambda kv: kv[1], reverse=True)[:25]  # top 25 titles by IMDb rating
print average_score
years = []
country = []
language =[]
actors = []
for movie, results in all_movies.iteritems():
try:
years.append(results['Year'])
except:
continue
try:
country.append(results['Country'])
except:
continue
try:
language.append(results['Language'])
except:
continue
try:
for actor in results['Actors'].split(','):
actors.append(actor)
except:
continue
# Ongoing shows
years_dict = {}
counter = 0
for k,v in all_movies.iteritems():
try:
print v['Year'][4]
except:
continue
# Languages
lang_list = []
for lang in language:
for x in lang.split(','):
lang_list.append(x)
lang_list
Counter(lang_list)
Counter(lang_list).most_common(10)
| agpl-3.0 |
rafaellehmkuhl/OpenCV-Python-GUI | CvPyGui/PlotContainer.py | 1 | 2407 | import pandas as pd
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QWidget, QLabel, QHBoxLayout,
QVBoxLayout, QPushButton, QSlider,
QComboBox)
from matplotlib.backends.backend_qt4agg import NavigationToolbar2QT as NavigationToolbar
from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.figure import Figure
from .FilterCvQtContainer import Filter
import random
class SinglePlotContainer(QWidget):
num_plots = 0
def __init__(self, parent=None):
super().__init__()
self.num_plots += 1
self.variable_df = pd.DataFrame()
self.figure = Figure() # don't use matplotlib.pyplot at all!
self.canvas = FigureCanvas(self.figure)
self.hLayout = QHBoxLayout(self)
self.dataConfigColumn = QVBoxLayout()
self.filtersColumn = QVBoxLayout()
self.hLayout.addLayout(self.dataConfigColumn)
self.hLayout.addWidget(self.canvas)
self.hLayout.addLayout(self.filtersColumn)
self.comboLoadVariable = QComboBox()
self.dataConfigColumn.addWidget(self.comboLoadVariable)
self.filter1 = Filter('Moving Average', 3, 30, 5, 1)
self.filtersColumn.addWidget(self.filter1)
# drawEvent = self.figure.canvas.mpl_connect('draw', self.updatePlot)
self.plotRandom()
def connectButtons(self):
self.comboLoadVariable.activated[str].connect(self.loadVariable)
def loadVariable(self, variable):
self.variable_df = self.parent().parent().original_df[variable]
self.plot()
def plot(self):
if self.num_plots != 0:
self.axes = self.figure.add_subplot(111, sharex=self.parent().parent().plots[0].axes)
else:
self.axes = self.figure.add_subplot(111)
self.axes.clear()
self.axes.plot(self.variable_df, '-')
self.canvas.draw()
def updatePlot(self):
ymax,ymin = self.axes.get_ylim()
self.axes.clear()
self.axes.set_ylim(ymax,ymin)
self.axes.plot(self.variable_df, '-')
self.canvas.draw()
def plotRandom(self):
''' plot some random stuff '''
data = [random.random() for i in range(10)]
self.axes = self.figure.add_subplot(111)
self.axes.clear()
self.axes.plot(data, '-')
self.canvas.draw()
| mit |
qifeigit/scikit-learn | examples/text/document_classification_20newsgroups.py | 222 | 10500 | """
======================================================
Classification of text documents using sparse features
======================================================
This is an example showing how scikit-learn can be used to classify documents
by topics using a bag-of-words approach. This example uses a scipy.sparse
matrix to store the features and demonstrates various classifiers that can
efficiently handle sparse matrices.
The dataset used in this example is the 20 newsgroups dataset. It will be
automatically downloaded, then cached.
The bar plot indicates the accuracy, training time (normalized) and test time
(normalized) of each classifier.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Olivier Grisel <olivier.grisel@ensta.org>
# Mathieu Blondel <mathieu@mblondel.org>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
import logging
import numpy as np
from optparse import OptionParser
import sys
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_20newsgroups
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import RidgeClassifier
from sklearn.pipeline import Pipeline
from sklearn.svm import LinearSVC
from sklearn.linear_model import SGDClassifier
from sklearn.linear_model import Perceptron
from sklearn.linear_model import PassiveAggressiveClassifier
from sklearn.naive_bayes import BernoulliNB, MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import NearestCentroid
from sklearn.ensemble import RandomForestClassifier
from sklearn.utils.extmath import density
from sklearn import metrics
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--report",
action="store_true", dest="print_report",
help="Print a detailed classification report.")
op.add_option("--chi2_select",
action="store", type="int", dest="select_chi2",
help="Select some number of features using a chi-squared test")
op.add_option("--confusion_matrix",
action="store_true", dest="print_cm",
help="Print the confusion matrix.")
op.add_option("--top10",
action="store_true", dest="print_top10",
help="Print ten most discriminative terms per class"
" for every classifier.")
op.add_option("--all_categories",
action="store_true", dest="all_categories",
help="Whether to use all categories or not.")
op.add_option("--use_hashing",
action="store_true",
help="Use a hashing vectorizer.")
op.add_option("--n_features",
action="store", type=int, default=2 ** 16,
help="n_features when using the hashing vectorizer.")
op.add_option("--filtered",
action="store_true",
help="Remove newsgroup information that is easily overfit: "
"headers, signatures, and quoting.")
(opts, args) = op.parse_args()
if len(args) > 0:
op.error("this script takes no arguments.")
sys.exit(1)
print(__doc__)
op.print_help()
print()
###############################################################################
# Load some categories from the training set
if opts.all_categories:
categories = None
else:
categories = [
'alt.atheism',
'talk.religion.misc',
'comp.graphics',
'sci.space',
]
if opts.filtered:
remove = ('headers', 'footers', 'quotes')
else:
remove = ()
print("Loading 20 newsgroups dataset for categories:")
print(categories if categories else "all")
data_train = fetch_20newsgroups(subset='train', categories=categories,
shuffle=True, random_state=42,
remove=remove)
data_test = fetch_20newsgroups(subset='test', categories=categories,
shuffle=True, random_state=42,
remove=remove)
print('data loaded')
categories = data_train.target_names # for case categories == None
def size_mb(docs):
return sum(len(s.encode('utf-8')) for s in docs) / 1e6
data_train_size_mb = size_mb(data_train.data)
data_test_size_mb = size_mb(data_test.data)
print("%d documents - %0.3fMB (training set)" % (
len(data_train.data), data_train_size_mb))
print("%d documents - %0.3fMB (test set)" % (
len(data_test.data), data_test_size_mb))
print("%d categories" % len(categories))
print()
# split a training set and a test set
y_train, y_test = data_train.target, data_test.target
print("Extracting features from the training data using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
vectorizer = HashingVectorizer(stop_words='english', non_negative=True,
n_features=opts.n_features)
X_train = vectorizer.transform(data_train.data)
else:
vectorizer = TfidfVectorizer(sublinear_tf=True, max_df=0.5,
stop_words='english')
X_train = vectorizer.fit_transform(data_train.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_train_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_train.shape)
print()
print("Extracting features from the test data using the same vectorizer")
t0 = time()
X_test = vectorizer.transform(data_test.data)
duration = time() - t0
print("done in %fs at %0.3fMB/s" % (duration, data_test_size_mb / duration))
print("n_samples: %d, n_features: %d" % X_test.shape)
print()
# mapping from integer feature name to original token string
if opts.use_hashing:
feature_names = None
else:
feature_names = vectorizer.get_feature_names()
if opts.select_chi2:
print("Extracting %d best features by a chi-squared test" %
opts.select_chi2)
t0 = time()
ch2 = SelectKBest(chi2, k=opts.select_chi2)
X_train = ch2.fit_transform(X_train, y_train)
X_test = ch2.transform(X_test)
if feature_names:
# keep selected feature names
feature_names = [feature_names[i] for i
in ch2.get_support(indices=True)]
print("done in %fs" % (time() - t0))
print()
if feature_names:
feature_names = np.asarray(feature_names)
def trim(s):
"""Trim string to fit on terminal (assuming 80-column display)"""
return s if len(s) <= 80 else s[:77] + "..."
###############################################################################
# Benchmark classifiers
def benchmark(clf):
print('_' * 80)
print("Training: ")
print(clf)
t0 = time()
clf.fit(X_train, y_train)
train_time = time() - t0
print("train time: %0.3fs" % train_time)
t0 = time()
pred = clf.predict(X_test)
test_time = time() - t0
print("test time: %0.3fs" % test_time)
score = metrics.accuracy_score(y_test, pred)
print("accuracy: %0.3f" % score)
if hasattr(clf, 'coef_'):
print("dimensionality: %d" % clf.coef_.shape[1])
print("density: %f" % density(clf.coef_))
if opts.print_top10 and feature_names is not None:
print("top 10 keywords per class:")
for i, category in enumerate(categories):
top10 = np.argsort(clf.coef_[i])[-10:]
print(trim("%s: %s"
% (category, " ".join(feature_names[top10]))))
print()
if opts.print_report:
print("classification report:")
print(metrics.classification_report(y_test, pred,
target_names=categories))
if opts.print_cm:
print("confusion matrix:")
print(metrics.confusion_matrix(y_test, pred))
print()
clf_descr = str(clf).split('(')[0]
return clf_descr, score, train_time, test_time
results = []
for clf, name in (
(RidgeClassifier(tol=1e-2, solver="lsqr"), "Ridge Classifier"),
(Perceptron(n_iter=50), "Perceptron"),
(PassiveAggressiveClassifier(n_iter=50), "Passive-Aggressive"),
(KNeighborsClassifier(n_neighbors=10), "kNN"),
(RandomForestClassifier(n_estimators=100), "Random forest")):
print('=' * 80)
print(name)
results.append(benchmark(clf))
for penalty in ["l2", "l1"]:
print('=' * 80)
print("%s penalty" % penalty.upper())
# Train Liblinear model
results.append(benchmark(LinearSVC(loss='l2', penalty=penalty,
dual=False, tol=1e-3)))
# Train SGD model
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty=penalty)))
# Train SGD with Elastic Net penalty
print('=' * 80)
print("Elastic-Net penalty")
results.append(benchmark(SGDClassifier(alpha=.0001, n_iter=50,
penalty="elasticnet")))
# Train NearestCentroid without threshold
print('=' * 80)
print("NearestCentroid (aka Rocchio classifier)")
results.append(benchmark(NearestCentroid()))
# Train sparse Naive Bayes classifiers
print('=' * 80)
print("Naive Bayes")
results.append(benchmark(MultinomialNB(alpha=.01)))
results.append(benchmark(BernoulliNB(alpha=.01)))
print('=' * 80)
print("LinearSVC with L1-based feature selection")
# The smaller C, the stronger the regularization.
# The more regularization, the more sparsity.
results.append(benchmark(Pipeline([
('feature_selection', LinearSVC(penalty="l1", dual=False, tol=1e-3)),
('classification', LinearSVC())
])))
# make some plots
indices = np.arange(len(results))
results = [[x[i] for x in results] for i in range(4)]
clf_names, score, training_time, test_time = results
training_time = np.array(training_time) / np.max(training_time)
test_time = np.array(test_time) / np.max(test_time)
plt.figure(figsize=(12, 8))
plt.title("Score")
plt.barh(indices, score, .2, label="score", color='r')
plt.barh(indices + .3, training_time, .2, label="training time", color='g')
plt.barh(indices + .6, test_time, .2, label="test time", color='b')
plt.yticks(())
plt.legend(loc='best')
plt.subplots_adjust(left=.25)
plt.subplots_adjust(top=.95)
plt.subplots_adjust(bottom=.05)
for i, c in zip(indices, clf_names):
plt.text(-.3, i, c)
plt.show()
| bsd-3-clause |
mattilyra/scikit-learn | benchmarks/bench_plot_omp_lars.py | 28 | 4471 | """Benchmarks of orthogonal matching pursuit (:ref:`OMP`) versus least angle
regression (:ref:`least_angle_regression`)
The input data is mostly low rank but has a fat, infinite tail.
"""
from __future__ import print_function
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path, orthogonal_mp
from sklearn.datasets.samples_generator import make_sparse_coded_signal
def compute_bench(samples_range, features_range):
it = 0
results = dict()
lars = np.empty((len(features_range), len(samples_range)))
lars_gram = lars.copy()
omp = lars.copy()
omp_gram = lars.copy()
max_it = len(samples_range) * len(features_range)
for i_s, n_samples in enumerate(samples_range):
for i_f, n_features in enumerate(features_range):
it += 1
n_informative = n_features / 10
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
# dataset_kwargs = {
# 'n_train_samples': n_samples,
# 'n_test_samples': 2,
# 'n_features': n_features,
# 'n_informative': n_informative,
# 'effective_rank': min(n_samples, n_features) / 10,
# #'effective_rank': None,
# 'bias': 0.0,
# }
dataset_kwargs = {
'n_samples': 1,
'n_components': n_features,
'n_features': n_samples,
'n_nonzero_coefs': n_informative,
'random_state': 0
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
y, X, _ = make_sparse_coded_signal(**dataset_kwargs)
X = np.asfortranarray(X)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, Gram=None, max_iter=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
lars[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (with Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=True,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp_gram[i_f, i_s] = delta
gc.collect()
print("benchmarking orthogonal_mp (without Gram):", end='')
sys.stdout.flush()
tstart = time()
orthogonal_mp(X, y, precompute=False,
n_nonzero_coefs=n_informative)
delta = time() - tstart
print("%0.3fs" % delta)
omp[i_f, i_s] = delta
results['time(LARS) / time(OMP)\n (w/ Gram)'] = (lars_gram / omp_gram)
results['time(LARS) / time(OMP)\n (w/o Gram)'] = (lars / omp)
return results
if __name__ == '__main__':
samples_range = np.linspace(1000, 5000, 5).astype(np.int)
features_range = np.linspace(1000, 5000, 5).astype(np.int)
results = compute_bench(samples_range, features_range)
max_time = max(np.max(t) for t in results.values())
import matplotlib.pyplot as plt
fig = plt.figure('scikit-learn OMP vs. LARS benchmark results')
for i, (label, timings) in enumerate(sorted(results.iteritems())):
ax = fig.add_subplot(1, 2, i+1)
vmax = max(1 - timings.min(), -1 + timings.max())
plt.matshow(timings, fignum=False, vmin=1 - vmax, vmax=1 + vmax)
ax.set_xticklabels([''] + map(str, samples_range))
ax.set_yticklabels([''] + map(str, features_range))
plt.xlabel('n_samples')
plt.ylabel('n_features')
plt.title(label)
plt.subplots_adjust(0.1, 0.08, 0.96, 0.98, 0.4, 0.63)
ax = plt.axes([0.1, 0.08, 0.8, 0.06])
plt.colorbar(cax=ax, orientation='horizontal')
plt.show()
| bsd-3-clause |
zuku1985/scikit-learn | sklearn/preprocessing/tests/test_imputation.py | 51 | 12300 |
import numpy as np
from scipy import sparse
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_false
from sklearn.preprocessing.imputation import Imputer
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn import tree
from sklearn.random_projection import sparse_random_matrix
def _check_statistics(X, X_true,
strategy, statistics, missing_values):
"""Utility function for testing imputation for a given strategy.
Test:
- along the two axes
- with dense and sparse arrays
Check that:
- the statistics (mean, median, mode) are correct
- the missing values are imputed correctly"""
err_msg = "Parameters: strategy = %s, missing_values = %s, " \
"axis = {0}, sparse = {1}" % (strategy, missing_values)
# Normal matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
X_trans = imputer.fit(X).transform(X.copy())
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, False))
assert_array_equal(X_trans, X_true, err_msg.format(0, False))
# Normal matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(X.transpose())
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform, X.copy().transpose())
else:
X_trans = imputer.transform(X.copy().transpose())
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, False))
# Sparse matrix, axis = 0
imputer = Imputer(missing_values, strategy=strategy, axis=0)
imputer.fit(sparse.csc_matrix(X))
X_trans = imputer.transform(sparse.csc_matrix(X.copy()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(imputer.statistics_, statistics,
err_msg.format(0, True))
assert_array_equal(X_trans, X_true, err_msg.format(0, True))
# Sparse matrix, axis = 1
imputer = Imputer(missing_values, strategy=strategy, axis=1)
imputer.fit(sparse.csc_matrix(X.transpose()))
if np.isnan(statistics).any():
assert_raises(ValueError, imputer.transform,
sparse.csc_matrix(X.copy().transpose()))
else:
X_trans = imputer.transform(sparse.csc_matrix(X.copy().transpose()))
if sparse.issparse(X_trans):
X_trans = X_trans.toarray()
assert_array_equal(X_trans, X_true.transpose(),
err_msg.format(1, True))
def test_imputation_shape():
# Verify the shapes of the imputed matrix for different strategies.
X = np.random.randn(10, 2)
X[::2] = np.nan
for strategy in ['mean', 'median', 'most_frequent']:
imputer = Imputer(strategy=strategy)
X_imputed = imputer.fit_transform(X)
assert_equal(X_imputed.shape, (10, 2))
X_imputed = imputer.fit_transform(sparse.csr_matrix(X))
assert_equal(X_imputed.shape, (10, 2))
def test_imputation_mean_median_only_zero():
# Test imputation using the mean and median strategies, when
# missing_values == 0.
X = np.array([
[np.nan, 0, 0, 0, 5],
[np.nan, 1, 0, np.nan, 3],
[np.nan, 2, 0, 0, 0],
[np.nan, 6, 0, 5, 13],
])
X_imputed_mean = np.array([
[3, 5],
[1, 3],
[2, 7],
[6, 13],
])
statistics_mean = [np.nan, 3, np.nan, np.nan, 7]
# Behaviour of median with NaN is undefined, e.g. different results in
# np.median and np.ma.median
X_for_median = X[:, [0, 1, 2, 4]]
X_imputed_median = np.array([
[2, 5],
[1, 3],
[2, 5],
[6, 13],
])
statistics_median = [np.nan, 2, np.nan, 5]
_check_statistics(X, X_imputed_mean, "mean", statistics_mean, 0)
_check_statistics(X_for_median, X_imputed_median, "median",
statistics_median, 0)
def safe_median(arr, *args, **kwargs):
# np.median([]) raises a TypeError for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.median(arr, *args, **kwargs)
def safe_mean(arr, *args, **kwargs):
# np.mean([]) raises a RuntimeWarning for numpy >= 1.10.1
length = arr.size if hasattr(arr, 'size') else len(arr)
return np.nan if length == 0 else np.mean(arr, *args, **kwargs)
def test_imputation_mean_median():
# Test imputation using the mean and median strategies, when
# missing_values != 0.
rng = np.random.RandomState(0)
dim = 10
dec = 10
shape = (dim * dim, dim + dec)
zeros = np.zeros(shape[0])
values = np.arange(1, shape[0] + 1)
values[4::2] = - values[4::2]
tests = [("mean", "NaN", lambda z, v, p: safe_mean(np.hstack((z, v)))),
("mean", 0, lambda z, v, p: np.mean(v)),
("median", "NaN", lambda z, v, p: safe_median(np.hstack((z, v)))),
("median", 0, lambda z, v, p: np.median(v))]
for strategy, test_missing_values, true_value_fun in tests:
X = np.empty(shape)
X_true = np.empty(shape)
true_statistics = np.empty(shape[1])
# Create a matrix X with columns
# - with only zeros,
# - with only missing values
# - with zeros, missing values and values
# And a matrix X_true containing all true values
for j in range(shape[1]):
nb_zeros = (j - dec + 1 > 0) * (j - dec + 1) * (j - dec + 1)
nb_missing_values = max(shape[0] + dec * dec
- (j + dec) * (j + dec), 0)
nb_values = shape[0] - nb_zeros - nb_missing_values
z = zeros[:nb_zeros]
p = np.repeat(test_missing_values, nb_missing_values)
v = values[rng.permutation(len(values))[:nb_values]]
true_statistics[j] = true_value_fun(z, v, p)
# Create the columns
X[:, j] = np.hstack((v, z, p))
if 0 == test_missing_values:
X_true[:, j] = np.hstack((v,
np.repeat(
true_statistics[j],
nb_missing_values + nb_zeros)))
else:
X_true[:, j] = np.hstack((v,
z,
np.repeat(true_statistics[j],
nb_missing_values)))
# Shuffle them the same way
np.random.RandomState(j).shuffle(X[:, j])
np.random.RandomState(j).shuffle(X_true[:, j])
# Mean doesn't support columns containing NaNs, median does
if strategy == "median":
cols_to_keep = ~np.isnan(X_true).any(axis=0)
else:
cols_to_keep = ~np.isnan(X_true).all(axis=0)
X_true = X_true[:, cols_to_keep]
_check_statistics(X, X_true, strategy,
true_statistics, test_missing_values)
def test_imputation_median_special_cases():
# Test median imputation with sparse boundary cases
X = np.array([
[0, np.nan, np.nan], # odd: implicit zero
[5, np.nan, np.nan], # odd: explicit nonzero
[0, 0, np.nan], # even: average two zeros
[-5, 0, np.nan], # even: avg zero and neg
[0, 5, np.nan], # even: avg zero and pos
[4, 5, np.nan], # even: avg nonzeros
[-4, -5, np.nan], # even: avg negatives
[-1, 2, np.nan], # even: crossing neg and pos
]).transpose()
X_imputed_median = np.array([
[0, 0, 0],
[5, 5, 5],
[0, 0, 0],
[-5, 0, -2.5],
[0, 5, 2.5],
[4, 5, 4.5],
[-4, -5, -4.5],
[-1, 2, .5],
]).transpose()
statistics_median = [0, 5, 0, -2.5, 2.5, 4.5, -4.5, .5]
_check_statistics(X, X_imputed_median, "median",
statistics_median, 'NaN')
def test_imputation_most_frequent():
# Test imputation using the most-frequent strategy.
X = np.array([
[-1, -1, 0, 5],
[-1, 2, -1, 3],
[-1, 1, 3, -1],
[-1, 2, 3, 7],
])
X_true = np.array([
[2, 0, 5],
[2, 3, 3],
[1, 3, 3],
[2, 3, 7],
])
# scipy.stats.mode, used in Imputer, doesn't return the first most
# frequent as promised in the doc but the lowest most frequent. When this
# test will fail after an update of scipy, Imputer will need to be updated
# to be consistent with the new (correct) behaviour
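    # e.g. scipy.stats.mode([2, 2, 1, 1]) reports 1 (the lowest tied value), not 2 (the first seen)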
_check_statistics(X, X_true, "most_frequent", [np.nan, 2, 3, 3], -1)
def test_imputation_pipeline_grid_search():
# Test imputation within a pipeline + gridsearch.
pipeline = Pipeline([('imputer', Imputer(missing_values=0)),
('tree', tree.DecisionTreeRegressor(random_state=0))])
parameters = {
'imputer__strategy': ["mean", "median", "most_frequent"],
'imputer__axis': [0, 1]
}
l = 100
X = sparse_random_matrix(l, l, density=0.10)
Y = sparse_random_matrix(l, 1, density=0.10).toarray()
gs = GridSearchCV(pipeline, parameters)
gs.fit(X, Y)
def test_imputation_pickle():
# Test for pickling imputers.
import pickle
l = 100
X = sparse_random_matrix(l, l, density=0.10)
for strategy in ["mean", "median", "most_frequent"]:
imputer = Imputer(missing_values=0, strategy=strategy)
imputer.fit(X)
imputer_pickled = pickle.loads(pickle.dumps(imputer))
assert_array_equal(imputer.transform(X.copy()),
imputer_pickled.transform(X.copy()),
"Fail to transform the data after pickling "
"(strategy = %s)" % (strategy))
def test_imputation_copy():
# Test imputation with copy
X_orig = sparse_random_matrix(5, 5, density=0.75, random_state=0)
# copy=True, dense => copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_false(np.all(X == Xt))
# copy=True, sparse csr => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean", copy=True)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, dense => no copy
X = X_orig.copy().toarray()
imputer = Imputer(missing_values=0, strategy="mean", copy=False)
Xt = imputer.fit(X).transform(X)
Xt[0, 0] = -1
assert_array_equal(X, Xt)
# copy=False, sparse csr, axis=1 => no copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_equal(X.data, Xt.data)
# copy=False, sparse csc, axis=0 => no copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_array_equal(X.data, Xt.data)
# copy=False, sparse csr, axis=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=0)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csc, axis=1 => copy
X = X_orig.copy().tocsc()
imputer = Imputer(missing_values=X.data[0], strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
Xt.data[0] = -1
assert_false(np.all(X.data == Xt.data))
# copy=False, sparse csr, axis=1, missing_values=0 => copy
X = X_orig.copy()
imputer = Imputer(missing_values=0, strategy="mean",
copy=False, axis=1)
Xt = imputer.fit(X).transform(X)
assert_false(sparse.issparse(Xt))
# Note: If X is sparse and if missing_values=0, then a (dense) copy of X is
# made, even if copy=False.
| bsd-3-clause |
Vettejeep/Boulder_County_Home_Prices | value_vs_price.py | 1 | 4101 | # Simply uses the assessors estimate to predict price, so we can see how much better the machine learning models are.
# requires data from Assemble_Data.py
# Copyright (C) 2017 Kevin Maher
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
# Data for this project may be the property of the Boulder County Assessor's office,
# they gave me free access as a student but were not clear about any restrictions regarding
# sharing the URL from which the data was downloaded.
# The data has been pre-processed from xlsx to csv files because OpenOffice had
# problems with the xlsx files.
# Data was pre-processed by a data setup script, Assemble_Data.py which produced the
# file '$working_data_5c.csv'
import pandas as pd
import numpy as np
from math import sqrt
from sklearn.model_selection import train_test_split
from sklearn.model_selection import cross_val_score
from sklearn.metrics import mean_squared_error
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.ensemble import RandomForestRegressor, ExtraTreesRegressor
from sklearn.ensemble import GradientBoostingRegressor, AdaBoostRegressor
from sklearn.linear_model import LinearRegression
# https://stats.stackexchange.com/questions/58391/mean-absolute-percentage-error-mape-in-scikit-learn
def mean_absolute_percentage_error(y_true, y_pred):
return np.mean(np.abs((y_true - y_pred) / y_true)) * 100
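# Worked check with hypothetical values: y_true = [100, 200], y_pred = [110, 180]
# gives mean(10/100, 20/200) * 100 = 10.0 percent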
working_df = pd.read_csv('Data\\$working_data_5c.csv')
# eliminate some outliers, homes above an estimated value of $2 million are especially difficult to model
# with the available data
working_df = working_df[working_df['Age_Yrs'] > 0]
working_df = working_df[working_df['totalActualVal'] <= 2000000]
y = working_df['price']
columns = working_df.columns[2:]
X = working_df.drop(columns, axis=1) # , 'totalActualVal'
X = X.drop(labels=['price'], axis=1)
# 70/30 split of data into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=245)
# determine metrics
gradient, intercept, r_value, p_value, std_err = stats.linregress(X_test['totalActualVal'], y_test)
print 'Gradient: %.4f' % gradient
print 'R Value: %.4f' % r_value
print 'R-Squared: %.4f' % r_value ** 2
# adjusted R-squared - https://www.easycalculation.com/statistics/learn-adjustedr2.php
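# i.e. R^2_adj = 1 - (1 - R^2) * (n - 1) / (n - p - 1), where n = len(y_test)
# and p = X_train.shape[1] (a single predictor here: the assessor's estimate)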
r_sq_adj = 1 - ((1 - r_value ** 2) * (len(y_test) - 1) / (len(y_test) - X_train.shape[1] - 1))
print 'R-Squared Adjusted: %.4f' % r_sq_adj
mape = mean_absolute_percentage_error(y_test, X_test['totalActualVal'])
print 'MAPE: %.4f' % mape
# plot with regression lines, one for actual data, one to represent ideal answer
z = np.polyfit(X_test['totalActualVal'], y_test, 1)
print 'z'
print z
y_poly = [z[0] * x + z[1] for x in range(int(intercept), 3100000 + int(intercept), 100000)]
x_poly = [x for x in range(0, 3100000, 100000)]
y_perfect = [x for x in range(0, 3100000, 100000)]
plt.figure(0)
plt.plot(X_test, y_test, ".")
plt.plot(x_poly, y_poly, "-")
plt.plot(x_poly, y_perfect, "-")
plt.xlim(0, 4000000)
plt.ylim(0, 4000000)
plt.xlabel("Est Price")
plt.ylabel("Actual Price")
plt.title("Estimated vs. Actual Sales Price")
plt.show()
plt.close()
# delta_price = pd.Series((X_test['totalActualVal'] / y_test * 100.0) - 100.0)
# delta_price.to_csv('Data\\delta_price_basic.csv', index=False)
print 'min price, actual: %.2f' % np.min(y_test)
print 'min price, assessor estimate: %.2f' % np.min(X_test['totalActualVal'])
| gpl-3.0 |
keras-team/keras-io | examples/nlp/semantic_similarity_with_bert.py | 1 | 11604 | """
Title: Semantic Similarity with BERT
Author: [Mohamad Merchant](https://twitter.com/mohmadmerchant1)
Date created: 2020/08/15
Last modified: 2020/08/29
Description: Natural Language Inference by fine-tuning BERT model on SNLI Corpus.
"""
"""
## Introduction
Semantic Similarity is the task of determining how similar
two sentences are, in terms of what they mean.
This example demonstrates the use of SNLI (Stanford Natural Language Inference) Corpus
to predict sentence semantic similarity with Transformers.
We will fine-tune a BERT model that takes two sentences as inputs
and that outputs a similarity score for these two sentences.
### References
* [BERT](https://arxiv.org/pdf/1810.04805.pdf)
* [SNLI](https://nlp.stanford.edu/projects/snli/)
"""
"""
## Setup
Note: install HuggingFace `transformers` via `pip install transformers` (version >= 2.11.0).
"""
import numpy as np
import pandas as pd
import tensorflow as tf
import transformers
"""
## Configuration
"""
max_length = 128 # Maximum length of input sentence to the model.
batch_size = 32
epochs = 2
# Labels in our dataset.
labels = ["contradiction", "entailment", "neutral"]
"""
## Load the Data
"""
"""shell
curl -LO https://raw.githubusercontent.com/MohamadMerchant/SNLI/master/data.tar.gz
tar -xvzf data.tar.gz
"""
# There are more than 550k samples in total; we will use 100k for this example.
train_df = pd.read_csv("SNLI_Corpus/snli_1.0_train.csv", nrows=100000)
valid_df = pd.read_csv("SNLI_Corpus/snli_1.0_dev.csv")
test_df = pd.read_csv("SNLI_Corpus/snli_1.0_test.csv")
# Shape of the data
print(f"Total train samples : {train_df.shape[0]}")
print(f"Total validation samples: {valid_df.shape[0]}")
print(f"Total test samples: {valid_df.shape[0]}")
"""
Dataset Overview:
- sentence1: The premise caption that was supplied to the author of the pair.
- sentence2: The hypothesis caption that was written by the author of the pair.
- similarity: This is the label chosen by the majority of annotators.
Where no majority exists, the label "-" is used (we will skip such samples here).
Here are the "similarity" label values in our dataset:
- Contradiction: The sentences share no similarity.
- Entailment: The sentences have similar meaning.
- Neutral: The sentences are neutral.
"""
"""
Let's look at one sample from the dataset:
"""
print(f"Sentence1: {train_df.loc[1, 'sentence1']}")
print(f"Sentence2: {train_df.loc[1, 'sentence2']}")
print(f"Similarity: {train_df.loc[1, 'similarity']}")
"""
## Preprocessing
"""
# We have some NaN entries in our train data, we will simply drop them.
print("Number of missing values")
print(train_df.isnull().sum())
train_df.dropna(axis=0, inplace=True)
"""
Distribution of our training targets.
"""
print("Train Target Distribution")
print(train_df.similarity.value_counts())
"""
Distribution of our validation targets.
"""
print("Validation Target Distribution")
print(valid_df.similarity.value_counts())
"""
The value "-" appears as part of our training and validation targets.
We will skip these samples.
"""
train_df = (
train_df[train_df.similarity != "-"]
.sample(frac=1.0, random_state=42)
.reset_index(drop=True)
)
valid_df = (
valid_df[valid_df.similarity != "-"]
.sample(frac=1.0, random_state=42)
.reset_index(drop=True)
)
"""
One-hot encode training, validation, and test labels.
"""
train_df["label"] = train_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_train = tf.keras.utils.to_categorical(train_df.label, num_classes=3)
valid_df["label"] = valid_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_val = tf.keras.utils.to_categorical(valid_df.label, num_classes=3)
test_df["label"] = test_df["similarity"].apply(
lambda x: 0 if x == "contradiction" else 1 if x == "entailment" else 2
)
y_test = tf.keras.utils.to_categorical(test_df.label, num_classes=3)
"""
## Create a custom data generator
"""
class BertSemanticDataGenerator(tf.keras.utils.Sequence):
"""Generates batches of data.
Args:
sentence_pairs: Array of premise and hypothesis input sentences.
labels: Array of labels.
batch_size: Integer batch size.
shuffle: boolean, whether to shuffle the data.
        include_targets: boolean, whether to include the labels.
Returns:
Tuples `([input_ids, attention_mask, `token_type_ids], labels)`
(or just `[input_ids, attention_mask, `token_type_ids]`
if `include_targets=False`)
"""
def __init__(
self,
sentence_pairs,
labels,
batch_size=batch_size,
shuffle=True,
include_targets=True,
):
self.sentence_pairs = sentence_pairs
self.labels = labels
self.shuffle = shuffle
self.batch_size = batch_size
self.include_targets = include_targets
# Load our BERT Tokenizer to encode the text.
# We will use base-base-uncased pretrained model.
self.tokenizer = transformers.BertTokenizer.from_pretrained(
"bert-base-uncased", do_lower_case=True
)
self.indexes = np.arange(len(self.sentence_pairs))
self.on_epoch_end()
def __len__(self):
# Denotes the number of batches per epoch.
return len(self.sentence_pairs) // self.batch_size
def __getitem__(self, idx):
# Retrieves the batch of index.
indexes = self.indexes[idx * self.batch_size : (idx + 1) * self.batch_size]
sentence_pairs = self.sentence_pairs[indexes]
# With BERT tokenizer's batch_encode_plus batch of both the sentences are
# encoded together and separated by [SEP] token.
encoded = self.tokenizer.batch_encode_plus(
sentence_pairs.tolist(),
add_special_tokens=True,
max_length=max_length,
return_attention_mask=True,
return_token_type_ids=True,
pad_to_max_length=True,
return_tensors="tf",
)
# Convert batch of encoded features to numpy array.
input_ids = np.array(encoded["input_ids"], dtype="int32")
attention_masks = np.array(encoded["attention_mask"], dtype="int32")
token_type_ids = np.array(encoded["token_type_ids"], dtype="int32")
# Set to true if data generator is used for training/validation.
if self.include_targets:
labels = np.array(self.labels[indexes], dtype="int32")
return [input_ids, attention_masks, token_type_ids], labels
else:
return [input_ids, attention_masks, token_type_ids]
def on_epoch_end(self):
# Shuffle indexes after each epoch if shuffle is set to True.
if self.shuffle:
np.random.RandomState(42).shuffle(self.indexes)
"""
## Build the model
"""
# Create the model under a distribution strategy scope.
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
# Encoded token ids from BERT tokenizer.
input_ids = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="input_ids"
)
# Attention masks indicates to the model which tokens should be attended to.
attention_masks = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="attention_masks"
)
# Token type ids are binary masks identifying different sequences in the model.
token_type_ids = tf.keras.layers.Input(
shape=(max_length,), dtype=tf.int32, name="token_type_ids"
)
# Loading pretrained BERT model.
bert_model = transformers.TFBertModel.from_pretrained("bert-base-uncased")
# Freeze the BERT model to reuse the pretrained features without modifying them.
bert_model.trainable = False
sequence_output, pooled_output = bert_model(
input_ids, attention_mask=attention_masks, token_type_ids=token_type_ids
)
# Add trainable layers on top of frozen layers to adapt the pretrained features on the new data.
bi_lstm = tf.keras.layers.Bidirectional(
tf.keras.layers.LSTM(64, return_sequences=True)
)(sequence_output)
# Applying hybrid pooling approach to bi_lstm sequence output.
avg_pool = tf.keras.layers.GlobalAveragePooling1D()(bi_lstm)
max_pool = tf.keras.layers.GlobalMaxPooling1D()(bi_lstm)
concat = tf.keras.layers.concatenate([avg_pool, max_pool])
dropout = tf.keras.layers.Dropout(0.3)(concat)
output = tf.keras.layers.Dense(3, activation="softmax")(dropout)
model = tf.keras.models.Model(
inputs=[input_ids, attention_masks, token_type_ids], outputs=output
)
model.compile(
optimizer=tf.keras.optimizers.Adam(),
loss="categorical_crossentropy",
metrics=["acc"],
)
print(f"Strategy: {strategy}")
model.summary()
"""
Create train and validation data generators
"""
train_data = BertSemanticDataGenerator(
train_df[["sentence1", "sentence2"]].values.astype("str"),
y_train,
batch_size=batch_size,
shuffle=True,
)
valid_data = BertSemanticDataGenerator(
valid_df[["sentence1", "sentence2"]].values.astype("str"),
y_val,
batch_size=batch_size,
shuffle=False,
)
"""
## Train the Model
Training is done only for the top layers to perform "feature extraction",
which will allow the model to use the representations of the pretrained model.
"""
history = model.fit(
train_data,
validation_data=valid_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)
"""
## Fine-tuning
This step must only be performed after the feature extraction model has
been trained to convergence on the new data.
This is an optional last step where `bert_model` is unfrozen and retrained
with a very low learning rate. This can deliver meaningful improvement by
incrementally adapting the pretrained features to the new data.
"""
# Unfreeze the bert_model.
bert_model.trainable = True
# Recompile the model to make the change effective.
model.compile(
optimizer=tf.keras.optimizers.Adam(1e-5),
loss="categorical_crossentropy",
metrics=["accuracy"],
)
model.summary()
"""
## Train the entire model end-to-end
"""
history = model.fit(
train_data,
validation_data=valid_data,
epochs=epochs,
use_multiprocessing=True,
workers=-1,
)
"""
## Evaluate model on the test set
"""
test_data = BertSemanticDataGenerator(
test_df[["sentence1", "sentence2"]].values.astype("str"),
y_test,
batch_size=batch_size,
shuffle=False,
)
model.evaluate(test_data, verbose=1)
"""
## Inference on custom sentences
"""
def check_similarity(sentence1, sentence2):
sentence_pairs = np.array([[str(sentence1), str(sentence2)]])
test_data = BertSemanticDataGenerator(
sentence_pairs, labels=None, batch_size=1, shuffle=False, include_targets=False,
)
proba = model.predict(test_data)[0]
idx = np.argmax(proba)
proba = f"{proba[idx]: .2f}%"
pred = labels[idx]
return pred, proba
"""
Check results on some example sentence pairs.
"""
sentence1 = "Two women are observing something together."
sentence2 = "Two women are standing with their eyes closed."
check_similarity(sentence1, sentence2)
"""
Check results on some example sentence pairs.
"""
sentence1 = "A smiling costumed woman is holding an umbrella"
sentence2 = "A happy woman in a fairy costume holds an umbrella"
check_similarity(sentence1, sentence2)
"""
Check results on some example sentence pairs
"""
sentence1 = "A soccer game with multiple males playing"
sentence2 = "Some men are playing a sport"
check_similarity(sentence1, sentence2)
| apache-2.0 |
parthea/pydatalab | legacy_tests/kernel/utils_tests.py | 2 | 10847 | # Copyright 2015 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed under the License
# is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions and limitations under
# the License.
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import range
import datetime as dt
import collections
import mock
from oauth2client.client import AccessTokenCredentials
import pandas
import unittest
# import Python so we can mock the parts we need to here.
import IPython
import IPython.core.magic
IPython.core.magic.register_line_cell_magic = mock.Mock()
IPython.core.magic.register_line_magic = mock.Mock()
IPython.core.magic.register_cell_magic = mock.Mock()
IPython.get_ipython = mock.Mock()
import datalab.bigquery # noqa: E402
import datalab.context # noqa: E402
import datalab.utils.commands # noqa: E402
class TestCases(unittest.TestCase):
@staticmethod
def _get_expected_cols():
cols = [
{'type': 'number', 'id': 'Column1', 'label': 'Column1'},
{'type': 'number', 'id': 'Column2', 'label': 'Column2'},
{'type': 'string', 'id': 'Column3', 'label': 'Column3'},
{'type': 'boolean', 'id': 'Column4', 'label': 'Column4'},
{'type': 'number', 'id': 'Column5', 'label': 'Column5'},
{'type': 'datetime', 'id': 'Column6', 'label': 'Column6'}
]
return cols
@staticmethod
def _timestamp(d):
return (d - dt.datetime(1970, 1, 1)).total_seconds()
@staticmethod
def _get_raw_rows():
rows = [
{'f': [
{'v': 1}, {'v': 2}, {'v': '3'}, {'v': 'true'}, {'v': 0.0},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 1))}
]},
{'f': [
{'v': 11}, {'v': 12}, {'v': '13'}, {'v': 'false'}, {'v': 0.2},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 2))}
]},
{'f': [
{'v': 21}, {'v': 22}, {'v': '23'}, {'v': 'true'}, {'v': 0.3},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 3))}
]},
{'f': [
{'v': 31}, {'v': 32}, {'v': '33'}, {'v': 'false'}, {'v': 0.4},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 4))}
]},
{'f': [
{'v': 41}, {'v': 42}, {'v': '43'}, {'v': 'true'}, {'v': 0.5},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 5))}
]},
{'f': [
{'v': 51}, {'v': 52}, {'v': '53'}, {'v': 'true'}, {'v': 0.6},
{'v': TestCases._timestamp(dt.datetime(2000, 1, 6))}
]}
]
return rows
@staticmethod
def _get_expected_rows():
rows = [
{'c': [
{'v': 1}, {'v': 2}, {'v': '3'}, {'v': True}, {'v': 0.0}, {'v': dt.datetime(2000, 1, 1)}
]},
{'c': [
{'v': 11}, {'v': 12}, {'v': '13'}, {'v': False}, {'v': 0.2}, {'v': dt.datetime(2000, 1, 2)}
]},
{'c': [
{'v': 21}, {'v': 22}, {'v': '23'}, {'v': True}, {'v': 0.3}, {'v': dt.datetime(2000, 1, 3)}
]},
{'c': [
{'v': 31}, {'v': 32}, {'v': '33'}, {'v': False}, {'v': 0.4}, {'v': dt.datetime(2000, 1, 4)}
]},
{'c': [
{'v': 41}, {'v': 42}, {'v': '43'}, {'v': True}, {'v': 0.5}, {'v': dt.datetime(2000, 1, 5)}
]},
{'c': [
{'v': 51}, {'v': 52}, {'v': '53'}, {'v': True}, {'v': 0.6}, {'v': dt.datetime(2000, 1, 6)}
]}
]
return rows
@staticmethod
def _get_test_data_as_list_of_dicts():
test_data = [
{'Column1': 1, 'Column2': 2, 'Column3': '3',
'Column4': True, 'Column5': 0.0, 'Column6': dt.datetime(2000, 1, 1)},
{'Column1': 11, 'Column2': 12, 'Column3': '13',
'Column4': False, 'Column5': 0.2, 'Column6': dt.datetime(2000, 1, 2)},
{'Column1': 21, 'Column2': 22, 'Column3': '23',
'Column4': True, 'Column5': 0.3, 'Column6': dt.datetime(2000, 1, 3)},
{'Column1': 31, 'Column2': 32, 'Column3': '33',
'Column4': False, 'Column5': 0.4, 'Column6': dt.datetime(2000, 1, 4)},
{'Column1': 41, 'Column2': 42, 'Column3': '43',
'Column4': True, 'Column5': 0.5, 'Column6': dt.datetime(2000, 1, 5)},
{'Column1': 51, 'Column2': 52, 'Column3': '53',
'Column4': True, 'Column5': 0.6, 'Column6': dt.datetime(2000, 1, 6)}
]
# Use OrderedDicts to make testing the result easier.
for i in range(0, len(test_data)):
test_data[i] = collections.OrderedDict(sorted(list(test_data[i].items()), key=lambda t: t[0]))
return test_data
def test_get_data_from_list_of_dicts(self):
self._test_get_data(TestCases._get_test_data_as_list_of_dicts(), TestCases._get_expected_cols(),
TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils._get_data_from_list_of_dicts)
self._test_get_data(TestCases._get_test_data_as_list_of_dicts(), TestCases._get_expected_cols(),
TestCases._get_expected_rows(), 6, datalab.utils.commands._utils.get_data)
def test_get_data_from_list_of_lists(self):
test_data = [
[1, 2, '3', True, 0.0, dt.datetime(2000, 1, 1)],
[11, 12, '13', False, 0.2, dt.datetime(2000, 1, 2)],
[21, 22, '23', True, 0.3, dt.datetime(2000, 1, 3)],
[31, 32, '33', False, 0.4, dt.datetime(2000, 1, 4)],
[41, 42, '43', True, 0.5, dt.datetime(2000, 1, 5)],
[51, 52, '53', True, 0.6, dt.datetime(2000, 1, 6)],
]
self._test_get_data(test_data, TestCases._get_expected_cols(), TestCases._get_expected_rows(),
6, datalab.utils.commands._utils._get_data_from_list_of_lists)
self._test_get_data(test_data, TestCases._get_expected_cols(), TestCases._get_expected_rows(),
6, datalab.utils.commands._utils.get_data)
def test_get_data_from_dataframe(self):
df = pandas.DataFrame(self._get_test_data_as_list_of_dicts())
self._test_get_data(df, TestCases._get_expected_cols(), TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils._get_data_from_dataframe)
self._test_get_data(df, TestCases._get_expected_cols(), TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils.get_data)
@mock.patch('datalab.bigquery._api.Api.tabledata_list')
@mock.patch('datalab.bigquery._table.Table.exists')
@mock.patch('datalab.bigquery._api.Api.tables_get')
@mock.patch('datalab.context._context.Context.default')
def test_get_data_from_table(self, mock_context_default, mock_api_tables_get,
mock_table_exists, mock_api_tabledata_list):
data = TestCases._get_expected_rows()
mock_context_default.return_value = TestCases._create_context()
mock_api_tables_get.return_value = {
'numRows': len(data),
'schema': {
'fields': [
{'name': 'Column1', 'type': 'INTEGER'},
{'name': 'Column2', 'type': 'INTEGER'},
{'name': 'Column3', 'type': 'STRING'},
{'name': 'Column4', 'type': 'BOOLEAN'},
{'name': 'Column5', 'type': 'FLOAT'},
{'name': 'Column6', 'type': 'TIMESTAMP'}
]
}
}
mock_table_exists.return_value = True
raw_data = self._get_raw_rows()
def tabledata_list(*args, **kwargs):
start_index = kwargs['start_index']
max_results = kwargs['max_results']
if max_results < 0:
max_results = len(data)
return {'rows': raw_data[start_index:start_index + max_results]}
mock_api_tabledata_list.side_effect = tabledata_list
t = datalab.bigquery.Table('foo.bar')
self._test_get_data(t, TestCases._get_expected_cols(), TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils._get_data_from_table)
self._test_get_data(t, TestCases._get_expected_cols(), TestCases._get_expected_rows(), 6,
datalab.utils.commands._utils.get_data)
def test_get_data_from_empty_list(self):
self._test_get_data([], [], [], 0, datalab.utils.commands._utils.get_data)
def test_get_data_from_malformed_list(self):
with self.assertRaises(Exception) as error:
self._test_get_data(['foo', 'bar'], [], [], 0, datalab.utils.commands._utils.get_data)
self.assertEquals('To get tabular data from a list it must contain dictionaries or lists.',
str(error.exception))
def _test_get_data(self, test_data, cols, rows, expected_count, fn):
self.maxDiff = None
data, count = fn(test_data)
self.assertEquals(expected_count, count)
self.assertEquals({'cols': cols, 'rows': rows}, data)
# Test first_row. Note that count must be set in this case so we use a value greater than the
# data set size.
for first in range(0, 6):
data, count = fn(test_data, first_row=first, count=10)
self.assertEquals(expected_count, count)
self.assertEquals({'cols': cols, 'rows': rows[first:]}, data)
# Test first_row + count
for first in range(0, 6):
data, count = fn(test_data, first_row=first, count=2)
self.assertEquals(expected_count, count)
self.assertEquals({'cols': cols, 'rows': rows[first:first + 2]}, data)
# Test subsets of columns
# No columns
data, count = fn(test_data, fields=[])
self.assertEquals({'cols': [], 'rows': [{'c': []}] * expected_count}, data)
# Single column
data, count = fn(test_data, fields=['Column3'])
if expected_count == 0:
return
self.assertEquals({'cols': [cols[2]],
'rows': [{'c': [row['c'][2]]} for row in rows]}, data)
# Multi-columns
data, count = fn(test_data, fields=['Column1', 'Column3', 'Column6'])
self.assertEquals({'cols': [cols[0], cols[2], cols[5]],
'rows': [{'c': [row['c'][0], row['c'][2], row['c'][5]]} for row in rows]},
data)
# Switch order
data, count = fn(test_data, fields=['Column3', 'Column1'])
self.assertEquals({'cols': [cols[2], cols[0]],
'rows': [{'c': [row['c'][2], row['c'][0]]} for row in rows]}, data)
# Select all
data, count = fn(test_data,
fields=['Column1', 'Column2', 'Column3', 'Column4', 'Column5', 'Column6'])
self.assertEquals({'cols': cols, 'rows': rows}, data)
@staticmethod
def _create_api():
context = TestCases._create_context()
return datalab.bigquery._api.Api(context.credentials, context.project_id)
@staticmethod
def _create_context():
project_id = 'test'
creds = AccessTokenCredentials('test_token', 'test_ua')
return datalab.context.Context(project_id, creds)
| apache-2.0 |
wkfwkf/statsmodels | statsmodels/examples/ex_kernel_semilinear_dgp.py | 33 | 4969 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 06 09:50:54 2013
Author: Josef Perktold
"""
from __future__ import print_function
if __name__ == '__main__':
import numpy as np
import matplotlib.pyplot as plt
#from statsmodels.nonparametric.api import KernelReg
import statsmodels.sandbox.nonparametric.kernel_extras as smke
import statsmodels.sandbox.nonparametric.dgp_examples as dgp
class UnivariateFunc1a(dgp.UnivariateFunc1):
def het_scale(self, x):
return 0.5
seed = np.random.randint(999999)
#seed = 430973
#seed = 47829
seed = 648456 #good seed for het_scale = 0.5
print(seed)
np.random.seed(seed)
nobs, k_vars = 300, 3
x = np.random.uniform(-2, 2, size=(nobs, k_vars))
xb = x.sum(1) / 3 #beta = [1,1,1]
k_vars_lin = 2
x2 = np.random.uniform(-2, 2, size=(nobs, k_vars_lin))
funcs = [#dgp.UnivariateFanGijbels1(),
#dgp.UnivariateFanGijbels2(),
#dgp.UnivariateFanGijbels1EU(),
#dgp.UnivariateFanGijbels2(distr_x=stats.uniform(-2, 4))
UnivariateFunc1a(x=xb)
]
res = []
fig = plt.figure()
for i,func in enumerate(funcs):
#f = func()
f = func
y = f.y + x2.sum(1)
model = smke.SemiLinear(y, x2, x, 'ccc', k_vars_lin)
mean, mfx = model.fit()
ax = fig.add_subplot(1, 1, i+1)
f.plot(ax=ax)
xb_est = np.dot(model.exog, model.b)
sortidx = np.argsort(xb_est) #f.x)
ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, label='est. mean')
# ax.plot(f.x, mean0, color='g', lw=2, label='est. mean')
ax.legend(loc='upper left')
res.append((model, mean, mfx))
print('beta', model.b)
print('scale - est', (y - (xb_est+mean)).std())
print('scale - dgp realised, true', (y - (f.y_true + x2.sum(1))).std(), \
2 * f.het_scale(1))
fittedvalues = xb_est + mean
resid = np.squeeze(model.endog) - fittedvalues
print('corrcoef(fittedvalues, resid)', np.corrcoef(fittedvalues, resid)[0,1])
print('variance of components, var and as fraction of var(y)')
print('fitted values', fittedvalues.var(), fittedvalues.var() / y.var())
print('linear ', xb_est.var(), xb_est.var() / y.var())
print('nonparametric', mean.var(), mean.var() / y.var())
print('residual ', resid.var(), resid.var() / y.var())
print('\ncovariance decomposition fraction of var(y)')
print(np.cov(fittedvalues, resid) / model.endog.var(ddof=1))
print('sum', (np.cov(fittedvalues, resid) / model.endog.var(ddof=1)).sum())
print('\ncovariance decomposition, xb, m, resid as fraction of var(y)')
print(np.cov(np.column_stack((xb_est, mean, resid)), rowvar=False) / model.endog.var(ddof=1))
fig.suptitle('Kernel Regression')
fig.show()
alpha = 0.7
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(f.x[sortidx], f.y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(f.x[sortidx], mean[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
sortidx = np.argsort(xb_est + mean)
fig = plt.figure()
ax = fig.add_subplot(1, 1, 1)
ax.plot(f.x[sortidx], y[sortidx], 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.x[sortidx], f.y_true[sortidx], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(f.x[sortidx], (xb_est + mean)[sortidx], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
ax.set_title('Semilinear Model - observed and total fitted')
fig = plt.figure()
# ax = fig.add_subplot(1, 2, 1)
# ax.plot(f.x, f.y, 'o', color='b', lw=2, alpha=alpha, label='observed')
# ax.plot(f.x, f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
# ax.plot(f.x, mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')
# ax.legend(loc='upper left')
sortidx0 = np.argsort(xb)
ax = fig.add_subplot(1, 2, 1)
ax.plot(f.y[sortidx0], 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.y_true[sortidx0], 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(mean[sortidx0], 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
ax.set_title('Single Index Model (sorted by true xb)')
ax = fig.add_subplot(1, 2, 2)
ax.plot(y - xb_est, 'o', color='b', lw=2, alpha=alpha, label='observed')
ax.plot(f.y_true, 'o', color='g', lw=2, alpha=alpha, label='dgp. mean')
ax.plot(mean, 'o', color='r', lw=2, alpha=alpha, label='est. mean')
ax.legend(loc='upper left')
ax.set_title('Single Index Model (nonparametric)')
plt.figure()
plt.plot(y, xb_est+mean, '.')
plt.title('observed versus fitted values')
plt.show()
| bsd-3-clause |
mayblue9/scikit-learn | examples/ensemble/plot_forest_importances_faces.py | 403 | 1519 | """
=================================================
Pixel importances with a parallel forest of trees
=================================================
This example shows the use of forests of trees to evaluate the importance
of the pixels in an image classification task (faces). The hotter the pixel,
the more important.
The code below also illustrates how the construction and the computation
of the predictions can be parallelized within multiple jobs.
"""
print(__doc__)
from time import time
import matplotlib.pyplot as plt
from sklearn.datasets import fetch_olivetti_faces
from sklearn.ensemble import ExtraTreesClassifier
# Number of cores to use to perform parallel fitting of the forest model
n_jobs = 1
# Load the faces dataset
data = fetch_olivetti_faces()
X = data.images.reshape((len(data.images), -1))
y = data.target
mask = y < 5 # Limit to 5 classes
X = X[mask]
y = y[mask]
# Build a forest and compute the pixel importances
print("Fitting ExtraTreesClassifier on faces data with %d cores..." % n_jobs)
t0 = time()
forest = ExtraTreesClassifier(n_estimators=1000,
max_features=128,
n_jobs=n_jobs,
random_state=0)
forest.fit(X, y)
print("done in %0.3fs" % (time() - t0))
importances = forest.feature_importances_
importances = importances.reshape(data.images[0].shape)
# Plot pixel importances
plt.matshow(importances, cmap=plt.cm.hot)
plt.title("Pixel importances with forests of trees")
plt.show()
| bsd-3-clause |
mmottahedi/nilmtk | nilmtk/metergroup.py | 4 | 70748 | from __future__ import print_function, division
import networkx as nx
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import FuncFormatter
from datetime import timedelta
from warnings import warn
from sys import stdout
from collections import Counter
from copy import copy, deepcopy
import gc
from collections import namedtuple
# NILMTK imports
from .elecmeter import ElecMeter, ElecMeterID
from .appliance import Appliance
from .datastore.datastore import join_key
from .utils import (tree_root, nodes_adjacent_to_root, simplest_type_for,
flatten_2d_list, convert_to_timestamp, normalise_timestamp,
print_on_line, convert_to_list, append_or_extend_list,
most_common, capitalise_first_letter)
from .plots import plot_series
from .measurement import (select_best_ac_type, AC_TYPES, LEVEL_NAMES,
PHYSICAL_QUANTITIES_TO_AVERAGE)
from nilmtk.exceptions import MeasurementError
from .electric import Electric
from .timeframe import TimeFrame, split_timeframes
from .preprocessing import Apply
from .datastore import MAX_MEM_ALLOWANCE_IN_BYTES
from nilmtk.timeframegroup import TimeFrameGroup
# MeterGroupID.meters is a tuple of ElecMeterIDs. Order doesn't matter.
# (we can't use a set because sets aren't hashable so we can't use
# a set as a dict key or a DataFrame column name.)
MeterGroupID = namedtuple('MeterGroupID', ['meters'])
class MeterGroup(Electric):
"""A group of ElecMeter objects. Can contain nested MeterGroup objects.
Implements many of the same methods as ElecMeter.
Attributes
----------
meters : list of ElecMeters or nested MeterGroups
disabled_meters : list of ElecMeters or nested MeterGroups
name : only set by functions like 'groupby' and 'select_top_k'
"""
def __init__(self, meters=None, disabled_meters=None):
self.meters = convert_to_list(meters)
self.disabled_meters = convert_to_list(disabled_meters)
self.name = ""
def import_metadata(self, store, elec_meters, appliances, building_id):
"""
Parameters
----------
store : nilmtk.DataStore
elec_meters : dict of dicts
metadata for each ElecMeter
appliances : list of dicts
metadata for each Appliance
building_id : BuildingID
"""
# Sanity checking
assert isinstance(elec_meters, dict)
assert isinstance(appliances, list)
assert isinstance(building_id, tuple)
if not elec_meters:
warn("Building {} has an empty 'elec_meters' object."
.format(building_id.instance), RuntimeWarning)
if not appliances:
warn("Building {} has an empty 'appliances' list."
.format(building_id.instance), RuntimeWarning)
# Load static Meter Devices
ElecMeter.load_meter_devices(store)
# Load each meter
for meter_i, meter_metadata_dict in elec_meters.iteritems():
meter_id = ElecMeterID(instance=meter_i,
building=building_id.instance,
dataset=building_id.dataset)
meter = ElecMeter(store, meter_metadata_dict, meter_id)
self.meters.append(meter)
# Load each appliance
for appliance_md in appliances:
appliance_md['dataset'] = building_id.dataset
appliance_md['building'] = building_id.instance
appliance = Appliance(appliance_md)
meter_ids = [ElecMeterID(instance=meter_instance,
building=building_id.instance,
dataset=building_id.dataset)
for meter_instance in appliance.metadata['meters']]
if appliance.n_meters == 1:
# Attach this appliance to just a single meter
meter = self[meter_ids[0]]
if isinstance(meter, MeterGroup): # MeterGroup of site_meters
metergroup = meter
for meter in metergroup.meters:
meter.appliances.append(appliance)
else:
meter.appliances.append(appliance)
else:
# DualSupply or 3-phase appliance so need a meter group
metergroup = MeterGroup()
metergroup.meters = [self[meter_id] for meter_id in meter_ids]
for meter in metergroup.meters:
# We assume that any meters used for measuring
# dual-supply or 3-phase appliances are not also used
# for measuring single-supply appliances.
self.meters.remove(meter)
meter.appliances.append(appliance)
self.meters.append(metergroup)
# disable disabled meters
meters_to_disable = [m for m in self.meters
if isinstance(m, ElecMeter)
and m.metadata.get('disabled')]
for meter in meters_to_disable:
self.meters.remove(meter)
self.disabled_meters.append(meter)
def union(self, other):
"""
Returns
-------
new MeterGroup where its set of `meters` is the union of
`self.meters` and `other.meters`.
"""
if not isinstance(other, MeterGroup):
raise TypeError()
return MeterGroup(set(self.meters).union(other.meters))
def dominant_appliance(self):
dominant_appliances = [meter.dominant_appliance()
for meter in self.meters]
dominant_appliances = list(set(dominant_appliances))
n_dominant_appliances = len(dominant_appliances)
if n_dominant_appliances == 0:
return
elif n_dominant_appliances == 1:
return dominant_appliances[0]
else:
raise RuntimeError(
"More than one dominant appliance in MeterGroup!"
" (The dominant appliance per meter should be manually"
" specified in the metadata. If it isn't and if there are"
" multiple appliances for a meter then NILMTK assumes"
" all appliances on that meter are dominant. NILMTK"
" can't automatically distinguish between multiple"
" appliances on the same meter (at least,"
" not without using NILM!))")
def nested_metergroups(self):
return [m for m in self.meters if isinstance(m, MeterGroup)]
def __getitem__(self, key):
"""Get a single meter using appliance type and instance unless
ElecMeterID is supplied.
These formats for `key` are accepted:
Retrieve a meter using details of the meter:
* `1` - retrieves meter instance 1, raises Exception if there are
more than one meter with this instance, raises KeyError
if none are found. If meter instance 1 is in a nested MeterGroup
then retrieve the ElecMeter, not the MeterGroup.
* `ElecMeterID(1, 1, 'REDD')` - retrieves meter with specified meter ID
        * `MeterGroupID(meters=(ElecMeterID(1, 1, 'REDD'), ElecMeterID(2, 1, 'REDD')))`
          - retrieves the existing nested MeterGroup containing exactly meter
          instances 1 and 2.
* `[ElecMeterID(1, 1, 'REDD'), ElecMeterID(2, 1, 'REDD')]` - retrieves
existing nested MeterGroup containing exactly meter instances 1 and 2.
* `ElecMeterID(0, 1, 'REDD')` - instance `0` means `mains`. This returns
a new MeterGroup of all site_meters in building 1 in REDD.
* `ElecMeterID((1,2), 1, 'REDD')` - retrieve existing MeterGroup
which contains exactly meters 1 & 2.
* `(1, 2, 'REDD')` - converts to ElecMeterID and treats as an ElecMeterID.
Items must be in the order expected for an ElecMeterID.
Retrieve a meter using details of appliances attached to the meter:
* `'toaster'` - retrieves meter or group upstream of toaster instance 1
* `'toaster', 2` - retrieves meter or group upstream of toaster instance 2
* `{'dataset': 'redd', 'building': 3, 'type': 'toaster', 'instance': 2}`
- specify an appliance
Returns
-------
ElecMeter or MeterGroup
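        Examples
        --------
        Illustrative sketch only (assumes `elec` is the MeterGroup of
        building 1 of a REDD-style dataset, e.g. obtained via
        `DataSet('redd.h5').buildings[1].elec`; `ElecMeterID` can be
        imported from `nilmtk.elecmeter`):
        >>> elec[1]                    # by meter instance
        >>> elec['toaster']            # by appliance type (instance 1)
        >>> elec['toaster', 2]         # by appliance type and instance
        >>> elec[ElecMeterID(instance=1, building=1, dataset='REDD')]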
"""
if isinstance(key, str):
# default to get first meter
return self[(key, 1)]
elif isinstance(key, ElecMeterID):
if isinstance(key.instance, tuple):
# find meter group from a key of the form
# ElecMeterID(instance=(1,2), building=1, dataset='REDD')
for group in self.nested_metergroups():
if (set(group.instance()) == set(key.instance) and
group.building() == key.building and
group.dataset() == key.dataset):
return group
# Else try to find an ElecMeter with instance=(1,2)
for meter in self.meters:
if meter.identifier == key:
return meter
elif key.instance == 0:
metergroup_of_building = self.select(
building=key.building, dataset=key.dataset)
return metergroup_of_building.mains()
else:
for meter in self.meters:
if meter.identifier == key:
return meter
raise KeyError(key)
elif isinstance(key, MeterGroupID):
key_meters = set(key.meters)
for group in self.nested_metergroups():
if (set(group.identifier.meters) == key_meters):
return group
raise KeyError(key)
# find MeterGroup from list of ElecMeterIDs
elif isinstance(key, list):
if not all([isinstance(item, tuple) for item in key]):
raise TypeError("requires a list of ElecMeterID objects.")
for meter in self.meters: # TODO: write unit tests for this
# list of ElecMeterIDs. Return existing MeterGroup
if isinstance(meter, MeterGroup):
metergroup = meter
meter_ids = set(metergroup.identifier.meters)
if meter_ids == set(key):
return metergroup
raise KeyError(key)
elif isinstance(key, tuple):
if len(key) == 2:
if isinstance(key[0], str):
return self[{'type': key[0], 'instance': key[1]}]
else:
# Assume we're dealing with a request for 2 ElecMeters
return MeterGroup([self[i] for i in key])
elif len(key) == 3:
return self[ElecMeterID(*key)]
else:
raise TypeError()
elif isinstance(key, dict):
meters = []
for meter in self.meters:
if meter.matches_appliances(key):
meters.append(meter)
if len(meters) == 1:
return meters[0]
elif len(meters) > 1:
raise Exception('search terms match {} appliances'
.format(len(meters)))
else:
raise KeyError(key)
elif isinstance(key, int) and not isinstance(key, bool):
meters_found = []
for meter in self.meters:
if isinstance(meter.instance(), int):
if meter.instance() == key:
meters_found.append(meter)
elif isinstance(meter.instance(), (tuple, list)):
if key in meter.instance():
if isinstance(meter, MeterGroup):
print("Meter", key, "is in a nested meter group."
" Retrieving just the ElecMeter.")
meters_found.append(meter[key])
else:
meters_found.append(meter)
n_meters_found = len(meters_found)
if n_meters_found > 1:
raise Exception('{} meters found with instance == {}: {}'
.format(n_meters_found, key, meters_found))
elif n_meters_found == 0:
raise KeyError(
'No meters found with instance == {}'.format(key))
else:
return meters_found[0]
else:
raise TypeError()
def matches(self, key):
for meter in self.meters:
if meter.matches(key):
return True
return False
def select(self, **kwargs):
"""Select a group of meters based on meter metadata.
e.g.
* select(building=1, sample_period=6)
* select(room='bathroom')
If multiple criteria are supplied then these are ANDed together.
Returns
-------
new MeterGroup of selected meters.
Ideas for the future (not implemented yet!)
-------------------------------------------
* select(category=['ict', 'lighting'])
* select([(fridge, 1), (tv, 1)]) # get specifically fridge 1 and tv 1
* select(name=['fridge', 'tv']) # get all fridges and tvs
* select(category='lighting', except={'room'=['kitchen lights']})
* select('all', except=[('tv', 1)])
Also: see if we can do select(category='lighting' | name='tree lights')
or select(energy > 100)?? Perhaps using:
* Python's eval function something like this:
>>> s = pd.Series(np.random.randn(5))
>>> eval('(x > 0) | (index > 2)', {'x':s, 'index':s.index})
Hmm, yes, maybe we should just implement this! e.g.
select("(category == 'lighting') | (category == 'ict')")
But what about:
* select('total_energy > 100')
* select('mean(hours_on_per_day) > 3')
* select('max(hours_on_per_day) > 5')
* select('max(power) > 2000')
* select('energy_per_day > 2')
* select('rank_by_energy > 5') # top_k(5)
* select('rank_by_proportion > 0.2')
Maybe don't bother. That's easy enough
to get with itemised_energy(). Although these are quite nice
and shouldn't be too hard. Would need to only calculate
these stats if necessary though (e.g. by checking if 'total_energy'
is in the query string before running `eval`)
* or numexpr: https://github.com/pydata/numexpr
* see Pandas.eval():
* http://pandas.pydata.org/pandas-docs/stable/indexing.html#the-query-method-experimental
* https://github.com/pydata/pandas/blob/master/pandas/computation/eval.py#L119
"""
selected_meters = []
func = kwargs.pop('func', 'matches')
def get(_kwargs):
exception_raised_every_time = True
exception = None
no_match = True
for meter in self.meters:
try:
match = getattr(meter, func)(_kwargs)
except KeyError as e:
exception = e
else:
exception_raised_every_time = False
if match:
selected_meters.append(meter)
no_match = False
if no_match:
raise KeyError("'No match for {}'".format(_kwargs))
if exception_raised_every_time and exception is not None:
raise exception
if len(kwargs) == 1 and isinstance(kwargs.values()[0], list):
attribute = kwargs.keys()[0]
list_of_values = kwargs.values()[0]
for value in list_of_values:
get({attribute: value})
else:
get(kwargs)
return MeterGroup(selected_meters)
def select_using_appliances(self, **kwargs):
"""Select a group of meters based on appliance metadata.
e.g.
* select_using_appliances(category='lighting')
* select_using_appliances(type='fridge')
* select_using_appliances(type=['fridge', 'kettle', 'toaster'])
* select_using_appliances(building=1, category='lighting')
* select_using_appliances(room='bathroom')
If multiple criteria are supplied then these are ANDed together.
Returns
-------
new MeterGroup of selected meters.
"""
return self.select(func='matches_appliances', **kwargs)
def from_list(self, meter_ids):
"""
Parameters
----------
meter_ids : list or tuple
Each element is an ElecMeterID or a MeterGroupID.
Returns
-------
MeterGroup
"""
meter_ids = list(meter_ids)
meter_ids = list(set(meter_ids)) # make unique
meters = []
def append_meter_group(meter_id):
try:
# see if there is an existing MeterGroup
metergroup = self[meter_id]
except KeyError:
# there is no existing MeterGroup so assemble one
metergroup = self.from_list(meter_id.meters)
meters.append(metergroup)
for meter_id in meter_ids:
if isinstance(meter_id, ElecMeterID):
meters.append(self[meter_id])
elif isinstance(meter_id, MeterGroupID):
append_meter_group(meter_id)
elif isinstance(meter_id, tuple):
meter_id = MeterGroupID(meters=meter_id)
append_meter_group(meter_id)
else:
raise TypeError()
return MeterGroup(meters)
@classmethod
def from_other_metergroup(cls, other, dataset):
"""Assemble a new meter group using the same meter IDs and nested
MeterGroups as `other`. This is useful for preparing a ground truth
metergroup from a meter group of NILM predictions.
Parameters
----------
other : MeterGroup
dataset : string
The `name` of the dataset for the ground truth. e.g. 'REDD'
Returns
-------
MeterGroup
"""
other_identifiers = other.identifier.meters
new_identifiers = []
for other_id in other_identifiers:
new_id = other_id._replace(dataset=dataset)
if isinstance(new_id.instance, tuple):
nested = []
for instance in new_id.instance:
new_nested_id = new_id._replace(instance=instance)
nested.append(new_nested_id)
new_identifiers.append(tuple(nested))
else:
new_identifiers.append(new_id)
metergroup = MeterGroup()
metergroup.from_list(new_identifiers)
return metergroup
def __eq__(self, other):
if isinstance(other, MeterGroup):
return set(other.meters) == set(self.meters)
else:
return False
def __ne__(self, other):
return not self.__eq__(other)
@property
def appliances(self):
appliances = set()
for meter in self.meters:
appliances.update(meter.appliances)
return list(appliances)
def dominant_appliances(self):
appliances = set()
for meter in self.meters:
appliances.add(meter.dominant_appliance())
return list(appliances)
def values_for_appliance_metadata_key(self, key,
only_consider_dominant_appliance=True):
"""
Parameters
----------
key : str
e.g. 'type' or 'categories' or 'room'
Returns
-------
list
"""
values = []
if only_consider_dominant_appliance:
appliances = self.dominant_appliances()
else:
appliances = self.appliances
for appliance in appliances:
value = appliance.metadata.get(key)
append_or_extend_list(values, value)
value = appliance.type.get(key)
append_or_extend_list(values, value)
return list(set(values))
def get_labels(self, meter_ids, pretty=True):
"""Create human-readable meter labels.
Parameters
----------
meter_ids : list of ElecMeterIDs (or 3-tuples in same order as ElecMeterID)
Returns
-------
list of strings describing the appliances.
"""
meters = [self[meter_id] for meter_id in meter_ids]
labels = [meter.label(pretty=pretty) for meter in meters]
return labels
def __repr__(self):
s = "{:s}(meters=\n".format(self.__class__.__name__)
for meter in self.meters:
s += " " + str(meter).replace("\n", "\n ") + "\n"
s += ")"
return s
@property
def identifier(self):
"""Returns a MeterGroupID."""
return MeterGroupID(meters=tuple([meter.identifier for meter in self.meters]))
def instance(self):
"""Returns tuple of integers where each int is a meter instance."""
return tuple([meter.instance() for meter in self.meters])
def building(self):
"""Returns building instance integer(s)."""
buildings = set([meter.building() for meter in self.meters])
return simplest_type_for(buildings)
def contains_meters_from_multiple_buildings(self):
"""Returns True if this MeterGroup contains meters from
more than one building."""
building = self.building()
try:
n = len(building)
except TypeError:
return False
else:
return n > 1
def dataset(self):
"""Returns dataset string(s)."""
datasets = set([meter.dataset() for meter in self.meters])
return simplest_type_for(datasets)
def sample_period(self):
"""Returns max of all meter sample periods."""
return max([meter.sample_period() for meter in self.meters])
def wiring_graph(self):
"""Returns a networkx.DiGraph of connections between meters."""
wiring_graph = nx.DiGraph()
def _build_wiring_graph(meters):
for meter in meters:
if isinstance(meter, MeterGroup):
metergroup = meter
_build_wiring_graph(metergroup.meters)
else:
upstream_meter = meter.upstream_meter(raise_warning=False)
# Need to ensure we use the same object
# if upstream meter already exists.
if upstream_meter is not None:
for node in wiring_graph.nodes():
if upstream_meter == node:
upstream_meter = node
break
wiring_graph.add_edge(upstream_meter, meter)
_build_wiring_graph(self.meters)
return wiring_graph
def draw_wiring_graph(self, show_meter_labels=True):
graph = self.wiring_graph()
meter_labels = {meter: meter.instance() for meter in graph.nodes()}
pos = nx.graphviz_layout(graph, prog='dot')
nx.draw(graph, pos, labels=meter_labels, arrows=False)
if show_meter_labels:
meter_labels = {meter: meter.label() for meter in graph.nodes()}
for meter, name in meter_labels.iteritems():
x, y = pos[meter]
if meter.is_site_meter():
delta_y = 5
else:
delta_y = -5
plt.text(x, y+delta_y, s=name, bbox=dict(facecolor='red', alpha=0.5), horizontalalignment='center')
ax = plt.gca()
return graph, ax
def load(self, **kwargs):
"""Returns a generator of DataFrames loaded from the DataStore.
By default, `load` will load all available columns from the DataStore.
Specific columns can be selected in one or two mutually exclusive ways:
1. specify a list of column names using the `cols` parameter.
2. specify a `physical_quantity` and/or an `ac_type` parameter to ask
`load` to automatically select columns.
Each meter in the MeterGroup will first be resampled before being added.
The returned DataFrame will include NaNs at timestamps where no meter
had a sample (after resampling the meter).
Parameters
----------
sample_period : int or float, optional
Number of seconds to use as sample period when reindexing meters.
If not specified then will use the max of all meters' sample_periods.
        resample_kwargs : dict of key word arguments (other than 'rule')
            to pass to `pd.DataFrame.resample()`
chunksize : int, optional
the maximum number of rows per chunk. Note that each chunk is
guaranteed to be of length <= chunksize. Each chunk is *not*
guaranteed to be exactly of length == chunksize.
**kwargs :
any other key word arguments to pass to `self.store.load()` including:
physical_quantity : string or list of strings
e.g. 'power' or 'voltage' or 'energy' or ['power', 'energy'].
If a single string then load columns only for that physical quantity.
If a list of strings then load columns for all those physical
quantities.
ac_type : string or list of strings, defaults to None
Where 'ac_type' is short for 'alternating current type'. e.g.
'reactive' or 'active' or 'apparent'.
If set to None then will load all AC types per physical quantity.
If set to 'best' then load the single best AC type per
physical quantity.
If set to a single AC type then load just that single AC type per
physical quantity, else raise an Exception.
If set to a list of AC type strings then will load all those
AC types and will raise an Exception if any cannot be found.
cols : list of tuples, using NILMTK's vocabulary for measurements.
e.g. [('power', 'active'), ('voltage', ''), ('energy', 'reactive')]
`cols` can't be used if `ac_type` and/or `physical_quantity` are set.
preprocessing : list of Node subclass instances
e.g. [Clip()]
Returns
---------
Always return a generator of DataFrames (even if it only has a single
column).
.. note:: Different AC types will be treated separately.
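        Examples
        --------
        Illustrative sketch only (assumes `elec` is a MeterGroup, e.g.
        `DataSet('redd.h5').buildings[1].elec`, and that 'redd.h5' exists):
        >>> for chunk in elec.load(physical_quantity='power', ac_type='best',
        ...                        sample_period=60):
        ...     print(chunk.head())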
"""
# Handle kwargs
sample_period = kwargs.setdefault('sample_period', self.sample_period())
sections = kwargs.pop('sections', [self.get_timeframe()])
chunksize = kwargs.pop('chunksize', MAX_MEM_ALLOWANCE_IN_BYTES)
duration_threshold = sample_period * chunksize
columns = pd.MultiIndex.from_tuples(
self._convert_physical_quantity_and_ac_type_to_cols(**kwargs)['cols'],
names=LEVEL_NAMES)
freq = '{:d}S'.format(int(sample_period))
verbose = kwargs.get('verbose')
# Check for empty sections
sections = [section for section in sections if section]
if not sections:
print("No sections to load.")
yield pd.DataFrame(columns=columns)
return
# Loop through each section to load
for section in split_timeframes(sections, duration_threshold):
kwargs['sections'] = [section]
start = normalise_timestamp(section.start, freq)
tz = None if start.tz is None else start.tz.zone
index = pd.date_range(
start.tz_localize(None), section.end.tz_localize(None), tz=tz,
closed='left', freq=freq)
chunk = combine_chunks_from_generators(
index, columns, self.meters, kwargs)
yield chunk
def _convert_physical_quantity_and_ac_type_to_cols(self, **kwargs):
all_columns = set()
kwargs = deepcopy(kwargs)
for meter in self.meters:
kwargs_copy = deepcopy(kwargs)
new_kwargs = meter._convert_physical_quantity_and_ac_type_to_cols(**kwargs_copy)
cols = new_kwargs.get('cols', [])
for col in cols:
all_columns.add(col)
kwargs['cols'] = list(all_columns)
return kwargs
def _meter_generators(self, **kwargs):
"""Returns (list of identifiers, list of generators)."""
generators = []
identifiers = []
for meter in self.meters:
kwargs_copy = deepcopy(kwargs)
generator = meter.load(**kwargs_copy)
generators.append(generator)
identifiers.append(meter.identifier)
return identifiers, generators
def simultaneous_switches(self, threshold=40):
"""
Parameters
----------
threshold : number, threshold in Watts
Returns
-------
sim_switches : pd.Series of type {timestamp: number of
simultaneous switches}
Notes
-----
This function assumes that the submeters in this MeterGroup
are all aligned. If they are not then you should align the
meters, e.g. by using an `Apply` node with `resample`.
"""
submeters = self.submeters().meters
count = Counter()
for meter in submeters:
switch_time_meter = meter.switch_times(threshold)
for timestamp in switch_time_meter:
count[timestamp] += 1
sim_switches = pd.Series(count)
# Should be 2 or more appliances changing state at the same time
sim_switches = sim_switches[sim_switches >= 2]
return sim_switches
def mains(self):
"""
Returns
-------
ElecMeter or MeterGroup or None
"""
if self.contains_meters_from_multiple_buildings():
msg = ("This MeterGroup contains meters from buildings '{}'."
" It only makes sense to get `mains` if the MeterGroup"
" contains meters from a single building."
.format(self.building()))
raise RuntimeError(msg)
site_meters = [meter for meter in self.meters if meter.is_site_meter()]
n_site_meters = len(site_meters)
if n_site_meters == 0:
return
elif n_site_meters == 1:
return site_meters[0]
else:
return MeterGroup(meters=site_meters)
def use_alternative_mains(self):
"""Swap present mains meter(s) for mains meter(s) in `disabled_meters`.
This is useful if the dataset has multiple, redundant mains meters
(e.g. in UK-DALE buildings 1, 2 and 5).
"""
present_mains = [m for m in self.meters if m.is_site_meter()]
alternative_mains = [m for m in self.disabled_meters if m.is_site_meter()]
if not alternative_mains:
raise RuntimeError("No site meters found in `self.disabled_meters`")
for meter in present_mains:
self.meters.remove(meter)
self.disabled_meters.append(meter)
for meter in alternative_mains:
self.meters.append(meter)
self.disabled_meters.remove(meter)
def upstream_meter(self):
"""Returns single upstream meter.
Raises RuntimeError if more than 1 upstream meter.
"""
upstream_meters = []
for meter in self.meters:
upstream_meters.append(meter.upstream_meter())
unique_upstream_meters = list(set(upstream_meters))
if len(unique_upstream_meters) > 1:
raise RuntimeError("{:d} upstream meters found for meter group."
" Should be 1.".format(len(unique_upstream_meters)))
return unique_upstream_meters[0]
def meters_directly_downstream_of_mains(self):
"""Returns new MeterGroup."""
meters = nodes_adjacent_to_root(self.wiring_graph())
assert isinstance(meters, list)
return MeterGroup(meters)
def submeters(self):
"""Returns new MeterGroup of all meters except site_meters"""
submeters = [meter for meter in self.meters
if not meter.is_site_meter()]
return MeterGroup(submeters)
def is_site_meter(self):
"""Returns True if any meters are site meters"""
return any([meter.is_site_meter() for meter in self.meters])
def total_energy(self, **load_kwargs):
"""Sums together total meter_energy for each meter.
Note that this function does *not* return the total aggregate
energy for a building. Instead this function adds up the total energy
for all the meters contained in this MeterGroup. If you want the total
aggregate energy then please use `MeterGroup.mains().total_energy()`.
Parameters
----------
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
if `full_results` is True then return TotalEnergyResults object
else return a pd.Series with a row for each AC type.
"""
self._check_kwargs_for_full_results_and_sections(load_kwargs)
full_results = load_kwargs.pop('full_results', False)
meter_energies = self._collect_stats_on_all_meters(
load_kwargs, 'total_energy', full_results)
if meter_energies:
total_energy_results = meter_energies[0]
for meter_energy in meter_energies[1:]:
if full_results:
total_energy_results.unify(meter_energy)
else:
total_energy_results += meter_energy
return total_energy_results
def _collect_stats_on_all_meters(self, load_kwargs, func, full_results):
collected_stats = []
for meter in self.meters:
print_on_line("\rCalculating", func, "for", meter.identifier, "... ")
single_stat = getattr(meter, func)(full_results=full_results,
**load_kwargs)
collected_stats.append(single_stat)
if (full_results and len(self.meters) > 1 and
not meter.store.all_sections_smaller_than_chunksize):
warn("at least one section requested from '{}' required"
" multiple chunks to be loaded into memory. This may cause"
" a failure when we try to unify results from multiple"
" meters.".format(meter))
return collected_stats
def dropout_rate(self, **load_kwargs):
"""Sums together total energy for each meter.
Parameters
----------
full_results : bool, default=False
**loader_kwargs : key word arguments for DataStore.load()
Returns
-------
        if `full_results` is True then return the unified full-results object
        from the individual meters, else return the mean dropout rate across
        meters as a single float.
"""
self._check_kwargs_for_full_results_and_sections(load_kwargs)
full_results = load_kwargs.pop('full_results', False)
dropout_rates = self._collect_stats_on_all_meters(
load_kwargs, 'dropout_rate', full_results)
if full_results and dropout_rates:
dropout_rate_results = dropout_rates[0]
for dr in dropout_rates[1:]:
dropout_rate_results.unify(dr)
return dropout_rate_results
else:
return np.mean(dropout_rates)
def _check_kwargs_for_full_results_and_sections(self, load_kwargs):
if (load_kwargs.get('full_results')
and 'sections' not in load_kwargs
and len(self.meters) > 1):
raise RuntimeError("MeterGroup stats can only return full results"
" objects if you specify 'sections' to load. If"
" you do not specify periods then the results"
" from individual meters are likely to be for"
" different periods and hence"
" cannot be unified.")
def good_sections(self, **kwargs):
"""Returns good sections for just the first meter.
TODO: combine good sections from every meter.
"""
if self.meters:
if len(self.meters) > 1:
warn("As a quick implementation we only get Good Sections from"
" the first meter in the meter group. We should really"
" return the intersection of the good sections for all"
" meters. This will be fixed...")
return self.meters[0].good_sections(**kwargs)
else:
return []
def dataframe_of_meters(self, **kwargs):
"""
Parameters
----------
sample_period : int or float, optional
Number of seconds to use as sample period when reindexing meters.
If not specified then will use the max of all meters' sample_periods.
resample : bool, defaults to True
If True then resample to `sample_period`.
**kwargs :
any other key word arguments to pass to `self.store.load()` including:
ac_type : string, defaults to 'best'
physical_quantity: string, defaults to 'power'
Returns
-------
DataFrame
Each column is a meter.
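        Examples
        --------
        Illustrative sketch only (assumes `elec` is a MeterGroup):
        >>> df = elec.submeters().dataframe_of_meters(sample_period=60)
        >>> df.columns  # one column per meter identifier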
"""
kwargs.setdefault('sample_period', self.sample_period())
kwargs.setdefault('ac_type', 'best')
kwargs.setdefault('physical_quantity', 'power')
identifiers, generators = self._meter_generators(**kwargs)
segments = []
while True:
chunks = []
ids = []
for meter_id, generator in zip(identifiers, generators):
try:
chunk_from_next_meter = next(generator)
except StopIteration:
continue
if not chunk_from_next_meter.empty:
ids.append(meter_id)
chunks.append(chunk_from_next_meter.sum(axis=1))
if chunks:
df = pd.concat(chunks, axis=1)
df.columns = ids
segments.append(df)
else:
break
if segments:
return pd.concat(segments)
else:
return pd.DataFrame(columns=self.identifier.meters)
def entropy_per_meter(self):
"""Finds the entropy of each meter in this MeterGroup.
Returns
-------
pd.Series of entropy
"""
return self.call_method_on_all_meters('entropy')
def call_method_on_all_meters(self, method):
"""Calls `method` on each element in `self.meters`.
Parameters
----------
method : str
Name of a stats method in `ElecMeter`. e.g. 'correlation'.
Returns
-------
pd.Series of result of `method` called on each element in `self.meters`.
"""
meter_identifiers = list(self.identifier.meters)
result = pd.Series(index=meter_identifiers)
for meter in self.meters:
id_meter = meter.identifier
result[id_meter] = getattr(meter, method)()
return result
def pairwise(self, method):
"""
Calls `method` on all pairs in `self.meters`.
Assumes `method` is symmetrical.
Parameters
----------
method : str
Name of a stats method in `ElecMeter`. e.g. 'correlation'.
Returns
-------
pd.DataFrame of the result of `method` called on each
pair in `self.meters`.
"""
meter_identifiers = list(self.identifier.meters)
result = pd.DataFrame(index=meter_identifiers, columns=meter_identifiers)
for i, m_i in enumerate(self.meters):
for j, m_j in enumerate(self.meters):
id_i = m_i.identifier
id_j = m_j.identifier
if i > j:
result[id_i][id_j] = result[id_j][id_i]
else:
result[id_i][id_j] = getattr(m_i, method)(m_j)
return result
def pairwise_mutual_information(self):
"""
Finds the pairwise mutual information among different
meters in a MeterGroup.
Returns
-------
pd.DataFrame of mutual information between
pair of ElecMeters.
"""
return self.pairwise('mutual_information')
def pairwise_correlation(self):
"""
Finds the pairwise correlation among different
meters in a MeterGroup.
Returns
-------
pd.DataFrame of correlation between pair of ElecMeters.
"""
return self.pairwise('correlation')
def proportion_of_energy_submetered(self, **loader_kwargs):
"""
Returns
-------
float [0,1] or NaN if mains total_energy == 0
"""
print("Running MeterGroup.proportion_of_energy_submetered...")
mains = self.mains()
downstream_meters = self.meters_directly_downstream_of_mains()
proportion = 0.0
verbose = loader_kwargs.get('verbose')
all_nan = True
for m in downstream_meters.meters:
if verbose:
print("Calculating proportion for", m)
prop = m.proportion_of_energy(mains, **loader_kwargs)
if not np.isnan(prop):
proportion += prop
all_nan = False
if verbose:
print(" {:.2%}".format(prop))
if all_nan:
proportion = np.NaN
return proportion
def available_ac_types(self, physical_quantity):
"""Returns set of all available alternating current types for a
specific physical quantity.
Parameters
----------
physical_quantity : str or list of strings
Returns
-------
list of strings e.g. ['apparent', 'active']
"""
all_ac_types = [meter.available_ac_types(physical_quantity)
for meter in self.meters]
return list(set(flatten_2d_list(all_ac_types)))
def available_physical_quantities(self):
"""
Returns
-------
list of strings e.g. ['power', 'energy']
"""
all_physical_quants = [meter.available_physical_quantities()
for meter in self.meters]
return list(set(flatten_2d_list(all_physical_quants)))
def energy_per_meter(self, per_period=None, mains=None,
use_meter_labels=False, **load_kwargs):
"""Returns pd.DataFrame where columns is meter.identifier and
each value is total energy. Index is AC types.
Does not care about wiring hierarchy. Does not attempt to ensure all
channels share the same time sections.
Parameters
----------
per_period : None or offset alias
If None then returns absolute energy used per meter.
If a Pandas offset alias (e.g. 'D' for 'daily') then
will return the average energy per period.
ac_type : None or str
e.g. 'active' or 'best'. Defaults to 'best'.
use_meter_labels : bool
If True then columns will be human-friendly meter labels.
If False then columns will be ElecMeterIDs or MeterGroupIDs
mains : None or MeterGroup or ElecMeter
If None then will return DataFrame without remainder.
If not None then will return a Series including a 'remainder'
row which will be `mains.total_energy() - energy_per_meter.sum()`
and an attempt will be made to use the correct AC_TYPE.
Returns
-------
pd.DataFrame if mains is None else a pd.Series
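        Examples
        --------
        Illustrative sketch only (assumes `elec` is a MeterGroup with a
        site meter):
        >>> elec.submeters().energy_per_meter(per_period='D',
        ...                                   mains=elec.mains(),
        ...                                   use_meter_labels=True)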
"""
meter_identifiers = list(self.identifier.meters)
energy_per_meter = pd.DataFrame(columns=meter_identifiers, index=AC_TYPES)
n_meters = len(self.meters)
load_kwargs.setdefault('ac_type', 'best')
for i, meter in enumerate(self.meters):
print('\r{:d}/{:d} {}'.format(i+1, n_meters, meter), end='')
stdout.flush()
if per_period is None:
meter_energy = meter.total_energy(**load_kwargs)
else:
load_kwargs.setdefault('use_uptime', False)
meter_energy = meter.average_energy_per_period(
offset_alias=per_period, **load_kwargs)
energy_per_meter[meter.identifier] = meter_energy
        energy_per_meter = energy_per_meter.dropna(how='all')
if use_meter_labels:
energy_per_meter.columns = self.get_labels(energy_per_meter.columns)
if mains is not None:
energy_per_meter = self._energy_per_meter_with_remainder(
energy_per_meter, mains, per_period, **load_kwargs)
return energy_per_meter
def _energy_per_meter_with_remainder(self, energy_per_meter,
mains, per_period, **kwargs):
ac_types = energy_per_meter.keys()
energy_per_meter = energy_per_meter.sum() # Collapse AC_TYPEs into Series
# Find most common ac_type in energy_per_meter:
most_common_ac_type = most_common(ac_types)
mains_ac_types = mains.available_ac_types(
['power', 'energy', 'cumulative energy'])
if most_common_ac_type in mains_ac_types:
mains_ac_type = most_common_ac_type
else:
mains_ac_type = 'best'
# Get mains energy_per_meter
kwargs['ac_type'] = mains_ac_type
if per_period is None:
mains_energy = mains.total_energy(**kwargs)
else:
mains_energy = mains.average_energy_per_period(
offset_alias=per_period, **kwargs)
mains_energy = mains_energy[mains_energy.keys()[0]]
# Calculate remainder
energy_per_meter['Remainder'] = mains_energy - energy_per_meter.sum()
energy_per_meter.sort(ascending=False)
return energy_per_meter
def fraction_per_meter(self, **load_kwargs):
"""Fraction of energy per meter.
Return pd.Series. Index is meter.instance.
Each value is a float in the range [0,1].
"""
energy_per_meter = self.energy_per_meter(**load_kwargs).max()
total_energy = energy_per_meter.sum()
return energy_per_meter / total_energy
def proportion_of_upstream_total_per_meter(self, **load_kwargs):
prop_per_meter = pd.Series(index=self.identifier.meters)
n_meters = len(self.meters)
for i, meter in enumerate(self.meters):
proportion = meter.proportion_of_upstream(**load_kwargs)
print('\r{:d}/{:d} {} = {:.3f}'
.format(i+1, n_meters, meter, proportion), end='')
stdout.flush()
prop_per_meter[meter.identifier] = proportion
prop_per_meter.sort(ascending=False)
return prop_per_meter
def train_test_split(self, train_fraction=0.5):
"""
Parameters
----------
train_fraction
Returns
-------
split_time: pd.Timestamp where split should happen
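        Examples
        --------
        Illustrative sketch only (assumes `elec` is a MeterGroup whose mains
        have good sections):
        >>> split_point = elec.train_test_split(train_fraction=0.75)
        >>> train_timeframe = TimeFrame(elec.get_timeframe().start, split_point)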
"""
assert(
0 < train_fraction < 1), "`train_fraction` should be between 0 and 1"
# TODO: currently just works with the first mains meter, assuming
        # both to be simultaneously sampled
mains = self.mains()
good_sections = self.mains().good_sections()
sample_period = mains.device['sample_period']
appx_num_records_in_each_good_section = [
int((ts.end - ts.start).total_seconds() / sample_period) for ts in good_sections]
appx_total_records = sum(appx_num_records_in_each_good_section)
records_in_train = appx_total_records * train_fraction
seconds_in_train = int(records_in_train * sample_period)
if len(good_sections) == 1:
# all data is contained in one good section
split_point = good_sections[
0].start + timedelta(seconds=seconds_in_train)
return split_point
else:
# data is split across multiple time deltas
records_remaining = records_in_train
while records_remaining:
for i, records_in_section in enumerate(appx_num_records_in_each_good_section):
if records_remaining > records_in_section:
records_remaining -= records_in_section
elif records_remaining == records_in_section:
# Next TimeFrame is the split point!!
split_point = good_sections[i + 1].start
return split_point
else:
# Need to split this timeframe
split_point = good_sections[
i].start + timedelta(seconds=sample_period * records_remaining)
return split_point
################## FUNCTIONS NOT YET IMPLEMENTED ###################
# def init_new_dataset(self):
# self.infer_and_set_meter_connections()
# self.infer_and_set_dual_supply_appliances()
# def infer_and_set_meter_connections(self):
# """
# Arguments
# ---------
# meters : list of Meter objects
# """
# Maybe this should be a stand-alone function which
# takes a list of meters???
# raise NotImplementedError
# def infer_and_set_dual_supply_appliances(self):
# raise NotImplementedError
# def total_on_duration(self):
# """Return timedelta"""
# raise NotImplementedError
# def on_durations(self):
# self.get_unique_upstream_meters()
# for each meter, get the on time,
# assuming the on-power-threshold for the
# smallest appliance connected to that meter???
# raise NotImplementedError
# def activity_distribution(self, bin_size, timespan):
# raise NotImplementedError
# def on_off_events(self, minimum_state_duration):
# raise NotImplementedError
def select_top_k(self, k=5, by="energy", asc=False, group_remainder=False, **kwargs):
"""Only select the top K meters, according to energy.
Functions on the entire MeterGroup. So if you mean to select
the top K from only the submeters, please do something like
this:
elec.submeters().select_top_k()
Parameters
----------
k : int, optional, defaults to 5
by: string, optional, defaults to energy
Can select top k by:
* energy
* entropy
asc: bool, optional, defaults to False
By default top_k is in descending order. To select top_k
by ascending order, use asc=True
group_remainder : bool, optional, defaults to False
If True then place all remaining meters into a
nested metergroup.
**kwargs : key word arguments to pass to load()
Returns
-------
MeterGroup
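        Examples
        --------
        Illustrative sketch only (assumes `elec` is a MeterGroup):
        >>> top3 = elec.submeters().select_top_k(k=3, group_remainder=True)
        >>> others = top3.meters[-1]  # remaining meters grouped under name 'others'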
"""
function_map = {'energy': self.fraction_per_meter, 'entropy': self.entropy_per_meter}
top_k_series = function_map[by](**kwargs)
top_k_series.sort(ascending=asc)
top_k_elec_meter_ids = top_k_series[:k].index
top_k_metergroup = self.from_list(top_k_elec_meter_ids)
if group_remainder:
remainder_ids = top_k_series[k:].index
remainder_metergroup = self.from_list(remainder_ids)
remainder_metergroup.name = 'others'
top_k_metergroup.meters.append(remainder_metergroup)
return top_k_metergroup
def groupby(self, key, use_appliance_metadata=True, **kwargs):
"""
e.g. groupby('category')
Returns
-------
MeterGroup of nested MeterGroups: one per group
"""
if not use_appliance_metadata:
raise NotImplementedError()
values = self.values_for_appliance_metadata_key(key)
groups = []
for value in values:
group = self.select_using_appliances(**{key: value})
group.name = value
groups.append(group)
return MeterGroup(groups)
def get_timeframe(self):
"""
Returns
-------
nilmtk.TimeFrame representing the timeframe which is the union
of all meters in self.meters.
"""
timeframe = None
for meter in self.meters:
if timeframe is None:
timeframe = meter.get_timeframe()
elif meter.get_timeframe().empty:
pass
else:
timeframe = timeframe.union(meter.get_timeframe())
return timeframe
def plot(self, kind='separate lines', **kwargs):
"""
Parameters
----------
width : int, optional
Number of points on the x axis required
ax : matplotlib.axes, optional
plot_legend : boolean, optional
Defaults to True. Set to False to not plot legend.
        kind : {'separate lines', 'sum', 'area', 'sankey', 'energy bar'}
timeframe : nilmtk.TimeFrame, optional
Defaults to self.get_timeframe()
"""
# Load data and plot each meter
function_map = {
'separate lines': self._plot_separate_lines,
'sum': super(MeterGroup, self).plot,
'area': self._plot_area,
'sankey': self._plot_sankey,
'energy bar': self._plot_energy_bar
}
try:
ax = function_map[kind](**kwargs)
except KeyError:
raise ValueError("'{}' not a valid setting for 'kind' parameter."
.format(kind))
return ax
def _plot_separate_lines(self, ax=None, plot_legend=True, **kwargs):
for meter in self.meters:
if isinstance(meter, MeterGroup):
ax = meter.plot(ax=ax, plot_legend=False, kind='sum', **kwargs)
else:
ax = meter.plot(ax=ax, plot_legend=False, **kwargs)
if plot_legend:
plt.legend()
return ax
def _plot_sankey(self):
graph = self.wiring_graph()
meter_labels = {meter: meter.instance() for meter in graph.nodes()}
pos = nx.graphviz_layout(graph, prog='dot')
#nx.draw(graph, pos, labels=meter_labels, arrows=False)
meter_labels = {meter: meter.label() for meter in graph.nodes()}
for meter, name in meter_labels.iteritems():
x, y = pos[meter]
if meter.is_site_meter():
delta_y = 5
else:
delta_y = -5
plt.text(x, y+delta_y, s=name, bbox=dict(facecolor='red', alpha=0.5), horizontalalignment='center')
if not meter.is_site_meter():
upstream_meter = meter.upstream_meter()
proportion_of_upstream = meter.proportion_of_upstream()
print(meter.instance(), upstream_meter.instance(), proportion_of_upstream)
graph[upstream_meter][meter]["weight"] = proportion_of_upstream*10
graph[upstream_meter][meter]["color"] = "blue"
nx.draw(graph, pos, labels=meter_labels, arrows=False)
def _plot_area(self, ax=None, timeframe=None, pretty_labels=True, unit='W',
label_kwargs=None, plot_kwargs=None, threshold=None,
**load_kwargs):
"""
Parameters
----------
plot_kwargs : dict of key word arguments for DataFrame.plot()
        unit : {'kW', 'W'}
threshold : float or None
if set to a float then any measured value under this threshold
will be set to 0.
Returns
-------
ax, dataframe
"""
# Get start and end times for the plot
timeframe = self.get_timeframe() if timeframe is None else timeframe
if not timeframe:
return ax
load_kwargs['sections'] = [timeframe]
load_kwargs = self._set_sample_period(timeframe, **load_kwargs)
df = self.dataframe_of_meters(**load_kwargs)
if threshold is not None:
df[df <= threshold] = 0
if unit == 'kW':
df /= 1000
if plot_kwargs is None:
plot_kwargs = {}
df.columns = self.get_labels(df.columns, pretty=pretty_labels)
# Set a tiny linewidth otherwise we get lines even if power is zero
# and this looks ugly when drawn above other lines.
plot_kwargs.setdefault('linewidth', 0.0001)
ax = df.plot(kind='area', **plot_kwargs)
ax.set_ylabel("Power ({:s})".format(unit))
return ax, df
def plot_when_on(self, **load_kwargs):
meter_identifiers = list(self.identifier.meters)
fig, ax = plt.subplots()
for i, meter in enumerate(self.meters):
id_meter = meter.identifier
for chunk_when_on in meter.when_on(**load_kwargs):
series_to_plot = chunk_when_on[chunk_when_on==True]
if len(series_to_plot.index):
(series_to_plot+i-1).plot(ax=ax, style='k.')
labels = self.get_labels(meter_identifiers)
plt.yticks(range(len(self.meters)), labels)
plt.ylim((-0.5, len(self.meters)+0.5))
return ax
def plot_good_sections(self, ax=None, label_func='instance',
include_disabled_meters=True, load_kwargs=None,
**plot_kwargs):
"""
Parameters
----------
label_func : str or None
e.g. 'instance' (default) or 'label'
if None then no labels will be produced.
include_disabled_meters : bool
"""
if ax is None:
ax = plt.gca()
if load_kwargs is None:
load_kwargs = {}
# Prepare list of meters
if include_disabled_meters:
meters = self.all_meters()
else:
meters = self.meters
meters = copy(meters)
meters.sort(key=meter_sorting_key, reverse=True)
n = len(meters)
labels = []
for i, meter in enumerate(meters):
good_sections = meter.good_sections(**load_kwargs)
ax = good_sections.plot(ax=ax, y=i, **plot_kwargs)
del good_sections
if label_func:
labels.append(getattr(meter, label_func)())
        # If no label function is given, just label the first row with the total count
if label_func is None:
labels = [n] + ([''] * (n-1))
# Y tick formatting
ax.set_yticks(np.arange(0, n) + 0.5)
def y_formatter(y, pos):
try:
label = labels[int(y)]
except IndexError:
label = ''
return label
ax.yaxis.set_major_formatter(FuncFormatter(y_formatter))
ax.set_ylim([0, n])
return ax
def _plot_energy_bar(self, ax=None, mains=None):
"""Plot a stacked bar of the energy per meter, in order.
Parameters
----------
ax : matplotlib axes
mains : MeterGroup or ElecMeter, optional
Used to calculate Remainder.
Returns
-------
ax
"""
energy = self.energy_per_meter(mains=mains, per_period='D',
use_meter_labels=True)
energy.sort(ascending=False)
# Plot
ax = pd.DataFrame(energy).T.plot(kind='bar', stacked=True, grid=True,
edgecolor="none", legend=False, width=2)
ax.set_xticks([])
ax.set_ylabel('kWh\nper\nday', rotation=0, ha='center', va='center',
labelpad=15)
cumsum = energy.cumsum()
text_ys = cumsum - (cumsum.diff().fillna(energy['Remainder']) / 2)
for kwh, (label, y) in zip(energy.values, text_ys.iteritems()):
label += " ({:.2f})".format(kwh)
ax.annotate(label, (0, y), color='white', size=8,
horizontalalignment='center',
verticalalignment='center')
return ax
def plot_multiple(self, axes, meter_keys, plot_func,
kwargs_per_meter=None, pretty_label=True, **kwargs):
"""Create multiple subplots.
Parameters
-----------
axes : list of matplotlib axes objects.
            e.g. created using `fig, axes = plt.subplots()`
meter_keys : list of keys for identifying ElecMeters or MeterGroups.
e.g. ['fridge', 'kettle', 4, MeterGroupID, ElecMeterID].
Each element is anything that MeterGroup.__getitem__() accepts.
plot_func : string
Name of function from ElecMeter or Electric or MeterGroup
e.g. `plot_power_histogram`
kwargs_per_meter : dict
Provide key word arguments for the plot_func for each meter.
each key is a parameter name for plot_func
each value is a list (same length as `meters`) for specifying a value for
this parameter for each meter.
e.g. {'range': [(0,100), (0,200)]}
pretty_label : bool
**kwargs : any key word arguments to pass the same values to the
plot func for every meter.
Returns
-------
axes (flattened into a 1D list)
"""
axes = flatten_2d_list(axes)
if len(axes) != len(meter_keys):
raise ValueError("`axes` and `meters` must be of equal length.")
if kwargs_per_meter is None:
kwargs_per_meter = {}
meters = [self[meter_key] for meter_key in meter_keys]
for i, (ax, meter) in enumerate(zip(axes, meters)):
kwargs_copy = deepcopy(kwargs)
for parameter, arguments in kwargs_per_meter.iteritems():
kwargs_copy[parameter] = arguments[i]
getattr(meter, plot_func)(ax=ax, **kwargs_copy)
ax.set_title(meter.label(pretty=pretty_label))
return axes
def sort_meters(self):
"""Sorts meters by instance."""
self.meters.sort(key=meter_sorting_key)
def label(self, **kwargs):
"""
Returns
-------
string : A label listing all the appliance types.
"""
if self.name:
label = self.name
if kwargs.get('pretty'):
label = capitalise_first_letter(label)
return label
return ", ".join(set([meter.label(**kwargs) for meter in self.meters]))
def clear_cache(self):
"""Clear cache on all meters in this MeterGroup."""
for meter in self.meters:
meter.clear_cache()
def correlation_of_sum_of_submeters_with_mains(self, **load_kwargs):
print("Running MeterGroup.correlation_of_sum_of_submeters_with_mains...")
submeters = self.meters_directly_downstream_of_mains()
return self.mains().correlation(submeters, **load_kwargs)
def all_meters(self):
"""Returns a list of self.meters + self.disabled_meters."""
return self.meters + self.disabled_meters
def describe(self, compute_expensive_stats=True, **kwargs):
"""Returns pd.Series describing this MeterGroup."""
series = pd.Series()
all_meters = self.all_meters()
series['total_n_meters'] = len(all_meters)
site_meters = [m for m in all_meters if m.is_site_meter()]
series['total_n_site_meters'] = len(site_meters)
if compute_expensive_stats:
series['correlation_of_sum_of_submeters_with_mains'] = (
self.correlation_of_sum_of_submeters_with_mains(**kwargs))
series['proportion_of_energy_submetered'] = (
self.proportion_of_energy_submetered(**kwargs))
dropout_rates = self._collect_stats_on_all_meters(
kwargs, 'dropout_rate', False)
dropout_rates = np.array(dropout_rates)
series['dropout_rates_ignoring_gaps'] = (
"min={}, mean={}, max={}".format(
dropout_rates.min(),
dropout_rates.mean(),
dropout_rates.max()))
series['mains_sample_period'] = self.mains().sample_period()
series['submeter_sample_period'] = self.submeters().sample_period()
timeframe = self.get_timeframe()
series['timeframe'] = "start={}, end={}".format(timeframe.start, timeframe.end)
series['total_duration'] = str(timeframe.timedelta)
mains_uptime = self.mains().uptime(**kwargs)
series['mains_uptime'] = str(mains_uptime)
try:
series['proportion_uptime'] = (mains_uptime.total_seconds() /
timeframe.timedelta.total_seconds())
except ZeroDivisionError:
series['proportion_uptime'] = np.NaN
series['average_mains_energy_per_day'] = self.mains().average_energy_per_period()
return series
def replace_dataset(identifier, dataset):
"""
Parameters
----------
    identifier : ElecMeterID or MeterGroupID
    dataset : str
        The new dataset name to set on `identifier`.
Returns
-------
ElecMeterID or MeterGroupID with dataset replaced with `dataset`
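    Examples
    --------
    A minimal sketch using a hypothetical ElecMeterID:
    >>> old_id = ElecMeterID(instance=1, building=1, dataset='REDD')  # doctest: +SKIP
    >>> replace_dataset(old_id, 'UK-DALE')  # doctest: +SKIP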
"""
if isinstance(identifier, MeterGroupID):
new_meter_ids = [replace_dataset(id, dataset) for id in identifier.meters]
new_id = MeterGroupID(meters=tuple(new_meter_ids))
elif isinstance(identifier, ElecMeterID):
new_id = identifier._replace(dataset=dataset)
else:
raise TypeError()
return new_id
def iterate_through_submeters_of_two_metergroups(master, slave):
"""
Parameters
----------
master, slave : MeterGroup
Returns
-------
list of 2-tuples of the form (`master_meter`, `slave_meter`)
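    Examples
    --------
    A minimal sketch, assuming ``elec_a`` and ``elec_b`` are MeterGroups
    with matching meter instances (illustrative names only):
    >>> pairs = iterate_through_submeters_of_two_metergroups(elec_a, elec_b)  # doctest: +SKIP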
"""
zipped = []
for master_meter in master.submeters().meters:
slave_identifier = replace_dataset(master_meter.identifier, slave.dataset())
slave_meter = slave[slave_identifier]
zipped.append((master_meter, slave_meter))
return zipped
def combine_chunks_from_generators(index, columns, meters, kwargs):
"""Combines chunks into a single DataFrame.
Adds or averages columns, depending on whether each column is in
PHYSICAL_QUANTITIES_TO_AVERAGE.
Returns
-------
DataFrame
"""
# Regarding columns (e.g. voltage) that we need to average:
# The approach is that we first add everything together
# in the first for-loop, whilst also keeping a
# `columns_to_average_counter` DataFrame
# which tells us what to divide by in order to compute the
# mean for PHYSICAL_QUANTITIES_TO_AVERAGE.
# Regarding doing an in-place addition:
    # We convert our cumulator DataFrame to a NumPy array.
# This allows us to use np.add to do an in-place add.
# If we didn't do this then we'd get horrible memory fragmentation.
# See http://stackoverflow.com/a/27526721/732596
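    # A rough sketch of the in-place pattern used below (for illustration
    # only; `arr` and `other` are placeholder names):
    #     arr = cumulator.as_matrix()               # array backing the DataFrame
    #     np.nansum([arr, other], axis=0, out=arr)  # accumulate in place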
DTYPE = np.float32
cumulator = pd.DataFrame(np.NaN, index=index, columns=columns, dtype=DTYPE)
cumulator_arr = cumulator.as_matrix()
columns_to_average_counter = pd.DataFrame(dtype=np.uint16)
timeframe = None
# Go through each generator to try sum values together
for meter in meters:
print_on_line("\rLoading data for meter", meter.identifier, " ")
kwargs_copy = deepcopy(kwargs)
generator = meter.load(**kwargs_copy)
try:
chunk_from_next_meter = generator.next()
except StopIteration:
continue
del generator
del kwargs_copy
gc.collect()
if chunk_from_next_meter.empty or not chunk_from_next_meter.timeframe:
continue
if timeframe is None:
timeframe = chunk_from_next_meter.timeframe
else:
timeframe = timeframe.union(chunk_from_next_meter.timeframe)
# Add (in-place)
for i, column_name in enumerate(columns):
try:
column = chunk_from_next_meter[column_name]
except KeyError:
continue
aligned = column.reindex(index, copy=False).values
del column
cumulator_col = cumulator_arr[:,i]
where_both_are_nan = np.isnan(cumulator_col) & np.isnan(aligned)
np.nansum([cumulator_col, aligned], axis=0, out=cumulator_col,
dtype=DTYPE)
cumulator_col[where_both_are_nan] = np.NaN
del aligned
del where_both_are_nan
gc.collect()
# Update columns_to_average_counter - this is necessary so we do not
# add up columns like 'voltage' which should be averaged.
physical_quantities = chunk_from_next_meter.columns.get_level_values('physical_quantity')
columns_to_average = (set(PHYSICAL_QUANTITIES_TO_AVERAGE)
.intersection(physical_quantities))
if columns_to_average:
counter_increment = pd.DataFrame(1, columns=columns_to_average,
dtype=np.uint16,
index=chunk_from_next_meter.index)
columns_to_average_counter = columns_to_average_counter.add(
counter_increment, fill_value=0)
del counter_increment
del chunk_from_next_meter
gc.collect()
del cumulator_arr
gc.collect()
# Create mean values by dividing any columns which need dividing
for column in columns_to_average_counter:
cumulator[column] /= columns_to_average_counter[column]
del columns_to_average_counter
gc.collect()
print()
print("Done loading data all meters for this chunk.")
cumulator.timeframe = timeframe
return cumulator
meter_sorting_key = lambda meter: meter.instance()
| apache-2.0 |
akrherz/iem | htdocs/plotting/auto/scripts100/p120.py | 1 | 5184 | """last spring temp"""
import datetime
from pandas.io.sql import read_sql
import pandas as pd
import matplotlib.dates as mdates
from pyiem.plot import figure_axes
from pyiem.util import get_autoplot_context, get_dbconn
from pyiem.exceptions import NoDataFound
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc["data"] = True
desc["report"] = True
desc[
"description"
] = """This chart presents the accumulated frequency of
having the last spring temperature at or below a given threshold."""
desc["arguments"] = [
dict(
type="station",
name="station",
default="IATDSM",
label="Select Station",
network="IACLIMATE",
),
dict(type="int", name="t1", default=32, label="First Threshold (F)"),
dict(type="int", name="t2", default=28, label="Second Threshold (F)"),
dict(type="int", name="t3", default=26, label="Third Threshold (F)"),
dict(type="int", name="t4", default=22, label="Fourth Threshold (F)"),
dict(
type="year",
name="syear",
min=1880,
label="Potential (if data exists) minimum year",
default=1880,
),
dict(
type="year",
name="eyear",
min=1880,
label="Potential (if data exists) exclusive maximum year",
default=datetime.date.today().year,
),
]
return desc
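# A minimal sketch of calling this plotter directly; the values below mirror
# the defaults in get_description() and are illustrative only:
#
#     fig, df, res = plotter(dict(station="IATDSM", t1=32, t2=28, t3=26,
#                                 t4=22))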
def plotter(fdict):
""" Go """
pgconn = get_dbconn("coop")
ctx = get_autoplot_context(fdict, get_description())
station = ctx["station"]
thresholds = [ctx["t1"], ctx["t2"], ctx["t3"], ctx["t4"]]
table = "alldata_%s" % (station[:2],)
# Load up dict of dates..
df = pd.DataFrame(
{
"dates": pd.date_range("2000/01/29", "2000/06/30"),
"%scnts" % (thresholds[0],): 0,
"%scnts" % (thresholds[1],): 0,
"%scnts" % (thresholds[2],): 0,
"%scnts" % (thresholds[3],): 0,
},
index=range(29, 183),
)
df.index.name = "doy"
for base in thresholds:
# Query Last doy for each year in archive
df2 = read_sql(
f"""
select year,
max(case when low <= %s then extract(doy from day)
else 0 end) as doy from {table}
WHERE month < 7 and station = %s and year > %s and year < %s
GROUP by year
""",
pgconn,
params=(base, station, ctx["syear"], ctx["eyear"]),
index_col=None,
)
for _, row in df2.iterrows():
if row["doy"] == 0:
continue
df.loc[0 : row["doy"], "%scnts" % (base,)] += 1
df["%sfreq" % (base,)] = (
df["%scnts" % (base,)] / len(df2.index) * 100.0
)
bs = ctx["_nt"].sts[station]["archive_begin"]
if bs is None:
raise NoDataFound("No metadata found.")
res = """\
# IEM Climodat https://mesonet.agron.iastate.edu/climodat/
# Report Generated: %s
# Climate Record: %s -> %s
# Site Information: [%s] %s
# Contact Information: Daryl Herzmann akrherz@iastate.edu 515.294.5978
# Low Temperature exceedence probabilities
# (On a certain date, what is the chance a temperature below a certain
# threshold would be observed again that spring season)
DOY Date <%s <%s <%s <%s
""" % (
datetime.date.today().strftime("%d %b %Y"),
max([bs.date(), datetime.date(ctx["syear"], 1, 1)]),
min([datetime.date.today(), datetime.date(ctx["eyear"] - 1, 12, 31)]),
station,
ctx["_nt"].sts[station]["name"],
thresholds[0] + 1,
thresholds[1] + 1,
thresholds[2] + 1,
thresholds[3] + 1,
)
fcols = ["%sfreq" % (s,) for s in thresholds]
mindate = None
for doy, row in df.iterrows():
if doy % 2 != 0:
continue
if row[fcols[3]] < 100 and mindate is None:
mindate = row["dates"] - datetime.timedelta(days=5)
res += (" %3s %s %3i %3i %3i %3i\n") % (
row["dates"].strftime("%-j"),
row["dates"].strftime("%b %d"),
row[fcols[0]],
row[fcols[1]],
row[fcols[2]],
row[fcols[3]],
)
title = "Frequency of Last Spring Temperature"
subtitle = "%s %s (%s-%s)" % (
station,
ctx["_nt"].sts[station]["name"],
max([bs.date(), datetime.date(ctx["syear"], 1, 1)]),
min([datetime.date.today(), datetime.date(ctx["eyear"] - 1, 12, 31)]),
)
(fig, ax) = figure_axes(title=title, subtitle=subtitle)
for base in thresholds:
ax.plot(
df["dates"].values,
df["%sfreq" % (base,)],
label="%s" % (base,),
lw=2,
)
ax.legend(loc="best")
ax.set_xlim(mindate)
ax.xaxis.set_major_locator(mdates.DayLocator([1, 7, 14, 21]))
ax.xaxis.set_major_formatter(mdates.DateFormatter("%-d\n%b"))
ax.grid(True)
df.reset_index(inplace=True)
return fig, df, res
if __name__ == "__main__":
plotter(dict())
| mit |
meduz/scikit-learn | examples/linear_model/plot_ols_3d.py | 350 | 2040 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
=========================================================
Sparsity Example: Fitting only features 1 and 2
=========================================================
Features 1 and 2 of the diabetes-dataset are fitted and
plotted below. It illustrates that although feature 2
has a strong coefficient on the full model, it does not
give us much regarding `y` when compared to just feature 1
"""
print(__doc__)
# Code source: Gaël Varoquaux
# Modified for documentation by Jaques Grobler
# License: BSD 3 clause
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from sklearn import datasets, linear_model
diabetes = datasets.load_diabetes()
indices = (0, 1)
X_train = diabetes.data[:-20, indices]
X_test = diabetes.data[-20:, indices]
y_train = diabetes.target[:-20]
y_test = diabetes.target[-20:]
ols = linear_model.LinearRegression()
ols.fit(X_train, y_train)
###############################################################################
# Plot the figure
def plot_figs(fig_num, elev, azim, X_train, clf):
fig = plt.figure(fig_num, figsize=(4, 3))
plt.clf()
ax = Axes3D(fig, elev=elev, azim=azim)
ax.scatter(X_train[:, 0], X_train[:, 1], y_train, c='k', marker='+')
ax.plot_surface(np.array([[-.1, -.1], [.15, .15]]),
np.array([[-.1, .15], [-.1, .15]]),
clf.predict(np.array([[-.1, -.1, .15, .15],
[-.1, .15, -.1, .15]]).T
).reshape((2, 2)),
alpha=.5)
ax.set_xlabel('X_1')
ax.set_ylabel('X_2')
ax.set_zlabel('Y')
ax.w_xaxis.set_ticklabels([])
ax.w_yaxis.set_ticklabels([])
ax.w_zaxis.set_ticklabels([])
#Generate the three different figures from different views
elev = 43.5
azim = -110
plot_figs(1, elev, azim, X_train, ols)
elev = -.5
azim = 0
plot_figs(2, elev, azim, X_train, ols)
elev = -.5
azim = 90
plot_figs(3, elev, azim, X_train, ols)
plt.show()
| bsd-3-clause |
sysid/kg | quora/Ensemble_CNN_TD_Quora.py | 1 | 12948 | # coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
import nltk
from nltk.corpus import stopwords
from nltk.stem import SnowballStemmer
import re
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# In[2]:
train = pd.read_csv("../input/train.csv")
test = pd.read_csv("../input/test.csv")
# In[3]:
train.head()
# In[4]:
test.head()
# In[5]:
print(train.shape)
print(test.shape)
# In[6]:
print(train.isnull().sum())
print(test.isnull().sum())
# In[7]:
train = train.fillna('empty')
test = test.fillna('empty')
# In[8]:
print(train.isnull().sum())
print(test.isnull().sum())
# In[9]:
test.head()
# In[10]:
for i in range(6):
print(train.question1[i])
print(train.question2[i])
print()
# In[17]:
def text_to_wordlist(text, remove_stopwords=False, stem_words=False):
# Clean the text, with the option to remove stopwords and to stem words.
# Convert words to lower case and split them
text = text.lower().split()
    # Optionally remove stop words (False by default)
if remove_stopwords:
stops = set(stopwords.words("english"))
text = [w for w in text if not w in stops]
text = " ".join(text)
# Clean the text
text = re.sub(r"[^A-Za-z0-9^,!.\'+-=]", " ", text)
text = re.sub(r"\'s", " 's ", text)
text = re.sub(r"\'ve", " have ", text)
text = re.sub(r"can't", " cannot ", text)
text = re.sub(r"n't", " not ", text)
text = re.sub(r"\'re", " are ", text)
text = re.sub(r"\'d", " would ", text)
text = re.sub(r"\'ll", " will ", text)
text = re.sub(r",", " ", text)
text = re.sub(r"\.", " ", text)
text = re.sub(r"!", " ! ", text)
text = re.sub(r"\^", " ^ ", text)
text = re.sub(r"\+", " + ", text)
text = re.sub(r"\-", " - ", text)
text = re.sub(r"\=", " = ", text)
text = re.sub(r"\s{2,}", " ", text)
# Shorten words to their stems
if stem_words:
text = text.split()
stemmer = SnowballStemmer('english')
stemmed_words = [stemmer.stem(word) for word in text]
text = " ".join(stemmed_words)
# Return a list of words
return(text)
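# A quick illustrative call of the cleaning function above (the example
# sentence is made up; the exact output depends on the regexes and stemmer):
#
#     text_to_wordlist("What's the best way to learn NLP?", stem_words=True)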
# In[18]:
def process_questions(question_list, questions, question_list_name, dataframe):
# function to transform questions and display progress
for question in questions:
question_list.append(text_to_wordlist(question))
if len(question_list) % 100000 == 0:
progress = len(question_list)/len(dataframe) * 100
print("{} is {}% complete.".format(question_list_name, round(progress, 1)))
# In[19]:
train_question1 = []
process_questions(train_question1, train.question1, 'train_question1', train)
# In[35]:
train_question2 = []
process_questions(train_question2, train.question2, 'train_question2', train)
# In[36]:
test_question1 = []
process_questions(test_question1, test.question1, 'test_question1', test)
# In[37]:
test_question2 = []
process_questions(test_question2, test.question2, 'test_question2', test)
# # Using Keras
# In[38]:
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import datetime, time, json
from keras.models import Sequential
from keras.layers import Embedding, Dense, Dropout, Reshape, Merge, BatchNormalization, TimeDistributed, Lambda, Activation, LSTM, Flatten, Bidirectional, Convolution1D, GRU, MaxPooling1D, Convolution2D
from keras.regularizers import l2
from keras.callbacks import Callback, ModelCheckpoint, EarlyStopping
from keras import backend as K
from sklearn.model_selection import train_test_split
from keras.optimizers import SGD
from collections import defaultdict
# In[39]:
# Count the number of occurrences of each question string
# (note: the keys here are whole questions, not individual words)
word_count = defaultdict(int)
for question in train_question1:
word_count[question] += 1
print("train_question1 is complete.")
for question in train_question2:
word_count[question] += 1
print("train_question2 is complete")
for question in test_question1:
word_count[question] += 1
print("test_question1 is complete.")
for question in test_question2:
word_count[question] += 1
print("test_question2 is complete")
print("Total number of unique words:", len(word_count))
# In[40]:
# Find the length of questions
lengths = []
for question in train_question1:
lengths.append(len(question.split()))
for question in train_question2:
lengths.append(len(question.split()))
# Create a dataframe so that the values can be inspected
lengths = pd.DataFrame(lengths, columns=['counts'])
# In[41]:
lengths.counts.describe()
# In[42]:
np.percentile(lengths.counts, 99.5)
# In[43]:
num_words = 200000
train_questions = train_question1 + train_question2
tokenizer = Tokenizer(nb_words = num_words)
tokenizer.fit_on_texts(train_questions)
print("Fitting is compelte.")
train_question1_word_sequences = tokenizer.texts_to_sequences(train_question1)
print("train_question1 is complete.")
train_question2_word_sequences = tokenizer.texts_to_sequences(train_question2)
print("train_question2 is complete")
# In[44]:
test_question1_word_sequences = tokenizer.texts_to_sequences(test_question1)
print("test_question1 is complete.")
test_question2_word_sequences = tokenizer.texts_to_sequences(test_question2)
print("test_question2 is complete.")
# In[45]:
word_index = tokenizer.word_index
print("Words in index: %d" % len(word_index))
# In[46]:
# Pad the questions so that they all have the same length.
max_question_len = 37
train_q1 = pad_sequences(train_question1_word_sequences,
maxlen = max_question_len,
padding = 'post',
truncating = 'post')
print("train_q1 is complete.")
train_q2 = pad_sequences(train_question2_word_sequences,
maxlen = max_question_len,
padding = 'post',
truncating = 'post')
print("train_q2 is complete.")
# In[47]:
test_q1 = pad_sequences(test_question1_word_sequences,
maxlen = max_question_len,
padding = 'post',
truncating = 'post')
print("test_q1 is complete.")
test_q2 = pad_sequences(test_question2_word_sequences,
maxlen = max_question_len,
padding = 'post',
truncating = 'post')
print("test_q2 is complete.")
# In[48]:
y_train = train.is_duplicate
# In[49]:
# Load GloVe to use pretrained vectors
# From this link: https://nlp.stanford.edu/projects/glove/
embeddings_index = {}
with open('glove.840B.300d.txt', encoding='utf-8') as f:
for line in f:
values = line.split(' ')
word = values[0]
embedding = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = embedding
print('Word embeddings:', len(embeddings_index))
# In[50]:
# Need to use 300 for embedding dimensions to match GloVe vectors.
embedding_dim = 300
nb_words = len(word_index)
word_embedding_matrix = np.zeros((nb_words + 1, embedding_dim))
for word, i in word_index.items():
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
word_embedding_matrix[i] = embedding_vector
print('Null word embeddings: %d' % np.sum(np.sum(word_embedding_matrix, axis=1) == 0))
# In[66]:
units = 150
dropout = 0.25
nb_filter = 32
filter_length = 3
embedding_dim = 300
model1 = Sequential()
model1.add(Embedding(nb_words + 1,
embedding_dim,
weights = [word_embedding_matrix],
input_length = max_question_len,
trainable = False))
model1.add(Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'same'))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(dropout))
model1.add(Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'same'))
model1.add(BatchNormalization())
model1.add(Activation('relu'))
model1.add(Dropout(dropout))
model1.add(Flatten())
model2 = Sequential()
model2.add(Embedding(nb_words + 1,
embedding_dim,
weights = [word_embedding_matrix],
input_length = max_question_len,
trainable = False))
model2.add(Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'same'))
model2.add(BatchNormalization())
model2.add(Activation('relu'))
model2.add(Dropout(dropout))
model2.add(Convolution1D(nb_filter = nb_filter,
filter_length = filter_length,
border_mode = 'same'))
model2.add(BatchNormalization())
model2.add(Activation('relu'))
model2.add(Dropout(dropout))
model2.add(Flatten())
model3 = Sequential()
model3.add(Embedding(nb_words + 1,
embedding_dim,
weights = [word_embedding_matrix],
input_length = max_question_len,
trainable = False))
model3.add(TimeDistributed(Dense(embedding_dim)))
model3.add(BatchNormalization())
model3.add(Activation('relu'))
model3.add(Dropout(dropout))
model3.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(embedding_dim, )))
model4 = Sequential()
model4.add(Embedding(nb_words + 1,
embedding_dim,
weights = [word_embedding_matrix],
input_length = max_question_len,
trainable = False))
model4.add(TimeDistributed(Dense(embedding_dim)))
model4.add(BatchNormalization())
model4.add(Activation('relu'))
model4.add(Dropout(dropout))
model4.add(Lambda(lambda x: K.max(x, axis=1), output_shape=(embedding_dim, )))
modela = Sequential()
modela.add(Merge([model1, model2], mode='concat'))
modela.add(Dense(units))
modela.add(BatchNormalization())
modela.add(Activation('relu'))
modela.add(Dropout(dropout))
modela.add(Dense(units))
modela.add(BatchNormalization())
modela.add(Activation('relu'))
modela.add(Dropout(dropout))
modelb = Sequential()
modelb.add(Merge([model3, model4], mode='concat'))
modelb.add(Dense(units))
modelb.add(BatchNormalization())
modelb.add(Activation('relu'))
modelb.add(Dropout(dropout))
modelb.add(Dense(units))
modelb.add(BatchNormalization())
modelb.add(Activation('relu'))
modelb.add(Dropout(dropout))
model = Sequential()
model.add(Merge([modela, modelb], mode='concat'))
model.add(Dense(units))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(units))
model.add(BatchNormalization())
model.add(Activation('relu'))
model.add(Dropout(dropout))
model.add(Dense(1))
model.add(BatchNormalization())
model.add(Activation('sigmoid'))
#sgd = SGD(lr=0.01, decay=5e-6, momentum=0.9, nesterov=True)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# In[67]:
save_best_weights = 'question_pairs_weights.h5'
t0 = time.time()
callbacks = [ModelCheckpoint(save_best_weights, monitor='val_loss', save_best_only=True),
EarlyStopping(monitor='val_loss', patience=5, verbose=1, mode='auto')]
history = model.fit([train_q1, train_q2],
y_train,
batch_size=200,
nb_epoch=100,
validation_split=0.1,
verbose=True,
shuffle=True,
callbacks=callbacks)
t1 = time.time()
print("Minutes elapsed: %f" % ((t1 - t0) / 60.))
# In[68]:
summary_stats = pd.DataFrame({'epoch': [ i + 1 for i in history.epoch ],
'train_acc': history.history['acc'],
'valid_acc': history.history['val_acc'],
'train_loss': history.history['loss'],
'valid_loss': history.history['val_loss']})
# In[69]:
summary_stats
# In[70]:
plt.plot(summary_stats.train_loss)
plt.plot(summary_stats.valid_loss)
plt.show()
# In[71]:
min_loss, idx = min((loss, idx) for (idx, loss) in enumerate(history.history['val_loss']))
print('Minimum loss at epoch', '{:d}'.format(idx+1), '=', '{:.4f}'.format(min_loss))
min_loss = round(min_loss, 4)
# In[72]:
model.load_weights(save_best_weights)
predictions = model.predict([test_q1, test_q2], verbose = True)
# In[73]:
#Create submission
submission = pd.DataFrame(predictions, columns=['is_duplicate'])
submission.insert(0, 'test_id', test.test_id)
file_name = 'submission_{}.csv'.format(min_loss)
submission.to_csv(file_name, index=False)
# In[74]:
submission.head(10)
| mit |
mayblue9/scikit-learn | examples/linear_model/lasso_dense_vs_sparse_data.py | 348 | 1862 | """
==============================
Lasso on dense and sparse data
==============================
We show that linear_model.Lasso provides the same results for dense and sparse
data and that in the case of sparse data the speed is improved.
"""
print(__doc__)
from time import time
from scipy import sparse
from scipy import linalg
from sklearn.datasets.samples_generator import make_regression
from sklearn.linear_model import Lasso
###############################################################################
# The two Lasso implementations on Dense data
print("--- Dense matrices")
X, y = make_regression(n_samples=200, n_features=5000, random_state=0)
X_sp = sparse.coo_matrix(X)
alpha = 1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=1000)
t0 = time()
sparse_lasso.fit(X_sp, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(X, y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
###############################################################################
# The two Lasso implementations on Sparse data
print("--- Sparse matrices")
Xs = X.copy()
Xs[Xs < 2.5] = 0.0
Xs = sparse.coo_matrix(Xs)
Xs = Xs.tocsc()
print("Matrix density : %s %%" % (Xs.nnz / float(X.size) * 100))
alpha = 0.1
sparse_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
dense_lasso = Lasso(alpha=alpha, fit_intercept=False, max_iter=10000)
t0 = time()
sparse_lasso.fit(Xs, y)
print("Sparse Lasso done in %fs" % (time() - t0))
t0 = time()
dense_lasso.fit(Xs.toarray(), y)
print("Dense Lasso done in %fs" % (time() - t0))
print("Distance between coefficients : %s"
% linalg.norm(sparse_lasso.coef_ - dense_lasso.coef_))
| bsd-3-clause |
YinongLong/scikit-learn | sklearn/manifold/isomap.py | 50 | 7515 | """Isomap for manifold learning"""
# Author: Jake Vanderplas -- <vanderplas@astro.washington.edu>
# License: BSD 3 clause (C) 2011
import numpy as np
from ..base import BaseEstimator, TransformerMixin
from ..neighbors import NearestNeighbors, kneighbors_graph
from ..utils import check_array
from ..utils.graph import graph_shortest_path
from ..decomposition import KernelPCA
from ..preprocessing import KernelCenterer
class Isomap(BaseEstimator, TransformerMixin):
"""Isomap Embedding
Non-linear dimensionality reduction through Isometric Mapping
Read more in the :ref:`User Guide <isomap>`.
Parameters
----------
n_neighbors : integer
number of neighbors to consider for each point.
n_components : integer
number of coordinates for the manifold
eigen_solver : ['auto'|'arpack'|'dense']
'auto' : Attempt to choose the most efficient solver
for the given problem.
'arpack' : Use Arnoldi decomposition to find the eigenvalues
and eigenvectors.
'dense' : Use a direct solver (i.e. LAPACK)
for the eigenvalue decomposition.
tol : float
Convergence tolerance passed to arpack or lobpcg.
not used if eigen_solver == 'dense'.
max_iter : integer
Maximum number of iterations for the arpack solver.
not used if eigen_solver == 'dense'.
path_method : string ['auto'|'FW'|'D']
Method to use in finding shortest path.
'auto' : attempt to choose the best algorithm automatically.
'FW' : Floyd-Warshall algorithm.
'D' : Dijkstra's algorithm.
neighbors_algorithm : string ['auto'|'brute'|'kd_tree'|'ball_tree']
Algorithm to use for nearest neighbors search,
passed to neighbors.NearestNeighbors instance.
n_jobs : int, optional (default = 1)
The number of parallel jobs to run.
If ``-1``, then the number of jobs is set to the number of CPU cores.
Attributes
----------
embedding_ : array-like, shape (n_samples, n_components)
Stores the embedding vectors.
kernel_pca_ : object
`KernelPCA` object used to implement the embedding.
training_data_ : array-like, shape (n_samples, n_features)
Stores the training data.
nbrs_ : sklearn.neighbors.NearestNeighbors instance
Stores nearest neighbors instance, including BallTree or KDtree
if applicable.
dist_matrix_ : array-like, shape (n_samples, n_samples)
Stores the geodesic distance matrix of training data.
References
----------
.. [1] Tenenbaum, J.B.; De Silva, V.; & Langford, J.C. A global geometric
framework for nonlinear dimensionality reduction. Science 290 (5500)
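    Examples
    --------
    A small usage sketch on the digits data:
    >>> from sklearn.datasets import load_digits
    >>> from sklearn.manifold import Isomap
    >>> digits = load_digits()
    >>> embedding = Isomap(n_components=2)
    >>> X_transformed = embedding.fit_transform(digits.data[:100])
    >>> X_transformed.shape
    (100, 2)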
"""
def __init__(self, n_neighbors=5, n_components=2, eigen_solver='auto',
tol=0, max_iter=None, path_method='auto',
neighbors_algorithm='auto', n_jobs=1):
self.n_neighbors = n_neighbors
self.n_components = n_components
self.eigen_solver = eigen_solver
self.tol = tol
self.max_iter = max_iter
self.path_method = path_method
self.neighbors_algorithm = neighbors_algorithm
self.n_jobs = n_jobs
def _fit_transform(self, X):
X = check_array(X)
self.nbrs_ = NearestNeighbors(n_neighbors=self.n_neighbors,
algorithm=self.neighbors_algorithm,
n_jobs=self.n_jobs)
self.nbrs_.fit(X)
self.training_data_ = self.nbrs_._fit_X
self.kernel_pca_ = KernelPCA(n_components=self.n_components,
kernel="precomputed",
eigen_solver=self.eigen_solver,
tol=self.tol, max_iter=self.max_iter,
n_jobs=self.n_jobs)
kng = kneighbors_graph(self.nbrs_, self.n_neighbors,
mode='distance', n_jobs=self.n_jobs)
self.dist_matrix_ = graph_shortest_path(kng,
method=self.path_method,
directed=False)
G = self.dist_matrix_ ** 2
G *= -0.5
self.embedding_ = self.kernel_pca_.fit_transform(G)
def reconstruction_error(self):
"""Compute the reconstruction error for the embedding.
Returns
-------
reconstruction_error : float
Notes
-------
The cost function of an isomap embedding is
``E = frobenius_norm[K(D) - K(D_fit)] / n_samples``
Where D is the matrix of distances for the input data X,
D_fit is the matrix of distances for the output embedding X_fit,
and K is the isomap kernel:
``K(D) = -0.5 * (I - 1/n_samples) * D^2 * (I - 1/n_samples)``
"""
G = -0.5 * self.dist_matrix_ ** 2
G_center = KernelCenterer().fit_transform(G)
evals = self.kernel_pca_.lambdas_
return np.sqrt(np.sum(G_center ** 2) - np.sum(evals ** 2)) / G.shape[0]
def fit(self, X, y=None):
"""Compute the embedding vectors for data X
Parameters
----------
X : {array-like, sparse matrix, BallTree, KDTree, NearestNeighbors}
Sample data, shape = (n_samples, n_features), in the form of a
numpy array, precomputed tree, or NearestNeighbors
object.
Returns
-------
self : returns an instance of self.
"""
self._fit_transform(X)
return self
def fit_transform(self, X, y=None):
"""Fit the model from data in X and transform X.
Parameters
----------
X: {array-like, sparse matrix, BallTree, KDTree}
Training vector, where n_samples in the number of samples
and n_features is the number of features.
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
self._fit_transform(X)
return self.embedding_
def transform(self, X):
"""Transform X.
This is implemented by linking the points X into the graph of geodesic
distances of the training data. First the `n_neighbors` nearest
neighbors of X are found in the training data, and from these the
shortest geodesic distances from each point in X to each point in
the training data are computed in order to construct the kernel.
The embedding of X is the projection of this kernel onto the
embedding vectors of the training set.
Parameters
----------
X: array-like, shape (n_samples, n_features)
Returns
-------
X_new: array-like, shape (n_samples, n_components)
"""
X = check_array(X)
distances, indices = self.nbrs_.kneighbors(X, return_distance=True)
# Create the graph of shortest distances from X to self.training_data_
# via the nearest neighbors of X.
# This can be done as a single array operation, but it potentially
# takes a lot of memory. To avoid that, use a loop:
G_X = np.zeros((X.shape[0], self.training_data_.shape[0]))
for i in range(X.shape[0]):
G_X[i] = np.min(self.dist_matrix_[indices[i]] +
distances[i][:, None], 0)
G_X **= 2
G_X *= -0.5
return self.kernel_pca_.transform(G_X)
| bsd-3-clause |
indashnet/InDashNet.Open.UN2000 | android/external/chromium_org/chrome/test/nacl_test_injection/buildbot_nacl_integration.py | 61 | 2538 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import subprocess
import sys
def Main(args):
pwd = os.environ.get('PWD', '')
is_integration_bot = 'nacl-chrome' in pwd
# This environment variable check mimics what
# buildbot_chrome_nacl_stage.py does.
is_win64 = (sys.platform in ('win32', 'cygwin') and
('64' in os.environ.get('PROCESSOR_ARCHITECTURE', '') or
'64' in os.environ.get('PROCESSOR_ARCHITEW6432', '')))
# On the main Chrome waterfall, we may need to control where the tests are
# run.
# If there is serious skew in the PPAPI interface that causes all of
# the NaCl integration tests to fail, you can uncomment the
# following block. (Make sure you comment it out when the issues
# are resolved.) *However*, it is much preferred to add tests to
# the 'tests_to_disable' list below.
#if not is_integration_bot:
# return
tests_to_disable = []
# In general, you should disable tests inside this conditional. This turns
# them off on the main Chrome waterfall, but not on NaCl's integration bots.
# This makes it easier to see when things have been fixed NaCl side.
if not is_integration_bot:
# http://code.google.com/p/nativeclient/issues/detail?id=2511
tests_to_disable.append('run_ppapi_ppb_image_data_browser_test')
if sys.platform == 'darwin':
# TODO(mseaborn) fix
# http://code.google.com/p/nativeclient/issues/detail?id=1835
tests_to_disable.append('run_ppapi_crash_browser_test')
if sys.platform in ('win32', 'cygwin'):
# This one is only failing for nacl_glibc on x64 Windows
# but it is not clear how to disable only that limited case.
# See http://crbug.com/132395
tests_to_disable.append('run_inbrowser_test_runner')
script_dir = os.path.dirname(os.path.abspath(__file__))
nacl_integration_script = os.path.join(script_dir,
'buildbot_chrome_nacl_stage.py')
cmd = [sys.executable,
nacl_integration_script,
# TODO(ncbray) re-enable.
# https://code.google.com/p/chromium/issues/detail?id=133568
'--disable_glibc',
'--disable_tests=%s' % ','.join(tests_to_disable)]
cmd += args
sys.stdout.write('Running %s\n' % ' '.join(cmd))
sys.stdout.flush()
return subprocess.call(cmd)
if __name__ == '__main__':
sys.exit(Main(sys.argv[1:]))
| apache-2.0 |
CVL-dev/cvl-fabric-launcher | pyinstaller-2.1/PyInstaller/loader/rthooks/pyi_rth_mplconfig.py | 10 | 1430 | #-----------------------------------------------------------------------------
# Copyright (c) 2013, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License with exception
# for distributing bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#-----------------------------------------------------------------------------
# matplotlib will create $HOME/.matplotlib folder in user's home directory.
# In this directory there is fontList.cache file which lists paths
# to matplotlib fonts.
#
# When you run your onefile exe for the first time it's extracted to for example
# "_MEIxxxxx" temp directory and fontList.cache file is created with fonts paths
# pointing to this directory.
#
# Second time you run your exe new directory is created "_MEIyyyyy" but
# fontList.cache file still points to previous directory which was deleted.
# And then you will get error like:
#
# RuntimeError: Could not open facefile
#
# We need to force matplotlib to recreate config directory every time you run
# your app.
import atexit
import os
import shutil
import tempfile
# Put matplot config dir to temp directory.
configdir = tempfile.mkdtemp()
os.environ['MPLCONFIGDIR'] = configdir
try:
# Remove temp directory at application exit and ignore any errors.
atexit.register(shutil.rmtree, configdir, ignore_errors=True)
except OSError:
pass
| gpl-3.0 |
mjgrav2001/scikit-learn | sklearn/datasets/species_distributions.py | 198 | 7923 | """
=============================
Species distribution dataset
=============================
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References:
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes:
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset
"""
# Authors: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Jake Vanderplas <vanderplas@astro.washington.edu>
#
# License: BSD 3 clause
from io import BytesIO
from os import makedirs
from os.path import join
from os.path import exists
try:
# Python 2
from urllib2 import urlopen
PY2 = True
except ImportError:
# Python 3
from urllib.request import urlopen
PY2 = False
import numpy as np
from sklearn.datasets.base import get_data_home, Bunch
from sklearn.externals import joblib
DIRECTORY_URL = "http://www.cs.princeton.edu/~schapire/maxent/datasets/"
SAMPLES_URL = join(DIRECTORY_URL, "samples.zip")
COVERAGES_URL = join(DIRECTORY_URL, "coverages.zip")
DATA_ARCHIVE_NAME = "species_coverage.pkz"
def _load_coverage(F, header_length=6, dtype=np.int16):
"""Load a coverage file from an open file object.
This will return a numpy array of the given dtype
"""
header = [F.readline() for i in range(header_length)]
make_tuple = lambda t: (t.split()[0], float(t.split()[1]))
header = dict([make_tuple(line) for line in header])
M = np.loadtxt(F, dtype=dtype)
nodata = header[b'NODATA_value']
if nodata != -9999:
print(nodata)
M[nodata] = -9999
return M
def _load_csv(F):
"""Load csv file.
Parameters
----------
F : file object
CSV file open in byte mode.
Returns
-------
rec : np.ndarray
record array representing the data
"""
if PY2:
# Numpy recarray wants Python 2 str but not unicode
names = F.readline().strip().split(',')
else:
# Numpy recarray wants Python 3 str but not bytes...
names = F.readline().decode('ascii').strip().split(',')
rec = np.loadtxt(F, skiprows=0, delimiter=',', dtype='a22,f4,f4')
rec.dtype.names = names
return rec
def construct_grids(batch):
"""Construct the map grid from the batch object
Parameters
----------
batch : Batch object
The object returned by :func:`fetch_species_distributions`
Returns
-------
(xgrid, ygrid) : 1-D arrays
The grid corresponding to the values in batch.coverages
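    Examples
    --------
    A minimal sketch (fetching the data requires a download, so it is
    skipped here):
    >>> from sklearn.datasets import fetch_species_distributions
    >>> from sklearn.datasets.species_distributions import construct_grids
    >>> data = fetch_species_distributions()  # doctest: +SKIP
    >>> xgrid, ygrid = construct_grids(data)  # doctest: +SKIP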
"""
# x,y coordinates for corner cells
xmin = batch.x_left_lower_corner + batch.grid_size
xmax = xmin + (batch.Nx * batch.grid_size)
ymin = batch.y_left_lower_corner + batch.grid_size
ymax = ymin + (batch.Ny * batch.grid_size)
# x coordinates of the grid cells
xgrid = np.arange(xmin, xmax, batch.grid_size)
# y coordinates of the grid cells
ygrid = np.arange(ymin, ymax, batch.grid_size)
return (xgrid, ygrid)
def fetch_species_distributions(data_home=None,
download_if_missing=True):
"""Loader for species distribution dataset from Phillips et. al. (2006)
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : optional, default: None
Specify another download and cache folder for the datasets. By default
all scikit learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing: optional, True by default
If False, raise a IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
--------
The data is returned as a Bunch object with the following attributes:
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1623,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (619,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
------
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/apps/redlist/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/apps/redlist/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, Peru, and Venezuela.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://www.cs.princeton.edu/~schapire/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
Notes
-----
* See examples/applications/plot_species_distribution_modeling.py
for an example of using this dataset with scikit-learn
"""
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05)
dtype = np.int16
if not exists(join(data_home, DATA_ARCHIVE_NAME)):
print('Downloading species data from %s to %s' % (SAMPLES_URL,
data_home))
X = np.load(BytesIO(urlopen(SAMPLES_URL).read()))
for f in X.files:
fhandle = BytesIO(X[f])
if 'train' in f:
train = _load_csv(fhandle)
if 'test' in f:
test = _load_csv(fhandle)
print('Downloading coverage data from %s to %s' % (COVERAGES_URL,
data_home))
X = np.load(BytesIO(urlopen(COVERAGES_URL).read()))
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
print(' - converting', f)
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
bunch = Bunch(coverages=coverages,
test=test,
train=train,
**extra_params)
joblib.dump(bunch, join(data_home, DATA_ARCHIVE_NAME), compress=9)
else:
bunch = joblib.load(join(data_home, DATA_ARCHIVE_NAME))
return bunch
| bsd-3-clause |
anntzer/scikit-learn | sklearn/tests/test_min_dependencies_readme.py | 9 | 1432 | """Tests for the minimum dependencies in the README.rst file."""
import os
import re
from pathlib import Path
import pytest
import sklearn
from sklearn._min_dependencies import dependent_packages
from sklearn.utils.fixes import parse_version
def test_min_dependencies_readme():
# Test that the minimum dependencies in the README.rst file are
# consistent with the minimum dependencies defined at the file:
# sklearn/_min_dependencies.py
pattern = re.compile(r"(\.\. \|)" +
r"(([A-Za-z]+\-?)+)" +
r"(MinVersion\| replace::)" +
r"( [0-9]+\.[0-9]+(\.[0-9]+)?)")
readme_path = Path(sklearn.__path__[0]).parents[0]
readme_file = readme_path / "README.rst"
if not os.path.exists(readme_file):
# Skip the test if the README.rst file is not available.
# For instance, when installing scikit-learn from wheels
pytest.skip("The README.rst file is not available.")
with readme_file.open("r") as f:
for line in f:
matched = pattern.match(line)
if not matched:
continue
package, version = matched.group(2), matched.group(5)
if package in dependent_packages:
version = parse_version(version)
min_version = parse_version(dependent_packages[package][0])
assert version == min_version
| bsd-3-clause |
Hiyorimi/scikit-image | skimage/future/graph/rag.py | 5 | 19594 | import networkx as nx
import numpy as np
from numpy.lib.stride_tricks import as_strided
from scipy import ndimage as ndi
from scipy import sparse
import math
from ... import measure, segmentation, util, color
from matplotlib import colors, cm
from matplotlib import pyplot as plt
from matplotlib.collections import LineCollection
def _edge_generator_from_csr(csr_matrix):
"""Yield weighted edge triples for use by NetworkX from a CSR matrix.
This function is a straight rewrite of
`networkx.convert_matrix._csr_gen_triples`. Since that is a private
function, it is safer to include our own here.
Parameters
----------
csr_matrix : scipy.sparse.csr_matrix
The input matrix. An edge (i, j, w) will be yielded if there is a
data value for coordinates (i, j) in the matrix, even if that value
is 0.
Yields
------
i, j, w : (int, int, float) tuples
Each value `w` in the matrix along with its coordinates (i, j).
Examples
--------
>>> dense = np.eye(2, dtype=np.float)
>>> csr = sparse.csr_matrix(dense)
>>> edges = _edge_generator_from_csr(csr)
>>> list(edges)
[(0, 0, 1.0), (1, 1, 1.0)]
"""
nrows = csr_matrix.shape[0]
values = csr_matrix.data
indptr = csr_matrix.indptr
col_indices = csr_matrix.indices
for i in range(nrows):
for j in range(indptr[i], indptr[i + 1]):
yield i, col_indices[j], values[j]
def min_weight(graph, src, dst, n):
"""Callback to handle merging nodes by choosing minimum weight.
Returns a dictionary with `"weight"` set as either the weight between
(`src`, `n`) or (`dst`, `n`) in `graph` or the minimum of the two when
both exist.
Parameters
----------
graph : RAG
The graph under consideration.
src, dst : int
The verices in `graph` to be merged.
n : int
A neighbor of `src` or `dst` or both.
Returns
-------
data : dict
A dict with the `"weight"` attribute set the weight between
(`src`, `n`) or (`dst`, `n`) in `graph` or the minimum of the two when
both exist.
"""
# cover the cases where n only has edge to either `src` or `dst`
default = {'weight': np.inf}
w1 = graph[n].get(src, default)['weight']
w2 = graph[n].get(dst, default)['weight']
return {'weight': min(w1, w2)}
def _add_edge_filter(values, graph):
"""Create edge in `graph` between central element of `values` and the rest.
Add an edge between the middle element in `values` and
all other elements of `values` into `graph`. ``values[len(values) // 2]``
is expected to be the central value of the footprint used.
Parameters
----------
values : array
The array to process.
graph : RAG
The graph to add edges in.
Returns
-------
0 : float
Always returns 0. The return value is required so that `generic_filter`
can put it in the output array, but it is ignored by this filter.
"""
values = values.astype(int)
center = values[len(values) // 2]
for value in values:
if value != center and not graph.has_edge(center, value):
graph.add_edge(center, value)
return 0.
class RAG(nx.Graph):
"""
The Region Adjacency Graph (RAG) of an image, subclasses
    `networkx.Graph <http://networkx.github.io/documentation/latest/reference/classes.graph.html>`_
Parameters
----------
label_image : array of int
An initial segmentation, with each region labeled as a different
integer. Every unique value in ``label_image`` will correspond to
a node in the graph.
connectivity : int in {1, ..., ``label_image.ndim``}, optional
The connectivity between pixels in ``label_image``. For a 2D image,
a connectivity of 1 corresponds to immediate neighbors up, down,
left, and right, while a connectivity of 2 also includes diagonal
neighbors. See `scipy.ndimage.generate_binary_structure`.
data : networkx Graph specification, optional
Initial or additional edges to pass to the NetworkX Graph
constructor. See `networkx.Graph`. Valid edge specifications
include edge list (list of tuples), NumPy arrays, and SciPy
sparse matrices.
**attr : keyword arguments, optional
Additional attributes to add to the graph.
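    Examples
    --------
    A small sketch on a toy label image:
    >>> import numpy as np
    >>> from skimage.future.graph import RAG
    >>> labels = np.array([[1, 1, 2], [1, 2, 2]], dtype=int)
    >>> rag = RAG(labels, connectivity=1)
    >>> rag.number_of_nodes(), rag.number_of_edges()
    (2, 1)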
"""
def __init__(self, label_image=None, connectivity=1, data=None, **attr):
super(RAG, self).__init__(data, **attr)
if self.number_of_nodes() == 0:
self.max_id = 0
else:
self.max_id = max(self.nodes_iter())
if label_image is not None:
fp = ndi.generate_binary_structure(label_image.ndim, connectivity)
# In the next ``ndi.generic_filter`` function, the kwarg
# ``output`` is used to provide a strided array with a single
# 64-bit floating point number, to which the function repeatedly
# writes. This is done because even if we don't care about the
# output, without this, a float array of the same shape as the
# input image will be created and that could be expensive in
# memory consumption.
ndi.generic_filter(
label_image,
function=_add_edge_filter,
footprint=fp,
mode='nearest',
output=as_strided(np.empty((1,), dtype=np.float_),
shape=label_image.shape,
strides=((0,) * label_image.ndim)),
extra_arguments=(self,))
def merge_nodes(self, src, dst, weight_func=min_weight, in_place=True,
extra_arguments=[], extra_keywords={}):
"""Merge node `src` and `dst`.
The new combined node is adjacent to all the neighbors of `src`
and `dst`. `weight_func` is called to decide the weight of edges
incident on the new node.
Parameters
----------
src, dst : int
Nodes to be merged.
weight_func : callable, optional
Function to decide the attributes of edges incident on the new
            node. For each neighbor `n` of `src` and `dst`, `weight_func` will
be called as follows: `weight_func(src, dst, n, *extra_arguments,
**extra_keywords)`. `src`, `dst` and `n` are IDs of vertices in the
RAG object which is in turn a subclass of `networkx.Graph`. It is
expected to return a dict of attributes of the resulting edge.
in_place : bool, optional
If set to `True`, the merged node has the id `dst`, else merged
node has a new id which is returned.
extra_arguments : sequence, optional
The sequence of extra positional arguments passed to
`weight_func`.
extra_keywords : dictionary, optional
The dict of keyword arguments passed to the `weight_func`.
Returns
-------
id : int
The id of the new node.
Notes
-----
If `in_place` is `False` the resulting node has a new id, rather than
`dst`.
"""
src_nbrs = set(self.neighbors(src))
dst_nbrs = set(self.neighbors(dst))
neighbors = (src_nbrs | dst_nbrs) - set([src, dst])
if in_place:
new = dst
else:
new = self.next_id()
self.add_node(new)
for neighbor in neighbors:
data = weight_func(self, src, new, neighbor, *extra_arguments,
**extra_keywords)
self.add_edge(neighbor, new, attr_dict=data)
self.node[new]['labels'] = (self.node[src]['labels'] +
self.node[dst]['labels'])
self.remove_node(src)
if not in_place:
self.remove_node(dst)
return new
def add_node(self, n, attr_dict=None, **attr):
"""Add node `n` while updating the maximum node id.
.. seealso:: :func:`networkx.Graph.add_node`."""
super(RAG, self).add_node(n, attr_dict, **attr)
self.max_id = max(n, self.max_id)
def add_edge(self, u, v, attr_dict=None, **attr):
"""Add an edge between `u` and `v` while updating max node id.
.. seealso:: :func:`networkx.Graph.add_edge`."""
super(RAG, self).add_edge(u, v, attr_dict, **attr)
self.max_id = max(u, v, self.max_id)
def copy(self):
"""Copy the graph with its max node id.
.. seealso:: :func:`networkx.Graph.copy`."""
g = super(RAG, self).copy()
g.max_id = self.max_id
return g
def next_id(self):
"""Returns the `id` for the new node to be inserted.
The current implementation returns one more than the maximum `id`.
Returns
-------
id : int
The `id` of the new node to be inserted.
"""
return self.max_id + 1
def _add_node_silent(self, n):
"""Add node `n` without updating the maximum node id.
This is a convenience method used internally.
.. seealso:: :func:`networkx.Graph.add_node`."""
super(RAG, self).add_node(n)
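# A minimal usage sketch of ``RAG.merge_nodes`` with a custom weight
# callback.  It is illustrative only: the astronaut image, the SLIC
# parameters and the ``max_edge_weight`` helper are assumptions, not part
# of the module above.  The callback follows the
# ``weight_func(graph, src, new, n, *extra_arguments, **extra_keywords)``
# calling convention used by ``merge_nodes`` and returns a dict of edge
# attributes.
def _example_merge_two_regions():
    from skimage import data, segmentation
    def max_edge_weight(graph, src, dst, n):
        # With the default in_place=True merge, ``dst`` is the surviving
        # node, so both existing edges (if any) can be inspected.
        w1 = graph[n].get(src, {'weight': -np.inf})['weight']
        w2 = graph[n].get(dst, {'weight': -np.inf})['weight']
        return {'weight': max(w1, w2)}
    img = data.astronaut()
    labels = segmentation.slic(img, n_segments=100)
    rag = rag_mean_color(img, labels)
    src, dst = next(iter(rag.edges_iter()))
    merged = rag.merge_nodes(src, dst, weight_func=max_edge_weight)
    return rag, merged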
def rag_mean_color(image, labels, connectivity=2, mode='distance',
sigma=255.0):
"""Compute the Region Adjacency Graph using mean colors.
Given an image and its initial segmentation, this method constructs the
corresponding Region Adjacency Graph (RAG). Each node in the RAG
represents a set of pixels within `image` with the same label in `labels`.
The weight between two adjacent regions represents how similar or
dissimilar two regions are depending on the `mode` parameter.
Parameters
----------
image : ndarray, shape(M, N, [..., P,] 3)
Input image.
labels : ndarray, shape(M, N, [..., P,])
The labelled image. This should have one dimension less than
`image`. If `image` has dimensions `(M, N, 3)` `labels` should have
dimensions `(M, N)`.
connectivity : int, optional
Pixels with a squared distance less than `connectivity` from each other
are considered adjacent. It can range from 1 to `labels.ndim`. Its
behavior is the same as `connectivity` parameter in
`scipy.ndimage.generate_binary_structure`.
mode : {'distance', 'similarity'}, optional
The strategy to assign edge weights.
            'distance' : The weight between two adjacent regions is
            :math:`|c_1 - c_2|`, where :math:`c_1` and :math:`c_2` are the
            mean colors of the two regions. It represents the Euclidean
            distance between their average colors.
            'similarity' : The weight between two adjacent regions is
            :math:`e^{-d^2/\sigma}`, where :math:`d=|c_1 - c_2|` and
            :math:`c_1` and :math:`c_2` are the mean colors of the two
            regions. It represents how similar two regions are.
sigma : float, optional
Used for computation when `mode` is "similarity". It governs how
close to each other two colors should be, for their corresponding edge
weight to be significant. A very large value of `sigma` could make
any two colors behave as though they were similar.
Returns
-------
out : RAG
The region adjacency graph.
Examples
--------
>>> from skimage import data, segmentation
>>> from skimage.future import graph
>>> img = data.astronaut()
>>> labels = segmentation.slic(img)
>>> rag = graph.rag_mean_color(img, labels)
References
----------
.. [1] Alain Tremeau and Philippe Colantoni
"Regions Adjacency Graph Applied To Color Image Segmentation"
http://citeseerx.ist.psu.edu/viewdoc/summary?doi=10.1.1.11.5274
"""
graph = RAG(labels, connectivity=connectivity)
for n in graph:
graph.node[n].update({'labels': [n],
'pixel count': 0,
'total color': np.array([0, 0, 0],
dtype=np.double)})
for index in np.ndindex(labels.shape):
current = labels[index]
graph.node[current]['pixel count'] += 1
graph.node[current]['total color'] += image[index]
for n in graph:
graph.node[n]['mean color'] = (graph.node[n]['total color'] /
graph.node[n]['pixel count'])
for x, y, d in graph.edges_iter(data=True):
diff = graph.node[x]['mean color'] - graph.node[y]['mean color']
diff = np.linalg.norm(diff)
if mode == 'similarity':
d['weight'] = math.e ** (-(diff ** 2) / sigma)
elif mode == 'distance':
d['weight'] = diff
else:
raise ValueError("The mode '%s' is not recognised" % mode)
return graph
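# A small sketch of the 'similarity' mode described above: with the
# Gaussian weighting exp(-d**2 / sigma) every edge weight lies in (0, 1].
# The image, the number of segments and sigma are illustrative
# assumptions only.
def _example_similarity_weights():
    from skimage import data, segmentation
    img = data.astronaut()
    labels = segmentation.slic(img, n_segments=200)
    rag = rag_mean_color(img, labels, mode='similarity', sigma=255.0)
    weights = [d['weight'] for _, _, d in rag.edges_iter(data=True)]
    # Identical mean colors give the maximum weight of 1.
    assert max(weights) <= 1.0
    return weights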
def rag_boundary(labels, edge_map, connectivity=2):
""" Comouter RAG based on region boundaries
Given an image's initial segmentation and its edge map this method
constructs the corresponding Region Adjacency Graph (RAG). Each node in the
RAG represents a set of pixels within the image with the same label in
`labels`. The weight between two adjacent regions is the average value
in `edge_map` along their boundary.
    Parameters
    ----------
    labels : ndarray
The labelled image.
edge_map : ndarray
This should have the same shape as that of `labels`. For all pixels
along the boundary between 2 adjacent regions, the average value of the
corresponding pixels in `edge_map` is the edge weight between them.
connectivity : int, optional
Pixels with a squared distance less than `connectivity` from each other
are considered adjacent. It can range from 1 to `labels.ndim`. Its
behavior is the same as `connectivity` parameter in
        `scipy.ndimage.generate_binary_structure`.
Examples
--------
>>> from skimage import data, segmentation, filters, color
>>> from skimage.future import graph
>>> img = data.chelsea()
>>> labels = segmentation.slic(img)
>>> edge_map = filters.sobel(color.rgb2gray(img))
>>> rag = graph.rag_boundary(labels, edge_map)
"""
conn = ndi.generate_binary_structure(labels.ndim, connectivity)
eroded = ndi.grey_erosion(labels, footprint=conn)
dilated = ndi.grey_dilation(labels, footprint=conn)
boundaries0 = (eroded != labels)
boundaries1 = (dilated != labels)
labels_small = np.concatenate((eroded[boundaries0], labels[boundaries1]))
labels_large = np.concatenate((labels[boundaries0], dilated[boundaries1]))
n = np.max(labels_large) + 1
# use a dummy broadcast array as data for RAG
ones = as_strided(np.ones((1,), dtype=np.float), shape=labels_small.shape,
strides=(0,))
count_matrix = sparse.coo_matrix((ones, (labels_small, labels_large)),
dtype=np.int_, shape=(n, n)).tocsr()
data = np.concatenate((edge_map[boundaries0], edge_map[boundaries1]))
data_coo = sparse.coo_matrix((data, (labels_small, labels_large)))
graph_matrix = data_coo.tocsr()
graph_matrix.data /= count_matrix.data
rag = RAG()
rag.add_weighted_edges_from(_edge_generator_from_csr(graph_matrix),
weight='weight')
rag.add_weighted_edges_from(_edge_generator_from_csr(count_matrix),
weight='count')
for n in rag.nodes():
rag.node[n].update({'labels': [n]})
return rag
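# A usage sketch for ``rag_boundary``: build the graph from a Sobel edge
# map and report the region pair separated by the strongest boundary.
# The coffee image and the segmentation parameters are assumptions made
# for illustration.
def _example_strongest_boundary():
    from skimage import data, segmentation, filters, color
    img = data.coffee()
    labels = segmentation.slic(img, n_segments=150)
    edge_map = filters.sobel(color.rgb2gray(img))
    rag = rag_boundary(labels, edge_map)
    # Each edge carries the mean boundary response ('weight') and the
    # number of boundary pixels ('count') added above.
    u, v, d = max(rag.edges_iter(data=True), key=lambda e: e[2]['weight'])
    return (u, v), d['weight'], d['count']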
def show_rag(labels, rag, img, border_color='black', edge_width=1.5,
edge_cmap='magma', img_cmap='bone', in_place=True, ax=None):
"""Draw a Region Adjacency Graph on an image.
Given a labelled image and its corresponding RAG, draw the nodes and edges
of the RAG on the image with the specified colors. Edges are drawn between
the centroid of the 2 adjacent regions in the image.
Parameters
----------
labels : ndarray, shape (M, N)
The labelled image.
rag : RAG
The Region Adjacency Graph.
    img : ndarray, shape (M, N[, 3])
        Input image. If `img_cmap` is `None`, the image should be in RGB
        format.
border_color : color spec, optional
Color with which the borders between regions are drawn.
edge_width : float, optional
The thickness with which the RAG edges are drawn.
edge_cmap : :py:class:`matplotlib.colors.Colormap`, optional
Any matplotlib colormap with which the edges are drawn.
img_cmap : :py:class:`matplotlib.colors.Colormap`, optional
        Any matplotlib colormap with which the image is drawn. If set to `None`
the image is drawn as it is.
in_place : bool, optional
If set, the RAG is modified in place. For each node `n` the function
will set a new attribute ``rag.node[n]['centroid']``.
ax : :py:class:`matplotlib.axes.Axes`, optional
The axes to draw on. If not specified, new axes are created and drawn
on.
Returns
-------
lc : :py:class:`matplotlib.collections.LineCollection`
        A collection of lines that represent the edges of the graph. It can be
passed to the :meth:`matplotlib.figure.Figure.colorbar` function.
Examples
--------
>>> from skimage import data, segmentation
>>> from skimage.future import graph
>>> img = data.coffee()
>>> labels = segmentation.slic(img)
>>> g = graph.rag_mean_color(img, labels)
>>> lc = graph.show_rag(labels, g, img)
>>> cbar = plt.colorbar(lc)
"""
if not in_place:
rag = rag.copy()
if ax is None:
fig, ax = plt.subplots()
out = util.img_as_float(img, force_copy=True)
if img_cmap is None:
if img.ndim < 3 or img.shape[2] not in [3, 4]:
msg = 'If colormap is `None`, an RGB or RGBA image should be given'
raise ValueError(msg)
# Ignore the alpha channel
out = img[:, :, :3]
else:
img_cmap = cm.get_cmap(img_cmap)
out = color.rgb2gray(img)
# Ignore the alpha channel
out = img_cmap(out)[:, :, :3]
edge_cmap = cm.get_cmap(edge_cmap)
# Handling the case where one node has multiple labels
# offset is 1 so that regionprops does not ignore 0
offset = 1
map_array = np.arange(labels.max() + 1)
for n, d in rag.nodes_iter(data=True):
for label in d['labels']:
map_array[label] = offset
offset += 1
rag_labels = map_array[labels]
regions = measure.regionprops(rag_labels)
for (n, data), region in zip(rag.nodes_iter(data=True), regions):
data['centroid'] = tuple(map(int, region['centroid']))
cc = colors.ColorConverter()
if border_color is not None:
border_color = cc.to_rgb(border_color)
out = segmentation.mark_boundaries(out, rag_labels, color=border_color)
ax.imshow(out)
# Defining the end points of the edges
# The tuple[::-1] syntax reverses a tuple as matplotlib uses (x,y)
# convention while skimage uses (row, column)
lines = [[rag.node[n1]['centroid'][::-1], rag.node[n2]['centroid'][::-1]]
for (n1, n2) in rag.edges_iter()]
lc = LineCollection(lines, linewidths=edge_width, cmap=edge_cmap)
edge_weights = [d['weight'] for x, y, d in rag.edges_iter(data=True)]
lc.set_array(np.array(edge_weights))
ax.add_collection(lc)
return lc
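# A usage sketch for ``show_rag``: draw the graph without mutating it
# (in_place=False) and attach a colorbar for the edge weights.  The input
# image, the colormap names and the output path are assumptions; any
# matplotlib backend able to save figures will do.
def _example_show_rag_with_colorbar(out_path='rag_overlay.png'):
    from skimage import data, segmentation
    img = data.coffee()
    labels = segmentation.slic(img, n_segments=120)
    rag = rag_mean_color(img, labels)
    fig, ax = plt.subplots(figsize=(8, 6))
    lc = show_rag(labels, rag, img, border_color='yellow',
                  edge_cmap='viridis', in_place=False, ax=ax)
    fig.colorbar(lc, ax=ax, fraction=0.03)
    fig.savefig(out_path, dpi=150)
    return out_path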
| bsd-3-clause |
kerzhner/airflow | airflow/contrib/hooks/bigquery_hook.py | 2 | 35738 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains a BigQuery Hook, as well as a very basic PEP 249
implementation for BigQuery.
"""
from builtins import range
from past.builtins import basestring
import logging
import time
from airflow.contrib.hooks.gcp_api_base_hook import GoogleCloudBaseHook
from airflow.hooks.dbapi_hook import DbApiHook
from apiclient.discovery import build, HttpError
from pandas.io.gbq import GbqConnector, \
_parse_data as gbq_parse_data, \
_check_google_client_version as gbq_check_google_client_version, \
_test_google_api_imports as gbq_test_google_api_imports
from pandas.tools.merge import concat
logging.getLogger("bigquery").setLevel(logging.INFO)
class BigQueryHook(GoogleCloudBaseHook, DbApiHook):
"""
Interact with BigQuery. This hook uses the Google Cloud Platform
connection.
"""
conn_name_attr = 'bigquery_conn_id'
def __init__(self,
bigquery_conn_id='bigquery_default',
delegate_to=None):
super(BigQueryHook, self).__init__(
conn_id=bigquery_conn_id,
delegate_to=delegate_to)
def get_conn(self):
"""
Returns a BigQuery PEP 249 connection object.
"""
service = self.get_service()
project = self._get_field('project')
return BigQueryConnection(service=service, project_id=project)
def get_service(self):
"""
Returns a BigQuery service object.
"""
http_authorized = self._authorize()
return build('bigquery', 'v2', http=http_authorized)
def insert_rows(self, table, rows, target_fields=None, commit_every=1000):
"""
Insertion is currently unsupported. Theoretically, you could use
BigQuery's streaming API to insert rows into a table, but this hasn't
been implemented.
"""
raise NotImplementedError()
def get_pandas_df(self, bql, parameters=None):
"""
Returns a Pandas DataFrame for the results produced by a BigQuery
query. The DbApiHook method must be overridden because Pandas
doesn't support PEP 249 connections, except for SQLite. See:
https://github.com/pydata/pandas/blob/master/pandas/io/sql.py#L447
https://github.com/pydata/pandas/issues/6900
:param bql: The BigQuery SQL to execute.
:type bql: string
"""
service = self.get_service()
project = self._get_field('project')
connector = BigQueryPandasConnector(project, service)
schema, pages = connector.run_query(bql)
dataframe_list = []
while len(pages) > 0:
page = pages.pop()
dataframe_list.append(gbq_parse_data(schema, page))
if len(dataframe_list) > 0:
return concat(dataframe_list, ignore_index=True)
else:
return gbq_parse_data(schema, [])
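# A hedged usage sketch: pull query results into a pandas DataFrame via
# the hook.  The connection id, the public sample table and the use of
# legacy SQL syntax are placeholders/assumptions, not part of the hook
# itself.
def _example_query_to_dataframe():
    hook = BigQueryHook(bigquery_conn_id='bigquery_default')
    df = hook.get_pandas_df(
        'SELECT corpus, COUNT(1) AS n '
        'FROM [publicdata:samples.shakespeare] '
        'GROUP BY corpus')
    # sort_values assumes a reasonably recent pandas (>= 0.17).
    return df.sort_values('n', ascending=False).head()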
class BigQueryPandasConnector(GbqConnector):
"""
This connector behaves identically to GbqConnector (from Pandas), except
that it allows the service to be injected, and disables a call to
self.get_credentials(). This allows Airflow to use BigQuery with Pandas
without forcing a three legged OAuth connection. Instead, we can inject
service account credentials into the binding.
"""
def __init__(self, project_id, service, reauth=False, verbose=False):
gbq_check_google_client_version()
gbq_test_google_api_imports()
self.project_id = project_id
self.reauth = reauth
self.service = service
self.verbose = verbose
class BigQueryConnection(object):
"""
BigQuery does not have a notion of a persistent connection. Thus, these
objects are small stateless factories for cursors, which do all the real
work.
"""
def __init__(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
def close(self):
""" BigQueryConnection does not have anything to close. """
pass
def commit(self):
""" BigQueryConnection does not support transactions. """
pass
def cursor(self):
""" Return a new :py:class:`Cursor` object using the connection. """
return BigQueryCursor(*self._args, **self._kwargs)
def rollback(self):
raise NotImplementedError(
"BigQueryConnection does not have transactions")
class BigQueryBaseCursor(object):
"""
The BigQuery base cursor contains helper methods to execute queries against
BigQuery. The methods can be used directly by operators, in cases where a
PEP 249 cursor isn't needed.
"""
def __init__(self, service, project_id):
self.service = service
self.project_id = project_id
def run_query(
self, bql, destination_dataset_table = False,
write_disposition = 'WRITE_EMPTY',
allow_large_results=False,
udf_config = False,
use_legacy_sql=True):
"""
Executes a BigQuery SQL query. Optionally persists results in a BigQuery
table. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param bql: The BigQuery SQL to execute.
:type bql: string
:param destination_dataset_table: The dotted <dataset>.<table>
BigQuery table to save the query results.
:param write_disposition: What to do if the table already exists in
BigQuery.
:param allow_large_results: Whether to allow large results.
:type allow_large_results: boolean
:param udf_config: The User Defined Function configuration for the query.
See https://cloud.google.com/bigquery/user-defined-functions for details.
:type udf_config: list
:param use_legacy_sql: Whether to use legacy SQL (true) or standard SQL (false).
:type use_legacy_sql: boolean
"""
configuration = {
'query': {
'query': bql,
'useLegacySql': use_legacy_sql
}
}
if destination_dataset_table:
assert '.' in destination_dataset_table, (
'Expected destination_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(destination_dataset_table)
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_dataset_table,
default_project_id=self.project_id)
configuration['query'].update({
'allowLargeResults': allow_large_results,
'writeDisposition': write_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
}
})
if udf_config:
assert isinstance(udf_config, list)
configuration['query'].update({
'userDefinedFunctionResources': udf_config
})
return self.run_with_configuration(configuration)
def run_extract( # noqa
self, source_project_dataset_table, destination_cloud_storage_uris,
compression='NONE', export_format='CSV', field_delimiter=',',
print_header=True):
"""
Executes a BigQuery extract command to copy data from BigQuery to
Google Cloud Storage. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param source_project_dataset_table: The dotted <dataset>.<table>
BigQuery table to use as the source data.
:type source_project_dataset_table: string
:param destination_cloud_storage_uris: The destination Google Cloud
Storage URI (e.g. gs://some-bucket/some-file.txt). Follows
convention defined here:
https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
:type destination_cloud_storage_uris: list
:param compression: Type of compression to use.
:type compression: string
:param export_format: File format to export.
:type export_format: string
:param field_delimiter: The delimiter to use when extracting to a CSV.
:type field_delimiter: string
:param print_header: Whether to print a header for a CSV file extract.
:type print_header: boolean
"""
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
configuration = {
'extract': {
'sourceTable': {
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table,
},
'compression': compression,
'destinationUris': destination_cloud_storage_uris,
'destinationFormat': export_format,
}
}
if export_format == 'CSV':
# Only set fieldDelimiter and printHeader fields if using CSV.
# Google does not like it if you set these fields for other export
# formats.
configuration['extract']['fieldDelimiter'] = field_delimiter
configuration['extract']['printHeader'] = print_header
return self.run_with_configuration(configuration)
def run_copy(self,
source_project_dataset_tables,
destination_project_dataset_table,
write_disposition='WRITE_EMPTY',
create_disposition='CREATE_IF_NEEDED'):
"""
Executes a BigQuery copy command to copy data from one BigQuery table
to another. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.copy
For more details about these parameters.
:param source_project_dataset_tables: One or more dotted
(project:|project.)<dataset>.<table>
BigQuery tables to use as the source data. Use a list if there are
multiple source tables.
If <project> is not included, project will be the project defined
in the connection json.
:type source_project_dataset_tables: list|string
:param destination_project_dataset_table: The destination BigQuery
table. Format is: (project:|project.)<dataset>.<table>
:type destination_project_dataset_table: string
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
"""
source_project_dataset_tables = (
[source_project_dataset_tables]
if not isinstance(source_project_dataset_tables, list)
else source_project_dataset_tables)
source_project_dataset_tables_fixup = []
for source_project_dataset_table in source_project_dataset_tables:
source_project, source_dataset, source_table = \
_split_tablename(table_input=source_project_dataset_table,
default_project_id=self.project_id,
var_name='source_project_dataset_table')
source_project_dataset_tables_fixup.append({
'projectId': source_project,
'datasetId': source_dataset,
'tableId': source_table
})
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id)
configuration = {
'copy': {
'createDisposition': create_disposition,
'writeDisposition': write_disposition,
'sourceTables': source_project_dataset_tables_fixup,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table
}
}
}
return self.run_with_configuration(configuration)
def run_load(self,
destination_project_dataset_table,
schema_fields, source_uris,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=','):
"""
Executes a BigQuery load command to load data from Google Cloud Storage
to BigQuery. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about these parameters.
:param destination_project_dataset_table:
The dotted (<project>.|<project>:)<dataset>.<table> BigQuery table to load
data into. If <project> is not included, project will be the project defined
in the connection json.
:type destination_project_dataset_table: string
:param schema_fields: The schema field list as defined here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load
:type schema_fields: list
:param source_uris: The source Google Cloud
            Storage URI (e.g. gs://some-bucket/some-file.txt). A single
            wildcard per object name can be used.
:type source_uris: list
:param source_format: File format to export.
:type source_format: string
:param create_disposition: The create disposition if the table doesn't exist.
:type create_disposition: string
:param skip_leading_rows: Number of rows to skip when loading from a CSV.
:type skip_leading_rows: int
:param write_disposition: The write disposition if the table already exists.
:type write_disposition: string
:param field_delimiter: The delimiter to use when loading from a CSV.
:type field_delimiter: string
"""
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
source_format = source_format.upper()
allowed_formats = ["CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS"]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'schema': {
'fields': schema_fields
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
}
}
if source_format == 'CSV':
configuration['load']['skipLeadingRows'] = skip_leading_rows
configuration['load']['fieldDelimiter'] = field_delimiter
return self.run_with_configuration(configuration)
def run_with_configuration(self, configuration):
"""
Executes a BigQuery SQL query. See here:
https://cloud.google.com/bigquery/docs/reference/v2/jobs
For more details about the configuration parameter.
:param configuration: The configuration parameter maps directly to
BigQuery's configuration field in the job object. See
https://cloud.google.com/bigquery/docs/reference/v2/jobs for
details.
"""
jobs = self.service.jobs()
job_data = {
'configuration': configuration
}
# Send query and wait for reply.
query_reply = jobs \
.insert(projectId=self.project_id, body=job_data) \
.execute()
job_id = query_reply['jobReference']['jobId']
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
# Wait for query to finish.
while not job['status']['state'] == 'DONE':
logging.info('Waiting for job to complete: %s, %s', self.project_id, job_id)
time.sleep(5)
job = jobs.get(projectId=self.project_id, jobId=job_id).execute()
# Check if job had errors.
if 'errorResult' in job['status']:
raise Exception(
'BigQuery job failed. Final error was: {}. The job was: {}'.format(
job['status']['errorResult'], job
)
)
return job_id
def get_schema(self, dataset_id, table_id):
"""
        Get the schema for a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:param dataset_id: the dataset ID of the requested table
:param table_id: the table ID of the requested table
:return: a table schema
"""
tables_resource = self.service.tables() \
.get(projectId=self.project_id, datasetId=dataset_id, tableId=table_id) \
.execute()
return tables_resource['schema']
def get_tabledata(self, dataset_id, table_id,
max_results=None, page_token=None, start_index=None):
"""
Get the data of a given dataset.table.
see https://cloud.google.com/bigquery/docs/reference/v2/tabledata/list
:param dataset_id: the dataset ID of the requested table.
:param table_id: the table ID of the requested table.
:param max_results: the maximum results to return.
:param page_token: page token, returned from a previous call,
identifying the result set.
:param start_index: zero based index of the starting row to read.
:return: map containing the requested rows.
"""
optional_params = {}
if max_results:
optional_params['maxResults'] = max_results
if page_token:
optional_params['pageToken'] = page_token
if start_index:
optional_params['startIndex'] = start_index
return (
self.service.tabledata()
.list(
projectId=self.project_id, datasetId=dataset_id,
tableId=table_id, **optional_params)
.execute()
)
def run_table_delete(self, deletion_dataset_table, ignore_if_missing=False):
"""
        Delete an existing table from the dataset;
        if the table does not exist, return an error unless `ignore_if_missing`
        is set to True.
:param deletion_dataset_table: A dotted
(<project>.|<project>:)<dataset>.<table> that indicates which table
will be deleted.
:type deletion_dataset_table: str
:param ignore_if_missing: if True, then return success even if the
requested table does not exist.
:type ignore_if_missing: boolean
:return:
"""
assert '.' in deletion_dataset_table, (
'Expected deletion_dataset_table in the format of '
'<dataset>.<table>. Got: {}').format(deletion_dataset_table)
deletion_project, deletion_dataset, deletion_table = \
_split_tablename(table_input=deletion_dataset_table,
default_project_id=self.project_id)
try:
tables_resource = self.service.tables() \
.delete(projectId=deletion_project,
datasetId=deletion_dataset,
tableId=deletion_table) \
.execute()
logging.info('Deleted table %s:%s.%s.',
deletion_project, deletion_dataset, deletion_table)
except HttpError:
if not ignore_if_missing:
raise Exception(
'Table deletion failed. Table does not exist.')
else:
logging.info('Table does not exist. Skipping.')
def run_table_upsert(self, dataset_id, table_resource, project_id=None):
"""
        Creates a new, empty table in the dataset;
        if the table already exists, update the existing table instead.
Since BigQuery does not natively allow table upserts, this is not an
atomic operation.
:param dataset_id: the dataset to upsert the table into.
:type dataset_id: str
:param table_resource: a table resource. see
https://cloud.google.com/bigquery/docs/reference/v2/tables#resource
:type table_resource: dict
:param project_id: the project to upsert the table into. If None,
project will be self.project_id.
:return:
"""
# check to see if the table exists
table_id = table_resource['tableReference']['tableId']
project_id = project_id if project_id is not None else self.project_id
tables_list_resp = self.service.tables().list(projectId=project_id,
datasetId=dataset_id).execute()
while True:
for table in tables_list_resp.get('tables', []):
if table['tableReference']['tableId'] == table_id:
# found the table, do update
logging.info('table %s:%s.%s exists, updating.',
project_id, dataset_id, table_id)
return self.service.tables().update(projectId=project_id,
datasetId=dataset_id,
tableId=table_id,
body=table_resource).execute()
# If there is a next page, we need to check the next page.
if 'nextPageToken' in tables_list_resp:
tables_list_resp = self.service.tables()\
.list(projectId=project_id,
datasetId=dataset_id,
pageToken=tables_list_resp['nextPageToken'])\
.execute()
# If there is no next page, then the table doesn't exist.
else:
# do insert
logging.info('table %s:%s.%s does not exist. creating.',
project_id, dataset_id, table_id)
return self.service.tables().insert(projectId=project_id,
datasetId=dataset_id,
body=table_resource).execute()
def run_grant_dataset_view_access(self,
source_dataset,
view_dataset,
view_table,
source_project = None,
view_project = None):
"""
Grant authorized view access of a dataset to a view table.
If this view has already been granted access to the dataset, do nothing.
This method is not atomic. Running it may clobber a simultaneous update.
:param source_dataset: the source dataset
:type source_dataset: str
:param view_dataset: the dataset that the view is in
:type view_dataset: str
:param view_table: the table of the view
:type view_table: str
:param source_project: the project of the source dataset. If None,
self.project_id will be used.
:type source_project: str
:param view_project: the project that the view is in. If None,
self.project_id will be used.
:type view_project: str
:return: the datasets resource of the source dataset.
"""
# Apply default values to projects
source_project = source_project if source_project else self.project_id
view_project = view_project if view_project else self.project_id
# we don't want to clobber any existing accesses, so we have to get
# info on the dataset before we can add view access
source_dataset_resource = self.service.datasets().get(projectId=source_project,
datasetId=source_dataset).execute()
access = source_dataset_resource['access'] if 'access' in source_dataset_resource else []
view_access = {'view': {'projectId': view_project,
'datasetId': view_dataset,
'tableId': view_table}}
# check to see if the view we want to add already exists.
if view_access not in access:
logging.info('granting table %s:%s.%s authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table,
source_project, source_dataset)
access.append(view_access)
return self.service.datasets().patch(projectId=source_project,
datasetId=source_dataset,
body={'access': access}).execute()
else:
# if view is already in access, do nothing.
logging.info('table %s:%s.%s already has authorized view access to %s:%s dataset.',
view_project, view_dataset, view_table,
source_project, source_dataset)
return source_dataset_resource
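# A hedged sketch chaining two BigQueryBaseCursor helpers: persist a
# query result into a destination table, then export it to Google Cloud
# Storage as gzip-compressed CSV.  The dataset, table, bucket and the
# public source table are placeholders.
def _example_query_then_extract(service, project_id):
    cursor = BigQueryBaseCursor(service=service, project_id=project_id)
    cursor.run_query(
        bql='SELECT name, number '
            'FROM [bigquery-public-data:usa_names.usa_1910_2013] '
            'LIMIT 1000',
        destination_dataset_table='scratch.usa_names_sample',
        write_disposition='WRITE_TRUNCATE',
        allow_large_results=True)
    return cursor.run_extract(
        source_project_dataset_table='scratch.usa_names_sample',
        destination_cloud_storage_uris=['gs://my-bucket/usa_names_*.csv.gz'],
        compression='GZIP',
        export_format='CSV')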
class BigQueryCursor(BigQueryBaseCursor):
"""
A very basic BigQuery PEP 249 cursor implementation. The PyHive PEP 249
implementation was used as a reference:
https://github.com/dropbox/PyHive/blob/master/pyhive/presto.py
https://github.com/dropbox/PyHive/blob/master/pyhive/common.py
"""
def __init__(self, service, project_id):
super(BigQueryCursor, self).__init__(service=service, project_id=project_id)
self.buffersize = None
self.page_token = None
self.job_id = None
self.buffer = []
self.all_pages_loaded = False
@property
def description(self):
""" The schema description method is not currently implemented. """
raise NotImplementedError
def close(self):
""" By default, do nothing """
pass
@property
def rowcount(self):
""" By default, return -1 to indicate that this is not supported. """
return -1
def execute(self, operation, parameters=None):
"""
Executes a BigQuery query, and returns the job ID.
:param operation: The query to execute.
:type operation: string
:param parameters: Parameters to substitute into the query.
:type parameters: dict
"""
bql = _bind_parameters(operation, parameters) if parameters else operation
self.job_id = self.run_query(bql)
def executemany(self, operation, seq_of_parameters):
"""
Execute a BigQuery query multiple times with different parameters.
:param operation: The query to execute.
:type operation: string
:param parameters: List of dictionary parameters to substitute into the
query.
:type parameters: list
"""
for parameters in seq_of_parameters:
self.execute(operation, parameters)
def fetchone(self):
""" Fetch the next row of a query result set. """
return self.next()
def next(self):
"""
Helper method for fetchone, which returns the next row from a buffer.
If the buffer is empty, attempts to paginate through the result set for
the next page, and load it into the buffer.
"""
if not self.job_id:
return None
if len(self.buffer) == 0:
if self.all_pages_loaded:
return None
query_results = (
self.service.jobs()
.getQueryResults(
projectId=self.project_id,
jobId=self.job_id,
pageToken=self.page_token)
.execute()
)
if 'rows' in query_results and query_results['rows']:
self.page_token = query_results.get('pageToken')
fields = query_results['schema']['fields']
col_types = [field['type'] for field in fields]
rows = query_results['rows']
for dict_row in rows:
typed_row = ([
_bq_cast(vs['v'], col_types[idx])
for idx, vs in enumerate(dict_row['f'])
])
self.buffer.append(typed_row)
if not self.page_token:
self.all_pages_loaded = True
else:
# Reset all state since we've exhausted the results.
self.page_token = None
self.job_id = None
self.page_token = None
return None
return self.buffer.pop(0)
def fetchmany(self, size=None):
"""
Fetch the next set of rows of a query result, returning a sequence of sequences (e.g. a
list of tuples). An empty sequence is returned when no more rows are available.
The number of rows to fetch per call is specified by the parameter. If it is not given, the
cursor's arraysize determines the number of rows to be fetched. The method should try to
fetch as many rows as indicated by the size parameter. If this is not possible due to the
specified number of rows not being available, fewer rows may be returned.
An :py:class:`~pyhive.exc.Error` (or subclass) exception is raised if the previous call to
:py:meth:`execute` did not produce any result set or no call was issued yet.
"""
if size is None:
size = self.arraysize
result = []
for _ in range(size):
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def fetchall(self):
"""
Fetch all (remaining) rows of a query result, returning them as a sequence of sequences
(e.g. a list of tuples).
"""
result = []
while True:
one = self.fetchone()
if one is None:
break
else:
result.append(one)
return result
def get_arraysize(self):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
        return self.buffersize if self.buffersize else 1
def set_arraysize(self, arraysize):
""" Specifies the number of rows to fetch at a time with .fetchmany() """
self.buffersize = arraysize
arraysize = property(get_arraysize, set_arraysize)
def setinputsizes(self, sizes):
""" Does nothing by default """
pass
def setoutputsize(self, size, column=None):
""" Does nothing by default """
pass
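# A hedged sketch of the full PEP 249 flow: hook -> connection -> cursor
# -> execute -> fetch.  Parameter binding relies on the "%(name)s" style
# handled by _bind_parameters below (Python 2 only, since it uses
# dict.iteritems()); the connection id and table are placeholders.
def _example_pep249_roundtrip():
    hook = BigQueryHook(bigquery_conn_id='bigquery_default')
    conn = hook.get_conn()
    cursor = conn.cursor()
    cursor.execute(
        'SELECT word, word_count FROM [publicdata:samples.shakespeare] '
        'WHERE corpus = %(corpus)s LIMIT 5',
        {'corpus': 'hamlet'})
    rows = cursor.fetchall()
    cursor.close()
    conn.close()
    return rows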
def _bind_parameters(operation, parameters):
""" Helper method that binds parameters to a SQL query. """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in parameters.iteritems():
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
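# A hedged illustration of the naive substitution performed above (it is
# purely string-based, so it must never see untrusted input).  Runs under
# Python 2 because _bind_parameters uses dict.iteritems().
def _example_bind_parameters():
    sql = 'SELECT * FROM t WHERE name = %(name)s AND n > %(n)s'
    bound = _bind_parameters(sql, {'name': "O'Hara", 'n': 3})
    # bound is: SELECT * FROM t WHERE name = 'O\'Hara' AND n > 3
    # (the quote inside the value is backslash-escaped by _escape).
    return bound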
def _escape(s):
""" Helper method that escapes parameters to a SQL query. """
e = s
e = e.replace('\\', '\\\\')
e = e.replace('\n', '\\n')
e = e.replace('\r', '\\r')
e = e.replace("'", "\\'")
e = e.replace('"', '\\"')
return e
def _bq_cast(string_field, bq_type):
"""
Helper method that casts a BigQuery row to the appropriate data types.
This is useful because BigQuery returns all fields as strings.
"""
if string_field is None:
return None
elif bq_type == 'INTEGER' or bq_type == 'TIMESTAMP':
return int(string_field)
elif bq_type == 'FLOAT':
return float(string_field)
elif bq_type == 'BOOLEAN':
assert string_field in set(['true', 'false'])
return string_field == 'true'
else:
return string_field
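# A hedged illustration of how _bq_cast restores Python types from the
# all-string rows returned by the BigQuery REST API; the sample row is
# made up.
def _example_bq_cast():
    row = ['42', '3.14', 'true', None, 'hello']
    types = ['INTEGER', 'FLOAT', 'BOOLEAN', 'STRING', 'STRING']
    # -> [42, 3.14, True, None, 'hello']
    return [_bq_cast(value, bq_type) for value, bq_type in zip(row, types)]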
def _split_tablename(table_input, default_project_id, var_name=None):
assert default_project_id is not None, "INTERNAL: No default project is specified"
def var_print(var_name):
if var_name is None:
return ""
else:
return "Format exception for {var}: ".format(var=var_name)
cmpt = table_input.split(':')
if len(cmpt) == 1:
project_id = None
rest = cmpt[0]
elif len(cmpt) == 2:
project_id = cmpt[0]
rest = cmpt[1]
else:
raise Exception((
            '{var}Expected format of (<project>:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
cmpt = rest.split('.')
if len(cmpt) == 3:
assert project_id is None, (
"{var}Use either : or . to specify project"
).format(var=var_print(var_name))
project_id = cmpt[0]
dataset_id = cmpt[1]
table_id = cmpt[2]
elif len(cmpt) == 2:
dataset_id = cmpt[0]
table_id = cmpt[1]
else:
raise Exception((
            '{var}Expected format of (<project>.|<project>:)<dataset>.<table>, '
'got {input}'
).format(var=var_print(var_name), input=table_input))
if project_id is None:
if var_name is not None:
logging.info(
'project not included in {var}: '
'{input}; using project "{project}"'.format(
var=var_name, input=table_input, project=default_project_id))
project_id = default_project_id
return project_id, dataset_id, table_id
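# A hedged illustration of _split_tablename: both ':' and '.' project
# separators are accepted, and the default project fills in when none is
# given.  The project/dataset/table names are placeholders.
def _example_split_tablename():
    assert _split_tablename('my-project:mydataset.mytable',
                            'default-project') == (
        'my-project', 'mydataset', 'mytable')
    assert _split_tablename('mydataset.mytable', 'default-project') == (
        'default-project', 'mydataset', 'mytable')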
| apache-2.0 |
mbayon/TFG-MachineLearning | vbig/lib/python2.7/site-packages/pandas/tests/frame/test_indexing.py | 7 | 104529 | # -*- coding: utf-8 -*-
from __future__ import print_function
from warnings import catch_warnings
from datetime import datetime, date, timedelta, time
from pandas.compat import map, zip, range, lrange, lzip, long
from pandas import compat
from numpy import nan
from numpy.random import randn
import pytest
import numpy as np
import pandas.core.common as com
from pandas import (DataFrame, Index, Series, notnull, isnull,
MultiIndex, DatetimeIndex, Timestamp,
date_range)
import pandas as pd
from pandas._libs.tslib import iNaT
from pandas.tseries.offsets import BDay
from pandas.core.dtypes.common import (
is_float_dtype,
is_integer,
is_scalar)
from pandas.util.testing import (assert_almost_equal,
assert_series_equal,
assert_frame_equal)
from pandas.core.indexing import IndexingError
import pandas.util.testing as tm
from pandas.tests.frame.common import TestData
class TestDataFrameIndexing(TestData):
def test_getitem(self):
# Slicing
sl = self.frame[:20]
assert len(sl.index) == 20
# Column access
for _, series in compat.iteritems(sl):
assert len(series.index) == 20
assert tm.equalContents(series.index, sl.index)
for key, _ in compat.iteritems(self.frame._series):
assert self.frame[key] is not None
assert 'random' not in self.frame
with tm.assert_raises_regex(KeyError, 'random'):
self.frame['random']
df = self.frame.copy()
df['$10'] = randn(len(df))
ad = randn(len(df))
df['@awesome_domain'] = ad
with pytest.raises(KeyError):
df.__getitem__('df["$10"]')
res = df['@awesome_domain']
tm.assert_numpy_array_equal(ad, res.values)
def test_getitem_dupe_cols(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=['a', 'a', 'b'])
try:
df[['baf']]
except KeyError:
pass
else:
self.fail("Dataframe failed to raise KeyError")
def test_get(self):
b = self.frame.get('B')
assert_series_equal(b, self.frame['B'])
assert self.frame.get('foo') is None
assert_series_equal(self.frame.get('foo', self.frame['B']),
self.frame['B'])
# None
# GH 5652
for df in [DataFrame(), DataFrame(columns=list('AB')),
DataFrame(columns=list('AB'), index=range(3))]:
result = df.get(None)
assert result is None
def test_getitem_iterator(self):
idx = iter(['A', 'B', 'C'])
result = self.frame.loc[:, idx]
expected = self.frame.loc[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
idx = iter(['A', 'B', 'C'])
result = self.frame.loc[:, idx]
expected = self.frame.loc[:, ['A', 'B', 'C']]
assert_frame_equal(result, expected)
def test_getitem_list(self):
self.frame.columns.name = 'foo'
result = self.frame[['B', 'A']]
result2 = self.frame[Index(['B', 'A'])]
expected = self.frame.loc[:, ['B', 'A']]
expected.columns.name = 'foo'
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
assert result.columns.name == 'foo'
with tm.assert_raises_regex(KeyError, 'not in index'):
self.frame[['B', 'A', 'food']]
with tm.assert_raises_regex(KeyError, 'not in index'):
self.frame[Index(['B', 'A', 'foo'])]
# tuples
df = DataFrame(randn(8, 3),
columns=Index([('foo', 'bar'), ('baz', 'qux'),
('peek', 'aboo')], name=['sth', 'sth2']))
result = df[[('foo', 'bar'), ('baz', 'qux')]]
expected = df.iloc[:, :2]
assert_frame_equal(result, expected)
assert result.columns.names == ['sth', 'sth2']
def test_getitem_callable(self):
# GH 12533
result = self.frame[lambda x: 'A']
tm.assert_series_equal(result, self.frame.loc[:, 'A'])
result = self.frame[lambda x: ['A', 'B']]
tm.assert_frame_equal(result, self.frame.loc[:, ['A', 'B']])
df = self.frame[:3]
result = df[lambda x: [True, False, True]]
tm.assert_frame_equal(result, self.frame.iloc[[0, 2], :])
def test_setitem_list(self):
self.frame['E'] = 'foo'
data = self.frame[['A', 'B']]
self.frame[['B', 'A']] = data
assert_series_equal(self.frame['B'], data['A'], check_names=False)
assert_series_equal(self.frame['A'], data['B'], check_names=False)
with tm.assert_raises_regex(ValueError,
'Columns must be same length as key'):
data[['A']] = self.frame[['A', 'B']]
with tm.assert_raises_regex(ValueError, 'Length of values '
'does not match '
'length of index'):
data['A'] = range(len(data.index) - 1)
df = DataFrame(0, lrange(3), ['tt1', 'tt2'], dtype=np.int_)
df.loc[1, ['tt1', 'tt2']] = [1, 2]
result = df.loc[df.index[1], ['tt1', 'tt2']]
expected = Series([1, 2], df.columns, dtype=np.int_, name=1)
assert_series_equal(result, expected)
df['tt1'] = df['tt2'] = '0'
df.loc[df.index[1], ['tt1', 'tt2']] = ['1', '2']
result = df.loc[df.index[1], ['tt1', 'tt2']]
expected = Series(['1', '2'], df.columns, name=1)
assert_series_equal(result, expected)
def test_setitem_list_not_dataframe(self):
data = np.random.randn(len(self.frame), 2)
self.frame[['A', 'B']] = data
assert_almost_equal(self.frame[['A', 'B']].values, data)
def test_setitem_list_of_tuples(self):
tuples = lzip(self.frame['A'], self.frame['B'])
self.frame['tuples'] = tuples
result = self.frame['tuples']
expected = Series(tuples, index=self.frame.index, name='tuples')
assert_series_equal(result, expected)
    def test_setitem_multi_index(self):
# GH7655, test that assigning to a sub-frame of a frame
# with multi-index columns aligns both rows and columns
it = ['jim', 'joe', 'jolie'], ['first', 'last'], \
['left', 'center', 'right']
cols = MultiIndex.from_product(it)
index = pd.date_range('20141006', periods=20)
vals = np.random.randint(1, 1000, (len(index), len(cols)))
df = pd.DataFrame(vals, columns=cols, index=index)
i, j = df.index.values.copy(), it[-1][:]
np.random.shuffle(i)
df['jim'] = df['jolie'].loc[i, ::-1]
assert_frame_equal(df['jim'], df['jolie'])
np.random.shuffle(j)
df[('joe', 'first')] = df[('jolie', 'last')].loc[i, j]
assert_frame_equal(df[('joe', 'first')], df[('jolie', 'last')])
np.random.shuffle(j)
df[('joe', 'last')] = df[('jolie', 'first')].loc[i, j]
assert_frame_equal(df[('joe', 'last')], df[('jolie', 'first')])
def test_setitem_callable(self):
# GH 12533
df = pd.DataFrame({'A': [1, 2, 3, 4], 'B': [5, 6, 7, 8]})
df[lambda x: 'A'] = [11, 12, 13, 14]
exp = pd.DataFrame({'A': [11, 12, 13, 14], 'B': [5, 6, 7, 8]})
tm.assert_frame_equal(df, exp)
def test_setitem_other_callable(self):
# GH 13299
inc = lambda x: x + 1
df = pd.DataFrame([[-1, 1], [1, -1]])
df[df > 0] = inc
expected = pd.DataFrame([[-1, inc], [inc, -1]])
tm.assert_frame_equal(df, expected)
def test_getitem_boolean(self):
# boolean indexing
d = self.tsframe.index[10]
indexer = self.tsframe.index > d
indexer_obj = indexer.astype(object)
subindex = self.tsframe.index[indexer]
subframe = self.tsframe[indexer]
tm.assert_index_equal(subindex, subframe.index)
with tm.assert_raises_regex(ValueError, 'Item wrong length'):
self.tsframe[indexer[:-1]]
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
with tm.assert_raises_regex(ValueError, 'boolean values only'):
self.tsframe[self.tsframe]
# test that Series work
indexer_obj = Series(indexer_obj, self.tsframe.index)
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test that Series indexers reindex
# we are producing a warning that since the passed boolean
# key is not the same as the given index, we will reindex
# not sure this is really necessary
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
indexer_obj = indexer_obj.reindex(self.tsframe.index[::-1])
subframe_obj = self.tsframe[indexer_obj]
assert_frame_equal(subframe_obj, subframe)
# test df[df > 0]
for df in [self.tsframe, self.mixed_frame,
self.mixed_float, self.mixed_int]:
data = df._get_numeric_data()
bif = df[df > 0]
bifw = DataFrame(dict([(c, np.where(data[c] > 0, data[c], np.nan))
for c in data.columns]),
index=data.index, columns=data.columns)
# add back other columns to compare
for c in df.columns:
if c not in bifw:
bifw[c] = df[c]
bifw = bifw.reindex(columns=df.columns)
assert_frame_equal(bif, bifw, check_dtype=False)
for c in df.columns:
if bif[c].dtype != bifw[c].dtype:
assert bif[c].dtype == df[c].dtype
def test_getitem_boolean_casting(self):
# don't upcast if we don't need to
df = self.tsframe.copy()
df['E'] = 1
df['E'] = df['E'].astype('int32')
df['E1'] = df['E'].copy()
df['F'] = 1
df['F'] = df['F'].astype('int64')
df['F1'] = df['F'].copy()
casted = df[df > 0]
result = casted.get_dtype_counts()
expected = Series({'float64': 4, 'int32': 2, 'int64': 2})
assert_series_equal(result, expected)
# int block splitting
df.loc[df.index[1:3], ['E1', 'F1']] = 0
casted = df[df > 0]
result = casted.get_dtype_counts()
expected = Series({'float64': 6, 'int32': 1, 'int64': 1})
assert_series_equal(result, expected)
# where dtype conversions
# GH 3733
df = DataFrame(data=np.random.randn(100, 50))
df = df.where(df > 0) # create nans
bools = df > 0
mask = isnull(df)
expected = bools.astype(float).mask(mask)
result = bools.mask(mask)
assert_frame_equal(result, expected)
def test_getitem_boolean_list(self):
df = DataFrame(np.arange(12).reshape(3, 4))
def _checkit(lst):
result = df[lst]
expected = df.loc[df.index[lst]]
assert_frame_equal(result, expected)
_checkit([True, False, True])
_checkit([True, True, True])
_checkit([False, False, False])
def test_getitem_boolean_iadd(self):
arr = randn(5, 5)
df = DataFrame(arr.copy(), columns=['A', 'B', 'C', 'D', 'E'])
df[df < 0] += 1
arr[arr < 0] += 1
assert_almost_equal(df.values, arr)
def test_boolean_index_empty_corner(self):
# #2096
blah = DataFrame(np.empty([0, 1]), columns=['A'],
index=DatetimeIndex([]))
# both of these should succeed trivially
k = np.array([], bool)
blah[k]
blah[k] = 0
def test_getitem_ix_mixed_integer(self):
df = DataFrame(np.random.randn(4, 3),
index=[1, 10, 'C', 'E'], columns=[1, 2, 3])
result = df.iloc[:-1]
expected = df.loc[df.index[:-1]]
assert_frame_equal(result, expected)
with catch_warnings(record=True):
result = df.ix[[1, 10]]
expected = df.ix[Index([1, 10], dtype=object)]
assert_frame_equal(result, expected)
# 11320
df = pd.DataFrame({"rna": (1.5, 2.2, 3.2, 4.5),
-1000: [11, 21, 36, 40],
0: [10, 22, 43, 34],
1000: [0, 10, 20, 30]},
columns=['rna', -1000, 0, 1000])
result = df[[1000]]
expected = df.iloc[:, [3]]
assert_frame_equal(result, expected)
result = df[[-1000]]
expected = df.iloc[:, [1]]
assert_frame_equal(result, expected)
def test_getitem_setitem_ix_negative_integers(self):
with catch_warnings(record=True):
result = self.frame.ix[:, -1]
assert_series_equal(result, self.frame['D'])
with catch_warnings(record=True):
result = self.frame.ix[:, [-1]]
assert_frame_equal(result, self.frame[['D']])
with catch_warnings(record=True):
result = self.frame.ix[:, [-1, -2]]
assert_frame_equal(result, self.frame[['D', 'C']])
with catch_warnings(record=True):
self.frame.ix[:, [-1]] = 0
assert (self.frame['D'] == 0).all()
df = DataFrame(np.random.randn(8, 4))
with catch_warnings(record=True):
assert isnull(df.ix[:, [-1]].values).all()
# #1942
a = DataFrame(randn(20, 2), index=[chr(x + 65) for x in range(20)])
with catch_warnings(record=True):
a.ix[-1] = a.ix[-2]
with catch_warnings(record=True):
assert_series_equal(a.ix[-1], a.ix[-2], check_names=False)
assert a.ix[-1].name == 'T'
assert a.ix[-2].name == 'S'
def test_getattr(self):
assert_series_equal(self.frame.A, self.frame['A'])
pytest.raises(AttributeError, getattr, self.frame,
'NONEXISTENT_NAME')
def test_setattr_column(self):
df = DataFrame({'foobar': 1}, index=lrange(10))
df.foobar = 5
assert (df.foobar == 5).all()
def test_setitem(self):
# not sure what else to do here
series = self.frame['A'][::2]
self.frame['col5'] = series
assert 'col5' in self.frame
assert len(series) == 15
assert len(self.frame) == 30
exp = np.ravel(np.column_stack((series.values, [np.nan] * 15)))
exp = Series(exp, index=self.frame.index, name='col5')
tm.assert_series_equal(self.frame['col5'], exp)
series = self.frame['A']
self.frame['col6'] = series
tm.assert_series_equal(series, self.frame['col6'], check_names=False)
with pytest.raises(KeyError):
self.frame[randn(len(self.frame) + 1)] = 1
# set ndarray
arr = randn(len(self.frame))
self.frame['col9'] = arr
assert (self.frame['col9'] == arr).all()
self.frame['col7'] = 5
assert((self.frame['col7'] == 5).all())
self.frame['col0'] = 3.14
assert((self.frame['col0'] == 3.14).all())
self.frame['col8'] = 'foo'
assert((self.frame['col8'] == 'foo').all())
# this is partially a view (e.g. some blocks are view)
# so raise/warn
smaller = self.frame[:2]
def f():
smaller['col10'] = ['1', '2']
pytest.raises(com.SettingWithCopyError, f)
assert smaller['col10'].dtype == np.object_
assert (smaller['col10'] == ['1', '2']).all()
# with a dtype
for dtype in ['int32', 'int64', 'float32', 'float64']:
self.frame[dtype] = np.array(arr, dtype=dtype)
assert self.frame[dtype].dtype.name == dtype
# dtype changing GH4204
df = DataFrame([[0, 0]])
df.iloc[0] = np.nan
expected = DataFrame([[np.nan, np.nan]])
assert_frame_equal(df, expected)
df = DataFrame([[0, 0]])
df.loc[0] = np.nan
assert_frame_equal(df, expected)
def test_setitem_tuple(self):
self.frame['A', 'B'] = self.frame['A']
assert_series_equal(self.frame['A', 'B'], self.frame[
'A'], check_names=False)
def test_setitem_always_copy(self):
s = self.frame['A'].copy()
self.frame['E'] = s
self.frame['E'][5:10] = nan
assert notnull(s[5:10]).all()
def test_setitem_boolean(self):
df = self.frame.copy()
values = self.frame.values
df[df['A'] > 0] = 4
values[values[:, 0] > 0] = 4
assert_almost_equal(df.values, values)
# test that column reindexing works
series = df['A'] == 4
series = series.reindex(df.index[::-1])
df[series] = 1
values[values[:, 0] == 4] = 1
assert_almost_equal(df.values, values)
df[df > 0] = 5
values[values > 0] = 5
assert_almost_equal(df.values, values)
df[df == 5] = 0
values[values == 5] = 0
assert_almost_equal(df.values, values)
# a df that needs alignment first
df[df[:-1] < 0] = 2
np.putmask(values[:-1], values[:-1] < 0, 2)
assert_almost_equal(df.values, values)
# indexed with same shape but rows-reversed df
df[df[::-1] == 2] = 3
values[values == 2] = 3
assert_almost_equal(df.values, values)
with tm.assert_raises_regex(TypeError, 'Must pass '
'DataFrame with '
'boolean values only'):
df[df * 0] = 2
# index with DataFrame
mask = df > np.abs(df)
expected = df.copy()
df[df > np.abs(df)] = nan
expected.values[mask.values] = nan
assert_frame_equal(df, expected)
# set from DataFrame
expected = df.copy()
df[df > np.abs(df)] = df * 2
np.putmask(expected.values, mask.values, df.values * 2)
assert_frame_equal(df, expected)
def test_setitem_cast(self):
self.frame['D'] = self.frame['D'].astype('i8')
assert self.frame['D'].dtype == np.int64
# #669, should not cast?
# this is now set to int64, which means a replacement of the column to
# the value dtype (and nothing to do with the existing dtype)
self.frame['B'] = 0
assert self.frame['B'].dtype == np.int64
# cast if pass array of course
self.frame['B'] = np.arange(len(self.frame))
assert issubclass(self.frame['B'].dtype.type, np.integer)
self.frame['foo'] = 'bar'
self.frame['foo'] = 0
assert self.frame['foo'].dtype == np.int64
self.frame['foo'] = 'bar'
self.frame['foo'] = 2.5
assert self.frame['foo'].dtype == np.float64
self.frame['something'] = 0
assert self.frame['something'].dtype == np.int64
self.frame['something'] = 2
assert self.frame['something'].dtype == np.int64
self.frame['something'] = 2.5
assert self.frame['something'].dtype == np.float64
# GH 7704
# dtype conversion on setting
df = DataFrame(np.random.rand(30, 3), columns=tuple('ABC'))
df['event'] = np.nan
df.loc[10, 'event'] = 'foo'
result = df.get_dtype_counts().sort_values()
expected = Series({'float64': 3, 'object': 1}).sort_values()
assert_series_equal(result, expected)
# Test that data type is preserved . #5782
df = DataFrame({'one': np.arange(6, dtype=np.int8)})
df.loc[1, 'one'] = 6
assert df.dtypes.one == np.dtype(np.int8)
df.one = np.int8(7)
assert df.dtypes.one == np.dtype(np.int8)
def test_setitem_boolean_column(self):
expected = self.frame.copy()
mask = self.frame['A'] > 0
self.frame.loc[mask, 'B'] = 0
expected.values[mask.values, 1] = 0
assert_frame_equal(self.frame, expected)
def test_setitem_corner(self):
# corner case
df = DataFrame({'B': [1., 2., 3.],
'C': ['a', 'b', 'c']},
index=np.arange(3))
del df['B']
df['B'] = [1., 2., 3.]
assert 'B' in df
assert len(df.columns) == 2
df['A'] = 'beginning'
df['E'] = 'foo'
df['D'] = 'bar'
df[datetime.now()] = 'date'
df[datetime.now()] = 5.
# what to do when empty frame with index
dm = DataFrame(index=self.frame.index)
dm['A'] = 'foo'
dm['B'] = 'bar'
assert len(dm.columns) == 2
assert dm.values.dtype == np.object_
# upcast
dm['C'] = 1
assert dm['C'].dtype == np.int64
dm['E'] = 1.
assert dm['E'].dtype == np.float64
# set existing column
dm['A'] = 'bar'
assert 'bar' == dm['A'][0]
dm = DataFrame(index=np.arange(3))
dm['A'] = 1
dm['foo'] = 'bar'
del dm['foo']
dm['foo'] = 'bar'
assert dm['foo'].dtype == np.object_
dm['coercable'] = ['1', '2', '3']
assert dm['coercable'].dtype == np.object_
def test_setitem_corner2(self):
data = {"title": ['foobar', 'bar', 'foobar'] + ['foobar'] * 17,
"cruft": np.random.random(20)}
df = DataFrame(data)
ix = df[df['title'] == 'bar'].index
df.loc[ix, ['title']] = 'foobar'
df.loc[ix, ['cruft']] = 0
assert df.loc[1, 'title'] == 'foobar'
assert df.loc[1, 'cruft'] == 0
def test_setitem_ambig(self):
# Difficulties with mixed-type data
from decimal import Decimal
# Created as float type
dm = DataFrame(index=lrange(3), columns=lrange(3))
coercable_series = Series([Decimal(1) for _ in range(3)],
index=lrange(3))
uncoercable_series = Series(['foo', 'bzr', 'baz'], index=lrange(3))
dm[0] = np.ones(3)
assert len(dm.columns) == 3
dm[1] = coercable_series
assert len(dm.columns) == 3
dm[2] = uncoercable_series
assert len(dm.columns) == 3
assert dm[2].dtype == np.object_
def test_setitem_clear_caches(self):
# see gh-304
df = DataFrame({'x': [1.1, 2.1, 3.1, 4.1], 'y': [5.1, 6.1, 7.1, 8.1]},
index=[0, 1, 2, 3])
df.insert(2, 'z', np.nan)
# cache it
foo = df['z']
df.loc[df.index[2:], 'z'] = 42
expected = Series([np.nan, np.nan, 42, 42], index=df.index, name='z')
assert df['z'] is not foo
tm.assert_series_equal(df['z'], expected)
def test_setitem_None(self):
# GH #766
self.frame[None] = self.frame['A']
assert_series_equal(
self.frame.iloc[:, -1], self.frame['A'], check_names=False)
assert_series_equal(self.frame.loc[:, None], self.frame[
'A'], check_names=False)
assert_series_equal(self.frame[None], self.frame[
'A'], check_names=False)
repr(self.frame)
def test_setitem_empty(self):
# GH 9596
df = pd.DataFrame({'a': ['1', '2', '3'],
'b': ['11', '22', '33'],
'c': ['111', '222', '333']})
result = df.copy()
result.loc[result.b.isnull(), 'a'] = result.a
assert_frame_equal(result, df)
def test_setitem_empty_frame_with_boolean(self):
# Test for issue #10126
for dtype in ('float', 'int64'):
for df in [
pd.DataFrame(dtype=dtype),
pd.DataFrame(dtype=dtype, index=[1]),
pd.DataFrame(dtype=dtype, columns=['A']),
]:
df2 = df.copy()
df[df > df2] = 47
assert_frame_equal(df, df2)
def test_getitem_empty_frame_with_boolean(self):
# Test for issue #11859
df = pd.DataFrame()
df2 = df[df > 0]
assert_frame_equal(df, df2)
def test_delitem_corner(self):
f = self.frame.copy()
del f['D']
assert len(f.columns) == 3
pytest.raises(KeyError, f.__delitem__, 'D')
del f['B']
assert len(f.columns) == 2
def test_getitem_fancy_2d(self):
f = self.frame
with catch_warnings(record=True):
assert_frame_equal(f.ix[:, ['B', 'A']],
f.reindex(columns=['B', 'A']))
subidx = self.frame.index[[5, 4, 1]]
with catch_warnings(record=True):
assert_frame_equal(f.ix[subidx, ['B', 'A']],
f.reindex(index=subidx, columns=['B', 'A']))
# slicing rows, etc.
with catch_warnings(record=True):
assert_frame_equal(f.ix[5:10], f[5:10])
assert_frame_equal(f.ix[5:10, :], f[5:10])
assert_frame_equal(f.ix[:5, ['A', 'B']],
f.reindex(index=f.index[:5],
columns=['A', 'B']))
# slice rows with labels, inclusive!
with catch_warnings(record=True):
expected = f.ix[5:11]
result = f.ix[f.index[5]:f.index[10]]
assert_frame_equal(expected, result)
# slice columns
with catch_warnings(record=True):
assert_frame_equal(f.ix[:, :2], f.reindex(columns=['A', 'B']))
# get view
with catch_warnings(record=True):
exp = f.copy()
f.ix[5:10].values[:] = 5
exp.values[5:10] = 5
assert_frame_equal(f, exp)
with catch_warnings(record=True):
pytest.raises(ValueError, f.ix.__getitem__, f > 0.5)
def test_slice_floats(self):
index = [52195.504153, 52196.303147, 52198.369883]
df = DataFrame(np.random.rand(3, 2), index=index)
s1 = df.loc[52195.1:52196.5]
assert len(s1) == 2
s1 = df.loc[52195.1:52196.6]
assert len(s1) == 2
s1 = df.loc[52195.1:52198.9]
assert len(s1) == 3
def test_getitem_fancy_slice_integers_step(self):
df = DataFrame(np.random.randn(10, 5))
# this is OK
result = df.iloc[:8:2] # noqa
df.iloc[:8:2] = np.nan
assert isnull(df.iloc[:8:2]).values.all()
def test_getitem_setitem_integer_slice_keyerrors(self):
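        # iloc slices are positional; loc label slices work on this monotonic
        # integer index but raise KeyError once the index is non-monotonic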
df = DataFrame(np.random.randn(10, 5), index=lrange(0, 20, 2))
# this is OK
cp = df.copy()
cp.iloc[4:10] = 0
assert (cp.iloc[4:10] == 0).values.all()
# so is this
cp = df.copy()
cp.iloc[3:11] = 0
assert (cp.iloc[3:11] == 0).values.all()
result = df.iloc[2:6]
result2 = df.loc[3:11]
expected = df.reindex([4, 6, 8, 10])
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
# non-monotonic, raise KeyError
df2 = df.iloc[lrange(5) + lrange(5, 10)[::-1]]
pytest.raises(KeyError, df2.loc.__getitem__, slice(3, 11))
pytest.raises(KeyError, df2.loc.__setitem__, slice(3, 11), 0)
def test_setitem_fancy_2d(self):
# case 1
frame = self.frame.copy()
expected = frame.copy()
with catch_warnings(record=True):
frame.ix[:, ['B', 'A']] = 1
expected['B'] = 1.
expected['A'] = 1.
assert_frame_equal(frame, expected)
# case 2
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = frame.copy()
subidx = self.frame.index[[5, 4, 1]]
values = randn(3, 2)
with catch_warnings(record=True):
frame.ix[subidx, ['B', 'A']] = values
frame2.ix[[5, 4, 1], ['B', 'A']] = values
expected['B'].ix[subidx] = values[:, 0]
expected['A'].ix[subidx] = values[:, 1]
assert_frame_equal(frame, expected)
assert_frame_equal(frame2, expected)
# case 3: slicing rows, etc.
frame = self.frame.copy()
with catch_warnings(record=True):
expected1 = self.frame.copy()
frame.ix[5:10] = 1.
expected1.values[5:10] = 1.
assert_frame_equal(frame, expected1)
with catch_warnings(record=True):
expected2 = self.frame.copy()
arr = randn(5, len(frame.columns))
frame.ix[5:10] = arr
expected2.values[5:10] = arr
assert_frame_equal(frame, expected2)
# case 4
with catch_warnings(record=True):
frame = self.frame.copy()
frame.ix[5:10, :] = 1.
assert_frame_equal(frame, expected1)
frame.ix[5:10, :] = arr
assert_frame_equal(frame, expected2)
# case 5
with catch_warnings(record=True):
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
values = randn(5, 2)
frame.ix[:5, ['A', 'B']] = values
expected['A'][:5] = values[:, 0]
expected['B'][:5] = values[:, 1]
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
frame2.ix[:5, [0, 1]] = values
assert_frame_equal(frame2, expected)
# case 6: slice rows with labels, inclusive!
with catch_warnings(record=True):
frame = self.frame.copy()
expected = self.frame.copy()
frame.ix[frame.index[5]:frame.index[10]] = 5.
expected.values[5:11] = 5
assert_frame_equal(frame, expected)
# case 7: slice columns
with catch_warnings(record=True):
frame = self.frame.copy()
frame2 = self.frame.copy()
expected = self.frame.copy()
# slice indices
frame.ix[:, 1:3] = 4.
expected.values[:, 1:3] = 4.
assert_frame_equal(frame, expected)
# slice with labels
frame.ix[:, 'B':'C'] = 4.
assert_frame_equal(frame, expected)
# new corner case of boolean slicing / setting
frame = DataFrame(lzip([2, 3, 9, 6, 7], [np.nan] * 5),
columns=['a', 'b'])
lst = [100]
lst.extend([np.nan] * 4)
expected = DataFrame(lzip([100, 3, 9, 6, 7], lst),
columns=['a', 'b'])
frame[frame['a'] == 2] = 100
assert_frame_equal(frame, expected)
def test_fancy_getitem_slice_mixed(self):
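        # a column slice of a single-block frame is a view: assigning through it
        # raises SettingWithCopyError yet still updates the parent frame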
sliced = self.mixed_frame.iloc[:, -3:]
assert sliced['D'].dtype == np.float64
# get view with single block
# setting it triggers setting with copy
sliced = self.frame.iloc[:, -3:]
def f():
sliced['C'] = 4.
pytest.raises(com.SettingWithCopyError, f)
assert (self.frame['C'] == 4).all()
def test_fancy_setitem_int_labels(self):
# integer index defers to label-based indexing
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
tmp = df.copy()
exp = df.copy()
tmp.ix[[0, 2, 4]] = 5
exp.values[:3] = 5
assert_frame_equal(tmp, exp)
with catch_warnings(record=True):
tmp = df.copy()
exp = df.copy()
tmp.ix[6] = 5
exp.values[3] = 5
assert_frame_equal(tmp, exp)
with catch_warnings(record=True):
tmp = df.copy()
exp = df.copy()
tmp.ix[:, 2] = 5
# tmp correctly sets the dtype
# so match the exp way
exp[2] = 5
assert_frame_equal(tmp, exp)
def test_fancy_getitem_int_labels(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
result = df.ix[[4, 2, 0], [2, 0]]
expected = df.reindex(index=[4, 2, 0], columns=[2, 0])
assert_frame_equal(result, expected)
with catch_warnings(record=True):
result = df.ix[[4, 2, 0]]
expected = df.reindex(index=[4, 2, 0])
assert_frame_equal(result, expected)
with catch_warnings(record=True):
result = df.ix[4]
expected = df.xs(4)
assert_series_equal(result, expected)
with catch_warnings(record=True):
result = df.ix[:, 3]
expected = df[3]
assert_series_equal(result, expected)
def test_fancy_index_int_labels_exceptions(self):
df = DataFrame(np.random.randn(10, 5), index=np.arange(0, 20, 2))
with catch_warnings(record=True):
# labels that aren't contained
pytest.raises(KeyError, df.ix.__setitem__,
([0, 1, 2], [2, 3, 4]), 5)
# try to set indices not contained in frame
pytest.raises(KeyError, self.frame.ix.__setitem__,
['foo', 'bar', 'baz'], 1)
pytest.raises(KeyError, self.frame.ix.__setitem__,
(slice(None, None), ['E']), 1)
# partial setting now allows this GH2578
# pytest.raises(KeyError, self.frame.ix.__setitem__,
# (slice(None, None), 'E'), 1)
def test_setitem_fancy_mixed_2d(self):
with catch_warnings(record=True):
self.mixed_frame.ix[:5, ['C', 'B', 'A']] = 5
result = self.mixed_frame.ix[:5, ['C', 'B', 'A']]
assert (result.values == 5).all()
self.mixed_frame.ix[5] = np.nan
assert isnull(self.mixed_frame.ix[5]).all()
self.mixed_frame.ix[5] = self.mixed_frame.ix[6]
assert_series_equal(self.mixed_frame.ix[5], self.mixed_frame.ix[6],
check_names=False)
# #1432
with catch_warnings(record=True):
df = DataFrame({1: [1., 2., 3.],
2: [3, 4, 5]})
assert df._is_mixed_type
df.ix[1] = [5, 10]
expected = DataFrame({1: [1., 5., 3.],
2: [3, 10, 5]})
assert_frame_equal(df, expected)
def test_ix_align(self):
b = Series(randn(10), name=0).sort_values()
df_orig = DataFrame(randn(10, 4))
df = df_orig.copy()
with catch_warnings(record=True):
df.ix[:, 0] = b
assert_series_equal(df.ix[:, 0].reindex(b.index), b)
with catch_warnings(record=True):
dft = df_orig.T
dft.ix[0, :] = b
assert_series_equal(dft.ix[0, :].reindex(b.index), b)
with catch_warnings(record=True):
df = df_orig.copy()
df.ix[:5, 0] = b
s = df.ix[:5, 0]
assert_series_equal(s, b.reindex(s.index))
with catch_warnings(record=True):
dft = df_orig.T
dft.ix[0, :5] = b
s = dft.ix[0, :5]
assert_series_equal(s, b.reindex(s.index))
with catch_warnings(record=True):
df = df_orig.copy()
idx = [0, 1, 3, 5]
df.ix[idx, 0] = b
s = df.ix[idx, 0]
assert_series_equal(s, b.reindex(s.index))
with catch_warnings(record=True):
dft = df_orig.T
dft.ix[0, idx] = b
s = dft.ix[0, idx]
assert_series_equal(s, b.reindex(s.index))
def test_ix_frame_align(self):
b = DataFrame(np.random.randn(3, 4))
df_orig = DataFrame(randn(10, 4))
df = df_orig.copy()
with catch_warnings(record=True):
df.ix[:3] = b
out = b.ix[:3]
assert_frame_equal(out, b)
b.sort_index(inplace=True)
with catch_warnings(record=True):
df = df_orig.copy()
df.ix[[0, 1, 2]] = b
out = df.ix[[0, 1, 2]].reindex(b.index)
assert_frame_equal(out, b)
with catch_warnings(record=True):
df = df_orig.copy()
df.ix[:3] = b
out = df.ix[:3]
assert_frame_equal(out, b.reindex(out.index))
def test_getitem_setitem_non_ix_labels(self):
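        # label-based slicing via [] and .loc includes the end label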
df = tm.makeTimeDataFrame()
start, end = df.index[[5, 10]]
result = df.loc[start:end]
result2 = df[start:end]
expected = df[5:11]
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
result = df.copy()
result.loc[start:end] = 0
result2 = df.copy()
result2[start:end] = 0
expected = df.copy()
expected[5:11] = 0
assert_frame_equal(result, expected)
assert_frame_equal(result2, expected)
def test_ix_multi_take(self):
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index == 0, :]
xp = df.reindex([0])
assert_frame_equal(rs, xp)
""" #1321
df = DataFrame(np.random.randn(3, 2))
rs = df.loc[df.index==0, df.columns==1]
xp = df.reindex([0], [1])
assert_frame_equal(rs, xp)
"""
def test_ix_multi_take_nonint_index(self):
df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
columns=['a', 'b'])
with catch_warnings(record=True):
rs = df.ix[[0], [0]]
xp = df.reindex(['x'], columns=['a'])
assert_frame_equal(rs, xp)
def test_ix_multi_take_multiindex(self):
df = DataFrame(np.random.randn(3, 2), index=['x', 'y', 'z'],
columns=[['a', 'b'], ['1', '2']])
with catch_warnings(record=True):
rs = df.ix[[0], [0]]
xp = df.reindex(['x'], columns=[('a', '1')])
assert_frame_equal(rs, xp)
def test_ix_dup(self):
idx = Index(['a', 'a', 'b', 'c', 'd', 'd'])
df = DataFrame(np.random.randn(len(idx), 3), idx)
with catch_warnings(record=True):
sub = df.ix[:'d']
assert_frame_equal(sub, df)
with catch_warnings(record=True):
sub = df.ix['a':'c']
assert_frame_equal(sub, df.ix[0:4])
with catch_warnings(record=True):
sub = df.ix['b':'d']
assert_frame_equal(sub, df.ix[2:])
def test_getitem_fancy_1d(self):
f = self.frame
# return self if no slicing...for now
with catch_warnings(record=True):
assert f.ix[:, :] is f
# low dimensional slice
with catch_warnings(record=True):
xs1 = f.ix[2, ['C', 'B', 'A']]
xs2 = f.xs(f.index[2]).reindex(['C', 'B', 'A'])
tm.assert_series_equal(xs1, xs2)
with catch_warnings(record=True):
ts1 = f.ix[5:10, 2]
ts2 = f[f.columns[2]][5:10]
tm.assert_series_equal(ts1, ts2)
# positional xs
with catch_warnings(record=True):
xs1 = f.ix[0]
xs2 = f.xs(f.index[0])
tm.assert_series_equal(xs1, xs2)
with catch_warnings(record=True):
xs1 = f.ix[f.index[5]]
xs2 = f.xs(f.index[5])
tm.assert_series_equal(xs1, xs2)
# single column
with catch_warnings(record=True):
assert_series_equal(f.ix[:, 'A'], f['A'])
# return view
with catch_warnings(record=True):
exp = f.copy()
exp.values[5] = 4
f.ix[5][:] = 4
tm.assert_frame_equal(exp, f)
with catch_warnings(record=True):
exp.values[:, 1] = 6
f.ix[:, 1][:] = 6
tm.assert_frame_equal(exp, f)
# slice of mixed-frame
with catch_warnings(record=True):
xs = self.mixed_frame.ix[5]
exp = self.mixed_frame.xs(self.mixed_frame.index[5])
tm.assert_series_equal(xs, exp)
def test_setitem_fancy_1d(self):
# case 1: set cross-section for indices
frame = self.frame.copy()
expected = self.frame.copy()
with catch_warnings(record=True):
frame.ix[2, ['C', 'B', 'A']] = [1., 2., 3.]
expected['C'][2] = 1.
expected['B'][2] = 2.
expected['A'][2] = 3.
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
frame2 = self.frame.copy()
frame2.ix[2, [3, 2, 1]] = [1., 2., 3.]
assert_frame_equal(frame, expected)
# case 2, set a section of a column
frame = self.frame.copy()
expected = self.frame.copy()
with catch_warnings(record=True):
vals = randn(5)
expected.values[5:10, 2] = vals
frame.ix[5:10, 2] = vals
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
frame2 = self.frame.copy()
frame2.ix[5:10, 'B'] = vals
assert_frame_equal(frame, expected)
# case 3: full xs
frame = self.frame.copy()
expected = self.frame.copy()
with catch_warnings(record=True):
frame.ix[4] = 5.
expected.values[4] = 5.
assert_frame_equal(frame, expected)
with catch_warnings(record=True):
frame.ix[frame.index[4]] = 6.
expected.values[4] = 6.
assert_frame_equal(frame, expected)
# single column
frame = self.frame.copy()
expected = self.frame.copy()
with catch_warnings(record=True):
frame.ix[:, 'A'] = 7.
expected['A'] = 7.
assert_frame_equal(frame, expected)
def test_getitem_fancy_scalar(self):
f = self.frame
ix = f.loc
# individual value
for col in f.columns:
ts = f[col]
for idx in f.index[::5]:
assert ix[idx, col] == ts[idx]
def test_setitem_fancy_scalar(self):
f = self.frame
expected = self.frame.copy()
ix = f.loc
# individual value
for j, col in enumerate(f.columns):
ts = f[col] # noqa
for idx in f.index[::5]:
i = f.index.get_loc(idx)
val = randn()
expected.values[i, j] = val
ix[idx, col] = val
assert_frame_equal(f, expected)
def test_getitem_fancy_boolean(self):
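        # boolean vectors passed to .loc select rows/columns like the
        # corresponding reindex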
f = self.frame
ix = f.loc
expected = f.reindex(columns=['B', 'D'])
result = ix[:, [False, True, False, True]]
assert_frame_equal(result, expected)
expected = f.reindex(index=f.index[5:10], columns=['B', 'D'])
result = ix[f.index[5:10], [False, True, False, True]]
assert_frame_equal(result, expected)
boolvec = f.index > f.index[7]
expected = f.reindex(index=f.index[boolvec])
result = ix[boolvec]
assert_frame_equal(result, expected)
result = ix[boolvec, :]
assert_frame_equal(result, expected)
result = ix[boolvec, f.columns[2:]]
expected = f.reindex(index=f.index[boolvec],
columns=['C', 'D'])
assert_frame_equal(result, expected)
def test_setitem_fancy_boolean(self):
# from 2d, set with booleans
frame = self.frame.copy()
expected = self.frame.copy()
mask = frame['A'] > 0
frame.loc[mask] = 0.
expected.values[mask.values] = 0.
assert_frame_equal(frame, expected)
frame = self.frame.copy()
expected = self.frame.copy()
frame.loc[mask, ['A', 'B']] = 0.
expected.values[mask.values, :2] = 0.
assert_frame_equal(frame, expected)
def test_getitem_fancy_ints(self):
result = self.frame.iloc[[1, 4, 7]]
expected = self.frame.loc[self.frame.index[[1, 4, 7]]]
assert_frame_equal(result, expected)
result = self.frame.iloc[:, [2, 0, 1]]
expected = self.frame.loc[:, self.frame.columns[[2, 0, 1]]]
assert_frame_equal(result, expected)
def test_getitem_setitem_fancy_exceptions(self):
ix = self.frame.iloc
with tm.assert_raises_regex(IndexingError, 'Too many indexers'):
ix[:, :, :]
with pytest.raises(IndexingError):
ix[:, :, :] = 1
def test_getitem_setitem_boolean_misaligned(self):
# boolean index misaligned labels
mask = self.frame['A'][::-1] > 1
result = self.frame.loc[mask]
expected = self.frame.loc[mask[::-1]]
assert_frame_equal(result, expected)
cp = self.frame.copy()
expected = self.frame.copy()
cp.loc[mask] = 0
expected.loc[mask] = 0
assert_frame_equal(cp, expected)
def test_getitem_setitem_boolean_multi(self):
df = DataFrame(np.random.randn(3, 2))
# get
k1 = np.array([True, False, True])
k2 = np.array([False, True])
result = df.loc[k1, k2]
expected = df.loc[[0, 2], [1]]
assert_frame_equal(result, expected)
expected = df.copy()
df.loc[np.array([True, False, True]),
np.array([False, True])] = 5
expected.loc[[0, 2], [1]] = 5
assert_frame_equal(df, expected)
def test_getitem_setitem_float_labels(self):
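        # label slices on a float index include both endpoints; positional
        # slicing must go through iloc, which rejects float bounds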
index = Index([1.5, 2, 3, 4, 5])
df = DataFrame(np.random.randn(5, 5), index=index)
result = df.loc[1.5:4]
expected = df.reindex([1.5, 2, 3, 4])
assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4:5]
expected = df.reindex([4, 5]) # reindex with int
assert_frame_equal(result, expected, check_index_type=False)
assert len(result) == 2
result = df.loc[4:5]
expected = df.reindex([4.0, 5.0]) # reindex with float
assert_frame_equal(result, expected)
assert len(result) == 2
# loc_float changes this to work properly
result = df.loc[1:2]
expected = df.iloc[0:2]
assert_frame_equal(result, expected)
df.loc[1:2] = 0
result = df[1:2]
assert (result == 0).all().all()
# #2727
index = Index([1.0, 2.5, 3.5, 4.5, 5.0])
df = DataFrame(np.random.randn(5, 5), index=index)
# positional slicing only via iloc!
pytest.raises(TypeError, lambda: df.iloc[1.0:5])
result = df.iloc[4:5]
expected = df.reindex([5.0])
assert_frame_equal(result, expected)
assert len(result) == 1
cp = df.copy()
def f():
cp.iloc[1.0:5] = 0
pytest.raises(TypeError, f)
def f():
result = cp.iloc[1.0:5] == 0 # noqa
pytest.raises(TypeError, f)
assert result.values.all()
assert (cp.iloc[0:1] == df.iloc[0:1]).values.all()
cp = df.copy()
cp.iloc[4:5] = 0
assert (cp.iloc[4:5] == 0).values.all()
assert (cp.iloc[0:4] == df.iloc[0:4]).values.all()
# float slicing
result = df.loc[1.0:5]
expected = df
assert_frame_equal(result, expected)
assert len(result) == 5
result = df.loc[1.1:5]
expected = df.reindex([2.5, 3.5, 4.5, 5.0])
assert_frame_equal(result, expected)
assert len(result) == 4
result = df.loc[4.51:5]
expected = df.reindex([5.0])
assert_frame_equal(result, expected)
assert len(result) == 1
result = df.loc[1.0:5.0]
expected = df.reindex([1.0, 2.5, 3.5, 4.5, 5.0])
assert_frame_equal(result, expected)
assert len(result) == 5
cp = df.copy()
cp.loc[1.0:5.0] = 0
result = cp.loc[1.0:5.0]
assert (result == 0).values.all()
def test_setitem_single_column_mixed(self):
df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['foo', 'bar', 'baz'])
df['str'] = 'qux'
df.loc[df.index[::2], 'str'] = nan
expected = np.array([nan, 'qux', nan, 'qux', nan], dtype=object)
assert_almost_equal(df['str'].values, expected)
def test_setitem_single_column_mixed_datetime(self):
df = DataFrame(randn(5, 3), index=['a', 'b', 'c', 'd', 'e'],
columns=['foo', 'bar', 'baz'])
df['timestamp'] = Timestamp('20010102')
# check our dtypes
result = df.get_dtype_counts()
expected = Series({'float64': 3, 'datetime64[ns]': 1})
assert_series_equal(result, expected)
# set an allowable datetime64 type
df.loc['b', 'timestamp'] = iNaT
assert isnull(df.loc['b', 'timestamp'])
# allow this syntax
df.loc['c', 'timestamp'] = nan
assert isnull(df.loc['c', 'timestamp'])
# allow this syntax
df.loc['d', :] = nan
assert not isnull(df.loc['c', :]).all()
# as of GH 3216 this will now work!
# try to set with a list like item
# pytest.raises(
# Exception, df.loc.__setitem__, ('d', 'timestamp'), [nan])
def test_setitem_frame(self):
piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
self.frame.loc[self.frame.index[-2]:, ['A', 'B']] = piece.values
result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
expected = piece.values
assert_almost_equal(result, expected)
# GH 3216
# already aligned
f = self.mixed_frame.copy()
piece = DataFrame([[1., 2.], [3., 4.]],
index=f.index[0:2], columns=['A', 'B'])
key = (slice(None, 2), ['A', 'B'])
f.loc[key] = piece
assert_almost_equal(f.loc[f.index[0:2], ['A', 'B']].values,
piece.values)
# rows unaligned
f = self.mixed_frame.copy()
piece = DataFrame([[1., 2.], [3., 4.], [5., 6.], [7., 8.]],
index=list(f.index[0:2]) + ['foo', 'bar'],
columns=['A', 'B'])
key = (slice(None, 2), ['A', 'B'])
f.loc[key] = piece
assert_almost_equal(f.loc[f.index[0:2:], ['A', 'B']].values,
piece.values[0:2])
# key is unaligned with values
f = self.mixed_frame.copy()
piece = f.loc[f.index[:2], ['A']]
piece.index = f.index[-2:]
key = (slice(-2, None), ['A', 'B'])
f.loc[key] = piece
piece['B'] = np.nan
assert_almost_equal(f.loc[f.index[-2:], ['A', 'B']].values,
piece.values)
# ndarray
f = self.mixed_frame.copy()
piece = self.mixed_frame.loc[f.index[:2], ['A', 'B']]
key = (slice(-2, None), ['A', 'B'])
f.loc[key] = piece.values
assert_almost_equal(f.loc[f.index[-2:], ['A', 'B']].values,
piece.values)
# needs upcasting
df = DataFrame([[1, 2, 'foo'], [3, 4, 'bar']], columns=['A', 'B', 'C'])
df2 = df.copy()
df2.loc[:, ['A', 'B']] = df.loc[:, ['A', 'B']] + 0.5
expected = df.reindex(columns=['A', 'B'])
expected += 0.5
expected['C'] = df['C']
assert_frame_equal(df2, expected)
def test_setitem_frame_align(self):
piece = self.frame.loc[self.frame.index[:2], ['A', 'B']]
piece.index = self.frame.index[-2:]
piece.columns = ['A', 'B']
self.frame.loc[self.frame.index[-2:], ['A', 'B']] = piece
result = self.frame.loc[self.frame.index[-2:], ['A', 'B']].values
expected = piece.values
assert_almost_equal(result, expected)
def test_getitem_setitem_ix_duplicates(self):
# #1201
df = DataFrame(np.random.randn(5, 3),
index=['foo', 'foo', 'bar', 'baz', 'bar'])
result = df.loc['foo']
expected = df[:2]
assert_frame_equal(result, expected)
result = df.loc['bar']
expected = df.iloc[[2, 4]]
assert_frame_equal(result, expected)
result = df.loc['baz']
expected = df.iloc[3]
assert_series_equal(result, expected)
def test_getitem_ix_boolean_duplicates_multiple(self):
# #1201
df = DataFrame(np.random.randn(5, 3),
index=['foo', 'foo', 'bar', 'baz', 'bar'])
result = df.loc[['bar']]
exp = df.iloc[[2, 4]]
assert_frame_equal(result, exp)
result = df.loc[df[1] > 0]
exp = df[df[1] > 0]
assert_frame_equal(result, exp)
result = df.loc[df[0] > 0]
exp = df[df[0] > 0]
assert_frame_equal(result, exp)
def test_getitem_setitem_ix_bool_keyerror(self):
# #2199
df = DataFrame({'a': [1, 2, 3]})
pytest.raises(KeyError, df.loc.__getitem__, False)
pytest.raises(KeyError, df.loc.__getitem__, True)
pytest.raises(KeyError, df.loc.__setitem__, False, 0)
pytest.raises(KeyError, df.loc.__setitem__, True, 0)
def test_getitem_list_duplicates(self):
# #1943
df = DataFrame(np.random.randn(4, 4), columns=list('AABC'))
df.columns.name = 'foo'
result = df[['B', 'C']]
assert result.columns.name == 'foo'
expected = df.iloc[:, 2:]
assert_frame_equal(result, expected)
def test_get_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
result = self.frame.get_value(idx, col)
expected = self.frame[col][idx]
assert result == expected
def test_lookup(self):
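        # lookup(row_labels, col_labels) should match element-wise get_value calls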
def alt(df, rows, cols, dtype):
result = []
for r, c in zip(rows, cols):
result.append(df.get_value(r, c))
return np.array(result, dtype=dtype)
def testit(df):
rows = list(df.index) * len(df.columns)
cols = list(df.columns) * len(df.index)
result = df.lookup(rows, cols)
expected = alt(df, rows, cols, dtype=np.object_)
tm.assert_almost_equal(result, expected, check_dtype=False)
testit(self.mixed_frame)
testit(self.frame)
df = DataFrame({'label': ['a', 'b', 'a', 'c'],
'mask_a': [True, True, False, True],
'mask_b': [True, False, False, False],
'mask_c': [False, True, False, True]})
df['mask'] = df.lookup(df.index, 'mask_' + df['label'])
exp_mask = alt(df, df.index, 'mask_' + df['label'], dtype=np.bool_)
tm.assert_series_equal(df['mask'], pd.Series(exp_mask, name='mask'))
assert df['mask'].dtype == np.bool_
with pytest.raises(KeyError):
self.frame.lookup(['xyz'], ['A'])
with pytest.raises(KeyError):
self.frame.lookup([self.frame.index[0]], ['xyz'])
with tm.assert_raises_regex(ValueError, 'same size'):
self.frame.lookup(['a', 'b', 'c'], ['a'])
def test_set_value(self):
for idx in self.frame.index:
for col in self.frame.columns:
self.frame.set_value(idx, col, 1)
assert self.frame[col][idx] == 1
def test_set_value_resize(self):
res = self.frame.set_value('foobar', 'B', 0)
assert res is self.frame
assert res.index[-1] == 'foobar'
assert res.get_value('foobar', 'B') == 0
self.frame.loc['foobar', 'qux'] = 0
assert self.frame.get_value('foobar', 'qux') == 0
res = self.frame.copy()
res3 = res.set_value('foobar', 'baz', 'sam')
assert res3['baz'].dtype == np.object_
res = self.frame.copy()
res3 = res.set_value('foobar', 'baz', True)
assert res3['baz'].dtype == np.object_
res = self.frame.copy()
res3 = res.set_value('foobar', 'baz', 5)
assert is_float_dtype(res3['baz'])
assert isnull(res3['baz'].drop(['foobar'])).all()
pytest.raises(ValueError, res3.set_value, 'foobar', 'baz', 'sam')
def test_set_value_with_index_dtype_change(self):
df_orig = DataFrame(randn(3, 3), index=lrange(3), columns=list('ABC'))
# this is actually ambiguous as the 2 is interpreted as a positional
# so column is not created
df = df_orig.copy()
df.set_value('C', 2, 1.0)
assert list(df.index) == list(df_orig.index) + ['C']
# assert list(df.columns) == list(df_orig.columns) + [2]
df = df_orig.copy()
df.loc['C', 2] = 1.0
assert list(df.index) == list(df_orig.index) + ['C']
# assert list(df.columns) == list(df_orig.columns) + [2]
# create both new
df = df_orig.copy()
df.set_value('C', 'D', 1.0)
assert list(df.index) == list(df_orig.index) + ['C']
assert list(df.columns) == list(df_orig.columns) + ['D']
df = df_orig.copy()
df.loc['C', 'D'] = 1.0
assert list(df.index) == list(df_orig.index) + ['C']
assert list(df.columns) == list(df_orig.columns) + ['D']
def test_get_set_value_no_partial_indexing(self):
# partial w/ MultiIndex raise exception
index = MultiIndex.from_tuples([(0, 1), (0, 2), (1, 1), (1, 2)])
df = DataFrame(index=index, columns=lrange(4))
pytest.raises(KeyError, df.get_value, 0, 1)
# pytest.raises(KeyError, df.set_value, 0, 1, 0)
def test_single_element_ix_dont_upcast(self):
self.frame['E'] = 1
assert issubclass(self.frame['E'].dtype.type, (int, np.integer))
with catch_warnings(record=True):
result = self.frame.ix[self.frame.index[5], 'E']
assert is_integer(result)
result = self.frame.loc[self.frame.index[5], 'E']
assert is_integer(result)
# GH 11617
df = pd.DataFrame(dict(a=[1.23]))
df["b"] = 666
with catch_warnings(record=True):
result = df.ix[0, "b"]
assert is_integer(result)
result = df.loc[0, "b"]
assert is_integer(result)
expected = Series([666], [0], name='b')
with catch_warnings(record=True):
result = df.ix[[0], "b"]
assert_series_equal(result, expected)
result = df.loc[[0], "b"]
assert_series_equal(result, expected)
def test_iloc_row(self):
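        # iloc selects rows by position, ignoring the integer index labels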
df = DataFrame(np.random.randn(10, 4), index=lrange(0, 20, 2))
result = df.iloc[1]
exp = df.loc[2]
assert_series_equal(result, exp)
result = df.iloc[2]
exp = df.loc[4]
assert_series_equal(result, exp)
# slice
result = df.iloc[slice(4, 8)]
expected = df.loc[8:14]
assert_frame_equal(result, expected)
# verify slice is view
# setting it makes it raise/warn
def f():
result[2] = 0.
pytest.raises(com.SettingWithCopyError, f)
exp_col = df[2].copy()
exp_col[4:8] = 0.
assert_series_equal(df[2], exp_col)
# list of integers
result = df.iloc[[1, 2, 4, 6]]
expected = df.reindex(df.index[[1, 2, 4, 6]])
assert_frame_equal(result, expected)
def test_iloc_col(self):
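        # iloc selects columns by position, ignoring the integer column labels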
df = DataFrame(np.random.randn(4, 10), columns=lrange(0, 20, 2))
result = df.iloc[:, 1]
exp = df.loc[:, 2]
assert_series_equal(result, exp)
result = df.iloc[:, 2]
exp = df.loc[:, 4]
assert_series_equal(result, exp)
# slice
result = df.iloc[:, slice(4, 8)]
expected = df.loc[:, 8:14]
assert_frame_equal(result, expected)
# verify slice is view
# and that we are setting a copy
def f():
result[8] = 0.
pytest.raises(com.SettingWithCopyError, f)
assert (df[8] == 0).all()
# list of integers
result = df.iloc[:, [1, 2, 4, 6]]
expected = df.reindex(columns=df.columns[[1, 2, 4, 6]])
assert_frame_equal(result, expected)
def test_iloc_duplicates(self):
df = DataFrame(np.random.rand(3, 3), columns=list('ABC'),
index=list('aab'))
result = df.iloc[0]
with catch_warnings(record=True):
result2 = df.ix[0]
assert isinstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
with catch_warnings(record=True):
result = df.T.iloc[:, 0]
result2 = df.T.ix[:, 0]
assert isinstance(result, Series)
assert_almost_equal(result.values, df.values[0])
assert_series_equal(result, result2)
# multiindex
df = DataFrame(np.random.randn(3, 3),
columns=[['i', 'i', 'j'], ['A', 'A', 'B']],
index=[['i', 'i', 'j'], ['X', 'X', 'Y']])
with catch_warnings(record=True):
rs = df.iloc[0]
xp = df.ix[0]
assert_series_equal(rs, xp)
with catch_warnings(record=True):
rs = df.iloc[:, 0]
xp = df.T.ix[0]
assert_series_equal(rs, xp)
with catch_warnings(record=True):
rs = df.iloc[:, [0]]
xp = df.ix[:, [0]]
assert_frame_equal(rs, xp)
# #2259
df = DataFrame([[1, 2, 3], [4, 5, 6]], columns=[1, 1, 2])
result = df.iloc[:, [0]]
expected = df.take([0], axis=1)
assert_frame_equal(result, expected)
    def test_iloc_sparse_propagate_fill_value(self):
from pandas.core.sparse.api import SparseDataFrame
df = SparseDataFrame({'A': [999, 1]}, default_fill_value=999)
assert len(df['A'].sp_values) == len(df.iloc[:, 0].sp_values)
def test_iat(self):
for i, row in enumerate(self.frame.index):
for j, col in enumerate(self.frame.columns):
result = self.frame.iat[i, j]
expected = self.frame.at[row, col]
assert result == expected
def test_nested_exception(self):
        # Ignore the strange way of triggering the problem
        # (which may get fixed); it is just a way to reproduce
        # the issue of re-raising an outer exception without
        # a named argument
df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6],
"c": [7, 8, 9]}).set_index(["a", "b"])
l = list(df.index)
l[0] = ["a", "b"]
df.index = l
try:
repr(df)
except Exception as e:
assert type(e) != UnboundLocalError
def test_reindex_methods(self):
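        # reindex with method='nearest'/'pad'/'backfill' fills from the closest,
        # previous or next original label and honors the tolerance argument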
df = pd.DataFrame({'x': list(range(5))})
target = np.array([-0.1, 0.9, 1.1, 1.5])
for method, expected_values in [('nearest', [0, 1, 1, 2]),
('pad', [np.nan, 0, 1, 1]),
('backfill', [0, 1, 2, 2])]:
expected = pd.DataFrame({'x': expected_values}, index=target)
actual = df.reindex(target, method=method)
assert_frame_equal(expected, actual)
actual = df.reindex_like(df, method=method, tolerance=0)
assert_frame_equal(df, actual)
actual = df.reindex(target, method=method, tolerance=1)
assert_frame_equal(expected, actual)
e2 = expected[::-1]
actual = df.reindex(target[::-1], method=method)
assert_frame_equal(e2, actual)
new_order = [3, 0, 2, 1]
e2 = expected.iloc[new_order]
actual = df.reindex(target[new_order], method=method)
assert_frame_equal(e2, actual)
switched_method = ('pad' if method == 'backfill'
else 'backfill' if method == 'pad'
else method)
actual = df[::-1].reindex(target, method=switched_method)
assert_frame_equal(expected, actual)
expected = pd.DataFrame({'x': [0, 1, 1, np.nan]}, index=target)
actual = df.reindex(target, method='nearest', tolerance=0.2)
assert_frame_equal(expected, actual)
def test_reindex_frame_add_nat(self):
rng = date_range('1/1/2000 00:00:00', periods=10, freq='10s')
df = DataFrame({'A': np.random.randn(len(rng)), 'B': rng})
result = df.reindex(lrange(15))
assert np.issubdtype(result['B'].dtype, np.dtype('M8[ns]'))
mask = com.isnull(result)['B']
assert mask[-5:].all()
assert not mask[:-5].any()
def test_set_dataframe_column_ns_dtype(self):
x = DataFrame([datetime.now(), datetime.now()])
assert x[0].dtype == np.dtype('M8[ns]')
def test_non_monotonic_reindex_methods(self):
dr = pd.date_range('2013-08-01', periods=6, freq='B')
data = np.random.randn(6, 1)
df = pd.DataFrame(data, index=dr, columns=list('A'))
df_rev = pd.DataFrame(data, index=dr[[3, 4, 5] + [0, 1, 2]],
columns=list('A'))
# index is not monotonic increasing or decreasing
pytest.raises(ValueError, df_rev.reindex, df.index, method='pad')
pytest.raises(ValueError, df_rev.reindex, df.index, method='ffill')
pytest.raises(ValueError, df_rev.reindex, df.index, method='bfill')
pytest.raises(ValueError, df_rev.reindex, df.index, method='nearest')
def test_reindex_level(self):
from itertools import permutations
icol = ['jim', 'joe', 'jolie']
def verify_first_level(df, level, idx, check_index_type=True):
f = lambda val: np.nonzero(df[level] == val)[0]
i = np.concatenate(list(map(f, idx)))
left = df.set_index(icol).reindex(idx, level=level)
right = df.iloc[i].set_index(icol)
assert_frame_equal(left, right, check_index_type=check_index_type)
def verify(df, level, idx, indexer, check_index_type=True):
left = df.set_index(icol).reindex(idx, level=level)
right = df.iloc[indexer].set_index(icol)
assert_frame_equal(left, right, check_index_type=check_index_type)
df = pd.DataFrame({'jim': list('B' * 4 + 'A' * 2 + 'C' * 3),
'joe': list('abcdeabcd')[::-1],
'jolie': [10, 20, 30] * 3,
'joline': np.random.randint(0, 1000, 9)})
target = [['C', 'B', 'A'], ['F', 'C', 'A', 'D'], ['A'],
['A', 'B', 'C'], ['C', 'A', 'B'], ['C', 'B'], ['C', 'A'],
['A', 'B'], ['B', 'A', 'C']]
for idx in target:
verify_first_level(df, 'jim', idx)
# reindex by these causes different MultiIndex levels
for idx in [['D', 'F'], ['A', 'C', 'B']]:
verify_first_level(df, 'jim', idx, check_index_type=False)
verify(df, 'joe', list('abcde'), [3, 2, 1, 0, 5, 4, 8, 7, 6])
verify(df, 'joe', list('abcd'), [3, 2, 1, 0, 5, 8, 7, 6])
verify(df, 'joe', list('abc'), [3, 2, 1, 8, 7, 6])
verify(df, 'joe', list('eca'), [1, 3, 4, 6, 8])
verify(df, 'joe', list('edc'), [0, 1, 4, 5, 6])
verify(df, 'joe', list('eadbc'), [3, 0, 2, 1, 4, 5, 8, 7, 6])
verify(df, 'joe', list('edwq'), [0, 4, 5])
verify(df, 'joe', list('wq'), [], check_index_type=False)
df = DataFrame({'jim': ['mid'] * 5 + ['btm'] * 8 + ['top'] * 7,
'joe': ['3rd'] * 2 + ['1st'] * 3 + ['2nd'] * 3 +
['1st'] * 2 + ['3rd'] * 3 + ['1st'] * 2 +
['3rd'] * 3 + ['2nd'] * 2,
                        # this needs to be jointly unique with jim and joe, or
                        # reindexing will fail ~1.5% of the time; this works
                        # out to needing unique groups of the same size as joe
'jolie': np.concatenate([
np.random.choice(1000, x, replace=False)
for x in [2, 3, 3, 2, 3, 2, 3, 2]]),
'joline': np.random.randn(20).round(3) * 10})
for idx in permutations(df['jim'].unique()):
for i in range(3):
verify_first_level(df, 'jim', idx[:i + 1])
i = [2, 3, 4, 0, 1, 8, 9, 5, 6, 7, 10,
11, 12, 13, 14, 18, 19, 15, 16, 17]
verify(df, 'joe', ['1st', '2nd', '3rd'], i)
i = [0, 1, 2, 3, 4, 10, 11, 12, 5, 6,
7, 8, 9, 15, 16, 17, 18, 19, 13, 14]
verify(df, 'joe', ['3rd', '2nd', '1st'], i)
i = [0, 1, 5, 6, 7, 10, 11, 12, 18, 19, 15, 16, 17]
verify(df, 'joe', ['2nd', '3rd'], i)
i = [0, 1, 2, 3, 4, 10, 11, 12, 8, 9, 15, 16, 17, 13, 14]
verify(df, 'joe', ['3rd', '1st'], i)
def test_getitem_ix_float_duplicates(self):
df = pd.DataFrame(np.random.randn(3, 3),
index=[0.1, 0.2, 0.2], columns=list('abc'))
expect = df.iloc[1:]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:, 0]
assert_series_equal(df.loc[0.2, 'a'], expect)
df.index = [1, 0.2, 0.2]
expect = df.iloc[1:]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:, 0]
assert_series_equal(df.loc[0.2, 'a'], expect)
df = pd.DataFrame(np.random.randn(4, 3),
index=[1, 0.2, 0.2, 1], columns=list('abc'))
expect = df.iloc[1:-1]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[1:-1, 0]
assert_series_equal(df.loc[0.2, 'a'], expect)
df.index = [0.1, 0.2, 2, 0.2]
expect = df.iloc[[1, -1]]
assert_frame_equal(df.loc[0.2], expect)
with catch_warnings(record=True):
assert_frame_equal(df.ix[0.2], expect)
expect = df.iloc[[1, -1], 0]
assert_series_equal(df.loc[0.2, 'a'], expect)
def test_setitem_with_sparse_value(self):
# GH8131
df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
sp_series = pd.Series([0, 0, 1]).to_sparse(fill_value=0)
df['new_column'] = sp_series
assert_series_equal(df['new_column'], sp_series, check_names=False)
def test_setitem_with_unaligned_sparse_value(self):
df = pd.DataFrame({'c_1': ['a', 'b', 'c'], 'n_1': [1., 2., 3.]})
sp_series = (pd.Series([0, 0, 1], index=[2, 1, 0])
.to_sparse(fill_value=0))
df['new_column'] = sp_series
exp = pd.Series([1, 0, 0], name='new_column')
assert_series_equal(df['new_column'], exp)
def test_setitem_with_unaligned_tz_aware_datetime_column(self):
# GH 12981
# Assignment of unaligned offset-aware datetime series.
# Make sure timezone isn't lost
column = pd.Series(pd.date_range('2015-01-01', periods=3, tz='utc'),
name='dates')
df = pd.DataFrame({'dates': column})
df['dates'] = column[[1, 0, 2]]
assert_series_equal(df['dates'], column)
df = pd.DataFrame({'dates': column})
df.loc[[0, 1, 2], 'dates'] = column[[1, 0, 2]]
assert_series_equal(df['dates'], column)
def test_setitem_datetime_coercion(self):
# gh-1048
df = pd.DataFrame({'c': [pd.Timestamp('2010-10-01')] * 3})
df.loc[0:1, 'c'] = np.datetime64('2008-08-08')
assert pd.Timestamp('2008-08-08') == df.loc[0, 'c']
assert pd.Timestamp('2008-08-08') == df.loc[1, 'c']
df.loc[2, 'c'] = date(2005, 5, 5)
assert pd.Timestamp('2005-05-05') == df.loc[2, 'c']
def test_setitem_datetimelike_with_inference(self):
# GH 7592
# assignment of timedeltas with NaT
one_hour = timedelta(hours=1)
df = DataFrame(index=date_range('20130101', periods=4))
df['A'] = np.array([1 * one_hour] * 4, dtype='m8[ns]')
df.loc[:, 'B'] = np.array([2 * one_hour] * 4, dtype='m8[ns]')
df.loc[:3, 'C'] = np.array([3 * one_hour] * 3, dtype='m8[ns]')
df.loc[:, 'D'] = np.array([4 * one_hour] * 4, dtype='m8[ns]')
df.loc[df.index[:3], 'E'] = np.array([5 * one_hour] * 3,
dtype='m8[ns]')
df['F'] = np.timedelta64('NaT')
df.loc[df.index[:-1], 'F'] = np.array([6 * one_hour] * 3,
dtype='m8[ns]')
df.loc[df.index[-3]:, 'G'] = date_range('20130101', periods=3)
df['H'] = np.datetime64('NaT')
result = df.dtypes
expected = Series([np.dtype('timedelta64[ns]')] * 6 +
[np.dtype('datetime64[ns]')] * 2,
index=list('ABCDEFGH'))
assert_series_equal(result, expected)
def test_at_time_between_time_datetimeindex(self):
index = date_range("2012-01-01", "2012-01-05", freq='30min')
df = DataFrame(randn(len(index), 5), index=index)
akey = time(12, 0, 0)
bkey = slice(time(13, 0, 0), time(14, 0, 0))
ainds = [24, 72, 120, 168]
binds = [26, 27, 28, 74, 75, 76, 122, 123, 124, 170, 171, 172]
result = df.at_time(akey)
expected = df.loc[akey]
expected2 = df.iloc[ainds]
assert_frame_equal(result, expected)
assert_frame_equal(result, expected2)
assert len(result) == 4
result = df.between_time(bkey.start, bkey.stop)
expected = df.loc[bkey]
expected2 = df.iloc[binds]
assert_frame_equal(result, expected)
assert_frame_equal(result, expected2)
assert len(result) == 12
result = df.copy()
result.loc[akey] = 0
result = result.loc[akey]
expected = df.loc[akey].copy()
expected.loc[:] = 0
assert_frame_equal(result, expected)
result = df.copy()
result.loc[akey] = 0
result.loc[akey] = df.iloc[ainds]
assert_frame_equal(result, df)
result = df.copy()
result.loc[bkey] = 0
result = result.loc[bkey]
expected = df.loc[bkey].copy()
expected.loc[:] = 0
assert_frame_equal(result, expected)
result = df.copy()
result.loc[bkey] = 0
result.loc[bkey] = df.iloc[binds]
assert_frame_equal(result, df)
def test_xs(self):
idx = self.frame.index[5]
xs = self.frame.xs(idx)
for item, value in compat.iteritems(xs):
if np.isnan(value):
assert np.isnan(self.frame[item][idx])
else:
assert value == self.frame[item][idx]
# mixed-type xs
test_data = {
'A': {'1': 1, '2': 2},
'B': {'1': '1', '2': '2', '3': '3'},
}
frame = DataFrame(test_data)
xs = frame.xs('1')
assert xs.dtype == np.object_
assert xs['A'] == 1
assert xs['B'] == '1'
with pytest.raises(KeyError):
self.tsframe.xs(self.tsframe.index[0] - BDay())
# xs get column
series = self.frame.xs('A', axis=1)
expected = self.frame['A']
assert_series_equal(series, expected)
# view is returned if possible
series = self.frame.xs('A', axis=1)
series[:] = 5
assert (expected == 5).all()
def test_xs_corner(self):
# pathological mixed-type reordering case
df = DataFrame(index=[0])
df['A'] = 1.
df['B'] = 'foo'
df['C'] = 2.
df['D'] = 'bar'
df['E'] = 3.
xs = df.xs(0)
exp = pd.Series([1., 'foo', 2., 'bar', 3.],
index=list('ABCDE'), name=0)
tm.assert_series_equal(xs, exp)
# no columns but Index(dtype=object)
df = DataFrame(index=['a', 'b', 'c'])
result = df.xs('a')
expected = Series([], name='a', index=pd.Index([], dtype=object))
assert_series_equal(result, expected)
def test_xs_duplicates(self):
df = DataFrame(randn(5, 2), index=['b', 'b', 'c', 'b', 'a'])
cross = df.xs('c')
exp = df.iloc[2]
assert_series_equal(cross, exp)
def test_xs_keep_level(self):
df = (DataFrame({'day': {0: 'sat', 1: 'sun'},
'flavour': {0: 'strawberry', 1: 'strawberry'},
'sales': {0: 10, 1: 12},
'year': {0: 2008, 1: 2008}})
.set_index(['year', 'flavour', 'day']))
result = df.xs('sat', level='day', drop_level=False)
expected = df[:1]
assert_frame_equal(result, expected)
result = df.xs([2008, 'sat'], level=['year', 'day'], drop_level=False)
assert_frame_equal(result, expected)
def test_xs_view(self):
        # in 0.14 this will return a view if possible, a copy otherwise, but
        # this is numpy dependent
dm = DataFrame(np.arange(20.).reshape(4, 5),
index=lrange(4), columns=lrange(5))
dm.xs(2)[:] = 10
assert (dm.xs(2) == 10).all()
def test_index_namedtuple(self):
from collections import namedtuple
IndexType = namedtuple("IndexType", ["a", "b"])
idx1 = IndexType("foo", "bar")
idx2 = IndexType("baz", "bof")
index = Index([idx1, idx2],
name="composite_index", tupleize_cols=False)
df = DataFrame([(1, 2), (3, 4)], index=index, columns=["A", "B"])
with catch_warnings(record=True):
result = df.ix[IndexType("foo", "bar")]["A"]
assert result == 1
result = df.loc[IndexType("foo", "bar")]["A"]
assert result == 1
def test_boolean_indexing(self):
idx = lrange(3)
cols = ['A', 'B', 'C']
df1 = DataFrame(index=idx, columns=cols,
data=np.array([[0.0, 0.5, 1.0],
[1.5, 2.0, 2.5],
[3.0, 3.5, 4.0]],
dtype=float))
df2 = DataFrame(index=idx, columns=cols,
data=np.ones((len(idx), len(cols))))
expected = DataFrame(index=idx, columns=cols,
data=np.array([[0.0, 0.5, 1.0],
[1.5, 2.0, -1],
[-1, -1, -1]], dtype=float))
df1[df1 > 2.0 * df2] = -1
assert_frame_equal(df1, expected)
with tm.assert_raises_regex(ValueError, 'Item wrong length'):
df1[df1.index[:-1] > 2] = -1
def test_boolean_indexing_mixed(self):
df = DataFrame({
long(0): {35: np.nan, 40: np.nan, 43: np.nan,
49: np.nan, 50: np.nan},
long(1): {35: np.nan,
40: 0.32632316859446198,
43: np.nan,
49: 0.32632316859446198,
50: 0.39114724480578139},
long(2): {35: np.nan, 40: np.nan, 43: 0.29012581014105987,
49: np.nan, 50: np.nan},
long(3): {35: np.nan, 40: np.nan, 43: np.nan, 49: np.nan,
50: np.nan},
long(4): {35: 0.34215328467153283, 40: np.nan, 43: np.nan,
49: np.nan, 50: np.nan},
'y': {35: 0, 40: 0, 43: 0, 49: 0, 50: 1}})
# mixed int/float ok
df2 = df.copy()
df2[df2 > 0.3] = 1
expected = df.copy()
expected.loc[40, 1] = 1
expected.loc[49, 1] = 1
expected.loc[50, 1] = 1
expected.loc[35, 4] = 1
assert_frame_equal(df2, expected)
df['foo'] = 'test'
with tm.assert_raises_regex(TypeError, 'boolean setting '
'on mixed-type'):
df[df > 0.3] = 1
def test_where(self):
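        # where(cond, other) keeps values where cond holds and fills from
        # other elsewhere, aligning and upcasting dtypes as needed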
default_frame = DataFrame(np.random.randn(5, 3),
columns=['A', 'B', 'C'])
def _safe_add(df):
# only add to the numeric items
def is_ok(s):
return (issubclass(s.dtype.type, (np.integer, np.floating)) and
s.dtype != 'uint8')
return DataFrame(dict([(c, s + 1) if is_ok(s) else (c, s)
for c, s in compat.iteritems(df)]))
def _check_get(df, cond, check_dtypes=True):
other1 = _safe_add(df)
rs = df.where(cond, other1)
rs2 = df.where(cond.values, other1)
for k, v in rs.iteritems():
exp = Series(
np.where(cond[k], df[k], other1[k]), index=v.index)
assert_series_equal(v, exp, check_names=False)
assert_frame_equal(rs, rs2)
# dtypes
if check_dtypes:
assert (rs.dtypes == df.dtypes).all()
# check getting
for df in [default_frame, self.mixed_frame,
self.mixed_float, self.mixed_int]:
cond = df > 0
_check_get(df, cond)
# upcasting case (GH # 2794)
df = DataFrame(dict([(c, Series([1] * 3, dtype=c))
for c in ['int64', 'int32',
'float32', 'float64']]))
df.iloc[1, :] = 0
result = df.where(df >= 0).get_dtype_counts()
# when we don't preserve boolean casts
#
# expected = Series({ 'float32' : 1, 'float64' : 3 })
expected = Series({'float32': 1, 'float64': 1, 'int32': 1, 'int64': 1})
assert_series_equal(result, expected)
# aligning
def _check_align(df, cond, other, check_dtypes=True):
rs = df.where(cond, other)
for i, k in enumerate(rs.columns):
result = rs[k]
d = df[k].values
c = cond[k].reindex(df[k].index).fillna(False).values
if is_scalar(other):
o = other
else:
if isinstance(other, np.ndarray):
o = Series(other[:, i], index=result.index).values
else:
o = other[k].values
new_values = d if c.all() else np.where(c, d, o)
expected = Series(new_values, index=result.index, name=k)
# since we can't always have the correct numpy dtype
# as numpy doesn't know how to downcast, don't check
assert_series_equal(result, expected, check_dtype=False)
# dtypes
# can't check dtype when other is an ndarray
if check_dtypes and not isinstance(other, np.ndarray):
assert (rs.dtypes == df.dtypes).all()
for df in [self.mixed_frame, self.mixed_float, self.mixed_int]:
# other is a frame
cond = (df > 0)[1:]
_check_align(df, cond, _safe_add(df))
# check other is ndarray
cond = df > 0
_check_align(df, cond, (_safe_add(df).values))
# integers are upcast, so don't check the dtypes
cond = df > 0
check_dtypes = all([not issubclass(s.type, np.integer)
for s in df.dtypes])
_check_align(df, cond, np.nan, check_dtypes=check_dtypes)
# invalid conditions
df = default_frame
err1 = (df + 1).values[0:2, :]
pytest.raises(ValueError, df.where, cond, err1)
err2 = cond.iloc[:2, :].values
other1 = _safe_add(df)
pytest.raises(ValueError, df.where, err2, other1)
pytest.raises(ValueError, df.mask, True)
pytest.raises(ValueError, df.mask, 0)
# where inplace
def _check_set(df, cond, check_dtypes=True):
dfi = df.copy()
econd = cond.reindex_like(df).fillna(True)
expected = dfi.mask(~econd)
dfi.where(cond, np.nan, inplace=True)
assert_frame_equal(dfi, expected)
            # dtypes (and confirm upcasts)
if check_dtypes:
for k, v in compat.iteritems(df.dtypes):
if issubclass(v.type, np.integer) and not cond[k].all():
v = np.dtype('float64')
assert dfi[k].dtype == v
for df in [default_frame, self.mixed_frame, self.mixed_float,
self.mixed_int]:
cond = df > 0
_check_set(df, cond)
cond = df >= 0
_check_set(df, cond)
            # aligning
cond = (df >= 0)[1:]
_check_set(df, cond)
# GH 10218
# test DataFrame.where with Series slicing
df = DataFrame({'a': range(3), 'b': range(4, 7)})
result = df.where(df['a'] == 1)
expected = df[df['a'] == 1].reindex(df.index)
assert_frame_equal(result, expected)
def test_where_array_like(self):
# see gh-15414
klasses = [list, tuple, np.array]
df = DataFrame({'a': [1, 2, 3]})
cond = [[False], [True], [True]]
expected = DataFrame({'a': [np.nan, 2, 3]})
for klass in klasses:
result = df.where(klass(cond))
assert_frame_equal(result, expected)
df['b'] = 2
expected['b'] = [2, np.nan, 2]
cond = [[False, True], [True, False], [True, True]]
for klass in klasses:
result = df.where(klass(cond))
assert_frame_equal(result, expected)
def test_where_invalid_input(self):
# see gh-15414: only boolean arrays accepted
df = DataFrame({'a': [1, 2, 3]})
msg = "Boolean array expected for the condition"
conds = [
[[1], [0], [1]],
Series([[2], [5], [7]]),
DataFrame({'a': [2, 5, 7]}),
[["True"], ["False"], ["True"]],
[[Timestamp("2017-01-01")],
[pd.NaT], [Timestamp("2017-01-02")]]
]
for cond in conds:
with tm.assert_raises_regex(ValueError, msg):
df.where(cond)
df['b'] = 2
conds = [
[[0, 1], [1, 0], [1, 1]],
Series([[0, 2], [5, 0], [4, 7]]),
[["False", "True"], ["True", "False"],
["True", "True"]],
DataFrame({'a': [2, 5, 7], 'b': [4, 8, 9]}),
[[pd.NaT, Timestamp("2017-01-01")],
[Timestamp("2017-01-02"), pd.NaT],
[Timestamp("2017-01-03"), Timestamp("2017-01-03")]]
]
for cond in conds:
with tm.assert_raises_regex(ValueError, msg):
df.where(cond)
def test_where_dataframe_col_match(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = DataFrame([[True, False, True], [False, False, True]])
out = df.where(cond)
expected = DataFrame([[1.0, np.nan, 3], [np.nan, np.nan, 6]])
tm.assert_frame_equal(out, expected)
cond.columns = ["a", "b", "c"] # Columns no longer match.
msg = "Boolean array expected for the condition"
with tm.assert_raises_regex(ValueError, msg):
df.where(cond)
def test_where_ndframe_align(self):
msg = "Array conditional must be same shape as self"
df = DataFrame([[1, 2, 3], [4, 5, 6]])
cond = [True]
with tm.assert_raises_regex(ValueError, msg):
df.where(cond)
expected = DataFrame([[1, 2, 3], [np.nan, np.nan, np.nan]])
out = df.where(Series(cond))
tm.assert_frame_equal(out, expected)
cond = np.array([False, True, False, True])
with tm.assert_raises_regex(ValueError, msg):
df.where(cond)
expected = DataFrame([[np.nan, np.nan, np.nan], [4, 5, 6]])
out = df.where(Series(cond))
tm.assert_frame_equal(out, expected)
def test_where_bug(self):
# GH 2793
df = DataFrame({'a': [1.0, 2.0, 3.0, 4.0], 'b': [
4.0, 3.0, 2.0, 1.0]}, dtype='float64')
expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0], 'b': [
4.0, 3.0, np.nan, np.nan]}, dtype='float64')
result = df.where(df > 2, np.nan)
assert_frame_equal(result, expected)
result = df.copy()
result.where(result > 2, np.nan, inplace=True)
assert_frame_equal(result, expected)
# mixed
for dtype in ['int16', 'int8', 'int32', 'int64']:
df = DataFrame({'a': np.array([1, 2, 3, 4], dtype=dtype),
'b': np.array([4.0, 3.0, 2.0, 1.0],
dtype='float64')})
expected = DataFrame({'a': [np.nan, np.nan, 3.0, 4.0],
'b': [4.0, 3.0, np.nan, np.nan]},
dtype='float64')
result = df.where(df > 2, np.nan)
assert_frame_equal(result, expected)
result = df.copy()
result.where(result > 2, np.nan, inplace=True)
assert_frame_equal(result, expected)
# transpositional issue
# GH7506
a = DataFrame({0: [1, 2], 1: [3, 4], 2: [5, 6]})
b = DataFrame({0: [np.nan, 8], 1: [9, np.nan], 2: [np.nan, np.nan]})
do_not_replace = b.isnull() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace, b)
assert_frame_equal(result, expected)
a = DataFrame({0: [4, 6], 1: [1, 0]})
b = DataFrame({0: [np.nan, 3], 1: [3, np.nan]})
do_not_replace = b.isnull() | (a > b)
expected = a.copy()
expected[~do_not_replace] = b
result = a.where(do_not_replace, b)
assert_frame_equal(result, expected)
def test_where_datetime(self):
# GH 3311
df = DataFrame(dict(A=date_range('20130102', periods=5),
B=date_range('20130104', periods=5),
C=np.random.randn(5)))
stamp = datetime(2013, 1, 3)
result = df[df > stamp]
expected = df.copy()
expected.loc[[0, 1], 'A'] = np.nan
assert_frame_equal(result, expected)
def test_where_none(self):
# GH 4667
# setting with None changes dtype
df = DataFrame({'series': Series(range(10))}).astype(float)
df[df > 7] = None
expected = DataFrame(
{'series': Series([0, 1, 2, 3, 4, 5, 6, 7, np.nan, np.nan])})
assert_frame_equal(df, expected)
# GH 7656
df = DataFrame([{'A': 1, 'B': np.nan, 'C': 'Test'}, {
'A': np.nan, 'B': 'Test', 'C': np.nan}])
expected = df.where(~isnull(df), None)
with tm.assert_raises_regex(TypeError, 'boolean setting '
'on mixed-type'):
df.where(~isnull(df), None, inplace=True)
def test_where_align(self):
def create():
df = DataFrame(np.random.randn(10, 3))
df.iloc[3:5, 0] = np.nan
df.iloc[4:6, 1] = np.nan
df.iloc[5:8, 2] = np.nan
return df
# series
df = create()
expected = df.fillna(df.mean())
result = df.where(pd.notnull(df), df.mean(), axis='columns')
assert_frame_equal(result, expected)
df.where(pd.notnull(df), df.mean(), inplace=True, axis='columns')
assert_frame_equal(df, expected)
df = create().fillna(0)
expected = df.apply(lambda x, y: x.where(x > 0, y), y=df[0])
result = df.where(df > 0, df[0], axis='index')
assert_frame_equal(result, expected)
result = df.where(df > 0, df[0], axis='rows')
assert_frame_equal(result, expected)
# frame
df = create()
expected = df.fillna(1)
result = df.where(pd.notnull(df), DataFrame(
1, index=df.index, columns=df.columns))
assert_frame_equal(result, expected)
def test_where_complex(self):
# GH 6345
expected = DataFrame(
[[1 + 1j, 2], [np.nan, 4 + 1j]], columns=['a', 'b'])
df = DataFrame([[1 + 1j, 2], [5 + 1j, 4 + 1j]], columns=['a', 'b'])
df[df.abs() >= 5] = np.nan
assert_frame_equal(df, expected)
def test_where_axis(self):
# GH 9736
df = DataFrame(np.random.randn(2, 2))
mask = DataFrame([[False, False], [False, False]])
s = Series([0, 1])
expected = DataFrame([[0, 0], [1, 1]], dtype='float64')
result = df.where(mask, s, axis='index')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s, axis='index', inplace=True)
assert_frame_equal(result, expected)
expected = DataFrame([[0, 1], [0, 1]], dtype='float64')
result = df.where(mask, s, axis='columns')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s, axis='columns', inplace=True)
assert_frame_equal(result, expected)
# Upcast needed
df = DataFrame([[1, 2], [3, 4]], dtype='int64')
mask = DataFrame([[False, False], [False, False]])
s = Series([0, np.nan])
expected = DataFrame([[0, 0], [np.nan, np.nan]], dtype='float64')
result = df.where(mask, s, axis='index')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s, axis='index', inplace=True)
assert_frame_equal(result, expected)
expected = DataFrame([[0, np.nan], [0, np.nan]], dtype='float64')
result = df.where(mask, s, axis='columns')
assert_frame_equal(result, expected)
expected = DataFrame({0: np.array([0, 0], dtype='int64'),
1: np.array([np.nan, np.nan], dtype='float64')})
result = df.copy()
result.where(mask, s, axis='columns', inplace=True)
assert_frame_equal(result, expected)
# Multiple dtypes (=> multiple Blocks)
df = pd.concat([DataFrame(np.random.randn(10, 2)),
DataFrame(np.random.randint(0, 10, size=(10, 2)))],
ignore_index=True, axis=1)
mask = DataFrame(False, columns=df.columns, index=df.index)
s1 = Series(1, index=df.columns)
s2 = Series(2, index=df.index)
result = df.where(mask, s1, axis='columns')
expected = DataFrame(1.0, columns=df.columns, index=df.index)
expected[2] = expected[2].astype(int)
expected[3] = expected[3].astype(int)
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s1, axis='columns', inplace=True)
assert_frame_equal(result, expected)
result = df.where(mask, s2, axis='index')
expected = DataFrame(2.0, columns=df.columns, index=df.index)
expected[2] = expected[2].astype(int)
expected[3] = expected[3].astype(int)
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, s2, axis='index', inplace=True)
assert_frame_equal(result, expected)
# DataFrame vs DataFrame
d1 = df.copy().drop(1, axis=0)
expected = df.copy()
expected.loc[1, :] = np.nan
result = df.where(mask, d1)
assert_frame_equal(result, expected)
result = df.where(mask, d1, axis='index')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, d1, inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, d1, inplace=True, axis='index')
assert_frame_equal(result, expected)
d2 = df.copy().drop(1, axis=1)
expected = df.copy()
expected.loc[:, 1] = np.nan
result = df.where(mask, d2)
assert_frame_equal(result, expected)
result = df.where(mask, d2, axis='columns')
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, d2, inplace=True)
assert_frame_equal(result, expected)
result = df.copy()
result.where(mask, d2, inplace=True, axis='columns')
assert_frame_equal(result, expected)
def test_where_callable(self):
# GH 12533
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.where(lambda x: x > 4, lambda x: x + 1)
exp = DataFrame([[2, 3, 4], [5, 5, 6], [7, 8, 9]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df > 4, df + 1))
# return ndarray and scalar
result = df.where(lambda x: (x % 2 == 0).values, lambda x: 99)
exp = DataFrame([[99, 2, 99], [4, 99, 6], [99, 8, 99]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.where(df % 2 == 0, 99))
# chain
result = (df + 2).where(lambda x: x > 8, lambda x: x + 10)
exp = DataFrame([[13, 14, 15], [16, 17, 18], [9, 10, 11]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result,
(df + 2).where((df + 2) > 8, (df + 2) + 10))
def test_mask(self):
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rs = df.where(cond, np.nan)
assert_frame_equal(rs, df.mask(df <= 0))
assert_frame_equal(rs, df.mask(~cond))
other = DataFrame(np.random.randn(5, 3))
rs = df.where(cond, other)
assert_frame_equal(rs, df.mask(df <= 0, other))
assert_frame_equal(rs, df.mask(~cond, other))
def test_mask_inplace(self):
# GH8801
df = DataFrame(np.random.randn(5, 3))
cond = df > 0
rdf = df.copy()
rdf.where(cond, inplace=True)
assert_frame_equal(rdf, df.where(cond))
assert_frame_equal(rdf, df.mask(~cond))
rdf = df.copy()
rdf.where(cond, -df, inplace=True)
assert_frame_equal(rdf, df.where(cond, -df))
assert_frame_equal(rdf, df.mask(~cond, -df))
def test_mask_edge_case_1xN_frame(self):
# GH4071
df = DataFrame([[1, 2]])
res = df.mask(DataFrame([[True, False]]))
expec = DataFrame([[nan, 2]])
assert_frame_equal(res, expec)
def test_mask_callable(self):
# GH 12533
df = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
result = df.mask(lambda x: x > 4, lambda x: x + 1)
exp = DataFrame([[1, 2, 3], [4, 6, 7], [8, 9, 10]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.mask(df > 4, df + 1))
# return ndarray and scalar
result = df.mask(lambda x: (x % 2 == 0).values, lambda x: 99)
exp = DataFrame([[1, 99, 3], [99, 5, 99], [7, 99, 9]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result, df.mask(df % 2 == 0, 99))
# chain
result = (df + 2).mask(lambda x: x > 8, lambda x: x + 10)
exp = DataFrame([[3, 4, 5], [6, 7, 8], [19, 20, 21]])
tm.assert_frame_equal(result, exp)
tm.assert_frame_equal(result,
(df + 2).mask((df + 2) > 8, (df + 2) + 10))
def test_head_tail(self):
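        # head/tail are positional and mirror the corresponding slices,
        # including negative counts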
assert_frame_equal(self.frame.head(), self.frame[:5])
assert_frame_equal(self.frame.tail(), self.frame[-5:])
assert_frame_equal(self.frame.head(0), self.frame[0:0])
assert_frame_equal(self.frame.tail(0), self.frame[0:0])
assert_frame_equal(self.frame.head(-1), self.frame[:-1])
assert_frame_equal(self.frame.tail(-1), self.frame[1:])
assert_frame_equal(self.frame.head(1), self.frame[:1])
assert_frame_equal(self.frame.tail(1), self.frame[-1:])
# with a float index
df = self.frame.copy()
df.index = np.arange(len(self.frame)) + 0.1
assert_frame_equal(df.head(), df.iloc[:5])
assert_frame_equal(df.tail(), df.iloc[-5:])
assert_frame_equal(df.head(0), df[0:0])
assert_frame_equal(df.tail(0), df[0:0])
assert_frame_equal(df.head(-1), df.iloc[:-1])
assert_frame_equal(df.tail(-1), df.iloc[1:])
# test empty dataframe
empty_df = DataFrame()
assert_frame_equal(empty_df.tail(), empty_df)
assert_frame_equal(empty_df.head(), empty_df)
def test_type_error_multiindex(self):
# See gh-12218
df = DataFrame(columns=['i', 'c', 'x', 'y'],
data=[[0, 0, 1, 2], [1, 0, 3, 4],
[0, 1, 1, 2], [1, 1, 3, 4]])
dg = df.pivot_table(index='i', columns='c',
values=['x', 'y'])
with tm.assert_raises_regex(TypeError, "is an invalid key"):
str(dg[:, 0])
index = Index(range(2), name='i')
columns = MultiIndex(levels=[['x', 'y'], [0, 1]],
labels=[[0, 1], [0, 0]],
names=[None, 'c'])
expected = DataFrame([[1, 2], [3, 4]], columns=columns, index=index)
result = dg.loc[:, (slice(None), 0)]
assert_frame_equal(result, expected)
name = ('x', 0)
index = Index(range(2), name='i')
expected = Series([1, 3], index=index, name=name)
result = dg['x', 0]
assert_series_equal(result, expected)
class TestDataFrameIndexingDatetimeWithTZ(TestData):
def setup_method(self, method):
self.idx = Index(date_range('20130101', periods=3, tz='US/Eastern'),
name='foo')
self.dr = date_range('20130110', periods=3)
self.df = DataFrame({'A': self.idx, 'B': self.dr})
def test_setitem(self):
df = self.df
idx = self.idx
# setitem
df['C'] = idx
assert_series_equal(df['C'], Series(idx, name='C'))
df['D'] = 'foo'
df['D'] = idx
assert_series_equal(df['D'], Series(idx, name='D'))
del df['D']
# assert that A & C are not sharing the same base (e.g. they
# are copies)
b1 = df._data.blocks[1]
b2 = df._data.blocks[2]
assert b1.values.equals(b2.values)
assert id(b1.values.values.base) != id(b2.values.values.base)
# with nan
df2 = df.copy()
df2.iloc[1, 1] = pd.NaT
df2.iloc[1, 2] = pd.NaT
result = df2['B']
assert_series_equal(notnull(result), Series(
[True, False, True], name='B'))
assert_series_equal(df2.dtypes, df.dtypes)
def test_set_reset(self):
idx = self.idx
# set/reset
df = DataFrame({'A': [0, 1, 2]}, index=idx)
result = df.reset_index()
        assert result['foo'].dtype == 'datetime64[ns, US/Eastern]'
df = result.set_index('foo')
tm.assert_index_equal(df.index, idx)
def test_transpose(self):
result = self.df.T
expected = DataFrame(self.df.values.T)
expected.index = ['A', 'B']
assert_frame_equal(result, expected)
class TestDataFrameIndexingUInt64(TestData):
def setup_method(self, method):
self.ir = Index(np.arange(3), dtype=np.uint64)
self.idx = Index([2**63, 2**63 + 5, 2**63 + 10], name='foo')
self.df = DataFrame({'A': self.idx, 'B': self.ir})
def test_setitem(self):
df = self.df
idx = self.idx
# setitem
df['C'] = idx
assert_series_equal(df['C'], Series(idx, name='C'))
df['D'] = 'foo'
df['D'] = idx
assert_series_equal(df['D'], Series(idx, name='D'))
del df['D']
# With NaN: because uint64 has no NaN element,
# the column should be cast to object.
df2 = df.copy()
df2.iloc[1, 1] = pd.NaT
df2.iloc[1, 2] = pd.NaT
result = df2['B']
assert_series_equal(notnull(result), Series(
[True, False, True], name='B'))
assert_series_equal(df2.dtypes, Series([np.dtype('uint64'),
np.dtype('O'), np.dtype('O')],
index=['A', 'B', 'C']))
def test_set_reset(self):
idx = self.idx
# set/reset
df = DataFrame({'A': [0, 1, 2]}, index=idx)
result = df.reset_index()
assert result['foo'].dtype == np.dtype('uint64')
df = result.set_index('foo')
tm.assert_index_equal(df.index, idx)
def test_transpose(self):
result = self.df.T
expected = DataFrame(self.df.values.T)
expected.index = ['A', 'B']
assert_frame_equal(result, expected)
| mit |
BhallaLab/moose-examples | traub_2005/py/test_singlecomp.py | 2 | 7203 | # test_singlecomp.py ---
#
# Filename: test_singlecomp.py
# Description:
# Author: Subhasis Ray
# Maintainer:
# Created: Tue Jul 17 21:01:14 2012 (+0530)
# Version:
# Last-Updated: Sun Jun 25 15:37:21 2017 (-0400)
# By: subha
# Update #: 320
# URL:
# Keywords:
# Compatibility:
#
#
# Commentary:
#
# Test the ion channels with a single compartment.
#
#
# Change log:
#
# 2012-07-17 22:22:23 (+0530) Tested NaF2 and NaPF_SS against neuron
# test case.
#
#
# Code:
import os
os.environ['NUMPTHREADS'] = '1'
import uuid
import unittest
from datetime import datetime
import sys
sys.path.append('../../../python')
import numpy as np
from matplotlib import pyplot as plt
import moose
from testutils import *
from nachans import *
from kchans import *
from archan import *
from cachans import *
from capool import *
simdt = 0.25e-4
plotdt = 0.25e-4
simtime = 350e-3
erev = {
'K': -100e-3,
'Na': 50e-3,
'Ca': 125e-3,
'AR': -40e-3
}
channel_density = {
'NaF2': 1500.0,
'NaPF_SS': 1.5,
'KDR_FS': 1000.0,
'KC_FAST': 100.0,
'KA': 300.0,
'KM': 37.5,
'K2': 1.0,
'KAHP_SLOWER': 1.0,
'CaL': 5.0,
'CaT_A': 1.0,
'AR': 2.5
}
compartment_properties = {
'length': 20e-6,
'diameter': 2e-6 * 7.5,
'initVm': -65e-3,
'Em': -65e-3,
'Rm': 5.0,
'Cm': 9e-3,
'Ra': 1.0,
'specific': True}
stimulus = [[100e-3, 50e-3, 3e-10], # delay[0], width[0], level[0]
[1e9, 0, 0]]
def create_compartment(path, length, diameter, initVm, Em, Rm, Cm, Ra, specific=False):
comp = moose.Compartment(path)
comp.length = length
comp.diameter = diameter
comp.initVm = initVm
comp.Em = Em
if not specific:
comp.Rm = Rm
comp.Cm = Cm
comp.Ra = Ra
else:
sarea = np.pi * length * diameter
comp.Rm = Rm / sarea
comp.Cm = Cm * sarea
comp.Ra = 4.0 * Ra * length / (np.pi * diameter * diameter)
return comp
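# Illustrative sketch (not part of the original test): with specific=True the
# passive parameters are treated as specific quantities and scaled by the
# cylindrical surface area pi*length*diameter computed above, i.e.
# Rm -> Rm/(pi*l*d), Cm -> Cm*(pi*l*d) and Ra -> 4*Ra*l/(pi*d**2).
# A hypothetical call using the same values as the compartment properties
# dict defined above:
#
#   soma = create_compartment('/model/soma', length=20e-6, diameter=15e-6,
#                             initVm=-65e-3, Em=-65e-3,
#                             Rm=5.0, Cm=9e-3, Ra=1.0, specific=True)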
def insert_channel(compartment, channelclass, gbar, density=False):
    channel = moose.copy(channelclass.prototype, compartment)[0]
if not density:
channel.Gbar = gbar
else:
channel.Gbar = gbar * np.pi * compartment.length * compartment.diameter
moose.connect(channel, 'channel', compartment, 'channel')
return channel
def insert_ca(compartment, phi, tau):
ca = moose.copy(CaPool.prototype, compartment)[0]
ca.B = phi / (np.pi * compartment.length * compartment.diameter)
ca.tau = tau
print( ca.path, ca.B, ca.tau)
for chan in moose.wildcardFind('%s/#[TYPE=HHChannel]' % (compartment.path)):
if chan.name.startswith('KC') or chan.name.startswith('KAHP'):
moose.connect(ca, 'concOut', chan, 'concen')
elif chan.name.startswith('CaL'):
moose.connect(chan, 'IkOut', ca, 'current')
else:
continue
moose.showfield(chan)
return ca
class TestSingleComp(unittest.TestCase):
def setUp(self):
self.testId = uuid.uuid4().int
self.container = moose.Neutral('test%d' % (self.testId))
self.model = moose.Neutral('%s/model' % (self.container.path))
self.data = moose.Neutral('%s/data' % (self.container.path))
self.soma = create_compartment('%s/soma' % (self.model.path),
                                       **compartment_properties)
self.tables = {}
tab = moose.Table('%s/Vm' % (self.data.path))
self.tables['Vm'] = tab
moose.connect(tab, 'requestOut', self.soma, 'getVm')
for channelname, conductance in list(channel_density.items()):
chanclass = eval(channelname)
channel = insert_channel(self.soma, chanclass, conductance, density=True)
if issubclass(chanclass, KChannel):
channel.Ek = erev['K']
elif issubclass(chanclass, NaChannel):
channel.Ek = erev['Na']
elif issubclass(chanclass, CaChannel):
channel.Ek = erev['Ca']
elif issubclass(chanclass, AR):
channel.Ek = erev['AR']
tab = moose.Table('%s/%s' % (self.data.path, channelname))
moose.connect(tab, 'requestOut', channel, 'getGk')
self.tables['Gk_'+channel.name] = tab
archan = moose.HHChannel(self.soma.path + '/AR')
archan.X = 0.0
ca = insert_ca(self.soma, 2.6e7, 50e-3)
tab = moose.Table('%s/Ca' % (self.data.path))
self.tables['Ca'] = tab
moose.connect(tab, 'requestOut', ca, 'getCa')
self.pulsegen = moose.PulseGen('%s/inject' % (self.model.path))
moose.connect(self.pulsegen, 'output', self.soma, 'injectMsg')
tab = moose.Table('%s/injection' % (self.data.path))
moose.connect(tab, 'requestOut', self.pulsegen, 'getOutputValue')
self.tables['pulsegen'] = tab
self.pulsegen.count = len(stimulus)
for ii in range(len(stimulus)):
self.pulsegen.delay[ii] = stimulus[ii][0]
self.pulsegen.width[ii] = stimulus[ii][1]
self.pulsegen.level[ii] = stimulus[ii][2]
setup_clocks(simdt, plotdt)
assign_clocks(self.model, self.data)
moose.reinit()
start = datetime.now()
moose.start(simtime)
end = datetime.now()
delta = end - start
print( 'Simulation of %g s finished in %g s' % (simtime, delta.seconds + delta.microseconds*1e-6))
def testDefault(self):
vm_axis = plt.subplot(2,1,1)
ca_axis = plt.subplot(2,1,2)
try:
fname = os.path.join(config.mydir, 'nrn', 'data', 'singlecomp_Vm.dat')
nrndata = np.loadtxt(fname)
vm_axis.plot(nrndata[:,0], nrndata[:,1], label='Vm (mV) - nrn')
ca_axis.plot(nrndata[:,0], nrndata[:,2], label='Ca (mM) - nrn')
except IOError as e:
print(e)
tseries = np.linspace(0, simtime, len(self.tables['Vm'].vector)) * 1e3
# plotcount = len(channel_density) + 1
# rows = int(np.sqrt(plotcount) + 0.5)
# columns = int(plotcount * 1.0/rows + 0.5)
# print plotcount, rows, columns
# plt.subplot(rows, columns, 1)
vm_axis.plot(tseries, self.tables['Vm'].vector * 1e3, label='Vm (mV) - moose')
vm_axis.plot(tseries, self.tables['pulsegen'].vector * 1e12, label='inject (pA)')
ca_axis.plot(tseries, self.tables['Ca'].vector, label='Ca (mM) - moose')
vm_axis.legend()
ca_axis.legend()
# ii = 2
# for key, value in self.tables.items():
# if key.startswith('Gk'):
# plt.subplot(rows, columns, ii)
# plt.plot(tseries, value.vector, label=key)
# ii += 1
# plt.legend()
plt.show()
data = np.vstack((tseries*1e-3,
self.tables['Vm'].vector,
self.tables['Ca'].vector))
np.savetxt(os.path.join(config.data_dir, 'singlecomp_Vm.dat'),
np.transpose(data))
if __name__ == '__main__':
unittest.main()
#
# test_singlecomp.py ends here
| gpl-2.0 |
mantidproject/mantid | qt/python/mantidqt/gui_helper.py | 3 | 5994 | # Mantid Repository : https://github.com/mantidproject/mantid
#
# Copyright © 2018 ISIS Rutherford Appleton Laboratory UKRI,
# NScD Oak Ridge National Laboratory, European Spallation Source,
# Institut Laue - Langevin & CSNS, Institute of High Energy Physics, CAS
# SPDX - License - Identifier: GPL - 3.0 +
from qtpy.QtWidgets import (QApplication) # noqa
from qtpy import QtCore, QtGui
import matplotlib
import sys
import os
try:
from mantid import __version__ as __mtd_version
from mantid import _bindir as __mtd_bin_dir
# convert to major.minor
__mtd_version = '.'.join(__mtd_version.split(".")[:2])
except ImportError: # mantid not found
__mtd_version = ''
__mtd_bin_dir=''
def set_matplotlib_backend():
'''MUST be called before anything tries to use matplotlib
This will set the backend if it hasn't been already. It also returns
the name of the backend to be the name to be used for importing the
correct matplotlib widgets.'''
backend = matplotlib.get_backend()
if backend.startswith('module://'):
if backend.endswith('qt4agg'):
backend = 'Qt4Agg'
elif backend.endswith('workbench') or backend.endswith('qt5agg'):
backend = 'Qt5Agg'
else:
from qtpy import PYQT4, PYQT5 # noqa
if PYQT5:
backend = 'Qt5Agg'
elif PYQT4:
backend = 'Qt4Agg'
else:
raise RuntimeError('Do not know which matplotlib backend to set')
matplotlib.use(backend)
return backend
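# Minimal usage sketch (illustrative, not part of the original module): the
# returned backend name can be used to import the matching matplotlib canvas
# class for embedding plots in a Qt widget.
#
#   backend = set_matplotlib_backend()
#   if backend == 'Qt5Agg':
#       from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg
#   else:
#       from matplotlib.backends.backend_qt4agg import FigureCanvasQTAgg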
def get_qapplication():
''' Example usage:
app, within_mantid = get_qapplication()
reducer = eventFilterGUI.MainWindow() # the main ui class in this file
reducer.show()
if not within_mantid:
sys.exit(app.exec_())'''
app = QApplication.instance()
if app:
return app, app.applicationName().lower().startswith('mantid')
else:
return QApplication(sys.argv), False
def __to_external_url(interface_name: str, section: str, external_url: str) -> QtCore.QUrl:
if not external_url:
template = 'http://docs.mantidproject.org/nightly/interfaces/{}/{}.html'
external_url = template.format(section, interface_name)
return QtCore.QUrl(external_url)
def __to_qthelp_url(interface_name: str, section: str, qt_url: str) -> str:
if qt_url:
return qt_url
else:
template = 'qthelp://org.sphinx.mantidproject.{}/doc/interfaces/{}/{}.html'
return template.format(__mtd_version, section, interface_name)
def __get_collection_file(collection_file: str) -> str:
if not collection_file:
if not __mtd_bin_dir:
return 'HELP COLLECTION FILE NOT FOUND'
else:
collection_file = os.path.join(__mtd_bin_dir, '../docs/qthelp/MantidProject.qhc')
return os.path.abspath(collection_file)
def show_interface_help(mantidplot_name, assistant_process, area: str='',
collection_file: str='',
qt_url: str='', external_url: str=""):
''' Shows the help page for a custom interface
@param mantidplot_name: used by showCustomInterfaceHelp
@param assistant_process: needs to be started/closed from outside (see example below)
    @param collection_file: qt help collection file (.qhc) containing the help in the format used by qtassistant. The default is
``mantid._bindir + '../docs/qthelp/MantidProject.qhc'``
@param qt_url: location of the help in the qth file. The default value is
``qthelp://org.sphinx.mantidproject.{mtdversion}/doc/interfaces/{mantidplot_name}.html``.
@param external_url: location of external page to be displayed in the default browser. The default value is
``http://docs.mantidproject.org/nightly/interfaces/framework/{mantidplot_name}.html``
Example using defaults:
#in the __init__ function of the GUI add:
self.assistant_process = QtCore.QProcess(self)
self.mantidplot_name='DGS Planner'
#add a help function in the GUI
def help(self):
show_interface_help(self.mantidplot_name,
self.assistant_process)
#make sure you close the qtassistant when the GUI is closed
def closeEvent(self, event):
self.assistant_process.close()
self.assistant_process.waitForFinished()
event.accept()
'''
try:
# try using built-in help in mantid
import mantidqt
mantidqt.interfacemanager.InterfaceManager().showCustomInterfaceHelp(mantidplot_name, area)
except: #(ImportError, ModuleNotFoundError) raises the wrong type of error
# built-in help failed, try external qtassistant then give up and launch a browser
# cleanup previous version
assistant_process.close()
assistant_process.waitForFinished()
# where to expect qtassistant
helpapp = QtCore.QLibraryInfo.location(QtCore.QLibraryInfo.BinariesPath) + QtCore.QDir.separator()
helpapp += 'assistant'
collection_file = __get_collection_file(collection_file)
if os.path.isfile(helpapp) and os.path.isfile(collection_file):
# try to find the collection file and launch qtassistant
args = ['-enableRemoteControl',
'-collectionFile', collection_file,
'-showUrl', __to_qthelp_url(mantidplot_name, area, qt_url)]
assistant_process.close()
assistant_process.waitForFinished()
assistant_process.start(helpapp, args)
else:
            # give up and open a URL in the default browser
openUrl=QtGui.QDesktopServices.openUrl
sysenv=QtCore.QProcessEnvironment.systemEnvironment()
ldp=sysenv.value('LD_PRELOAD')
if ldp:
del os.environ['LD_PRELOAD']
# create a url to the help in the default location
openUrl(__to_external_url(mantidplot_name, area, external_url))
if ldp:
os.environ['LD_PRELOAD']=ldp
| gpl-3.0 |
joshloyal/scikit-learn | sklearn/metrics/regression.py | 47 | 19967 | """Metrics to assess performance on regression task
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck
# Joel Nothman <joel.nothman@gmail.com>
# Karan Desai <karandesai281196@gmail.com>
# Noel Dawe <noel@dawe.me>
# Manoj Kumar <manojkumarsivaraj334@gmail.com>
# Michael Eickenberg <michael.eickenberg@gmail.com>
# Konstantin Shmelkov <konstantin.shmelkov@polytechnique.edu>
# License: BSD 3 clause
from __future__ import division
import numpy as np
from ..utils.validation import check_array, check_consistent_length
from ..utils.validation import column_or_1d
from ..externals.six import string_types
__ALL__ = [
"mean_absolute_error",
"mean_squared_error",
"mean_squared_log_error",
"median_absolute_error",
"r2_score",
"explained_variance_score"
]
def _check_reg_targets(y_true, y_pred, multioutput):
"""Check that y_true and y_pred belong to the same regression task
Parameters
----------
y_true : array-like,
y_pred : array-like,
    multioutput : array-like or string in ['raw_values', 'uniform_average',
'variance_weighted'] or None
None is accepted due to backward compatibility of r2_score().
Returns
-------
    type_true : one of {'continuous', 'continuous-multioutput'}
The type of the true target data, as output by
'utils.multiclass.type_of_target'
y_true : array-like of shape = (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples, n_outputs)
Estimated target values.
multioutput : array-like of shape = (n_outputs) or string in ['raw_values',
        'uniform_average', 'variance_weighted'] or None
Custom output weights if ``multioutput`` is array-like or
just the corresponding argument if ``multioutput`` is a
correct keyword.
"""
check_consistent_length(y_true, y_pred)
y_true = check_array(y_true, ensure_2d=False)
y_pred = check_array(y_pred, ensure_2d=False)
if y_true.ndim == 1:
y_true = y_true.reshape((-1, 1))
if y_pred.ndim == 1:
y_pred = y_pred.reshape((-1, 1))
if y_true.shape[1] != y_pred.shape[1]:
raise ValueError("y_true and y_pred have different number of output "
"({0}!={1})".format(y_true.shape[1], y_pred.shape[1]))
n_outputs = y_true.shape[1]
allowed_multioutput_str = ('raw_values', 'uniform_average',
'variance_weighted')
if isinstance(multioutput, string_types):
if multioutput not in allowed_multioutput_str:
raise ValueError("Allowed 'multioutput' string values are {}. "
"You provided multioutput={!r}".format(
allowed_multioutput_str,
multioutput))
elif multioutput is not None:
multioutput = check_array(multioutput, ensure_2d=False)
if n_outputs == 1:
raise ValueError("Custom weights are useful only in "
"multi-output cases.")
elif n_outputs != len(multioutput):
raise ValueError(("There must be equally many custom weights "
"(%d) as outputs (%d).") %
(len(multioutput), n_outputs))
y_type = 'continuous' if n_outputs == 1 else 'continuous-multioutput'
return y_type, y_true, y_pred, multioutput
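# Illustrative behaviour of the helper above (not an executable doctest):
# 1d inputs are promoted to column vectors of shape (n_samples, 1), the type
# string is derived from the number of columns, and a valid ``multioutput``
# argument is passed through unchanged.
#
#   y_type, yt, yp, mo = _check_reg_targets([3, -0.5, 2], [2.5, 0.0, 2],
#                                           'uniform_average')
#   # y_type == 'continuous', yt.shape == (3, 1), yp.shape == (3, 1),
#   # mo == 'uniform_average'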
def mean_absolute_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean absolute error regression loss
Read more in the :ref:`User Guide <mean_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
If multioutput is 'raw_values', then mean absolute error is returned
for each output separately.
If multioutput is 'uniform_average' or an ndarray of weights, then the
weighted average of all output errors is returned.
MAE output is non-negative floating point. The best value is 0.0.
Examples
--------
>>> from sklearn.metrics import mean_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_absolute_error(y_true, y_pred)
0.5
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> mean_absolute_error(y_true, y_pred)
0.75
>>> mean_absolute_error(y_true, y_pred, multioutput='raw_values')
array([ 0.5, 1. ])
>>> mean_absolute_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.849...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average(np.abs(y_pred - y_true),
weights=sample_weight, axis=0)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared error regression loss
Read more in the :ref:`User Guide <mean_squared_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average']
or array-like of shape (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors in case of multioutput input.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> mean_squared_error(y_true, y_pred)
0.375
>>> y_true = [[0.5, 1],[-1, 1],[7, -6]]
>>> y_pred = [[0, 2],[-1, 2],[8, -5]]
>>> mean_squared_error(y_true, y_pred) # doctest: +ELLIPSIS
0.708...
>>> mean_squared_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.416..., 1. ])
>>> mean_squared_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.824...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
output_errors = np.average((y_true - y_pred) ** 2, axis=0,
weights=sample_weight)
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
return output_errors
elif multioutput == 'uniform_average':
# pass None as weights to np.average: uniform mean
multioutput = None
return np.average(output_errors, weights=multioutput)
def mean_squared_log_error(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Mean squared logarithmic error regression loss
Read more in the :ref:`User Guide <mean_squared_log_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average'] \
or array-like of shape = (n_outputs)
Defines aggregating of multiple output values.
Array-like value defines weights used to average errors.
'raw_values' :
Returns a full set of errors when the input is of multioutput
format.
'uniform_average' :
Errors of all outputs are averaged with uniform weight.
Returns
-------
loss : float or ndarray of floats
A non-negative floating point value (the best value is 0.0), or an
array of floating point values, one for each individual target.
Examples
--------
>>> from sklearn.metrics import mean_squared_log_error
>>> y_true = [3, 5, 2.5, 7]
>>> y_pred = [2.5, 5, 4, 8]
>>> mean_squared_log_error(y_true, y_pred) # doctest: +ELLIPSIS
0.039...
>>> y_true = [[0.5, 1], [1, 2], [7, 6]]
>>> y_pred = [[0.5, 2], [1, 2.5], [8, 8]]
>>> mean_squared_log_error(y_true, y_pred) # doctest: +ELLIPSIS
0.044...
>>> mean_squared_log_error(y_true, y_pred, multioutput='raw_values')
... # doctest: +ELLIPSIS
array([ 0.004..., 0.083...])
>>> mean_squared_log_error(y_true, y_pred, multioutput=[0.3, 0.7])
... # doctest: +ELLIPSIS
0.060...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
    if (y_true < 0).any() or (y_pred < 0).any():
raise ValueError("Mean Squared Logarithmic Error cannot be used when "
"targets contain negative values.")
return mean_squared_error(np.log(y_true + 1), np.log(y_pred + 1),
sample_weight, multioutput)
def median_absolute_error(y_true, y_pred):
"""Median absolute error regression loss
Read more in the :ref:`User Guide <median_absolute_error>`.
Parameters
----------
y_true : array-like of shape = (n_samples)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples)
Estimated target values.
Returns
-------
loss : float
A positive floating point value (the best value is 0.0).
Examples
--------
>>> from sklearn.metrics import median_absolute_error
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> median_absolute_error(y_true, y_pred)
0.5
"""
y_type, y_true, y_pred, _ = _check_reg_targets(y_true, y_pred,
'uniform_average')
if y_type == 'continuous-multioutput':
raise ValueError("Multioutput not supported in median_absolute_error")
return np.median(np.abs(y_pred - y_true))
def explained_variance_score(y_true, y_pred,
sample_weight=None,
multioutput='uniform_average'):
"""Explained variance regression score function
Best possible score is 1.0, lower values are worse.
Read more in the :ref:`User Guide <explained_variance_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
Returns
-------
score : float or ndarray of floats
The explained variance or ndarray if 'multioutput' is 'raw_values'.
Notes
-----
This is not a symmetric function.
Examples
--------
>>> from sklearn.metrics import explained_variance_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> explained_variance_score(y_true, y_pred) # doctest: +ELLIPSIS
0.957...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> explained_variance_score(y_true, y_pred, multioutput='uniform_average')
... # doctest: +ELLIPSIS
0.983...
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
y_diff_avg = np.average(y_true - y_pred, weights=sample_weight, axis=0)
numerator = np.average((y_true - y_pred - y_diff_avg) ** 2,
weights=sample_weight, axis=0)
y_true_avg = np.average(y_true, weights=sample_weight, axis=0)
denominator = np.average((y_true - y_true_avg) ** 2,
weights=sample_weight, axis=0)
nonzero_numerator = numerator != 0
nonzero_denominator = denominator != 0
valid_score = nonzero_numerator & nonzero_denominator
output_scores = np.ones(y_true.shape[1])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing to np.average() None as weights results is uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
def r2_score(y_true, y_pred, sample_weight=None,
multioutput="uniform_average"):
"""R^2 (coefficient of determination) regression score function.
Best possible score is 1.0 and it can be negative (because the
model can be arbitrarily worse). A constant model that always
predicts the expected value of y, disregarding the input features,
would get a R^2 score of 0.0.
Read more in the :ref:`User Guide <r2_score>`.
Parameters
----------
y_true : array-like of shape = (n_samples) or (n_samples, n_outputs)
Ground truth (correct) target values.
y_pred : array-like of shape = (n_samples) or (n_samples, n_outputs)
Estimated target values.
sample_weight : array-like of shape = (n_samples), optional
Sample weights.
multioutput : string in ['raw_values', 'uniform_average', \
'variance_weighted'] or None or array-like of shape (n_outputs)
Defines aggregating of multiple output scores.
Array-like value defines weights used to average scores.
Default is "uniform_average".
'raw_values' :
Returns a full set of scores in case of multioutput input.
'uniform_average' :
Scores of all outputs are averaged with uniform weight.
'variance_weighted' :
Scores of all outputs are averaged, weighted by the variances
of each individual output.
.. versionchanged:: 0.19
Default value of multioutput is 'uniform_average'.
Returns
-------
z : float or ndarray of floats
The R^2 score or ndarray of scores if 'multioutput' is
'raw_values'.
Notes
-----
This is not a symmetric function.
Unlike most other scores, R^2 score may be negative (it need not actually
be the square of a quantity R).
References
----------
.. [1] `Wikipedia entry on the Coefficient of determination
<https://en.wikipedia.org/wiki/Coefficient_of_determination>`_
Examples
--------
>>> from sklearn.metrics import r2_score
>>> y_true = [3, -0.5, 2, 7]
>>> y_pred = [2.5, 0.0, 2, 8]
>>> r2_score(y_true, y_pred) # doctest: +ELLIPSIS
0.948...
>>> y_true = [[0.5, 1], [-1, 1], [7, -6]]
>>> y_pred = [[0, 2], [-1, 2], [8, -5]]
>>> r2_score(y_true, y_pred, multioutput='variance_weighted')
... # doctest: +ELLIPSIS
0.938...
>>> y_true = [1,2,3]
>>> y_pred = [1,2,3]
>>> r2_score(y_true, y_pred)
1.0
>>> y_true = [1,2,3]
>>> y_pred = [2,2,2]
>>> r2_score(y_true, y_pred)
0.0
>>> y_true = [1,2,3]
>>> y_pred = [3,2,1]
>>> r2_score(y_true, y_pred)
-3.0
"""
y_type, y_true, y_pred, multioutput = _check_reg_targets(
y_true, y_pred, multioutput)
if sample_weight is not None:
sample_weight = column_or_1d(sample_weight)
weight = sample_weight[:, np.newaxis]
else:
weight = 1.
numerator = (weight * (y_true - y_pred) ** 2).sum(axis=0,
dtype=np.float64)
denominator = (weight * (y_true - np.average(
y_true, axis=0, weights=sample_weight)) ** 2).sum(axis=0,
dtype=np.float64)
nonzero_denominator = denominator != 0
nonzero_numerator = numerator != 0
valid_score = nonzero_denominator & nonzero_numerator
output_scores = np.ones([y_true.shape[1]])
output_scores[valid_score] = 1 - (numerator[valid_score] /
denominator[valid_score])
# arbitrary set to zero to avoid -inf scores, having a constant
# y_true is not interesting for scoring a regression anyway
output_scores[nonzero_numerator & ~nonzero_denominator] = 0.
if isinstance(multioutput, string_types):
if multioutput == 'raw_values':
# return scores individually
return output_scores
elif multioutput == 'uniform_average':
# passing None as weights results is uniform mean
avg_weights = None
elif multioutput == 'variance_weighted':
avg_weights = denominator
# avoid fail on constant y or one-element arrays
if not np.any(nonzero_denominator):
if not np.any(nonzero_numerator):
return 1.0
else:
return 0.0
else:
avg_weights = multioutput
return np.average(output_scores, weights=avg_weights)
| bsd-3-clause |
andre-richter/pcie-lat | all_in_one.py | 1 | 6054 | #!/usr/bin/python
import sys
import os
import numpy as np
import matplotlib
import matplotlib.mlab as mlab
import matplotlib.pyplot as plt
import subprocess
import traceback
pci_dev ={
"name" : "",
"loc" : "",
"class" : "",
"vender" : "",
"device" : "",
"vd" : "",
"isBridge" : 1,
"driver" : ""
}
def is_root():
return os.geteuid() == 0
def get_pci_list():
out = subprocess.Popen(['lspci', '-nm'],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = out.communicate()
lspci_str = stdout.decode('ascii')
pci_list = []
pcis = lspci_str.split('\n')
for each_pci in pcis:
pci = {}
__ = each_pci.split(" ")
if len(__) < 4:
continue
pci["loc"] = __[0].replace('"', '')
pci["vender"] = __[2].replace('"', '')
pci["device"] = __[3].replace('"', '')
pci["vd"] = ":".join([pci["vender"], pci["device"]])
out = subprocess.Popen(['lspci', '-s', '{}'.format(pci["loc"]), "-mvk"],
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
stdout, stderr = out.communicate()
ss = stdout.decode('ascii')
for line in ss.split("\n"):
if ': ' in line:
k, v = line.split(": ")
if k.strip() == "Class":
pci['class'] = v.strip().replace('"', '')
elif k.strip() == "Vendor":
pci['vender'] = v.strip().replace('"', '')
elif k.strip() == "Device" and ss.split("\n").index(line) > 0:
pci['device'] = v.strip().replace('"', '')
elif k.strip() == "Driver":
pci['driver'] = v.strip().replace('"', '')
else:
pass
else:
continue
pci_list.append(pci)
return pci_list
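# Illustrative only: each element returned by get_pci_list() is a dict built
# from `lspci -nm` / `lspci -mvk` output, roughly of the form (values and
# present keys depend on the machine):
#
#   {'loc': '01:00.0', 'vender': 'Intel Corporation',
#    'device': 'I210 Gigabit Network Connection', 'vd': '8086:1533',
#    'class': 'Ethernet controller', 'driver': 'igb'}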
def print_mach_info(tsc_freq, tsc_overhead, loops):
print("-------------------------------")
print(" tsc_freq : {}".format(tsc_freq))
print(" tsc_overhead : {} clocks".format(tsc_overhead))
print(" loops : {}".format(loops))
print("-------------------------------")
def clock2ns(clocks, tsc_freq):
return int(clocks*1000000000/tsc_freq)
def plot_y(y, fname):
num_width = 10
ymin = int(min(y))-1
ymax = int(max(y))+1
print("Max. and Min. latencies are {}ns {}ns".format(ymax, ymin))
margin = max(num_width, 5)
bins = [ii for ii in range(ymin-margin, ymax+margin, num_width)]
plt.yscale('log')
n, bins, patches = plt.hist(y, bins, range=(min(y), max(y)), width=10, color='blue')
plt.xlabel('nanoseconds')
plt.ylabel('Probability')
plt.title('Histogram of PCIe latencies (%s samples)' % len(y))
plt.savefig(fname, dpi=200, format='png')
def main():
loops = 0
if len(sys.argv) < 2:
print("Usage: {} [0000]:XX:XX.X [loops]".format(sys.argv[0]))
exit(-1)
else:
pci_test = sys.argv[1]
if pci_test.startswith('0000:'):
            pci_test = pci_test[5:]
if len(sys.argv) == 3:
loops = int(sys.argv[2])
else:
loops = 100000
### must be root to run the script
if not is_root():
print("Need root privillege! run as root!")
exit(-1)
### get all devices in this computer
pcis = get_pci_list()
if pci_test not in [pp['loc'] for pp in pcis]:
print("existing PCI devices:")
for __ in pcis:
print(__)
print("{} not found!".format(pci_test))
exit(-1)
for p in pcis:
if p['loc'] == pci_test:
pci_test = p
unbind_file = "/sys/bus/pci/devices/0000\:{}/driver/unbind"
unbind_file = unbind_file.format(pci_test['loc'].replace(':', '\:'))
if os.path.exists(unbind_file):
print("Unbind file {} not found!".format(unbind_file))
exit(-1)
unbind_ss = 'echo -n "0000:{}" > {}'.format(pci_test['loc'], unbind_file)
os.system(unbind_ss)
# insert module
os.system("make")
print("finished compiling the pcie-lat, insmod...");
ins_command = "sudo insmod ./pcie-lat.ko ids={}".format(pci_test['vd'])
print(ins_command)
os.system(ins_command)
# couting
try:
sys_path_head = "/sys/bus/pci/devices/0000:{}/pcie-lat/{}/pcielat_"
sys_path_head = sys_path_head.format(pci_test['loc'], pci_test['loc'])
tsc_freq = 0
tsc_overhead = 0
with open(sys_path_head+'tsc_freq', 'r') as __:
tsc_freq = int(float(__.read()))
with open(sys_path_head+'tsc_overhead', 'r') as __:
tsc_overhead = int(float(__.read()))
with open(sys_path_head+'loops', 'w') as __:
__.write(str(loops))
with open(sys_path_head+'target_bar', 'w') as __:
__.write('0')
print_mach_info(tsc_freq, tsc_overhead, loops)
with open(sys_path_head+'measure', 'w') as __:
__.write('0')
with open('/dev/pcie-lat/{}'.format(pci_test['loc']), 'rb') as __:
y = []
cc = __.read(16)
while cc:
acc = 0
acc2 = 0
for ii in range(8):
acc = acc*256 + int(cc[7-ii])
acc2 = acc2*256 + int(cc[15-ii])
y.append(clock2ns(acc2, tsc_freq))
# read next
cc = __.read(16)
fname = "pcie_lat_loops{}_{}.png"
fname = fname.format(loops, pci_test['loc'].replace(':', '..'))
print("plot the graph")
plot_y(y, fname)
except Exception:
traceback.print_exc()
print("Removing module : sudo rmmod pcie-lat.ko")
os.system("sudo rmmod pcie-lat.ko")
exit(-1)
# remove module
print("Removing module : sudo rmmod pcie-lat.ko")
os.system("sudo rmmod pcie-lat.ko")
if __name__ == "__main__":
main()
| gpl-2.0 |
wavelets/pandashells | pandashells/test/module_checker_lib_tests.py | 7 | 1443 | #! /usr/bin/env python
from unittest import TestCase
from pandashells.lib.module_checker_lib import check_for_modules
from pandashells.lib import module_checker_lib
from mock import patch
class ModuleCheckerTests(TestCase):
def setUp(self):
module_checker_lib.CMD_DICT['fakemodule1'] = 'pip install fakemodule1'
module_checker_lib.CMD_DICT['fakemodule2'] = 'pip install fakemodule2'
module_checker_lib.CMD_DICT['os'] = 'part of standard module'
def test_check_for_modules_unrecognized(self):
"""
check_for_modules() raises error when module is unrecognized
"""
with self.assertRaises(ValueError):
check_for_modules(['not_a_module'])
@patch('pandashells.lib.module_checker_lib.importlib.import_module')
def test_check_for_modules_no_modules(self, import_module_mock):
"""
check_for_modules() does nothing when module list is empty
"""
check_for_modules([])
self.assertFalse(import_module_mock.called)
def test_check_for_modules_existing_module(self):
"""
check_for_modules() successfully finds existing module
"""
check_for_modules(['os'])
def test_check_for_modules_bad(self):
"""
check_for_modules() correctly identifies missing modules
"""
with self.assertRaises(ImportError):
check_for_modules(['fakemodule1', 'fakemodule2'])
| bsd-2-clause |
abimannans/scikit-learn | sklearn/externals/joblib/__init__.py | 86 | 4795 | """ Joblib is a set of tools to provide **lightweight pipelining in
Python**. In particular, joblib offers:
1. transparent disk-caching of the output values and lazy re-evaluation
(memoize pattern)
2. easy simple parallel computing
3. logging and tracing of the execution
Joblib is optimized to be **fast** and **robust** in particular on large
data and has specific optimizations for `numpy` arrays. It is
**BSD-licensed**.
============================== ============================================
**User documentation**: http://pythonhosted.org/joblib
**Download packages**: http://pypi.python.org/pypi/joblib#downloads
**Source code**: http://github.com/joblib/joblib
**Report issues**: http://github.com/joblib/joblib/issues
============================== ============================================
Vision
--------
The vision is to provide tools to easily achieve better performance and
reproducibility when working with long running jobs.
* **Avoid computing the same thing twice**: code is rerun over and
  over, for instance when prototyping computational-heavy jobs (as in
  scientific development), but a hand-crafted solution to alleviate this
  issue is error-prone and often leads to unreproducible results
* **Persist to disk transparently**: persisting in an efficient way
arbitrary objects containing large data is hard. Using
joblib's caching mechanism avoids hand-written persistence and
implicitly links the file on disk to the execution context of
the original Python object. As a result, joblib's persistence is
good for resuming an application status or computational job, eg
after a crash.
Joblib strives to address these problems while **leaving your code and
your flow control as unmodified as possible** (no framework, no new
paradigms).
Main features
------------------
1) **Transparent and fast disk-caching of output value:** a memoize or
make-like functionality for Python functions that works well for
arbitrary Python objects, including very large numpy arrays. Separate
persistence and flow-execution logic from domain logic or algorithmic
code by writing the operations as a set of steps with well-defined
inputs and outputs: Python functions. Joblib can save their
computation to disk and rerun it only if necessary::
>>> import numpy as np
>>> from sklearn.externals.joblib import Memory
>>> mem = Memory(cachedir='/tmp/joblib')
>>> import numpy as np
>>> a = np.vander(np.arange(3)).astype(np.float)
>>> square = mem.cache(np.square)
>>> b = square(a) # doctest: +ELLIPSIS
________________________________________________________________________________
[Memory] Calling square...
square(array([[ 0., 0., 1.],
[ 1., 1., 1.],
[ 4., 2., 1.]]))
___________________________________________________________square - 0...s, 0.0min
>>> c = square(a)
>>> # The above call did not trigger an evaluation
2) **Embarrassingly parallel helper:** to make it easy to write readable
parallel code and debug it quickly::
>>> from sklearn.externals.joblib import Parallel, delayed
>>> from math import sqrt
>>> Parallel(n_jobs=1)(delayed(sqrt)(i**2) for i in range(10))
[0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0]
3) **Logging/tracing:** The different functionalities will
   progressively acquire better logging mechanisms to help track what
   has been run, and capture I/O easily. In addition, Joblib will
   provide a few I/O primitives, to easily define logging and
   display streams, and provide a way of compiling a report.
We want to be able to quickly inspect what has been run.
4) **Fast compressed Persistence**: a replacement for pickle to work
efficiently on Python objects containing large data (
*joblib.dump* & *joblib.load* ).
..
>>> import shutil ; shutil.rmtree('/tmp/joblib/')
"""
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# Generic release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# Admissible pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
#
# Dev branch marker is: 'X.Y.dev' or 'X.Y.devN' where N is an integer.
# 'X.Y.dev0' is the canonical version of 'X.Y.dev'
#
__version__ = '0.9.0b3'
from .memory import Memory, MemorizedResult
from .logger import PrintTime
from .logger import Logger
from .hashing import hash
from .numpy_pickle import dump
from .numpy_pickle import load
from .parallel import Parallel
from .parallel import delayed
from .parallel import cpu_count
| bsd-3-clause |
carrillo/scikit-learn | sklearn/datasets/twenty_newsgroups.py | 126 | 13591 | """Caching loader for the 20 newsgroups text classification dataset
The description of the dataset is available on the official website at:
http://people.csail.mit.edu/jrennie/20Newsgroups/
Quoting the introduction:
The 20 Newsgroups data set is a collection of approximately 20,000
newsgroup documents, partitioned (nearly) evenly across 20 different
newsgroups. To the best of my knowledge, it was originally collected
by Ken Lang, probably for his Newsweeder: Learning to filter netnews
paper, though he does not explicitly mention this collection. The 20
newsgroups collection has become a popular data set for experiments
in text applications of machine learning techniques, such as text
classification and text clustering.
This dataset loader will download the recommended "by date" variant of the
dataset, which features a point-in-time split between the train and
test sets. The compressed dataset is around 14 MB. Once
uncompressed, the train set is 52 MB and the test set is 34 MB.
The data is downloaded, extracted and cached in the '~/scikit_learn_data'
folder.
The `fetch_20newsgroups` function will not vectorize the data into numpy
arrays but the dataset lists the filenames of the posts and their categories
as target labels.
The `fetch_20newsgroups_vectorized` function will in addition do a simple
tf-idf vectorization step.
"""
# Copyright (c) 2011 Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
import os
import logging
import tarfile
import pickle
import shutil
import re
import codecs
import numpy as np
import scipy.sparse as sp
from .base import get_data_home
from .base import Bunch
from .base import load_files
from ..utils import check_random_state
from ..feature_extraction.text import CountVectorizer
from ..preprocessing import normalize
from ..externals import joblib, six
if six.PY3:
from urllib.request import urlopen
else:
from urllib2 import urlopen
logger = logging.getLogger(__name__)
URL = ("http://people.csail.mit.edu/jrennie/"
"20Newsgroups/20news-bydate.tar.gz")
ARCHIVE_NAME = "20news-bydate.tar.gz"
CACHE_NAME = "20news-bydate.pkz"
TRAIN_FOLDER = "20news-bydate-train"
TEST_FOLDER = "20news-bydate-test"
def download_20newsgroups(target_dir, cache_path):
"""Download the 20 newsgroups data and stored it as a zipped pickle."""
archive_path = os.path.join(target_dir, ARCHIVE_NAME)
train_path = os.path.join(target_dir, TRAIN_FOLDER)
test_path = os.path.join(target_dir, TEST_FOLDER)
if not os.path.exists(target_dir):
os.makedirs(target_dir)
if os.path.exists(archive_path):
# Download is not complete as the .tar.gz file is removed after
# download.
logger.warning("Download was incomplete, downloading again.")
os.remove(archive_path)
logger.warning("Downloading dataset from %s (14 MB)", URL)
opener = urlopen(URL)
with open(archive_path, 'wb') as f:
f.write(opener.read())
logger.info("Decompressing %s", archive_path)
tarfile.open(archive_path, "r:gz").extractall(path=target_dir)
os.remove(archive_path)
# Store a zipped pickle
cache = dict(train=load_files(train_path, encoding='latin1'),
test=load_files(test_path, encoding='latin1'))
compressed_content = codecs.encode(pickle.dumps(cache), 'zlib_codec')
with open(cache_path, 'wb') as f:
f.write(compressed_content)
shutil.rmtree(target_dir)
return cache
def strip_newsgroup_header(text):
"""
Given text in "news" format, strip the headers, by removing everything
before the first blank line.
"""
_before, _blankline, after = text.partition('\n\n')
return after
_QUOTE_RE = re.compile(r'(writes in|writes:|wrote:|says:|said:'
r'|^In article|^Quoted from|^\||^>)')
def strip_newsgroup_quoting(text):
"""
Given text in "news" format, strip lines beginning with the quote
characters > or |, plus lines that often introduce a quoted section
(for example, because they contain the string 'writes:'.)
"""
good_lines = [line for line in text.split('\n')
if not _QUOTE_RE.search(line)]
return '\n'.join(good_lines)
def strip_newsgroup_footer(text):
"""
Given text in "news" format, attempt to remove a signature block.
As a rough heuristic, we assume that signatures are set apart by either
a blank line or a line made of hyphens, and that it is the last such line
in the file (disregarding blank lines at the end).
"""
lines = text.strip().split('\n')
for line_num in range(len(lines) - 1, -1, -1):
line = lines[line_num]
if line.strip().strip('-') == '':
break
if line_num > 0:
return '\n'.join(lines[:line_num])
else:
return text
def fetch_20newsgroups(data_home=None, subset='train', categories=None,
shuffle=True, random_state=42,
remove=(),
download_if_missing=True):
"""Load the filenames and data from the 20 newsgroups dataset.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify a download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
categories: None or collection of string or unicode
If None (default), load all the categories.
If not None, list of category names to load (other categories
ignored).
shuffle: bool, optional
Whether or not to shuffle the data: might be important for models that
make the assumption that the samples are independent and identically
distributed (i.i.d.), such as stochastic gradient descent.
random_state: numpy random number generator or seed integer
Used to shuffle the dataset.
download_if_missing: optional, True by default
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
'headers' follows an exact standard; the other filters are not always
correct.
"""
data_home = get_data_home(data_home=data_home)
cache_path = os.path.join(data_home, CACHE_NAME)
twenty_home = os.path.join(data_home, "20news_home")
cache = None
if os.path.exists(cache_path):
try:
with open(cache_path, 'rb') as f:
compressed_content = f.read()
uncompressed_content = codecs.decode(
compressed_content, 'zlib_codec')
cache = pickle.loads(uncompressed_content)
except Exception as e:
print(80 * '_')
print('Cache loading failed')
print(80 * '_')
print(e)
if cache is None:
if download_if_missing:
cache = download_20newsgroups(target_dir=twenty_home,
cache_path=cache_path)
else:
raise IOError('20Newsgroups dataset not found')
if subset in ('train', 'test'):
data = cache[subset]
elif subset == 'all':
data_lst = list()
target = list()
filenames = list()
for subset in ('train', 'test'):
data = cache[subset]
data_lst.extend(data.data)
target.extend(data.target)
filenames.extend(data.filenames)
data.data = data_lst
data.target = np.array(target)
data.filenames = np.array(filenames)
else:
raise ValueError(
"subset can only be 'train', 'test' or 'all', got '%s'" % subset)
data.description = 'the 20 newsgroups by date dataset'
if 'headers' in remove:
data.data = [strip_newsgroup_header(text) for text in data.data]
if 'footers' in remove:
data.data = [strip_newsgroup_footer(text) for text in data.data]
if 'quotes' in remove:
data.data = [strip_newsgroup_quoting(text) for text in data.data]
if categories is not None:
labels = [(data.target_names.index(cat), cat) for cat in categories]
# Sort the categories to have the ordering of the labels
labels.sort()
labels, categories = zip(*labels)
mask = np.in1d(data.target, labels)
data.filenames = data.filenames[mask]
data.target = data.target[mask]
# searchsorted to have continuous labels
data.target = np.searchsorted(labels, data.target)
data.target_names = list(categories)
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[mask]
data.data = data_lst.tolist()
if shuffle:
random_state = check_random_state(random_state)
indices = np.arange(data.target.shape[0])
random_state.shuffle(indices)
data.filenames = data.filenames[indices]
data.target = data.target[indices]
# Use an object array to shuffle: avoids memory copy
data_lst = np.array(data.data, dtype=object)
data_lst = data_lst[indices]
data.data = data_lst.tolist()
return data
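# Illustrative usage (downloads ~14 MB on the first call; the category names
# below are examples only):
#
#   train = fetch_20newsgroups(subset='train',
#                              categories=['rec.autos', 'sci.space'],
#                              remove=('headers', 'footers', 'quotes'))
#   # train.target_names == ['rec.autos', 'sci.space']
#   # train.filenames and train.target are aligned 1d arrays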
def fetch_20newsgroups_vectorized(subset="train", remove=(), data_home=None):
"""Load the 20 newsgroups dataset and transform it into tf-idf vectors.
This is a convenience function; the tf-idf transformation is done using the
default settings for `sklearn.feature_extraction.text.Vectorizer`. For more
advanced usage (stopword filtering, n-gram extraction, etc.), combine
fetch_20newsgroups with a custom `Vectorizer` or `CountVectorizer`.
Read more in the :ref:`User Guide <20newsgroups>`.
Parameters
----------
subset: 'train' or 'test', 'all', optional
Select the dataset to load: 'train' for the training set, 'test'
for the test set, 'all' for both, with shuffled ordering.
data_home: optional, default: None
Specify an download and cache folder for the datasets. If None,
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
remove: tuple
May contain any subset of ('headers', 'footers', 'quotes'). Each of
these are kinds of text that will be detected and removed from the
newsgroup posts, preventing classifiers from overfitting on
metadata.
'headers' removes newsgroup headers, 'footers' removes blocks at the
ends of posts that look like signatures, and 'quotes' removes lines
that appear to be quoting another post.
Returns
-------
bunch : Bunch object
bunch.data: sparse matrix, shape [n_samples, n_features]
bunch.target: array, shape [n_samples]
bunch.target_names: list, length [n_classes]
"""
data_home = get_data_home(data_home=data_home)
filebase = '20newsgroup_vectorized'
if remove:
filebase += 'remove-' + ('-'.join(remove))
target_file = os.path.join(data_home, filebase + ".pk")
# we shuffle but use a fixed seed for the memoization
data_train = fetch_20newsgroups(data_home=data_home,
subset='train',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
data_test = fetch_20newsgroups(data_home=data_home,
subset='test',
categories=None,
shuffle=True,
random_state=12,
remove=remove)
if os.path.exists(target_file):
X_train, X_test = joblib.load(target_file)
else:
vectorizer = CountVectorizer(dtype=np.int16)
X_train = vectorizer.fit_transform(data_train.data).tocsr()
X_test = vectorizer.transform(data_test.data).tocsr()
joblib.dump((X_train, X_test), target_file, compress=9)
# the data is stored as int16 for compactness
# but normalize needs floats
X_train = X_train.astype(np.float64)
X_test = X_test.astype(np.float64)
normalize(X_train, copy=False)
normalize(X_test, copy=False)
target_names = data_train.target_names
if subset == "train":
data = X_train
target = data_train.target
elif subset == "test":
data = X_test
target = data_test.target
elif subset == "all":
data = sp.vstack((X_train, X_test)).tocsr()
target = np.concatenate((data_train.target, data_test.target))
else:
raise ValueError("%r is not a valid subset: should be one of "
"['train', 'test', 'all']" % subset)
return Bunch(data=data, target=target, target_names=target_names)
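# Illustrative usage (reuses the same cached download as fetch_20newsgroups):
#
#   bunch = fetch_20newsgroups_vectorized(subset='train')
#   # bunch.data is a scipy.sparse CSR matrix of shape (n_samples, n_features)
#   # len(bunch.target_names) == 20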
| bsd-3-clause |
bzero/statsmodels | statsmodels/sandbox/tsa/varma.py | 33 | 5032 | '''VAR and VARMA process
this doesn't actually do much, trying out a version for a time loop
alternative representation:
* textbook, different blocks in matrices
* Kalman filter
* VAR, VARX and ARX could be calculated with signal.lfilter
only tried some examples, not implemented
TODO: try minimizing sum of squares of (Y-Yhat)
Note: filter has smallest lag at end of array and largest lag at beginning,
be careful for asymmetric lags coefficients
check this again if it is consistently used
changes
2009-09-08 : separated from movstat.py
Author : josefpkt
License : BSD
'''
from __future__ import print_function
import numpy as np
from scipy import signal
#import matplotlib.pylab as plt
from numpy.testing import assert_array_equal, assert_array_almost_equal
#NOTE: this just returns that predicted values given the
#B matrix in polynomial form.
#TODO: make sure VAR class returns B/params in this form.
def VAR(x,B, const=0):
''' multivariate linear filter
Parameters
----------
x: (TxK) array
columns are variables, rows are observations for time period
B: (PxKxK) array
b_t-1 is bottom "row", b_t-P is top "row" when printing
B(:,:,0) is lag polynomial matrix for variable 1
B(:,:,k) is lag polynomial matrix for variable k
B(p,:,k) is pth lag for variable k
B[p,:,:].T corresponds to A_p in Wikipedia
const: float or array (not tested)
constant added to autoregression
Returns
-------
xhat: (TxK) array
filtered, predicted values of x array
Notes
-----
xhat(t,i) = sum{_p}sum{_k} { x(t-P:t,:) .* B(:,:,i) } for all i = 0,K-1, for all t=p..T
xhat does not include the forecasting observation, xhat(T+1),
xhat is 1 row shorter than signal.correlate
References
----------
http://en.wikipedia.org/wiki/Vector_Autoregression
http://en.wikipedia.org/wiki/General_matrix_notation_of_a_VAR(p)
'''
p = B.shape[0]
T = x.shape[0]
xhat = np.zeros(x.shape)
for t in range(p,T): #[p+2]:#
## print(p,T)
## print(x[t-p:t,:,np.newaxis].shape)
## print(B.shape)
#print(x[t-p:t,:,np.newaxis])
xhat[t,:] = const + (x[t-p:t,:,np.newaxis]*B).sum(axis=1).sum(axis=0)
return xhat
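# Shape sketch (illustrative): with K=2 variables, P=3 lags and T=20
# observations, x has shape (20, 2) and B has shape (3, 2, 2); the returned
# xhat has shape (20, 2), with the first P rows left at zero because no
# prediction is formed before P past observations are available.
#
#   xhat = VAR(np.zeros((20, 2)), np.ones((3, 2, 2)))  # xhat.shape == (20, 2)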
def VARMA(x,B,C, const=0):
''' multivariate linear filter
x (TxK)
B (PxKxK)
xhat(t,i) = sum{_p}sum{_k} { x(t-P:t,:) .* B(:,:,i) } +
sum{_q}sum{_k} { e(t-Q:t,:) .* C(:,:,i) }for all i = 0,K-1
'''
P = B.shape[0]
Q = C.shape[0]
T = x.shape[0]
xhat = np.zeros(x.shape)
e = np.zeros(x.shape)
start = max(P,Q)
for t in range(start,T): #[p+2]:#
## print(p,T
## print(x[t-p:t,:,np.newaxis].shape
## print(B.shape
#print(x[t-p:t,:,np.newaxis]
xhat[t,:] = const + (x[t-P:t,:,np.newaxis]*B).sum(axis=1).sum(axis=0) + \
(e[t-Q:t,:,np.newaxis]*C).sum(axis=1).sum(axis=0)
e[t,:] = x[t,:] - xhat[t,:]
return xhat, e
if __name__ == '__main__':
T = 20
K = 2
P = 3
#x = np.arange(10).reshape(5,2)
x = np.column_stack([np.arange(T)]*K)
B = np.ones((P,K,K))
#B[:,:,1] = 2
B[:,:,1] = [[0,0],[0,0],[0,1]]
xhat = VAR(x,B)
print(np.all(xhat[P:,0]==np.correlate(x[:-1,0],np.ones(P))*2))
#print(xhat)
T = 20
K = 2
Q = 2
P = 3
const = 1
#x = np.arange(10).reshape(5,2)
x = np.column_stack([np.arange(T)]*K)
B = np.ones((P,K,K))
#B[:,:,1] = 2
B[:,:,1] = [[0,0],[0,0],[0,1]]
C = np.zeros((Q,K,K))
xhat1 = VAR(x,B, const=const)
xhat2, err2 = VARMA(x,B,C, const=const)
print(np.all(xhat2 == xhat1))
print(np.all(xhat2[P:,0] == np.correlate(x[:-1,0],np.ones(P))*2+const))
C[1,1,1] = 0.5
xhat3, err3 = VARMA(x,B,C)
x = np.r_[np.zeros((P,K)),x] #prepend inital conditions
xhat4, err4 = VARMA(x,B,C)
C[1,1,1] = 1
B[:,:,1] = [[0,0],[0,0],[0,1]]
xhat5, err5 = VARMA(x,B,C)
#print(err5)
#in differences
#VARMA(np.diff(x,axis=0),B,C)
#Note:
# * signal correlate applies same filter to all columns if kernel.shape[1]<K
# e.g. signal.correlate(x0,np.ones((3,1)),'valid')
# * if kernel.shape[1]==K, then `valid` produces a single column
# -> possible to run signal.correlate K times with different filters,
# see the following example, which replicates VAR filter
x0 = np.column_stack([np.arange(T), 2*np.arange(T)])
B[:,:,0] = np.ones((P,K))
B[:,:,1] = np.ones((P,K))
B[1,1,1] = 0
xhat0 = VAR(x0,B)
xcorr00 = signal.correlate(x0,B[:,:,0])#[:,0]
xcorr01 = signal.correlate(x0,B[:,:,1])
print(np.all(signal.correlate(x0,B[:,:,0],'valid')[:-1,0]==xhat0[P:,0]))
print(np.all(signal.correlate(x0,B[:,:,1],'valid')[:-1,0]==xhat0[P:,1]))
#import error
#from movstat import acovf, acf
from statsmodels.tsa.stattools import acovf, acf
aav = acovf(x[:,0])
print(aav[0] == np.var(x[:,0]))
aac = acf(x[:,0])
| bsd-3-clause |
to266/hyperspy | hyperspy/drawing/widget.py | 2 | 36785 | # -*- coding: utf-8 -*-
# Copyright 2007-2016 The HyperSpy developers
#
# This file is part of HyperSpy.
#
# HyperSpy is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# HyperSpy is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with HyperSpy. If not, see <http://www.gnu.org/licenses/>.
from __future__ import division
import matplotlib.pyplot as plt
from matplotlib.backend_bases import MouseEvent
import numpy as np
from hyperspy.drawing.utils import on_figure_window_close
from hyperspy.events import Events, Event
class WidgetBase(object):
"""Base class for interactive widgets/patches. A widget creates and
maintains one or more matplotlib patches, and manages the interaction code
so that the user can manipulate it on the fly.
This base class implements functionality which is common to all such
widgets, mainly the code that manages the patch, axes management, and
sets up common events ('changed' and 'closed').
Any inheriting subclasses must implement the following methods:
_set_patch(self)
_on_navigate(obj, name, old, new) # Only for widgets that can navigate
It should also make sure to fill the 'axes' attribute as early as
possible (but after the base class init), so that it is available when
needed.
"""
def __init__(self, axes_manager=None, **kwargs):
self.axes_manager = axes_manager
self._axes = list()
self.ax = None
self.picked = False
self.selected = False
self._selected_artist = None
self._size = 1.
self.color = 'red'
self.__is_on = True
self.background = None
self.patch = []
self.cids = list()
self.blit = True
self.events = Events()
self.events.changed = Event(doc="""
Event that triggers when the widget has a significant change.
The event triggers after the internal state of the widget has been
updated.
Arguments:
----------
widget:
The widget that changed
""", arguments=['obj'])
self.events.closed = Event(doc="""
Event that triggers when the widget closed.
The event triggers after the widget has already been closed.
Arguments:
----------
widget:
The widget that closed
""", arguments=['obj'])
self._navigating = False
super(WidgetBase, self).__init__(**kwargs)
def _get_axes(self):
return self._axes
def _set_axes(self, axes):
if axes is None:
self._axes = list()
else:
self._axes = axes
axes = property(lambda s: s._get_axes(),
lambda s, v: s._set_axes(v))
def is_on(self):
"""Determines if the widget is set to draw if valid (turned on).
"""
return self.__is_on
def set_on(self, value):
"""Change the on state of the widget. If turning off, all patches will
be removed from the matplotlib axes and the widget will disconnect from
all events. If turning on, the patch(es) will be added to the
matplotlib axes, and the widget will connect to its default events.
"""
did_something = False
if value is not self.is_on() and self.ax is not None:
did_something = True
if value is True:
self._add_patch_to(self.ax)
self.connect(self.ax)
elif value is False:
for container in [
self.ax.patches,
self.ax.lines,
self.ax.artists,
self.ax.texts]:
for p in self.patch:
if p in container:
container.remove(p)
self.disconnect()
if hasattr(super(WidgetBase, self), 'set_on'):
super(WidgetBase, self).set_on(value)
if did_something:
self.draw_patch()
if value is False:
self.ax = None
self.__is_on = value
def _set_patch(self):
"""Create the matplotlib patch(es), and store it in self.patch
"""
if hasattr(super(WidgetBase, self), '_set_patch'):
super(WidgetBase, self)._set_patch()
# Must be provided by the subclass
def _add_patch_to(self, ax):
"""Create and add the matplotlib patches to 'ax'
"""
self._set_patch()
for p in self.patch:
ax.add_artist(p)
p.set_animated(hasattr(ax, 'hspy_fig'))
if hasattr(super(WidgetBase, self), '_add_patch_to'):
super(WidgetBase, self)._add_patch_to(ax)
def set_mpl_ax(self, ax):
"""Set the matplotlib Axes that the widget will draw to. If the widget
on state is True, it will also add the patch to the Axes, and connect
to its default events.
"""
if ax is self.ax:
return # Do nothing
# Disconnect from previous axes if set
if self.ax is not None and self.is_on():
self.disconnect()
self.ax = ax
canvas = ax.figure.canvas
if self.is_on() is True:
self._add_patch_to(ax)
self.connect(ax)
canvas.draw()
self.select()
def select(self):
"""
Cause this widget to be the selected widget in its MPL axes. This
assumes that the widget has its patch added to the MPL axes.
"""
if not self.patch or not self.is_on() or not self.ax:
return
canvas = self.ax.figure.canvas
# Simulate a pick event
x, y = self.patch[0].get_transform().transform_point((0, 0))
mouseevent = MouseEvent('pick_event', canvas, x, y)
canvas.pick_event(mouseevent, self.patch[0])
self.picked = False
def connect(self, ax):
"""Connect to the matplotlib Axes' events.
"""
on_figure_window_close(ax.figure, self.close)
if self._navigating:
self.connect_navigate()
def connect_navigate(self):
"""Connect to the axes_manager such that changes in the widget or in
the axes_manager are reflected in the other.
"""
if self._navigating:
self.disconnect_navigate()
self.axes_manager.events.indices_changed.connect(
self._on_navigate, {'obj': 'axes_manager'})
self._on_navigate(self.axes_manager) # Update our position
self._navigating = True
def disconnect_navigate(self):
"""Disconnect a previous naivgation connection.
"""
self.axes_manager.events.indices_changed.disconnect(self._on_navigate)
self._navigating = False
def _on_navigate(self, axes_manager):
"""Callback for axes_manager's change notification.
"""
pass # Implement in subclass!
def disconnect(self):
"""Disconnect from all events (both matplotlib and navigation).
"""
for cid in self.cids:
try:
self.ax.figure.canvas.mpl_disconnect(cid)
except:
pass
if self._navigating:
self.disconnect_navigate()
def close(self, window=None):
"""Set the on state to off (removes patch and disconnects), and trigger
events.closed.
"""
self.set_on(False)
self.events.closed.trigger(obj=self)
def draw_patch(self, *args):
"""Update the patch drawing.
"""
try:
if hasattr(self.ax, 'hspy_fig'):
self.ax.hspy_fig._draw_animated()
elif self.ax.figure is not None:
self.ax.figure.canvas.draw_idle()
except AttributeError:
pass # When figure is None, typically when closing
def _v2i(self, axis, v):
"""Wrapped version of DataAxis.value2index, which bounds the index
between axis.low_index and axis.high_index+1, and does not raise a
ValueError.
"""
try:
return axis.value2index(v)
except ValueError:
if v > axis.high_value:
return axis.high_index + 1
elif v < axis.low_value:
return axis.low_index
else:
raise
def _i2v(self, axis, i):
"""Wrapped version of DataAxis.index2value, which bounds the value
between axis.low_value and axis.high_value+axis.scale, and does not
raise a ValueError.
"""
try:
return axis.index2value(i)
except ValueError:
if i > axis.high_index:
return axis.high_value + axis.scale
elif i < axis.low_index:
return axis.low_value
else:
raise
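# Illustrative sketch (not part of HyperSpy): a minimal concrete widget only has
# to fill ``self.patch`` in ``_set_patch`` (and implement ``_on_navigate`` if it
# should follow the axes_manager). The class below is hypothetical and only
# shows the expected structure:
#
#     class CircleMarkerWidget(WidgetBase):
#         def _set_patch(self):
#             super(CircleMarkerWidget, self)._set_patch()
#             self.patch = [plt.Circle((0, 0), radius=self._size,
#                                      color=self.color, fill=False,
#                                      animated=self.blit, picker=True)]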
class DraggableWidgetBase(WidgetBase):
"""Adds the `position` and `indices` properties, and adds a framework for
letting the user drag the patch around. Also adds the `moved` event.
The default behavior is that `position` snaps to the values corresponding
to the values of the axes grid (i.e. no subpixel values). This behavior
can be controlled by the property `snap_position`.
Any inheritors must override these methods:
_onmousemove(self, event)
_update_patch_position(self)
_set_patch(self)
"""
def __init__(self, axes_manager, **kwargs):
super(DraggableWidgetBase, self).__init__(axes_manager, **kwargs)
self.events.moved = Event(doc="""
Event that triggers when the widget was moved.
The event triggers after the internal state of the widget has been
updated. This event does not differentiate on how the position of
the widget was changed, so it is the responsibility of the user
to suppress events as necessary to avoid closed loops etc.
Arguments:
----------
obj:
The widget that was moved.
""", arguments=['obj'])
self._snap_position = True
# Set default axes
if self.axes_manager is not None:
if self.axes_manager.navigation_dimension > 0:
self.axes = self.axes_manager.navigation_axes[0:1]
else:
self.axes = self.axes_manager.signal_axes[0:1]
else:
self._pos = np.array([0.])
def _set_axes(self, axes):
super(DraggableWidgetBase, self)._set_axes(axes)
if self.axes:
self._pos = np.array([ax.low_value for ax in self.axes])
def _get_indices(self):
"""Returns a tuple with the position (indices).
"""
idx = []
for i in range(len(self.axes)):
idx.append(self.axes[i].value2index(self._pos[i]))
return tuple(idx)
def _set_indices(self, value):
"""Sets the position of the widget (by indices). The dimensions should
correspond to that of the 'axes' attribute. Calls _pos_changed if the
value has changed, which is then responsible for triggering any
relevant events.
"""
if np.ndim(value) == 0 and len(self.axes) == 1:
self.position = [self.axes[0].index2value(value)]
elif len(self.axes) != len(value):
raise ValueError()
else:
p = []
for i in range(len(self.axes)):
p.append(self.axes[i].index2value(value[i]))
self.position = p
indices = property(lambda s: s._get_indices(),
lambda s, v: s._set_indices(v))
def _pos_changed(self):
"""Call when the position of the widget has changed. It triggers the
relevant events, and updates the patch position.
"""
if self._navigating:
with self.axes_manager.events.indices_changed.suppress_callback(
self._on_navigate):
for i in range(len(self.axes)):
self.axes[i].value = self._pos[i]
self.events.moved.trigger(self)
self.events.changed.trigger(self)
self._update_patch_position()
def _validate_pos(self, pos):
"""Validates the passed position. Depending on the position and the
implementation, this can either fire a ValueError, or return a modified
position that has valid values. Or simply return the unmodified
position if everything is ok.
This default implementation bounds the position within the axes limits.
"""
if len(pos) != len(self.axes):
raise ValueError()
pos = np.maximum(pos, [ax.low_value for ax in self.axes])
pos = np.minimum(pos, [ax.high_value for ax in self.axes])
if self.snap_position:
pos = self._do_snap_position(pos)
return pos
def _get_position(self):
"""Providies the position of the widget (by values) in a tuple.
"""
return tuple(
self._pos.tolist()) # Don't pass reference, and make it clear
def _set_position(self, position):
"""Sets the position of the widget (by values). The dimensions should
correspond to that of the 'axes' attribute. Calls _pos_changed if the
value has changed, which is then responsible for triggering any
relevant events.
"""
position = self._validate_pos(position)
if np.any(self._pos != position):
self._pos = np.array(position)
self._pos_changed()
position = property(lambda s: s._get_position(),
lambda s, v: s._set_position(v))
def _do_snap_position(self, value=None):
"""Snaps position to axes grid. Returns snapped value. If value is
passed as an argument, the internal state is left untouched, if not
the position attribute is updated to the snapped value.
"""
value = np.array(value) if value is not None else self._pos
for i, ax in enumerate(self.axes):
value[i] = ax.index2value(ax.value2index(value[i]))
return value
def _set_snap_position(self, value):
self._snap_position = value
if value:
snap_value = self._do_snap_position(self._pos)
if np.any(self._pos != snap_value):
self._pos = snap_value
self._pos_changed()
snap_position = property(lambda s: s._snap_position,
lambda s, v: s._set_snap_position(v))
def connect(self, ax):
super(DraggableWidgetBase, self).connect(ax)
canvas = ax.figure.canvas
self.cids.append(
canvas.mpl_connect('motion_notify_event', self._onmousemove))
self.cids.append(canvas.mpl_connect('pick_event', self.onpick))
self.cids.append(canvas.mpl_connect(
'button_release_event', self.button_release))
def _on_navigate(self, axes_manager):
if axes_manager is self.axes_manager:
p = self._pos.tolist()
for i, a in enumerate(self.axes):
p[i] = a.value
self.position = p # Use property to trigger events
def onpick(self, event):
# Callback for MPL pick event
self.picked = (event.artist in self.patch)
self._selected_artist = event.artist
if hasattr(super(DraggableWidgetBase, self), 'onpick'):
super(DraggableWidgetBase, self).onpick(event)
self.selected = self.picked
def _onmousemove(self, event):
"""Callback for mouse movement. For dragging, the implementor would
normally check that the widget is picked, and that the event.inaxes
Axes equals self.ax.
"""
# This method must be provided by the subclass
pass
def _update_patch_position(self):
"""Updates the position of the patch on the plot.
"""
# This method must be provided by the subclass
pass
def _update_patch_geometry(self):
"""Updates all geometry of the patch on the plot.
"""
self._update_patch_position()
def button_release(self, event):
"""whenever a mouse button is released"""
if event.button != 1:
return
if self.picked is True:
self.picked = False
class Widget1DBase(DraggableWidgetBase):
"""A base class for 1D widgets.
It sets the right dimensions for size and
position, adds the 'border_thickness' attribute and initalizes the 'axes'
attribute to the first two navigation axes if possible, if not, the two
first signal_axes are used. Other than that it mainly supplies common
utility functions for inheritors, and implements required functions for
ResizableDraggableWidgetBase.
The implementation for ResizableDraggableWidgetBase methods all assume that
a Rectangle patch will be used, centered on position. If not, the
inheriting class will have to override those as applicable.
"""
def _set_position(self, position):
try:
len(position)
except TypeError:
position = (position,)
super(Widget1DBase, self)._set_position(position)
def _validate_pos(self, pos):
pos = np.maximum(pos, self.axes[0].low_value)
pos = np.minimum(pos, self.axes[0].high_value)
return super(Widget1DBase, self)._validate_pos(pos)
class ResizableDraggableWidgetBase(DraggableWidgetBase):
"""Adds the `size` property and get_size_in_axes method, and adds a
framework for letting the user resize the patch, including resizing by
key strokes ('+', '-'). Also adds the 'resized' event.
Utility functions for resizing are implemented by `increase_size` and
`decrease_size`, which will in-/decrement the size by 1. Other utility
functions include `get_centre` and `get_centre_indices` which returns the
center position, and the internal _apply_changes which helps make sure that
only one 'changed' event is fired for a combined move and resize.
Any inheritors must override these methods:
_update_patch_position(self)
_update_patch_size(self)
_update_patch_geometry(self)
_set_patch(self)
"""
def __init__(self, axes_manager, **kwargs):
super(ResizableDraggableWidgetBase, self).__init__(
axes_manager, **kwargs)
if not self.axes:
self._size = np.array([1])
self.size_step = 1 # = one step in index space
self._snap_size = True
self.events.resized = Event(doc="""
Event that triggers when the widget was resized.
The event triggers after the internal state of the widget has been
updated. This event does not differentiate on how the size of
the widget was changed, so it is the responsibility of the user
to suppress events as necessary to avoid closed loops etc.
Arguments:
----------
obj:
The widget that was resized.
""", arguments=['obj'])
self.no_events_while_dragging = False
self._drag_store = None
def _set_axes(self, axes):
super(ResizableDraggableWidgetBase, self)._set_axes(axes)
if self.axes:
self._size = np.array([ax.scale for ax in self.axes])
def _get_size(self):
"""Getter for 'size' property. Returns the size as a tuple (to prevent
unintended in-place changes).
"""
return tuple(self._size.tolist())
def _set_size(self, value):
"""Setter for the 'size' property. Calls _size_changed to handle size
change, if the value has changed.
"""
value = np.minimum(value, [ax.size * ax.scale for ax in self.axes])
value = np.maximum(value, self.size_step *
np.array([ax.scale for ax in self.axes]))
if self.snap_size:
value = self._do_snap_size(value)
if np.any(self._size != value):
self._size = value
self._size_changed()
size = property(lambda s: s._get_size(), lambda s, v: s._set_size(v))
def _do_snap_size(self, value=None):
value = np.array(value) if value is not None else self._size
for i, ax in enumerate(self.axes):
value[i] = round(value[i] / ax.scale) * ax.scale
return value
def _set_snap_size(self, value):
self._snap_size = value
if value:
snap_value = self._do_snap_size(self._size)
if np.any(self._size != snap_value):
self._size = snap_value
self._size_changed()
snap_size = property(lambda s: s._snap_size,
lambda s, v: s._set_snap_size(v))
def _set_snap_all(self, value):
# Snap position first, as snapped size can depend on position.
self.snap_position = value
self.snap_size = value
snap_all = property(lambda s: s.snap_size and s.snap_position,
lambda s, v: s._set_snap_all(v))
def increase_size(self):
"""Increment all sizes by 1. Applied via 'size' property.
"""
self.size = np.array(self.size) + \
self.size_step * np.array([a.scale for a in self.axes])
def decrease_size(self):
"""Decrement all sizes by 1. Applied via 'size' property.
"""
self.size = np.array(self.size) - \
self.size_step * np.array([a.scale for a in self.axes])
def _size_changed(self):
"""Triggers resize and changed events, and updates the patch.
"""
self.events.resized.trigger(self)
self.events.changed.trigger(self)
self._update_patch_size()
def get_size_in_indices(self):
"""Gets the size property converted to the index space (via 'axes'
attribute).
"""
s = list()
for i in range(len(self.axes)):
s.append(int(round(self._size[i] / self.axes[i].scale)))
return np.array(s)
def set_size_in_indices(self, value):
"""Sets the size property converted to the index space (via 'axes'
attribute).
"""
s = list()
for i in range(len(self.axes)):
s.append(int(round(value[i] * self.axes[i].scale)))
self.size = s # Use property to get full processing
def get_centre(self):
"""Get's the center indices. The default implementation is simply the
position + half the size in axes space, which should work for any
symmetric widget, but more advanced widgets will need to decide whether
to return the center of gravity or the geometrical center of the
bounds.
"""
return self._pos + self._size() / 2.0
def get_centre_index(self):
"""Get's the center position (in index space). The default
implementation is simply the indices + half the size, which should
work for any symmetric widget, but more advanced widgets will need to
decide whether to return the center of gravity or the geometrical
center of the bounds.
"""
return self.indices + self.get_size_in_indices() / 2.0
def _update_patch_size(self):
"""Updates the size of the patch on the plot.
"""
# This method must be provided by the subclass
pass
def _update_patch_geometry(self):
"""Updates all geometry of the patch on the plot.
"""
# This method must be provided by the subclass
pass
def on_key_press(self, event):
if event.key == "+":
self.increase_size()
if event.key == "-":
self.decrease_size()
def connect(self, ax):
super(ResizableDraggableWidgetBase, self).connect(ax)
canvas = ax.figure.canvas
self.cids.append(canvas.mpl_connect('key_press_event',
self.on_key_press))
def onpick(self, event):
if hasattr(super(ResizableDraggableWidgetBase, self), 'onpick'):
super(ResizableDraggableWidgetBase, self).onpick(event)
if self.picked:
self._drag_store = (self.position, self.size)
def _apply_changes(self, old_size, old_position):
"""Evalutes whether the widget has been moved/resized, and triggers
the correct events and updates the patch geometry. This function has
the advantage that the geometry is updated only once, preventing
flickering, and the 'changed' event only fires once.
"""
moved = self.position != old_position
resized = self.size != old_size
if moved:
if self._navigating:
e = self.axes_manager.events.indices_changed
with e.suppress_callback(self._on_navigate):
for i in range(len(self.axes)):
self.axes[i].index = self.indices[i]
if moved or resized:
# Update patch first
if moved and resized:
self._update_patch_geometry()
elif moved:
self._update_patch_position()
else:
self._update_patch_size()
# Then fire events
if not self.no_events_while_dragging or not self.picked:
if moved:
self.events.moved.trigger(self)
if resized:
self.events.resized.trigger(self)
self.events.changed.trigger(self)
def button_release(self, event):
"""whenever a mouse button is released"""
picked = self.picked
super(ResizableDraggableWidgetBase, self).button_release(event)
if event.button != 1:
return
if picked and self.picked is False:
if self.no_events_while_dragging and self._drag_store:
self._apply_changes(*self._drag_store)
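# Illustrative sketch (not part of HyperSpy): typical wiring of a concrete
# widget into a matplotlib figure. ``w`` is assumed to be an instance of a
# subclass that implements the abstract methods above:
#
#     fig, ax = plt.subplots()
#     w.set_mpl_ax(ax)        # adds the patch and connects the default events
#     w.events.moved.connect(lambda obj: print(obj.position))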
class Widget2DBase(ResizableDraggableWidgetBase):
"""A base class for 2D widgets. It sets the right dimensions for size and
position, adds the 'border_thickness' attribute and initializes the 'axes'
attribute to the first two navigation axes if possible, if not, the two
first signal_axes are used. Other than that it mainly supplies common
utility functions for inheritors, and implements required functions for
ResizableDraggableWidgetBase.
The implementation for ResizableDraggableWidgetBase methods all assume that
a Rectangle patch will be used, centered on position. If not, the
inheriting class will have to override those as applicable.
"""
def __init__(self, axes_manager, **kwargs):
super(Widget2DBase, self).__init__(axes_manager, **kwargs)
self.border_thickness = 2
# Set default axes
if self.axes_manager is not None:
if self.axes_manager.navigation_dimension > 1:
self.axes = self.axes_manager.navigation_axes[0:2]
elif self.axes_manager.signal_dimension > 1:
self.axes = self.axes_manager.signal_axes[0:2]
elif len(self.axes_manager.shape) > 1:
self.axes = (self.axes_manager.signal_axes +
self.axes_manager.navigation_axes)
else:
raise ValueError("2D widget needs at least two axes!")
else:
self._pos = np.array([0, 0])
self._size = np.array([1, 1])
def _get_patch_xy(self):
"""Returns the xy position of the widget. In this default
implementation, the widget is centered on the position.
"""
return self._pos - self._size / 2.
def _get_patch_bounds(self):
"""Returns the bounds of the patch in the form of a tuple in the order
left, top, width, height. In matplotlib, 'bottom' is used instead of
'top' as the naming assumes an upwards pointing y-axis, meaning the
lowest value corresponds to bottom. However, our widgets will normally
only go on images (which have an inverted y-axis in MPL by default), so
we define the lowest value to be termed 'top'.
"""
xy = self._get_patch_xy()
xs, ys = self.size
return (xy[0], xy[1], xs, ys) # x,y,w,h
def _update_patch_position(self):
if self.is_on() and self.patch:
self.patch[0].set_xy(self._get_patch_xy())
self.draw_patch()
def _update_patch_size(self):
self._update_patch_geometry()
def _update_patch_geometry(self):
if self.is_on() and self.patch:
self.patch[0].set_bounds(*self._get_patch_bounds())
self.draw_patch()
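# Illustrative sketch (not part of HyperSpy): Widget2DBase assumes a rectangular
# patch centred on ``position``, so a concrete 2D widget typically only needs to
# create that patch. Hypothetical example:
#
#     class SimpleRectangleWidget(Widget2DBase):
#         def _set_patch(self):
#             super(SimpleRectangleWidget, self)._set_patch()
#             xy = self._get_patch_xy()
#             xs, ys = self.size
#             self.patch = [plt.Rectangle(xy, xs, ys, fill=False, lw=2,
#                                         ec=self.color, animated=self.blit,
#                                         picker=True)]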
class ResizersMixin(object):
"""
Widget mix-in for adding resizing manipulation handles.
The default handles are green boxes displayed on the outside corners of the
boundaries. By default, the handles are only displayed when the widget is
selected (`picked` in matplotlib terminology).
Attributes:
-----------
resizers : {bool}
Property that determines whether the resizer handles should be used
resize_color : {matplotlib color}
The color of the resize handles.
resize_pixel_size : {tuple | None}
Size of the resize handles in screen pixels. If None, it is set
equal to the size of one 'data-pixel' (image pixel size).
resizer_picked : {False | int}
Indicates which, if any, resizer was selected the last time the
widget was picked. `False` if another patch was picked, or the
index of the resizer handle that was picked.
"""
def __init__(self, resizers=True, **kwargs):
super(ResizersMixin, self).__init__(**kwargs)
self.resizer_picked = False
self.pick_offset = (0, 0)
self.resize_color = 'lime'
self.resize_pixel_size = (5, 5) # Set to None to make one data pixel
self._resizers = resizers
self._resizer_handles = []
self._resizers_on = False
# The `_resizers_on` attribute reflects whether handles are actually on
# as compared to `_resizers` which is whether the user wants them on.
# The difference is e.g. for turning on and off handles when the
# widget is selected/deselected.
@property
def resizers(self):
return self._resizers
@resizers.setter
def resizers(self, value):
if self._resizers != value:
self._resizers = value
self._set_resizers(value, self.ax)
def _update_resizers(self):
"""Update resizer handles' patch geometry.
"""
pos = self._get_resizer_pos()
rsize = self._get_resizer_size()
for i, r in enumerate(self._resizer_handles):
r.set_xy(pos[i])
r.set_width(rsize[0])
r.set_height(rsize[1])
def _set_resizers(self, value, ax):
"""Turns the resizers on/off, in much the same way that _set_patch
works.
"""
if ax is not None:
if value:
for r in self._resizer_handles:
ax.add_artist(r)
r.set_animated(hasattr(ax, 'hspy_fig'))
else:
for container in [
ax.patches,
ax.lines,
ax.artists,
ax.texts]:
for r in self._resizer_handles:
if r in container:
container.remove(r)
self._resizers_on = value
self.draw_patch()
def _get_resizer_size(self):
"""Gets the size of the resizer handles in axes coordinates. If
'resize_pixel_size' is None, the size of one data pixel will be used.
"""
invtrans = self.ax.transData.inverted()
if self.resize_pixel_size is None:
rsize = [ax.scale for ax in self.axes]
else:
rsize = np.abs(invtrans.transform(self.resize_pixel_size) -
invtrans.transform((0, 0)))
return rsize
def _get_resizer_offset(self):
"""Utility for getting the distance from the boundary box to the
center of the resize handles.
"""
invtrans = self.ax.transData.inverted()
border = self.border_thickness
# Transform the border thickness into data values
dl = np.abs(invtrans.transform((border, border)) -
invtrans.transform((0, 0))) / 2
rsize = self._get_resizer_size()
return rsize / 2 + dl
def _get_resizer_pos(self):
"""Get the positions of the resizer handles.
"""
invtrans = self.ax.transData.inverted()
border = self.border_thickness
# Transform the border thickness into data values
dl = np.abs(invtrans.transform((border, border)) -
invtrans.transform((0, 0))) / 2
rsize = self._get_resizer_size()
xs, ys = self._size
positions = []
rp = np.array(self._get_patch_xy())
p = rp - rsize + dl # Top left
positions.append(p)
p = rp + (xs - dl[0], -rsize[1] + dl[1]) # Top right
positions.append(p)
p = rp + (-rsize[0] + dl[0], ys - dl[1]) # Bottom left
positions.append(p)
p = rp + (xs - dl[0], ys - dl[1]) # Bottom right
positions.append(p)
return positions
def _set_patch(self):
"""Creates the resizer handles, irregardless of whether they will be
used or not.
"""
if hasattr(super(ResizersMixin, self), '_set_patch'):
super(ResizersMixin, self)._set_patch()
if self._resizer_handles:
self._set_resizers(False, self.ax)
self._resizer_handles = []
rsize = self._get_resizer_size()
pos = self._get_resizer_pos()
for i in range(len(pos)):
r = plt.Rectangle(pos[i], rsize[0], rsize[1], animated=self.blit,
fill=True, lw=0, fc=self.resize_color,
picker=True,)
self._resizer_handles.append(r)
def set_on(self, value):
"""Turns on/off resizers whet widget is turned on/off.
"""
if self.resizers and value != self._resizers_on:
self._set_resizers(value, self.ax)
if hasattr(super(ResizersMixin, self), 'set_on'):
super(ResizersMixin, self).set_on(value)
def onpick(self, event):
"""Picking of main patch is same as for widget base, but this also
handles picking of the resize handles. If a resize handle is picked,
`picked` is set to `True`, and `resizer_picked` is set to an integer
indicating which handle was picked (0-3 for top left, top right, bottom
left, bottom right). It is set to `False` if another widget was picked.
If the main patch is picked, the offset from the picked pixel to the
`position` is stored in `pick_offset`. This can be used in e.g.
`_onmousemove` to ease dragging code (prevent widget center/corner
snapping to mouse).
"""
if event.artist in self._resizer_handles:
corner = self._resizer_handles.index(event.artist)
self.resizer_picked = corner
self.picked = True
elif self.picked:
if self.resizers and not self._resizers_on:
self._set_resizers(True, self.ax)
x = event.mouseevent.xdata
y = event.mouseevent.ydata
self.pick_offset = (x - self._pos[0], y - self._pos[1])
self.resizer_picked = False
else:
self._set_resizers(False, self.ax)
if hasattr(super(ResizersMixin, self), 'onpick'):
super(ResizersMixin, self).onpick(event)
def _add_patch_to(self, ax):
"""Same as widget base, but also adds resizers if 'resizers' property
is True.
"""
if self.resizers:
self._set_resizers(True, ax)
if hasattr(super(ResizersMixin, self), '_add_patch_to'):
super(ResizersMixin, self)._add_patch_to(ax)
| gpl-3.0 |
WindCanDie/spark | python/pyspark/sql/functions.py | 5 | 133419 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
A collection of builtin functions
"""
import sys
import functools
import warnings
if sys.version < "3":
from itertools import imap as map
if sys.version >= '3':
basestring = str
from pyspark import since, SparkContext
from pyspark.rdd import ignore_unicode_prefix, PythonEvalType
from pyspark.sql.column import Column, _to_java_column, _to_seq, _create_column_from_literal
from pyspark.sql.dataframe import DataFrame
from pyspark.sql.types import StringType, DataType
# Keep UserDefinedFunction import for backwards compatible import; moved in SPARK-22409
from pyspark.sql.udf import UserDefinedFunction, _create_udf
def _create_function(name, doc=""):
""" Create a function for aggregator by name"""
def _(col):
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)(col._jc if isinstance(col, Column) else col)
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
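# For illustration only: the factory above is what turns an entry such as
# 'sqrt' in the `_functions` dict further below into a module-level
# `sqrt(col)` wrapper, roughly equivalent to writing by hand:
#
#     def sqrt(col):
#         sc = SparkContext._active_spark_context
#         jc = sc._jvm.functions.sqrt(col._jc if isinstance(col, Column) else col)
#         return Column(jc)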
def _wrap_deprecated_function(func, message):
""" Wrap the deprecated function to print out deprecation warnings"""
def _(col):
warnings.warn(message, DeprecationWarning)
return func(col)
return functools.wraps(func)(_)
def _create_binary_mathfunction(name, doc=""):
""" Create a binary mathfunction by name"""
def _(col1, col2):
sc = SparkContext._active_spark_context
# users might write ints for simplicity. This would throw an error on the JVM side.
jc = getattr(sc._jvm.functions, name)(col1._jc if isinstance(col1, Column) else float(col1),
col2._jc if isinstance(col2, Column) else float(col2))
return Column(jc)
_.__name__ = name
_.__doc__ = doc
return _
def _create_window_function(name, doc=''):
""" Create a window function by name """
def _():
sc = SparkContext._active_spark_context
jc = getattr(sc._jvm.functions, name)()
return Column(jc)
_.__name__ = name
_.__doc__ = 'Window function: ' + doc
return _
_lit_doc = """
Creates a :class:`Column` of literal value.
>>> df.select(lit(5).alias('height')).withColumn('spark_user', lit(True)).take(1)
[Row(height=5, spark_user=True)]
"""
_functions = {
'lit': _lit_doc,
'col': 'Returns a :class:`Column` based on the given column name.',
'column': 'Returns a :class:`Column` based on the given column name.',
'asc': 'Returns a sort expression based on the ascending order of the given column name.',
'desc': 'Returns a sort expression based on the descending order of the given column name.',
'upper': 'Converts a string expression to upper case.',
'lower': 'Converts a string expression to lower case.',
'sqrt': 'Computes the square root of the specified float value.',
'abs': 'Computes the absolute value.',
'max': 'Aggregate function: returns the maximum value of the expression in a group.',
'min': 'Aggregate function: returns the minimum value of the expression in a group.',
'count': 'Aggregate function: returns the number of items in a group.',
'sum': 'Aggregate function: returns the sum of all values in the expression.',
'avg': 'Aggregate function: returns the average of the values in a group.',
'mean': 'Aggregate function: returns the average of the values in a group.',
'sumDistinct': 'Aggregate function: returns the sum of distinct values in the expression.',
}
_functions_1_4 = {
# unary math functions
'acos': ':return: inverse cosine of `col`, as if computed by `java.lang.Math.acos()`',
'asin': ':return: inverse sine of `col`, as if computed by `java.lang.Math.asin()`',
'atan': ':return: inverse tangent of `col`, as if computed by `java.lang.Math.atan()`',
'cbrt': 'Computes the cube-root of the given value.',
'ceil': 'Computes the ceiling of the given value.',
'cos': """:param col: angle in radians
:return: cosine of the angle, as if computed by `java.lang.Math.cos()`.""",
'cosh': """:param col: hyperbolic angle
:return: hyperbolic cosine of the angle, as if computed by `java.lang.Math.cosh()`""",
'exp': 'Computes the exponential of the given value.',
'expm1': 'Computes the exponential of the given value minus one.',
'floor': 'Computes the floor of the given value.',
'log': 'Computes the natural logarithm of the given value.',
'log10': 'Computes the logarithm of the given value in Base 10.',
'log1p': 'Computes the natural logarithm of the given value plus one.',
'rint': 'Returns the double value that is closest in value to the argument and' +
' is equal to a mathematical integer.',
'signum': 'Computes the signum of the given value.',
'sin': """:param col: angle in radians
:return: sine of the angle, as if computed by `java.lang.Math.sin()`""",
'sinh': """:param col: hyperbolic angle
:return: hyperbolic sine of the given value,
as if computed by `java.lang.Math.sinh()`""",
'tan': """:param col: angle in radians
:return: tangent of the given value, as if computed by `java.lang.Math.tan()`""",
'tanh': """:param col: hyperbolic angle
:return: hyperbolic tangent of the given value,
as if computed by `java.lang.Math.tanh()`""",
'toDegrees': '.. note:: Deprecated in 2.1, use :func:`degrees` instead.',
'toRadians': '.. note:: Deprecated in 2.1, use :func:`radians` instead.',
'bitwiseNOT': 'Computes bitwise not.',
}
_functions_2_4 = {
'asc_nulls_first': 'Returns a sort expression based on the ascending order of the given' +
' column name, and null values appear before non-null values.',
'asc_nulls_last': 'Returns a sort expression based on the ascending order of the given' +
' column name, and null values appear after non-null values.',
'desc_nulls_first': 'Returns a sort expression based on the descending order of the given' +
' column name, and null values appear before non-null values.',
'desc_nulls_last': 'Returns a sort expression based on the descending order of the given' +
' column name, and null values appear after non-null values.',
}
_collect_list_doc = """
Aggregate function: returns a list of objects with duplicates.
.. note:: The function is non-deterministic because the order of collected results depends
on order of rows which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_list('age')).collect()
[Row(collect_list(age)=[2, 5, 5])]
"""
_collect_set_doc = """
Aggregate function: returns a set of objects with duplicate elements eliminated.
.. note:: The function is non-deterministic because the order of collected results depends
on order of rows which may be non-deterministic after a shuffle.
>>> df2 = spark.createDataFrame([(2,), (5,), (5,)], ('age',))
>>> df2.agg(collect_set('age')).collect()
[Row(collect_set(age)=[5, 2])]
"""
_functions_1_6 = {
# unary math functions
'stddev': 'Aggregate function: returns the unbiased sample standard deviation of' +
' the expression in a group.',
'stddev_samp': 'Aggregate function: returns the unbiased sample standard deviation of' +
' the expression in a group.',
'stddev_pop': 'Aggregate function: returns population standard deviation of' +
' the expression in a group.',
'variance': 'Aggregate function: returns the population variance of the values in a group.',
'var_samp': 'Aggregate function: returns the unbiased variance of the values in a group.',
'var_pop': 'Aggregate function: returns the population variance of the values in a group.',
'skewness': 'Aggregate function: returns the skewness of the values in a group.',
'kurtosis': 'Aggregate function: returns the kurtosis of the values in a group.',
'collect_list': _collect_list_doc,
'collect_set': _collect_set_doc
}
_functions_2_1 = {
# unary math functions
'degrees': """
Converts an angle measured in radians to an approximately equivalent angle
measured in degrees.
:param col: angle in radians
:return: angle in degrees, as if computed by `java.lang.Math.toDegrees()`
""",
'radians': """
Converts an angle measured in degrees to an approximately equivalent angle
measured in radians.
:param col: angle in degrees
:return: angle in radians, as if computed by `java.lang.Math.toRadians()`
""",
}
# math functions that take two arguments as input
_binary_mathfunctions = {
'atan2': """
:param col1: coordinate on y-axis
:param col2: coordinate on x-axis
:return: the `theta` component of the point
(`r`, `theta`)
in polar coordinates that corresponds to the point
(`x`, `y`) in Cartesian coordinates,
as if computed by `java.lang.Math.atan2()`
""",
'hypot': 'Computes ``sqrt(a^2 + b^2)`` without intermediate overflow or underflow.',
'pow': 'Returns the value of the first argument raised to the power of the second argument.',
}
_window_functions = {
'row_number':
"""returns a sequential number starting at 1 within a window partition.""",
'dense_rank':
"""returns the rank of rows within a window partition, without any gaps.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third. Rank would give me sequential numbers, making
the person that came in third place (after the ties) would register as coming in fifth.
This is equivalent to the DENSE_RANK function in SQL.""",
'rank':
"""returns the rank of rows within a window partition.
The difference between rank and dense_rank is that dense_rank leaves no gaps in ranking
sequence when there are ties. That is, if you were ranking a competition using dense_rank
and had three people tie for second place, you would say that all three were in second
place and that the next person came in third. Rank would give sequential numbers, so that
the person that came in third place (after the ties) would register as coming in fifth.
This is equivalent to the RANK function in SQL.""",
'cume_dist':
"""returns the cumulative distribution of values within a window partition,
i.e. the fraction of rows that are below the current row.""",
'percent_rank':
"""returns the relative rank (i.e. percentile) of rows within a window partition.""",
}
# Wraps deprecated functions (keys) with the messages (values).
_functions_deprecated = {
}
for _name, _doc in _functions.items():
globals()[_name] = since(1.3)(_create_function(_name, _doc))
for _name, _doc in _functions_1_4.items():
globals()[_name] = since(1.4)(_create_function(_name, _doc))
for _name, _doc in _binary_mathfunctions.items():
globals()[_name] = since(1.4)(_create_binary_mathfunction(_name, _doc))
for _name, _doc in _window_functions.items():
globals()[_name] = since(1.6)(_create_window_function(_name, _doc))
for _name, _doc in _functions_1_6.items():
globals()[_name] = since(1.6)(_create_function(_name, _doc))
for _name, _doc in _functions_2_1.items():
globals()[_name] = since(2.1)(_create_function(_name, _doc))
for _name, _message in _functions_deprecated.items():
globals()[_name] = _wrap_deprecated_function(globals()[_name], _message)
for _name, _doc in _functions_2_4.items():
globals()[_name] = since(2.4)(_create_function(_name, _doc))
del _name, _doc
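# For illustration only (assumes an active SparkSession named `spark` and the
# `df` used in the doctests of this module): the generated wrappers are used
# like any other column expression, e.g.
#
#     df.select(lower(df.name), sqrt(df.age)).show()
#     df.groupBy('name').agg(max('age'), avg('age')).show()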
@since(2.1)
def approx_count_distinct(col, rsd=None):
"""Aggregate function: returns a new :class:`Column` for approximate distinct count of
column `col`.
:param rsd: maximum estimation error allowed (default = 0.05). For rsd < 0.01, it is more
efficient to use :func:`countDistinct`
>>> df.agg(approx_count_distinct(df.age).alias('distinct_ages')).collect()
[Row(distinct_ages=2)]
"""
sc = SparkContext._active_spark_context
if rsd is None:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col))
else:
jc = sc._jvm.functions.approx_count_distinct(_to_java_column(col), rsd)
return Column(jc)
@since(1.6)
def broadcast(df):
"""Marks a DataFrame as small enough for use in broadcast joins."""
sc = SparkContext._active_spark_context
return DataFrame(sc._jvm.functions.broadcast(df._jdf), df.sql_ctx)
@since(1.4)
def coalesce(*cols):
"""Returns the first column that is not null.
>>> cDf = spark.createDataFrame([(None, None), (1, None), (None, 2)], ("a", "b"))
>>> cDf.show()
+----+----+
| a| b|
+----+----+
|null|null|
| 1|null|
|null| 2|
+----+----+
>>> cDf.select(coalesce(cDf["a"], cDf["b"])).show()
+--------------+
|coalesce(a, b)|
+--------------+
| null|
| 1|
| 2|
+--------------+
>>> cDf.select('*', coalesce(cDf["a"], lit(0.0))).show()
+----+----+----------------+
| a| b|coalesce(a, 0.0)|
+----+----+----------------+
|null|null| 0.0|
| 1|null| 1.0|
|null| 2| 0.0|
+----+----+----------------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.coalesce(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def corr(col1, col2):
"""Returns a new :class:`Column` for the Pearson Correlation Coefficient for ``col1``
and ``col2``.
>>> a = range(20)
>>> b = [2 * x for x in range(20)]
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(corr("a", "b").alias('c')).collect()
[Row(c=1.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.corr(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_pop(col1, col2):
"""Returns a new :class:`Column` for the population covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_pop("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_pop(_to_java_column(col1), _to_java_column(col2)))
@since(2.0)
def covar_samp(col1, col2):
"""Returns a new :class:`Column` for the sample covariance of ``col1`` and ``col2``.
>>> a = [1] * 10
>>> b = [1] * 10
>>> df = spark.createDataFrame(zip(a, b), ["a", "b"])
>>> df.agg(covar_samp("a", "b").alias('c')).collect()
[Row(c=0.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.covar_samp(_to_java_column(col1), _to_java_column(col2)))
@since(1.3)
def countDistinct(col, *cols):
"""Returns a new :class:`Column` for distinct count of ``col`` or ``cols``.
>>> df.agg(countDistinct(df.age, df.name).alias('c')).collect()
[Row(c=2)]
>>> df.agg(countDistinct("age", "name").alias('c')).collect()
[Row(c=2)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.countDistinct(_to_java_column(col), _to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.3)
def first(col, ignorenulls=False):
"""Aggregate function: returns the first value in a group.
The function by default returns the first values it sees. It will return the first non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. note:: The function is non-deterministic because its results depends on order of rows which
may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.first(_to_java_column(col), ignorenulls)
return Column(jc)
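# For illustration only (hypothetical frame, not a doctest): `ignorenulls`
# controls whether leading nulls are skipped, e.g.
#
#     df_n = spark.createDataFrame([(None,), (5,), (3,)], "v: int")
#     df_n.agg(first("v").alias("f")).collect()                    # may be null
#     df_n.agg(first("v", ignorenulls=True).alias("f")).collect()  # first non-null value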
@since(2.0)
def grouping(col):
"""
Aggregate function: indicates whether a specified column in a GROUP BY list is aggregated
or not, returns 1 for aggregated or 0 for not aggregated in the result set.
>>> df.cube("name").agg(grouping("name"), sum("age")).orderBy("name").show()
+-----+--------------+--------+
| name|grouping(name)|sum(age)|
+-----+--------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+--------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping(_to_java_column(col))
return Column(jc)
@since(2.0)
def grouping_id(*cols):
"""
Aggregate function: returns the level of grouping, equals to
(grouping(c1) << (n-1)) + (grouping(c2) << (n-2)) + ... + grouping(cn)
.. note:: The list of columns should match with grouping columns exactly, or empty (means all
the grouping columns).
>>> df.cube("name").agg(grouping_id(), sum("age")).orderBy("name").show()
+-----+-------------+--------+
| name|grouping_id()|sum(age)|
+-----+-------------+--------+
| null| 1| 7|
|Alice| 0| 2|
| Bob| 0| 5|
+-----+-------------+--------+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.grouping_id(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.6)
def input_file_name():
"""Creates a string column for the file name of the current Spark task.
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.input_file_name())
@since(1.6)
def isnan(col):
"""An expression that returns true iff the column is NaN.
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(isnan("a").alias("r1"), isnan(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnan(_to_java_column(col)))
@since(1.6)
def isnull(col):
"""An expression that returns true iff the column is null.
>>> df = spark.createDataFrame([(1, None), (None, 2)], ("a", "b"))
>>> df.select(isnull("a").alias("r1"), isnull(df.a).alias("r2")).collect()
[Row(r1=False, r2=False), Row(r1=True, r2=True)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.isnull(_to_java_column(col)))
@since(1.3)
def last(col, ignorenulls=False):
"""Aggregate function: returns the last value in a group.
The function by default returns the last values it sees. It will return the last non-null
value it sees when ignoreNulls is set to true. If all values are null, then null is returned.
.. note:: The function is non-deterministic because its results depends on order of rows
which may be non-deterministic after a shuffle.
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.last(_to_java_column(col), ignorenulls)
return Column(jc)
@since(1.6)
def monotonically_increasing_id():
"""A column that generates monotonically increasing 64-bit integers.
The generated ID is guaranteed to be monotonically increasing and unique, but not consecutive.
The current implementation puts the partition ID in the upper 31 bits, and the record number
within each partition in the lower 33 bits. The assumption is that the data frame has
less than 1 billion partitions, and each partition has less than 8 billion records.
.. note:: The function is non-deterministic because its result depends on partition IDs.
As an example, consider a :class:`DataFrame` with two partitions, each with 3 records.
This expression would return the following IDs:
0, 1, 2, 8589934592 (1L << 33), 8589934593, 8589934594.
>>> df0 = sc.parallelize(range(2), 2).mapPartitions(lambda x: [(1,), (2,), (3,)]).toDF(['col1'])
>>> df0.select(monotonically_increasing_id().alias('id')).collect()
[Row(id=0), Row(id=1), Row(id=2), Row(id=8589934592), Row(id=8589934593), Row(id=8589934594)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.monotonically_increasing_id())
@since(1.6)
def nanvl(col1, col2):
"""Returns col1 if it is not NaN, or col2 if col1 is NaN.
Both inputs should be floating point columns (:class:`DoubleType` or :class:`FloatType`).
>>> df = spark.createDataFrame([(1.0, float('nan')), (float('nan'), 2.0)], ("a", "b"))
>>> df.select(nanvl("a", "b").alias("r1"), nanvl(df.a, df.b).alias("r2")).collect()
[Row(r1=1.0, r2=1.0), Row(r1=2.0, r2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.nanvl(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(1.4)
def rand(seed=None):
"""Generates a random column with independent and identically distributed (i.i.d.) samples
from U[0.0, 1.0].
.. note:: The function is non-deterministic in general case.
>>> df.withColumn('rand', rand(seed=42) * 3).collect()
[Row(age=2, name=u'Alice', rand=1.1568609015300986),
Row(age=5, name=u'Bob', rand=1.403379671529166)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.rand(seed)
else:
jc = sc._jvm.functions.rand()
return Column(jc)
@ignore_unicode_prefix
@since(1.4)
def randn(seed=None):
"""Generates a column with independent and identically distributed (i.i.d.) samples from
the standard normal distribution.
.. note:: The function is non-deterministic in general case.
>>> df.withColumn('randn', randn(seed=42)).collect()
[Row(age=2, name=u'Alice', randn=-0.7556247885860078),
Row(age=5, name=u'Bob', randn=-0.0861619008451133)]
"""
sc = SparkContext._active_spark_context
if seed is not None:
jc = sc._jvm.functions.randn(seed)
else:
jc = sc._jvm.functions.randn()
return Column(jc)
@since(1.5)
def round(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_UP rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(round('a', 0).alias('r')).collect()
[Row(r=3.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.round(_to_java_column(col), scale))
@since(2.0)
def bround(col, scale=0):
"""
Round the given value to `scale` decimal places using HALF_EVEN rounding mode if `scale` >= 0
or at integral part when `scale` < 0.
>>> spark.createDataFrame([(2.5,)], ['a']).select(bround('a', 0).alias('r')).collect()
[Row(r=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.bround(_to_java_column(col), scale))
@since(1.5)
def shiftLeft(col, numBits):
"""Shift the given value numBits left.
>>> spark.createDataFrame([(21,)], ['a']).select(shiftLeft('a', 1).alias('r')).collect()
[Row(r=42)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shiftLeft(_to_java_column(col), numBits))
@since(1.5)
def shiftRight(col, numBits):
"""(Signed) shift the given value numBits right.
>>> spark.createDataFrame([(42,)], ['a']).select(shiftRight('a', 1).alias('r')).collect()
[Row(r=21)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRight(_to_java_column(col), numBits)
return Column(jc)
@since(1.5)
def shiftRightUnsigned(col, numBits):
"""Unsigned shift the given value numBits right.
>>> df = spark.createDataFrame([(-42,)], ['a'])
>>> df.select(shiftRightUnsigned('a', 1).alias('r')).collect()
[Row(r=9223372036854775787)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.shiftRightUnsigned(_to_java_column(col), numBits)
return Column(jc)
@since(1.6)
def spark_partition_id():
"""A column for partition ID.
.. note:: This is non-deterministic because it depends on data partitioning and task scheduling.
>>> df.repartition(1).select(spark_partition_id().alias("pid")).collect()
[Row(pid=0), Row(pid=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.spark_partition_id())
@since(1.5)
def expr(str):
"""Parses the expression string into the column that it represents
>>> df.select(expr("length(name)")).collect()
[Row(length(name)=5), Row(length(name)=3)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.expr(str))
@ignore_unicode_prefix
@since(1.4)
def struct(*cols):
"""Creates a new struct column.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> df.select(struct('age', 'name').alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
>>> df.select(struct([df.age, df.name]).alias("struct")).collect()
[Row(struct=Row(age=2, name=u'Alice')), Row(struct=Row(age=5, name=u'Bob'))]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.struct(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def greatest(*cols):
"""
Returns the greatest value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(greatest(df.a, df.b, df.c).alias("greatest")).collect()
[Row(greatest=4)]
"""
if len(cols) < 2:
raise ValueError("greatest should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.greatest(_to_seq(sc, cols, _to_java_column)))
@since(1.5)
def least(*cols):
"""
Returns the least value of the list of column names, skipping null values.
This function takes at least 2 parameters. It will return null iff all parameters are null.
>>> df = spark.createDataFrame([(1, 4, 3)], ['a', 'b', 'c'])
>>> df.select(least(df.a, df.b, df.c).alias("least")).collect()
[Row(least=1)]
"""
if len(cols) < 2:
raise ValueError("least should take at least two columns")
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.least(_to_seq(sc, cols, _to_java_column)))
@since(1.4)
def when(condition, value):
"""Evaluates a list of conditions and returns one of multiple possible result expressions.
If :func:`Column.otherwise` is not invoked, None is returned for unmatched conditions.
:param condition: a boolean :class:`Column` expression.
:param value: a literal value, or a :class:`Column` expression.
>>> df.select(when(df['age'] == 2, 3).otherwise(4).alias("age")).collect()
[Row(age=3), Row(age=4)]
>>> df.select(when(df.age == 2, df.age + 1).alias("age")).collect()
[Row(age=3), Row(age=None)]
"""
sc = SparkContext._active_spark_context
if not isinstance(condition, Column):
raise TypeError("condition should be a Column")
v = value._jc if isinstance(value, Column) else value
jc = sc._jvm.functions.when(condition._jc, v)
return Column(jc)
@since(1.5)
def log(arg1, arg2=None):
"""Returns the first argument-based logarithm of the second argument.
If there is only one argument, then this takes the natural logarithm of the argument.
>>> df.select(log(10.0, df.age).alias('ten')).rdd.map(lambda l: str(l.ten)[:7]).collect()
['0.30102', '0.69897']
>>> df.select(log(df.age).alias('e')).rdd.map(lambda l: str(l.e)[:7]).collect()
['0.69314', '1.60943']
"""
sc = SparkContext._active_spark_context
if arg2 is None:
jc = sc._jvm.functions.log(_to_java_column(arg1))
else:
jc = sc._jvm.functions.log(arg1, _to_java_column(arg2))
return Column(jc)
@since(1.5)
def log2(col):
"""Returns the base-2 logarithm of the argument.
>>> spark.createDataFrame([(4,)], ['a']).select(log2('a').alias('log2')).collect()
[Row(log2=2.0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.log2(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def conv(col, fromBase, toBase):
"""
Convert a number in a string column from one base to another.
>>> df = spark.createDataFrame([("010101",)], ['n'])
>>> df.select(conv(df.n, 2, 16).alias('hex')).collect()
[Row(hex=u'15')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.conv(_to_java_column(col), fromBase, toBase))
@since(1.5)
def factorial(col):
"""
Computes the factorial of the given value.
>>> df = spark.createDataFrame([(5,)], ['n'])
>>> df.select(factorial(df.n).alias('f')).collect()
[Row(f=120)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.factorial(_to_java_column(col)))
# --------------- Window functions ------------------------
@since(1.4)
def lag(col, offset=1, default=None):
"""
Window function: returns the value that is `offset` rows before the current row, and
`default` if there are fewer than `offset` rows before the current row. For example,
an `offset` of one will return the previous row at any given point in the window partition.
This is equivalent to the LAG function in SQL.
:param col: name of column or expression
:param offset: number of rows to look back
:param default: default value to return when the offset row does not exist
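A usage sketch (illustrative only; assumes the ``df`` example used by other doctests in this
module and is skipped because it needs an explicit window specification):
>>> from pyspark.sql import Window
>>> w = Window.orderBy("age")
>>> df.select("name", "age", lag("age", 1).over(w).alias("prev_age")).collect()  # doctest: +SKIP
[Row(name=u'Alice', age=2, prev_age=None), Row(name=u'Bob', age=5, prev_age=2)]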
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lag(_to_java_column(col), offset, default))
@since(1.4)
def lead(col, offset=1, default=None):
"""
Window function: returns the value that is `offset` rows after the current row, and
`default` if there are fewer than `offset` rows after the current row. For example,
an `offset` of one will return the next row at any given point in the window partition.
This is equivalent to the LEAD function in SQL.
:param col: name of column or expression
:param offset: number of rows to look ahead
:param default: default value to return when the offset row does not exist
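A usage sketch (illustrative only; assumes the ``df`` example used by other doctests in this
module and is skipped because it needs an explicit window specification):
>>> from pyspark.sql import Window
>>> w = Window.orderBy("age")
>>> df.select("name", "age", lead("age", 1).over(w).alias("next_age")).collect()  # doctest: +SKIP
[Row(name=u'Alice', age=2, next_age=5), Row(name=u'Bob', age=5, next_age=None)]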
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lead(_to_java_column(col), offset, default))
@since(1.4)
def ntile(n):
"""
Window function: returns the ntile group id (from 1 to `n` inclusive)
in an ordered window partition. For example, if `n` is 4, the first
quarter of the rows will get value 1, the second quarter will get 2,
the third quarter will get 3, and the last quarter will get 4.
This is equivalent to the NTILE function in SQL.
:param n: an integer
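A usage sketch (illustrative only; assumes the ``df`` example used by other doctests in this
module and is skipped because it needs an explicit window specification):
>>> from pyspark.sql import Window
>>> w = Window.orderBy("age")
>>> df.select("age", ntile(2).over(w).alias("bucket")).collect()  # doctest: +SKIP
[Row(age=2, bucket=1), Row(age=5, bucket=2)]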
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.ntile(int(n)))
# ---------------------- Date/Timestamp functions ------------------------------
@since(1.5)
def current_date():
"""
Returns the current date as a :class:`DateType` column.
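A usage sketch (skipped because the output depends on the date the doctest runs):
>>> spark.range(1).select(current_date().alias('today')).collect()  # doctest: +SKIP
[Row(today=datetime.date(2015, 4, 8))]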
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_date())
@since(1.5)
def current_timestamp():
"""
Returns the current timestamp as a :class:`TimestampType` column.
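A usage sketch (skipped because the output depends on the time the doctest runs):
>>> spark.range(1).select(current_timestamp().alias('now')).collect()  # doctest: +SKIP
[Row(now=datetime.datetime(2015, 4, 8, 13, 8, 15, 123000))]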
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.current_timestamp())
@ignore_unicode_prefix
@since(1.5)
def date_format(date, format):
"""
Converts a date/timestamp/string to a value of string in the format specified by the date
format given by the second argument.
A pattern could be for instance `dd.MM.yyyy` and could return a string like '18.03.1993'. All
pattern letters of the Java class `java.time.format.DateTimeFormatter` can be used.
.. note:: Whenever possible, use specialized functions like `year`. These benefit from a
specialized implementation.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_format('dt', 'MM/dd/yyy').alias('date')).collect()
[Row(date=u'04/08/2015')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_format(_to_java_column(date), format))
@since(1.5)
def year(col):
"""
Extract the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(year('dt').alias('year')).collect()
[Row(year=2015)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.year(_to_java_column(col)))
@since(1.5)
def quarter(col):
"""
Extract the quarter of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(quarter('dt').alias('quarter')).collect()
[Row(quarter=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.quarter(_to_java_column(col)))
@since(1.5)
def month(col):
"""
Extract the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(month('dt').alias('month')).collect()
[Row(month=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.month(_to_java_column(col)))
@since(2.3)
def dayofweek(col):
"""
Extract the day of the week of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofweek('dt').alias('day')).collect()
[Row(day=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofweek(_to_java_column(col)))
@since(1.5)
def dayofmonth(col):
"""
Extract the day of the month of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofmonth('dt').alias('day')).collect()
[Row(day=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofmonth(_to_java_column(col)))
@since(1.5)
def dayofyear(col):
"""
Extract the day of the year of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(dayofyear('dt').alias('day')).collect()
[Row(day=98)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.dayofyear(_to_java_column(col)))
@since(1.5)
def hour(col):
"""
Extract the hours of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(hour('ts').alias('hour')).collect()
[Row(hour=13)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.hour(_to_java_column(col)))
@since(1.5)
def minute(col):
"""
Extract the minutes of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(minute('ts').alias('minute')).collect()
[Row(minute=8)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.minute(_to_java_column(col)))
@since(1.5)
def second(col):
"""
Extract the seconds of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08 13:08:15',)], ['ts'])
>>> df.select(second('ts').alias('second')).collect()
[Row(second=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.second(_to_java_column(col)))
@since(1.5)
def weekofyear(col):
"""
Extract the week number of a given date as integer.
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(weekofyear(df.dt).alias('week')).collect()
[Row(week=15)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.weekofyear(_to_java_column(col)))
@since(1.5)
def date_add(start, days):
"""
Returns the date that is `days` days after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_add(df.dt, 1).alias('next_date')).collect()
[Row(next_date=datetime.date(2015, 4, 9))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_add(_to_java_column(start), days))
@since(1.5)
def date_sub(start, days):
"""
Returns the date that is `days` days before `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(date_sub(df.dt, 1).alias('prev_date')).collect()
[Row(prev_date=datetime.date(2015, 4, 7))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_sub(_to_java_column(start), days))
@since(1.5)
def datediff(end, start):
"""
Returns the number of days from `start` to `end`.
>>> df = spark.createDataFrame([('2015-04-08','2015-05-10')], ['d1', 'd2'])
>>> df.select(datediff(df.d2, df.d1).alias('diff')).collect()
[Row(diff=32)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.datediff(_to_java_column(end), _to_java_column(start)))
@since(1.5)
def add_months(start, months):
"""
Returns the date that is `months` months after `start`
>>> df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> df.select(add_months(df.dt, 1).alias('next_month')).collect()
[Row(next_month=datetime.date(2015, 5, 8))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.add_months(_to_java_column(start), months))
@since(1.5)
def months_between(date1, date2, roundOff=True):
"""
Returns number of months between dates date1 and date2.
If date1 is later than date2, then the result is positive.
If date1 and date2 are on the same day of month, or both are the last day of month,
returns an integer (time of day will be ignored).
The result is rounded off to 8 digits unless `roundOff` is set to `False`.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', '1996-10-30')], ['date1', 'date2'])
>>> df.select(months_between(df.date1, df.date2).alias('months')).collect()
[Row(months=3.94959677)]
>>> df.select(months_between(df.date1, df.date2, False).alias('months')).collect()
[Row(months=3.9495967741935485)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.months_between(
_to_java_column(date1), _to_java_column(date2), roundOff))
@since(2.2)
def to_date(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.DateType`
using the optionally specified format. Specify formats according to
`DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
By default, it follows casting rules to :class:`pyspark.sql.types.DateType` if the format
is omitted (equivalent to ``col.cast("date")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_date(df.t, 'yyyy-MM-dd HH:mm:ss').alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_date(_to_java_column(col))
else:
jc = sc._jvm.functions.to_date(_to_java_column(col), format)
return Column(jc)
@since(2.2)
def to_timestamp(col, format=None):
"""Converts a :class:`Column` of :class:`pyspark.sql.types.StringType` or
:class:`pyspark.sql.types.TimestampType` into :class:`pyspark.sql.types.TimestampType`
using the optionally specified format. Specify formats according to
`DateTimeFormatter <https://docs.oracle.com/javase/8/docs/api/java/time/format/DateTimeFormatter.html>`_. # noqa
By default, it follows casting rules to :class:`pyspark.sql.types.TimestampType` if the format
is omitted (equivalent to ``col.cast("timestamp")``).
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t).alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
>>> df = spark.createDataFrame([('1997-02-28 10:30:00',)], ['t'])
>>> df.select(to_timestamp(df.t, 'yyyy-MM-dd HH:mm:ss').alias('dt')).collect()
[Row(dt=datetime.datetime(1997, 2, 28, 10, 30))]
"""
sc = SparkContext._active_spark_context
if format is None:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col))
else:
jc = sc._jvm.functions.to_timestamp(_to_java_column(col), format)
return Column(jc)
@since(1.5)
def trunc(date, format):
"""
Returns date truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy' or 'month', 'mon', 'mm'
>>> df = spark.createDataFrame([('1997-02-28',)], ['d'])
>>> df.select(trunc(df.d, 'year').alias('year')).collect()
[Row(year=datetime.date(1997, 1, 1))]
>>> df.select(trunc(df.d, 'mon').alias('month')).collect()
[Row(month=datetime.date(1997, 2, 1))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.trunc(_to_java_column(date), format))
@since(2.3)
def date_trunc(format, timestamp):
"""
Returns timestamp truncated to the unit specified by the format.
:param format: 'year', 'yyyy', 'yy', 'month', 'mon', 'mm',
'day', 'dd', 'hour', 'minute', 'second', 'week', 'quarter'
>>> df = spark.createDataFrame([('1997-02-28 05:02:11',)], ['t'])
>>> df.select(date_trunc('year', df.t).alias('year')).collect()
[Row(year=datetime.datetime(1997, 1, 1, 0, 0))]
>>> df.select(date_trunc('mon', df.t).alias('month')).collect()
[Row(month=datetime.datetime(1997, 2, 1, 0, 0))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.date_trunc(format, _to_java_column(timestamp)))
@since(1.5)
def next_day(date, dayOfWeek):
"""
Returns the first date which is later than the value of the date column.
Day of the week parameter is case insensitive, and accepts:
"Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun".
>>> df = spark.createDataFrame([('2015-07-27',)], ['d'])
>>> df.select(next_day(df.d, 'Sun').alias('date')).collect()
[Row(date=datetime.date(2015, 8, 2))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.next_day(_to_java_column(date), dayOfWeek))
@since(1.5)
def last_day(date):
"""
Returns the last day of the month which the given date belongs to.
>>> df = spark.createDataFrame([('1997-02-10',)], ['d'])
>>> df.select(last_day(df.d).alias('date')).collect()
[Row(date=datetime.date(1997, 2, 28))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.last_day(_to_java_column(date)))
@ignore_unicode_prefix
@since(1.5)
def from_unixtime(timestamp, format="yyyy-MM-dd HH:mm:ss"):
"""
Converts the number of seconds from unix epoch (1970-01-01 00:00:00 UTC) to a string
representing the timestamp of that moment in the current system time zone in the given
format.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([(1428476400,)], ['unix_time'])
>>> time_df.select(from_unixtime('unix_time').alias('ts')).collect()
[Row(ts=u'2015-04-08 00:00:00')]
>>> spark.conf.unset("spark.sql.session.timeZone")
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.from_unixtime(_to_java_column(timestamp), format))
@since(1.5)
def unix_timestamp(timestamp=None, format='yyyy-MM-dd HH:mm:ss'):
"""
Converts a time string with the given pattern ('yyyy-MM-dd HH:mm:ss', by default)
to a Unix timestamp (in seconds), using the default timezone and the default
locale. Returns null if the conversion fails.
If `timestamp` is None, the current timestamp is returned.
>>> spark.conf.set("spark.sql.session.timeZone", "America/Los_Angeles")
>>> time_df = spark.createDataFrame([('2015-04-08',)], ['dt'])
>>> time_df.select(unix_timestamp('dt', 'yyyy-MM-dd').alias('unix_time')).collect()
[Row(unix_time=1428476400)]
>>> spark.conf.unset("spark.sql.session.timeZone")
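A sketch of the no-argument form (skipped because the current timestamp is non-deterministic):
>>> spark.range(1).select(unix_timestamp().alias('now')).collect()  # doctest: +SKIP
[Row(now=1428476400)]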
"""
sc = SparkContext._active_spark_context
if timestamp is None:
return Column(sc._jvm.functions.unix_timestamp())
return Column(sc._jvm.functions.unix_timestamp(_to_java_column(timestamp), format))
@since(1.5)
def from_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in UTC, and
renders that timestamp as a timestamp in the given time zone.
However, a timestamp in Spark represents a number of microseconds from the Unix epoch, which is
not timezone-agnostic. So in Spark this function just shifts the timestamp value from the UTC
timezone to the given timezone.
This function may return a confusing result if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(from_utc_timestamp(df.ts, "PST").alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 2, 30))]
>>> df.select(from_utc_timestamp(df.ts, df.tz).alias('local_time')).collect()
[Row(local_time=datetime.datetime(1997, 2, 28, 19, 30))]
"""
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.from_utc_timestamp(_to_java_column(timestamp), tz))
@since(1.5)
def to_utc_timestamp(timestamp, tz):
"""
This is a common function for databases supporting TIMESTAMP WITHOUT TIMEZONE. This function
takes a timestamp which is timezone-agnostic, and interprets it as a timestamp in the given
timezone, and renders that timestamp as a timestamp in UTC.
However, a timestamp in Spark represents a number of microseconds from the Unix epoch, which is
not timezone-agnostic. So in Spark this function just shifts the timestamp value from the given
timezone to the UTC timezone.
This function may return a confusing result if the input is a string with a timezone, e.g.
'2018-03-13T06:18:23+00:00'. The reason is that Spark first casts the string to a timestamp
according to the timezone in the string, and finally displays the result by converting the
timestamp to a string according to the session local timezone.
:param timestamp: the column that contains timestamps
:param tz: a string that has the ID of timezone, e.g. "GMT", "America/Los_Angeles", etc
.. versionchanged:: 2.4
`tz` can take a :class:`Column` containing timezone ID strings.
>>> df = spark.createDataFrame([('1997-02-28 10:30:00', 'JST')], ['ts', 'tz'])
>>> df.select(to_utc_timestamp(df.ts, "PST").alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 18, 30))]
>>> df.select(to_utc_timestamp(df.ts, df.tz).alias('utc_time')).collect()
[Row(utc_time=datetime.datetime(1997, 2, 28, 1, 30))]
"""
sc = SparkContext._active_spark_context
if isinstance(tz, Column):
tz = _to_java_column(tz)
return Column(sc._jvm.functions.to_utc_timestamp(_to_java_column(timestamp), tz))
@since(2.0)
@ignore_unicode_prefix
def window(timeColumn, windowDuration, slideDuration=None, startTime=None):
"""Bucketize rows into one or more time windows given a timestamp specifying column. Window
starts are inclusive but the window ends are exclusive, e.g. 12:05 will be in the window
[12:05,12:10) but not in [12:00,12:05). Windows can support microsecond precision. Windows in
the order of months are not supported.
The time column must be of :class:`pyspark.sql.types.TimestampType`.
Durations are provided as strings, e.g. '1 second', '1 day 12 hours', '2 minutes'. Valid
interval strings are 'week', 'day', 'hour', 'minute', 'second', 'millisecond', 'microsecond'.
If the ``slideDuration`` is not provided, the windows will be tumbling windows.
The startTime is the offset with respect to 1970-01-01 00:00:00 UTC with which to start
window intervals. For example, in order to have hourly tumbling windows that start 15 minutes
past the hour, e.g. 12:15-13:15, 13:15-14:15... provide `startTime` as `15 minutes`.
The output column will be a struct called 'window' by default with the nested columns 'start'
and 'end', where 'start' and 'end' will be of :class:`pyspark.sql.types.TimestampType`.
>>> df = spark.createDataFrame([("2016-03-11 09:00:07", 1)]).toDF("date", "val")
>>> w = df.groupBy(window("date", "5 seconds")).agg(sum("val").alias("sum"))
>>> w.select(w.window.start.cast("string").alias("start"),
... w.window.end.cast("string").alias("end"), "sum").collect()
[Row(start=u'2016-03-11 09:00:05', end=u'2016-03-11 09:00:10', sum=1)]
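A sliding-window sketch (illustrative only; reuses ``df`` from above and is skipped because
the single input row simply falls into two overlapping 10-second windows):
>>> sliding = df.groupBy(window("date", "10 seconds", "5 seconds")).agg(sum("val").alias("sum"))  # doctest: +SKIP
>>> sliding.count()  # doctest: +SKIP
2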
"""
def check_string_field(field, fieldName):
if not field or type(field) is not str:
raise TypeError("%s should be provided as a string" % fieldName)
sc = SparkContext._active_spark_context
time_col = _to_java_column(timeColumn)
check_string_field(windowDuration, "windowDuration")
if slideDuration and startTime:
check_string_field(slideDuration, "slideDuration")
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration, startTime)
elif slideDuration:
check_string_field(slideDuration, "slideDuration")
res = sc._jvm.functions.window(time_col, windowDuration, slideDuration)
elif startTime:
check_string_field(startTime, "startTime")
res = sc._jvm.functions.window(time_col, windowDuration, windowDuration, startTime)
else:
res = sc._jvm.functions.window(time_col, windowDuration)
return Column(res)
# ---------------------------- misc functions ----------------------------------
@since(1.5)
@ignore_unicode_prefix
def crc32(col):
"""
Calculates the cyclic redundancy check value (CRC32) of a binary column and
returns the value as a bigint.
>>> spark.createDataFrame([('ABC',)], ['a']).select(crc32('a').alias('crc32')).collect()
[Row(crc32=2743272264)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.crc32(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def md5(col):
"""Calculates the MD5 digest and returns the value as a 32 character hex string.
>>> spark.createDataFrame([('ABC',)], ['a']).select(md5('a').alias('hash')).collect()
[Row(hash=u'902fbdd2b1df0c4f70b4a5d23525e932')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.md5(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha1(col):
"""Returns the hex string result of SHA-1.
>>> spark.createDataFrame([('ABC',)], ['a']).select(sha1('a').alias('hash')).collect()
[Row(hash=u'3c01bdbb26f358bab27f267924aa2c9a03fcfdb8')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha1(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def sha2(col, numBits):
"""Returns the hex string result of SHA-2 family of hash functions (SHA-224, SHA-256, SHA-384,
and SHA-512). The numBits indicates the desired bit length of the result, which must have a
value of 224, 256, 384, 512, or 0 (which is equivalent to 256).
>>> digests = df.select(sha2(df.name, 256).alias('s')).collect()
>>> digests[0]
Row(s=u'3bc51062973c458d5a6f2d8d64a023246354ad7e064b1e4e009ec8a0699a3043')
>>> digests[1]
Row(s=u'cd9fb1e148ccd8442e5aa74904cc73bf6fb54d1d54d333bd596aa9bb4bb4e961')
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.sha2(_to_java_column(col), numBits)
return Column(jc)
@since(2.0)
def hash(*cols):
"""Calculates the hash code of given columns, and returns the result as an int column.
>>> spark.createDataFrame([('ABC',)], ['a']).select(hash('a').alias('hash')).collect()
[Row(hash=-757602832)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hash(_to_seq(sc, cols, _to_java_column))
return Column(jc)
# ---------------------- String/Binary functions ------------------------------
_string_functions = {
'ascii': 'Computes the numeric value of the first character of the string column.',
'base64': 'Computes the BASE64 encoding of a binary column and returns it as a string column.',
'unbase64': 'Decodes a BASE64 encoded string column and returns it as a binary column.',
'initcap': 'Returns a new string column by converting the first letter of each word to ' +
'uppercase. Words are delimited by whitespace.',
'lower': 'Converts a string column to lower case.',
'upper': 'Converts a string column to upper case.',
'ltrim': 'Trim the spaces from left end for the specified string value.',
'rtrim': 'Trim the spaces from right end for the specified string value.',
'trim': 'Trim the spaces from both ends for the specified string column.',
}
for _name, _doc in _string_functions.items():
globals()[_name] = since(1.5)(_create_function(_name, _doc))
del _name, _doc
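# The loop above generates simple wrappers such as ``upper`` and ``trim``. For example
# (illustrative only): spark.createDataFrame([(' Ab ',)], ['s']).select(trim(upper('s')))
# would yield the single value u'AB'.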
@since(1.5)
@ignore_unicode_prefix
def concat_ws(sep, *cols):
"""
Concatenates multiple input string columns together into a single string column,
using the given separator.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat_ws('-', df.s, df.d).alias('s')).collect()
[Row(s=u'abcd-123')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat_ws(sep, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def decode(col, charset):
"""
Decodes the first argument from a binary to a string using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
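A usage sketch (illustrative only; not run as a doctest):
>>> df = spark.createDataFrame([(bytearray(b'abcd'),)], ['a'])  # doctest: +SKIP
>>> df.select(decode('a', 'UTF-8').alias('s')).collect()  # doctest: +SKIP
[Row(s=u'abcd')]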
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.decode(_to_java_column(col), charset))
@since(1.5)
def encode(col, charset):
"""
Encodes the first argument from a string to a binary using the provided character set
(one of 'US-ASCII', 'ISO-8859-1', 'UTF-8', 'UTF-16BE', 'UTF-16LE', 'UTF-16').
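A usage sketch (illustrative only; not run as a doctest):
>>> df = spark.createDataFrame([('abcd',)], ['s'])  # doctest: +SKIP
>>> df.select(encode('s', 'UTF-8').alias('b')).collect()  # doctest: +SKIP
[Row(b=bytearray(b'abcd'))]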
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.encode(_to_java_column(col), charset))
@ignore_unicode_prefix
@since(1.5)
def format_number(col, d):
"""
Formats the number X to a format like '#,###,###.##', rounded to d decimal places
with HALF_EVEN round mode, and returns the result as a string.
:param col: the column name of the numeric value to be formatted
:param d: the N decimal places
>>> spark.createDataFrame([(5,)], ['a']).select(format_number('a', 4).alias('v')).collect()
[Row(v=u'5.0000')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_number(_to_java_column(col), d))
@ignore_unicode_prefix
@since(1.5)
def format_string(format, *cols):
"""
Formats the arguments in printf-style and returns the result as a string column.
:param format: string that can contain embedded format tags and used as result column's value
:param cols: list of column names (string) or list of :class:`Column` expressions to be used in formatting
>>> df = spark.createDataFrame([(5, "hello")], ['a', 'b'])
>>> df.select(format_string('%d %s', df.a, df.b).alias('v')).collect()
[Row(v=u'5 hello')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.format_string(format, _to_seq(sc, cols, _to_java_column)))
@since(1.5)
def instr(str, substr):
"""
Locate the position of the first occurrence of substr column in the given string.
Returns null if either of the arguments is null.
.. note:: The position is not zero-based, but a 1-based index. Returns 0 if substr
could not be found in str.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(instr(df.s, 'b').alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.instr(_to_java_column(str), substr))
@since(1.5)
@ignore_unicode_prefix
def substring(str, pos, len):
"""
Substring starts at `pos` and is of length `len` when str is String type or
returns the slice of byte array that starts at `pos` in byte and is of length `len`
when str is Binary type.
.. note:: The position is not zero-based, but a 1-based index.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(substring(df.s, 1, 2).alias('s')).collect()
[Row(s=u'ab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring(_to_java_column(str), pos, len))
@since(1.5)
@ignore_unicode_prefix
def substring_index(str, delim, count):
"""
Returns the substring from string str before count occurrences of the delimiter delim.
If count is positive, everything to the left of the final delimiter (counting from the left) is
returned. If count is negative, everything to the right of the final delimiter (counting from the
right) is returned. substring_index performs a case-sensitive match when searching for delim.
>>> df = spark.createDataFrame([('a.b.c.d',)], ['s'])
>>> df.select(substring_index(df.s, '.', 2).alias('s')).collect()
[Row(s=u'a.b')]
>>> df.select(substring_index(df.s, '.', -3).alias('s')).collect()
[Row(s=u'b.c.d')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.substring_index(_to_java_column(str), delim, count))
@ignore_unicode_prefix
@since(1.5)
def levenshtein(left, right):
"""Computes the Levenshtein distance of the two given strings.
>>> df0 = spark.createDataFrame([('kitten', 'sitting',)], ['l', 'r'])
>>> df0.select(levenshtein('l', 'r').alias('d')).collect()
[Row(d=3)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.levenshtein(_to_java_column(left), _to_java_column(right))
return Column(jc)
@since(1.5)
def locate(substr, str, pos=1):
"""
Locate the position of the first occurrence of substr in a string column, after position pos.
.. note:: The position is not zero-based, but a 1-based index. Returns 0 if substr
could not be found in str.
:param substr: a string
:param str: a Column of :class:`pyspark.sql.types.StringType`
:param pos: start position (1-based index)
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(locate('b', df.s, 1).alias('s')).collect()
[Row(s=2)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.locate(substr, _to_java_column(str), pos))
@since(1.5)
@ignore_unicode_prefix
def lpad(col, len, pad):
"""
Left-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(lpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'##abcd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.lpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def rpad(col, len, pad):
"""
Right-pad the string column to width `len` with `pad`.
>>> df = spark.createDataFrame([('abcd',)], ['s',])
>>> df.select(rpad(df.s, 6, '#').alias('s')).collect()
[Row(s=u'abcd##')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.rpad(_to_java_column(col), len, pad))
@since(1.5)
@ignore_unicode_prefix
def repeat(col, n):
"""
Repeats a string column n times, and returns it as a new string column.
>>> df = spark.createDataFrame([('ab',)], ['s',])
>>> df.select(repeat(df.s, 3).alias('s')).collect()
[Row(s=u'ababab')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.repeat(_to_java_column(col), n))
@since(1.5)
@ignore_unicode_prefix
def split(str, pattern, limit=-1):
"""
Splits str around matches of the given pattern.
:param str: a string expression to split
:param pattern: a string representing a regular expression. The regex string should be
a Java regular expression.
:param limit: an integer which controls the number of times `pattern` is applied.
* ``limit > 0``: The resulting array's length will not be more than `limit`, and the
resulting array's last entry will contain all input beyond the last
matched pattern.
* ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting
array can be of any size.
.. versionchanged:: 3.0
`split` now takes an optional `limit` field. If not provided, the default limit value is -1.
>>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',])
>>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect()
[Row(s=[u'one', u'twoBthreeC'])]
>>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect()
[Row(s=[u'one', u'two', u'three', u''])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.split(_to_java_column(str), pattern, limit))
@ignore_unicode_prefix
@since(1.5)
def regexp_extract(str, pattern, idx):
r"""Extract a specific group matched by a Java regex, from the specified string column.
If the regex did not match, or the specified group did not match, an empty string is returned.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)-(\d+)', 1).alias('d')).collect()
[Row(d=u'100')]
>>> df = spark.createDataFrame([('foo',)], ['str'])
>>> df.select(regexp_extract('str', r'(\d+)', 1).alias('d')).collect()
[Row(d=u'')]
>>> df = spark.createDataFrame([('aaaac',)], ['str'])
>>> df.select(regexp_extract('str', '(a+)(b)?(c)', 2).alias('d')).collect()
[Row(d=u'')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_extract(_to_java_column(str), pattern, idx)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def regexp_replace(str, pattern, replacement):
r"""Replace all substrings of the specified string value that match regexp with rep.
>>> df = spark.createDataFrame([('100-200',)], ['str'])
>>> df.select(regexp_replace('str', r'(\d+)', '--').alias('d')).collect()
[Row(d=u'-----')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.regexp_replace(_to_java_column(str), pattern, replacement)
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def initcap(col):
"""Translate the first letter of each word to upper case in the sentence.
>>> spark.createDataFrame([('ab cd',)], ['a']).select(initcap("a").alias('v')).collect()
[Row(v=u'Ab Cd')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.initcap(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def soundex(col):
"""
Returns the SoundEx encoding for a string
>>> df = spark.createDataFrame([("Peters",),("Uhrbach",)], ['name'])
>>> df.select(soundex(df.name).alias("soundex")).collect()
[Row(soundex=u'P362'), Row(soundex=u'U612')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.soundex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def bin(col):
"""Returns the string representation of the binary value of the given column.
>>> df.select(bin(df.age).alias('c')).collect()
[Row(c=u'10'), Row(c=u'101')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.bin(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def hex(col):
"""Computes hex value of the given column, which could be :class:`pyspark.sql.types.StringType`,
:class:`pyspark.sql.types.BinaryType`, :class:`pyspark.sql.types.IntegerType` or
:class:`pyspark.sql.types.LongType`.
>>> spark.createDataFrame([('ABC', 3)], ['a', 'b']).select(hex('a'), hex('b')).collect()
[Row(hex(a)=u'414243', hex(b)=u'3')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.hex(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.5)
def unhex(col):
"""Inverse of hex. Interprets each pair of characters as a hexadecimal number
and converts to the byte representation of number.
>>> spark.createDataFrame([('414243',)], ['a']).select(unhex('a')).collect()
[Row(unhex(a)=bytearray(b'ABC'))]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.unhex(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def length(col):
"""Computes the character length of string data or number of bytes of binary data.
The length of character data includes the trailing spaces. The length of binary data
includes binary zeros.
>>> spark.createDataFrame([('ABC ',)], ['a']).select(length('a').alias('length')).collect()
[Row(length=4)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.length(_to_java_column(col)))
@ignore_unicode_prefix
@since(1.5)
def translate(srcCol, matching, replace):
"""A function translate any character in the `srcCol` by a character in `matching`.
The characters in `replace` is corresponding to the characters in `matching`.
The translate will happen when any character in the string matching with the character
in the `matching`.
>>> spark.createDataFrame([('translate',)], ['a']).select(translate('a', "rnlt", "123") \\
... .alias('r')).collect()
[Row(r=u'1a2s3ae')]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.translate(_to_java_column(srcCol), matching, replace))
# ---------------------- Collection functions ------------------------------
@ignore_unicode_prefix
@since(2.0)
def create_map(*cols):
"""Creates a new map column.
:param cols: list of column names (string) or list of :class:`Column` expressions that are
grouped as key-value pairs, e.g. (key1, value1, key2, value2, ...).
>>> df.select(create_map('name', 'age').alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
>>> df.select(create_map([df.name, df.age]).alias("map")).collect()
[Row(map={u'Alice': 2}), Row(map={u'Bob': 5})]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(2.4)
def map_from_arrays(col1, col2):
"""Creates a new map from two arrays.
:param col1: name of column containing a set of keys. All elements should not be null
:param col2: name of column containing a set of values
>>> df = spark.createDataFrame([([2, 5], ['a', 'b'])], ['k', 'v'])
>>> df.select(map_from_arrays(df.k, df.v).alias("map")).show()
+----------------+
| map|
+----------------+
|[2 -> a, 5 -> b]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_arrays(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def array(*cols):
"""Creates a new array column.
:param cols: list of column names (string) or list of :class:`Column` expressions that have
the same data type.
>>> df.select(array('age', 'age').alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
>>> df.select(array([df.age, df.age]).alias("arr")).collect()
[Row(arr=[2, 2]), Row(arr=[5, 5])]
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.array(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(1.5)
def array_contains(col, value):
"""
Collection function: returns null if the array is null, true if the array contains the
given value, and false otherwise.
:param col: name of column containing array
:param value: value to check for in array
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(array_contains(df.data, "a")).collect()
[Row(array_contains(data, a)=True), Row(array_contains(data, a)=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_contains(_to_java_column(col), value))
@since(2.4)
def arrays_overlap(a1, a2):
"""
Collection function: returns true if the arrays contain any common non-null element; if not,
returns null if both the arrays are non-empty and any of them contains a null element; returns
false otherwise.
>>> df = spark.createDataFrame([(["a", "b"], ["b", "c"]), (["a"], ["b", "c"])], ['x', 'y'])
>>> df.select(arrays_overlap(df.x, df.y).alias("overlap")).collect()
[Row(overlap=True), Row(overlap=False)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_overlap(_to_java_column(a1), _to_java_column(a2)))
@since(2.4)
def slice(x, start, length):
"""
Collection function: returns an array containing all the elements in `x` from index `start`
(or starting from the end if `start` is negative) with the specified `length`.
>>> df = spark.createDataFrame([([1, 2, 3],), ([4, 5],)], ['x'])
>>> df.select(slice(df.x, 2, 2).alias("sliced")).collect()
[Row(sliced=[2, 3]), Row(sliced=[5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.slice(_to_java_column(x), start, length))
@ignore_unicode_prefix
@since(2.4)
def array_join(col, delimiter, null_replacement=None):
"""
Concatenates the elements of `col` using the `delimiter`. Null values are replaced with
`null_replacement` if set, otherwise they are ignored.
>>> df = spark.createDataFrame([(["a", "b", "c"],), (["a", None],)], ['data'])
>>> df.select(array_join(df.data, ",").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a')]
>>> df.select(array_join(df.data, ",", "NULL").alias("joined")).collect()
[Row(joined=u'a,b,c'), Row(joined=u'a,NULL')]
"""
sc = SparkContext._active_spark_context
if null_replacement is None:
return Column(sc._jvm.functions.array_join(_to_java_column(col), delimiter))
else:
return Column(sc._jvm.functions.array_join(
_to_java_column(col), delimiter, null_replacement))
@since(1.5)
@ignore_unicode_prefix
def concat(*cols):
"""
Concatenates multiple input columns together into a single column.
The function works with strings, binary and compatible array columns.
>>> df = spark.createDataFrame([('abcd','123')], ['s', 'd'])
>>> df.select(concat(df.s, df.d).alias('s')).collect()
[Row(s=u'abcd123')]
>>> df = spark.createDataFrame([([1, 2], [3, 4], [5]), ([1, 2], None, [3])], ['a', 'b', 'c'])
>>> df.select(concat(df.a, df.b, df.c).alias("arr")).collect()
[Row(arr=[1, 2, 3, 4, 5]), Row(arr=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.concat(_to_seq(sc, cols, _to_java_column)))
@since(2.4)
def array_position(col, value):
"""
Collection function: Locates the position of the first occurrence of the given value
in the given array. Returns null if either of the arguments is null.
.. note:: The position is not zero-based, but a 1-based index. Returns 0 if the given
value could not be found in the array.
>>> df = spark.createDataFrame([(["c", "b", "a"],), ([],)], ['data'])
>>> df.select(array_position(df.data, "a")).collect()
[Row(array_position(data, a)=3), Row(array_position(data, a)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_position(_to_java_column(col), value))
@ignore_unicode_prefix
@since(2.4)
def element_at(col, extraction):
"""
Collection function: Returns element of array at given index in extraction if col is array.
Returns value for the given key in extraction if col is map.
:param col: name of column containing array or map
:param extraction: index to check for in array or key to check for in map
.. note:: The position is not zero-based, but a 1-based index.
>>> df = spark.createDataFrame([(["a", "b", "c"],), ([],)], ['data'])
>>> df.select(element_at(df.data, 1)).collect()
[Row(element_at(data, 1)=u'a'), Row(element_at(data, 1)=None)]
>>> df = spark.createDataFrame([({"a": 1.0, "b": 2.0},), ({},)], ['data'])
>>> df.select(element_at(df.data, "a")).collect()
[Row(element_at(data, a)=1.0), Row(element_at(data, a)=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.element_at(_to_java_column(col), extraction))
@since(2.4)
def array_remove(col, element):
"""
Collection function: Removes all elements equal to `element` from the given array.
:param col: name of column containing array
:param element: element to be removed from the array
>>> df = spark.createDataFrame([([1, 2, 3, 1, 1],), ([],)], ['data'])
>>> df.select(array_remove(df.data, 1)).collect()
[Row(array_remove(data, 1)=[2, 3]), Row(array_remove(data, 1)=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_remove(_to_java_column(col), element))
@since(2.4)
def array_distinct(col):
"""
Collection function: removes duplicate values from the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3, 2],), ([4, 5, 5, 4],)], ['data'])
>>> df.select(array_distinct(df.data)).collect()
[Row(array_distinct(data)=[1, 2, 3]), Row(array_distinct(data)=[4, 5])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_distinct(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_intersect(col1, col2):
"""
Collection function: returns an array of the elements in the intersection of col1 and col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_intersect(df.c1, df.c2)).collect()
[Row(array_intersect(c1, c2)=[u'a', u'c'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_intersect(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_union(col1, col2):
"""
Collection function: returns an array of the elements in the union of col1 and col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_union(df.c1, df.c2)).collect()
[Row(array_union(c1, c2)=[u'b', u'a', u'c', u'd', u'f'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_union(_to_java_column(col1), _to_java_column(col2)))
@ignore_unicode_prefix
@since(2.4)
def array_except(col1, col2):
"""
Collection function: returns an array of the elements in col1 but not in col2,
without duplicates.
:param col1: name of column containing array
:param col2: name of column containing array
>>> from pyspark.sql import Row
>>> df = spark.createDataFrame([Row(c1=["b", "a", "c"], c2=["c", "d", "a", "f"])])
>>> df.select(array_except(df.c1, df.c2)).collect()
[Row(array_except(c1, c2)=[u'b'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_except(_to_java_column(col1), _to_java_column(col2)))
@since(1.4)
def explode(col):
"""Returns a new row for each element in the given array or map.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(explode(eDF.intlist).alias("anInt")).collect()
[Row(anInt=1), Row(anInt=2), Row(anInt=3)]
>>> eDF.select(explode(eDF.mapfield).alias("key", "value")).show()
+---+-----+
|key|value|
+---+-----+
| a| b|
+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode(_to_java_column(col))
return Column(jc)
@since(2.1)
def posexplode(col):
"""Returns a new row for each element with position in the given array or map.
>>> from pyspark.sql import Row
>>> eDF = spark.createDataFrame([Row(a=1, intlist=[1,2,3], mapfield={"a": "b"})])
>>> eDF.select(posexplode(eDF.intlist)).collect()
[Row(pos=0, col=1), Row(pos=1, col=2), Row(pos=2, col=3)]
>>> eDF.select(posexplode(eDF.mapfield)).show()
+---+---+-----+
|pos|key|value|
+---+---+-----+
| 0| a| b|
+---+---+-----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode(_to_java_column(col))
return Column(jc)
@since(2.3)
def explode_outer(col):
"""Returns a new row for each element in the given array or map.
Unlike explode, if the array/map is null or empty then null is produced.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", explode_outer("a_map")).show()
+---+----------+----+-----+
| id| an_array| key|value|
+---+----------+----+-----+
| 1|[foo, bar]| x| 1.0|
| 2| []|null| null|
| 3| null|null| null|
+---+----------+----+-----+
>>> df.select("id", "a_map", explode_outer("an_array")).show()
+---+----------+----+
| id| a_map| col|
+---+----------+----+
| 1|[x -> 1.0]| foo|
| 1|[x -> 1.0]| bar|
| 2| []|null|
| 3| null|null|
+---+----------+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.explode_outer(_to_java_column(col))
return Column(jc)
@since(2.3)
def posexplode_outer(col):
"""Returns a new row for each element with position in the given array or map.
Unlike posexplode, if the array/map is null or empty then the row (null, null) is produced.
>>> df = spark.createDataFrame(
... [(1, ["foo", "bar"], {"x": 1.0}), (2, [], {}), (3, None, None)],
... ("id", "an_array", "a_map")
... )
>>> df.select("id", "an_array", posexplode_outer("a_map")).show()
+---+----------+----+----+-----+
| id| an_array| pos| key|value|
+---+----------+----+----+-----+
| 1|[foo, bar]| 0| x| 1.0|
| 2| []|null|null| null|
| 3| null|null|null| null|
+---+----------+----+----+-----+
>>> df.select("id", "a_map", posexplode_outer("an_array")).show()
+---+----------+----+----+
| id| a_map| pos| col|
+---+----------+----+----+
| 1|[x -> 1.0]| 0| foo|
| 1|[x -> 1.0]| 1| bar|
| 2| []|null|null|
| 3| null|null|null|
+---+----------+----+----+
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.posexplode_outer(_to_java_column(col))
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def get_json_object(col, path):
"""
Extracts json object from a json string based on json path specified, and returns json string
of the extracted json object. It will return null if the input json string is invalid.
:param col: string column in json format
:param path: path to the json object to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, get_json_object(df.jstring, '$.f1').alias("c0"), \\
... get_json_object(df.jstring, '$.f2').alias("c1") ).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.get_json_object(_to_java_column(col), path)
return Column(jc)
@ignore_unicode_prefix
@since(1.6)
def json_tuple(col, *fields):
"""Creates a new row for a json column according to the given field names.
:param col: string column in json format
:param fields: list of fields to extract
>>> data = [("1", '''{"f1": "value1", "f2": "value2"}'''), ("2", '''{"f1": "value12"}''')]
>>> df = spark.createDataFrame(data, ("key", "jstring"))
>>> df.select(df.key, json_tuple(df.jstring, 'f1', 'f2')).collect()
[Row(key=u'1', c0=u'value1', c1=u'value2'), Row(key=u'2', c0=u'value12', c1=None)]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.json_tuple(_to_java_column(col), _to_seq(sc, fields))
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def from_json(col, schema, options={}):
"""
Parses a column containing a JSON string into a :class:`MapType` with :class:`StringType`
as keys type, a :class:`StructType` or an :class:`ArrayType` with
the specified schema. Returns `null` in the case of an unparseable string.
:param col: string column in json format
:param schema: a StructType or ArrayType of StructType to use when parsing the json column.
:param options: options to control parsing. accepts the same options as the json datasource
.. note:: Since Spark 2.3, the DDL-formatted string or a JSON format string is also
supported for ``schema``.
>>> from pyspark.sql.types import *
>>> data = [(1, '''{"a": 1}''')]
>>> schema = StructType([StructField("a", IntegerType())])
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "a INT").alias("json")).collect()
[Row(json=Row(a=1))]
>>> df.select(from_json(df.value, "MAP<STRING,INT>").alias("json")).collect()
[Row(json={u'a': 1})]
>>> data = [(1, '''[{"a": 1}]''')]
>>> schema = ArrayType(StructType([StructField("a", IntegerType())]))
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[Row(a=1)])]
>>> schema = schema_of_json(lit('''{"a": 0}'''))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=Row(a=None))]
>>> data = [(1, '''[1, 2, 3]''')]
>>> schema = ArrayType(IntegerType())
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(from_json(df.value, schema).alias("json")).collect()
[Row(json=[1, 2, 3])]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, DataType):
schema = schema.json()
elif isinstance(schema, Column):
schema = _to_java_column(schema)
jc = sc._jvm.functions.from_json(_to_java_column(col), schema, options)
return Column(jc)
@ignore_unicode_prefix
@since(2.1)
def to_json(col, options={}):
"""
Converts a column containing a :class:`StructType`, :class:`ArrayType` or a :class:`MapType`
into a JSON string. Throws an exception in the case of an unsupported type.
:param col: name of column containing a struct, an array or a map.
:param options: options to control converting. accepts the same options as the JSON datasource.
Additionally the function supports the `pretty` option which enables
pretty JSON generation.
>>> from pyspark.sql import Row
>>> from pyspark.sql.types import *
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"age":2,"name":"Alice"}')]
>>> data = [(1, [Row(name='Alice', age=2), Row(name='Bob', age=3)])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"age":2,"name":"Alice"},{"age":3,"name":"Bob"}]')]
>>> data = [(1, {"name": "Alice"})]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'{"name":"Alice"}')]
>>> data = [(1, [{"name": "Alice"}, {"name": "Bob"}])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'[{"name":"Alice"},{"name":"Bob"}]')]
>>> data = [(1, ["Alice", "Bob"])]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_json(df.value).alias("json")).collect()
[Row(json=u'["Alice","Bob"]')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_json(_to_java_column(col), options)
return Column(jc)
@ignore_unicode_prefix
@since(2.4)
def schema_of_json(json, options={}):
"""
Parses a JSON string and infers its schema in DDL format.
:param json: a JSON string or a string literal containing a JSON string.
:param options: options to control parsing. accepts the same options as the JSON datasource
.. versionchanged:: 3.0
It accepts `options` parameter to control schema inferring.
>>> df = spark.range(1)
>>> df.select(schema_of_json(lit('{"a": 0}')).alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
>>> schema = schema_of_json('{a: 1}', {'allowUnquotedFieldNames':'true'})
>>> df.select(schema.alias("json")).collect()
[Row(json=u'struct<a:bigint>')]
"""
if isinstance(json, basestring):
col = _create_column_from_literal(json)
elif isinstance(json, Column):
col = _to_java_column(json)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_json(col, options)
return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def schema_of_csv(csv, options={}):
"""
Parses a CSV string and infers its schema in DDL format.
:param csv: a CSV string or a string literal containing a CSV string.
:param options: options to control parsing. accepts the same options as the CSV datasource
>>> df = spark.range(1)
>>> df.select(schema_of_csv(lit('1|a'), {'sep':'|'}).alias("csv")).collect()
[Row(csv=u'struct<_c0:int,_c1:string>')]
>>> df.select(schema_of_csv('1|a', {'sep':'|'}).alias("csv")).collect()
[Row(csv=u'struct<_c0:int,_c1:string>')]
"""
if isinstance(csv, basestring):
col = _create_column_from_literal(csv)
elif isinstance(csv, Column):
col = _to_java_column(csv)
else:
raise TypeError("schema argument should be a column or string")
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.schema_of_csv(col, options)
return Column(jc)
@ignore_unicode_prefix
@since(3.0)
def to_csv(col, options={}):
"""
Converts a column containing a :class:`StructType` into a CSV string.
Throws an exception in the case of an unsupported type.
:param col: name of column containing a struct.
:param options: options to control converting. accepts the same options as the CSV datasource.
>>> from pyspark.sql import Row
>>> data = [(1, Row(name='Alice', age=2))]
>>> df = spark.createDataFrame(data, ("key", "value"))
>>> df.select(to_csv(df.value).alias("csv")).collect()
[Row(csv=u'2,Alice')]
"""
sc = SparkContext._active_spark_context
jc = sc._jvm.functions.to_csv(_to_java_column(col), options)
return Column(jc)
@since(1.5)
def size(col):
"""
Collection function: returns the length of the array or map stored in the column.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 2, 3],),([1],),([],)], ['data'])
>>> df.select(size(df.data)).collect()
[Row(size(data)=3), Row(size(data)=1), Row(size(data)=0)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.size(_to_java_column(col)))
@since(2.4)
def array_min(col):
"""
Collection function: returns the minimum value of the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_min(df.data).alias('min')).collect()
[Row(min=1), Row(min=-1)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_min(_to_java_column(col)))
@since(2.4)
def array_max(col):
"""
Collection function: returns the maximum value of the array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, 3],), ([None, 10, -1],)], ['data'])
>>> df.select(array_max(df.data).alias('max')).collect()
[Row(max=3), Row(max=10)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_max(_to_java_column(col)))
@since(1.5)
def sort_array(col, asc=True):
"""
Collection function: sorts the input array in ascending or descending order according
to the natural ordering of the array elements. Null elements will be placed at the beginning
of the returned array in ascending order or at the end of the returned array in descending
order.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(sort_array(df.data).alias('r')).collect()
[Row(r=[None, 1, 2, 3]), Row(r=[1]), Row(r=[])]
>>> df.select(sort_array(df.data, asc=False).alias('r')).collect()
[Row(r=[3, 2, 1, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.sort_array(_to_java_column(col), asc))
@since(2.4)
def array_sort(col):
"""
Collection function: sorts the input array in ascending order. The elements of the input array
must be orderable. Null elements will be placed at the end of the returned array.
:param col: name of column or expression
>>> df = spark.createDataFrame([([2, 1, None, 3],),([1],),([],)], ['data'])
>>> df.select(array_sort(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, None]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_sort(_to_java_column(col)))
@since(2.4)
def shuffle(col):
"""
Collection function: Generates a random permutation of the given array.
.. note:: The function is non-deterministic.
:param col: name of column or expression
>>> df = spark.createDataFrame([([1, 20, 3, 5],), ([1, 20, None, 3],)], ['data'])
>>> df.select(shuffle(df.data).alias('s')).collect() # doctest: +SKIP
[Row(s=[3, 1, 5, 20]), Row(s=[20, None, 3, 1])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.shuffle(_to_java_column(col)))
@since(1.5)
@ignore_unicode_prefix
def reverse(col):
"""
Collection function: returns a reversed string or an array with reverse order of elements.
:param col: name of column or expression
>>> df = spark.createDataFrame([('Spark SQL',)], ['data'])
>>> df.select(reverse(df.data).alias('s')).collect()
[Row(s=u'LQS krapS')]
>>> df = spark.createDataFrame([([2, 1, 3],) ,([1],) ,([],)], ['data'])
>>> df.select(reverse(df.data).alias('r')).collect()
[Row(r=[3, 1, 2]), Row(r=[1]), Row(r=[])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.reverse(_to_java_column(col)))
@since(2.4)
def flatten(col):
"""
Collection function: creates a single array from an array of arrays.
If a structure of nested arrays is deeper than two levels,
only one level of nesting is removed.
:param col: name of column or expression
>>> df = spark.createDataFrame([([[1, 2, 3], [4, 5], [6]],), ([None, [4, 5]],)], ['data'])
>>> df.select(flatten(df.data).alias('r')).collect()
[Row(r=[1, 2, 3, 4, 5, 6]), Row(r=None)]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.flatten(_to_java_column(col)))
@since(2.3)
def map_keys(col):
"""
Collection function: Returns an unordered array containing the keys of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_keys
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_keys("data").alias("keys")).show()
+------+
| keys|
+------+
|[1, 2]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_keys(_to_java_column(col)))
@since(2.3)
def map_values(col):
"""
Collection function: Returns an unordered array containing the values of the map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_values
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_values("data").alias("values")).show()
+------+
|values|
+------+
|[a, b]|
+------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_values(_to_java_column(col)))
@since(3.0)
def map_entries(col):
"""
Collection function: Returns an unordered array of all entries in the given map.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_entries
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as data")
>>> df.select(map_entries("data").alias("entries")).show()
+----------------+
| entries|
+----------------+
|[[1, a], [2, b]]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_entries(_to_java_column(col)))
@since(2.4)
def map_from_entries(col):
"""
Collection function: Returns a map created from the given array of entries.
:param col: name of column or expression
>>> from pyspark.sql.functions import map_from_entries
>>> df = spark.sql("SELECT array(struct(1, 'a'), struct(2, 'b')) as data")
>>> df.select(map_from_entries("data").alias("map")).show()
+----------------+
| map|
+----------------+
|[1 -> a, 2 -> b]|
+----------------+
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.map_from_entries(_to_java_column(col)))
@ignore_unicode_prefix
@since(2.4)
def array_repeat(col, count):
"""
Collection function: creates an array containing a column repeated count times.
>>> df = spark.createDataFrame([('ab',)], ['data'])
>>> df.select(array_repeat(df.data, 3).alias('r')).collect()
[Row(r=[u'ab', u'ab', u'ab'])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.array_repeat(_to_java_column(col), count))
@since(2.4)
def arrays_zip(*cols):
"""
Collection function: Returns a merged array of structs in which the N-th struct contains all
N-th values of input arrays.
:param cols: columns of arrays to be merged.
>>> from pyspark.sql.functions import arrays_zip
>>> df = spark.createDataFrame([(([1, 2, 3], [2, 3, 4]))], ['vals1', 'vals2'])
>>> df.select(arrays_zip(df.vals1, df.vals2).alias('zipped')).collect()
[Row(zipped=[Row(vals1=1, vals2=2), Row(vals1=2, vals2=3), Row(vals1=3, vals2=4)])]
"""
sc = SparkContext._active_spark_context
return Column(sc._jvm.functions.arrays_zip(_to_seq(sc, cols, _to_java_column)))
@since(2.4)
def map_concat(*cols):
"""Returns the union of all the given maps.
:param cols: list of column names (string) or list of :class:`Column` expressions
>>> from pyspark.sql.functions import map_concat
>>> df = spark.sql("SELECT map(1, 'a', 2, 'b') as map1, map(3, 'c', 1, 'd') as map2")
>>> df.select(map_concat("map1", "map2").alias("map3")).show(truncate=False)
+------------------------+
|map3 |
+------------------------+
|[1 -> d, 2 -> b, 3 -> c]|
+------------------------+
"""
sc = SparkContext._active_spark_context
if len(cols) == 1 and isinstance(cols[0], (list, set)):
cols = cols[0]
jc = sc._jvm.functions.map_concat(_to_seq(sc, cols, _to_java_column))
return Column(jc)
@since(2.4)
def sequence(start, stop, step=None):
"""
Generate a sequence of integers from `start` to `stop`, incrementing by `step`.
    If `step` is not set, it defaults to 1 if `start` is less than or equal to `stop`,
    and to -1 otherwise.
>>> df1 = spark.createDataFrame([(-2, 2)], ('C1', 'C2'))
>>> df1.select(sequence('C1', 'C2').alias('r')).collect()
[Row(r=[-2, -1, 0, 1, 2])]
>>> df2 = spark.createDataFrame([(4, -4, -2)], ('C1', 'C2', 'C3'))
>>> df2.select(sequence('C1', 'C2', 'C3').alias('r')).collect()
[Row(r=[4, 2, 0, -2, -4])]
"""
sc = SparkContext._active_spark_context
if step is None:
return Column(sc._jvm.functions.sequence(_to_java_column(start), _to_java_column(stop)))
else:
return Column(sc._jvm.functions.sequence(
_to_java_column(start), _to_java_column(stop), _to_java_column(step)))
@ignore_unicode_prefix
@since(3.0)
def from_csv(col, schema, options={}):
"""
Parses a column containing a CSV string to a row with the specified schema.
Returns `null`, in the case of an unparseable string.
:param col: string column in CSV format
:param schema: a string with schema in DDL format to use when parsing the CSV column.
    :param options: options to control parsing. Accepts the same options as the CSV datasource.
>>> data = [("1,2,3",)]
>>> df = spark.createDataFrame(data, ("value",))
>>> df.select(from_csv(df.value, "a INT, b INT, c INT").alias("csv")).collect()
[Row(csv=Row(a=1, b=2, c=3))]
>>> value = data[0][0]
>>> df.select(from_csv(df.value, schema_of_csv(value)).alias("csv")).collect()
[Row(csv=Row(_c0=1, _c1=2, _c2=3))]
"""
sc = SparkContext._active_spark_context
if isinstance(schema, basestring):
schema = _create_column_from_literal(schema)
elif isinstance(schema, Column):
schema = _to_java_column(schema)
else:
raise TypeError("schema argument should be a column or string")
jc = sc._jvm.functions.from_csv(_to_java_column(col), schema, options)
return Column(jc)
# ---------------------------- User Defined Function ----------------------------------
class PandasUDFType(object):
"""Pandas UDF Types. See :meth:`pyspark.sql.functions.pandas_udf`.
"""
SCALAR = PythonEvalType.SQL_SCALAR_PANDAS_UDF
GROUPED_MAP = PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF
GROUPED_AGG = PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
@since(1.3)
def udf(f=None, returnType=StringType()):
"""Creates a user defined function (UDF).
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> from pyspark.sql.types import IntegerType
>>> import random
>>> random_udf = udf(lambda: int(random.random() * 100), IntegerType()).asNondeterministic()
    .. note:: The user-defined functions do not support conditional expressions or short-circuiting
        in boolean expressions; they end up being executed in full internally. If the functions
        can fail on special rows, the workaround is to incorporate the condition into the functions.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
:param f: python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
>>> from pyspark.sql.types import IntegerType
>>> slen = udf(lambda s: len(s), IntegerType())
>>> @udf
... def to_upper(s):
... if s is not None:
... return s.upper()
...
>>> @udf(returnType=IntegerType())
... def add_one(x):
... if x is not None:
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)], ("id", "name", "age"))
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")).show()
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
"""
# The following table shows most of Python data and SQL type conversions in normal UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-25666's PR to see the codes in order to generate the table below.
#
# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa
# |SQL Type \ Python Value(Type)|None(NoneType)|True(bool)|1(int)|1(long)| a(str)| a(unicode)| 1970-01-01(date)|1970-01-01 00:00:00(datetime)|1.0(float)|array('i', [1])(array)|[1](list)| (1,)(tuple)| ABC(bytearray)| 1(Decimal)|{'a': 1}(dict)|Row(kwargs=1)(Row)|Row(namedtuple=1)(Row)| # noqa
# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa
# | boolean| None| True| None| None| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | tinyint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | smallint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | int| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | bigint| None| None| 1| 1| None| None| None| None| None| None| None| None| None| None| None| X| X| # noqa
# | string| None| u'true'| u'1'| u'1'| u'a'| u'a'|u'java.util.Grego...| u'java.util.Grego...| u'1.0'| u'[I@24a83055'| u'[1]'|u'[Ljava.lang.Obj...| u'[B@49093632'| u'1'| u'{a=1}'| X| X| # noqa
# | date| None| X| X| X| X| X|datetime.date(197...| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X| X| X| datetime.datetime...| X| X| X| X| X| X| X| X| X| # noqa
# | float| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | double| None| None| None| None| None| None| None| None| 1.0| None| None| None| None| None| None| X| X| # noqa
# | array<int>| None| None| None| None| None| None| None| None| None| [1]| [1]| [1]| [65, 66, 67]| None| None| X| X| # noqa
# | binary| None| None| None| None|bytearray(b'a')|bytearray(b'a')| None| None| None| None| None| None|bytearray(b'ABC')| None| None| X| X| # noqa
# | decimal(10,0)| None| None| None| None| None| None| None| None| None| None| None| None| None|Decimal('1')| None| X| X| # noqa
# | map<string,int>| None| None| None| None| None| None| None| None| None| None| None| None| None| None| {u'a': 1}| X| X| # noqa
# | struct<_1:int>| None| X| X| X| X| X| X| X| X| X|Row(_1=1)| Row(_1=1)| X| X| Row(_1=None)| Row(_1=1)| Row(_1=1)| # noqa
# +-----------------------------+--------------+----------+------+-------+---------------+---------------+--------------------+-----------------------------+----------+----------------------+---------+--------------------+-----------------+------------+--------------+------------------+----------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 2 is used to generate this table since it is used to check the backward
# compatibility often in practice.
# Note: 'X' means it throws an exception during the conversion.
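    # A minimal sketch of one row of the table above (the names below are
    # illustrative, not part of this module's doctests): per the 'int' row and
    # the 'a(str)' column, a UDF declared with an int return type whose Python
    # function returns a string yields null.
    #
    # >>> from pyspark.sql.types import IntegerType
    # >>> as_int = udf(lambda s: s, IntegerType())
    # >>> spark.range(1).select(as_int(lit('a'))).collect()  # doctest: +SKIP
    # [Row(...=None)]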
# decorator @udf, @udf(), @udf(dataType())
if f is None or isinstance(f, (str, DataType)):
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
return functools.partial(_create_udf, returnType=return_type,
evalType=PythonEvalType.SQL_BATCHED_UDF)
else:
return _create_udf(f=f, returnType=returnType,
evalType=PythonEvalType.SQL_BATCHED_UDF)
@since(2.3)
def pandas_udf(f=None, returnType=None, functionType=None):
"""
Creates a vectorized user defined function (UDF).
:param f: user-defined function. A python function if used as a standalone function
:param returnType: the return type of the user-defined function. The value can be either a
:class:`pyspark.sql.types.DataType` object or a DDL-formatted type string.
:param functionType: an enum value in :class:`pyspark.sql.functions.PandasUDFType`.
Default: SCALAR.
.. note:: Experimental
The function type of the UDF can be one of the following:
1. SCALAR
A scalar UDF defines a transformation: One or more `pandas.Series` -> A `pandas.Series`.
    The length of the returned `pandas.Series` must be the same as that of the input `pandas.Series`.
:class:`MapType`, :class:`StructType` are currently not supported as output types.
Scalar UDFs are used with :meth:`pyspark.sql.DataFrame.withColumn` and
:meth:`pyspark.sql.DataFrame.select`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql.types import IntegerType, StringType
>>> slen = pandas_udf(lambda s: s.str.len(), IntegerType()) # doctest: +SKIP
>>> @pandas_udf(StringType()) # doctest: +SKIP
... def to_upper(s):
... return s.str.upper()
...
>>> @pandas_udf("integer", PandasUDFType.SCALAR) # doctest: +SKIP
... def add_one(x):
... return x + 1
...
>>> df = spark.createDataFrame([(1, "John Doe", 21)],
... ("id", "name", "age")) # doctest: +SKIP
>>> df.select(slen("name").alias("slen(name)"), to_upper("name"), add_one("age")) \\
... .show() # doctest: +SKIP
+----------+--------------+------------+
|slen(name)|to_upper(name)|add_one(age)|
+----------+--------------+------------+
| 8| JOHN DOE| 22|
+----------+--------------+------------+
.. note:: The length of `pandas.Series` within a scalar UDF is not that of the whole input
column, but is the length of an internal batch used for each call to the function.
Therefore, this can be used, for example, to ensure the length of each returned
`pandas.Series`, and can not be used as the column length.
2. GROUPED_MAP
    A grouped map UDF defines a transformation: A `pandas.DataFrame` -> A `pandas.DataFrame`
The returnType should be a :class:`StructType` describing the schema of the returned
`pandas.DataFrame`. The column labels of the returned `pandas.DataFrame` must either match
the field names in the defined returnType schema if specified as strings, or match the
field data types by position if not strings, e.g. integer indices.
The length of the returned `pandas.DataFrame` can be arbitrary.
Grouped map UDFs are used with :meth:`pyspark.sql.GroupedData.apply`.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def normalize(pdf):
... v = pdf.v
... return pdf.assign(v=(v - v.mean()) / v.std())
>>> df.groupby("id").apply(normalize).show() # doctest: +SKIP
+---+-------------------+
| id| v|
+---+-------------------+
| 1|-0.7071067811865475|
| 1| 0.7071067811865475|
| 2|-0.8320502943378437|
| 2|-0.2773500981126146|
| 2| 1.1094003924504583|
+---+-------------------+
Alternatively, the user can define a function that takes two arguments.
In this case, the grouping key(s) will be passed as the first argument and the data will
be passed as the second argument. The grouping key(s) will be passed as a tuple of numpy
data types, e.g., `numpy.int32` and `numpy.float64`. The data will still be passed in
as a `pandas.DataFrame` containing all columns from the original Spark DataFrame.
This is useful when the user does not want to hardcode grouping key(s) in the function.
>>> import pandas as pd # doctest: +SKIP
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v")) # doctest: +SKIP
>>> @pandas_udf("id long, v double", PandasUDFType.GROUPED_MAP) # doctest: +SKIP
... def mean_udf(key, pdf):
... # key is a tuple of one numpy.int64, which is the value
... # of 'id' for the current group
... return pd.DataFrame([key + (pdf.v.mean(),)])
>>> df.groupby('id').apply(mean_udf).show() # doctest: +SKIP
+---+---+
| id| v|
+---+---+
| 1|1.5|
| 2|6.0|
+---+---+
>>> @pandas_udf(
... "id long, `ceil(v / 2)` long, v double",
... PandasUDFType.GROUPED_MAP) # doctest: +SKIP
    ... def sum_udf(key, pdf):
    ...     # key is a tuple of two numpy.int64s, which are the values
... # of 'id' and 'ceil(df.v / 2)' for the current group
... return pd.DataFrame([key + (pdf.v.sum(),)])
>>> df.groupby(df.id, ceil(df.v / 2)).apply(sum_udf).show() # doctest: +SKIP
+---+-----------+----+
| id|ceil(v / 2)| v|
+---+-----------+----+
| 2| 5|10.0|
| 1| 1| 3.0|
| 2| 3| 5.0|
| 2| 2| 3.0|
+---+-----------+----+
.. note:: If returning a new `pandas.DataFrame` constructed with a dictionary, it is
recommended to explicitly index the columns by name to ensure the positions are correct,
or alternatively use an `OrderedDict`.
For example, `pd.DataFrame({'id': ids, 'a': data}, columns=['id', 'a'])` or
`pd.DataFrame(OrderedDict([('id', ids), ('a', data)]))`.
.. seealso:: :meth:`pyspark.sql.GroupedData.apply`
3. GROUPED_AGG
A grouped aggregate UDF defines a transformation: One or more `pandas.Series` -> A scalar
The `returnType` should be a primitive data type, e.g., :class:`DoubleType`.
The returned scalar can be either a python primitive type, e.g., `int` or `float`
or a numpy data type, e.g., `numpy.int64` or `numpy.float64`.
:class:`MapType` and :class:`StructType` are currently not supported as output types.
Group aggregate UDFs are used with :meth:`pyspark.sql.GroupedData.agg` and
:class:`pyspark.sql.Window`
This example shows using grouped aggregated UDFs with groupby:
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> df.groupby("id").agg(mean_udf(df['v'])).show() # doctest: +SKIP
+---+-----------+
| id|mean_udf(v)|
+---+-----------+
| 1| 1.5|
| 2| 6.0|
+---+-----------+
This example shows using grouped aggregated UDFs as window functions.
>>> from pyspark.sql.functions import pandas_udf, PandasUDFType
>>> from pyspark.sql import Window
>>> df = spark.createDataFrame(
... [(1, 1.0), (1, 2.0), (2, 3.0), (2, 5.0), (2, 10.0)],
... ("id", "v"))
>>> @pandas_udf("double", PandasUDFType.GROUPED_AGG) # doctest: +SKIP
... def mean_udf(v):
... return v.mean()
>>> w = (Window.partitionBy('id')
... .orderBy('v')
... .rowsBetween(-1, 0))
>>> df.withColumn('mean_v', mean_udf(df['v']).over(w)).show() # doctest: +SKIP
+---+----+------+
| id| v|mean_v|
+---+----+------+
| 1| 1.0| 1.0|
| 1| 2.0| 1.5|
| 2| 3.0| 3.0|
| 2| 5.0| 4.0|
| 2|10.0| 7.5|
+---+----+------+
.. note:: For performance reasons, the input series to window functions are not copied.
Therefore, mutating the input series is not allowed and will cause incorrect results.
For the same reason, users should also not rely on the index of the input series.
.. seealso:: :meth:`pyspark.sql.GroupedData.agg` and :class:`pyspark.sql.Window`
.. note:: The user-defined functions are considered deterministic by default. Due to
optimization, duplicate invocations may be eliminated or the function may even be invoked
more times than it is present in the query. If your function is not deterministic, call
`asNondeterministic` on the user defined function. E.g.:
>>> @pandas_udf('double', PandasUDFType.SCALAR) # doctest: +SKIP
... def random(v):
... import numpy as np
... import pandas as pd
    ...     return pd.Series(np.random.randn(len(v)))
>>> random = random.asNondeterministic() # doctest: +SKIP
    .. note:: The user-defined functions do not support conditional expressions or short-circuiting
        in boolean expressions; they end up being executed in full internally. If the functions
        can fail on special rows, the workaround is to incorporate the condition into the functions.
.. note:: The user-defined functions do not take keyword arguments on the calling side.
.. note:: The data type of returned `pandas.Series` from the user-defined functions should be
matched with defined returnType (see :meth:`types.to_arrow_type` and
:meth:`types.from_arrow_type`). When there is mismatch between them, Spark might do
conversion on returned data. The conversion is not guaranteed to be correct and results
should be checked for accuracy by users.
"""
# The following table shows most of Pandas data and SQL type conversions in Pandas UDFs that
# are not yet visible to the user. Some of behaviors are buggy and might be changed in the near
# future. The table might have to be eventually documented externally.
# Please see SPARK-25798's PR to see the codes in order to generate the table below.
#
# +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# |SQL Type \ Pandas Value(Type)|None(object(NoneType))|True(bool)|1(int8)|1(int16)| 1(int32)| 1(int64)|1(uint8)|1(uint16)|1(uint32)|1(uint64)|1.0(float16)|1.0(float32)|1.0(float64)|1970-01-01 00:00:00(datetime64[ns])|1970-01-01 00:00:00-05:00(datetime64[ns, US/Eastern])|a(object(string))| 1(object(Decimal))|[1 2 3](object(array[int32]))|1.0(float128)|(1+0j)(complex64)|(1+0j)(complex128)|A(category)|1 days 00:00:00(timedelta64[ns])| # noqa
# +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa
# | boolean| None| True| True| True| True| True| True| True| True| True| False| False| False| False| False| X| X| X| False| False| False| X| False| # noqa
# | tinyint| None| 1| 1| 1| 1| 1| X| X| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| 0| X| # noqa
# | smallint| None| 1| 1| 1| 1| 1| 1| X| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| X| X| # noqa
# | int| None| 1| 1| 1| 1| 1| 1| 1| X| X| 1| 1| 1| X| X| X| X| X| X| X| X| X| X| # noqa
# | bigint| None| 1| 1| 1| 1| 1| 1| 1| 1| X| 1| 1| 1| 0| 18000000000000| X| X| X| X| X| X| X| X| # noqa
# | float| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X|1.401298464324817...| X| X| X| X| X| X| # noqa
# | double| None| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| 1.0| X| X| X| X| X| X| X| X| X| X| # noqa
# | date| None| X| X| X|datetime.date(197...| X| X| X| X| X| X| X| X| datetime.date(197...| X| X| X| X| X| X| X| X| X| # noqa
# | timestamp| None| X| X| X| X|datetime.datetime...| X| X| X| X| X| X| X| datetime.datetime...| datetime.datetime...| X| X| X| X| X| X| X| X| # noqa
# | string| None| u''|u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u'\x01'| u''| u''| u''| X| X| u'a'| X| X| u''| u''| u''| X| X| # noqa
# | decimal(10,0)| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| Decimal('1')| X| X| X| X| X| X| # noqa
# | array<int>| None| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| [1, 2, 3]| X| X| X| X| X| # noqa
# | map<string,int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | struct<_1:int>| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# | binary| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| X| # noqa
# +-----------------------------+----------------------+----------+-------+--------+--------------------+--------------------+--------+---------+---------+---------+------------+------------+------------+-----------------------------------+-----------------------------------------------------+-----------------+--------------------+-----------------------------+-------------+-----------------+------------------+-----------+--------------------------------+ # noqa
#
# Note: DDL formatted string is used for 'SQL Type' for simplicity. This string can be
# used in `returnType`.
# Note: The values inside of the table are generated by `repr`.
# Note: Python 2 is used to generate this table since it is used to check the backward
# compatibility often in practice.
# Note: Pandas 0.19.2 and PyArrow 0.9.0 are used.
# Note: Timezone is Singapore timezone.
# Note: 'X' means it throws an exception during the conversion.
# Note: 'binary' type is only supported with PyArrow 0.10.0+ (SPARK-23555).
# decorator @pandas_udf(returnType, functionType)
is_decorator = f is None or isinstance(f, (str, DataType))
if is_decorator:
# If DataType has been passed as a positional argument
# for decorator use it as a returnType
return_type = f or returnType
if functionType is not None:
# @pandas_udf(dataType, functionType=functionType)
# @pandas_udf(returnType=dataType, functionType=functionType)
eval_type = functionType
elif returnType is not None and isinstance(returnType, int):
# @pandas_udf(dataType, functionType)
eval_type = returnType
else:
# @pandas_udf(dataType) or @pandas_udf(returnType=dataType)
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
else:
return_type = returnType
if functionType is not None:
eval_type = functionType
else:
eval_type = PythonEvalType.SQL_SCALAR_PANDAS_UDF
if return_type is None:
raise ValueError("Invalid returnType: returnType can not be None")
if eval_type not in [PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF]:
raise ValueError("Invalid functionType: "
"functionType must be one the values from PandasUDFType")
if is_decorator:
return functools.partial(_create_udf, returnType=return_type, evalType=eval_type)
else:
return _create_udf(f=f, returnType=return_type, evalType=eval_type)
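# A minimal sketch of the two call styles dispatched above (the names are
# illustrative, mirroring the docstring examples): the decorator form
#
# >>> @pandas_udf('double', PandasUDFType.GROUPED_AGG)  # doctest: +SKIP
# ... def mean_udf(v):
# ...     return v.mean()
#
# is equivalent to the direct call
#
# >>> mean_udf = pandas_udf(lambda v: v.mean(), 'double',
# ...                       PandasUDFType.GROUPED_AGG)  # doctest: +SKIP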
blacklist = ['map', 'since', 'ignore_unicode_prefix']
__all__ = [k for k, v in globals().items()
if not k.startswith('_') and k[0].islower() and callable(v) and k not in blacklist]
__all__ += ["PandasUDFType"]
__all__.sort()
def _test():
import doctest
from pyspark.sql import Row, SparkSession
import pyspark.sql.functions
globs = pyspark.sql.functions.__dict__.copy()
spark = SparkSession.builder\
.master("local[4]")\
.appName("sql.functions tests")\
.getOrCreate()
sc = spark.sparkContext
globs['sc'] = sc
globs['spark'] = spark
globs['df'] = spark.createDataFrame([Row(name='Alice', age=2), Row(name='Bob', age=5)])
(failure_count, test_count) = doctest.testmod(
pyspark.sql.functions, globs=globs,
optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
spark.stop()
if failure_count:
sys.exit(-1)
if __name__ == "__main__":
_test()
| apache-2.0 |
jm-begon/scikit-learn | examples/linear_model/plot_bayesian_ridge.py | 248 | 2588 | """
=========================
Bayesian Ridge Regression
=========================
Computes a Bayesian Ridge Regression on a synthetic dataset.
See :ref:`bayesian_ridge_regression` for more information on the regressor.
Compared to the OLS (ordinary least squares) estimator, the coefficient
weights are slightly shifted toward zeros, which stabilises them.
As the prior on the weights is a Gaussian prior, the histogram of the
estimated weights is Gaussian.
The estimation of the model is done by iteratively maximizing the
marginal log-likelihood of the observations.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats
from sklearn.linear_model import BayesianRidge, LinearRegression
###############################################################################
# Generating simulated data with Gaussian weights
np.random.seed(0)
n_samples, n_features = 100, 100
X = np.random.randn(n_samples, n_features) # Create Gaussian data
# Create weights with a precision lambda_ of 4.
lambda_ = 4.
w = np.zeros(n_features)
# Only keep 10 weights of interest
relevant_features = np.random.randint(0, n_features, 10)
for i in relevant_features:
w[i] = stats.norm.rvs(loc=0, scale=1. / np.sqrt(lambda_))
# Create noise with a precision alpha of 50.
alpha_ = 50.
noise = stats.norm.rvs(loc=0, scale=1. / np.sqrt(alpha_), size=n_samples)
# Create the target
y = np.dot(X, w) + noise
###############################################################################
# Fit the Bayesian Ridge Regression and an OLS for comparison
clf = BayesianRidge(compute_score=True)
clf.fit(X, y)
ols = LinearRegression()
ols.fit(X, y)
###############################################################################
# Plot true weights, estimated weights and histogram of the weights
plt.figure(figsize=(6, 5))
plt.title("Weights of the model")
plt.plot(clf.coef_, 'b-', label="Bayesian Ridge estimate")
plt.plot(w, 'g-', label="Ground truth")
plt.plot(ols.coef_, 'r--', label="OLS estimate")
plt.xlabel("Features")
plt.ylabel("Values of the weights")
plt.legend(loc="best", prop=dict(size=12))
plt.figure(figsize=(6, 5))
plt.title("Histogram of the weights")
plt.hist(clf.coef_, bins=n_features, log=True)
plt.plot(clf.coef_[relevant_features], 5 * np.ones(len(relevant_features)),
'ro', label="Relevant features")
plt.ylabel("Features")
plt.xlabel("Values of the weights")
plt.legend(loc="lower left")
plt.figure(figsize=(6, 5))
plt.title("Marginal log-likelihood")
plt.plot(clf.scores_)
plt.ylabel("Score")
plt.xlabel("Iterations")
plt.show()
| bsd-3-clause |
sergiohzlz/complejos | JdelC/jdelc.py | 1 | 2211 | #!/usr/bin/python
import numpy as np
import numpy.random as rnd
import sys
import matplotlib
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt
from numpy import pi
poligono_p = lambda n,rot: [(1,i*2*np.pi/n+rot) for i in range(1,n+1)]
pol2cart = lambda ro,te: (ro*np.cos(te),ro*np.sin(te))
poligono_c = lambda L: [pol2cart(x[0],x[1]) for x in L]
genera_coords = lambda L,p: dict(zip(L,p))
pmedio = lambda x,y: (0.5*(x[0]+y[0]) , 0.5*(x[1]+y[1]) )
class JdelC(object):
def __init__(self):
pass
def juego(n,m=100000, rot=pi/2):
C = genera_coords(range(n), poligono_c(poligono_p(n,rot)))
P = [C[rnd.choice(range(n))]]
for i in range(m):
up = P[-1]
vz = C[rnd.choice(range(n))]
P.append(pmedio(up,vz))
return np.array(P), C
def juego_sec(V,S,m=100000,rot=pi/4):
n = len(V)
C = genera_coords(V, poligono_c(poligono_p(n,rot)))
P = [C[S[0]]]
cont = 0
for i in range(1,m):
up = P[-1]
vz = C[S[i]]
P.append(pmedio(up,vz))
return np.array(P), C
def secciones_nucleotidos(f,m):
cont=0
for r in f:
l = r.strip()
if(l[0]=='>'):
continue
acum = m-cont
sec = ''.join([ s for s in l[:acum] if s!='N' ])
cont+=len(sec)
if(cont<=m):
yield sec
def secciones(f,m):
cont=0
for r in f:
l = r.strip()
try:
if(l[0]=='>'):
continue
except:
continue
acum = m-cont
sec = ''.join([ s for s in l[:acum] ])
cont+=len(sec)
if(cont<=m):
yield sec
def grafica(R):
plt.scatter(R[:,0],R[:,1],s=0.1, c='k')
def grafcoords(*D):
R,C = D
plt.scatter(R[:,0],R[:,1],s=0.1, c='k')
for c in C:
plt.annotate(c,C[c])
if __name__=='__main__':
    n = int(sys.argv[1])
    # Example
# In [150]: G = open('Saccharomyces_cerevisiae_aa.fasta','r')
#
# In [151]: secs = jdelc.secciones(G,1000)
#
# In [152]: secuencia = ''
#
# In [153]: for sec in secs:
# ...: secuencia += sec
# ...:
#
# In [154]: R,C = jdelc.juego_sec(aminos,secuencia, len(secuencia),pi/4); jdelc.grafcoords(R,C); show()
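    # A minimal sketch of the plain chaos game (assuming, as above, that this
    # file is importable as jdelc): n=3 vertices yield the Sierpinski gasket.
    #
    # In [1]: R, C = jdelc.juego(3, m=50000)
    #
    # In [2]: jdelc.grafcoords(R, C); show()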
| gpl-2.0 |
michaeljohnbennett/zipline | tests/modelling/test_numerical_expression.py | 15 | 13165 | from operator import (
and_,
ge,
gt,
le,
lt,
methodcaller,
ne,
or_,
)
from unittest import TestCase
import numpy
from numpy import (
arange,
eye,
full,
isnan,
zeros,
)
from pandas import (
DataFrame,
date_range,
Int64Index,
)
from zipline.modelling.expression import (
NumericalExpression,
NUMEXPR_MATH_FUNCS,
)
from zipline.modelling.factor import TestingFactor
from zipline.utils.test_utils import check_arrays
class F(TestingFactor):
inputs = ()
window_length = 0
class G(TestingFactor):
inputs = ()
window_length = 0
class H(TestingFactor):
inputs = ()
window_length = 0
class NumericalExpressionTestCase(TestCase):
def setUp(self):
self.dates = date_range('2014-01-01', periods=5, freq='D')
self.assets = Int64Index(range(5))
self.f = F()
self.g = G()
self.h = H()
self.fake_raw_data = {
self.f: full((5, 5), 3),
self.g: full((5, 5), 2),
self.h: full((5, 5), 1),
}
self.mask = DataFrame(True, index=self.dates, columns=self.assets)
def check_output(self, expr, expected):
result = expr.compute_from_arrays(
[self.fake_raw_data[input_] for input_ in expr.inputs],
self.mask,
)
check_arrays(result, expected)
def check_constant_output(self, expr, expected):
self.assertFalse(isnan(expected))
return self.check_output(expr, full((5, 5), expected))
def test_validate_good(self):
f = self.f
g = self.g
NumericalExpression("x_0", (f,))
NumericalExpression("x_0 ", (f,))
NumericalExpression("x_0 + x_0", (f,))
NumericalExpression("x_0 + 2", (f,))
NumericalExpression("2 * x_0", (f,))
NumericalExpression("x_0 + x_1", (f, g))
NumericalExpression("x_0 + x_1 + x_0", (f, g))
NumericalExpression("x_0 + 1 + x_1", (f, g))
def test_validate_bad(self):
f, g, h = F(), G(), H()
# Too few inputs.
with self.assertRaises(ValueError):
NumericalExpression("x_0", ())
with self.assertRaises(ValueError):
NumericalExpression("x_0 + x_1", (f,))
# Too many inputs.
with self.assertRaises(ValueError):
NumericalExpression("x_0", (f, g))
with self.assertRaises(ValueError):
NumericalExpression("x_0 + x_1", (f, g, h))
# Invalid variable name.
with self.assertRaises(ValueError):
NumericalExpression("x_0x_1", (f,))
with self.assertRaises(ValueError):
NumericalExpression("x_0x_1", (f, g))
# Variable index must start at 0.
with self.assertRaises(ValueError):
NumericalExpression("x_1", (f,))
# Scalar operands must be numeric.
with self.assertRaises(TypeError):
"2" + f
with self.assertRaises(TypeError):
f + "2"
with self.assertRaises(TypeError):
f > "2"
# Boolean binary operators must be between filters.
with self.assertRaises(TypeError):
f + (f > 2)
with self.assertRaises(TypeError):
(f > f) > f
def test_negate(self):
f, g = self.f, self.g
self.check_constant_output(-f, -3.0)
self.check_constant_output(--f, 3.0)
self.check_constant_output(---f, -3.0)
self.check_constant_output(-(f + f), -6.0)
self.check_constant_output(-f + -f, -6.0)
self.check_constant_output(-(-f + -f), 6.0)
self.check_constant_output(f + -g, 1.0)
self.check_constant_output(f - -g, 5.0)
self.check_constant_output(-(f + g) + (f + g), 0.0)
self.check_constant_output((f + g) + -(f + g), 0.0)
self.check_constant_output(-(f + g) + -(f + g), -10.0)
def test_add(self):
f, g = self.f, self.g
self.check_constant_output(f + g, 5.0)
self.check_constant_output((1 + f) + g, 6.0)
self.check_constant_output(1 + (f + g), 6.0)
self.check_constant_output((f + 1) + g, 6.0)
self.check_constant_output(f + (1 + g), 6.0)
self.check_constant_output((f + g) + 1, 6.0)
self.check_constant_output(f + (g + 1), 6.0)
self.check_constant_output((f + f) + f, 9.0)
self.check_constant_output(f + (f + f), 9.0)
self.check_constant_output((f + g) + f, 8.0)
self.check_constant_output(f + (g + f), 8.0)
self.check_constant_output((f + g) + (f + g), 10.0)
self.check_constant_output((f + g) + (g + f), 10.0)
self.check_constant_output((g + f) + (f + g), 10.0)
self.check_constant_output((g + f) + (g + f), 10.0)
def test_subtract(self):
f, g = self.f, self.g
self.check_constant_output(f - g, 1.0) # 3 - 2
self.check_constant_output((1 - f) - g, -4.) # (1 - 3) - 2
self.check_constant_output(1 - (f - g), 0.0) # 1 - (3 - 2)
self.check_constant_output((f - 1) - g, 0.0) # (3 - 1) - 2
self.check_constant_output(f - (1 - g), 4.0) # 3 - (1 - 2)
self.check_constant_output((f - g) - 1, 0.0) # (3 - 2) - 1
self.check_constant_output(f - (g - 1), 2.0) # 3 - (2 - 1)
self.check_constant_output((f - f) - f, -3.) # (3 - 3) - 3
self.check_constant_output(f - (f - f), 3.0) # 3 - (3 - 3)
self.check_constant_output((f - g) - f, -2.) # (3 - 2) - 3
self.check_constant_output(f - (g - f), 4.0) # 3 - (2 - 3)
self.check_constant_output((f - g) - (f - g), 0.0) # (3 - 2) - (3 - 2)
self.check_constant_output((f - g) - (g - f), 2.0) # (3 - 2) - (2 - 3)
self.check_constant_output((g - f) - (f - g), -2.) # (2 - 3) - (3 - 2)
self.check_constant_output((g - f) - (g - f), 0.0) # (2 - 3) - (2 - 3)
def test_multiply(self):
f, g = self.f, self.g
self.check_constant_output(f * g, 6.0)
self.check_constant_output((2 * f) * g, 12.0)
self.check_constant_output(2 * (f * g), 12.0)
self.check_constant_output((f * 2) * g, 12.0)
self.check_constant_output(f * (2 * g), 12.0)
self.check_constant_output((f * g) * 2, 12.0)
self.check_constant_output(f * (g * 2), 12.0)
self.check_constant_output((f * f) * f, 27.0)
self.check_constant_output(f * (f * f), 27.0)
self.check_constant_output((f * g) * f, 18.0)
self.check_constant_output(f * (g * f), 18.0)
self.check_constant_output((f * g) * (f * g), 36.0)
self.check_constant_output((f * g) * (g * f), 36.0)
self.check_constant_output((g * f) * (f * g), 36.0)
self.check_constant_output((g * f) * (g * f), 36.0)
self.check_constant_output(f * f * f * 0 * f * f, 0.0)
def test_divide(self):
f, g = self.f, self.g
self.check_constant_output(f / g, 3.0 / 2.0)
self.check_constant_output(
(2 / f) / g,
(2 / 3.0) / 2.0
)
self.check_constant_output(
2 / (f / g),
2 / (3.0 / 2.0),
)
self.check_constant_output(
(f / 2) / g,
(3.0 / 2) / 2.0,
)
self.check_constant_output(
f / (2 / g),
3.0 / (2 / 2.0),
)
self.check_constant_output(
(f / g) / 2,
(3.0 / 2.0) / 2,
)
self.check_constant_output(
f / (g / 2),
3.0 / (2.0 / 2),
)
self.check_constant_output(
(f / f) / f,
(3.0 / 3.0) / 3.0
)
self.check_constant_output(
f / (f / f),
3.0 / (3.0 / 3.0),
)
self.check_constant_output(
(f / g) / f,
(3.0 / 2.0) / 3.0,
)
self.check_constant_output(
f / (g / f),
3.0 / (2.0 / 3.0),
)
self.check_constant_output(
(f / g) / (f / g),
(3.0 / 2.0) / (3.0 / 2.0),
)
self.check_constant_output(
(f / g) / (g / f),
(3.0 / 2.0) / (2.0 / 3.0),
)
self.check_constant_output(
(g / f) / (f / g),
(2.0 / 3.0) / (3.0 / 2.0),
)
self.check_constant_output(
(g / f) / (g / f),
(2.0 / 3.0) / (2.0 / 3.0),
)
def test_pow(self):
f, g = self.f, self.g
self.check_constant_output(f ** g, 3.0 ** 2)
self.check_constant_output(2 ** f, 2.0 ** 3)
self.check_constant_output(f ** 2, 3.0 ** 2)
self.check_constant_output((f + g) ** 2, (3.0 + 2.0) ** 2)
self.check_constant_output(2 ** (f + g), 2 ** (3.0 + 2.0))
self.check_constant_output(f ** (f ** g), 3.0 ** (3.0 ** 2.0))
self.check_constant_output((f ** f) ** g, (3.0 ** 3.0) ** 2.0)
self.check_constant_output((f ** g) ** (f ** g), 9.0 ** 9.0)
self.check_constant_output((f ** g) ** (g ** f), 9.0 ** 8.0)
self.check_constant_output((g ** f) ** (f ** g), 8.0 ** 9.0)
self.check_constant_output((g ** f) ** (g ** f), 8.0 ** 8.0)
def test_mod(self):
f, g = self.f, self.g
self.check_constant_output(f % g, 3.0 % 2.0)
self.check_constant_output(f % 2.0, 3.0 % 2.0)
self.check_constant_output(g % f, 2.0 % 3.0)
self.check_constant_output((f + g) % 2, (3.0 + 2.0) % 2)
self.check_constant_output(2 % (f + g), 2 % (3.0 + 2.0))
self.check_constant_output(f % (f % g), 3.0 % (3.0 % 2.0))
self.check_constant_output((f % f) % g, (3.0 % 3.0) % 2.0)
self.check_constant_output((f + g) % (f * g), 5.0 % 6.0)
def test_math_functions(self):
f, g = self.f, self.g
fake_raw_data = self.fake_raw_data
alt_fake_raw_data = {
self.f: full((5, 5), .5),
self.g: full((5, 5), -.5),
}
for funcname in NUMEXPR_MATH_FUNCS:
method = methodcaller(funcname)
func = getattr(numpy, funcname)
# These methods have domains in [0, 1], so we need alternate inputs
# that are in the domain.
if funcname in ('arcsin', 'arccos', 'arctanh'):
self.fake_raw_data = alt_fake_raw_data
else:
self.fake_raw_data = fake_raw_data
f_val = self.fake_raw_data[f][0, 0]
g_val = self.fake_raw_data[g][0, 0]
self.check_constant_output(method(f), func(f_val))
self.check_constant_output(method(g), func(g_val))
self.check_constant_output(method(f) + 1, func(f_val) + 1)
self.check_constant_output(1 + method(f), 1 + func(f_val))
self.check_constant_output(method(f + .25), func(f_val + .25))
self.check_constant_output(method(.25 + f), func(.25 + f_val))
self.check_constant_output(
method(f) + method(g),
func(f_val) + func(g_val),
)
self.check_constant_output(
method(f + g),
func(f_val + g_val),
)
def test_comparisons(self):
f, g, h = self.f, self.g, self.h
self.fake_raw_data = {
f: arange(25).reshape(5, 5),
g: arange(25).reshape(5, 5) - eye(5),
h: full((5, 5), 5),
}
f_data = self.fake_raw_data[f]
g_data = self.fake_raw_data[g]
cases = [
# Sanity Check with hand-computed values.
(f, g, eye(5), zeros((5, 5))),
(f, 10, f_data, 10),
(10, f, 10, f_data),
(f, f, f_data, f_data),
(f + 1, f, f_data + 1, f_data),
(1 + f, f, 1 + f_data, f_data),
(f, g, f_data, g_data),
(f + 1, g, f_data + 1, g_data),
(f, g + 1, f_data, g_data + 1),
(f + 1, g + 1, f_data + 1, g_data + 1),
((f + g) / 2, f ** 2, (f_data + g_data) / 2, f_data ** 2),
]
for op in (gt, ge, lt, le, ne):
for expr_lhs, expr_rhs, expected_lhs, expected_rhs in cases:
self.check_output(
op(expr_lhs, expr_rhs),
op(expected_lhs, expected_rhs),
)
def test_boolean_binops(self):
f, g, h = self.f, self.g, self.h
self.fake_raw_data = {
f: arange(25).reshape(5, 5),
g: arange(25).reshape(5, 5) - eye(5),
h: full((5, 5), 5),
}
# Should be True on the diagonal.
eye_filter = f > g
# Should be True in the first row only.
first_row_filter = f < h
eye_mask = eye(5, dtype=bool)
first_row_mask = zeros((5, 5), dtype=bool)
first_row_mask[0] = 1
self.check_output(eye_filter, eye_mask)
self.check_output(first_row_filter, first_row_mask)
for op in (and_, or_): # NumExpr doesn't support xor.
self.check_output(
op(eye_filter, first_row_filter),
op(eye_mask, first_row_mask),
)
| apache-2.0 |
letsgoexploring/economicData | usConvergenceData/stateIncomeData.py | 1 | 5246 |
# coding: utf-8
# In[1]:
from __future__ import division,unicode_literals
# get_ipython().magic('matplotlib inline')
import numpy as np
import pandas as pd
import json
import runProcs
from urllib.request import urlopen
import matplotlib.pyplot as plt
# In[2]:
# 0. State abbreviations
# 0.1 dictionary:
stateAbbr = {
u'Alabama':u'AL',
u'Alaska':u'AK',
u'Arizona':u'AZ',
u'Arkansas':u'AR',
u'California':u'CA',
u'Colorado':u'CO',
u'Connecticut':u'CT',
u'Delaware':u'DE',
u'District of Columbia':u'DC',
u'Florida':u'FL',
u'Georgia':u'GA',
u'Hawaii':u'HI',
u'Idaho':u'ID',
u'Illinois':u'IL',
u'Indiana':u'IN',
u'Iowa':u'IA',
u'Kansas':u'KS',
u'Kentucky':u'KY',
u'Louisiana':u'LA',
u'Maine':u'ME',
u'Maryland':u'MD',
u'Massachusetts':u'MA',
u'Michigan':u'MI',
u'Minnesota':u'MN',
u'Mississippi':u'MS',
u'Missouri':u'MO',
u'Montana':u'MT',
u'Nebraska':u'NE',
u'Nevada':u'NV',
u'New Hampshire':u'NH',
u'New Jersey':u'NJ',
u'New Mexico':u'NM',
u'New York':u'NY',
u'North Carolina':u'NC',
u'North Dakota':u'ND',
u'Ohio':u'OH',
u'Oklahoma':u'OK',
u'Oregon':u'OR',
u'Pennsylvania':u'PA',
u'Rhode Island':u'RI',
u'South Carolina':u'SC',
u'South Dakota':u'SD',
u'Tennessee':u'TN',
u'Texas':u'TX',
u'Utah':u'UT',
u'Vermont':u'VT',
u'Virginia':u'VA',
u'Washington':u'WA',
u'West Virginia':u'WV',
u'Wisconsin':u'WI',
u'Wyoming':u'WY'
}
# 0.2 List of states in the US
stateList = [s for s in stateAbbr]
# In[3]:
# 1. Construct series for price deflator
# 1.1 Obtain data from BEA
gdpDeflator = urlopen('http://bea.gov/api/data/?UserID=3EDEAA66-4B2B-4926-83C9-FD2089747A5B&method=GetData&datasetname=NIPA&TableID=13&Frequency=A&Year=X&ResultFormat=JSON&')
# result = gdpDeflator.readall().decode('utf-8')
result = gdpDeflator.read().decode('utf-8')
jsonResponse = json.loads(result)
# In[4]:
# 1.2 Construct the data frame for the deflator series
values = []
years = []
for element in jsonResponse['BEAAPI']['Results']['Data']:
# if element['LineDescription'] == 'Personal consumption expenditures':
if element['LineDescription'] == 'Gross domestic product':
years.append(element['TimePeriod'])
values.append(float(element['DataValue'])/100)
values = np.array([values]).T
dataP = pd.DataFrame(values,index = years,columns = ['price level'])
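# For reference, each element consumed above exposes at least the keys used
# here; an illustrative (not actual) element looks like
# {'LineDescription': 'Gross domestic product', 'TimePeriod': '2009',
#  'DataValue': '100.000', ...}, and dividing 'DataValue' by 100 rescales the
# published index so the base year equals 1.0.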
# 1.3 Display the data
print(dataP)
# In[5]:
# 2. Construct series for per capita income by state, region, and the entire us
# 2.1 Obtain data from BEA
stateYpc = urlopen('http://bea.gov/api/data/?UserID=3EDEAA66-4B2B-4926-83C9-FD2089747A5B&method=GetData&datasetname=RegionalData&KeyCode=PCPI_SI&Year=ALL&GeoFips=STATE&ResultFormat=JSON&')
# result = stateYpc.readall().decode('utf-8')
result = stateYpc.read().decode('utf-8')
jsonResponse = json.loads(result)
# jsonResponse['BEAAPI']['Results']['Data'][0]['GeoName']
# In[6]:
# 2.2 Construct the data frame for the per capita income series
# 2.2.1 Initialize the dataframe
regions = []
years = []
for element in jsonResponse['BEAAPI']['Results']['Data']:
if element['GeoName'] not in regions:
regions.append(element['GeoName'])
if element['TimePeriod'] not in years:
years.append(element['TimePeriod'])
df = np.zeros([len(years),len(regions)])
dataY = pd.DataFrame(df,index = years,columns = regions)
# 2.2.2 Populate the dataframe with values
for element in jsonResponse['BEAAPI']['Results']['Data']:
try:
dataY[element['GeoName']][element['TimePeriod']] = np.round(float(element[u'DataValue'])/float(dataP.loc[element['TimePeriod']]),2)# real
except:
dataY[element['GeoName']][element['TimePeriod']] = np.nan
# 2.2.3 Replace the state names in the index with abbreviations
columns=[]
for r in regions:
if r in stateList:
columns.append(stateAbbr[r])
else:
columns.append(r)
dataY.columns=columns
# 2.2.4 Display the data obtained from the BEA
dataY
# In[7]:
# 3. State income data for 1840, 1880, and 1900
# 3.1.1 Import Easterlin's income data
easterlin_data = pd.read_csv('Historical Statistics of the US - Easterlin State Income Data.csv',index_col=0)
# 3.1.2 Import historic CPI data
historic_cpi_data=pd.read_csv('Historical Statistics of the US - cpi.csv',index_col=0)
historic_cpi_data = historic_cpi_data/historic_cpi_data.loc[1929]*float(dataP.loc['1929'])
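# A worked sketch of the rescaling above (the numbers are illustrative, not
# taken from the data): if the historic CPI were 17.1 in 1929 and 5.1 in 1900,
# and the BEA deflator for 1929 were 0.108, the rescaled 1900 value would be
# 5.1 / 17.1 * 0.108 ~= 0.032, so dividing a 1900 nominal income by 0.032
# expresses it in the same base-year dollars as the BEA-deflated series.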
# In[8]:
# Construct real (deflated) income series for 1840, 1880, and 1900
df_1840 = easterlin_data['Income per capita - 1840 - A [cur dollars]']/float(historic_cpi_data.loc[1840])
df_1880 = easterlin_data['Income per capita - 1880 [cur dollars]']/float(historic_cpi_data.loc[1890])
df_1900 = easterlin_data['Income per capita - 1900 [cur dollars]']/float(historic_cpi_data.loc[1900])
df = pd.DataFrame({'1840':df_1840,'1880':df_1880,'1900':df_1900}).transpose()
# In[9]:
df = pd.concat([dataY,df]).sort_index()
# In[17]:
df.loc['1880'].sort_values()
# In[10]:
# 3. Export data to csv
series = dataY.sort_index()
series = df.sort_index()
dropCols = [u'AK', u'HI', u'New England', u'Mideast', u'Great Lakes', u'Plains', u'Southeast', u'Southwest', u'Rocky Mountain', u'Far West']
for c in dropCols:
series = series.drop([c],axis=1)
series.to_csv('stateIncomeData.csv',na_rep='NaN')
# In[11]:
len(dataY.columns)
# In[12]:
# 4. Export notebook to .py
runProcs.exportNb('stateIncomeData')
| mit |
sumspr/scikit-learn | examples/linear_model/plot_lasso_and_elasticnet.py | 249 | 1982 | """
========================================
Lasso and Elastic Net for Sparse Signals
========================================
Estimates Lasso and Elastic-Net regression models on a manually generated
sparse signal corrupted with an additive noise. Estimated coefficients are
compared with the ground-truth.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import r2_score
###############################################################################
# generate some sparse data to play with
np.random.seed(42)
n_samples, n_features = 50, 200
X = np.random.randn(n_samples, n_features)
coef = 3 * np.random.randn(n_features)
inds = np.arange(n_features)
np.random.shuffle(inds)
coef[inds[10:]] = 0 # sparsify coef
y = np.dot(X, coef)
# add noise
y += 0.01 * np.random.normal(size=(n_samples,))
# Split data in train set and test set
n_samples = X.shape[0]
X_train, y_train = X[:n_samples / 2], y[:n_samples / 2]
X_test, y_test = X[n_samples / 2:], y[n_samples / 2:]
###############################################################################
# Lasso
from sklearn.linear_model import Lasso
alpha = 0.1
lasso = Lasso(alpha=alpha)
y_pred_lasso = lasso.fit(X_train, y_train).predict(X_test)
r2_score_lasso = r2_score(y_test, y_pred_lasso)
print(lasso)
print("r^2 on test data : %f" % r2_score_lasso)
###############################################################################
# ElasticNet
from sklearn.linear_model import ElasticNet
enet = ElasticNet(alpha=alpha, l1_ratio=0.7)
y_pred_enet = enet.fit(X_train, y_train).predict(X_test)
r2_score_enet = r2_score(y_test, y_pred_enet)
print(enet)
print("r^2 on test data : %f" % r2_score_enet)
plt.plot(enet.coef_, label='Elastic net coefficients')
plt.plot(lasso.coef_, label='Lasso coefficients')
plt.plot(coef, '--', label='original coefficients')
plt.legend(loc='best')
plt.title("Lasso R^2: %f, Elastic Net R^2: %f"
% (r2_score_lasso, r2_score_enet))
plt.show()
| bsd-3-clause |
teoliphant/scipy | scipy/stats/distributions.py | 2 | 215895 | # Functions to implement several important functions for
# various Continuous and Discrete Probability Distributions
#
# Author: Travis Oliphant 2002-2011 with contributions from
# SciPy Developers 2004-2011
#
import math
import warnings
from copy import copy
from scipy.misc import comb, derivative
from scipy import special
from scipy import optimize
from scipy import integrate
from scipy.special import gammaln as gamln
import inspect
from numpy import all, where, arange, putmask, \
ravel, take, ones, sum, shape, product, repeat, reshape, \
zeros, floor, logical_and, log, sqrt, exp, arctanh, tan, sin, arcsin, \
arctan, tanh, ndarray, cos, cosh, sinh, newaxis, array, log1p, expm1
from numpy import atleast_1d, polyval, ceil, place, extract, \
any, argsort, argmax, vectorize, r_, asarray, nan, inf, pi, isinf, \
power, NINF, empty
import numpy
import numpy as np
import numpy.random as mtrand
from numpy import flatnonzero as nonzero
import vonmises_cython
from _tukeylambda_stats import tukeylambda_variance as _tlvar, \
tukeylambda_kurtosis as _tlkurt
__all__ = [
'rv_continuous',
'ksone', 'kstwobign', 'norm', 'alpha', 'anglit', 'arcsine',
'beta', 'betaprime', 'bradford', 'burr', 'fisk', 'cauchy',
'chi', 'chi2', 'cosine', 'dgamma', 'dweibull', 'erlang',
'expon', 'exponweib', 'exponpow', 'fatiguelife', 'foldcauchy',
'f', 'foldnorm', 'frechet_r', 'weibull_min', 'frechet_l',
'weibull_max', 'genlogistic', 'genpareto', 'genexpon', 'genextreme',
'gamma', 'gengamma', 'genhalflogistic', 'gompertz', 'gumbel_r',
'gumbel_l', 'halfcauchy', 'halflogistic', 'halfnorm', 'hypsecant',
'gausshyper', 'invgamma', 'invgauss', 'invweibull',
'johnsonsb', 'johnsonsu', 'laplace', 'levy', 'levy_l',
'levy_stable', 'logistic', 'loggamma', 'loglaplace', 'lognorm',
'gilbrat', 'maxwell', 'mielke', 'nakagami', 'ncx2', 'ncf', 't',
'nct', 'pareto', 'lomax', 'powerlaw', 'powerlognorm', 'powernorm',
'rdist', 'rayleigh', 'reciprocal', 'rice', 'recipinvgauss',
'semicircular', 'triang', 'truncexpon', 'truncnorm',
'tukeylambda', 'uniform', 'vonmises', 'wald', 'wrapcauchy',
'entropy', 'rv_discrete', 'binom', 'bernoulli', 'nbinom', 'geom',
'hypergeom', 'logser', 'poisson', 'planck', 'boltzmann', 'randint',
'zipf', 'dlaplace', 'skellam'
]
floatinfo = numpy.finfo(float)
gam = special.gamma
random = mtrand.random_sample
import types
from scipy.misc import doccer
sgf = vectorize
try:
from new import instancemethod
except ImportError:
# Python 3
def instancemethod(func, obj, cls):
return types.MethodType(func, obj)
# These are the docstring parts used for substitution in specific
# distribution docstrings.
docheaders = {'methods':"""\nMethods\n-------\n""",
              'parameters':"""\nParameters\n----------\n""",
'notes':"""\nNotes\n-----\n""",
'examples':"""\nExamples\n--------\n"""}
_doc_rvs = \
"""rvs(%(shapes)s, loc=0, scale=1, size=1)
Random variates.
"""
_doc_pdf = \
"""pdf(x, %(shapes)s, loc=0, scale=1)
Probability density function.
"""
_doc_logpdf = \
"""logpdf(x, %(shapes)s, loc=0, scale=1)
Log of the probability density function.
"""
_doc_pmf = \
"""pmf(x, %(shapes)s, loc=0, scale=1)
Probability mass function.
"""
_doc_logpmf = \
"""logpmf(x, %(shapes)s, loc=0, scale=1)
Log of the probability mass function.
"""
_doc_cdf = \
"""cdf(x, %(shapes)s, loc=0, scale=1)
    Cumulative distribution function.
"""
_doc_logcdf = \
"""logcdf(x, %(shapes)s, loc=0, scale=1)
    Log of the cumulative distribution function.
"""
_doc_sf = \
"""sf(x, %(shapes)s, loc=0, scale=1)
Survival function (1-cdf --- sometimes more accurate).
"""
_doc_logsf = \
"""logsf(x, %(shapes)s, loc=0, scale=1)
Log of the survival function.
"""
_doc_ppf = \
"""ppf(q, %(shapes)s, loc=0, scale=1)
Percent point function (inverse of cdf --- percentiles).
"""
_doc_isf = \
"""isf(q, %(shapes)s, loc=0, scale=1)
Inverse survival function (inverse of sf).
"""
_doc_moment = \
"""moment(n, %(shapes)s, loc=0, scale=1)
Non-central moment of order n
"""
_doc_stats = \
"""stats(%(shapes)s, loc=0, scale=1, moments='mv')
Mean('m'), variance('v'), skew('s'), and/or kurtosis('k').
"""
_doc_entropy = \
"""entropy(%(shapes)s, loc=0, scale=1)
(Differential) entropy of the RV.
"""
_doc_fit = \
"""fit(data, %(shapes)s, loc=0, scale=1)
Parameter estimates for generic data.
"""
_doc_expect = \
"""expect(func, %(shapes)s, loc=0, scale=1, lb=None, ub=None, conditional=False, **kwds)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_expect_discrete = \
"""expect(func, %(shapes)s, loc=0, lb=None, ub=None, conditional=False)
Expected value of a function (of one argument) with respect to the distribution.
"""
_doc_median = \
"""median(%(shapes)s, loc=0, scale=1)
Median of the distribution.
"""
_doc_mean = \
"""mean(%(shapes)s, loc=0, scale=1)
Mean of the distribution.
"""
_doc_var = \
"""var(%(shapes)s, loc=0, scale=1)
Variance of the distribution.
"""
_doc_std = \
"""std(%(shapes)s, loc=0, scale=1)
Standard deviation of the distribution.
"""
_doc_interval = \
"""interval(alpha, %(shapes)s, loc=0, scale=1)
Endpoints of the range that contains alpha percent of the distribution
"""
_doc_allmethods = ''.join([docheaders['methods'], _doc_rvs, _doc_pdf,
_doc_logpdf, _doc_cdf, _doc_logcdf, _doc_sf,
_doc_logsf, _doc_ppf, _doc_isf, _doc_moment,
_doc_stats, _doc_entropy, _doc_fit,
_doc_expect, _doc_median,
_doc_mean, _doc_var, _doc_std, _doc_interval])
# Note that the two lines for %(shapes) are searched for and replaced in
# rv_continuous and rv_discrete - update there if the exact string changes
_doc_default_callparams = \
"""
Parameters
----------
x : array_like
quantiles
q : array_like
lower or upper tail probability
%(shapes)s : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
    shape of random variates (default computed from input arguments)
moments : str, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
"""
_doc_default_longsummary = \
"""Continuous random variables are defined from a standard form and may
require some shape parameters to complete their specification. Any
optional keyword parameters can be passed to the methods of the RV
object as given below:
"""
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = %(name)s(%(shapes)s, loc=0, scale=1)
- Frozen RV object with the same methods but holding the given shape,
location, and scale fixed.
"""
_doc_default_example = \
"""Examples
--------
>>> from scipy.stats import %(name)s
>>> numargs = %(name)s.numargs
>>> [ %(shapes)s ] = [0.9,] * numargs
>>> rv = %(name)s(%(shapes)s)
Display frozen pdf
>>> x = np.linspace(0, np.minimum(rv.dist.b, 3))
>>> h = plt.plot(x, rv.pdf(x))
Here, ``rv.dist.b`` is the right endpoint of the support of ``rv.dist``.
Check accuracy of cdf and ppf
>>> prb = %(name)s.cdf(x, %(shapes)s)
>>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20)
Random number generation
>>> R = %(name)s.rvs(%(shapes)s, size=100)
"""
_doc_default = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note,
_doc_default_example])
_doc_default_before_notes = ''.join([_doc_default_longsummary,
_doc_allmethods,
_doc_default_callparams,
_doc_default_frozen_note])
docdict = {'rvs':_doc_rvs,
'pdf':_doc_pdf,
'logpdf':_doc_logpdf,
'cdf':_doc_cdf,
'logcdf':_doc_logcdf,
'sf':_doc_sf,
'logsf':_doc_logsf,
'ppf':_doc_ppf,
'isf':_doc_isf,
'stats':_doc_stats,
'entropy':_doc_entropy,
'fit':_doc_fit,
'moment':_doc_moment,
'expect':_doc_expect,
'interval':_doc_interval,
'mean':_doc_mean,
'std':_doc_std,
'var':_doc_var,
'median':_doc_median,
'allmethods':_doc_allmethods,
'callparams':_doc_default_callparams,
'longsummary':_doc_default_longsummary,
'frozennote':_doc_default_frozen_note,
'example':_doc_default_example,
'default':_doc_default,
'before_notes':_doc_default_before_notes}
# Reuse common content between continuous and discrete docs, change some
# minor bits.
docdict_discrete = docdict.copy()
docdict_discrete['pmf'] = _doc_pmf
docdict_discrete['logpmf'] = _doc_logpmf
docdict_discrete['expect'] = _doc_expect_discrete
_doc_disc_methods = ['rvs', 'pmf', 'logpmf', 'cdf', 'logcdf', 'sf', 'logsf',
'ppf', 'isf', 'stats', 'entropy', 'expect', 'median',
'mean', 'var', 'std', 'interval']
for obj in _doc_disc_methods:
docdict_discrete[obj] = docdict_discrete[obj].replace(', scale=1', '')
docdict_discrete.pop('pdf')
docdict_discrete.pop('logpdf')
_doc_allmethods = ''.join([docdict_discrete[obj] for obj in
_doc_disc_methods])
docdict_discrete['allmethods'] = docheaders['methods'] + _doc_allmethods
docdict_discrete['longsummary'] = _doc_default_longsummary.replace(\
'Continuous', 'Discrete')
_doc_default_frozen_note = \
"""
Alternatively, the object may be called (as a function) to fix the shape and
location parameters returning a "frozen" discrete RV object:
rv = %(name)s(%(shapes)s, loc=0)
- Frozen RV object with the same methods but holding the given shape and
location fixed.
"""
docdict_discrete['frozennote'] = _doc_default_frozen_note
_doc_default_discrete_example = \
"""Examples
--------
>>> from scipy.stats import %(name)s
>>> [ %(shapes)s ] = [<Replace with reasonable values>]
>>> rv = %(name)s(%(shapes)s)
Display frozen pmf
>>> x = np.arange(0, np.minimum(rv.dist.b, 3))
>>> h = plt.vlines(x, 0, rv.pmf(x), lw=2)
Here, ``rv.dist.b`` is the right endpoint of the support of ``rv.dist``.
Check accuracy of cdf and ppf
>>> prb = %(name)s.cdf(x, %(shapes)s)
>>> h = plt.semilogy(np.abs(x - %(name)s.ppf(prb, %(shapes)s)) + 1e-20)
Random number generation
>>> R = %(name)s.rvs(%(shapes)s, size=100)
"""
docdict_discrete['example'] = _doc_default_discrete_example
_doc_default_before_notes = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['callparams'],
docdict_discrete['frozennote']])
docdict_discrete['before_notes'] = _doc_default_before_notes
_doc_default_disc = ''.join([docdict_discrete['longsummary'],
docdict_discrete['allmethods'],
docdict_discrete['frozennote'],
docdict_discrete['example']])
docdict_discrete['default'] = _doc_default_disc
# clean up all the separate docstring elements, we do not need them anymore
for obj in [s for s in dir() if s.startswith('_doc_')]:
exec('del ' + obj)
del obj
try:
del s
except NameError:
# in Python 3, loop variables are not visible after the loop
pass
def _moment(data, n, mu=None):
if mu is None:
mu = data.mean()
return ((data - mu)**n).mean()
def _moment_from_stats(n, mu, mu2, g1, g2, moment_func, args):
if (n==0):
return 1.0
elif (n==1):
if mu is None:
val = moment_func(1,*args)
else:
val = mu
elif (n==2):
if mu2 is None or mu is None:
val = moment_func(2,*args)
else:
val = mu2 + mu*mu
elif (n==3):
if g1 is None or mu2 is None or mu is None:
val = moment_func(3,*args)
else:
mu3 = g1*(mu2**1.5) # 3rd central moment
val = mu3+3*mu*mu2+mu**3 # 3rd non-central moment
elif (n==4):
if g1 is None or g2 is None or mu2 is None or mu is None:
val = moment_func(4,*args)
else:
mu4 = (g2+3.0)*(mu2**2.0) # 4th central moment
mu3 = g1*(mu2**1.5) # 3rd central moment
val = mu4+4*mu*mu3+6*mu*mu*mu2+mu**4
else:
val = moment_func(n, *args)
return val
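# Illustrative sketch (editor addition, not part of the original module): for a
# standard normal (mu=0, mu2=1, g1=0, g2=0) the helper above rebuilds the 3rd
# non-central moment as mu3 + 3*mu*mu2 + mu**3 == 0 without ever falling back
# to `moment_func`. The name `_example_moment_from_stats` is new and purely
# demonstrative.
def _example_moment_from_stats():
    def never_called(n, *args):
        raise AssertionError("not needed when all stats are available")
    return _moment_from_stats(3, 0.0, 1.0, 0.0, 0.0, never_called, ())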
def _skew(data):
"""
skew is third central moment / variance**(1.5)
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m3 = ((data - mu)**3).mean()
return m3 / m2**1.5
def _kurtosis(data):
"""
kurtosis is fourth central moment / variance**2 - 3
"""
data = np.ravel(data)
mu = data.mean()
m2 = ((data - mu)**2).mean()
m4 = ((data - mu)**4).mean()
return m4 / m2**2 - 3
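# Illustrative sketch (editor addition): applying the sample helpers above to
# uniform random data gives values near the population skew 0 and excess
# kurtosis -1.2. `_example_sample_moments` is a new, purely demonstrative name.
def _example_sample_moments():
    data = mtrand.random_sample(100000)
    return _skew(data), _kurtosis(data)   # approximately (0.0, -1.2)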
# Frozen RV class
class rv_frozen(object):
def __init__(self, dist, *args, **kwds):
self.args = args
self.kwds = kwds
self.dist = dist
def pdf(self, x): #raises AttributeError in frozen discrete distribution
return self.dist.pdf(x, *self.args, **self.kwds)
def logpdf(self, x):
return self.dist.logpdf(x, *self.args, **self.kwds)
def cdf(self, x):
return self.dist.cdf(x, *self.args, **self.kwds)
def logcdf(self, x):
return self.dist.logcdf(x, *self.args, **self.kwds)
def ppf(self, q):
return self.dist.ppf(q, *self.args, **self.kwds)
def isf(self, q):
return self.dist.isf(q, *self.args, **self.kwds)
def rvs(self, size=None):
kwds = self.kwds.copy()
kwds.update({'size':size})
return self.dist.rvs(*self.args, **kwds)
def sf(self, x):
return self.dist.sf(x, *self.args, **self.kwds)
def logsf(self, x):
return self.dist.logsf(x, *self.args, **self.kwds)
def stats(self, moments='mv'):
kwds = self.kwds.copy()
kwds.update({'moments':moments})
return self.dist.stats(*self.args, **kwds)
def median(self):
return self.dist.median(*self.args, **self.kwds)
def mean(self):
return self.dist.mean(*self.args, **self.kwds)
def var(self):
return self.dist.var(*self.args, **self.kwds)
def std(self):
return self.dist.std(*self.args, **self.kwds)
def moment(self, n):
return self.dist.moment(n, *self.args, **self.kwds)
def entropy(self):
return self.dist.entropy(*self.args, **self.kwds)
def pmf(self,k):
return self.dist.pmf(k, *self.args, **self.kwds)
def logpmf(self,k):
return self.dist.logpmf(k, *self.args, **self.kwds)
def interval(self, alpha):
return self.dist.interval(alpha, *self.args, **self.kwds)
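# Illustrative sketch (editor addition): rv_frozen simply stores the shape,
# loc and scale arguments and forwards every call to the underlying
# distribution. The `norm` instance used here is defined further down in this
# module; `_example_frozen_usage` is a new demonstrative name.
def _example_frozen_usage():
    rv = norm(loc=2.0, scale=3.0)             # __call__ -> freeze -> rv_frozen
    return rv.mean(), rv.std(), rv.cdf(2.0)   # roughly (2.0, 3.0, 0.5)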
def valarray(shape,value=nan,typecode=None):
"""Return an array of all value.
"""
out = reshape(repeat([value],product(shape,axis=0),axis=0),shape)
if typecode is not None:
out = out.astype(typecode)
if not isinstance(out, ndarray):
out = asarray(out)
return out
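# Illustrative sketch (editor addition): valarray is a tiny helper used below
# to pre-fill result arrays (typically with `badvalue`). `_example_valarray`
# is a new name used only for demonstration.
def _example_valarray():
    return valarray((2, 3), value=0.5, typecode='d')   # 2x3 array filled with 0.5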
# This should be rewritten
def argsreduce(cond, *args):
"""Return the sequence of ravel(args[i]) where ravel(condition) is
True in 1D.
Examples
--------
>>> import numpy as np
>>> rand = np.random.random_sample
>>> A = rand((4,5))
>>> B = 2
>>> C = rand((1,5))
>>> cond = np.ones(A.shape)
>>> [A1,B1,C1] = argsreduce(cond,A,B,C)
>>> B1.shape
(20,)
>>> cond[2,:] = 0
>>> [A2,B2,C2] = argsreduce(cond,A,B,C)
>>> B2.shape
(15,)
"""
newargs = atleast_1d(*args)
if not isinstance(newargs, list):
newargs = [newargs,]
expand_arr = (cond==cond)
return [extract(cond, arr1 * expand_arr) for arr1 in newargs]
class rv_generic(object):
"""Class which encapsulates common functionality between rv_discrete
and rv_continuous.
"""
def _fix_loc_scale(self, args, loc, scale=1):
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None:
# loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and scale is None:
# loc and scale given without keyword
loc, scale = args[-2:]
args = args[:self.numargs]
if scale is None:
scale = 1.0
if loc is None:
loc = 0.0
return args, loc, scale
def _fix_loc(self, args, loc):
args, loc, scale = self._fix_loc_scale(args, loc)
return args, loc
# These are actually called, and should not be overwritten if you
# want to keep error checking.
def rvs(self,*args,**kwds):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
defining number of random variates (default=1)
Returns
-------
rvs : array_like
random variates of given `size`
"""
kwd_names = ['loc', 'scale', 'size', 'discrete']
loc, scale, size, discrete = map(kwds.get, kwd_names,
[None]*len(kwd_names))
args, loc, scale = self._fix_loc_scale(args, loc, scale)
cond = logical_and(self._argcheck(*args),(scale >= 0))
if not all(cond):
raise ValueError("Domain error in arguments.")
# self._size is total size of all output values
self._size = product(size, axis=0)
if self._size is not None and self._size > 1:
size = numpy.array(size, ndmin=1)
if np.all(scale == 0):
return loc*ones(size, 'd')
vals = self._rvs(*args)
if self._size is not None:
vals = reshape(vals, size)
vals = vals * scale + loc
# Cast to int if discrete
if discrete:
if numpy.isscalar(vals):
vals = int(vals)
else:
vals = vals.astype(int)
return vals
def median(self, *args, **kwds):
"""
Median of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
median : float
the median of the distribution.
See Also
--------
self.ppf --- inverse of the CDF
"""
return self.ppf(0.5, *args, **kwds)
def mean(self, *args, **kwds):
"""
Mean of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
mean : float
the mean of the distribution
"""
kwds['moments'] = 'm'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def var(self, *args, **kwds):
"""
Variance of the distribution
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
var : float
the variance of the distribution
"""
kwds['moments'] = 'v'
res = self.stats(*args, **kwds)
if isinstance(res, ndarray) and res.ndim == 0:
return res[()]
return res
def std(self, *args, **kwds):
"""
Standard deviation of the distribution.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
std : float
standard deviation of the distribution
"""
kwds['moments'] = 'v'
res = sqrt(self.stats(*args, **kwds))
return res
def interval(self, alpha, *args, **kwds):
"""Confidence interval with equal areas around the median
Parameters
----------
alpha : array_like float in [0,1]
Probability that an rv will be drawn from the returned range
arg1, arg2, ... : array_like
The shape parameter(s) for the distribution (see docstring of the instance
object for more information)
loc : array_like, optional
location parameter (default = 0)
scale : array_like, optional
            scale parameter (default = 1)
Returns
-------
a, b : array_like (float)
            end-points of the range that contains fraction alpha of the distribution
"""
alpha = asarray(alpha)
if any((alpha > 1) | (alpha < 0)):
raise ValueError("alpha must be between 0 and 1 inclusive")
q1 = (1.0-alpha)/2
q2 = (1.0+alpha)/2
a = self.ppf(q1, *args, **kwds)
b = self.ppf(q2, *args, **kwds)
return a, b
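# Illustrative sketch (editor addition): `interval(alpha)` above is nothing
# more than the pair of ppf values at (1-alpha)/2 and (1+alpha)/2; for the
# standard normal (defined later in this module) the 95% interval is roughly
# (-1.96, 1.96). `_example_interval` is a new demonstrative name.
def _example_interval():
    return norm.interval(0.95), norm.ppf([0.025, 0.975])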
## continuous random variables: implement maybe later
##
## hf --- Hazard Function (PDF / SF)
## chf --- Cumulative hazard function (-log(SF))
## psf --- Probability sparsity function (reciprocal of the pdf) in
## units of percent-point-function (as a function of q).
## Also, the derivative of the percent-point function.
class rv_continuous(rv_generic):
"""
A generic continuous random variable class meant for subclassing.
`rv_continuous` is a base class to construct specific distribution classes
and instances from for continuous random variables. It cannot be used
directly as a distribution.
Parameters
----------
momtype : int, optional
The type of generic moment calculation to use: 0 for pdf, 1 (default) for ppf.
a : float, optional
Lower bound of the support of the distribution, default is minus
infinity.
b : float, optional
Upper bound of the support of the distribution, default is plus
infinity.
xa : float, optional
DEPRECATED
xb : float, optional
DEPRECATED
xtol : float, optional
The tolerance for fixed point calculation for generic ppf.
badvalue : object, optional
        The value placed in the result arrays at positions where some argument
        restriction is violated; default is np.nan.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the two shape arguments for all
its methods.
extradoc : str, optional, deprecated
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
rvs(<shape(s)>, loc=0, scale=1, size=1)
random variates
pdf(x, <shape(s)>, loc=0, scale=1)
probability density function
logpdf(x, <shape(s)>, loc=0, scale=1)
log of the probability density function
cdf(x, <shape(s)>, loc=0, scale=1)
        cumulative distribution function
logcdf(x, <shape(s)>, loc=0, scale=1)
        log of the cumulative distribution function
sf(x, <shape(s)>, loc=0, scale=1)
survival function (1-cdf --- sometimes more accurate)
logsf(x, <shape(s)>, loc=0, scale=1)
log of the survival function
ppf(q, <shape(s)>, loc=0, scale=1)
percent point function (inverse of cdf --- quantiles)
isf(q, <shape(s)>, loc=0, scale=1)
inverse survival function (inverse of sf)
moment(n, <shape(s)>, loc=0, scale=1)
non-central n-th moment of the distribution. May not work for array arguments.
stats(<shape(s)>, loc=0, scale=1, moments='mv')
mean('m'), variance('v'), skew('s'), and/or kurtosis('k')
entropy(<shape(s)>, loc=0, scale=1)
(differential) entropy of the RV.
fit(data, <shape(s)>, loc=0, scale=1)
Parameter estimates for generic data
expect(func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
median(<shape(s)>, loc=0, scale=1)
Median of the distribution.
mean(<shape(s)>, loc=0, scale=1)
Mean of the distribution.
std(<shape(s)>, loc=0, scale=1)
Standard deviation of the distribution.
var(<shape(s)>, loc=0, scale=1)
Variance of the distribution.
interval(alpha, <shape(s)>, loc=0, scale=1)
        Interval that contains a random realization of this distribution
        with probability `alpha`.
__call__(<shape(s)>, loc=0, scale=1)
Calling a distribution instance creates a frozen RV object with the
same methods but holding the given shape, location, and scale fixed.
See Notes section.
**Parameters for Methods**
x : array_like
quantiles
q : array_like
lower or upper tail probability
<shape(s)> : array_like
shape parameters
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
size : int or tuple of ints, optional
        shape of random variates (default computed from input arguments)
moments : string, optional
composed of letters ['mvsk'] specifying which moments to compute where
'm' = mean, 'v' = variance, 's' = (Fisher's) skew and
'k' = (Fisher's) kurtosis. (default='mv')
n : int
order of moment to calculate in method moments
Notes
-----
**Methods that can be overwritten by subclasses**
::
_rvs
_pdf
_cdf
_sf
_ppf
_isf
_stats
_munp
_entropy
_argcheck
There are additional (internal and private) generic methods that can
    be useful for cross-checking and for debugging, but might not work in all
    cases when called directly.
**Frozen Distribution**
Alternatively, the object may be called (as a function) to fix the shape,
location, and scale parameters returning a "frozen" continuous RV object:
rv = generic(<shape(s)>, loc=0, scale=1)
frozen RV object with the same methods but holding the given shape,
location, and scale fixed
**Subclassing**
New random variables can be defined by subclassing rv_continuous class
and re-defining at least the ``_pdf`` or the ``_cdf`` method (normalized
    to location 0 and scale 1) which will be given clean arguments (in between
    ``a`` and ``b``) that pass the argument check method.
    If positivity is not the correct constraint on the shape parameters of
    your RV, you will also need to re-define the ``_argcheck`` method.
Correct, but potentially slow defaults exist for the remaining
methods but for speed and/or accuracy you can over-ride::
_logpdf, _cdf, _logcdf, _ppf, _rvs, _isf, _sf, _logsf
Rarely would you override ``_isf``, ``_sf`` or ``_logsf``, but you could.
Statistics are computed using numerical integration by default.
For speed you can redefine this using ``_stats``:
- take shape parameters and return mu, mu2, g1, g2
- If you can't compute one of these, return it as None
- Can also be defined with a keyword argument ``moments=<str>``,
where <str> is a string composed of 'm', 'v', 's',
      and/or 'k'. Only the components appearing in the string
      should be computed and returned in the order 'm', 'v',
's', or 'k' with missing values returned as None.
Alternatively, you can override ``_munp``, which takes n and shape
parameters and returns the nth non-central moment of the distribution.
Examples
--------
To create a new Gaussian distribution, we would do the following::
class gaussian_gen(rv_continuous):
"Gaussian distribution"
            def _pdf(self, x):
                return exp(-x**2 / 2.) / sqrt(2. * pi)
"""
def __init__(self, momtype=1, a=None, b=None, xa=None, xb=None,
xtol=1e-14, badvalue=None, name=None, longname=None,
shapes=None, extradoc=None):
rv_generic.__init__(self)
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.name = name
self.a = a
self.b = b
if a is None:
self.a = -inf
if b is None:
self.b = inf
if xa is not None:
warnings.warn("The `xa` parameter is deprecated and will be "
"removed in scipy 0.12", DeprecationWarning)
if xb is not None:
warnings.warn("The `xb` parameter is deprecated and will be "
"removed in scipy 0.12", DeprecationWarning)
self.xa = xa
self.xb = xb
self.xtol = xtol
self._size = 1
self.m = 0.0
self.moment_type = momtype
self.expandarr = 1
if not hasattr(self,'numargs'):
#allows more general subclassing with *args
            cdf_signature = inspect.getargspec(self._cdf.__func__)
numargs1 = len(cdf_signature[0]) - 2
            pdf_signature = inspect.getargspec(self._pdf.__func__)
numargs2 = len(pdf_signature[0]) - 2
self.numargs = max(numargs1, numargs2)
#nin correction
self.vecfunc = sgf(self._ppf_single_call,otypes='d')
self.vecfunc.nin = self.numargs + 1
self.vecentropy = sgf(self._entropy,otypes='d')
self.vecentropy.nin = self.numargs + 1
self.veccdf = sgf(self._cdf_single_call,otypes='d')
self.veccdf.nin = self.numargs + 1
self.shapes = shapes
self.extradoc = extradoc
if momtype == 0:
self.generic_moment = sgf(self._mom0_sc,otypes='d')
else:
self.generic_moment = sgf(self._mom1_sc,otypes='d')
self.generic_moment.nin = self.numargs+1 # Because of the *args argument
# of _mom0_sc, vectorize cannot count the number of arguments correctly.
if longname is None:
            if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
# generate docstring for subclass instances
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the default template."""
if longname is None:
longname = 'A'
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s continuous random variable.'%longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
                # necessary because we use %(shapes)s in two forms (with and without ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _ppf_to_solve(self, x, q,*args):
        return self.cdf(x, *args) - q
def _ppf_single_call(self, q, *args):
left = right = None
if self.a > -np.inf:
left = self.a
if self.b < np.inf:
right = self.b
factor = 10.
        if left is None:  # i.e. self.a == -inf
left = -1.*factor
while self._ppf_to_solve(left, q,*args) > 0.:
right = left
left *= factor
# left is now such that cdf(left) < q
        if right is None:  # i.e. self.b == inf
right = factor
while self._ppf_to_solve(right, q,*args) < 0.:
left = right
right *= factor
# right is now such that cdf(right) > q
return optimize.brentq(self._ppf_to_solve, \
left, right, args=(q,)+args, xtol=self.xtol)
# moment from definition
def _mom_integ0(self, x,m,*args):
return x**m * self.pdf(x,*args)
def _mom0_sc(self, m,*args):
return integrate.quad(self._mom_integ0, self.a,
self.b, args=(m,)+args)[0]
# moment calculated using ppf
def _mom_integ1(self, q,m,*args):
return (self.ppf(q,*args))**m
def _mom1_sc(self, m,*args):
return integrate.quad(self._mom_integ1, 0, 1,args=(m,)+args)[0]
## These are the methods you must define (standard form functions)
def _argcheck(self, *args):
# Default check for correct values on args and keywords.
# Returns condition array of 1's where arguments are correct and
# 0's where they are not.
cond = 1
for arg in args:
cond = logical_and(cond,(asarray(arg) > 0))
return cond
def _pdf(self,x,*args):
return derivative(self._cdf,x,dx=1e-5,args=args,order=5)
## Could also define any of these
def _logpdf(self, x, *args):
return log(self._pdf(x, *args))
##(return 1-d using self._size to get number)
def _rvs(self, *args):
## Use basic inverse cdf algorithm for RV generation as default.
U = mtrand.sample(self._size)
Y = self._ppf(U,*args)
return Y
def _cdf_single_call(self, x, *args):
return integrate.quad(self._pdf, self.a, x, args=args)[0]
def _cdf(self, x, *args):
return self.veccdf(x,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self.vecfunc(q,*args)
def _isf(self, q, *args):
return self._ppf(1.0-q,*args) #use correct _ppf for subclasses
    # The actual calculation functions (no basic checking need be done)
# If these are defined, the others won't be looked at.
# Otherwise, the other set can be defined.
def _stats(self,*args, **kwds):
return None, None, None, None
    # Non-central moments (moments about zero)
def _munp(self,n,*args):
return self.generic_moment(n,*args)
def pdf(self,x,*args,**kwds):
"""
Probability density function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
pdf : ndarray
Probability density function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
putmask(output,(1-cond0)+np.isnan(x),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._pdf(*goodargs) / scale)
if output.ndim == 0:
return output[()]
return output
def logpdf(self, x, *args, **kwds):
"""
Log of the probability density function at x of the given RV.
This uses a more numerically accurate calculation if available.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logpdf : array_like
Log of the probability density function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = asarray((x-loc)*1.0/scale)
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x >= self.a) & (x <= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
putmask(output,(1-cond0)+np.isnan(x),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args+(scale,)))
scale, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._logpdf(*goodargs) - log(scale))
if output.ndim == 0:
return output[()]
return output
def cdf(self,x,*args,**kwds):
"""
Cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
cdf : array_like
Cumulative distribution function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0)+np.isnan(x),self.badvalue)
place(output,cond2,1.0)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self,x,*args,**kwds):
"""
Log of the cumulative distribution function at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = (x >= self.b) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0)*(cond1==cond1)+np.isnan(x),self.badvalue)
place(output,cond2,0.0)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,x,*args,**kwds):
"""
Survival function (1-cdf) at x of the given RV.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
sf : array_like
Survival function evaluated at x
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0)+np.isnan(x),self.badvalue)
place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,x,*args,**kwds):
"""
Log of the survival function of the given RV.
Returns the log of the "survival function," defined as (1 - `cdf`),
evaluated at `x`.
Parameters
----------
x : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
logsf : ndarray
Log of the survival function evaluated at `x`.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
x,loc,scale = map(asarray,(x,loc,scale))
args = tuple(map(asarray,args))
x = (x-loc)*1.0/scale
cond0 = self._argcheck(*args) & (scale > 0)
cond1 = (scale > 0) & (x > self.a) & (x < self.b)
cond2 = cond0 & (x <= self.a)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0)+np.isnan(x),self.badvalue)
place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((x,)+args))
place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV.
Parameters
----------
q : array_like
lower tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the lower tail probability q.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
q,loc,scale = map(asarray,(q,loc,scale))
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.a*scale + loc)
place(output,(1-cond0)+(1-cond1)*(q!=0.0), self.badvalue)
place(output,cond2,self.b*scale + loc)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output,cond,self._ppf(*goodargs)*scale + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function at q of the given RV.
Parameters
----------
q : array_like
upper tail probability
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
Returns
-------
x : array_like
quantile corresponding to the upper tail probability q.
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
q,loc,scale = map(asarray,(q,loc,scale))
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
        output = valarray(shape(cond),value=self.b*scale + loc)
#place(output,(1-cond0)*(cond1==cond1), self.badvalue)
place(output,(1-cond0)*(cond1==cond1)+(1-cond1)*(q!=0.0), self.badvalue)
        place(output,cond2,self.a*scale + loc)
if any(cond): #call only if at least 1 entry
goodargs = argsreduce(cond, *((q,)+args+(scale,loc))) #PB replace 1-q by q
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
place(output,cond,self._isf(*goodargs)*scale + loc) #PB use _isf instead of _ppf
if output.ndim == 0:
return output[()]
return output
def stats(self,*args,**kwds):
"""
Some statistics of the given RV
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
location parameter (default=0)
scale : array_like, optional
scale parameter (default=1)
moments : string, optional
composed of letters ['mvsk'] defining which moments to compute:
'm' = mean,
'v' = variance,
's' = (Fisher's) skew,
'k' = (Fisher's) kurtosis.
(default='mv')
Returns
-------
stats : sequence
of requested moments.
"""
loc,scale,moments=map(kwds.get,['loc','scale','moments'])
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None:
# loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and scale is None:
# loc and scale given without keyword
loc, scale = args[-2:]
if N == self.numargs + 3 and moments is None:
# loc, scale, and moments
loc, scale, moments = args[-3:]
args = args[:self.numargs]
if scale is None: scale = 1.0
if loc is None: loc = 0.0
if moments is None: moments = 'mv'
loc,scale = map(asarray,(loc,scale))
args = tuple(map(asarray,args))
cond = self._argcheck(*args) & (scale > 0) & (loc==loc)
        signature = inspect.getargspec(self._stats.__func__)
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*np.power(mu2,1.5) #(mu2**1.5) breaks down for nan and inf
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
if any(cond):
goodargs = argsreduce(cond, *(args+(scale,loc)))
scale, loc, goodargs = goodargs[-2], goodargs[-1], goodargs[:-2]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
place(out0,cond,mu*scale+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
if np.isinf(mu):
#if mean is inf then var is also inf
mu2 = np.inf
out0 = default.copy()
place(out0,cond,mu2*scale*scale)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / mu2**1.5
out0 = default.copy()
place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0,cond,g2)
output.append(out0)
else: #no valid args
output = []
for _ in moments:
out0 = default.copy()
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def moment(self, n, *args, **kwds):
"""
n'th order non-central moment of distribution.
Parameters
----------
n : int, n>=1
Order of moment.
arg1, arg2, arg3,... : float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
kwds : keyword arguments, optional
These can include "loc" and "scale", as well as other keyword
arguments relevant for a given distribution.
"""
loc = kwds.get('loc', 0)
scale = kwds.get('scale', 1)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0): raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
            signature = inspect.getargspec(self._stats.__func__)
if (signature[2] is not None) or ('moments' in signature[0]):
mdict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
mdict = {}
mu, mu2, g1, g2 = self._stats(*args,**mdict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def _nnlf(self, x, *args):
return -sum(self._logpdf(x, *args),axis=0)
def nnlf(self, theta, x):
# - sum (log pdf(x, theta),axis=0)
# where theta are the parameters (including loc and scale)
#
try:
loc = theta[-2]
scale = theta[-1]
args = tuple(theta[:-2])
except IndexError:
raise ValueError("Not enough input arguments.")
if not self._argcheck(*args) or scale <= 0:
return inf
x = asarray((x-loc) / scale)
cond0 = (x <= self.a) | (x >= self.b)
if (any(cond0)):
return inf
else:
N = len(x)
return self._nnlf(x, *args) + N*log(scale)
# return starting point for fit (shape arguments + loc + scale)
def _fitstart(self, data, args=None):
if args is None:
args = (1.0,)*self.numargs
return args + self.fit_loc_scale(data, *args)
# Return the (possibly reduced) function to optimize in order to find MLE
# estimates for the .fit method
def _reduce_func(self, args, kwds):
args = list(args)
Nargs = len(args)
fixedn = []
index = range(Nargs)
names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale']
x0 = []
for n, key in zip(index, names):
            if key in kwds:
fixedn.append(n)
args[n] = kwds[key]
else:
x0.append(args[n])
if len(fixedn) == 0:
func = self.nnlf
restore = None
else:
if len(fixedn) == len(index):
raise ValueError("All parameters fixed. There is nothing to optimize.")
def restore(args, theta):
# Replace with theta for all numbers not in fixedn
# This allows the non-fixed values to vary, but
# we still call self.nnlf with all parameters.
i = 0
for n in range(Nargs):
if n not in fixedn:
args[n] = theta[i]
i += 1
return args
def func(theta, x):
newtheta = restore(args[:], theta)
return self.nnlf(newtheta, x)
return x0, func, restore, args
def fit(self, data, *args, **kwds):
"""
Return MLEs for shape, location, and scale parameters from data.
MLE stands for Maximum Likelihood Estimate. Starting estimates for
the fit are given by input arguments; for any arguments not provided
with starting estimates, ``self._fitstart(data)`` is called to generate
such.
One can hold some parameters fixed to specific values by passing in
keyword arguments ``f0``, ``f1``, ..., ``fn`` (for shape parameters)
and ``floc`` and ``fscale`` (for location and scale parameters,
respectively).
Parameters
----------
data : array_like
Data to use in calculating the MLEs.
args : floats, optional
Starting value(s) for any shape-characterizing arguments (those not
provided will be determined by a call to ``_fitstart(data)``).
No default value.
kwds : floats, optional
Starting values for the location and scale parameters; no default.
Special keyword arguments are recognized as holding certain
parameters fixed:
f0...fn : hold respective shape parameters fixed.
floc : hold location parameter fixed to specified value.
fscale : hold scale parameter fixed to specified value.
optimizer : The optimizer to use. The optimizer must take func,
and starting position as the first two arguments,
plus args (for extra arguments to pass to the
function to be optimized) and disp=0 to suppress
output as keyword arguments.
Returns
-------
shape, loc, scale : tuple of floats
MLEs for any shape statistics, followed by those for location and
scale.
"""
Narg = len(args)
if Narg > self.numargs:
raise ValueError("Too many input arguments.")
start = [None]*2
        if (Narg < self.numargs) or not ('loc' in kwds and
                                         'scale' in kwds):
start = self._fitstart(data) # get distribution specific starting locations
args += start[Narg:-2]
loc = kwds.get('loc', start[-2])
scale = kwds.get('scale', start[-1])
args += (loc, scale)
x0, func, restore, args = self._reduce_func(args, kwds)
optimizer = kwds.get('optimizer', optimize.fmin)
# convert string to function in scipy.optimize
if not callable(optimizer) and isinstance(optimizer, (str, unicode)):
if not optimizer.startswith('fmin_'):
optimizer = "fmin_"+optimizer
if optimizer == 'fmin_':
optimizer = 'fmin'
try:
optimizer = getattr(optimize, optimizer)
except AttributeError:
raise ValueError("%s is not a valid optimizer" % optimizer)
vals = optimizer(func,x0,args=(ravel(data),),disp=0)
if restore is not None:
vals = restore(args, vals)
vals = tuple(vals)
return vals
def fit_loc_scale(self, data, *args):
"""
Estimate loc and scale parameters from data using 1st and 2nd moments.
Parameters
----------
data : array_like
Data to fit.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
Returns
-------
Lhat : float
Estimated location parameter for the data.
Shat : float
Estimated scale parameter for the data.
"""
mu, mu2 = self.stats(*args,**{'moments':'mv'})
tmp = asarray(data)
muhat = tmp.mean()
mu2hat = tmp.var()
Shat = sqrt(mu2hat / mu2)
Lhat = muhat - Shat*mu
return Lhat, Shat
@np.deprecate
def est_loc_scale(self, data, *args):
"""This function is deprecated, use self.fit_loc_scale(data) instead."""
return self.fit_loc_scale(data, *args)
def freeze(self,*args,**kwds):
"""Freeze the distribution for the given arguments.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution. Should include all
the non-optional arguments, may include ``loc`` and ``scale``.
Returns
-------
rv_frozen : rv_frozen instance
The frozen distribution.
"""
return rv_frozen(self,*args,**kwds)
def __call__(self, *args, **kwds):
return self.freeze(*args, **kwds)
def _entropy(self, *args):
def integ(x):
val = self._pdf(x, *args)
return val*log(val)
entr = -integrate.quad(integ,self.a,self.b)[0]
if not np.isnan(entr):
return entr
else: # try with different limits if integration problems
low,upp = self.ppf([0.001,0.999],*args)
if np.isinf(self.b):
upper = upp
else:
upper = self.b
if np.isinf(self.a):
lower = low
else:
lower = self.a
return -integrate.quad(integ,lower,upper)[0]
def entropy(self, *args, **kwds):
"""
Differential entropy of the RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
"""
loc,scale=map(kwds.get,['loc','scale'])
args, loc, scale = self._fix_loc_scale(args, loc, scale)
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (scale > 0) & (loc==loc)
output = zeros(shape(cond0),'d')
place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
#I don't know when or why vecentropy got broken when numargs == 0
if self.numargs == 0:
place(output,cond0,self._entropy()+log(scale))
else:
place(output,cond0,self.vecentropy(*goodargs)+log(scale))
return output
def expect(self, func=None, args=(), loc=0, scale=1, lb=None, ub=None,
conditional=False, **kwds):
"""Calculate expected value of a function with respect to the distribution
Location and scale only tested on a few examples.
Parameters
----------
func : callable, optional
Function for which integral is calculated. Takes only one argument.
The default is the identity mapping f(x) = x.
args : tuple, optional
Argument (parameters) of the distribution.
lb, ub : scalar, optional
Lower and upper bound for integration. default is set to the support
of the distribution.
conditional : bool, optional
If True, the integral is corrected by the conditional probability
of the integration interval. The return value is the expectation
of the function, conditional on being in the given interval.
Default is False.
Additional keyword arguments are passed to the integration routine.
Returns
-------
expected value : float
Notes
-----
        This function has not been checked for its behavior when the integral is
not finite. The integration behavior is inherited from integrate.quad.
"""
lockwds = {'loc': loc,
'scale':scale}
if func is None:
def fun(x, *args):
return x*self.pdf(x, *args, **lockwds)
else:
def fun(x, *args):
return func(x)*self.pdf(x, *args, **lockwds)
if lb is None:
lb = loc + self.a * scale
if ub is None:
ub = loc + self.b * scale
if conditional:
invfac = (self.sf(lb, *args, **lockwds)
- self.sf(ub, *args, **lockwds))
else:
invfac = 1.0
kwds['args'] = args
return integrate.quad(fun, lb, ub, **kwds)[0] / invfac
_EULER = 0.577215664901532860606512090082402431042 # -special.psi(1)
_ZETA3 = 1.202056903159594285399738161511449990765 # special.zeta(3,1) Apery's constant
## Kolmogorov-Smirnov one-sided and two-sided test statistics
class ksone_gen(rv_continuous):
"""General Kolmogorov-Smirnov one-sided test.
%(default)s
"""
def _cdf(self,x,n):
return 1.0-special.smirnov(n,x)
def _ppf(self,q,n):
return special.smirnovi(n,1.0-q)
ksone = ksone_gen(a=0.0, name='ksone', shapes="n")
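# Illustrative sketch (editor addition): by construction ksone.sf(d, n) is just
# special.smirnov(n, d), the one-sided Kolmogorov-Smirnov tail probability for
# a sample of size n. `_example_ksone` is a new demonstrative name.
def _example_ksone():
    return ksone.sf(0.1, 50), special.smirnov(50, 0.1)   # essentially equal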
class kstwobign_gen(rv_continuous):
"""Kolmogorov-Smirnov two-sided test for large N.
%(default)s
"""
def _cdf(self,x):
return 1.0-special.kolmogorov(x)
def _sf(self,x):
return special.kolmogorov(x)
def _ppf(self,q):
return special.kolmogi(1.0-q)
kstwobign = kstwobign_gen(a=0.0, name='kstwobign')
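# Illustrative sketch (editor addition): kstwobign is the asymptotic (large-N)
# two-sided KS statistic; its survival function is special.kolmogorov, so the
# classic 5% critical value of about 1.358 gives a tail probability close to
# 0.05. `_example_kstwobign` is a new demonstrative name.
def _example_kstwobign():
    return kstwobign.sf(1.358)   # approximately 0.05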
## Normal distribution
# loc = mu, scale = std
# Keep these implementations out of the class definition so they can be reused
# by other distributions.
_norm_pdf_C = math.sqrt(2*pi)
_norm_pdf_logC = math.log(_norm_pdf_C)
def _norm_pdf(x):
return exp(-x**2/2.0) / _norm_pdf_C
def _norm_logpdf(x):
return -x**2 / 2.0 - _norm_pdf_logC
def _norm_cdf(x):
return special.ndtr(x)
def _norm_logcdf(x):
return special.log_ndtr(x)
def _norm_ppf(q):
return special.ndtri(q)
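# Illustrative sketch (editor addition): the module-level normal helpers above
# are mutually consistent -- the pdf at 0 is 1/sqrt(2*pi) and ndtr/ndtri are
# inverses of each other. `_example_norm_helpers` is a new demonstrative name.
def _example_norm_helpers():
    return _norm_pdf(0.0), _norm_cdf(_norm_ppf(0.975))   # (~0.3989, ~0.975)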
class norm_gen(rv_continuous):
"""A normal continuous random variable.
The location (loc) keyword specifies the mean.
The scale (scale) keyword specifies the standard deviation.
%(before_notes)s
Notes
-----
The probability density function for `norm` is::
norm.pdf(x) = exp(-x**2/2)/sqrt(2*pi)
%(example)s
"""
def _rvs(self):
return mtrand.standard_normal(self._size)
def _pdf(self,x):
return _norm_pdf(x)
def _logpdf(self, x):
return _norm_logpdf(x)
def _cdf(self,x):
return _norm_cdf(x)
def _logcdf(self, x):
return _norm_logcdf(x)
def _sf(self, x):
return _norm_cdf(-x)
def _logsf(self, x):
return _norm_logcdf(-x)
def _ppf(self,q):
return _norm_ppf(q)
def _isf(self,q):
return -_norm_ppf(q)
def _stats(self):
return 0.0, 1.0, 0.0, 0.0
def _entropy(self):
return 0.5*(log(2*pi)+1)
norm = norm_gen(name='norm')
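# Illustrative sketch (editor addition): the public methods wrap the generic
# loc/scale machinery around the standard-form _pdf/_cdf/_ppf defined above,
# so cdf and ppf are inverses for any loc/scale. `_example_norm_roundtrip` is
# a new demonstrative name.
def _example_norm_roundtrip():
    x = norm.ppf(0.25, loc=5.0, scale=2.0)
    return x, norm.cdf(x, loc=5.0, scale=2.0)   # (~3.651, 0.25)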
## Alpha distribution
##
class alpha_gen(rv_continuous):
"""An alpha continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `alpha` is::
alpha.pdf(x,a) = 1/(x**2*Phi(a)*sqrt(2*pi)) * exp(-1/2 * (a-1/x)**2),
where ``Phi(alpha)`` is the normal CDF, ``x > 0``, and ``a > 0``.
%(example)s
"""
def _pdf(self, x, a):
return 1.0/(x**2)/special.ndtr(a)*_norm_pdf(a-1.0/x)
def _logpdf(self, x, a):
return -2*log(x) + _norm_logpdf(a-1.0/x) - log(special.ndtr(a))
def _cdf(self, x, a):
return special.ndtr(a-1.0/x) / special.ndtr(a)
def _ppf(self, q, a):
return 1.0/asarray(a-special.ndtri(q*special.ndtr(a)))
def _stats(self, a):
return [inf]*2 + [nan]*2
alpha = alpha_gen(a=0.0, name='alpha', shapes='a')
## Anglit distribution
##
class anglit_gen(rv_continuous):
"""An anglit continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `anglit` is::
anglit.pdf(x) = sin(2*x + pi/2) = cos(2*x),
for ``-pi/4 <= x <= pi/4``.
%(example)s
"""
def _pdf(self, x):
return cos(2*x)
def _cdf(self, x):
return sin(x+pi/4)**2.0
def _ppf(self, q):
return (arcsin(sqrt(q))-pi/4)
def _stats(self):
return 0.0, pi*pi/16-0.5, 0.0, -2*(pi**4 - 96)/(pi*pi-8)**2
def _entropy(self):
return 1-log(2)
anglit = anglit_gen(a=-pi/4, b=pi/4, name='anglit')
## Arcsine distribution
##
class arcsine_gen(rv_continuous):
"""An arcsine continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `arcsine` is::
arcsine.pdf(x) = 1/(pi*sqrt(x*(1-x)))
for 0 < x < 1.
%(example)s
"""
def _pdf(self, x):
return 1.0/pi/sqrt(x*(1-x))
def _cdf(self, x):
return 2.0/pi*arcsin(sqrt(x))
def _ppf(self, q):
return sin(pi/2.0*q)**2.0
def _stats(self):
#mup = 0.5, 3.0/8.0, 15.0/48.0, 35.0/128.0
mu = 0.5
mu2 = 1.0/8
g1 = 0
g2 = -3.0/2.0
return mu, mu2, g1, g2
def _entropy(self):
return -0.24156447527049044468
arcsine = arcsine_gen(a=0.0, b=1.0, name='arcsine')
## Beta distribution
##
class beta_gen(rv_continuous):
"""A beta continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `beta` is::
beta.pdf(x, a, b) = gamma(a+b)/(gamma(a)*gamma(b)) * x**(a-1) *
(1-x)**(b-1),
for ``0 < x < 1``, ``a > 0``, ``b > 0``.
%(example)s
"""
def _rvs(self, a, b):
return mtrand.beta(a,b,self._size)
def _pdf(self, x, a, b):
Px = (1.0-x)**(b-1.0) * x**(a-1.0)
Px /= special.beta(a,b)
return Px
def _logpdf(self, x, a, b):
lPx = (b-1.0)*log(1.0-x) + (a-1.0)*log(x)
lPx -= log(special.beta(a,b))
return lPx
def _cdf(self, x, a, b):
return special.btdtr(a,b,x)
def _ppf(self, q, a, b):
return special.btdtri(a,b,q)
def _stats(self, a, b):
mn = a *1.0 / (a + b)
var = (a*b*1.0)/(a+b+1.0)/(a+b)**2.0
g1 = 2.0*(b-a)*sqrt((1.0+a+b)/(a*b)) / (2+a+b)
g2 = 6.0*(a**3 + a**2*(1-2*b) + b**2*(1+b) - 2*a*b*(2+b))
g2 /= a*b*(a+b+2)*(a+b+3)
return mn, var, g1, g2
def _fitstart(self, data):
g1 = _skew(data)
g2 = _kurtosis(data)
def func(x):
a, b = x
sk = 2*(b-a)*sqrt(a + b + 1) / (a + b + 2) / sqrt(a*b)
ku = a**3 - a**2*(2*b-1) + b**2*(b+1) - 2*a*b*(b+2)
ku /= a*b*(a+b+2)*(a+b+3)
ku *= 6
return [sk-g1, ku-g2]
a, b = optimize.fsolve(func, (1.0, 1.0))
return super(beta_gen, self)._fitstart(data, args=(a,b))
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
fscale = kwds.get('fscale', None)
if floc is not None and fscale is not None:
# special case
data = (ravel(data)-floc)/fscale
xbar = data.mean()
v = data.var(ddof=0)
fac = xbar*(1-xbar)/v - 1
a = xbar * fac
b = (1-xbar) * fac
return a, b, floc, fscale
else: # do general fit
return super(beta_gen, self).fit(data, *args, **kwds)
beta = beta_gen(a=0.0, b=1.0, name='beta', shapes='a, b')
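# Illustrative sketch (editor addition): when both floc and fscale are fixed,
# beta_gen.fit above skips the numerical optimizer and returns the closed-form
# moment-matching estimates of a and b. `_example_beta_fit` is a new name and
# the random sample is only for demonstration.
def _example_beta_fit():
    data = mtrand.beta(2.0, 5.0, 1000)
    return beta.fit(data, floc=0, fscale=1)   # (a_hat, b_hat, 0, 1)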
## Beta Prime
class betaprime_gen(rv_continuous):
"""A beta prima continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `betaprime` is::
betaprime.pdf(x, a, b) =
            gamma(a+b) / (gamma(a)*gamma(b)) * x**(a-1) * (1+x)**(-a-b)
for ``x > 0``, ``a > 0``, ``b > 0``.
%(example)s
"""
def _rvs(self, a, b):
u1 = gamma.rvs(a,size=self._size)
u2 = gamma.rvs(b,size=self._size)
return (u1 / u2)
def _pdf(self, x, a, b):
return 1.0/special.beta(a,b)*x**(a-1.0)/(1+x)**(a+b)
def _logpdf(self, x, a, b):
return (a-1.0)*log(x) - (a+b)*log(1+x) - log(special.beta(a,b))
def _cdf_skip(self, x, a, b):
# remove for now: special.hyp2f1 is incorrect for large a
x = where(x==1.0, 1.0-1e-6,x)
return pow(x,a)*special.hyp2f1(a+b,a,1+a,-x)/a/special.beta(a,b)
def _munp(self, n, a, b):
if (n == 1.0):
return where(b > 1, a/(b-1.0), inf)
elif (n == 2.0):
return where(b > 2, a*(a+1.0)/((b-2.0)*(b-1.0)), inf)
elif (n == 3.0):
return where(b > 3, a*(a+1.0)*(a+2.0)/((b-3.0)*(b-2.0)*(b-1.0)),
inf)
elif (n == 4.0):
return where(b > 4,
a*(a+1.0)*(a+2.0)*(a+3.0)/((b-4.0)*(b-3.0) \
*(b-2.0)*(b-1.0)), inf)
else:
raise NotImplementedError
betaprime = betaprime_gen(a=0.0, b=500.0, name='betaprime', shapes='a, b')
## Bradford
##
class bradford_gen(rv_continuous):
"""A Bradford continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `bradford` is::
bradford.pdf(x, c) = c / (k * (1+c*x)),
for ``0 < x < 1``, ``c > 0`` and ``k = log(1+c)``.
%(example)s
"""
def _pdf(self, x, c):
return c / (c*x + 1.0) / log(1.0+c)
def _cdf(self, x, c):
return log(1.0+c*x) / log(c+1.0)
def _ppf(self, q, c):
return ((1.0+c)**q-1)/c
def _stats(self, c, moments='mv'):
k = log(1.0+c)
mu = (c-k)/(c*k)
mu2 = ((c+2.0)*k-2.0*c)/(2*c*k*k)
g1 = None
g2 = None
if 's' in moments:
g1 = sqrt(2)*(12*c*c-9*c*k*(c+2)+2*k*k*(c*(c+3)+3))
g1 /= sqrt(c*(c*(k-2)+2*k))*(3*c*(k-2)+6*k)
if 'k' in moments:
g2 = c**3*(k-3)*(k*(3*k-16)+24)+12*k*c*c*(k-4)*(k-3) \
+ 6*c*k*k*(3*k-14) + 12*k**3
g2 /= 3*c*(c*(k-2)+2*k)**2
return mu, mu2, g1, g2
def _entropy(self, c):
k = log(1+c)
return k/2.0 - log(c/k)
bradford = bradford_gen(a=0.0, b=1.0, name='bradford', shapes='c')
## Burr
# burr with d=1 is called the fisk distribution
class burr_gen(rv_continuous):
"""A Burr continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `burr` is::
burr.pdf(x, c, d) = c * d * x**(-c-1) * (1+x**(-c))**(-d-1)
for ``x > 0``.
%(example)s
"""
def _pdf(self, x, c, d):
return c*d*(x**(-c-1.0))*((1+x**(-c*1.0))**(-d-1.0))
def _cdf(self, x, c, d):
        return (1+x**(-c*1.0))**(-d*1.0)
def _ppf(self, q, c, d):
return (q**(-1.0/d)-1)**(-1.0/c)
def _stats(self, c, d, moments='mv'):
g2c, g2cd = gam(1-2.0/c), gam(2.0/c+d)
g1c, g1cd = gam(1-1.0/c), gam(1.0/c+d)
gd = gam(d)
k = gd*g2c*g2cd - g1c**2 * g1cd**2
mu = g1c*g1cd / gd
mu2 = k / gd**2.0
g1, g2 = None, None
g3c, g3cd = None, None
if 's' in moments:
g3c, g3cd = gam(1-3.0/c), gam(3.0/c+d)
g1 = 2*g1c**3 * g1cd**3 + gd*gd*g3c*g3cd - 3*gd*g2c*g1c*g1cd*g2cd
g1 /= sqrt(k**3)
if 'k' in moments:
if g3c is None:
g3c = gam(1-3.0/c)
if g3cd is None:
g3cd = gam(3.0/c+d)
g4c, g4cd = gam(1-4.0/c), gam(4.0/c+d)
g2 = 6*gd*g2c*g2cd * g1c**2 * g1cd**2 + gd**3 * g4c*g4cd
g2 -= 3*g1c**4 * g1cd**4 -4*gd**2*g3c*g1c*g1cd*g3cd
return mu, mu2, g1, g2
burr = burr_gen(a=0.0, name='burr', shapes="c, d")
# Fisk distribution
# burr is a generalization
class fisk_gen(burr_gen):
"""A Fisk continuous random variable.
The Fisk distribution is also known as the log-logistic distribution, and
equals the Burr distribution with ``d=1``.
%(before_notes)s
See Also
--------
burr
%(example)s
"""
def _pdf(self, x, c):
return burr_gen._pdf(self, x, c, 1.0)
def _cdf(self, x, c):
return burr_gen._cdf(self, x, c, 1.0)
def _ppf(self, x, c):
return burr_gen._ppf(self, x, c, 1.0)
def _stats(self, c):
return burr_gen._stats(self, c, 1.0)
def _entropy(self, c):
return 2 - log(c)
fisk = fisk_gen(a=0.0, name='fisk', shapes='c')
## Cauchy
# median = loc
class cauchy_gen(rv_continuous):
"""A Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `cauchy` is::
cauchy.pdf(x) = 1 / (pi * (1 + x**2))
%(example)s
"""
def _pdf(self, x):
return 1.0/pi/(1.0+x*x)
def _cdf(self, x):
return 0.5 + 1.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi*q-pi/2.0)
def _sf(self, x):
return 0.5 - 1.0/pi*arctan(x)
def _isf(self, q):
return tan(pi/2.0-pi*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(4*pi)
    def _fitstart(self, data, args=None):
        return (0, 1)
cauchy = cauchy_gen(name='cauchy')
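# Usage sketch (added comment): in standard form the Cauchy quartiles sit at
# +/-1, which follows directly from `_ppf` above (tan(pi*q - pi/2)); the
# moments reported by `_stats` are inf/nan, so mean and variance are undefined.
#
#     >>> from scipy.stats import cauchy
#     >>> cauchy.ppf(0.25), cauchy.ppf(0.75)       # approximately (-1.0, 1.0)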
## Chi
## (positive square-root of chi-square)
## chi(1, loc, scale) = halfnormal
## chi(2, 0, scale) = Rayleigh
## chi(3, 0, scale) = MaxWell
class chi_gen(rv_continuous):
"""A chi continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi` is::
chi.pdf(x,df) = x**(df-1) * exp(-x**2/2) / (2**(df/2-1) * gamma(df/2))
for ``x > 0``.
%(example)s
"""
def _rvs(self, df):
return sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df):
return x**(df-1.)*exp(-x*x*0.5)/(2.0)**(df*0.5-1)/gam(df*0.5)
def _cdf(self, x, df):
return special.gammainc(df*0.5,0.5*x*x)
def _ppf(self, q, df):
return sqrt(2*special.gammaincinv(df*0.5,q))
def _stats(self, df):
mu = sqrt(2)*special.gamma(df/2.0+0.5)/special.gamma(df/2.0)
mu2 = df - mu*mu
g1 = (2*mu**3.0 + mu*(1-2*df))/asarray(mu2**1.5)
g2 = 2*df*(1.0-df)-6*mu**4 + 4*mu**2 * (2*df-1)
g2 /= asarray(mu2**2.0)
return mu, mu2, g1, g2
chi = chi_gen(a=0.0, name='chi', shapes='df')
## Chi-squared (gamma-distributed with loc=0 and scale=2 and shape=df/2)
class chi2_gen(rv_continuous):
"""A chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `chi2` is::
chi2.pdf(x,df) = 1 / (2*gamma(df/2)) * (x/2)**(df/2-1) * exp(-x/2)
%(example)s
"""
def _rvs(self, df):
return mtrand.chisquare(df,self._size)
def _pdf(self, x, df):
return exp(self._logpdf(x, df))
def _logpdf(self, x, df):
#term1 = (df/2.-1)*log(x)
#term1[(df==2)*(x==0)] = 0
#avoid 0*log(0)==nan
return (df/2.-1)*log(x+1e-300) - x/2. - gamln(df/2.) - (log(2)*df)/2.
## Px = x**(df/2.0-1)*exp(-x/2.0)
## Px /= special.gamma(df/2.0)* 2**(df/2.0)
## return log(Px)
def _cdf(self, x, df):
return special.chdtr(df, x)
def _sf(self, x, df):
return special.chdtrc(df, x)
def _isf(self, p, df):
return special.chdtri(df, p)
def _ppf(self, p, df):
return self._isf(1.0-p, df)
def _stats(self, df):
mu = df
mu2 = 2*df
g1 = 2*sqrt(2.0/df)
g2 = 12.0/df
return mu, mu2, g1, g2
chi2 = chi2_gen(a=0.0, name='chi2', shapes='df')
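# Consistency sketch (added comment): as noted above, chi2 with ``df`` degrees
# of freedom is the gamma distribution with shape ``df/2`` and scale 2, so the
# two densities agree, e.g.
#
#     >>> from scipy.stats import chi2, gamma
#     >>> chi2.pdf(3.0, 4)                         # df = 4
#     >>> gamma.pdf(3.0, 2.0, scale=2.0)           # same value: a = df/2, scale = 2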
## Cosine (Approximation to the Normal)
class cosine_gen(rv_continuous):
"""A cosine continuous random variable.
%(before_notes)s
Notes
-----
The cosine distribution is an approximation to the normal distribution.
The probability density function for `cosine` is::
cosine.pdf(x) = 1/(2*pi) * (1+cos(x))
for ``-pi <= x <= pi``.
%(example)s
"""
def _pdf(self, x):
return 1.0/2/pi*(1+cos(x))
def _cdf(self, x):
return 1.0/2/pi*(pi + x + sin(x))
def _stats(self):
return 0.0, pi*pi/3.0-2.0, 0.0, -6.0*(pi**4-90)/(5.0*(pi*pi-6)**2)
def _entropy(self):
return log(4*pi)-1.0
cosine = cosine_gen(a=-pi, b=pi, name='cosine')
## Double Gamma distribution
class dgamma_gen(rv_continuous):
"""A double gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dgamma` is::
dgamma.pdf(x, a) = 1 / (2*gamma(a)) * abs(x)**(a-1) * exp(-abs(x))
for ``a > 0``.
%(example)s
"""
def _rvs(self, a):
u = random(size=self._size)
return (gamma.rvs(a,size=self._size)*where(u>=0.5,1,-1))
def _pdf(self, x, a):
ax = abs(x)
return 1.0/(2*special.gamma(a))*ax**(a-1.0) * exp(-ax)
def _logpdf(self, x, a):
ax = abs(x)
return (a-1.0)*log(ax) - ax - log(2) - gamln(a)
def _cdf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
return where(x>0,0.5+fac,0.5-fac)
def _sf(self, x, a):
fac = 0.5*special.gammainc(a,abs(x))
#return where(x>0,0.5-0.5*fac,0.5+0.5*fac)
return where(x>0,0.5-fac,0.5+fac)
def _ppf(self, q, a):
fac = special.gammainccinv(a,1-abs(2*q-1))
return where(q>0.5, fac, -fac)
def _stats(self, a):
mu2 = a*(a+1.0)
return 0.0, mu2, 0.0, (a+2.0)*(a+3.0)/mu2-3.0
dgamma = dgamma_gen(name='dgamma', shapes='a')
## Double Weibull distribution
##
class dweibull_gen(rv_continuous):
"""A double Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `dweibull` is::
dweibull.pdf(x, c) = c / 2 * abs(x)**(c-1) * exp(-abs(x)**c)
%(example)s
"""
def _rvs(self, c):
u = random(size=self._size)
return weibull_min.rvs(c, size=self._size)*(where(u>=0.5,1,-1))
def _pdf(self, x, c):
ax = abs(x)
Px = c/2.0*ax**(c-1.0)*exp(-ax**c)
return Px
def _logpdf(self, x, c):
ax = abs(x)
return log(c) - log(2.0) + (c-1.0)*log(ax) - ax**c
def _cdf(self, x, c):
Cx1 = 0.5*exp(-abs(x)**c)
return where(x > 0, 1-Cx1, Cx1)
def _ppf_skip(self, q, c):
fac = where(q<=0.5,2*q,2*q-1)
fac = pow(asarray(log(1.0/fac)),1.0/c)
return where(q>0.5,fac,-fac)
def _stats(self, c):
var = gam(1+2.0/c)
return 0.0, var, 0.0, gam(1+4.0/c)/var
dweibull = dweibull_gen(name='dweibull', shapes='c')
## ERLANG
##
## Special case of the Gamma distribution with shape parameter an integer.
##
class erlang_gen(rv_continuous):
"""An Erlang continuous random variable.
%(before_notes)s
See Also
--------
gamma
Notes
-----
The Erlang distribution is a special case of the Gamma
distribution, with the shape parameter ``a`` an integer. Refer to
the ``gamma`` distribution for further examples.
"""
def _rvs(self, a):
return gamma.rvs(a, size=self._size)
    def _argcheck(self, a):
return (a > 0) & (floor(a)==a)
def _pdf(self, x, a):
Px = (x)**(a-1.0)*exp(-x)/special.gamma(a)
return Px
def _logpdf(self, x, a):
return (a-1.0)*log(x) - x - gamln(a)
def _cdf(self, x, a):
return special.gdtr(1.0,a,x)
def _sf(self, x, a):
return special.gdtrc(1.0,a,x)
def _ppf(self, q, a):
return special.gdtrix(1.0, a, q)
def _stats(self, a):
a = a*1.0
return a, a, 2/sqrt(a), 6/a
def _entropy(self, a):
return special.psi(a)*(1-a) + 1 + gamln(a)
erlang = erlang_gen(a=0.0, name='erlang', shapes='a')
## Exponential (gamma distributed with a=1.0, loc=loc and scale=scale)
## scale == 1.0 / lambda
class expon_gen(rv_continuous):
"""An exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `expon` is::
expon.pdf(x) = lambda * exp(- lambda*x)
for ``x >= 0``.
The scale parameter is equal to ``scale = 1.0 / lambda``.
`expon` does not have shape parameters.
%(example)s
"""
def _rvs(self):
return mtrand.standard_exponential(self._size)
def _pdf(self, x):
return exp(-x)
def _logpdf(self, x):
return -x
def _cdf(self, x):
return -expm1(-x)
def _ppf(self, q):
return -log1p(-q)
def _sf(self,x):
return exp(-x)
def _logsf(self, x):
return -x
def _isf(self,q):
return -log(q)
def _stats(self):
return 1.0, 1.0, 2.0, 6.0
def _entropy(self):
return 1.0
expon = expon_gen(a=0.0, name='expon')
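# Usage sketch (added comment): the docstring above defines ``scale = 1/lambda``,
# so an exponential with rate lambda = 2 is parameterized via ``scale=0.5``:
#
#     >>> from scipy.stats import expon
#     >>> expon.pdf(1.0, scale=0.5)                # 2 * exp(-2 * 1.0)
#     >>> expon.mean(scale=0.5)                    # 1/lambda = 0.5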
## Exponentiated Weibull
class exponweib_gen(rv_continuous):
"""An exponentiated Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponweib` is::
exponweib.pdf(x, a, c) =
a * c * (1-exp(-x**c))**(a-1) * exp(-x**c)*x**(c-1)
for ``x > 0``, ``a > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, a, c):
exc = exp(-x**c)
return a*c*(1-exc)**asarray(a-1) * exc * x**(c-1)
def _logpdf(self, x, a, c):
exc = exp(-x**c)
return log(a) + log(c) + (a-1.)*log(1-exc) - x**c + (c-1.0)*log(x)
def _cdf(self, x, a, c):
exm1c = -expm1(-x**c)
return (exm1c)**a
def _ppf(self, q, a, c):
return (-log1p(-q**(1.0/a)))**asarray(1.0/c)
exponweib = exponweib_gen(a=0.0, name='exponweib', shapes="a, c")
## Exponential Power
class exponpow_gen(rv_continuous):
"""An exponential power continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `exponpow` is::
exponpow.pdf(x, b) = b * x**(b-1) * exp(1+x**b - exp(x**b))
for ``x >= 0``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
xbm1 = x**(b-1.0)
xb = xbm1 * x
return exp(1)*b*xbm1 * exp(xb - exp(xb))
def _logpdf(self, x, b):
xb = x**(b-1.0)*x
return 1 + log(b) + (b-1.0)*log(x) + xb - exp(xb)
def _cdf(self, x, b):
return -expm1(-expm1(x**b))
def _sf(self, x, b):
return exp(-expm1(x**b))
def _isf(self, x, b):
return (log1p(-log(x)))**(1./b)
def _ppf(self, q, b):
return pow(log1p(-log1p(-q)), 1.0/b)
exponpow = exponpow_gen(a=0.0, name='exponpow', shapes='b')
## Fatigue-Life (Birnbaum-Saunders)
class fatiguelife_gen(rv_continuous):
    """A fatigue-life (Birnbaum-Saunders) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `fatiguelife` is::
fatiguelife.pdf(x,c) =
(x+1) / (2*c*sqrt(2*pi*x**3)) * exp(-(x-1)**2/(2*x*c**2))
for ``x > 0``.
%(example)s
"""
def _rvs(self, c):
z = norm.rvs(size=self._size)
x = 0.5*c*z
x2 = x*x
t = 1.0 + 2*x2 + 2*x*sqrt(1 + x2)
return t
def _pdf(self, x, c):
return (x+1)/asarray(2*c*sqrt(2*pi*x**3))*exp(-(x-1)**2/asarray((2.0*x*c**2)))
def _logpdf(self, x, c):
return log(x+1) - (x-1)**2 / (2.0*x*c**2) - log(2*c) - 0.5*(log(2*pi) + 3*log(x))
def _cdf(self, x, c):
return special.ndtr(1.0/c*(sqrt(x)-1.0/asarray(sqrt(x))))
def _ppf(self, q, c):
tmp = c*special.ndtri(q)
return 0.25*(tmp + sqrt(tmp**2 + 4))**2
def _stats(self, c):
c2 = c*c
mu = c2 / 2.0 + 1
den = 5*c2 + 4
mu2 = c2*den /4.0
g1 = 4*c*sqrt(11*c2+6.0)/den**1.5
g2 = 6*c2*(93*c2+41.0) / den**2.0
return mu, mu2, g1, g2
fatiguelife = fatiguelife_gen(a=0.0, name='fatiguelife', shapes='c')
## Folded Cauchy
class foldcauchy_gen(rv_continuous):
"""A folded Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldcauchy` is::
foldcauchy.pdf(x, c) = 1/(pi*(1+(x-c)**2)) + 1/(pi*(1+(x+c)**2))
for ``x >= 0``.
%(example)s
"""
def _rvs(self, c):
return abs(cauchy.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return 1.0/pi*(1.0/(1+(x-c)**2) + 1.0/(1+(x+c)**2))
def _cdf(self, x, c):
return 1.0/pi*(arctan(x-c) + arctan(x+c))
def _stats(self, c):
return inf, inf, nan, nan
foldcauchy = foldcauchy_gen(a=0.0, name='foldcauchy', shapes='c')
## F
class f_gen(rv_continuous):
"""An F continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `f` is::
df2**(df2/2) * df1**(df1/2) * x**(df1/2-1)
F.pdf(x, df1, df2) = --------------------------------------------
(df2+df1*x)**((df1+df2)/2) * B(df1/2, df2/2)
for ``x > 0``.
%(example)s
"""
def _rvs(self, dfn, dfd):
return mtrand.f(dfn, dfd, self._size)
def _pdf(self, x, dfn, dfd):
# n = asarray(1.0*dfn)
# m = asarray(1.0*dfd)
# Px = m**(m/2) * n**(n/2) * x**(n/2-1)
# Px /= (m+n*x)**((n+m)/2)*special.beta(n/2,m/2)
return exp(self._logpdf(x, dfn, dfd))
def _logpdf(self, x, dfn, dfd):
n = 1.0*dfn
m = 1.0*dfd
lPx = m/2*log(m) + n/2*log(n) + (n/2-1)*log(x)
lPx -= ((n+m)/2)*log(m+n*x) + special.betaln(n/2,m/2)
return lPx
def _cdf(self, x, dfn, dfd):
return special.fdtr(dfn, dfd, x)
def _sf(self, x, dfn, dfd):
return special.fdtrc(dfn, dfd, x)
def _ppf(self, q, dfn, dfd):
return special.fdtri(dfn, dfd, q)
def _stats(self, dfn, dfd):
v2 = asarray(dfd*1.0)
v1 = asarray(dfn*1.0)
mu = where (v2 > 2, v2 / asarray(v2 - 2), inf)
mu2 = 2*v2*v2*(v2+v1-2)/(v1*(v2-2)**2 * (v2-4))
mu2 = where(v2 > 4, mu2, inf)
g1 = 2*(v2+2*v1-2)/(v2-6)*sqrt((2*v2-4)/(v1*(v2+v1-2)))
g1 = where(v2 > 6, g1, nan)
g2 = 3/(2*v2-16)*(8+g1*g1*(v2-6))
g2 = where(v2 > 8, g2, nan)
return mu, mu2, g1, g2
f = f_gen(a=0.0, name='f', shapes="dfn, dfd")
## Folded Normal
## abs(Z) where (Z is normal with mu=L and std=S so that c=abs(L)/S)
##
## note: the Regress+ docs give the scale parameter correctly, but the first
## parameter given there is a shape parameter A = c * scale
## Half-normal is folded normal with shape-parameter c=0.
class foldnorm_gen(rv_continuous):
"""A folded normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `foldnorm` is::
        foldnorm.pdf(x, c) = sqrt(2/pi) * cosh(c*x) * exp(-(x**2+c**2)/2)
    for ``x >= 0``, ``c >= 0``.
%(example)s
"""
def _rvs(self, c):
return abs(norm.rvs(loc=c,size=self._size))
def _pdf(self, x, c):
return sqrt(2.0/pi)*cosh(c*x)*exp(-(x*x+c*c)/2.0)
def _cdf(self, x, c,):
return special.ndtr(x-c) + special.ndtr(x+c) - 1.0
def _stats(self, c):
fac = special.erf(c/sqrt(2))
mu = sqrt(2.0/pi)*exp(-0.5*c*c)+c*fac
mu2 = c*c + 1 - mu*mu
c2 = c*c
g1 = sqrt(2/pi)*exp(-1.5*c2)*(4-pi*exp(c2)*(2*c2+1.0))
g1 += 2*c*fac*(6*exp(-c2) + 3*sqrt(2*pi)*c*exp(-c2/2.0)*fac + \
pi*c*(fac*fac-1))
g1 /= pi*mu2**1.5
g2 = c2*c2+6*c2+3+6*(c2+1)*mu*mu - 3*mu**4
g2 -= 4*exp(-c2/2.0)*mu*(sqrt(2.0/pi)*(c2+2)+c*(c2+3)*exp(c2/2.0)*fac)
g2 /= mu2**2.0
return mu, mu2, g1, g2
foldnorm = foldnorm_gen(a=0.0, name='foldnorm', shapes='c')
## Extreme Value Type II or Frechet
## (defined in the Regress+ documentation as Extreme LB),
## a limiting value distribution.
##
class frechet_r_gen(rv_continuous):
"""A Frechet right (or Weibull minimum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_min : The same distribution as `frechet_r`.
frechet_l, weibull_max
Notes
-----
The probability density function for `frechet_r` is::
frechet_r.pdf(x, c) = c * x**(c-1) * exp(-x**c)
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*pow(x,c-1)*exp(-pow(x,c))
def _logpdf(self, x, c):
return log(c) + (c-1)*log(x) - pow(x,c)
def _cdf(self, x, c):
return -expm1(-pow(x,c))
def _ppf(self, q, c):
return pow(-log1p(-q),1.0/c)
def _munp(self, n, c):
return special.gamma(1.0+n*1.0/c)
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_r = frechet_r_gen(a=0.0, name='frechet_r', shapes='c')
weibull_min = frechet_r_gen(a=0.0, name='weibull_min', shapes='c')
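# Equivalence sketch (added comment): `frechet_r` and `weibull_min` are two
# names for the same distribution (both are instances of frechet_r_gen), so
# they can be used interchangeably, e.g.
#
#     >>> frechet_r.cdf(1.5, 2.0) == weibull_min.cdf(1.5, 2.0)   # True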
class frechet_l_gen(rv_continuous):
"""A Frechet left (or Weibull maximum) continuous random variable.
%(before_notes)s
See Also
--------
weibull_max : The same distribution as `frechet_l`.
frechet_r, weibull_min
Notes
-----
The probability density function for `frechet_l` is::
frechet_l.pdf(x, c) = c * (-x)**(c-1) * exp(-(-x)**c)
for ``x < 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*pow(-x,c-1)*exp(-pow(-x,c))
def _cdf(self, x, c):
return exp(-pow(-x,c))
def _ppf(self, q, c):
return -pow(-log(q),1.0/c)
def _munp(self, n, c):
val = special.gamma(1.0+n*1.0/c)
if (int(n) % 2):
sgn = -1
else:
sgn = 1
return sgn * val
def _entropy(self, c):
return -_EULER / c - log(c) + _EULER + 1
frechet_l = frechet_l_gen(b=0.0, name='frechet_l', shapes='c')
weibull_max = frechet_l_gen(b=0.0, name='weibull_max', shapes='c')
## Generalized Logistic
##
class genlogistic_gen(rv_continuous):
"""A generalized logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genlogistic` is::
genlogistic.pdf(x, c) = c * exp(-x) / (1 + exp(-x))**(c+1)
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
Px = c*exp(-x)/(1+exp(-x))**(c+1.0)
return Px
def _logpdf(self, x, c):
return log(c) - x - (c+1.0)*log1p(exp(-x))
def _cdf(self, x, c):
Cx = (1+exp(-x))**(-c)
return Cx
def _ppf(self, q, c):
vals = -log(pow(q,-1.0/c)-1)
return vals
def _stats(self, c):
zeta = special.zeta
mu = _EULER + special.psi(c)
mu2 = pi*pi/6.0 + zeta(2,c)
g1 = -2*zeta(3,c) + 2*_ZETA3
g1 /= mu2**1.5
g2 = pi**4/15.0 + 6*zeta(4,c)
g2 /= mu2**2.0
return mu, mu2, g1, g2
genlogistic = genlogistic_gen(name='genlogistic', shapes='c')
## Generalized Pareto
class genpareto_gen(rv_continuous):
"""A generalized Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genpareto` is::
genpareto.pdf(x, c) = (1 + c * x)**(-1 - 1/c)
    for ``c != 0``. The support is ``x >= 0`` for ``c > 0``, and
    ``0 <= x < 1/abs(c)`` for ``c < 0``.
%(example)s
"""
def _argcheck(self, c):
c = asarray(c)
self.b = where(c < 0, 1.0/abs(c), inf)
return where(c==0, 0, 1)
def _pdf(self, x, c):
Px = pow(1+c*x,asarray(-1.0-1.0/c))
return Px
def _logpdf(self, x, c):
return (-1.0-1.0/c) * np.log1p(c*x)
def _cdf(self, x, c):
return 1.0 - pow(1+c*x,asarray(-1.0/c))
def _ppf(self, q, c):
vals = 1.0/c * (pow(1-q, -c)-1)
return vals
def _munp(self, n, c):
k = arange(0,n+1)
val = (-1.0/c)**n * sum(comb(n,k)*(-1)**k / (1.0-c*k),axis=0)
return where(c*n < 1, val, inf)
def _entropy(self, c):
if (c > 0):
return 1+c
else:
self.b = -1.0 / c
return rv_continuous._entropy(self, c)
genpareto = genpareto_gen(a=0.0, name='genpareto', shapes='c')
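# Support sketch (added comment): `_argcheck` above sets the upper bound of
# the support to ``1/abs(c)`` when ``c < 0`` and to ``inf`` otherwise, e.g.
#
#     >>> from scipy.stats import genpareto
#     >>> genpareto.cdf(10.0, 0.5)                 # support is [0, inf) for c > 0
#     >>> genpareto.cdf(1.0, -0.5)                 # support is [0, 2) for c = -0.5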
## Generalized Exponential
class genexpon_gen(rv_continuous):
"""A generalized exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genexpon` is::
genexpon.pdf(x, a, b, c) = (a + b * (1 - exp(-c*x))) * \
exp(-a*x - b*x + b/c * (1-exp(-c*x)))
for ``x >= 0``, ``a,b,c > 0``.
References
----------
H.K. Ryu, "An Extension of Marshall and Olkin's Bivariate Exponential
Distribution", Journal of the American Statistical Association, 1993.
N. Balakrishnan, "The Exponential Distribution: Theory, Methods and
Applications", Asit P. Basu.
%(example)s
"""
def _pdf(self, x, a, b, c):
return (a+b*(-expm1(-c*x)))*exp((-a-b)*x+b*(-expm1(-c*x))/c)
def _cdf(self, x, a, b, c):
return -expm1((-a-b)*x + b*(-expm1(-c*x))/c)
def _logpdf(self, x, a, b, c):
return np.log(a+b*(-expm1(-c*x))) + (-a-b)*x+b*(-expm1(-c*x))/c
genexpon = genexpon_gen(a=0.0, name='genexpon', shapes='a, b, c')
## Generalized Extreme Value
## c=0 is just gumbel distribution.
## This version does now accept c==0;
## gumbel_r is the explicit special case for c==0.
# new version by Per Brodtkorb, see ticket:767
# also works for c==0, special case is gumbel_r
# increased precision for small c
class genextreme_gen(rv_continuous):
"""A generalized extreme value continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r
Notes
-----
For ``c=0``, `genextreme` is equal to `gumbel_r`.
The probability density function for `genextreme` is::
genextreme.pdf(x, c) =
exp(-exp(-x))*exp(-x), for c==0
exp(-(1-c*x)**(1/c))*(1-c*x)**(1/c-1), for x <= 1/c, c > 0
%(example)s
"""
def _argcheck(self, c):
min = np.minimum
max = np.maximum
sml = floatinfo.machar.xmin
#self.b = where(c > 0, 1.0 / c,inf)
#self.a = where(c < 0, 1.0 / c, -inf)
self.b = where(c > 0, 1.0 / max(c, sml),inf)
self.a = where(c < 0, 1.0 / min(c,-sml), -inf)
return where(abs(c)==inf, 0, 1) #True #(c!=0)
def _pdf(self, x, c):
## ex2 = 1-c*x
## pex2 = pow(ex2,1.0/c)
## p2 = exp(-pex2)*pex2/ex2
## return p2
cx = c*x
logex2 = where((c==0)*(x==x),0.0,log1p(-cx))
logpex2 = where((c==0)*(x==x),-x,logex2/c)
pex2 = exp(logpex2)
# % Handle special cases
logpdf = where((cx==1) | (cx==-inf),-inf,-pex2+logpex2-logex2)
putmask(logpdf,(c==1) & (x==1),0.0) # logpdf(c==1 & x==1) = 0; % 0^0 situation
return exp(logpdf)
def _cdf(self, x, c):
#return exp(-pow(1-c*x,1.0/c))
loglogcdf = where((c==0)*(x==x),-x,log1p(-c*x)/c)
return exp(-exp(loglogcdf))
def _ppf(self, q, c):
#return 1.0/c*(1.-(-log(q))**c)
x = -log(-log(q))
return where((c==0)*(x==x),x,-expm1(-c*x)/c)
def _stats(self,c):
g = lambda n : gam(n*c+1)
g1 = g(1)
g2 = g(2)
        g3 = g(3)
g4 = g(4)
g2mg12 = where(abs(c)<1e-7,(c*pi)**2.0/6.0,g2-g1**2.0)
        gam2k = where(abs(c)<1e-7,pi**2.0/6.0, expm1(gamln(2.0*c+1.0)-2*gamln(c+1.0))/c**2.0)
eps = 1e-14
gamk = where(abs(c)<eps,-_EULER,expm1(gamln(c+1))/c)
m = where(c<-1.0,nan,-gamk)
v = where(c<-0.5,nan,g1**2.0*gam2k)
#% skewness
        sk1 = where(c<-1./3,nan,np.sign(c)*(-g3+(g2+2*g2mg12)*g1)/((g2mg12)**(3./2.)))
sk = where(abs(c)<=eps**0.29,12*sqrt(6)*_ZETA3/pi**3,sk1)
#% The kurtosis is:
ku1 = where(c<-1./4,nan,(g4+(-4*g3+3*(g2+g2mg12)*g1)*g1)/((g2mg12)**2))
ku = where(abs(c)<=(eps)**0.23,12.0/5.0,ku1-3.0)
return m,v,sk,ku
def _munp(self, n, c):
k = arange(0,n+1)
vals = 1.0/c**n * sum(comb(n,k) * (-1)**k * special.gamma(c*k + 1),axis=0)
return where(c*n > -1, vals, inf)
genextreme = genextreme_gen(name='genextreme', shapes='c')
## Gamma (Use MATLAB and MATHEMATICA (b=theta=scale, a=alpha=shape) definition)
## gamma(a, loc, scale) with a an integer is the Erlang distribution
## gamma(1, loc, scale) is the Exponential distribution
## gamma(df/2, 0, 2) is the chi2 distribution with df degrees of freedom.
class gamma_gen(rv_continuous):
"""A gamma continuous random variable.
%(before_notes)s
See Also
--------
erlang, expon
Notes
-----
The probability density function for `gamma` is::
gamma.pdf(x, a) = lambda**a * x**(a-1) * exp(-lambda*x) / gamma(a)
for ``x >= 0``, ``a > 0``. Here ``gamma(a)`` refers to the gamma function.
The scale parameter is equal to ``scale = 1.0 / lambda``.
`gamma` has a shape parameter `a` which needs to be set explicitly. For instance:
>>> from scipy.stats import gamma
>>> rv = gamma(3., loc = 0., scale = 2.)
produces a frozen form of `gamma` with shape ``a = 3.``, ``loc =
0.`` and ``lambda = 1./scale = 1./2.``.
When ``a`` is an integer, `gamma` reduces to the Erlang
distribution, and when ``a=1`` to the exponential distribution.
%(example)s
"""
def _rvs(self, a):
return mtrand.standard_gamma(a, self._size)
def _pdf(self, x, a):
return exp(self._logpdf(x, a))
def _logpdf(self, x, a):
return (a-1)*log(x) - x - gamln(a)
def _cdf(self, x, a):
return special.gammainc(a, x)
def _ppf(self, q, a):
return special.gammaincinv(a,q)
def _stats(self, a):
return a, a, 2.0/sqrt(a), 6.0/a
def _entropy(self, a):
return special.psi(a)*(1-a) + 1 + gamln(a)
def _fitstart(self, data):
a = 4 / _skew(data)**2
return super(gamma_gen, self)._fitstart(data, args=(a,))
def fit(self, data, *args, **kwds):
floc = kwds.get('floc', None)
if floc == 0:
xbar = ravel(data).mean()
logx_bar = ravel(log(data)).mean()
s = log(xbar) - logx_bar
def func(a):
return log(a) - special.digamma(a) - s
aest = (3-s + math.sqrt((s-3)**2 + 24*s)) / (12*s)
xa = aest*(1-0.4)
xb = aest*(1+0.4)
a = optimize.brentq(func, xa, xb, disp=0)
scale = xbar / a
return a, floc, scale
else:
return super(gamma_gen, self).fit(data, *args, **kwds)
gamma = gamma_gen(a=0.0, name='gamma', shapes='a')
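# Fitting sketch (added comment): the `fit` override above solves the reduced
# likelihood equation with Brent's method when the location is fixed at zero;
# passing ``floc=0`` takes that fast path, anything else falls back to the
# generic numerical fit:
#
#     >>> data = gamma.rvs(3.0, scale=2.0, size=1000)
#     >>> a_hat, loc_hat, scale_hat = gamma.fit(data, floc=0)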
# Generalized Gamma
class gengamma_gen(rv_continuous):
"""A generalized gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gengamma` is::
gengamma.pdf(x, a, c) = abs(c) * x**(c*a-1) * exp(-x**c) / gamma(a)
for ``x > 0``, ``a > 0``, and ``c != 0``.
%(example)s
"""
def _argcheck(self, a, c):
return (a > 0) & (c != 0)
def _pdf(self, x, a, c):
return abs(c)* exp((c*a-1)*log(x)-x**c- gamln(a))
def _cdf(self, x, a, c):
val = special.gammainc(a,x**c)
cond = c + 0*val
return where(cond>0,val,1-val)
def _ppf(self, q, a, c):
val1 = special.gammaincinv(a,q)
val2 = special.gammaincinv(a,1.0-q)
ic = 1.0/c
cond = c+0*val1
return where(cond > 0,val1**ic,val2**ic)
def _munp(self, n, a, c):
return special.gamma(a+n*1.0/c) / special.gamma(a)
def _entropy(self, a,c):
val = special.psi(a)
return a*(1-val) + 1.0/c*val + gamln(a)-log(abs(c))
gengamma = gengamma_gen(a=0.0, name='gengamma', shapes="a, c")
## Generalized Half-Logistic
##
class genhalflogistic_gen(rv_continuous):
"""A generalized half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `genhalflogistic` is::
genhalflogistic.pdf(x, c) = 2 * (1-c*x)**(1/c-1) / (1+(1-c*x)**(1/c))**2
for ``0 <= x <= 1/c``, and ``c > 0``.
%(example)s
"""
def _argcheck(self, c):
self.b = 1.0 / c
return (c > 0)
def _pdf(self, x, c):
limit = 1.0/c
tmp = asarray(1-c*x)
tmp0 = tmp**(limit-1)
tmp2 = tmp0*tmp
return 2*tmp0 / (1+tmp2)**2
def _cdf(self, x, c):
limit = 1.0/c
tmp = asarray(1-c*x)
tmp2 = tmp**(limit)
return (1.0-tmp2) / (1+tmp2)
def _ppf(self, q, c):
return 1.0/c*(1-((1.0-q)/(1.0+q))**c)
def _entropy(self,c):
return 2 - (2*c+1)*log(2)
genhalflogistic = genhalflogistic_gen(a=0.0, name='genhalflogistic',
shapes='c')
## Gompertz (Truncated Gumbel)
## Defined for x>=0
class gompertz_gen(rv_continuous):
"""A Gompertz (or truncated Gumbel) continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gompertz` is::
gompertz.pdf(x, c) = c * exp(x) * exp(-c*(exp(x)-1))
for ``x >= 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
ex = exp(x)
return c*ex*exp(-c*(ex-1))
def _cdf(self, x, c):
return 1.0-exp(-c*(exp(x)-1))
def _ppf(self, q, c):
return log(1-1.0/c*log(1-q))
def _entropy(self, c):
return 1.0 - log(c) - exp(c)*special.expn(1,c)
gompertz = gompertz_gen(a=0.0, name='gompertz', shapes='c')
## Gumbel, Log-Weibull, Fisher-Tippett, Gompertz
## The left-skewed and right-skewed Gumbel distributions are available
## as gumbel_l and gumbel_r.
class gumbel_r_gen(rv_continuous):
"""A right-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_l, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_r` is::
gumbel_r.pdf(x) = exp(-(x + exp(-x)))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(example)s
"""
def _pdf(self, x):
ex = exp(-x)
return ex*exp(-ex)
def _logpdf(self, x):
return -x - exp(-x)
def _cdf(self, x):
return exp(-exp(-x))
def _logcdf(self, x):
return -exp(-x)
def _ppf(self, q):
return -log(-log(q))
def _stats(self):
return _EULER, pi*pi/6.0, \
12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return 1.0608407169541684911
gumbel_r = gumbel_r_gen(name='gumbel_r')
class gumbel_l_gen(rv_continuous):
"""A left-skewed Gumbel continuous random variable.
%(before_notes)s
See Also
--------
gumbel_r, gompertz, genextreme
Notes
-----
The probability density function for `gumbel_l` is::
gumbel_l.pdf(x) = exp(x - exp(x))
The Gumbel distribution is sometimes referred to as a type I Fisher-Tippett
distribution. It is also related to the extreme value distribution,
log-Weibull and Gompertz distributions.
%(example)s
"""
def _pdf(self, x):
ex = exp(x)
return ex*exp(-ex)
def _logpdf(self, x):
return x - exp(x)
def _cdf(self, x):
return 1.0-exp(-exp(x))
def _ppf(self, q):
return log(-log(1-q))
def _stats(self):
return -_EULER, pi*pi/6.0, \
-12*sqrt(6)/pi**3 * _ZETA3, 12.0/5
def _entropy(self):
return 1.0608407169541684911
gumbel_l = gumbel_l_gen(name='gumbel_l')
# Half-Cauchy
class halfcauchy_gen(rv_continuous):
"""A Half-Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfcauchy` is::
halfcauchy.pdf(x) = 2 / (pi * (1 + x**2))
for ``x >= 0``.
%(example)s
"""
def _pdf(self, x):
return 2.0/pi/(1.0+x*x)
def _logpdf(self, x):
return np.log(2.0/pi) - np.log1p(x*x)
def _cdf(self, x):
return 2.0/pi*arctan(x)
def _ppf(self, q):
return tan(pi/2*q)
def _stats(self):
return inf, inf, nan, nan
def _entropy(self):
return log(2*pi)
halfcauchy = halfcauchy_gen(a=0.0, name='halfcauchy')
## Half-Logistic
##
class halflogistic_gen(rv_continuous):
"""A half-logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halflogistic` is::
halflogistic.pdf(x) = 2 * exp(-x) / (1+exp(-x))**2 = 1/2 * sech(x/2)**2
for ``x >= 0``.
%(example)s
"""
def _pdf(self, x):
return 0.5/(cosh(x/2.0))**2.0
def _cdf(self, x):
return tanh(x/2.0)
def _ppf(self, q):
return 2*arctanh(q)
def _munp(self, n):
if n==1: return 2*log(2)
if n==2: return pi*pi/3.0
if n==3: return 9*_ZETA3
if n==4: return 7*pi**4 / 15.0
return 2*(1-pow(2.0,1-n))*special.gamma(n+1)*special.zeta(n,1)
def _entropy(self):
return 2-log(2)
halflogistic = halflogistic_gen(a=0.0, name='halflogistic')
## Half-normal = chi(1, loc, scale)
class halfnorm_gen(rv_continuous):
"""A half-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `halfnorm` is::
halfnorm.pdf(x) = sqrt(2/pi) * exp(-x**2/2)
for ``x > 0``.
%(example)s
"""
def _rvs(self):
return abs(norm.rvs(size=self._size))
def _pdf(self, x):
return sqrt(2.0/pi)*exp(-x*x/2.0)
def _logpdf(self, x):
return 0.5 * np.log(2.0/pi) - x*x/2.0
def _cdf(self, x):
return special.ndtr(x)*2-1.0
def _ppf(self, q):
return special.ndtri((1+q)/2.0)
def _stats(self):
return sqrt(2.0/pi), 1-2.0/pi, sqrt(2)*(4-pi)/(pi-2)**1.5, \
8*(pi-3)/(pi-2)**2
def _entropy(self):
return 0.5*log(pi/2.0)+0.5
halfnorm = halfnorm_gen(a=0.0, name='halfnorm')
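# Consistency sketch (added comment): as noted above, halfnorm is chi with one
# degree of freedom (the absolute value of a standard normal variate), so
#
#     >>> from scipy.stats import halfnorm, chi
#     >>> halfnorm.pdf(0.7)
#     >>> chi.pdf(0.7, 1)                          # same value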
## Hyperbolic Secant
class hypsecant_gen(rv_continuous):
"""A hyperbolic secant continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `hypsecant` is::
hypsecant.pdf(x) = 1/pi * sech(x)
%(example)s
"""
def _pdf(self, x):
return 1.0/(pi*cosh(x))
def _cdf(self, x):
return 2.0/pi*arctan(exp(x))
def _ppf(self, q):
return log(tan(pi*q/2.0))
def _stats(self):
return 0, pi*pi/4, 0, 2
def _entropy(self):
return log(2*pi)
hypsecant = hypsecant_gen(name='hypsecant')
## Gauss Hypergeometric
class gausshyper_gen(rv_continuous):
"""A Gauss hypergeometric continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gausshyper` is::
gausshyper.pdf(x, a, b, c, z) =
C * x**(a-1) * (1-x)**(b-1) * (1+z*x)**(-c)
for ``0 <= x <= 1``, ``a > 0``, ``b > 0``, and
``C = 1 / (B(a,b) F[2,1](c, a; a+b; -z))``
%(example)s
"""
def _argcheck(self, a, b, c, z):
return (a > 0) & (b > 0) & (c==c) & (z==z)
def _pdf(self, x, a, b, c, z):
Cinv = gam(a)*gam(b)/gam(a+b)*special.hyp2f1(c,a,a+b,-z)
return 1.0/Cinv * x**(a-1.0) * (1.0-x)**(b-1.0) / (1.0+z*x)**c
def _munp(self, n, a, b, c, z):
fac = special.beta(n+a,b) / special.beta(a,b)
num = special.hyp2f1(c,a+n,a+b+n,-z)
den = special.hyp2f1(c,a,a+b,-z)
return fac*num / den
gausshyper = gausshyper_gen(a=0.0, b=1.0, name='gausshyper',
shapes="a, b, c, z")
## Inverted Gamma
# special case of generalized gamma with c=-1
#
class invgamma_gen(rv_continuous):
"""An inverted gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgamma` is::
invgamma.pdf(x, a) = x**(-a-1) / gamma(a) * exp(-1/x)
for x > 0, a > 0.
%(example)s
"""
def _pdf(self, x, a):
return exp(self._logpdf(x,a))
def _logpdf(self, x, a):
return (-(a+1)*log(x)-gamln(a) - 1.0/x)
def _cdf(self, x, a):
return 1.0-special.gammainc(a, 1.0/x)
def _ppf(self, q, a):
return 1.0/special.gammaincinv(a,1-q)
def _munp(self, n, a):
return exp(gamln(a-n) - gamln(a))
def _entropy(self, a):
return a - (a+1.0)*special.psi(a) + gamln(a)
invgamma = invgamma_gen(a=0.0, name='invgamma', shapes='a')
## Inverse Gaussian Distribution (used to be called 'invnorm')
# scale is gamma from DATAPLOT and B from Regress
class invgauss_gen(rv_continuous):
"""An inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invgauss` is::
invgauss.pdf(x, mu) = 1 / sqrt(2*pi*x**3) * exp(-(x-mu)**2/(2*x*mu**2))
for ``x > 0``.
    When `mu` is too small, evaluating the cumulative distribution function will be
inaccurate due to ``cdf(mu -> 0) = inf * 0``.
NaNs are returned for ``mu <= 0.0028``.
%(example)s
"""
def _rvs(self, mu):
return mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x**3.0)*exp(-1.0/(2*x)*((x-mu)/mu)**2)
def _logpdf(self, x, mu):
return -0.5*log(2*pi) - 1.5*log(x) - ((x-mu)/mu)**2/(2*x)
def _cdf(self, x, mu):
fac = sqrt(1.0/x)
# Numerical accuracy for small `mu` is bad. See #869.
C1 = norm.cdf(fac*(x-mu)/mu)
C1 += exp(1.0/mu) * norm.cdf(-fac*(x+mu)/mu) * exp(1.0/mu)
return C1
def _stats(self, mu):
return mu, mu**3.0, 3*sqrt(mu), 15*mu
invgauss = invgauss_gen(a=0.0, name='invgauss', shapes="mu")
## Inverted Weibull
class invweibull_gen(rv_continuous):
"""An inverted Weibull continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `invweibull` is::
invweibull.pdf(x, c) = c * x**(-c-1) * exp(-x**(-c))
for ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
xc1 = x**(-c-1.0)
#xc2 = xc1*x
xc2 = x**(-c)
xc2 = exp(-xc2)
return c*xc1*xc2
def _cdf(self, x, c):
xc1 = x**(-c)
return exp(-xc1)
def _ppf(self, q, c):
return pow(-log(q),asarray(-1.0/c))
def _entropy(self, c):
return 1+_EULER + _EULER / c - log(c)
invweibull = invweibull_gen(a=0, name='invweibull', shapes='c')
## Johnson SB
class johnsonsb_gen(rv_continuous):
"""A Johnson SB continuous random variable.
%(before_notes)s
See Also
--------
johnsonsu
Notes
-----
The probability density function for `johnsonsb` is::
johnsonsb.pdf(x, a, b) = b / (x*(1-x)) * phi(a + b * log(x/(1-x)))
for ``0 < x < 1`` and ``a,b > 0``, and ``phi`` is the normal pdf.
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a==a)
def _pdf(self, x, a, b):
trm = norm.pdf(a+b*log(x/(1.0-x)))
return b*1.0/(x*(1-x))*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x/(1.0-x)))
def _ppf(self, q, a, b):
return 1.0/(1+exp(-1.0/b*(norm.ppf(q)-a)))
johnsonsb = johnsonsb_gen(a=0.0, b=1.0, name='johnsonsb', shapes="a, b")
## Johnson SU
class johnsonsu_gen(rv_continuous):
"""A Johnson SU continuous random variable.
%(before_notes)s
See Also
--------
johnsonsb
Notes
-----
The probability density function for `johnsonsu` is::
johnsonsu.pdf(x, a, b) = b / sqrt(x**2 + 1) *
phi(a + b * log(x + sqrt(x**2 + 1)))
for all ``x, a, b > 0``, and `phi` is the normal pdf.
%(example)s
"""
def _argcheck(self, a, b):
return (b > 0) & (a==a)
def _pdf(self, x, a, b):
x2 = x*x
trm = norm.pdf(a+b*log(x+sqrt(x2+1)))
return b*1.0/sqrt(x2+1.0)*trm
def _cdf(self, x, a, b):
return norm.cdf(a+b*log(x+sqrt(x*x+1)))
def _ppf(self, q, a, b):
return sinh((norm.ppf(q)-a)/b)
johnsonsu = johnsonsu_gen(name='johnsonsu', shapes="a, b")
## Laplace Distribution
class laplace_gen(rv_continuous):
"""A Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `laplace` is::
laplace.pdf(x) = 1/2 * exp(-abs(x))
%(example)s
"""
def _rvs(self):
return mtrand.laplace(0, 1, size=self._size)
def _pdf(self, x):
return 0.5*exp(-abs(x))
def _cdf(self, x):
return where(x > 0, 1.0-0.5*exp(-x), 0.5*exp(x))
def _ppf(self, q):
return where(q > 0.5, -log(2*(1-q)), log(2*q))
def _stats(self):
return 0, 2, 0, 3
def _entropy(self):
return log(2)+1
laplace = laplace_gen(name='laplace')
## Levy Distribution
class levy_gen(rv_continuous):
"""A Levy continuous random variable.
%(before_notes)s
See Also
--------
levy_stable, levy_l
Notes
-----
The probability density function for `levy` is::
levy.pdf(x) = 1 / (x * sqrt(2*pi*x)) * exp(-1/(2*x))
for ``x > 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=1.
%(example)s
"""
def _pdf(self, x):
return 1/sqrt(2*pi*x)/x*exp(-1/(2*x))
def _cdf(self, x):
return 2*(1-norm._cdf(1/sqrt(x)))
def _ppf(self, q):
val = norm._ppf(1-q/2.0)
return 1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy = levy_gen(a=0.0,name="levy")
## Left-skewed Levy Distribution
class levy_l_gen(rv_continuous):
"""A left-skewed Levy continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_stable
Notes
-----
The probability density function for `levy_l` is::
levy_l.pdf(x) = 1 / (abs(x) * sqrt(2*pi*abs(x))) * exp(-1/(2*abs(x)))
for ``x < 0``.
This is the same as the Levy-stable distribution with a=1/2 and b=-1.
%(example)s
"""
def _pdf(self, x):
ax = abs(x)
return 1/sqrt(2*pi*ax)/ax*exp(-1/(2*ax))
def _cdf(self, x):
ax = abs(x)
return 2*norm._cdf(1/sqrt(ax))-1
def _ppf(self, q):
val = norm._ppf((q+1.0)/2)
return -1.0/(val*val)
def _stats(self):
return inf, inf, nan, nan
levy_l = levy_l_gen(b=0.0, name="levy_l")
## Levy-stable Distribution (only random variates)
class levy_stable_gen(rv_continuous):
"""A Levy-stable continuous random variable.
%(before_notes)s
See Also
--------
levy, levy_l
Notes
-----
Levy-stable distribution (only random variates available -- ignore other
docs)
%(example)s
"""
def _rvs(self, alpha, beta):
sz = self._size
TH = uniform.rvs(loc=-pi/2.0,scale=pi,size=sz)
W = expon.rvs(size=sz)
if alpha==1:
return 2/pi*(pi/2+beta*TH)*tan(TH)-beta*log((pi/2*W*cos(TH))/(pi/2+beta*TH))
# else
ialpha = 1.0/alpha
aTH = alpha*TH
if beta==0:
return W/(cos(TH)/tan(aTH)+sin(TH))*((cos(aTH)+sin(aTH)*tan(TH))/W)**ialpha
# else
val0 = beta*tan(pi*alpha/2)
th0 = arctan(val0)/alpha
val3 = W/(cos(TH)/tan(alpha*(th0+TH))+sin(TH))
res3 = val3*((cos(aTH)+sin(aTH)*tan(TH)-val0*(sin(aTH)-cos(aTH)*tan(TH)))/W)**ialpha
return res3
def _argcheck(self, alpha, beta):
if beta == -1:
self.b = 0.0
elif beta == 1:
self.a = 0.0
return (alpha > 0) & (alpha <= 2) & (beta <= 1) & (beta >= -1)
def _pdf(self, x, alpha, beta):
raise NotImplementedError
levy_stable = levy_stable_gen(name='levy_stable', shapes="alpha, beta")
## Logistic (special case of generalized logistic with c=1)
## Sech-squared
class logistic_gen(rv_continuous):
"""A logistic continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `logistic` is::
logistic.pdf(x) = exp(-x) / (1+exp(-x))**2
%(example)s
"""
def _rvs(self):
return mtrand.logistic(size=self._size)
def _pdf(self, x):
ex = exp(-x)
return ex / (1+ex)**2.0
def _cdf(self, x):
return 1.0/(1+exp(-x))
def _ppf(self, q):
return -log(1.0/q-1)
def _stats(self):
return 0, pi*pi/3.0, 0, 6.0/5.0
def _entropy(self):
return 1.0
logistic = logistic_gen(name='logistic')
## Log Gamma
#
class loggamma_gen(rv_continuous):
"""A log gamma continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loggamma` is::
loggamma.pdf(x, c) = exp(c*x-exp(x)) / gamma(c)
for all ``x, c > 0``.
%(example)s
"""
def _rvs(self, c):
return log(mtrand.gamma(c, size=self._size))
def _pdf(self, x, c):
return exp(c*x-exp(x)-gamln(c))
def _cdf(self, x, c):
return special.gammainc(c, exp(x))
def _ppf(self, q, c):
return log(special.gammaincinv(c,q))
def _munp(self,n,*args):
# use generic moment calculation using ppf
return self._mom0_sc(n,*args)
loggamma = loggamma_gen(name='loggamma', shapes='c')
## Log-Laplace (Log Double Exponential)
##
class loglaplace_gen(rv_continuous):
"""A log-Laplace continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `loglaplace` is::
loglaplace.pdf(x, c) = c / 2 * x**(c-1), for 0 < x < 1
= c / 2 * x**(-c-1), for x >= 1
for ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
cd2 = c/2.0
c = where(x < 1, c, -c)
return cd2*x**(c-1)
def _cdf(self, x, c):
return where(x < 1, 0.5*x**c, 1-0.5*x**(-c))
def _ppf(self, q, c):
return where(q < 0.5, (2.0*q)**(1.0/c), (2*(1.0-q))**(-1.0/c))
def _entropy(self, c):
return log(2.0/c) + 1.0
loglaplace = loglaplace_gen(a=0.0, name='loglaplace', shapes='c')
## Lognormal (Cobb-Douglas)
## The shape parameter is the standard deviation of the underlying
## normal distribution.
## The mean of the underlying distribution is log(scale).
class lognorm_gen(rv_continuous):
"""A lognormal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `lognorm` is::
lognorm.pdf(x, s) = 1 / (s*x*sqrt(2*pi)) * exp(-1/2*(log(x)/s)**2)
for ``x > 0``, ``s > 0``.
    If ``log(x)`` is normally distributed with mean ``mu`` and variance
    ``sigma**2``, then ``x`` is log-normally distributed with shape parameter
    ``sigma`` and scale parameter ``exp(mu)``.
%(example)s
"""
def _rvs(self, s):
return exp(s * norm.rvs(size=self._size))
def _pdf(self, x, s):
Px = exp(-log(x)**2 / (2*s**2))
return Px / (s*x*sqrt(2*pi))
def _cdf(self, x, s):
return norm.cdf(log(x)/s)
def _ppf(self, q, s):
return exp(s*norm._ppf(q))
def _stats(self, s):
p = exp(s*s)
mu = sqrt(p)
mu2 = p*(p-1)
g1 = sqrt((p-1))*(2+p)
g2 = numpy.polyval([1,2,3,0,-6.0],p)
return mu, mu2, g1, g2
def _entropy(self, s):
return 0.5*(1+log(2*pi)+2*log(s))
lognorm = lognorm_gen(a=0.0, name='lognorm', shapes='s')
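# Parameterization sketch (added comment): per the docstring above, if
# ``log(X)`` is Normal(mu, sigma**2) then X is lognorm with shape ``s = sigma``
# and ``scale = exp(mu)``, e.g. for mu = 1, sigma = 0.5:
#
#     >>> import numpy as np
#     >>> from scipy.stats import lognorm
#     >>> lognorm.pdf(2.0, 0.5, scale=np.exp(1.0))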
# Gibrat's distribution is just lognormal with s=1
class gilbrat_gen(lognorm_gen):
"""A Gilbrat continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `gilbrat` is::
gilbrat.pdf(x) = 1/(x*sqrt(2*pi)) * exp(-1/2*(log(x))**2)
%(example)s
"""
def _rvs(self):
return lognorm_gen._rvs(self, 1.0)
def _pdf(self, x):
return lognorm_gen._pdf(self, x, 1.0)
def _cdf(self, x):
return lognorm_gen._cdf(self, x, 1.0)
def _ppf(self, q):
return lognorm_gen._ppf(self, q, 1.0)
def _stats(self):
return lognorm_gen._stats(self, 1.0)
def _entropy(self):
return 0.5*log(2*pi) + 0.5
gilbrat = gilbrat_gen(a=0.0, name='gilbrat')
# MAXWELL
class maxwell_gen(rv_continuous):
"""A Maxwell continuous random variable.
%(before_notes)s
Notes
-----
A special case of a `chi` distribution, with ``df = 3``, ``loc = 0.0``,
and given ``scale = 1.0 / sqrt(a)``, where a is the parameter used in
the Mathworld description [1]_.
The probability density function for `maxwell` is::
        maxwell.pdf(x) = sqrt(2/pi) * x**2 * exp(-x**2/2)
for ``x > 0``.
References
----------
.. [1] http://mathworld.wolfram.com/MaxwellDistribution.html
%(example)s
"""
def _rvs(self):
return chi.rvs(3.0,size=self._size)
def _pdf(self, x):
return sqrt(2.0/pi)*x*x*exp(-x*x/2.0)
def _cdf(self, x):
return special.gammainc(1.5,x*x/2.0)
def _ppf(self, q):
return sqrt(2*special.gammaincinv(1.5,q))
def _stats(self):
val = 3*pi-8
return 2*sqrt(2.0/pi), 3-8/pi, sqrt(2)*(32-10*pi)/val**1.5, \
(-12*pi*pi + 160*pi - 384) / val**2.0
def _entropy(self):
return _EULER + 0.5*log(2*pi)-0.5
maxwell = maxwell_gen(a=0.0, name='maxwell')
# Mielke's Beta-Kappa
class mielke_gen(rv_continuous):
"""A Mielke's Beta-Kappa continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `mielke` is::
mielke.pdf(x, k, s) = k * x**(k-1) / (1+x**s)**(1+k/s)
for ``x > 0``.
%(example)s
"""
def _pdf(self, x, k, s):
return k*x**(k-1.0) / (1.0+x**s)**(1.0+k*1.0/s)
def _cdf(self, x, k, s):
return x**k / (1.0+x**s)**(k*1.0/s)
def _ppf(self, q, k, s):
qsk = pow(q,s*1.0/k)
return pow(qsk/(1.0-qsk),1.0/s)
mielke = mielke_gen(a=0.0, name='mielke', shapes="k, s")
# Nakagami (cf Chi)
class nakagami_gen(rv_continuous):
"""A Nakagami continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nakagami` is::
nakagami.pdf(x, nu) = 2 * nu**nu / gamma(nu) *
x**(2*nu-1) * exp(-nu*x**2)
for ``x > 0``, ``nu > 0``.
%(example)s
"""
def _pdf(self, x, nu):
return 2*nu**nu/gam(nu)*(x**(2*nu-1.0))*exp(-nu*x*x)
def _cdf(self, x, nu):
return special.gammainc(nu,nu*x*x)
def _ppf(self, q, nu):
return sqrt(1.0/nu*special.gammaincinv(nu,q))
def _stats(self, nu):
mu = gam(nu+0.5)/gam(nu)/sqrt(nu)
mu2 = 1.0-mu*mu
g1 = mu*(1-4*nu*mu2)/2.0/nu/mu2**1.5
g2 = -6*mu**4*nu + (8*nu-2)*mu**2-2*nu + 1
g2 /= nu*mu2**2.0
return mu, mu2, g1, g2
nakagami = nakagami_gen(a=0.0, name="nakagami", shapes='nu')
# Non-central chi-squared
# nc is lambda of definition, df is nu
class ncx2_gen(rv_continuous):
"""A non-central chi-squared continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncx2` is::
ncx2.pdf(x, df, nc) = exp(-(nc+df)/2) * 1/2 * (x/nc)**((df-2)/4)
* I[(df-2)/2](sqrt(nc*x))
for ``x > 0``.
%(example)s
"""
def _rvs(self, df, nc):
return mtrand.noncentral_chisquare(df,nc,self._size)
def _logpdf(self, x, df, nc):
a = asarray(df/2.0)
fac = -nc/2.0 - x/2.0 + (a-1)*np.log(x) - a*np.log(2) - special.gammaln(a)
return fac + np.nan_to_num(np.log(special.hyp0f1(a, nc * x/4.0)))
def _pdf(self, x, df, nc):
return np.exp(self._logpdf(x, df, nc))
def _cdf(self, x, df, nc):
return special.chndtr(x,df,nc)
def _ppf(self, q, df, nc):
return special.chndtrix(q,df,nc)
def _stats(self, df, nc):
val = df + 2.0*nc
return df + nc, 2*val, sqrt(8)*(val+nc)/val**1.5, \
12.0*(val+2*nc)/val**2.0
ncx2 = ncx2_gen(a=0.0, name='ncx2', shapes="df, nc")
# Non-central F
class ncf_gen(rv_continuous):
"""A non-central F distribution continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `ncf` is::
ncf.pdf(x, df1, df2, nc) = exp(nc/2 + nc*df1*x/(2*(df1*x+df2)))
* df1**(df1/2) * df2**(df2/2) * x**(df1/2-1)
* (df2+df1*x)**(-(df1+df2)/2)
* gamma(df1/2)*gamma(1+df2/2)
                * L^{df1/2-1}_{df2/2}(-nc*df1*x/(2*(df1*x+df2)))
                / (B(df1/2, df2/2) * gamma((df1+df2)/2))
for ``df1, df2, nc > 0``.
%(example)s
"""
def _rvs(self, dfn, dfd, nc):
return mtrand.noncentral_f(dfn,dfd,nc,self._size)
def _pdf_skip(self, x, dfn, dfd, nc):
n1,n2 = dfn, dfd
term = -nc/2+nc*n1*x/(2*(n2+n1*x)) + gamln(n1/2.)+gamln(1+n2/2.)
term -= gamln((n1+n2)/2.0)
Px = exp(term)
Px *= n1**(n1/2) * n2**(n2/2) * x**(n1/2-1)
Px *= (n2+n1*x)**(-(n1+n2)/2)
Px *= special.assoc_laguerre(-nc*n1*x/(2.0*(n2+n1*x)),n2/2,n1/2-1)
Px /= special.beta(n1/2,n2/2)
#this function does not have a return
# drop it for now, the generic function seems to work ok
def _cdf(self, x, dfn, dfd, nc):
return special.ncfdtr(dfn,dfd,nc,x)
def _ppf(self, q, dfn, dfd, nc):
return special.ncfdtri(dfn, dfd, nc, q)
def _munp(self, n, dfn, dfd, nc):
val = (dfn *1.0/dfd)**n
term = gamln(n+0.5*dfn) + gamln(0.5*dfd-n) - gamln(dfd*0.5)
val *= exp(-nc / 2.0+term)
val *= special.hyp1f1(n+0.5*dfn, 0.5*dfn, 0.5*nc)
return val
def _stats(self, dfn, dfd, nc):
mu = where(dfd <= 2, inf, dfd / (dfd-2.0)*(1+nc*1.0/dfn))
mu2 = where(dfd <=4, inf, 2*(dfd*1.0/dfn)**2.0 * \
((dfn+nc/2.0)**2.0 + (dfn+nc)*(dfd-2.0)) / \
((dfd-2.0)**2.0 * (dfd-4.0)))
return mu, mu2, None, None
ncf = ncf_gen(a=0.0, name='ncf', shapes="dfn, dfd, nc")
## Student t distribution
class t_gen(rv_continuous):
"""A Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `t` is::
gamma((df+1)/2)
t.pdf(x, df) = ---------------------------------------------------
sqrt(pi*df) * gamma(df/2) * (1+x**2/df)**((df+1)/2)
for ``df > 0``.
%(example)s
"""
def _rvs(self, df):
return mtrand.standard_t(df, size=self._size)
#Y = f.rvs(df, df, size=self._size)
#sY = sqrt(Y)
#return 0.5*sqrt(df)*(sY-1.0/sY)
def _pdf(self, x, df):
r = asarray(df*1.0)
Px = exp(gamln((r+1)/2)-gamln(r/2))
Px /= sqrt(r*pi)*(1+(x**2)/r)**((r+1)/2)
return Px
def _logpdf(self, x, df):
r = df*1.0
lPx = gamln((r+1)/2)-gamln(r/2)
lPx -= 0.5*log(r*pi) + (r+1)/2*log(1+(x**2)/r)
return lPx
def _cdf(self, x, df):
return special.stdtr(df, x)
def _sf(self, x, df):
return special.stdtr(df, -x)
def _ppf(self, q, df):
return special.stdtrit(df, q)
def _isf(self, q, df):
return -special.stdtrit(df, q)
def _stats(self, df):
mu2 = where(df > 2, df / (df-2.0), inf)
g1 = where(df > 3, 0.0, nan)
g2 = where(df > 4, 6.0/(df-4.0), nan)
return 0, mu2, g1, g2
t = t_gen(name='t', shapes="df")
## Non-central T distribution
class nct_gen(rv_continuous):
"""A non-central Student's T continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `nct` is::
df**(df/2) * gamma(df+1)
nct.pdf(x, df, nc) = ----------------------------------------------------
2**df*exp(nc**2/2) * (df+x**2)**(df/2) * gamma(df/2)
for ``df > 0``, ``nc > 0``.
%(example)s
"""
def _rvs(self, df, nc):
return norm.rvs(loc=nc,size=self._size)*sqrt(df) / sqrt(chi2.rvs(df,size=self._size))
def _pdf(self, x, df, nc):
n = df*1.0
nc = nc*1.0
x2 = x*x
ncx2 = nc*nc*x2
fac1 = n + x2
trm1 = n/2.*log(n) + gamln(n+1)
trm1 -= n*log(2)+nc*nc/2.+(n/2.)*log(fac1)+gamln(n/2.)
Px = exp(trm1)
valF = ncx2 / (2*fac1)
trm1 = sqrt(2)*nc*x*special.hyp1f1(n/2+1,1.5,valF)
trm1 /= asarray(fac1*special.gamma((n+1)/2))
trm2 = special.hyp1f1((n+1)/2,0.5,valF)
trm2 /= asarray(sqrt(fac1)*special.gamma(n/2+1))
Px *= trm1+trm2
return Px
def _cdf(self, x, df, nc):
return special.nctdtr(df, nc, x)
def _ppf(self, q, df, nc):
return special.nctdtrit(df, nc, q)
def _stats(self, df, nc, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
val1 = gam((df-1.0)/2.0)
val2 = gam(df/2.0)
if 'm' in moments:
mu = nc*sqrt(df/2.0)*val1/val2
if 'v' in moments:
var = (nc*nc+1.0)*df/(df-2.0)
var -= nc*nc*df* val1**2 / 2.0 / val2**2
mu2 = var
if 's' in moments:
g1n = 2*nc*sqrt(df)*val1*((nc*nc*(2*df-7)-3)*val2**2 \
-nc*nc*(df-2)*(df-3)*val1**2)
g1d = (df-3)*sqrt(2*df*(nc*nc+1)/(df-2) - \
nc*nc*df*(val1/val2)**2) * val2 * \
(nc*nc*(df-2)*val1**2 - \
2*(nc*nc+1)*val2**2)
g1 = g1n/g1d
if 'k' in moments:
g2n = 2*(-3*nc**4*(df-2)**2 *(df-3) *(df-4)*val1**4 + \
2**(6-2*df) * nc*nc*(df-2)*(df-4)* \
(nc*nc*(2*df-7)-3)*pi* gam(df+1)**2 - \
4*(nc**4*(df-5)-6*nc*nc-3)*(df-3)*val2**4)
g2d = (df-3)*(df-4)*(nc*nc*(df-2)*val1**2 - \
2*(nc*nc+1)*val2)**2
g2 = g2n / g2d
return mu, mu2, g1, g2
nct = nct_gen(name="nct", shapes="df, nc")
# Pareto
class pareto_gen(rv_continuous):
"""A Pareto continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `pareto` is::
pareto.pdf(x, b) = b / x**(b+1)
for ``x >= 1``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
return b * x**(-b-1)
def _cdf(self, x, b):
return 1 - x**(-b)
def _ppf(self, q, b):
return pow(1-q, -1.0/b)
def _stats(self, b, moments='mv'):
mu, mu2, g1, g2 = None, None, None, None
if 'm' in moments:
mask = b > 1
bt = extract(mask,b)
mu = valarray(shape(b),value=inf)
place(mu, mask, bt / (bt-1.0))
if 'v' in moments:
mask = b > 2
bt = extract( mask,b)
mu2 = valarray(shape(b), value=inf)
place(mu2, mask, bt / (bt-2.0) / (bt-1.0)**2)
if 's' in moments:
mask = b > 3
bt = extract( mask,b)
g1 = valarray(shape(b), value=nan)
            vals = 2*(bt+1.0)*sqrt(bt-2.0)/((bt-3.0)*sqrt(bt))
place(g1, mask, vals)
if 'k' in moments:
mask = b > 4
bt = extract( mask,b)
g2 = valarray(shape(b), value=nan)
vals = 6.0*polyval([1.0,1.0,-6,-2],bt)/ \
polyval([1.0,-7.0,12.0,0.0],bt)
place(g2, mask, vals)
return mu, mu2, g1, g2
def _entropy(self, c):
return 1 + 1.0/c - log(c)
pareto = pareto_gen(a=1.0, name="pareto", shapes="b")
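# Moments sketch (added comment): `_stats` above masks each moment on the
# shape parameter, so the mean requires ``b > 1`` and the variance ``b > 2``:
#
#     >>> from scipy.stats import pareto
#     >>> pareto.mean(3.0)                         # b/(b-1) = 1.5
#     >>> pareto.var(1.5)                          # inf, since b <= 2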
# LOMAX (Pareto of the second kind.)
class lomax_gen(rv_continuous):
"""A Lomax (Pareto of the second kind) continuous random variable.
%(before_notes)s
Notes
-----
    The Lomax distribution is a special case of the Pareto distribution, with
    ``loc = -1.0``.
The probability density function for `lomax` is::
lomax.pdf(x, c) = c / (1+x)**(c+1)
for ``x >= 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*1.0/(1.0+x)**(c+1.0)
def _logpdf(self, x, c):
return log(c) - (c+1)*log(1+x)
def _cdf(self, x, c):
return 1.0-1.0/(1.0+x)**c
def _sf(self, x, c):
return 1.0/(1.0+x)**c
def _logsf(self, x, c):
return -c*log(1+x)
def _ppf(self, q, c):
return pow(1.0-q,-1.0/c)-1
def _stats(self, c):
mu, mu2, g1, g2 = pareto.stats(c, loc=-1.0, moments='mvsk')
return mu, mu2, g1, g2
def _entropy(self, c):
return 1+1.0/c-log(c)
lomax = lomax_gen(a=0.0, name="lomax", shapes="c")
## Power-function distribution
## Special case of beta dist. with d =1.0
class powerlaw_gen(rv_continuous):
"""A power-function continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlaw` is::
powerlaw.pdf(x, a) = a * x**(a-1)
for ``0 <= x <= 1``, ``a > 0``.
%(example)s
"""
def _pdf(self, x, a):
return a*x**(a-1.0)
def _logpdf(self, x, a):
return log(a) + (a-1)*log(x)
def _cdf(self, x, a):
return x**(a*1.0)
def _logcdf(self, x, a):
return a*log(x)
def _ppf(self, q, a):
return pow(q, 1.0/a)
def _stats(self, a):
return (a / (a + 1.0),
a / (a + 2.0) / (a + 1.0) ** 2,
-2.0 * ((a - 1.0) / (a + 3.0)) * sqrt((a + 2.0) / a),
6 * polyval([1, -1, -6, 2], a) / (a * (a + 3.0) * (a + 4)))
def _entropy(self, a):
return 1 - 1.0/a - log(a)
powerlaw = powerlaw_gen(a=0.0, b=1.0, name="powerlaw", shapes="a")
# Power log normal
class powerlognorm_gen(rv_continuous):
"""A power log-normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powerlognorm` is::
powerlognorm.pdf(x, c, s) = c / (x*s) * phi(log(x)/s) *
(Phi(-log(x)/s))**(c-1),
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``s, c > 0``.
%(example)s
"""
def _pdf(self, x, c, s):
return c/(x*s)*norm.pdf(log(x)/s)*pow(norm.cdf(-log(x)/s),c*1.0-1.0)
def _cdf(self, x, c, s):
return 1.0 - pow(norm.cdf(-log(x)/s),c*1.0)
def _ppf(self, q, c, s):
return exp(-s*norm.ppf(pow(1.0-q,1.0/c)))
powerlognorm = powerlognorm_gen(a=0.0, name="powerlognorm", shapes="c, s")
# Power Normal
class powernorm_gen(rv_continuous):
"""A power normal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `powernorm` is::
powernorm.pdf(x, c) = c * phi(x) * (Phi(-x))**(c-1)
where ``phi`` is the normal pdf, and ``Phi`` is the normal cdf,
and ``x > 0``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return c*_norm_pdf(x)* \
(_norm_cdf(-x)**(c-1.0))
def _logpdf(self, x, c):
return log(c) + _norm_logpdf(x) + (c-1)*_norm_logcdf(-x)
def _cdf(self, x, c):
return 1.0-_norm_cdf(-x)**(c*1.0)
def _ppf(self, q, c):
return -norm.ppf(pow(1.0-q,1.0/c))
powernorm = powernorm_gen(name='powernorm', shapes="c")
# R-distribution (a general-purpose distribution with a
# variety of shapes).
# FIXME: PPF does not work.
class rdist_gen(rv_continuous):
"""An R-distributed continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rdist` is::
rdist.pdf(x, c) = (1-x**2)**(c/2-1) / B(1/2, c/2)
for ``-1 <= x <= 1``, ``c > 0``.
%(example)s
"""
def _pdf(self, x, c):
return np.power((1.0-x*x),c/2.0-1) / special.beta(0.5,c/2.0)
def _cdf_skip(self, x, c):
#error inspecial.hyp2f1 for some values see tickets 758, 759
return 0.5 + x/special.beta(0.5,c/2.0)* \
special.hyp2f1(0.5,1.0-c/2.0,1.5,x*x)
def _munp(self, n, c):
return (1-(n % 2))*special.beta((n+1.0)/2,c/2.0)
rdist = rdist_gen(a=-1.0, b=1.0, name="rdist", shapes="c")
# Rayleigh distribution (this is chi with df=2 and loc=0.0)
# scale is the mode.
class rayleigh_gen(rv_continuous):
"""A Rayleigh continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rayleigh` is::
rayleigh.pdf(r) = r * exp(-r**2/2)
    for ``r >= 0``.
%(example)s
"""
def _rvs(self):
return chi.rvs(2,size=self._size)
def _pdf(self, r):
return r*exp(-r*r/2.0)
def _cdf(self, r):
return 1.0-exp(-r*r/2.0)
def _ppf(self, q):
return sqrt(-2*log(1-q))
def _stats(self):
val = 4-pi
return np.sqrt(pi/2), val/2, 2*(pi-3)*sqrt(pi)/val**1.5, \
6*pi/val-16/val**2
def _entropy(self):
return _EULER/2.0 + 1 - 0.5*log(2)
rayleigh = rayleigh_gen(a=0.0, name="rayleigh")
# Reciprocal Distribution
class reciprocal_gen(rv_continuous):
"""A reciprocal continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `reciprocal` is::
reciprocal.pdf(x, a, b) = 1 / (x*log(b/a))
for ``a <= x <= b``, ``a, b > 0``.
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self.d = log(b*1.0 / a)
return (a > 0) & (b > 0) & (b > a)
def _pdf(self, x, a, b):
# argcheck should be called before _pdf
return 1.0/(x*self.d)
def _logpdf(self, x, a, b):
return -log(x) - log(self.d)
def _cdf(self, x, a, b):
return (log(x)-log(a)) / self.d
def _ppf(self, q, a, b):
return a*pow(b*1.0/a,q)
def _munp(self, n, a, b):
return 1.0/self.d / n * (pow(b*1.0,n) - pow(a*1.0,n))
def _entropy(self,a,b):
return 0.5*log(a*b)+log(log(b/a))
reciprocal = reciprocal_gen(name="reciprocal", shapes="a, b")
# Rice distribution
# FIXME: PPF does not work.
class rice_gen(rv_continuous):
"""A Rice continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `rice` is::
rice.pdf(x, b) = x * exp(-(x**2+b**2)/2) * I[0](x*b)
for ``x > 0``, ``b > 0``.
%(example)s
"""
def _pdf(self, x, b):
return x*exp(-(x*x+b*b)/2.0)*special.i0(x*b)
def _logpdf(self, x, b):
return log(x) - (x*x + b*b)/2.0 + log(special.i0(x*b))
def _munp(self, n, b):
nd2 = n/2.0
n1 = 1+nd2
b2 = b*b/2.0
return 2.0**(nd2)*exp(-b2)*special.gamma(n1) * \
special.hyp1f1(n1,1,b2)
rice = rice_gen(a=0.0, name="rice", shapes="b")
# Reciprocal Inverse Gaussian
# FIXME: PPF does not work.
class recipinvgauss_gen(rv_continuous):
"""A reciprocal inverse Gaussian continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `recipinvgauss` is::
recipinvgauss.pdf(x, mu) = 1/sqrt(2*pi*x) * exp(-(1-mu*x)**2/(2*x*mu**2))
for ``x >= 0``.
%(example)s
"""
def _rvs(self, mu): #added, taken from invgauss
return 1.0/mtrand.wald(mu, 1.0, size=self._size)
def _pdf(self, x, mu):
return 1.0/sqrt(2*pi*x)*exp(-(1-mu*x)**2.0 / (2*x*mu**2.0))
def _logpdf(self, x, mu):
return -(1-mu*x)**2.0 / (2*x*mu**2.0) - 0.5*log(2*pi*x)
def _cdf(self, x, mu):
trm1 = 1.0/mu - x
trm2 = 1.0/mu + x
isqx = 1.0/sqrt(x)
return 1.0-_norm_cdf(isqx*trm1)-exp(2.0/mu)*_norm_cdf(-isqx*trm2)
recipinvgauss = recipinvgauss_gen(a=0.0, name='recipinvgauss', shapes="mu")
# Semicircular
class semicircular_gen(rv_continuous):
"""A semicircular continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `semicircular` is::
semicircular.pdf(x) = 2/pi * sqrt(1-x**2)
for ``-1 <= x <= 1``.
%(example)s
"""
def _pdf(self, x):
return 2.0/pi*sqrt(1-x*x)
def _cdf(self, x):
return 0.5+1.0/pi*(x*sqrt(1-x*x) + arcsin(x))
def _stats(self):
return 0, 0.25, 0, -1.0
def _entropy(self):
return 0.64472988584940017414
semicircular = semicircular_gen(a=-1.0, b=1.0, name="semicircular")
# Triangular
class triang_gen(rv_continuous):
"""A triangular continuous random variable.
%(before_notes)s
Notes
-----
The triangular distribution can be represented with an up-sloping line from
    ``loc`` to ``(loc + c*scale)`` and then downsloping from ``(loc + c*scale)``
to ``(loc+scale)``.
The standard form is in the range [0, 1] with c the mode.
The location parameter shifts the start to `loc`.
The scale parameter changes the width from 1 to `scale`.
%(example)s
"""
def _rvs(self, c):
return mtrand.triangular(0, c, 1, self._size)
def _argcheck(self, c):
return (c >= 0) & (c <= 1)
def _pdf(self, x, c):
return where(x < c, 2*x/c, 2*(1-x)/(1-c))
def _cdf(self, x, c):
return where(x < c, x*x/c, (x*x-2*x+c)/(c-1))
def _ppf(self, q, c):
return where(q < c, sqrt(c*q), 1-sqrt((1-c)*(1-q)))
def _stats(self, c):
return (c+1.0)/3.0, (1.0-c+c*c)/18, sqrt(2)*(2*c-1)*(c+1)*(c-2) / \
(5*(1.0-c+c*c)**1.5), -3.0/5.0
def _entropy(self,c):
return 0.5-log(2)
triang = triang_gen(a=0.0, b=1.0, name="triang", shapes="c")
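# A small worked example of the parametrization described in the notes above
# (the interval and mode are illustrative values): a triangle on [2, 5] with
# mode 3 corresponds to loc=2, scale=3 and c=(3-2)/3.
# >>> triang.ppf(0.5, 1/3., loc=2, scale=3) # median, roughly 3.27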
# Truncated Exponential
class truncexpon_gen(rv_continuous):
"""A truncated exponential continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `truncexpon` is::
truncexpon.pdf(x, b) = exp(-x) / (1-exp(-b))
for ``0 < x < b``.
%(example)s
"""
def _argcheck(self, b):
self.b = b
return (b > 0)
def _pdf(self, x, b):
return exp(-x)/(1-exp(-b))
def _logpdf(self, x, b):
return -x - log(1-exp(-b))
def _cdf(self, x, b):
return (1.0-exp(-x))/(1-exp(-b))
def _ppf(self, q, b):
return -log(1-q+q*exp(-b))
def _munp(self, n, b):
#wrong answer with formula, same as in continuous.pdf
#return gam(n+1)-special.gammainc(1+n,b)
if n == 1:
return (1-(b+1)*exp(-b))/(-expm1(-b))
elif n == 2:
return 2*(1-0.5*(b*b+2*b+2)*exp(-b))/(-expm1(-b))
else:
#return generic for higher moments
#return rv_continuous._mom1_sc(self,n, b)
return self._mom1_sc(n, b)
def _entropy(self, b):
eB = exp(b)
return log(eB-1)+(1+eB*(b-1.0))/(1.0-eB)
truncexpon = truncexpon_gen(a=0.0, name='truncexpon', shapes="b")
# Truncated Normal
class truncnorm_gen(rv_continuous):
"""A truncated normal continuous random variable.
%(before_notes)s
Notes
-----
The standard form of this distribution is a standard normal truncated to
the range [a,b] --- notice that a and b are defined over the domain of the
standard normal. To convert clip values for a specific mean and standard
deviation, use::
a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
%(example)s
"""
def _argcheck(self, a, b):
self.a = a
self.b = b
self._nb = _norm_cdf(b)
self._na = _norm_cdf(a)
self._delta = self._nb - self._na
self._logdelta = log(self._delta)
return (a != b)
# All of these assume that _argcheck is called first
# and no other thread calls _pdf before.
def _pdf(self, x, a, b):
return _norm_pdf(x) / self._delta
def _logpdf(self, x, a, b):
return _norm_logpdf(x) - self._logdelta
def _cdf(self, x, a, b):
return (_norm_cdf(x) - self._na) / self._delta
def _ppf(self, q, a, b):
return norm._ppf(q*self._nb + self._na*(1.0-q))
def _stats(self, a, b):
nA, nB = self._na, self._nb
d = nB - nA
pA, pB = _norm_pdf(a), _norm_pdf(b)
mu = (pA - pB) / d #correction sign
mu2 = 1 + (a*pA - b*pB) / d - mu*mu
return mu, mu2, None, None
truncnorm = truncnorm_gen(name='truncnorm', shapes="a, b")
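# A hedged example of the clip-value conversion spelled out in the notes
# above; my_mean and my_std are illustrative values, not module names.
# >>> my_mean, my_std = 0.5, 2.0
# >>> a, b = (0.0 - my_mean) / my_std, (1.0 - my_mean) / my_std
# >>> r = truncnorm.rvs(a, b, loc=my_mean, scale=my_std, size=10)
# >>> (r >= 0.0).all() and (r <= 1.0).all()
# True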
# Tukey-Lambda
# FIXME: RVS does not work.
class tukeylambda_gen(rv_continuous):
"""A Tukey-Lamdba continuous random variable.
%(before_notes)s
Notes
-----
A flexible distribution, able to represent and interpolate between the
following distributions:
- Cauchy (lam = -1)
- logistic (lam = 0)
- approximately normal (lam = 0.14)
- u-shaped (lam = 0.5)
- uniform from -1 to 1 (lam = 1)
%(example)s
"""
def _argcheck(self, lam):
# lam in RR.
return np.ones(np.shape(lam), dtype=bool)
def _pdf(self, x, lam):
Fx = asarray(special.tklmbda(x,lam))
Px = Fx**(lam-1.0) + (asarray(1-Fx))**(lam-1.0)
Px = 1.0/asarray(Px)
return where((lam <= 0) | (abs(x) < 1.0/asarray(lam)), Px, 0.0)
def _cdf(self, x, lam):
return special.tklmbda(x, lam)
def _ppf(self, q, lam):
q = q*1.0
vals1 = (q**lam - (1-q)**lam)/lam
vals2 = log(q/(1-q))
return where((lam == 0)&(q==q), vals2, vals1)
def _stats(self, lam):
return 0, _tlvar(lam), 0, _tlkurt(lam)
def _entropy(self, lam):
def integ(p):
return log(pow(p,lam-1)+pow(1-p,lam-1))
return integrate.quad(integ,0,1)[0]
tukeylambda = tukeylambda_gen(name='tukeylambda', shapes="lam")
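# A quick sanity sketch of the special cases listed above: for lam = 1 the
# distribution is uniform on [-1, 1], so the CDF is (x + 1)/2.
# >>> tukeylambda.cdf(0.5, 1.0) # expected 0.75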
# Uniform
class uniform_gen(rv_continuous):
"""A uniform continuous random variable.
This distribution is constant between `loc` and ``loc + scale``.
%(before_notes)s
%(example)s
"""
def _rvs(self):
return mtrand.uniform(0.0,1.0,self._size)
def _pdf(self, x):
return 1.0*(x==x)
def _cdf(self, x):
return x
def _ppf(self, q):
return q
def _stats(self):
return 0.5, 1.0/12, 0, -1.2
def _entropy(self):
return 0.0
uniform = uniform_gen(a=0.0, b=1.0, name='uniform')
# Von-Mises
# if x is not in range or loc is not in range it assumes they are angles
# and converts them to [-pi, pi] equivalents.
eps = numpy.finfo(float).eps
class vonmises_gen(rv_continuous):
"""A Von Mises continuous random variable.
%(before_notes)s
Notes
-----
If `x` is not in range or `loc` is not in range it assumes they are angles
and converts them to [-pi, pi] equivalents.
The probability density function for `vonmises` is::
vonmises.pdf(x, b) = exp(b*cos(x)) / (2*pi*I[0](b))
for ``-pi <= x <= pi``, ``b > 0``.
%(example)s
"""
def _rvs(self, b):
return mtrand.vonmises(0.0, b, size=self._size)
def _pdf(self, x, b):
return exp(b*cos(x)) / (2*pi*special.i0(b))
def _cdf(self, x, b):
return vonmises_cython.von_mises_cdf(b,x)
def _stats_skip(self, b):
return 0, None, 0, None
vonmises = vonmises_gen(name='vonmises', shapes="b")
## Wald distribution (Inverse Normal with shape parameter mu=1.0)
class wald_gen(invgauss_gen):
"""A Wald continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wald` is::
wald.pdf(x, a) = 1/sqrt(2*pi*x**3) * exp(-(x-1)**2/(2*x))
for ``x > 0``.
%(example)s
"""
def _rvs(self):
return mtrand.wald(1.0, 1.0, size=self._size)
def _pdf(self, x):
return invgauss._pdf(x, 1.0)
def _logpdf(self, x):
return invgauss._logpdf(x, 1.0)
def _cdf(self, x):
return invgauss._cdf(x, 1.0)
def _stats(self):
return 1.0, 1.0, 3.0, 15.0
wald = wald_gen(a=0.0, name="wald")
# Wrapped Cauchy
class wrapcauchy_gen(rv_continuous):
"""A wrapped Cauchy continuous random variable.
%(before_notes)s
Notes
-----
The probability density function for `wrapcauchy` is::
wrapcauchy.pdf(x, c) = (1-c**2) / (2*pi*(1+c**2-2*c*cos(x)))
for ``0 <= x <= 2*pi``, ``0 < c < 1``.
%(example)s
"""
def _argcheck(self, c):
return (c > 0) & (c < 1)
def _pdf(self, x, c):
return (1.0-c*c)/(2*pi*(1+c*c-2*c*cos(x)))
def _cdf(self, x, c):
output = 0.0*x
val = (1.0+c)/(1.0-c)
c1 = x<pi
c2 = 1-c1
xp = extract( c1,x)
#valp = extract(c1,val)
xn = extract( c2,x)
#valn = extract(c2,val)
if (any(xn)):
valn = extract(c2, np.ones_like(x)*val)
xn = 2*pi - xn
yn = tan(xn/2.0)
on = 1.0-1.0/pi*arctan(valn*yn)
place(output, c2, on)
if (any(xp)):
valp = extract(c1, np.ones_like(x)*val)
yp = tan(xp/2.0)
op = 1.0/pi*arctan(valp*yp)
place(output, c1, op)
return output
def _ppf(self, q, c):
val = (1.0-c)/(1.0+c)
rcq = 2*arctan(val*tan(pi*q))
rcmq = 2*pi-2*arctan(val*tan(pi*(1-q)))
return where(q < 1.0/2, rcq, rcmq)
def _entropy(self, c):
return log(2*pi*(1-c*c))
wrapcauchy = wrapcauchy_gen(a=0.0, b=2*pi, name='wrapcauchy', shapes="c")
### DISCRETE DISTRIBUTIONS
###
def entropy(pk, qk=None, base=None):
""" Calculate the entropy of a distribution for given probability values.
If only probabilities `pk` are given, the entropy is calculated as
``S = -sum(pk * log(pk), axis=0)``.
If `qk` is not None, then compute a relative entropy
``S = sum(pk * log(pk / qk), axis=0)``.
This routine will normalize `pk` and `qk` if they don't sum to 1.
Parameters
----------
pk : sequence
Defines the (discrete) distribution. ``pk[i]`` is the (possibly
unnormalized) probability of event ``i``.
qk : sequence, optional
Sequence against which the relative entropy is computed. Should be in
the same format as `pk`.
base : float, optional
The logarithmic base to use, defaults to ``e`` (natural logarithm).
Returns
-------
S : float
The calculated entropy.
"""
pk = asarray(pk)
pk = 1.0* pk / sum(pk, axis=0)
if qk is None:
vec = where(pk == 0, 0.0, pk*log(pk))
else:
qk = asarray(qk)
if len(qk) != len(pk):
raise ValueError("qk and pk must have same length.")
qk = 1.0*qk / sum(qk, axis=0)
# If qk is zero anywhere, then unless pk is zero at those places
# too, the relative entropy is infinite.
if any(take(pk, nonzero(qk == 0.0), axis=0) != 0.0, 0):
return inf
vec = where (pk == 0, 0.0, -pk*log(pk / qk))
S = -sum(vec, axis=0)
if base is not None:
S /= log(base)
return S
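# A brief usage sketch for entropy(): with `qk` given it returns the
# Kullback-Leibler divergence D(pk || qk) in nats; inputs are normalized first.
# >>> entropy([0.5, 0.5]) # ln(2), about 0.6931
# >>> entropy([0.9, 0.1], qk=[0.5, 0.5]) # about 0.3681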
## Handlers for generic case where xk and pk are given
def _drv_pmf(self, xk, *args):
try:
return self.P[xk]
except KeyError:
return 0.0
def _drv_cdf(self, xk, *args):
indx = argmax((self.xk>xk),axis=-1)-1
return self.F[self.xk[indx]]
def _drv_ppf(self, q, *args):
indx = argmax((self.qvals>=q),axis=-1)
return self.Finv[self.qvals[indx]]
def _drv_nonzero(self, k, *args):
return 1
def _drv_moment(self, n, *args):
n = asarray(n)
return sum(self.xk**n[newaxis,...] * self.pk, axis=0)
def _drv_moment_gen(self, t, *args):
t = asarray(t)
return sum(exp(self.xk * t[newaxis,...]) * self.pk, axis=0)
def _drv2_moment(self, n, *args):
"""Non-central moment of discrete distribution."""
#many changes, originally not even a return
tot = 0.0
diff = 1e100
#pos = self.a
pos = max(0.0, 1.0*self.a)
count = 0
#handle cases with infinite support
ulimit = max(1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
llimit = min(-1000, (min(self.b,1000) + max(self.a,-1000))/2.0 )
while (pos <= self.b) and ((pos <= ulimit) or \
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
# use pmf because _pmf does not check support in randint
# and there might be problems ? with correct self.a, self.b at this stage
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = -self.inc
while (pos >= self.a) and ((pos >= llimit) or \
(diff > self.moment_tol)):
diff = np.power(pos, n) * self.pmf(pos,*args)
#using pmf instead of _pmf, see above
tot += diff
pos -= self.inc
count += 1
return tot
def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm
b = self.invcdf_b
a = self.invcdf_a
if isinf(b): # Be sure ending point is > q
b = max(100*q,10)
while 1:
if b >= self.b: qb = 1.0; break
qb = self._cdf(b,*args)
if (qb < q): b += 10
else: break
else:
qb = 1.0
if isinf(a): # be sure starting point < q
a = min(-100*q,-10)
while 1:
if a <= self.a: qb = 0.0; break
qa = self._cdf(a,*args)
if (qa > q): a -= 10
else: break
else:
qa = self._cdf(a, *args)
while 1:
if (qa == q):
return a
if (qb == q):
return b
if b == a+1:
#testcase: return wrong number at lower index
#python -c "from scipy.stats import zipf;print zipf.ppf(0.01,2)" wrong
#python -c "from scipy.stats import zipf;print zipf.ppf([0.01,0.61,0.77,0.83],2)"
#python -c "from scipy.stats import logser;print logser.ppf([0.1,0.66, 0.86,0.93],0.6)"
if qa > q:
return a
else:
return b
c = int((a+b)/2.0)
qc = self._cdf(c, *args)
if (qc < q):
a = c
qa = qc
elif (qc > q):
b = c
qb = qc
else:
return c
def reverse_dict(d):
newdict = {}
for key in sorted(d.keys(), reverse=True):
newdict[d[key]] = key
return newdict
def make_dict(keys, values):
d = {}
for key, value in zip(keys, values):
d[key] = value
return d
# Must over-ride one of _pmf or _cdf or pass in
# x_k, p(x_k) lists in initialization
class rv_discrete(rv_generic):
"""
A generic discrete random variable class meant for subclassing.
`rv_discrete` is a base class to construct specific distribution classes
and instances for discrete random variables. rv_discrete can also be used
to construct an arbitrary distribution defined by a list of support
points and the corresponding probabilities.
Parameters
----------
a : float, optional
Lower bound of the support of the distribution, default: 0
b : float, optional
Upper bound of the support of the distribution, default: plus infinity
moment_tol : float, optional
The tolerance for the generic calculation of moments
values : tuple of two array_like
(xk, pk) where xk are points (integers) with positive probability pk
with sum(pk) = 1
inc : integer
increment for the support of the distribution, default: 1
other values have not been tested
badvalue : object, optional
The value in (masked) arrays that indicates a value that should be
ignored.
name : str, optional
The name of the instance. This string is used to construct the default
example for distributions.
longname : str, optional
This string is used as part of the first line of the docstring returned
when a subclass has no docstring of its own. Note: `longname` exists
for backwards compatibility, do not use for new subclasses.
shapes : str, optional
The shape of the distribution. For example ``"m, n"`` for a
distribution that takes two integers as the first two arguments for all
its methods.
extradoc : str, optional
This string is used as the last part of the docstring returned when a
subclass has no docstring of its own. Note: `extradoc` exists for
backwards compatibility, do not use for new subclasses.
Methods
-------
generic.rvs(<shape(s)>, loc=0, size=1)
random variates
generic.pmf(x, <shape(s)>, loc=0)
probability mass function
generic.logpmf(x, <shape(s)>, loc=0)
log of the probability mass function
generic.cdf(x, <shape(s)>, loc=0)
cumulative distribution function
generic.logcdf(x, <shape(s)>, loc=0)
log of the cumulative distribution function
generic.sf(x, <shape(s)>, loc=0)
survival function (1-cdf --- sometimes more accurate)
generic.logsf(x, <shape(s)>, loc=0)
log of the survival function
generic.ppf(q, <shape(s)>, loc=0)
percent point function (inverse of cdf --- percentiles)
generic.isf(q, <shape(s)>, loc=0)
inverse survival function (inverse of sf)
generic.moment(n, <shape(s)>, loc=0)
non-central n-th moment of the distribution. May not work for array arguments.
generic.stats(<shape(s)>, loc=0, moments='mv')
mean('m', axis=0), variance('v'), skew('s'), and/or kurtosis('k')
generic.entropy(<shape(s)>, loc=0)
entropy of the RV
generic.expect(func=None, args=(), loc=0, lb=None, ub=None, conditional=False)
Expected value of a function with respect to the distribution.
Additional kwd arguments passed to integrate.quad
generic.median(<shape(s)>, loc=0)
Median of the distribution.
generic.mean(<shape(s)>, loc=0)
Mean of the distribution.
generic.std(<shape(s)>, loc=0)
Standard deviation of the distribution.
generic.var(<shape(s)>, loc=0)
Variance of the distribution.
generic.interval(alpha, <shape(s)>, loc=0)
Interval that with `alpha` percent probability contains a random
realization of this distribution.
generic(<shape(s)>, loc=0)
calling a distribution instance returns a frozen distribution
Notes
-----
You can construct an arbitrary discrete rv where ``P{X=xk} = pk``
by passing to the rv_discrete initialization method (through the
values=keyword) a tuple of sequences (xk, pk) which describes only those
values of X (xk) that occur with nonzero probability (pk).
To create a new discrete distribution, we would do the following::
class poisson_gen(rv_discrete):
#"Poisson distribution"
def _pmf(self, k, mu):
...
and create an instance::
poisson = poisson_gen(name="poisson", shapes="mu",
longname='A Poisson')
The docstring can be created from a template.
Alternatively, the object may be called (as a function) to fix the shape
and location parameters returning a "frozen" discrete RV object::
myrv = generic(<shape(s)>, loc=0)
- frozen RV object with the same methods but holding the given
shape and location fixed.
Examples
--------
Custom made discrete distribution:
>>> import matplotlib.pyplot as plt
>>> from scipy import stats
>>> xk = np.arange(7)
>>> pk = (0.1, 0.2, 0.3, 0.1, 0.1, 0.1, 0.1)
>>> custm = stats.rv_discrete(name='custm', values=(xk, pk))
>>> h = plt.plot(xk, custm.pmf(xk))
Random number generation:
>>> R = custm.rvs(size=100)
Display frozen pmf:
>>> numargs = generic.numargs
>>> [ <shape(s)> ] = ['Replace with reasonable value', ]*numargs
>>> rv = generic(<shape(s)>)
>>> x = np.arange(0, np.min(rv.dist.b, 3)+1)
>>> h = plt.plot(x, rv.pmf(x))
Here, ``rv.dist.b`` is the right endpoint of the support of ``rv.dist``.
Check accuracy of cdf and ppf:
>>> prb = generic.cdf(x, <shape(s)>)
>>> h = plt.semilogy(np.abs(x-generic.ppf(prb, <shape(s)>))+1e-20)
"""
def __init__(self, a=0, b=inf, name=None, badvalue=None,
moment_tol=1e-8,values=None,inc=1,longname=None,
shapes=None, extradoc=None):
super(rv_generic,self).__init__()
if badvalue is None:
badvalue = nan
if name is None:
name = 'Distribution'
self.badvalue = badvalue
self.a = a
self.b = b
self.invcdf_a = a # what's the difference to self.a, .b
self.invcdf_b = b
self.name = name
self.moment_tol = moment_tol
self.inc = inc
self._cdfvec = sgf(self._cdfsingle,otypes='d')
self.return_integers = 1
self.vecentropy = vectorize(self._entropy)
self.shapes = shapes
self.extradoc = extradoc
if values is not None:
self.xk, self.pk = values
self.return_integers = 0
indx = argsort(ravel(self.xk))
self.xk = take(ravel(self.xk),indx, 0)
self.pk = take(ravel(self.pk),indx, 0)
self.a = self.xk[0]
self.b = self.xk[-1]
self.P = make_dict(self.xk, self.pk)
self.qvals = numpy.cumsum(self.pk,axis=0)
self.F = make_dict(self.xk, self.qvals)
self.Finv = reverse_dict(self.F)
self._ppf = instancemethod(sgf(_drv_ppf,otypes='d'),
self, rv_discrete)
self._pmf = instancemethod(sgf(_drv_pmf,otypes='d'),
self, rv_discrete)
self._cdf = instancemethod(sgf(_drv_cdf,otypes='d'),
self, rv_discrete)
self._nonzero = instancemethod(_drv_nonzero, self, rv_discrete)
self.generic_moment = instancemethod(_drv_moment,
self, rv_discrete)
self.moment_gen = instancemethod(_drv_moment_gen,
self, rv_discrete)
self.numargs=0
else:
cdf_signature = inspect.getargspec(self._cdf.im_func)
numargs1 = len(cdf_signature[0]) - 2
pmf_signature = inspect.getargspec(self._pmf.im_func)
numargs2 = len(pmf_signature[0]) - 2
self.numargs = max(numargs1, numargs2)
#nin correction needs to be after we know numargs
#correct nin for generic moment vectorization
self.vec_generic_moment = sgf(_drv2_moment, otypes='d')
self.vec_generic_moment.nin = self.numargs + 2
self.generic_moment = instancemethod(self.vec_generic_moment,
self, rv_discrete)
#correct nin for ppf vectorization
_vppf = sgf(_drv2_ppfsingle,otypes='d')
_vppf.nin = self.numargs + 2 # +1 is for self
self._vecppf = instancemethod(_vppf,
self, rv_discrete)
#now that self.numargs is defined, we can adjust nin
self._cdfvec.nin = self.numargs + 1
# generate docstring for subclass instances
if longname is None:
if name[0] in 'aeiouAEIOU':
hstr = "An "
else:
hstr = "A "
longname = hstr + name
if self.__doc__ is None:
self._construct_default_doc(longname=longname, extradoc=extradoc)
else:
self._construct_doc()
## This only works for old-style classes...
# self.__class__.__doc__ = self.__doc__
def _construct_default_doc(self, longname=None, extradoc=None):
"""Construct instance docstring from the rv_discrete template."""
if extradoc is None:
extradoc = ''
if extradoc.startswith('\n\n'):
extradoc = extradoc[2:]
self.__doc__ = ''.join(['%s discrete random variable.'%longname,
'\n\n%(before_notes)s\n', docheaders['notes'],
extradoc, '\n%(example)s'])
self._construct_doc()
def _construct_doc(self):
"""Construct the instance docstring with string substitutions."""
tempdict = docdict_discrete.copy()
tempdict['name'] = self.name or 'distname'
tempdict['shapes'] = self.shapes or ''
if self.shapes is None:
# remove shapes from call parameters if there are none
for item in ['callparams', 'default', 'before_notes']:
tempdict[item] = tempdict[item].replace(\
"\n%(shapes)s : array_like\n shape parameters", "")
for i in range(2):
if self.shapes is None:
# necessary because we use %(shapes)s in two forms (w w/o ", ")
self.__doc__ = self.__doc__.replace("%(shapes)s, ", "")
self.__doc__ = doccer.docformat(self.__doc__, tempdict)
def _rvs(self, *args):
return self._ppf(mtrand.random_sample(self._size),*args)
def _nonzero(self, k, *args):
return floor(k)==k
def _argcheck(self, *args):
cond = 1
for arg in args:
cond &= (arg > 0)
return cond
def _pmf(self, k, *args):
return self._cdf(k,*args) - self._cdf(k-1,*args)
def _logpmf(self, k, *args):
return log(self._pmf(k, *args))
def _cdfsingle(self, k, *args):
m = arange(int(self.a),k+1)
return sum(self._pmf(m,*args),axis=0)
def _cdf(self, x, *args):
k = floor(x)
return self._cdfvec(k,*args)
def _logcdf(self, x, *args):
return log(self._cdf(x, *args))
def _sf(self, x, *args):
return 1.0-self._cdf(x,*args)
def _logsf(self, x, *args):
return log(self._sf(x, *args))
def _ppf(self, q, *args):
return self._vecppf(q, *args)
def _isf(self, q, *args):
return self._ppf(1-q,*args)
def _stats(self, *args):
return None, None, None, None
def _munp(self, n, *args):
return self.generic_moment(n, *args)
def rvs(self, *args, **kwargs):
"""
Random variates of given type.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
size : int or tuple of ints, optional
Defining number of random variates (default=1).
Returns
-------
rvs : array_like
Random variates of given `size`.
"""
kwargs['discrete'] = True
return super(rv_discrete, self).rvs(*args, **kwargs)
def pmf(self, k,*args, **kwds):
"""
Probability mass function at k of the given RV.
Parameters
----------
k : array_like
quantiles
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : array_like, optional
Location parameter (default=0).
Returns
-------
pmf : array_like
Probability mass function evaluated at k
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0) + np.isnan(k),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._pmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logpmf(self, k,*args, **kwds):
"""
Log of the probability mass function at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter. Default is 0.
Returns
-------
logpmf : array_like
Log of the probability mass function evaluated at k.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b) & self._nonzero(k,*args)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0) + np.isnan(k),self.badvalue)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._logpmf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def cdf(self, k, *args, **kwds):
"""
Cumulative distribution function at k of the given RV.
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
cdf : array_like
Cumulative distribution function evaluated at k.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2*(cond0==cond0), 1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._cdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logcdf(self, k, *args, **kwds):
"""
Log of the cumulative distribution function at k of the given RV
Parameters
----------
k : array_like, int
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
logcdf : array_like
Log of the cumulative distribution function evaluated at k.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray((k-loc))
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k < self.b)
cond2 = (k >= self.b)
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2*(cond0==cond0), 0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._logcdf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def sf(self,k,*args,**kwds):
"""
Survival function (1-cdf) at k of the given RV.
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = zeros(shape(cond),'d')
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2,1.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._sf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def logsf(self,k,*args,**kwds):
"""
Log of the survival function (1-cdf) at k of the given RV
Parameters
----------
k : array_like
Quantiles.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
sf : array_like
Survival function evaluated at k.
"""
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
k,loc = map(asarray,(k,loc))
args = tuple(map(asarray,args))
k = asarray(k-loc)
cond0 = self._argcheck(*args)
cond1 = (k >= self.a) & (k <= self.b)
cond2 = (k < self.a) & cond0
cond = cond0 & cond1
output = empty(shape(cond),'d')
output.fill(NINF)
place(output,(1-cond0) + np.isnan(k),self.badvalue)
place(output,cond2,0.0)
if any(cond):
goodargs = argsreduce(cond, *((k,)+args))
place(output,cond,self._logsf(*goodargs))
if output.ndim == 0:
return output[()]
return output
def ppf(self,q,*args,**kwds):
"""
Percent point function (inverse of cdf) at q of the given RV
Parameters
----------
q : array_like
Lower tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
scale : array_like, optional
Scale parameter (default=1).
Returns
-------
k : array_like
Quantile corresponding to the lower tail probability, q.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
q,loc = map(asarray,(q,loc))
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
output = valarray(shape(cond),value=self.badvalue,typecode='d')
#output type 'd' to handle nan and inf
place(output,(q==0)*(cond==cond), self.a-1)
place(output,cond2,self.b)
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._ppf(*goodargs) + loc)
if output.ndim == 0:
return output[()]
return output
def isf(self,q,*args,**kwds):
"""
Inverse survival function (1-sf) at q of the given RV.
Parameters
----------
q : array_like
Upper tail probability.
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
Returns
-------
k : array_like
Quantile corresponding to the upper tail probability, q.
"""
loc = kwds.get('loc')
args, loc = self._fix_loc(args, loc)
q,loc = map(asarray,(q,loc))
args = tuple(map(asarray,args))
cond0 = self._argcheck(*args) & (loc == loc)
cond1 = (q > 0) & (q < 1)
cond2 = (q==1) & cond0
cond = cond0 & cond1
#old:
## output = valarray(shape(cond),value=self.b,typecode='d')
## #typecode 'd' to handle nin and inf
## place(output,(1-cond0)*(cond1==cond1), self.badvalue)
## place(output,cond2,self.a-1)
#same problem as with ppf
# copied from ppf and changed
output = valarray(shape(cond),value=self.badvalue,typecode='d')
#output type 'd' to handle nan and inf
place(output,(q==0)*(cond==cond), self.b)
place(output,cond2,self.a-1)
# call place only if at least 1 valid argument
if any(cond):
goodargs = argsreduce(cond, *((q,)+args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
place(output,cond,self._isf(*goodargs) + loc) #PB same as ticket 766
if output.ndim == 0:
return output[()]
return output
def stats(self, *args, **kwds):
"""
Some statistics of the given discrete RV.
Parameters
----------
arg1, arg2, arg3,... : array_like
The shape parameter(s) for the distribution (see docstring of the
instance object for more information).
loc : array_like, optional
Location parameter (default=0).
moments : string, optional
Composed of letters ['mvsk'] defining which moments to compute:
- 'm' = mean,
- 'v' = variance,
- 's' = (Fisher's) skew,
- 'k' = (Fisher's) kurtosis.
The default is 'mv'.
Returns
-------
stats : sequence
of requested moments.
"""
loc,moments=map(kwds.get,['loc','moments'])
N = len(args)
if N > self.numargs:
if N == self.numargs + 1 and loc is None: # loc is given without keyword
loc = args[-1]
if N == self.numargs + 2 and moments is None: # loc, scale, and moments
loc, moments = args[-2:]
args = args[:self.numargs]
if loc is None: loc = 0.0
if moments is None: moments = 'mv'
loc = asarray(loc)
args = tuple(map(asarray,args))
cond = self._argcheck(*args) & (loc==loc)
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments})
else:
mu, mu2, g1, g2 = self._stats(*args)
if g1 is None:
mu3 = None
else:
mu3 = g1*(mu2**1.5)
default = valarray(shape(cond), self.badvalue)
output = []
# Use only entries that are valid in calculation
goodargs = argsreduce(cond, *(args+(loc,)))
loc, goodargs = goodargs[-1], goodargs[:-1]
if 'm' in moments:
if mu is None:
mu = self._munp(1.0,*goodargs)
out0 = default.copy()
place(out0,cond,mu+loc)
output.append(out0)
if 'v' in moments:
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
mu2 = mu2p - mu*mu
out0 = default.copy()
place(out0,cond,mu2)
output.append(out0)
if 's' in moments:
if g1 is None:
mu3p = self._munp(3.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
mu3 = mu3p - 3*mu*mu2 - mu**3
g1 = mu3 / mu2**1.5
out0 = default.copy()
place(out0,cond,g1)
output.append(out0)
if 'k' in moments:
if g2 is None:
mu4p = self._munp(4.0,*goodargs)
if mu is None:
mu = self._munp(1.0,*goodargs)
if mu2 is None:
mu2p = self._munp(2.0,*goodargs)
mu2 = mu2p - mu*mu
if mu3 is None:
mu3p = self._munp(3.0,*goodargs)
mu3 = mu3p - 3*mu*mu2 - mu**3
mu4 = mu4p - 4*mu*mu3 - 6*mu*mu*mu2 - mu**4
g2 = mu4 / mu2**2.0 - 3.0
out0 = default.copy()
place(out0,cond,g2)
output.append(out0)
if len(output) == 1:
return output[0]
else:
return tuple(output)
def moment(self, n, *args, **kwds): # Non-central moments in standard form.
"""
n'th non-central moment of the distribution
Parameters
----------
n : int, n>=1
order of moment
arg1, arg2, arg3,...: float
The shape parameter(s) for the distribution (see docstring of the
instance object for more information)
loc : float, optional
location parameter (default=0)
scale : float, optional
scale parameter (default=1)
"""
loc = kwds.get('loc', 0)
scale = kwds.get('scale', 1)
if not (self._argcheck(*args) and (scale > 0)):
return nan
if (floor(n) != n):
raise ValueError("Moment must be an integer.")
if (n < 0): raise ValueError("Moment must be positive.")
mu, mu2, g1, g2 = None, None, None, None
if (n > 0) and (n < 5):
signature = inspect.getargspec(self._stats.im_func)
if (signature[2] is not None) or ('moments' in signature[0]):
dict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]}
else:
dict = {}
mu, mu2, g1, g2 = self._stats(*args,**dict)
val = _moment_from_stats(n, mu, mu2, g1, g2, self._munp, args)
# Convert to transformed X = L + S*Y
# so E[X^n] = E[(L+S*Y)^n] = L^n sum(comb(n,k)*(S/L)^k E[Y^k],k=0...n)
if loc == 0:
return scale**n * val
else:
result = 0
fac = float(scale) / float(loc)
for k in range(n):
valk = _moment_from_stats(k, mu, mu2, g1, g2, self._munp, args)
result += comb(n,k,exact=True)*(fac**k) * valk
result += fac**n * val
return result * loc**n
def freeze(self, *args, **kwds):
return rv_frozen(self, *args, **kwds)
def _entropy(self, *args):
if hasattr(self,'pk'):
return entropy(self.pk)
else:
mu = int(self.stats(*args, **{'moments':'m'}))
val = self.pmf(mu,*args)
if (val==0.0): ent = 0.0
else: ent = -val*log(val)
k = 1
term = 1.0
while (abs(term) > eps):
val = self.pmf(mu+k,*args)
if val == 0.0: term = 0.0
else: term = -val * log(val)
val = self.pmf(mu-k,*args)
if val != 0.0: term -= val*log(val)
k += 1
ent += term
return ent
def entropy(self, *args, **kwds):
loc= kwds.get('loc')
args, loc = self._fix_loc(args, loc)
loc = asarray(loc)
args = map(asarray,args)
cond0 = self._argcheck(*args) & (loc==loc)
output = zeros(shape(cond0),'d')
place(output,(1-cond0),self.badvalue)
goodargs = argsreduce(cond0, *args)
place(output,cond0,self.vecentropy(*goodargs))
return output
def __call__(self, *args, **kwds):
return self.freeze(*args,**kwds)
def expect(self, func=None, args=(), loc=0, lb=None, ub=None, conditional=False):
"""calculate expected value of a function with respect to the distribution
for discrete distribution
Parameters
----------
func : function (default: identity mapping)
Function for which sum is calculated. Takes only one argument.
args : tuple
argument (parameters) of the distribution
optional keyword parameters
lb, ub : numbers
lower and upper bound for integration, default is set to the support
of the distribution; lb and ub are inclusive (lb <= k <= ub)
conditional : boolean (False)
If true then the expectation is corrected by the conditional
probability of the integration interval. The return value is the
expectation of the function, conditional on being in the given
interval (k such that lb <= k <= ub).
Returns
-------
expected value : float
Notes
-----
* function is not vectorized
* accuracy: uses self.moment_tol as stopping criterion;
for a heavy-tailed distribution, e.g. zipf(4), the accuracy of the
mean and variance in the example is only about 1e-5, and
increasing the precision (moment_tol) makes zipf very slow
* suppnmin=100 internal parameter for minimum number of points to evaluate
could be added as keyword parameter, to evaluate functions with
non-monotonic shapes, points include integers in (-suppnmin, suppnmin)
* uses maxcount=1000 limits the number of points that are evaluated
to break loop for infinite sums
(a maximum of suppnmin+1000 positive plus suppnmin+1000 negative integers
are evaluated)
"""
#moment_tol = 1e-12 # increase compared to self.moment_tol,
# too slow for only small gain in precision for zipf
#avoid endless loop with unbound integral, eg. var of zipf(2)
maxcount = 1000
suppnmin = 100 #minimum number of points to evaluate (+ and -)
if func is None:
def fun(x):
#loc and args from outer scope
return (x+loc)*self._pmf(x, *args)
else:
def fun(x):
#loc and args from outer scope
return func(x+loc)*self._pmf(x, *args)
# used pmf because _pmf does not check support in randint
# and there might be problems(?) with correct self.a, self.b at this stage
# maybe not anymore, seems to work now with _pmf
self._argcheck(*args) # (re)generate scalar self.a and self.b
if lb is None:
lb = (self.a)
else:
lb = lb - loc #convert bound for standardized distribution
if ub is None:
ub = (self.b)
else:
ub = ub - loc #convert bound for standardized distribution
if conditional:
if np.isposinf(ub)[()]:
#work around bug: stats.poisson.sf(stats.poisson.b, 2) is nan
invfac = 1 - self.cdf(lb-1,*args)
else:
invfac = 1 - self.cdf(lb-1,*args) - self.sf(ub,*args)
else:
invfac = 1.0
tot = 0.0
low, upp = self._ppf(0.001, *args), self._ppf(0.999, *args)
low = max(min(-suppnmin, low), lb)
upp = min(max(suppnmin, upp), ub)
supp = np.arange(low, upp+1, self.inc) #check limits
#print 'low, upp', low, upp
tot = np.sum(fun(supp))
diff = 1e100
pos = upp + self.inc
count = 0
#handle cases with infinite support
while (pos <= ub) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos += self.inc
count += 1
if self.a < 0: #handle case when self.a = -inf
diff = 1e100
pos = low - self.inc
while (pos >= lb) and (diff > self.moment_tol) and count <= maxcount:
diff = fun(pos)
tot += diff
pos -= self.inc
count += 1
if count > maxcount:
# fixme: replace with proper warning
print 'sum did not converge'
return tot/invfac
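# A hedged example of expect(), usable once the concrete distributions below
# are defined: the mean of a Poisson with mu=2 recovered by direct summation.
# >>> poisson.expect(lambda k: k, args=(2.0,)) # approximately 2.0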
# Binomial
class binom_gen(rv_discrete):
"""A binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `binom` is::
binom.pmf(k) = choose(n,k) * p**k * (1-p)**(n-k)
for ``k`` in ``{0,1,...,n}``.
`binom` takes ``n`` and ``p`` as shape parameters.
%(example)s
"""
def _rvs(self, n, p):
return mtrand.binomial(n,p,self._size)
def _argcheck(self, n, p):
self.b = n
return (n>=0) & (p >= 0) & (p <= 1)
def _logpmf(self, x, n, p):
k = floor(x)
combiln = (gamln(n+1) - (gamln(k+1) +
gamln(n-k+1)))
return combiln + k*np.log(p) + (n-k)*np.log(1-p)
def _pmf(self, x, n, p):
return exp(self._logpmf(x, n, p))
def _cdf(self, x, n, p):
k = floor(x)
vals = special.bdtr(k,n,p)
return vals
def _sf(self, x, n, p):
k = floor(x)
return special.bdtrc(k,n,p)
def _ppf(self, q, n, p):
vals = ceil(special.bdtrik(q,n,p))
vals1 = vals-1
temp = special.bdtr(vals1,n,p)
return where(temp >= q, vals1, vals)
def _stats(self, n, p):
q = 1.0-p
mu = n * p
var = n * p * q
g1 = (q-p) / sqrt(n*p*q)
g2 = (1.0-6*p*q)/(n*p*q)
return mu, var, g1, g2
def _entropy(self, n, p):
k = r_[0:n+1]
vals = self._pmf(k,n,p)
lvals = where(vals==0,0.0,log(vals))
return -sum(vals*lvals,axis=0)
binom = binom_gen(name='binom',shapes="n, p")
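# A minimal usage sketch for the binomial instance above:
# >>> binom.pmf(3, 10, 0.5) # C(10,3)/2**10, about 0.1172
# >>> binom.stats(10, 0.5) # mean 5.0, variance 2.5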
# Bernoulli distribution
class bernoulli_gen(binom_gen):
"""A Bernoulli discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `bernoulli` is::
bernoulli.pmf(k) = 1-p if k = 0
= p if k = 1
for ``k`` in ``{0,1}``.
`bernoulli` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, pr):
return binom_gen._rvs(self, 1, pr)
def _argcheck(self, pr):
return (pr >=0 ) & (pr <= 1)
def _logpmf(self, x, pr):
return binom._logpmf(x, 1, pr)
def _pmf(self, x, pr):
return binom._pmf(x, 1, pr)
def _cdf(self, x, pr):
return binom._cdf(x, 1, pr)
def _sf(self, x, pr):
return binom._sf(x, 1, pr)
def _ppf(self, q, pr):
return binom._ppf(q, 1, pr)
def _stats(self, pr):
return binom._stats(1, pr)
def _entropy(self, pr):
return -pr*log(pr)-(1-pr)*log(1-pr)
bernoulli = bernoulli_gen(b=1,name='bernoulli',shapes="p")
# Negative binomial
class nbinom_gen(rv_discrete):
"""A negative binomial discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `nbinom` is::
nbinom.pmf(k) = choose(k+n-1, n-1) * p**n * (1-p)**k
for ``k >= 0``.
`nbinom` takes ``n`` and ``p`` as shape parameters.
%(example)s
"""
def _rvs(self, n, p):
return mtrand.negative_binomial(n, p, self._size)
def _argcheck(self, n, p):
return (n >= 0) & (p >= 0) & (p <= 1)
def _pmf(self, x, n, p):
coeff = exp(gamln(n+x) - gamln(x+1) - gamln(n))
return coeff * power(p,n) * power(1-p,x)
def _logpmf(self, x, n, p):
coeff = gamln(n+x) - gamln(x+1) - gamln(n)
return coeff + n*log(p) + x*log(1-p)
def _cdf(self, x, n, p):
k = floor(x)
return special.betainc(n, k+1, p)
def _sf_skip(self, x, n, p):
#skip because special.nbdtrc doesn't work for 0<n<1
k = floor(x)
return special.nbdtrc(k,n,p)
def _ppf(self, q, n, p):
vals = ceil(special.nbdtrik(q,n,p))
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1,n,p)
return where(temp >= q, vals1, vals)
def _stats(self, n, p):
Q = 1.0 / p
P = Q - 1.0
mu = n*P
var = n*P*Q
g1 = (Q+P)/sqrt(n*P*Q)
g2 = (1.0 + 6*P*Q) / (n*P*Q)
return mu, var, g1, g2
nbinom = nbinom_gen(name='nbinom', shapes="n, p")
## Geometric distribution
class geom_gen(rv_discrete):
"""A geometric discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `geom` is::
geom.pmf(k) = (1-p)**(k-1)*p
for ``k >= 1``.
`geom` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, p):
return mtrand.geometric(p,size=self._size)
def _argcheck(self, p):
return (p<=1) & (p >= 0)
def _pmf(self, k, p):
return (1-p)**(k-1) * p
def _logpmf(self, k, p):
return (k-1)*log(1-p) + log(p)
def _cdf(self, x, p):
k = floor(x)
return (1.0-(1.0-p)**k)
def _sf(self, x, p):
k = floor(x)
return (1.0-p)**k
def _ppf(self, q, p):
vals = ceil(log(1.0-q)/log(1-p))
temp = 1.0-(1.0-p)**(vals-1)
return where((temp >= q) & (vals > 0), vals-1, vals)
def _stats(self, p):
mu = 1.0/p
qr = 1.0-p
var = qr / p / p
g1 = (2.0-p) / sqrt(qr)
g2 = numpy.polyval([1,-6,6],p)/(1.0-p)
return mu, var, g1, g2
geom = geom_gen(a=1,name='geom', longname="A geometric",
shapes="p")
## Hypergeometric distribution
class hypergeom_gen(rv_discrete):
"""A hypergeometric discrete random variable.
The hypergeometric distribution models drawing objects from a bin.
M is the total number of objects, n is the total number of Type I objects.
The random variate represents the number of Type I objects in N drawn
without replacement from the total population.
%(before_notes)s
Notes
-----
The probability mass function is defined as::
pmf(k, M, n, N) = choose(n, k) * choose(M - n, N - k) / choose(M, N),
for max(0, N - (M-n)) <= k <= min(n, N)
Examples
--------
>>> from scipy.stats import hypergeom
Suppose we have a collection of 20 animals, of which 7 are dogs. Then if
we want to know the probability of finding a given number of dogs if we
choose at random 12 of the 20 animals, we can initialize a frozen
distribution and plot the probability mass function:
>>> [M, n, N] = [20, 7, 12]
>>> rv = hypergeom(M, n, N)
>>> x = np.arange(0, n+1)
>>> pmf_dogs = rv.pmf(x)
>>> fig = plt.figure()
>>> ax = fig.add_subplot(111)
>>> ax.plot(x, pmf_dogs, 'bo')
>>> ax.vlines(x, 0, pmf_dogs, lw=2)
>>> ax.set_xlabel('# of dogs in our group of chosen animals')
>>> ax.set_ylabel('hypergeom PMF')
>>> plt.show()
Instead of using a frozen distribution we can also use the `hypergeom`
methods directly. For example, to obtain the cumulative distribution
function, use:
>>> prb = hypergeom.cdf(x, M, n, N)
And to generate random numbers:
>>> R = hypergeom.rvs(M, n, N, size=10)
"""
def _rvs(self, M, n, N):
return mtrand.hypergeometric(n,M-n,N,size=self._size)
def _argcheck(self, M, n, N):
cond = rv_discrete._argcheck(self,M,n,N)
cond &= (n <= M) & (N <= M)
self.a = N-(M-n)
self.b = min(n,N)
return cond
def _logpmf(self, k, M, n, N):
tot, good = M, n
bad = tot - good
return gamln(good+1) - gamln(good-k+1) - gamln(k+1) + gamln(bad+1) \
- gamln(bad-N+k+1) - gamln(N-k+1) - gamln(tot+1) + gamln(tot-N+1) \
+ gamln(N+1)
def _pmf(self, k, M, n, N):
#same as the following but numerically more precise
#return comb(good,k) * comb(bad,N-k) / comb(tot,N)
return exp(self._logpmf(k, M, n, N))
def _stats(self, M, n, N):
tot, good = M, n
n = good*1.0
m = (tot-good)*1.0
N = N*1.0
tot = m+n
p = n/tot
mu = N*p
var = m*n*N*(tot-N)*1.0/(tot*tot*(tot-1))
g1 = (m - n)*(tot-2*N) / (tot-2.0)*sqrt((tot-1.0)/(m*n*N*(tot-N)))
m2, m3, m4, m5 = m**2, m**3, m**4, m**5
n2, n3, n4, n5 = n**2, n**3, n**4, n**5
g2 = m3 - m5 + n*(3*m2-6*m3+m4) + 3*m*n2 - 12*m2*n2 + 8*m3*n2 + n3 \
- 6*m*n3 + 8*m2*n3 + m*n4 - n5 - 6*m3*N + 6*m4*N + 18*m2*n*N \
- 6*m3*n*N + 18*m*n2*N - 24*m2*n2*N - 6*n3*N - 6*m*n3*N \
+ 6*n4*N + N*N*(6*m2 - 6*m3 - 24*m*n + 12*m2*n + 6*n2 + \
12*m*n2 - 6*n3)
return mu, var, g1, g2
def _entropy(self, M, n, N):
k = r_[N-(M-n):min(n,N)+1]
vals = self.pmf(k,M,n,N)
lvals = where(vals==0.0,0.0,log(vals))
return -sum(vals*lvals,axis=0)
def _sf(self, k, M, n, N):
"""More precise calculation, 1 - cdf doesn't cut it."""
# This for loop is needed because `k` can be an array. If that's the
# case, the sf() method makes M, n and N arrays of the same shape. We
# therefore unpack all inputs args, so we can do the manual integration.
res = []
for quant, tot, good, draw in zip(k, M, n, N):
# Manual integration over probability mass function. More accurate
# than integrate.quad.
k2 = np.arange(quant + 1, draw + 1)
res.append(np.sum(self._pmf(k2, tot, good, draw)))
return np.asarray(res)
hypergeom = hypergeom_gen(name='hypergeom', shapes="M, n, N")
## Logarithmic (Log-Series), (Series) distribution
# FIXME: Fails _cdfvec
class logser_gen(rv_discrete):
"""A Logarithmic (Log-Series, Series) discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `logser` is::
logser.pmf(k) = - p**k / (k*log(1-p))
for ``k >= 1``.
`logser` takes ``p`` as shape parameter.
%(example)s
"""
def _rvs(self, pr):
# looks wrong for pr>0.5, too few k=1
# trying to use generic is worse, no k=1 at all
return mtrand.logseries(pr,size=self._size)
def _argcheck(self, pr):
return (pr > 0) & (pr < 1)
def _pmf(self, k, pr):
return -pr**k * 1.0 / k / log(1-pr)
def _stats(self, pr):
r = log(1-pr)
mu = pr / (pr - 1.0) / r
mu2p = -pr / r / (pr-1.0)**2
var = mu2p - mu*mu
mu3p = -pr / r * (1.0+pr) / (1.0-pr)**3
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / var**1.5
mu4p = -pr / r * (1.0/(pr-1)**2 - 6*pr/(pr-1)**3 + \
6*pr*pr / (pr-1)**4)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / var**2 - 3.0
return mu, var, g1, g2
logser = logser_gen(a=1,name='logser', longname='A logarithmic',
shapes='p')
## Poisson distribution
class poisson_gen(rv_discrete):
"""A Poisson discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `poisson` is::
poisson.pmf(k) = exp(-mu) * mu**k / k!
for ``k >= 0``.
`poisson` takes ``mu`` as shape parameter.
%(example)s
"""
def _rvs(self, mu):
return mtrand.poisson(mu, self._size)
def _logpmf(self, k, mu):
Pk = k*log(mu)-gamln(k+1) - mu
return Pk
def _pmf(self, k, mu):
return exp(self._logpmf(k, mu))
def _cdf(self, x, mu):
k = floor(x)
return special.pdtr(k,mu)
def _sf(self, x, mu):
k = floor(x)
return special.pdtrc(k,mu)
def _ppf(self, q, mu):
vals = ceil(special.pdtrik(q,mu))
vals1 = vals-1
temp = special.pdtr(vals1,mu)
return where((temp >= q), vals1, vals)
def _stats(self, mu):
var = mu
tmp = asarray(mu)
g1 = 1.0 / tmp
g2 = 1.0 / tmp
return mu, var, g1, g2
poisson = poisson_gen(name="poisson", longname='A Poisson', shapes="mu")
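# A minimal usage sketch for the Poisson instance above:
# >>> poisson.pmf(0, 2.0) # exp(-2), about 0.1353
# >>> poisson.cdf(2, 2.0) # 5*exp(-2), about 0.6767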
## (Planck) Discrete Exponential
class planck_gen(rv_discrete):
"""A Planck discrete exponential random variable.
%(before_notes)s
Notes
-----
The probability mass function for `planck` is::
planck.pmf(k) = (1-exp(-lambda))*exp(-lambda*k)
for ``k*lambda >= 0``.
`planck` takes ``lambda`` as shape parameter.
%(example)s
"""
def _argcheck(self, lambda_):
if (lambda_ > 0):
self.a = 0
self.b = inf
return 1
elif (lambda_ < 0):
self.a = -inf
self.b = 0
return 1
return 0 # lambda_ = 0
def _pmf(self, k, lambda_):
fact = (1-exp(-lambda_))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_):
k = floor(x)
return 1-exp(-lambda_*(k+1))
def _ppf(self, q, lambda_):
vals = ceil(-1.0/lambda_ * log1p(-q)-1)
vals1 = (vals-1).clip(self.a, np.inf)
temp = self._cdf(vals1, lambda_)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_):
mu = 1/(exp(lambda_)-1)
var = exp(-lambda_)/(expm1(-lambda_))**2
g1 = 2*cosh(lambda_/2.0)
g2 = 4+2*cosh(lambda_)
return mu, var, g1, g2
def _entropy(self, lambda_):
l = lambda_
C = (1-exp(-l))
return l*exp(-l)/C - log(C)
planck = planck_gen(name='planck',longname='A discrete exponential ',
shapes="lamda")
class boltzmann_gen(rv_discrete):
"""A Boltzmann (Truncated Discrete Exponential) random variable.
%(before_notes)s
Notes
-----
The probability mass function for `boltzmann` is::
boltzmann.pmf(k) = (1-exp(-lambda))*exp(-lambda*k)/(1-exp(-lambda*N))
for ``k = 0,...,N-1``.
`boltzmann` takes ``lambda`` and ``N`` as shape parameters.
%(example)s
"""
def _pmf(self, k, lambda_, N):
fact = (1-exp(-lambda_))/(1-exp(-lambda_*N))
return fact*exp(-lambda_*k)
def _cdf(self, x, lambda_, N):
k = floor(x)
return (1-exp(-lambda_*(k+1)))/(1-exp(-lambda_*N))
def _ppf(self, q, lambda_, N):
qnew = q*(1-exp(-lambda_*N))
vals = ceil(-1.0/lambda_ * log(1-qnew)-1)
vals1 = (vals-1).clip(0.0, np.inf)
temp = self._cdf(vals1, lambda_, N)
return where(temp >= q, vals1, vals)
def _stats(self, lambda_, N):
z = exp(-lambda_)
zN = exp(-lambda_*N)
mu = z/(1.0-z)-N*zN/(1-zN)
var = z/(1.0-z)**2 - N*N*zN/(1-zN)**2
trm = (1-zN)/(1-z)
trm2 = (z*trm**2 - N*N*zN)
g1 = z*(1+z)*trm**3 - N**3*zN*(1+zN)
g1 = g1 / trm2**(1.5)
g2 = z*(1+4*z+z*z)*trm**4 - N**4 * zN*(1+4*zN+zN*zN)
g2 = g2 / trm2 / trm2
return mu, var, g1, g2
boltzmann = boltzmann_gen(name='boltzmann',longname='A truncated discrete exponential ',
shapes="lamda, N")
## Discrete Uniform
class randint_gen(rv_discrete):
"""A uniform discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `randint` is::
randint.pmf(k) = 1./(max - min)
for ``k = min,...,max-1``.
`randint` takes ``min`` and ``max`` as shape parameters.
%(example)s
"""
def _argcheck(self, min, max):
self.a = min
self.b = max-1
return (max > min)
def _pmf(self, k, min, max):
fact = 1.0 / (max - min)
return fact
def _cdf(self, x, min, max):
k = floor(x)
return (k-min+1)*1.0/(max-min)
def _ppf(self, q, min, max):
vals = ceil(q*(max-min)+min)-1
vals1 = (vals-1).clip(min, max)
temp = self._cdf(vals1, min, max)
return where(temp >= q, vals1, vals)
def _stats(self, min, max):
m2, m1 = asarray(max), asarray(min)
mu = (m2 + m1 - 1.0) / 2
d = m2 - m1
var = (d-1)*(d+1.0)/12.0
g1 = 0.0
g2 = -6.0/5.0*(d*d+1.0)/((d-1.0)*(d+1.0))
return mu, var, g1, g2
def _rvs(self, min, max=None):
"""An array of *size* random integers >= min and < max.
If max is None, then range is >=0 and < min
"""
return mtrand.randint(min, max, self._size)
def _entropy(self, min, max):
return log(max-min)
randint = randint_gen(name='randint',longname='A discrete uniform '\
'(random integer)', shapes="min, max")
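# A minimal usage sketch; note that the support runs from min to max-1:
# >>> randint.pmf(2, 0, 5) # 1/5 = 0.2
# >>> randint.pmf(5, 0, 5) # outside the support, 0.0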
# Zipf distribution
# FIXME: problems sampling.
class zipf_gen(rv_discrete):
"""A Zipf discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `zipf` is::
zipf.pmf(k) = 1/(zeta(a)*k**a)
for ``k >= 1``.
`zipf` takes ``a`` as shape parameter.
%(example)s
"""
def _rvs(self, a):
return mtrand.zipf(a, size=self._size)
def _argcheck(self, a):
return a > 1
def _pmf(self, k, a):
Pk = 1.0 / asarray(special.zeta(a,1) * k**a)
return Pk
def _munp(self, n, a):
return special.zeta(a-n,1) / special.zeta(a,1)
def _stats(self, a):
sv = special.errprint(0)
fac = asarray(special.zeta(a,1))
mu = special.zeta(a-1.0,1)/fac
mu2p = special.zeta(a-2.0,1)/fac
var = mu2p - mu*mu
mu3p = special.zeta(a-3.0,1)/fac
mu3 = mu3p - 3*mu*mu2p + 2*mu**3
g1 = mu3 / asarray(var**1.5)
mu4p = special.zeta(a-4.0,1)/fac
sv = special.errprint(sv)
mu4 = mu4p - 4*mu3p*mu + 6*mu2p*mu*mu - 3*mu**4
g2 = mu4 / asarray(var**2) - 3.0
return mu, var, g1, g2
zipf = zipf_gen(a=1,name='zipf', longname='A Zipf',
shapes="a")
# Discrete Laplacian
class dlaplace_gen(rv_discrete):
"""A Laplacian discrete random variable.
%(before_notes)s
Notes
-----
The probability mass function for `dlaplace` is::
dlaplace.pmf(k) = tanh(a/2) * exp(-a*abs(k))
for ``a > 0``.
`dlaplace` takes ``a`` as shape parameter.
%(example)s
"""
def _pmf(self, k, a):
return tanh(a/2.0)*exp(-a*abs(k))
def _cdf(self, x, a):
k = floor(x)
ind = (k >= 0)
const = exp(a)+1
return where(ind, 1.0-exp(-a*k)/const, exp(a*(k+1))/const)
def _ppf(self, q, a):
const = 1.0/(1+exp(-a))
cons2 = 1+exp(a)
ind = q < const
vals = ceil(where(ind, log(q*cons2)/a-1, -log((1-q)*cons2)/a))
vals1 = (vals-1)
temp = self._cdf(vals1, a)
return where(temp >= q, vals1, vals)
def _stats_skip(self, a):
# variance mu2 does not agree with sample variance,
# nor with direct calculation using pmf
# remove for now because generic calculation works
# except it does not show nice zeros for mean and skew(?)
ea = exp(-a)
e2a = exp(-2*a)
e3a = exp(-3*a)
e4a = exp(-4*a)
mu2 = 2* (e2a + ea) / (1-ea)**3.0
mu4 = 2* (e4a + 11*e3a + 11*e2a + ea) / (1-ea)**5.0
return 0.0, mu2, 0.0, mu4 / mu2**2.0 - 3
def _entropy(self, a):
return a / sinh(a) - log(tanh(a/2.0))
dlaplace = dlaplace_gen(a=-inf,
name='dlaplace', longname='A discrete Laplacian',
shapes="a")
class skellam_gen(rv_discrete):
"""A Skellam discrete random variable.
%(before_notes)s
Notes
-----
Probability distribution of the difference of two correlated or
uncorrelated Poisson random variables.
Let k1 and k2 be two Poisson-distributed r.v. with expected values
lam1 and lam2. Then, ``k1 - k2`` follows a Skellam distribution with
parameters ``mu1 = lam1 - rho*sqrt(lam1*lam2)`` and
``mu2 = lam2 - rho*sqrt(lam1*lam2)``, where rho is the correlation
coefficient between k1 and k2. If the two Poisson-distributed r.v.
are independent then ``rho = 0``.
Parameters mu1 and mu2 must be strictly positive.
For details see: http://en.wikipedia.org/wiki/Skellam_distribution
`skellam` takes ``mu1`` and ``mu2`` as shape parameters.
%(example)s
"""
def _rvs(self, mu1, mu2):
n = self._size
return np.random.poisson(mu1, n)-np.random.poisson(mu2, n)
def _pmf(self, x, mu1, mu2):
px = np.where(x < 0, ncx2.pdf(2*mu2, 2*(1-x), 2*mu1)*2,
ncx2.pdf(2*mu1, 2*(x+1), 2*mu2)*2)
#ncx2.pdf() returns nan's for extremely low probabilities
return px
def _cdf(self, x, mu1, mu2):
x = np.floor(x)
px = np.where(x < 0, ncx2.cdf(2*mu2, -2*x, 2*mu1),
1-ncx2.cdf(2*mu1, 2*(x+1), 2*mu2))
return px
# enable later
## def _cf(self, w, mu1, mu2):
## # characteristic function
## poisscf = poisson._cf
## return poisscf(w, mu1) * poisscf(-w, mu2)
def _stats(self, mu1, mu2):
mean = mu1 - mu2
var = mu1 + mu2
g1 = mean / np.sqrt((var)**3)
g2 = 1 / var
return mean, var, g1, g2
skellam = skellam_gen(a=-np.inf, name="skellam", longname='A Skellam',
shapes="mu1,mu2")
| bsd-3-clause |
zfrenchee/pandas | pandas/tests/indexes/datetimes/test_arithmetic.py | 1 | 21153 | # -*- coding: utf-8 -*-
import warnings
from datetime import datetime, timedelta
import pytest
import numpy as np
import pandas as pd
import pandas.util.testing as tm
from pandas.errors import PerformanceWarning
from pandas import (Timestamp, Timedelta, Series,
DatetimeIndex, TimedeltaIndex,
date_range)
@pytest.fixture(params=[None, 'UTC', 'Asia/Tokyo',
'US/Eastern', 'dateutil/Asia/Singapore',
'dateutil/US/Pacific'])
def tz(request):
return request.param
@pytest.fixture(params=[pd.offsets.Hour(2), timedelta(hours=2),
np.timedelta64(2, 'h'), Timedelta(hours=2)],
ids=str)
def delta(request):
# Several ways of representing two hours
return request.param
@pytest.fixture(
params=[
datetime(2011, 1, 1),
DatetimeIndex(['2011-01-01', '2011-01-02']),
DatetimeIndex(['2011-01-01', '2011-01-02']).tz_localize('US/Eastern'),
np.datetime64('2011-01-01'),
Timestamp('2011-01-01')],
ids=lambda x: type(x).__name__)
def addend(request):
return request.param
class TestDatetimeIndexArithmetic(object):
def test_dti_add_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
idx + Timestamp('2011-01-01')
def test_dti_radd_timestamp_raises(self):
idx = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = "cannot add DatetimeIndex and Timestamp"
with tm.assert_raises_regex(TypeError, msg):
Timestamp('2011-01-01') + idx
# -------------------------------------------------------------
# Binary operations DatetimeIndex and int
def test_dti_add_int(self, tz, one):
# Variants of `one` for #19012
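# `one` is assumed to come from a shared conftest fixture (not shown here)
# that parametrizes the scalar 1 over plain int and the numpy integer/float
# dtypes exercised in GH#19012.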
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng + one
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 10:00', freq='H',
periods=10, tz=tz)
rng += one
tm.assert_index_equal(rng, expected)
def test_dti_sub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
result = rng - one
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_isub_int(self, tz, one):
rng = pd.date_range('2000-01-01 09:00', freq='H',
periods=10, tz=tz)
expected = pd.date_range('2000-01-01 08:00', freq='H',
periods=10, tz=tz)
rng -= one
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and timedelta-like
def test_dti_add_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
result = rng + delta
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
tm.assert_index_equal(result, expected)
def test_dti_iadd_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('2000-01-01 02:00',
'2000-02-01 02:00', tz=tz)
rng += delta
tm.assert_index_equal(rng, expected)
def test_dti_sub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
result = rng - delta
tm.assert_index_equal(result, expected)
def test_dti_isub_timedeltalike(self, tz, delta):
rng = pd.date_range('2000-01-01', '2000-02-01', tz=tz)
expected = pd.date_range('1999-12-31 22:00',
'2000-01-31 22:00', tz=tz)
rng -= delta
tm.assert_index_equal(rng, expected)
# -------------------------------------------------------------
# Binary operations DatetimeIndex and TimedeltaIndex/array
def test_dti_add_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # add with TimedeltaIndex
result = dti + tdi
tm.assert_index_equal(result, expected)
result = tdi + dti
tm.assert_index_equal(result, expected)
# add with timedelta64 array
result = dti + tdi.values
tm.assert_index_equal(result, expected)
result = tdi.values + dti
tm.assert_index_equal(result, expected)
def test_dti_iadd_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz)
        # iadd with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
# iadd with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result += tdi.values
tm.assert_index_equal(result, expected)
result = pd.timedelta_range('0 days', periods=10)
result += dti
tm.assert_index_equal(result, expected)
def test_dti_sub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# sub with TimedeltaIndex
result = dti - tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi - dti
# sub with timedelta64 array
result = dti - tdi.values
tm.assert_index_equal(result, expected)
msg = 'cannot perform __neg__ with this index type:'
with tm.assert_raises_regex(TypeError, msg):
tdi.values - dti
def test_dti_isub_tdi(self, tz):
# GH 17558
dti = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
tdi = pd.timedelta_range('0 days', periods=10)
expected = pd.date_range('2017-01-01', periods=10, tz=tz, freq='-1D')
# isub with TimedeltaIndex
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi
tm.assert_index_equal(result, expected)
msg = 'cannot subtract TimedeltaIndex and DatetimeIndex'
with tm.assert_raises_regex(TypeError, msg):
tdi -= dti
# isub with timedelta64 array
result = DatetimeIndex([Timestamp('2017-01-01', tz=tz)] * 10)
result -= tdi.values
tm.assert_index_equal(result, expected)
msg = '|'.join(['cannot perform __neg__ with this index type:',
'ufunc subtract cannot use operands with types'])
with tm.assert_raises_regex(TypeError, msg):
tdi.values -= dti
# -------------------------------------------------------------
# Binary Operations DatetimeIndex and datetime-like
# TODO: A couple other tests belong in this section. Move them in
# A PR where there isn't already a giant diff.
def test_add_datetimelike_and_dti(self, addend):
# GH#9631
dti = DatetimeIndex(['2011-01-01', '2011-01-02'])
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti
def test_add_datetimelike_and_dti_tz(self, addend):
# GH#9631
dti_tz = DatetimeIndex(['2011-01-01',
'2011-01-02']).tz_localize('US/Eastern')
msg = 'cannot add DatetimeIndex and {0}'.format(
type(addend).__name__)
with tm.assert_raises_regex(TypeError, msg):
dti_tz + addend
with tm.assert_raises_regex(TypeError, msg):
addend + dti_tz
# -------------------------------------------------------------
def test_sub_dti_dti(self):
# previously performed setop (deprecated in 0.16.0), now changed to
# return subtraction -> TimeDeltaIndex (GH ...)
dti = date_range('20130101', periods=3)
dti_tz = date_range('20130101', periods=3).tz_localize('US/Eastern')
dti_tz2 = date_range('20130101', periods=3).tz_localize('UTC')
expected = TimedeltaIndex([0, 0, 0])
result = dti - dti
tm.assert_index_equal(result, expected)
result = dti_tz - dti_tz
tm.assert_index_equal(result, expected)
with pytest.raises(TypeError):
dti_tz - dti
with pytest.raises(TypeError):
dti - dti_tz
with pytest.raises(TypeError):
dti_tz - dti_tz2
# isub
dti -= dti
tm.assert_index_equal(dti, expected)
# different length raises ValueError
dti1 = date_range('20130101', periods=3)
dti2 = date_range('20130101', periods=4)
with pytest.raises(ValueError):
dti1 - dti2
# NaN propagation
dti1 = DatetimeIndex(['2012-01-01', np.nan, '2012-01-03'])
dti2 = DatetimeIndex(['2012-01-02', '2012-01-03', np.nan])
expected = TimedeltaIndex(['1 days', np.nan, np.nan])
result = dti2 - dti1
tm.assert_index_equal(result, expected)
def test_sub_period(self):
# GH 13078
# not supported, check TypeError
p = pd.Period('2011-01-01', freq='D')
for freq in [None, 'D']:
idx = pd.DatetimeIndex(['2011-01-01', '2011-01-02'], freq=freq)
with pytest.raises(TypeError):
idx - p
with pytest.raises(TypeError):
p - idx
def test_ufunc_coercions(self):
idx = date_range('2011-01-01', periods=3, freq='2D', name='x')
delta = np.timedelta64(1, 'D')
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2011-01-02', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = date_range('2010-12-31', periods=3, freq='2D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '2D'
delta = np.array([np.timedelta64(1, 'D'), np.timedelta64(2, 'D'),
np.timedelta64(3, 'D')])
for result in [idx + delta, np.add(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2011-01-02', '2011-01-05', '2011-01-08'],
freq='3D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == '3D'
for result in [idx - delta, np.subtract(idx, delta)]:
assert isinstance(result, DatetimeIndex)
exp = DatetimeIndex(['2010-12-31', '2011-01-01', '2011-01-02'],
freq='D', name='x')
tm.assert_index_equal(result, exp)
assert result.freq == 'D'
def test_datetimeindex_sub_timestamp_overflow(self):
dtimax = pd.to_datetime(['now', pd.Timestamp.max])
dtimin = pd.to_datetime(['now', pd.Timestamp.min])
tsneg = Timestamp('1950-01-01')
ts_neg_variants = [tsneg,
tsneg.to_pydatetime(),
tsneg.to_datetime64().astype('datetime64[ns]'),
tsneg.to_datetime64().astype('datetime64[D]')]
tspos = Timestamp('1980-01-01')
ts_pos_variants = [tspos,
tspos.to_pydatetime(),
tspos.to_datetime64().astype('datetime64[ns]'),
tspos.to_datetime64().astype('datetime64[D]')]
for variant in ts_neg_variants:
with pytest.raises(OverflowError):
dtimax - variant
expected = pd.Timestamp.max.value - tspos.value
for variant in ts_pos_variants:
res = dtimax - variant
assert res[1].value == expected
expected = pd.Timestamp.min.value - tsneg.value
for variant in ts_neg_variants:
res = dtimin - variant
assert res[1].value == expected
for variant in ts_pos_variants:
with pytest.raises(OverflowError):
dtimin - variant
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_add_offset_array(self, tz, box):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
expected = DatetimeIndex([dti[n] + other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_index_equal(res2, expected)
@pytest.mark.parametrize('box', [np.array, pd.Index])
def test_dti_sub_offset_array(self, tz, box):
# GH#18824
dti = pd.date_range('2017-01-01', periods=2, tz=tz)
other = box([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)])
with tm.assert_produces_warning(PerformanceWarning):
res = dti - other
expected = DatetimeIndex([dti[n] - other[n] for n in range(len(dti))],
name=dti.name, freq='infer')
tm.assert_index_equal(res, expected)
@pytest.mark.parametrize('names', [(None, None, None),
('foo', 'bar', None),
('foo', 'foo', 'foo')])
def test_dti_with_offset_series(self, tz, names):
# GH#18849
dti = pd.date_range('2017-01-01', periods=2, tz=tz, name=names[0])
other = Series([pd.offsets.MonthEnd(), pd.offsets.Day(n=2)],
name=names[1])
expected_add = Series([dti[n] + other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res = dti + other
tm.assert_series_equal(res, expected_add)
with tm.assert_produces_warning(PerformanceWarning):
res2 = other + dti
tm.assert_series_equal(res2, expected_add)
expected_sub = Series([dti[n] - other[n] for n in range(len(dti))],
name=names[2])
with tm.assert_produces_warning(PerformanceWarning):
res3 = dti - other
tm.assert_series_equal(res3, expected_sub)
# GH 10699
@pytest.mark.parametrize('klass,assert_func', zip([Series, DatetimeIndex],
[tm.assert_series_equal,
tm.assert_index_equal]))
def test_datetime64_with_DateOffset(klass, assert_func):
s = klass(date_range('2000-01-01', '2000-01-31'), name='a')
result = s + pd.DateOffset(years=1)
result2 = pd.DateOffset(years=1) + s
exp = klass(date_range('2001-01-01', '2001-01-31'), name='a')
assert_func(result, exp)
assert_func(result2, exp)
result = s - pd.DateOffset(years=1)
exp = klass(date_range('1999-01-01', '1999-01-31'), name='a')
assert_func(result, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.Day()
result2 = pd.offsets.Day() + s
exp = klass([Timestamp('2000-01-16 00:15:00', tz='US/Central'),
Timestamp('2000-02-16', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
s = klass([Timestamp('2000-01-15 00:15:00', tz='US/Central'),
pd.Timestamp('2000-02-15', tz='US/Central')], name='a')
result = s + pd.offsets.MonthEnd()
result2 = pd.offsets.MonthEnd() + s
exp = klass([Timestamp('2000-01-31 00:15:00', tz='US/Central'),
Timestamp('2000-02-29', tz='US/Central')], name='a')
assert_func(result, exp)
assert_func(result2, exp)
# array of offsets - valid for Series only
if klass is Series:
with tm.assert_produces_warning(PerformanceWarning):
s = klass([Timestamp('2000-1-1'), Timestamp('2000-2-1')])
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.MonthEnd()])
exp = klass([Timestamp('2001-1-1'), Timestamp('2000-2-29')
])
assert_func(result, exp)
# same offset
result = s + Series([pd.offsets.DateOffset(years=1),
pd.offsets.DateOffset(years=1)])
exp = klass([Timestamp('2001-1-1'), Timestamp('2001-2-1')])
assert_func(result, exp)
s = klass([Timestamp('2000-01-05 00:15:00'),
Timestamp('2000-01-31 00:23:00'),
Timestamp('2000-01-01'),
Timestamp('2000-03-31'),
Timestamp('2000-02-29'),
Timestamp('2000-12-31'),
Timestamp('2000-05-15'),
Timestamp('2001-06-15')])
# DateOffset relativedelta fastpath
relative_kwargs = [('years', 2), ('months', 5), ('days', 3),
('hours', 5), ('minutes', 10), ('seconds', 2),
('microseconds', 5)]
for i, kwd in enumerate(relative_kwargs):
op = pd.DateOffset(**dict([kwd]))
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
op = pd.DateOffset(**dict(relative_kwargs[:i + 1]))
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
# assert these are equal on a piecewise basis
offsets = ['YearBegin', ('YearBegin', {'month': 5}),
'YearEnd', ('YearEnd', {'month': 5}),
'MonthBegin', 'MonthEnd',
'SemiMonthEnd', 'SemiMonthBegin',
'Week', ('Week', {'weekday': 3}),
'BusinessDay', 'BDay', 'QuarterEnd', 'QuarterBegin',
'CustomBusinessDay', 'CDay', 'CBMonthEnd',
'CBMonthBegin', 'BMonthBegin', 'BMonthEnd',
'BusinessHour', 'BYearBegin', 'BYearEnd',
'BQuarterBegin', ('LastWeekOfMonth', {'weekday': 2}),
('FY5253Quarter', {'qtr_with_extra_week': 1,
'startingMonth': 1,
'weekday': 2,
'variation': 'nearest'}),
('FY5253', {'weekday': 0,
'startingMonth': 2,
'variation':
'nearest'}),
('WeekOfMonth', {'weekday': 2,
'week': 2}),
'Easter', ('DateOffset', {'day': 4}),
('DateOffset', {'month': 5})]
with warnings.catch_warnings(record=True):
for normalize in (True, False):
for do in offsets:
                if isinstance(do, tuple):
                    do, kwargs = do
                else:
                    kwargs = {}
for n in [0, 5]:
if (do in ['WeekOfMonth', 'LastWeekOfMonth',
'FY5253Quarter', 'FY5253'] and n == 0):
continue
op = getattr(pd.offsets, do)(n,
normalize=normalize,
**kwargs)
assert_func(klass([x + op for x in s]), s + op)
assert_func(klass([x - op for x in s]), s - op)
assert_func(klass([op + x for x in s]), op + s)
| bsd-3-clause |
teonlamont/mne-python | mne/time_frequency/tests/test_tfr.py | 3 | 25782 | import numpy as np
import os.path as op
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
import pytest
import mne
from mne import Epochs, read_events, pick_types, create_info, EpochsArray
from mne.io import read_raw_fif
from mne.utils import _TempDir, run_tests_if_main, requires_h5py, grand_average
from mne.time_frequency.tfr import (morlet, tfr_morlet, _make_dpss,
tfr_multitaper, AverageTFR, read_tfrs,
write_tfrs, combine_tfr, cwt, _compute_tfr,
EpochsTFR)
from mne.time_frequency import tfr_array_multitaper, tfr_array_morlet
from mne.viz.utils import _fake_click
from itertools import product
import matplotlib
matplotlib.use('Agg') # for testing don't use X server
data_path = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(data_path, 'test_raw.fif')
event_fname = op.join(data_path, 'test-eve.fif')
raw_ctf_fname = op.join(data_path, 'test_ctf_raw.fif')
def test_tfr_ctf():
"""Test that TFRs can be calculated on CTF data."""
raw = read_raw_fif(raw_ctf_fname).crop(0, 1)
raw.apply_gradient_compensation(3)
events = mne.make_fixed_length_events(raw, duration=0.5)
epochs = mne.Epochs(raw, events)
for method in (tfr_multitaper, tfr_morlet):
method(epochs, [10], 1) # smoke test
def test_morlet():
"""Test morlet with and without zero mean."""
Wz = morlet(1000, [10], 2., zero_mean=True)
W = morlet(1000, [10], 2., zero_mean=False)
assert (np.abs(np.mean(np.real(Wz[0]))) < 1e-5)
assert (np.abs(np.mean(np.real(W[0]))) > 1e-3)
def test_time_frequency():
"""Test time-frequency transform (PSD and ITC)."""
# Set parameters
event_id = 1
tmin = -0.2
tmax = 0.498 # Allows exhaustive decimation testing
# Setup for reading the raw data
raw = read_raw_fif(raw_fname)
events = read_events(event_fname)
include = []
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False,
stim=False, include=include, exclude=exclude)
picks = picks[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
times = epochs.times
nave = len(data)
epochs_nopicks = Epochs(raw, events, event_id, tmin, tmax)
freqs = np.arange(6, 20, 5) # define frequencies of interest
n_cycles = freqs / 4.
# Test first with a single epoch
power, itc = tfr_morlet(epochs[0], freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True)
# Now compute evoked
evoked = epochs.average()
power_evoked = tfr_morlet(evoked, freqs, n_cycles, use_fft=True,
return_itc=False)
pytest.raises(ValueError, tfr_morlet, evoked, freqs, 1., return_itc=True)
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True)
power_, itc_ = tfr_morlet(epochs, freqs=freqs, n_cycles=n_cycles,
use_fft=True, return_itc=True, decim=slice(0, 2))
# Test picks argument and average parameter
pytest.raises(ValueError, tfr_morlet, epochs, freqs=freqs,
n_cycles=n_cycles, return_itc=True, average=False)
power_picks, itc_picks = \
tfr_morlet(epochs_nopicks,
freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=True, picks=picks, average=True)
epochs_power_picks = \
tfr_morlet(epochs_nopicks,
freqs=freqs, n_cycles=n_cycles, use_fft=True,
return_itc=False, picks=picks, average=False)
power_picks_avg = epochs_power_picks.average()
# the actual data arrays here are equivalent, too...
assert_array_almost_equal(power.data, power_picks.data)
assert_array_almost_equal(power.data, power_picks_avg.data)
assert_array_almost_equal(itc.data, itc_picks.data)
assert_array_almost_equal(power.data, power_evoked.data)
# complex output
pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles,
return_itc=False, average=True, output="complex")
pytest.raises(ValueError, tfr_morlet, epochs, freqs, n_cycles,
output="complex", average=False, return_itc=True)
epochs_power_complex = tfr_morlet(epochs, freqs, n_cycles,
output="complex", average=False,
return_itc=False)
epochs_power_2 = abs(epochs_power_complex)
epochs_power_3 = epochs_power_2.copy()
epochs_power_3.data[:] = np.inf # test that it's actually copied
assert_array_almost_equal(epochs_power_2.data, epochs_power_picks.data)
power_2 = epochs_power_2.average()
assert_array_almost_equal(power_2.data, power.data)
print(itc) # test repr
print(itc.ch_names) # test property
itc += power # test add
itc -= power # test sub
power = power.apply_baseline(baseline=(-0.1, 0), mode='logratio')
assert 'meg' in power
assert 'grad' in power
assert 'mag' not in power
assert 'eeg' not in power
assert_equal(power.nave, nave)
assert_equal(itc.nave, nave)
assert (power.data.shape == (len(picks), len(freqs), len(times)))
assert (power.data.shape == itc.data.shape)
assert (power_.data.shape == (len(picks), len(freqs), 2))
assert (power_.data.shape == itc_.data.shape)
assert (np.sum(itc.data >= 1) == 0)
assert (np.sum(itc.data <= 0) == 0)
# grand average
itc2 = itc.copy()
itc2.info['bads'] = [itc2.ch_names[0]] # test channel drop
gave = grand_average([itc2, itc])
assert_equal(gave.data.shape, (itc2.data.shape[0] - 1,
itc2.data.shape[1],
itc2.data.shape[2]))
assert_equal(itc2.ch_names[1:], gave.ch_names)
assert_equal(gave.nave, 2)
itc2.drop_channels(itc2.info["bads"])
assert_array_almost_equal(gave.data, itc2.data)
itc2.data = np.ones(itc2.data.shape)
itc.data = np.zeros(itc.data.shape)
itc2.nave = 2
itc.nave = 1
itc.drop_channels([itc.ch_names[0]])
combined_itc = combine_tfr([itc2, itc])
assert_array_almost_equal(combined_itc.data,
np.ones(combined_itc.data.shape) * 2 / 3)
# more tests
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2, use_fft=False,
return_itc=True)
assert (power.data.shape == (len(picks), len(freqs), len(times)))
assert (power.data.shape == itc.data.shape)
assert (np.sum(itc.data >= 1) == 0)
assert (np.sum(itc.data <= 0) == 0)
tfr = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2, average=False,
return_itc=False).data[0]
assert (tfr.shape == (len(picks), len(freqs), len(times)))
tfr2 = tfr_morlet(epochs[0], freqs, use_fft=True, n_cycles=2,
decim=slice(0, 2), average=False,
return_itc=False).data[0]
assert (tfr2.shape == (len(picks), len(freqs), 2))
single_power = tfr_morlet(epochs, freqs, 2, average=False,
return_itc=False).data
single_power2 = tfr_morlet(epochs, freqs, 2, decim=slice(0, 2),
average=False, return_itc=False).data
single_power3 = tfr_morlet(epochs, freqs, 2, decim=slice(1, 3),
average=False, return_itc=False).data
single_power4 = tfr_morlet(epochs, freqs, 2, decim=slice(2, 4),
average=False, return_itc=False).data
assert_array_almost_equal(np.mean(single_power, axis=0), power.data)
assert_array_almost_equal(np.mean(single_power2, axis=0),
power.data[:, :, :2])
assert_array_almost_equal(np.mean(single_power3, axis=0),
power.data[:, :, 1:3])
assert_array_almost_equal(np.mean(single_power4, axis=0),
power.data[:, :, 2:4])
power_pick = power.pick_channels(power.ch_names[:10:2])
assert_equal(len(power_pick.ch_names), len(power.ch_names[:10:2]))
assert_equal(power_pick.data.shape[0], len(power.ch_names[:10:2]))
power_drop = power.drop_channels(power.ch_names[1:10:2])
assert_equal(power_drop.ch_names, power_pick.ch_names)
assert_equal(power_pick.data.shape[0], len(power_drop.ch_names))
mne.equalize_channels([power_pick, power_drop])
assert_equal(power_pick.ch_names, power_drop.ch_names)
assert_equal(power_pick.data.shape, power_drop.data.shape)
# Test decimation:
# 2: multiple of len(times) even
# 3: multiple odd
# 8: not multiple, even
# 9: not multiple, odd
for decim in [2, 3, 8, 9]:
for use_fft in [True, False]:
power, itc = tfr_morlet(epochs, freqs=freqs, n_cycles=2,
use_fft=use_fft, return_itc=True,
decim=decim)
assert_equal(power.data.shape[2],
np.ceil(float(len(times)) / decim))
freqs = list(range(50, 55))
decim = 2
_, n_chan, n_time = data.shape
tfr = tfr_morlet(epochs[0], freqs, 2., decim=decim, average=False,
return_itc=False).data[0]
assert_equal(tfr.shape, (n_chan, len(freqs), n_time // decim))
# Test cwt modes
Ws = morlet(512, [10, 20], n_cycles=2)
pytest.raises(ValueError, cwt, data[0, :, :], Ws, mode='foo')
for use_fft in [True, False]:
for mode in ['same', 'valid', 'full']:
cwt(data[0], Ws, use_fft=use_fft, mode=mode)
# Test decim parameter checks
pytest.raises(TypeError, tfr_morlet, epochs, freqs=freqs,
n_cycles=n_cycles, use_fft=True, return_itc=True,
decim='decim')
# When convolving in time, wavelets must not be longer than the data
pytest.raises(ValueError, cwt, data[0, :, :Ws[0].size - 1], Ws,
use_fft=False)
with pytest.warns(UserWarning, match='one of the wavelets is longer'):
cwt(data[0, :, :Ws[0].size - 1], Ws, use_fft=True)
# Check for off-by-one errors when using wavelets with an even number of
# samples
psd = cwt(data[0], [Ws[0][:-1]], use_fft=False, mode='full')
assert_equal(psd.shape, (2, 1, 420))
def test_dpsswavelet():
"""Test DPSS tapers."""
freqs = np.arange(5, 25, 3)
Ws = _make_dpss(1000, freqs=freqs, n_cycles=freqs / 2., time_bandwidth=4.0,
zero_mean=True)
assert (len(Ws) == 3) # 3 tapers expected
# Check that zero mean is true
assert (np.abs(np.mean(np.real(Ws[0][0]))) < 1e-5)
assert (len(Ws[0]) == len(freqs)) # As many wavelets as asked for
@pytest.mark.slowtest
def test_tfr_multitaper():
"""Test tfr_multitaper."""
sfreq = 200.0
ch_names = ['SIM0001', 'SIM0002']
ch_types = ['grad', 'grad']
info = create_info(ch_names=ch_names, sfreq=sfreq, ch_types=ch_types)
n_times = int(sfreq) # Second long epochs
n_epochs = 3
seed = 42
rng = np.random.RandomState(seed)
noise = 0.1 * rng.randn(n_epochs, len(ch_names), n_times)
t = np.arange(n_times, dtype=np.float) / sfreq
signal = np.sin(np.pi * 2. * 50. * t) # 50 Hz sinusoid signal
signal[np.logical_or(t < 0.45, t > 0.55)] = 0. # Hard windowing
on_time = np.logical_and(t >= 0.45, t <= 0.55)
signal[on_time] *= np.hanning(on_time.sum()) # Ramping
dat = noise + signal
reject = dict(grad=4000.)
events = np.empty((n_epochs, 3), int)
first_event_sample = 100
event_id = dict(sin50hz=1)
for k in range(n_epochs):
events[k, :] = first_event_sample + k * n_times, 0, event_id['sin50hz']
epochs = EpochsArray(data=dat, info=info, events=events, event_id=event_id,
reject=reject)
freqs = np.arange(35, 70, 5, dtype=np.float)
power, itc = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
time_bandwidth=4.0)
power2, itc2 = tfr_multitaper(epochs, freqs=freqs, n_cycles=freqs / 2.,
time_bandwidth=4.0, decim=slice(0, 2))
picks = np.arange(len(ch_names))
power_picks, itc_picks = tfr_multitaper(epochs, freqs=freqs,
n_cycles=freqs / 2.,
time_bandwidth=4.0, picks=picks)
power_epochs = tfr_multitaper(epochs, freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0,
return_itc=False, average=False)
power_averaged = power_epochs.average()
power_evoked = tfr_multitaper(epochs.average(), freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0,
return_itc=False, average=False).average()
print(power_evoked) # test repr for EpochsTFR
# Test channel picking
power_epochs_picked = power_epochs.copy().drop_channels(['SIM0002'])
assert_equal(power_epochs_picked.data.shape, (3, 1, 7, 200))
assert_equal(power_epochs_picked.ch_names, ['SIM0001'])
pytest.raises(ValueError, tfr_multitaper, epochs,
freqs=freqs, n_cycles=freqs / 2.,
return_itc=True, average=False)
# test picks argument
assert_array_almost_equal(power.data, power_picks.data)
assert_array_almost_equal(power.data, power_averaged.data)
assert_array_almost_equal(power.times, power_epochs.times)
assert_array_almost_equal(power.times, power_averaged.times)
assert_equal(power.nave, power_averaged.nave)
assert_equal(power_epochs.data.shape, (3, 2, 7, 200))
assert_array_almost_equal(itc.data, itc_picks.data)
# one is squared magnitude of the average (evoked) and
# the other is average of the squared magnitudes (epochs PSD)
# so values shouldn't match, but shapes should
assert_array_equal(power.data.shape, power_evoked.data.shape)
pytest.raises(AssertionError, assert_array_almost_equal,
power.data, power_evoked.data)
tmax = t[np.argmax(itc.data[0, freqs == 50, :])]
fmax = freqs[np.argmax(power.data[1, :, t == 0.5])]
assert (tmax > 0.3 and tmax < 0.7)
assert not np.any(itc.data < 0.)
assert (fmax > 40 and fmax < 60)
assert (power2.data.shape == (len(picks), len(freqs), 2))
assert (power2.data.shape == itc2.data.shape)
# Test decim parameter checks and compatibility between wavelets length
# and instance length in the time dimension.
pytest.raises(TypeError, tfr_multitaper, epochs, freqs=freqs,
n_cycles=freqs / 2., time_bandwidth=4.0, decim=(1,))
pytest.raises(ValueError, tfr_multitaper, epochs, freqs=freqs,
n_cycles=1000, time_bandwidth=4.0)
def test_crop():
"""Test TFR cropping."""
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.crop(0.2, 0.3)
assert_array_equal(tfr.times, [0.2, 0.3])
assert_equal(tfr.data.shape[-1], 2)
@requires_h5py
def test_io():
"""Test TFR IO capacities."""
tempdir = _TempDir()
fname = op.join(tempdir, 'test-tfr.h5')
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.save(fname)
tfr2 = read_tfrs(fname, condition='test')
assert_array_equal(tfr.data, tfr2.data)
assert_array_equal(tfr.times, tfr2.times)
assert_array_equal(tfr.freqs, tfr2.freqs)
assert_equal(tfr.comment, tfr2.comment)
assert_equal(tfr.nave, tfr2.nave)
pytest.raises(IOError, tfr.save, fname)
tfr.comment = None
tfr.save(fname, overwrite=True)
assert_equal(read_tfrs(fname, condition=0).comment, tfr.comment)
tfr.comment = 'test-A'
tfr2.comment = 'test-B'
fname = op.join(tempdir, 'test2-tfr.h5')
write_tfrs(fname, [tfr, tfr2])
tfr3 = read_tfrs(fname, condition='test-A')
assert_equal(tfr.comment, tfr3.comment)
assert (isinstance(tfr.info, mne.Info))
tfrs = read_tfrs(fname, condition=None)
assert_equal(len(tfrs), 2)
tfr4 = tfrs[1]
assert_equal(tfr2.comment, tfr4.comment)
pytest.raises(ValueError, read_tfrs, fname, condition='nonono')
# Test save of EpochsTFR.
data = np.zeros((5, 3, 2, 3))
tfr = EpochsTFR(info, data=data, times=times, freqs=freqs,
comment='test', method='crazy-tfr')
tfr.save(fname, True)
read_tfr = read_tfrs(fname)[0]
assert_array_equal(tfr.data, read_tfr.data)
def test_plot():
"""Test TFR plotting."""
import matplotlib.pyplot as plt
data = np.zeros((3, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(['MEG 001', 'MEG 002', 'MEG 003'], 1000.,
['mag', 'mag', 'mag'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr.plot([1, 2], title='title', colorbar=False,
mask=np.ones(tfr.data.shape[1:], bool))
plt.close('all')
ax = plt.subplot2grid((2, 2), (0, 0))
ax2 = plt.subplot2grid((2, 2), (1, 1))
ax3 = plt.subplot2grid((2, 2), (0, 1))
tfr.plot(picks=[0, 1, 2], axes=[ax, ax2, ax3])
plt.close('all')
tfr.plot([1, 2], title='title', colorbar=False, exclude='bads')
plt.close('all')
tfr.plot_topo(picks=[1, 2])
plt.close('all')
fig = tfr.plot(picks=[1], cmap='RdBu_r') # interactive mode on by default
fig.canvas.key_press_event('up')
fig.canvas.key_press_event(' ')
fig.canvas.key_press_event('down')
cbar = fig.get_axes()[0].CB # Fake dragging with mouse.
ax = cbar.cbar.ax
_fake_click(fig, ax, (0.1, 0.1))
_fake_click(fig, ax, (0.1, 0.2), kind='motion')
_fake_click(fig, ax, (0.1, 0.3), kind='release')
_fake_click(fig, ax, (0.1, 0.1), button=3)
_fake_click(fig, ax, (0.1, 0.2), button=3, kind='motion')
_fake_click(fig, ax, (0.1, 0.3), kind='release')
fig.canvas.scroll_event(0.5, 0.5, -0.5) # scroll down
fig.canvas.scroll_event(0.5, 0.5, 0.5) # scroll up
plt.close('all')
def test_plot_joint():
"""Test TFR joint plotting."""
import matplotlib.pyplot as plt
raw = read_raw_fif(raw_fname)
times = np.linspace(-0.1, 0.1, 200)
n_freqs = 3
nave = 1
rng = np.random.RandomState(42)
data = rng.randn(len(raw.ch_names), n_freqs, len(times))
tfr = AverageTFR(raw.info, data, times, np.arange(n_freqs), nave)
topomap_args = {'res': 8, 'contours': 0, 'sensors': False}
for combine in ('mean', 'rms', None):
tfr.plot_joint(title='auto', colorbar=True,
combine=combine, topomap_args=topomap_args)
plt.close('all')
# check various timefreqs
for timefreqs in (
{(tfr.times[0], tfr.freqs[1]): (0.1, 0.5),
(tfr.times[-1], tfr.freqs[-1]): (0.2, 0.6)},
[(tfr.times[1], tfr.freqs[1])]):
tfr.plot_joint(timefreqs=timefreqs, topomap_args=topomap_args)
plt.close('all')
# test bad timefreqs
timefreqs = ([(-100, 1)], tfr.times[1], [1],
[(tfr.times[1], tfr.freqs[1], tfr.freqs[1])])
for these_timefreqs in timefreqs:
pytest.raises(ValueError, tfr.plot_joint, these_timefreqs)
# test that the object is not internally modified
tfr_orig = tfr.copy()
tfr.plot_joint(baseline=(0, None), exclude=[tfr.ch_names[0]],
topomap_args=topomap_args)
plt.close('all')
assert_array_equal(tfr.data, tfr_orig.data)
assert (set(tfr.ch_names) == set(tfr_orig.ch_names))
assert (set(tfr.times) == set(tfr_orig.times))
def test_add_channels():
"""Test tfr splitting / re-appending channel types."""
data = np.zeros((6, 2, 3))
times = np.array([.1, .2, .3])
freqs = np.array([.10, .20])
info = mne.create_info(
['MEG 001', 'MEG 002', 'MEG 003', 'EEG 001', 'EEG 002', 'STIM 001'],
1000., ['mag', 'mag', 'mag', 'eeg', 'eeg', 'stim'])
tfr = AverageTFR(info, data=data, times=times, freqs=freqs,
nave=20, comment='test', method='crazy-tfr')
tfr_eeg = tfr.copy().pick_types(meg=False, eeg=True)
tfr_meg = tfr.copy().pick_types(meg=True)
tfr_stim = tfr.copy().pick_types(meg=False, stim=True)
tfr_eeg_meg = tfr.copy().pick_types(meg=True, eeg=True)
tfr_new = tfr_meg.copy().add_channels([tfr_eeg, tfr_stim])
assert all(ch in tfr_new.ch_names
for ch in tfr_stim.ch_names + tfr_meg.ch_names)
tfr_new = tfr_meg.copy().add_channels([tfr_eeg])
assert all(ch in tfr_new.ch_names
for ch in tfr.ch_names if ch != 'STIM 001')
assert_array_equal(tfr_new.data, tfr_eeg_meg.data)
assert all(ch not in tfr_new.ch_names for ch in tfr_stim.ch_names)
# Now test errors
tfr_badsf = tfr_eeg.copy()
tfr_badsf.info['sfreq'] = 3.1415927
tfr_eeg = tfr_eeg.crop(-.1, .1)
pytest.raises(RuntimeError, tfr_meg.add_channels, [tfr_badsf])
pytest.raises(AssertionError, tfr_meg.add_channels, [tfr_eeg])
pytest.raises(ValueError, tfr_meg.add_channels, [tfr_meg])
pytest.raises(TypeError, tfr_meg.add_channels, tfr_badsf)
def test_compute_tfr():
"""Test _compute_tfr function."""
# Set parameters
event_id = 1
tmin = -0.2
tmax = 0.498 # Allows exhaustive decimation testing
# Setup for reading the raw data
raw = read_raw_fif(raw_fname)
events = read_events(event_fname)
exclude = raw.info['bads'] + ['MEG 2443', 'EEG 053'] # bads + 2 more
# picks MEG gradiometers
picks = pick_types(raw.info, meg='grad', eeg=False,
stim=False, include=[], exclude=exclude)
picks = picks[:2]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks)
data = epochs.get_data()
sfreq = epochs.info['sfreq']
freqs = np.arange(10, 20, 3).astype(float)
# Check all combination of options
for func, use_fft, zero_mean, output in product(
(tfr_array_multitaper, tfr_array_morlet), (False, True), (False, True),
('complex', 'power', 'phase',
'avg_power_itc', 'avg_power', 'itc')):
# Check exception
if (func == tfr_array_multitaper) and (output == 'phase'):
pytest.raises(NotImplementedError, func, data, sfreq=sfreq,
freqs=freqs, output=output)
continue
# Check runs
out = func(data, sfreq=sfreq, freqs=freqs, use_fft=use_fft,
zero_mean=zero_mean, n_cycles=2., output=output)
# Check shapes
shape = np.r_[data.shape[:2], len(freqs), data.shape[2]]
if ('avg' in output) or ('itc' in output):
assert_array_equal(shape[1:], out.shape)
else:
assert_array_equal(shape, out.shape)
# Check types
if output in ('complex', 'avg_power_itc'):
assert_equal(np.complex, out.dtype)
else:
assert_equal(np.float, out.dtype)
assert (np.all(np.isfinite(out)))
# Check errors params
for _data in (None, 'foo', data[0]):
pytest.raises(ValueError, _compute_tfr, _data, freqs, sfreq)
for _freqs in (None, 'foo', [[0]]):
pytest.raises(ValueError, _compute_tfr, data, _freqs, sfreq)
for _sfreq in (None, 'foo'):
pytest.raises(ValueError, _compute_tfr, data, freqs, _sfreq)
for key in ('output', 'method', 'use_fft', 'decim', 'n_jobs'):
for value in (None, 'foo'):
kwargs = {key: value} # FIXME pep8
pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq,
**kwargs)
# No time_bandwidth param in morlet
pytest.raises(ValueError, _compute_tfr, data, freqs, sfreq,
method='morlet', time_bandwidth=1)
# No phase in multitaper XXX Check ?
pytest.raises(NotImplementedError, _compute_tfr, data, freqs, sfreq,
method='multitaper', output='phase')
# Inter-trial coherence tests
out = _compute_tfr(data, freqs, sfreq, output='itc', n_cycles=2.)
assert (np.sum(out >= 1) == 0)
assert (np.sum(out <= 0) == 0)
# Check decim shapes
# 2: multiple of len(times) even
# 3: multiple odd
# 8: not multiple, even
# 9: not multiple, odd
for decim in (2, 3, 8, 9, slice(0, 2), slice(1, 3), slice(2, 4)):
_decim = slice(None, None, decim) if isinstance(decim, int) else decim
n_time = len(np.arange(data.shape[2])[_decim])
shape = np.r_[data.shape[:2], len(freqs), n_time]
for method in ('multitaper', 'morlet'):
# Single trials
out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,
n_cycles=2.)
assert_array_equal(shape, out.shape)
# Averages
out = _compute_tfr(data, freqs, sfreq, method=method, decim=decim,
output='avg_power', n_cycles=2.)
assert_array_equal(shape[1:], out.shape)
run_tests_if_main()
| bsd-3-clause |
rschenck/Capsid_IDP_Classifier | development/tuning_and_validating.py | 1 | 9852 | #!/usr/bin/env python
import sys
import operator
import pandas as pd
import numpy as np
from sklearn import cross_validation
from sklearn.ensemble import ExtraTreesClassifier
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import label_binarize
from sklearn.metrics import roc_curve, auc
from sklearn.metrics import confusion_matrix
import matplotlib.pyplot as plt
from scipy import interp
from dataset import load_data
# obtains the classifications from the final curated dataset
def get_targets():
with open('/Users/schencro/Desktop/FINAL_DATASET/Curated_Dataset/FINAL_CURATED_TABLE.csv','r') as table:
typed = {}
for line in table:
line = line.split(',')
acc = line[1].rstrip(' ')
typed.update({acc:line[2]})
return typed
# obtain FINAL_DATASET for model (all data)
def get_data():
with open('/Users/schencro/Desktop/FINAL_DATASET/Curated_Dataset/FINAL_CURATED_SCORES.csv', 'r') as scores:
scores = scores.readlines()
formatted = []
for item in scores:
item = item.rstrip('\n')
item = item.split(',')
sample = [item[0]]
for i in range(1, len(item)):
ind = float(item[i])
sample.append(ind)
formatted.append(sample)
scores = None
return formatted
# get arrays after fetching the proper classification and getting that classifications set of scores
def get_arrays(types, scores):
order_types = []
out_scores = []
for item in scores:
acc = item[0]
ctype = types[acc]
order_types.append(ctype)
del item[0]
out_scores.append(item)
# the arrays needed for cross validation
type_array = np.asarray(order_types)
scores = np.asarray(out_scores)
# cleanup
item = None
	order_types = None
out_scores = None
return scores, type_array
# ExtraTreesClassifier model
def extratrees_model(x, y):
clf = ExtraTreesClassifier(n_estimators=25, class_weight={"Type A":0.3,"Type B":0.5,"Neither":0.2}, bootstrap=False, max_features=125, criterion='gini', n_jobs=-1)
clf = clf.fit(x, y)
return clf
# Voting model
def results_vote(x, y):
pass
# Section for running loops on different parameters
def tune_model_parameters(data, targets):
# cross validate and tuning of the ExtraTreesClassifier parameters
my_range = range(1,20)
n_scores = []
for n in my_range:
		clf = ExtraTreesClassifier(n_estimators=n, class_weight={"Type A":0.3,"Type B":0.5,"Neither":0.2}, bootstrap=False, max_features=125, criterion='gini', n_jobs=-1)
scores = cross_validation.cross_val_score(clf, data, targets, cv=10, scoring='accuracy')
n_scores.append(scores.mean())
plt.plot(my_range,n_scores)
plt.xlabel('Number of Trees in the Forest')
plt.ylabel('Cross-Validated Accuracy (10-fold Mean)')
plt.show()
#plt.savefig('/Users/ryan/Desktop/FINAL_DATASET/Curated_Dataset/Capsid_Classifier/max_features_10_126.png', bbox_inches = 'tight')
# get the parameter with the maximum mean output
m = max(n_scores)
mi = min(n_scores)
print 'Max Accuracy: ' + repr(m)
index = [i for i, j in enumerate(n_scores) if j == m]
for i in index:
print 'Parameter value max: ' + repr(my_range[i])
indexmi = [i for i, j in enumerate(n_scores) if j == mi]
print 'Min Accuracy: ' + repr(mi)
for i in indexmi:
print 'Parameter value min: ' + repr(my_range[i])
# get ROC curves for the predictions
def get_roc(data, targets):
	# binarize the classifications
bi_targets = label_binarize(targets, classes=['Type A', 'Type B', 'Neither'])
#print bi_targets
#print targets
n_classes = bi_targets.shape[1]
#print n_classes
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(data, bi_targets, train_size=.8)
# convert array to array of strings instead of arrays of arrays for the classifier (for the weights)
string_test = []
for i in range(0, len(y_train)):
string_test.append(str(y_train[i]))
string_test = np.asarray(string_test)
	clf = ExtraTreesClassifier(n_estimators=25, class_weight={"[1 0 0]":0.4,"[0 1 0]":0.5,"[0 0 1]":0.1}, bootstrap=False, max_features=125, criterion='gini', n_jobs = -1)
model = clf.fit(X_train, string_test)
y_score = model.predict(X_test)
# get output of scores from string list into a np array
array_scores = []
for item in y_score:
ind = item.split(' ')
ind0 = ind[0].lstrip('[')
ind1 = ind[1]
ind2 = ind[2].rstrip(']')
ind = [int(ind0),int(ind1), int(ind2)]
array_scores.append(ind)
array_scores = np.asarray(array_scores)
print array_scores
# Compute ROC curve and ROC area for each class
fpr = dict()
tpr = dict()
roc_auc = dict()
for i in range(n_classes):
fpr[i], tpr[i], _ = roc_curve(y_test[:, i], array_scores[:, i])
roc_auc[i] = auc(fpr[i], tpr[i])
# Compute micro-average ROC curve and ROC area
fpr["micro"], tpr["micro"], _ = roc_curve(y_test.ravel(), array_scores.ravel())
roc_auc["micro"] = auc(fpr["micro"], tpr["micro"])
'''
plt.figure()
plt.plot(fpr[2], tpr[2], label='ROC curve (area = %0.2f)' % roc_auc[2])
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristic example')
plt.legend(loc="lower right")
plt.show()
'''
# Plot ROC curves for the multiclass problem
# Compute macro-average ROC curve and ROC area
# First aggregate all false positive rates
all_fpr = np.unique(np.concatenate([fpr[i] for i in range(n_classes)]))
# Then interpolate all ROC curves at this points
mean_tpr = np.zeros_like(all_fpr)
for i in range(n_classes):
mean_tpr += interp(all_fpr, fpr[i], tpr[i])
# Finally average it and compute AUC
mean_tpr /= n_classes
fpr["macro"] = all_fpr
tpr["macro"] = mean_tpr
roc_auc["macro"] = auc(fpr["macro"], tpr["macro"])
# Plot all ROC curves
plt.figure()
plt.plot(fpr["micro"], tpr["micro"],
label='micro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["micro"]),
linewidth=2)
plt.plot(fpr["macro"], tpr["macro"],
label='macro-average ROC curve (area = {0:0.2f})'
''.format(roc_auc["macro"]),
linewidth=2)
for i in range(n_classes):
plt.plot(fpr[i], tpr[i], label='ROC curve of class {0} (area = {1:0.2f})'
''.format(i, roc_auc[i]))
plt.plot([0, 1], [0, 1], 'k--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('Receiver operating characteristics')
plt.legend(loc="lower right")
plt.savefig('/Users/schencro/Desktop/FINAL_DATASET/Curated_Dataset/Capsid_Classifier/ROC_curves.eps', bbox_inches = 'tight')
# plot confusion matrices
def plot_confusion_matrix(cm, labels, title='Confusion matrix', cmap=plt.cm.Greens):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(len(labels))
plt.xticks(tick_marks, labels, rotation=45)
plt.yticks(tick_marks, labels)
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
def cm_model_p1(X_train, y_train):
clf = ExtraTreesClassifier(n_estimators=25, class_weight={"Type A":0.3,"Type B":0.5,"Neither":0.2}, bootstrap=False, max_features=125, criterion='gini', n_jobs=-1)
model = clf.fit(X_train, y_train)
return model
def cm_model_p2(model, X_test):
# generate 100 predictions and vote for the majority for final prediction
hundred_pred = []
for i in range(0,100):
y_pred = model.predict(X_test)
hundred_pred.append(y_pred)
final_pred = []
for i in range(0, len(hundred_pred[0])):
types = []
for k,t in enumerate(hundred_pred):
types.append(hundred_pred[k][i])
counts = [types.count('Type A'),types.count('Type B'),types.count('Neither')]
index, value = max(enumerate(counts), key=operator.itemgetter(1))
if index == 0:
final_pred.append('Type A')
elif index == 1:
final_pred.append('Type B')
elif index == 2:
final_pred.append('Neither')
else:
pass
y_pred = np.asarray(final_pred)
return y_pred
# Generate confusion matrix
def get_conf_matrix(data, targets):
# shuffle and split training and test sets
X_train, X_test, y_train, y_test = train_test_split(data, targets, train_size=.8)
# gets the model for predictions
model = cm_model_p1(X_train, y_train)
# generate 100 confusion matrices, get mean value for each
out_cm = np.zeros((3,3))
for i in range(0,100):
y_pred = cm_model_p2(model, X_test)
# Compute confusion matrix
labels = ['Type A', 'Type B', 'Neither']
cm = confusion_matrix(y_test, y_pred, labels=labels)
np.set_printoptions(precision=2)
# Normalize the confusion matrix by row (i.e by the number of samples
# in each class)
cm_normalized = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
out_cm += cm_normalized
print out_cm
cm_normalized = np.divide(out_cm, 100.0)
print('Normalized confusion matrix (Mean of 100 predictions)')
print(cm_normalized)
plt.figure()
plot_confusion_matrix(cm_normalized, labels, title='Normalized confusion matrix')
# plt.show()
plt.savefig('/Users/schencro/Desktop/FINAL_DATASET/Curated_Dataset/Capsid_Classifier/confusion_matrix_RYANFINAL_100mean.eps', bbox_inches = 'tight')
def main():
'''
# Use these three to get the data loaded, targets loaded, and the accessions stripped (Otherwise use dataset.py load_data())
# get classifications
type_dict = get_targets()
# load data
scores = get_data()
# get arrays of scores and targets
data, targets = get_arrays(type_dict, scores)
'''
data, targets = load_data()
# tune model parameters
#tune_model_parameters(data,targets)
# get ROC curves
#get_roc(data, targets)
# get confusion matrix
get_conf_matrix(data, targets)
'''I WANT TO RE-RUN the ROC curves and the Confusion matrix data using predictions from a cross-validation rather than train/test_split'''
if __name__ == "__main__":
main() | gpl-2.0 |
ashhher3/pyDatasets | pydatasets/wafer.py | 2 | 2120 | import os
import re
from pandas import DataFrame
class WaferRun:
def __init__(self, run_id, wafer_id, label, measurements):
self.run_id = int(run_id)
self.wafer_id = int(wafer_id)
self.label = int(label)
self.measurements = DataFrame(measurements)
self.measurements.sort(axis=1, inplace=True)
self.measurements.sort_index(inplace=True)
@staticmethod
def from_files(path, run_id, wafer_id):
fn_base = os.path.join(path, '{0}_{1:02}'.format(run_id, wafer_id))
try:
df = DataFrame({11: DataFrame.from_csv(fn_base + '.11', header=None, sep='\t', index_col=None, parse_dates=False)[1],
12: DataFrame.from_csv(fn_base + '.12', header=None, sep='\t', index_col=None, parse_dates=False)[1],
15: DataFrame.from_csv(fn_base + '.15', header=None, sep='\t', index_col=None, parse_dates=False)[1],
6: DataFrame.from_csv(fn_base + '.6', header=None, sep='\t', index_col=None, parse_dates=False)[1],
7: DataFrame.from_csv(fn_base + '.7', header=None, sep='\t', index_col=None, parse_dates=False)[1],
8: DataFrame.from_csv(fn_base + '.8', header=None, sep='\t', index_col=None, parse_dates=False)[1]})
except:
return None
m = re.search('/(normal|abnormal)', path)
if m is None:
return None
label = 1 if m.group(1) == 'abnormal' else -1
return WaferRun(run_id, wafer_id, label, df)
def as_nparray(self):
"""Spits out data as a T x D numpy.array (T=# samples, D=# variables)
Notes:
Notice what we do here: we start with a pandas.DataFrame where each channel
is a column (so you can think of it as a T x D matrix). We first rename the
columns to channel numbers,then sort the columns, then sort the index, then
transform to numpy.array.
"""
return self.measurements.sort(axis=1).sort_index().reset_index().as_matrix().astype(float)
| apache-2.0 |
rvraghav93/scikit-learn | examples/neighbors/plot_nearest_centroid.py | 38 | 1817 | """
===============================
Nearest Centroid Classification
===============================
Sample usage of Nearest Centroid classification.
It will plot the decision boundaries for each class.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn import datasets
from sklearn.neighbors import NearestCentroid
n_neighbors = 15
# import some data to play with
iris = datasets.load_iris()
# we only take the first two features. We could avoid this ugly
# slicing by using a two-dim dataset
X = iris.data[:, :2]
y = iris.target
h = .02 # step size in the mesh
# Create color maps
cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF'])
cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])
for shrinkage in [None, .2]:
# we create an instance of Neighbours Classifier and fit the data.
clf = NearestCentroid(shrink_threshold=shrinkage)
clf.fit(X, y)
y_pred = clf.predict(X)
print(shrinkage, np.mean(y == y_pred))
# Plot the decision boundary. For that, we will assign a color to each
# point in the mesh [x_min, x_max]x[y_min, y_max].
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
Z = clf.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.figure()
plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Plot also the training points
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=cmap_bold,
edgecolor='b', s=20)
plt.title("3-Class classification (shrink_threshold=%r)"
% shrinkage)
plt.axis('tight')
plt.show()
| bsd-3-clause |
cbpygit/pypmj | projects/scattering/photonic_crystals/slabs/hexagonal/half_spaces/hex_plane_tools.py | 1 | 4284 | from scipy.linalg import expm, norm
import numpy as np
def rot_mat(axis, theta):
return expm(np.cross(np.eye(3), axis/norm(axis)*theta))
def rotate_vector(v, axis, theta):
M = rot_mat(axis, theta)
return np.tensordot(M,v,axes=([0],[1])).T #np.dot(M, v)
def rotate_around_z(v, theta):
return rotate_vector(v, np.array([0.,0.,1.]), theta)
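# Minimal sanity sketch for the rotation helpers above (illustrative only).
# The helpers expect an (N, 3) array of row vectors; a rotation must preserve
# vector norms and a full 2*pi turn must reproduce the input.
def _check_rotation_helpers():
    pts = np.array([[1., 0., 0.], [0.3, -0.2, 0.7]])
    assert np.allclose(rotate_around_z(pts, 2. * np.pi), pts)
    rotated = rotate_around_z(pts, np.pi / 3.)
    assert np.allclose(np.linalg.norm(rotated, axis=1),
                       np.linalg.norm(pts, axis=1))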
def is_odd(num):
return num & 0x1
def is_inside_hexagon(x, y, d=None, x0=0., y0=0.):
p_eps = 10.*np.finfo(float).eps
if d is None:
d = y.max() - y.min() + p_eps
dx = np.abs(x - x0)/d
dy = np.abs(y - y0)/d
a = 0.25 * np.sqrt(3.0)
return np.logical_and(dx <= a, a*dy + 0.25*dx <= 0.5*a)
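# Quick illustrative check of the hexagon membership test above for a
# pointy-top hexagon of total height d=2 (circumradius 1, inradius sqrt(3)/2):
# the center and an interior point pass, while points just beyond a vertex
# (0, 1.01) or beyond a flat edge (0.9, 0) fail.
def _check_is_inside_hexagon():
    x = np.array([0.0, 0.0, 0.9, 0.8])
    y = np.array([0.0, 1.01, 0.0, 0.0])
    inside = is_inside_hexagon(x, y, d=2.)
    assert np.array_equal(inside, [True, False, False, True])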
def get_hex_plane(plane_idx, inradius, z_height, z_center, np_xy,
np_z):
    # We shrink the coordinate ranges by 10x the float machine precision
    # so that points do not leave the computational domain due to
    # precision problems
p_eps = 10.*np.finfo(float).eps
ri = inradius # short for inradius
rc = inradius/np.sqrt(3.)*2. # short for circumradius
if np_z == 'auto':
np_z = int(np.round(float(np_xy)/2./rc*z_height))
# XY-plane (no hexagonal shape!)
if plane_idx == 6:
X = np.linspace(-ri+p_eps, ri-p_eps, np_xy)
Y = np.linspace(-rc+p_eps, rc-p_eps, np_xy)
XY = np.meshgrid(X,Y)
XYrs = np.concatenate((XY[0][..., np.newaxis],
XY[1][..., np.newaxis]),
axis=2)
Z = np.ones((np_xy, np_xy, 1))*z_center
pl = np.concatenate((XYrs, Z), axis=2)
pl = pl.reshape(-1, pl.shape[-1])
# Restrict to hexagon
idx_hex = is_inside_hexagon(pl[:,0], pl[:,1])
return pl[idx_hex]
# Vertical planes
elif plane_idx < 6:
r = rc if is_odd(plane_idx) else ri
r = r-p_eps
xy_line = np.empty((np_xy,2))
xy_line[:,0] = np.linspace(-r, r, np_xy)
xy_line[:,1] = 0.
z_points = np.linspace(0.+p_eps, z_height-p_eps, np_z)
# Construct the plane
plane = np.empty((np_xy*np_z, 3))
for i, xy in enumerate(xy_line):
for j, z in enumerate(z_points):
idx = i*np_z + j
plane[idx, :2] = xy
plane[idx, 2] = z
# Rotate the plane
return rotate_around_z(plane, plane_idx*np.pi/6.)
else:
raise ValueError('`plane_idx` must be in [0...6].')
def get_hex_planes_point_list(inradius, z_height, z_center, np_xy, np_z,
plane_indices=[0,1,2,3,6]):
# Construct the desired planes
planes = []
for i in plane_indices:
planes.append(get_hex_plane(i, inradius, z_height, z_center,
np_xy, np_z))
# Flatten and save lengths
lengths = [len(p) for p in planes]
return np.vstack(planes), np.array(lengths)
def hex_planes_point_list_for_keys(keys, plane_indices=[0,1,2,3,6]):
if not 'uol' in keys:
keys['uol'] = 1.e-9
inradius = keys['p'] * keys['uol'] /2.
z_height = (keys['h'] + keys['h_sub'] + keys['h_sup']) * keys['uol']
z_center = (keys['h_sub']+keys['h']/2.) * keys['uol']
np_xy = keys['hex_np_xy']
    if 'hex_np_z' in keys:
        np_z = keys['hex_np_z']
    else:
        np_z = 'auto'
    return get_hex_planes_point_list(inradius, z_height, z_center, np_xy,
                                     np_z, plane_indices=plane_indices)
def plane_idx_iter(lengths_):
"""Yields the plane index plus lower index `idx_i` and upper index
`idx_f` of the point list representing this plane
(i.e. pointlist[idx_i:idx_f]).
"""
i = 0
while i < len(lengths_):
yield i, lengths_[:i].sum(), lengths_[:(i+1)].sum()
i += 1
def plot_planes(pointlist, lengths):
import matplotlib.pyplot as plt
import seaborn as sns
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
colors = sns.color_palette('husl', len(lengths))
for i, idx_i, idx_f in plane_idx_iter(lengths):
pl = pointlist[idx_i:idx_f]
ax.scatter(pl[:,0], pl[:,1], pl[:,2], s=10., c=colors[i],
label='plane {}'.format(i+1), linewidth=0.)
_ = plt.legend(loc='upper left')
| gpl-3.0 |
alexeyum/scikit-learn | sklearn/mixture/tests/test_dpgmm.py | 261 | 4490 | import unittest
import sys
import numpy as np
from sklearn.mixture import DPGMM, VBGMM
from sklearn.mixture.dpgmm import log_normalize
from sklearn.datasets import make_blobs
from sklearn.utils.testing import assert_array_less, assert_equal
from sklearn.mixture.tests.test_gmm import GMMTester
from sklearn.externals.six.moves import cStringIO as StringIO
np.seterr(all='warn')
def test_class_weights():
# check that the class weights are updated
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50)
dpgmm.fit(X)
# get indices of components that are used:
indices = np.unique(dpgmm.predict(X))
active = np.zeros(10, dtype=np.bool)
active[indices] = True
# used components are important
assert_array_less(.1, dpgmm.weights_[active])
# others are not
assert_array_less(dpgmm.weights_[~active], .05)
def test_verbose_boolean():
    # checks that the verbose output is the same for the flag
    # values '1' and 'True'
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm_bool = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=True)
dpgmm_int = Model(n_components=10, random_state=1, alpha=20,
n_iter=50, verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
# generate output with the boolean flag
dpgmm_bool.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
bool_output = verbose_output.readline()
# generate output with the int flag
dpgmm_int.fit(X)
verbose_output = sys.stdout
verbose_output.seek(0)
int_output = verbose_output.readline()
assert_equal(bool_output, int_output)
finally:
sys.stdout = old_stdout
def test_verbose_first_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=1)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_verbose_second_level():
# simple 3 cluster dataset
X, y = make_blobs(random_state=1)
for Model in [DPGMM, VBGMM]:
dpgmm = Model(n_components=10, random_state=1, alpha=20, n_iter=50,
verbose=2)
old_stdout = sys.stdout
sys.stdout = StringIO()
try:
dpgmm.fit(X)
finally:
sys.stdout = old_stdout
def test_log_normalize():
v = np.array([0.1, 0.8, 0.01, 0.09])
a = np.log(2 * v)
assert np.allclose(v, log_normalize(a), rtol=0.01)
def do_model(self, **kwds):
return VBGMM(verbose=False, **kwds)
class DPGMMTester(GMMTester):
model = DPGMM
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestDPGMMWithSphericalCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestDPGMMWithDiagCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestDPGMMWithTiedCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestDPGMMWithFullCovars(unittest.TestCase, DPGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
class VBGMMTester(GMMTester):
model = do_model
do_test_eval = False
def score(self, g, train_obs):
_, z = g.score_samples(train_obs)
return g.lower_bound(train_obs, z)
class TestVBGMMWithSphericalCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'spherical'
setUp = GMMTester._setUp
class TestVBGMMWithDiagCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'diag'
setUp = GMMTester._setUp
class TestVBGMMWithTiedCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'tied'
setUp = GMMTester._setUp
class TestVBGMMWithFullCovars(unittest.TestCase, VBGMMTester):
covariance_type = 'full'
setUp = GMMTester._setUp
| bsd-3-clause |
beingzy/user_recommender_framework | groupwise_distance_learning/tests/test_helper_func.py | 1 | 2232 | """ functions for developing
Author: Yi Zhang <beingzy@gmail.com>
Date: 2016/03/10
"""
import os
import os.path
from os.path import dirname, abspath, join
import pandas as pd
def get_file_parent_dir_path(level=1):
""" return the path of the parent directory of current file """
current_dir_path = dirname(abspath(__file__))
path_sep = os.path.sep
components = current_dir_path.split(path_sep)
return path_sep.join(components[:-level])
def load_sample_test_data():
""" load small test data """
_root_dir = get_file_parent_dir_path(level=2)
_data_dir = join(_root_dir, 'data', 'small_test')
user_profile_fpath = join(_data_dir, "user_profile.csv")
user_connections_fpath = join(_data_dir, "connections.csv")
int_user_profile_df = pd.read_csv(user_profile_fpath, header=0, sep=',')
user_connections_df = pd.read_csv(user_connections_fpath, header=0, sep=',')
user_ids = int_user_profile_df.id.tolist()
    # remove the id column and the categorical feature column
user_profile_df = int_user_profile_df.drop(["id", "feat_3"], axis=1, inplace=False).as_matrix()
user_connections_df = user_connections_df.as_matrix()
return user_ids, user_profile_df, user_connections_df
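# Hedged usage sketch: assumes the small_test CSV files exist under
# <repo root>/data/small_test, as encoded in load_sample_test_data() above.
def _sample_data_usage_example():
    user_ids, profiles, connections = load_sample_test_data()
    # profiles and connections are returned as plain numpy matrices
    return len(user_ids), profiles.shape, connections.shape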
def load_simulated_test_data():
""" load simulationd data with defined two groups """
_root_dir = get_file_parent_dir_path(level=2)
_data_dir = join(_root_dir, 'data', 'sim_two_groups')
user_profile_fpath = join(_data_dir, "user_profiles.csv")
user_connections_fpath = join(_data_dir, "friendships.csv")
# prepare user profile information
user_profile_df = pd.read_csv(user_profile_fpath, header=0, sep=",")
# unpack data
user_ids = user_profile_df.ID.tolist()
user_true_groups = user_profile_df.decision_style.tolist()
user_profile_df = user_profile_df.drop(["ID", "decision_style"], axis=1, inplace=False).as_matrix()
user_connections_df = pd.read_csv(user_connections_fpath, header=0, sep=",")
user_connections_df = (user_connections_df[user_connections_df.isFriend==1]
.drop('isFriend', axis=1, inplace=False).astype(int).as_matrix())
return user_ids, user_profile_df, user_connections_df, user_true_groups
| gpl-3.0 |
moonbury/notebooks | github/MatplotlibCookbook/Chapter 8/wx-supershape-1.py | 3 | 1121 | import wx, numpy
from matplotlib.backends.backend_wxagg import FigureCanvasWxAgg
from matplotlib.figure import Figure
def supershape_radius(phi, a, b, m, n1, n2, n3):
theta = .25 * m * phi
cos = numpy.fabs(numpy.cos(theta) / a) ** n2
sin = numpy.fabs(numpy.sin(theta) / b) ** n3
r = (cos + sin) ** (-1. / n1)
r /= numpy.max(r)
return r
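# Minimal sketch showing the radius function used on its own, without the wx
# GUI; the parameters mirror the ones passed in draw_figure() further down.
def _supershape_radius_example():
    phi = numpy.linspace(0, 2 * numpy.pi, 1024)
    r = supershape_radius(phi, 1, 1, 3, 2, 18, 18)
    return phi, r  # r is normalised so that max(r) == 1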
class SuperShapeFrame(wx.Frame):
def __init__(self, parent, id, title):
wx.Frame.__init__(self, parent, id, title,
style = wx.DEFAULT_FRAME_STYLE ^ wx.RESIZE_BORDER,
size = (480, 480))
self.fig = Figure((6, 6), dpi = 80)
self.panel = wx.Panel(self, -1)
sizer = wx.BoxSizer(wx.VERTICAL)
sizer.Add(FigureCanvasWxAgg(self.panel, -1, self.fig), 1)
self.panel.SetSizer(sizer)
self.draw_figure()
def draw_figure(self):
phi = numpy.linspace(0, 2 * numpy.pi, 1024)
r = supershape_radius(phi, 1, 1, 3, 2, 18, 18)
ax = self.fig.add_subplot(111, polar = True)
ax.plot(phi, r, lw = 3.)
self.fig.canvas.draw()
app = wx.App(redirect = True)
top = SuperShapeFrame(None, -1, 'SuperShape')
top.Show()
app.MainLoop()
| gpl-3.0 |
fmacias64/Dato-Core | src/unity/python/graphlab/deps/__init__.py | 13 | 1294 | '''
Copyright (C) 2015 Dato, Inc.
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the DATO-PYTHON-LICENSE file for details.
'''
from distutils.version import StrictVersion
import logging
def __get_version(version):
if 'dev' in str(version):
version = version[:version.find('.dev')]
return StrictVersion(version)
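# Illustrative sketch: a trailing '.devN' tag is stripped so that development
# builds compare like their base release.
def _get_version_example():
    assert __get_version('1.8.0.dev1') == StrictVersion('1.8.0')
    assert __get_version('0.13.0') == StrictVersion('0.13.0')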
HAS_PANDAS = True
PANDAS_MIN_VERSION = '0.13.0'
try:
import pandas
if __get_version(pandas.__version__) < StrictVersion(PANDAS_MIN_VERSION):
HAS_PANDAS = False
logging.warn(('Pandas version %s is not supported. Minimum required version: %s. '
'Pandas support will be disabled.')
% (pandas.__version__, PANDAS_MIN_VERSION) )
except:
HAS_PANDAS = False
import pandas_mock as pandas
HAS_NUMPY = True
NUMPY_MIN_VERSION = '1.8.0'
try:
import numpy
if __get_version(numpy.__version__) < StrictVersion(NUMPY_MIN_VERSION):
HAS_NUMPY = False
logging.warn(('Numpy version %s is not supported. Minimum required version: %s. '
'Numpy support will be disabled.')
% (numpy.__version__, NUMPY_MIN_VERSION) )
except:
HAS_NUMPY = False
import numpy_mock as numpy
| agpl-3.0 |
imitrichev/cantera | interfaces/cython/cantera/examples/reactors/sensitivity1.py | 4 | 2165 | """
Constant-pressure, adiabatic kinetics simulation with sensitivity analysis
"""
import sys
import numpy as np
import cantera as ct
gri3 = ct.Solution('gri30.xml')
temp = 1500.0
pres = ct.one_atm
gri3.TPX = temp, pres, 'CH4:0.1, O2:2, N2:7.52'
r = ct.IdealGasConstPressureReactor(gri3, name='R1')
sim = ct.ReactorNet([r])
# enable sensitivity with respect to the rates of the first 10
# reactions (reactions 0 through 9)
for i in range(10):
r.add_sensitivity_reaction(i)
# set the tolerances for the solution and for the sensitivity coefficients
sim.rtol = 1.0e-6
sim.atol = 1.0e-15
sim.rtol_sensitivity = 1.0e-6
sim.atol_sensitivity = 1.0e-6
n_times = 400
tim = np.zeros(n_times)
data = np.zeros((n_times,6))
time = 0.0
for n in range(n_times):
time += 5.0e-6
sim.advance(time)
tim[n] = 1000 * time
data[n,0] = r.T
data[n,1:4] = r.thermo['OH','H','CH4'].X
# sensitivity of OH to reaction 2
data[n,4] = sim.sensitivity('OH',2)
# sensitivity of OH to reaction 3
data[n,5] = sim.sensitivity('OH',3)
print('%10.3e %10.3f %10.3f %14.6e %10.3f %10.3f' %
(sim.time, r.T, r.thermo.P, r.thermo.u, data[n,4], data[n,5]))
# plot the results if matplotlib is installed.
# see http://matplotlib.org/ to get it
if '--plot' in sys.argv:
import matplotlib.pyplot as plt
plt.subplot(2,2,1)
plt.plot(tim,data[:,0])
plt.xlabel('Time (ms)')
plt.ylabel('Temperature (K)')
plt.subplot(2,2,2)
plt.plot(tim,data[:,1])
plt.xlabel('Time (ms)')
plt.ylabel('OH Mole Fraction')
plt.subplot(2,2,3)
plt.plot(tim,data[:,2])
plt.xlabel('Time (ms)')
plt.ylabel('H Mole Fraction')
plt.subplot(2,2,4)
plt.plot(tim,data[:,3])
plt.xlabel('Time (ms)')
    plt.ylabel('CH4 Mole Fraction')
plt.tight_layout()
plt.figure(2)
plt.plot(tim,data[:,4],'-',tim,data[:,5],'-g')
plt.legend([sim.sensitivity_parameter_name(2),sim.sensitivity_parameter_name(3)],'best')
plt.xlabel('Time (ms)')
plt.ylabel('OH Sensitivity')
plt.tight_layout()
plt.show()
else:
print("""To view a plot of these results, run this script with the option '--plot""")
| bsd-3-clause |
rsivapr/scikit-learn | examples/cross_decomposition/plot_compare_cross_decomposition.py | 8 | 4706 | """
===================================
Compare cross decomposition methods
===================================
Simple usage of various cross decomposition algorithms:
- PLSCanonical
- PLSRegression, with multivariate response, a.k.a. PLS2
- PLSRegression, with univariate response, a.k.a. PLS1
- CCA
Given 2 multivariate covarying two-dimensional datasets, X, and Y,
PLS extracts the 'directions of covariance', i.e. the components of each
datasets that explain the most shared variance between both datasets.
This is apparent on the **scatterplot matrix** display: components 1 in
dataset X and dataset Y are maximally correlated (points lie around the
first diagonal). This is also true for components 2 in both datasets;
however, the correlation across datasets for different components is
weak: the point cloud is very spherical.
"""
print(__doc__)
import numpy as np
import pylab as pl
from sklearn.cross_decomposition import PLSCanonical, PLSRegression, CCA
###############################################################################
# Dataset based latent variables model
n = 500
# 2 latent vars:
l1 = np.random.normal(size=n)
l2 = np.random.normal(size=n)
latents = np.array([l1, l1, l2, l2]).T
X = latents + np.random.normal(size=4 * n).reshape((n, 4))
Y = latents + np.random.normal(size=4 * n).reshape((n, 4))
X_train = X[:n / 2]
Y_train = Y[:n / 2]
X_test = X[n / 2:]
Y_test = Y[n / 2:]
print("Corr(X)")
print(np.round(np.corrcoef(X.T), 2))
print("Corr(Y)")
print(np.round(np.corrcoef(Y.T), 2))
###############################################################################
# Canonical (symmetric) PLS
# Transform data
# ~~~~~~~~~~~~~~
plsca = PLSCanonical(n_components=2)
plsca.fit(X_train, Y_train)
X_train_r, Y_train_r = plsca.transform(X_train, Y_train)
X_test_r, Y_test_r = plsca.transform(X_test, Y_test)
# Scatter plot of scores
# ~~~~~~~~~~~~~~~~~~~~~~
# 1) On diagonal plot X vs Y scores on each components
pl.figure(figsize=(12, 8))
pl.subplot(221)
pl.plot(X_train_r[:, 0], Y_train_r[:, 0], "ob", label="train")
pl.plot(X_test_r[:, 0], Y_test_r[:, 0], "or", label="test")
pl.xlabel("x scores")
pl.ylabel("y scores")
pl.title('Comp. 1: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 0], Y_test_r[:, 0])[0, 1])
pl.xticks(())
pl.yticks(())
pl.legend(loc="best")
pl.subplot(224)
pl.plot(X_train_r[:, 1], Y_train_r[:, 1], "ob", label="train")
pl.plot(X_test_r[:, 1], Y_test_r[:, 1], "or", label="test")
pl.xlabel("x scores")
pl.ylabel("y scores")
pl.title('Comp. 2: X vs Y (test corr = %.2f)' %
np.corrcoef(X_test_r[:, 1], Y_test_r[:, 1])[0, 1])
pl.xticks(())
pl.yticks(())
pl.legend(loc="best")
# 2) Off diagonal plot components 1 vs 2 for X and Y
pl.subplot(222)
pl.plot(X_train_r[:, 0], X_train_r[:, 1], "*b", label="train")
pl.plot(X_test_r[:, 0], X_test_r[:, 1], "*r", label="test")
pl.xlabel("X comp. 1")
pl.ylabel("X comp. 2")
pl.title('X comp. 1 vs X comp. 2 (test corr = %.2f)'
% np.corrcoef(X_test_r[:, 0], X_test_r[:, 1])[0, 1])
pl.legend(loc="best")
pl.xticks(())
pl.yticks(())
pl.subplot(223)
pl.plot(Y_train_r[:, 0], Y_train_r[:, 1], "*b", label="train")
pl.plot(Y_test_r[:, 0], Y_test_r[:, 1], "*r", label="test")
pl.xlabel("Y comp. 1")
pl.ylabel("Y comp. 2")
pl.title('Y comp. 1 vs Y comp. 2 , (test corr = %.2f)'
% np.corrcoef(Y_test_r[:, 0], Y_test_r[:, 1])[0, 1])
pl.legend(loc="best")
pl.xticks(())
pl.yticks(())
pl.show()
###############################################################################
# PLS regression, with multivariate response, a.k.a. PLS2
n = 1000
q = 3
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
B = np.array([[1, 2] + [0] * (p - 2)] * q).T
# each Yj = 1*X1 + 2*X2 + noise
Y = np.dot(X, B) + np.random.normal(size=n * q).reshape((n, q)) + 5
pls2 = PLSRegression(n_components=3)
pls2.fit(X, Y)
print("True B (such that: Y = XB + Err)")
print(B)
# compare pls2.coefs with B
print("Estimated B")
print(np.round(pls2.coefs, 1))
pls2.predict(X)
###############################################################################
# PLS regression, with univariate response, a.k.a. PLS1
n = 1000
p = 10
X = np.random.normal(size=n * p).reshape((n, p))
y = X[:, 0] + 2 * X[:, 1] + np.random.normal(size=n * 1) + 5
pls1 = PLSRegression(n_components=3)
pls1.fit(X, y)
# note that the number of components exceeds 1 (the dimension of y)
print("Estimated betas")
print(np.round(pls1.coefs, 1))
###############################################################################
# CCA (PLS mode B with symmetric deflation)
cca = CCA(n_components=2)
cca.fit(X_train, Y_train)
X_train_r, Y_train_r = cca.transform(X_train, Y_train)
X_test_r, Y_test_r = cca.transform(X_test, Y_test)
| bsd-3-clause |
SEMAFORInformatik/femagtools | femagtools/forcedens.py | 1 | 6880 | # -*- coding: utf-8 -*-
"""
femagtools.forcedens
~~~~~~~~~~~~~~~~~~~~
Read Force Density Plot Files
"""
import os
import re
import glob
import numpy as np
import logging
logger = logging.getLogger('femagtools.forcedens')
filename_pat = re.compile(r'^(\w+)_(\d{3}).PLT(\d+)')
num_pat = re.compile(r'([+-]?\d+(?:\.\d+)?(?:[eE][+-]\d+)?)\s*')
pos_pat = re.compile(r'^\s*POSITION\s*\[(\w+)\]')
unit_pat = re.compile(r'\[([^\]]+)')
def _readSections(f):
"""return list of PLT sections
sections are surrounded by lines starting with '[***'
or 2d arrays with 7 columns
Args:
      f (file): PLT file to be read
Returns:
list of sections
"""
section = []
for line in f:
if line.startswith('[****') or pos_pat.match(line):
if section:
if len(section) > 2 and section[1].startswith('Date'):
yield section[1:]
else:
yield section
if line.startswith('[****'):
section = []
else:
section = [line.strip()]
else:
section.append(line.strip())
yield section
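# Minimal sketch of how _readSections splits its input; the lines below are a
# hypothetical, heavily trimmed stand-in for real FEMAG PLT output. As written
# it yields two sections: the header block and the POSITION block.
def _read_sections_example():
    lines = ["[*************]",
             "FEMAG Classic Version: 8.3",
             "Project File name:",
             "sample.pro",
             "[*************]",
             "POSITION [deg]   0.000",
             "",
             "X [deg]    FN [N/m**2]"]
    return list(_readSections(lines))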
class ForceDensity(object):
def __init__(self):
self.type = ''
self.positions = []
pass
def __read_version(self, content):
rec = content[0].split(' ')
if len(rec) > 3:
self.version = rec[3]
else:
self.version = rec[-1]
def __read_project_filename(self, content):
self.project = content[1].strip()
def __read_nodes_and_mesh(self, content):
self.nodes, self.elements, self.quality = \
[float(r[0]) for r in [num_pat.findall(l)
for l in content[:3]]]
for l in content[3:]:
m = re.match(r'\*+([^\*]+)\*+', l)
if m:
self.type = m.group(1).strip()
return
def __read_date_and_title(self, content):
d = content[0].split(':')[1].strip().split()
dd, MM, yy = d[0].split('.')
hh, mm = ''.join(d[1:-1]).split('.')
self.date = '{}-{}-{}T{:02}:{:02}'.format(
yy, MM, dd, int(hh), int(mm))
if len(content) > 6:
self.title = content[2].strip() + ', ' + content[6].strip()
else:
self.title = content[2].strip()
self.current = float(num_pat.findall(content[4])[0])
def __read_filename(self, content):
self.filename = content[0].split(':')[1].strip()
def __read_position(self, content):
d = dict(position=float(num_pat.findall(content[0])[-1]),
unit=unit_pat.findall(content[0].split()[1])[0])
cols = content[2].split()
labels = cols[::2] # either X, FN, FT, B_N, B_T
# or X FX FY B_X B_Y
d['column_units'] = {k: u for k, u in zip(labels,
[unit_pat.findall(u)[0]
for u in cols[1::2]])}
m = []
for l in content[4:]:
rec = l.split()[1:]
if len(rec) == len(labels):
m.append([float(x) for x in rec])
d.update({k: v for k, v in zip(labels, list(zip(*m)))})
self.positions.append(d)
def read(self, filename):
with open(filename) as f:
for s in _readSections(f.readlines()):
logger.debug('Section %s' % s[0:2])
if s[0].startswith('FEMAG'):
self.__read_version(s)
elif s[0].startswith('Project'):
self.__read_project_filename(s)
elif s[0].startswith('Number'):
self.__read_nodes_and_mesh(s)
elif s[0].startswith('File'):
self.__read_filename(s)
elif s[0].startswith('Date'):
self.__read_date_and_title(s)
elif s[0].startswith('POSITION'):
self.__read_position(s)
def fft(self):
"""return FFT of FN"""
import scipy.fftpack
try:
ntiles = int(360/self.positions[0]['X'][-1])
FN = np.tile(
np.array([p['FN'][:-1] for p in self.positions[:-1]]),
(ntiles, ntiles))
except AttributeError:
return []
N = FN.shape[0]
fdn = scipy.fftpack.fft2(FN)
dim = N//ntiles//2
return np.abs(fdn)[1:dim, 1:dim]/N
def items(self):
return [(k, getattr(self, k)) for k in ('version',
'type',
'title',
'current',
'filename',
'date',
'positions')]
def __str__(self):
"return string format of this object"
if self.type:
return "\n".join([
'FEMAG {}: {}'.format(self.version, self.type),
'File: {} {}'.format(self.filename, self.date)] +
['{}: {}'.format(k, v)
for k, v in self.items()])
return "{}"
def read(filename):
f = ForceDensity()
f.read(filename)
return f
def readall(workdir='.'):
"""collect all recent PLT files
    returns a dict mapping model names to lists of ForceDensity objects
"""
plt = dict()
pltfiles = sorted(glob.glob(os.path.join(workdir, '*_*.PLT*')))
base = os.path.basename(pltfiles[-1])
lastserie = filename_pat.match(base).groups()[1]
for p in pltfiles:
base = os.path.basename(p)
m = filename_pat.match(base)
if m and lastserie == m.groups()[1]:
model, i, k = m.groups()
fdens = ForceDensity()
fdens.read(p)
logging.info("%s: %s", p, fdens.title)
if model in plt:
plt[model].append(fdens)
else:
plt[model] = [fdens]
return plt
if __name__ == "__main__":
import matplotlib.pyplot as pl
import femagtools.plot
import sys
if len(sys.argv) == 2:
filename = sys.argv[1]
else:
filename = sys.stdin.readline().strip()
fdens = read(filename)
# Show the results
title = '{}, Rotor position {}'.format(
fdens.title, fdens.positions[0]['position'])
pos = fdens.positions[0]['X']
FT_FN = (fdens.positions[0]['FT'],
fdens.positions[0]['FN'])
femagtools.plot.forcedens(title, pos, FT_FN)
pl.show()
title = 'Force Density Harmonics'
femagtools.plot.forcedens_fft(title, fdens)
pl.show()
| bsd-2-clause |
nvoron23/scikit-learn | sklearn/linear_model/tests/test_sparse_coordinate_descent.py | 244 | 9986 | import numpy as np
import scipy.sparse as sp
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import ignore_warnings
from sklearn.linear_model.coordinate_descent import (Lasso, ElasticNet,
LassoCV, ElasticNetCV)
def test_sparse_coef():
    # Check that the sparse_coef_ property works
clf = ElasticNet()
clf.coef_ = [1, 2, 3]
assert_true(sp.isspmatrix(clf.sparse_coef_))
assert_equal(clf.sparse_coef_.toarray().tolist()[0], clf.coef_)
def test_normalize_option():
# Check that the normalize option in enet works
X = sp.csc_matrix([[-1], [0], [1]])
y = [-1, 0, 1]
clf_dense = ElasticNet(fit_intercept=True, normalize=True)
clf_sparse = ElasticNet(fit_intercept=True, normalize=True)
clf_dense.fit(X, y)
X = sp.csc_matrix(X)
clf_sparse.fit(X, y)
assert_almost_equal(clf_dense.dual_gap_, 0)
assert_array_almost_equal(clf_dense.coef_, clf_sparse.coef_)
def test_lasso_zero():
# Check that the sparse lasso can handle zero data without crashing
X = sp.csc_matrix((3, 1))
y = [0, 0, 0]
T = np.array([[1], [2], [3]])
clf = Lasso().fit(X, y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0])
assert_array_almost_equal(pred, [0, 0, 0])
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_list_input():
# Test ElasticNet for various values of alpha and l1_ratio with list X
X = np.array([[-1], [0], [1]])
X = sp.csc_matrix(X)
Y = [-1, 0, 1] # just a straight line
T = np.array([[2], [3], [4]]) # test sample
# this should be the same as unregularized least squares
clf = ElasticNet(alpha=0, l1_ratio=1.0)
# catch warning about alpha=0.
# this is discouraged but should work.
ignore_warnings(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def test_enet_toy_explicit_sparse_input():
# Test ElasticNet for various values of alpha and l1_ratio with sparse X
f = ignore_warnings
# training samples
X = sp.lil_matrix((3, 1))
X[0, 0] = -1
# X[1, 0] = 0
X[2, 0] = 1
Y = [-1, 0, 1] # just a straight line (the identity function)
# test samples
T = sp.lil_matrix((3, 1))
T[0, 0] = 2
T[1, 0] = 3
T[2, 0] = 4
# this should be the same as lasso
clf = ElasticNet(alpha=0, l1_ratio=1.0)
f(clf.fit)(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [1])
assert_array_almost_equal(pred, [2, 3, 4])
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.3, max_iter=1000)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.50819], decimal=3)
assert_array_almost_equal(pred, [1.0163, 1.5245, 2.0327], decimal=3)
assert_almost_equal(clf.dual_gap_, 0)
clf = ElasticNet(alpha=0.5, l1_ratio=0.5)
clf.fit(X, Y)
pred = clf.predict(T)
assert_array_almost_equal(clf.coef_, [0.45454], 3)
assert_array_almost_equal(pred, [0.9090, 1.3636, 1.8181], 3)
assert_almost_equal(clf.dual_gap_, 0)
def make_sparse_data(n_samples=100, n_features=100, n_informative=10, seed=42,
positive=False, n_targets=1):
random_state = np.random.RandomState(seed)
# build an ill-posed linear regression problem with many noisy features and
# comparatively few samples
# generate a ground truth model
w = random_state.randn(n_features, n_targets)
w[n_informative:] = 0.0 # only the top features are impacting the model
if positive:
w = np.abs(w)
X = random_state.randn(n_samples, n_features)
rnd = random_state.uniform(size=(n_samples, n_features))
X[rnd > 0.5] = 0.0 # 50% of zeros in input signal
# generate training ground truth labels
y = np.dot(X, w)
X = sp.csc_matrix(X)
if n_targets == 1:
y = np.ravel(y)
return X, y
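# Quick illustrative sketch (not collected as a test): only the first
# n_informative features carry signal, so an L1 penalty should recover a
# sparse coefficient vector on the generated data.
def _make_sparse_data_example():
    X, y = make_sparse_data(n_samples=60, n_features=30, n_informative=5)
    clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=1000)
    clf.fit(X, y)
    return np.sum(clf.coef_ != 0.0)  # count of non-zero coefficients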
def _test_sparse_enet_not_as_toy_dataset(alpha, fit_intercept, positive):
n_samples, n_features, max_iter = 100, 100, 1000
n_informative = 10
X, y = make_sparse_data(n_samples, n_features, n_informative,
positive=positive)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = ElasticNet(alpha=alpha, l1_ratio=0.8, fit_intercept=fit_intercept,
max_iter=max_iter, tol=1e-7, positive=positive,
warm_start=True)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
assert_almost_equal(s_clf.coef_, d_clf.coef_, 5)
assert_almost_equal(s_clf.intercept_, d_clf.intercept_, 5)
# check that the coefs are sparse
assert_less(np.sum(s_clf.coef_ != 0.0), 2 * n_informative)
def test_sparse_enet_not_as_toy_dataset():
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=False,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=0.1, fit_intercept=True,
positive=False)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=False,
positive=True)
_test_sparse_enet_not_as_toy_dataset(alpha=1e-3, fit_intercept=True,
positive=True)
def test_sparse_lasso_not_as_toy_dataset():
n_samples = 100
max_iter = 1000
n_informative = 10
X, y = make_sparse_data(n_samples=n_samples, n_informative=n_informative)
X_train, X_test = X[n_samples // 2:], X[:n_samples // 2]
y_train, y_test = y[n_samples // 2:], y[:n_samples // 2]
s_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
s_clf.fit(X_train, y_train)
assert_almost_equal(s_clf.dual_gap_, 0, 4)
assert_greater(s_clf.score(X_test, y_test), 0.85)
# check the convergence is the same as the dense version
d_clf = Lasso(alpha=0.1, fit_intercept=False, max_iter=max_iter, tol=1e-7)
d_clf.fit(X_train.toarray(), y_train)
assert_almost_equal(d_clf.dual_gap_, 0, 4)
assert_greater(d_clf.score(X_test, y_test), 0.85)
# check that the coefs are sparse
assert_equal(np.sum(s_clf.coef_ != 0.0), n_informative)
def test_enet_multitarget():
n_targets = 3
X, y = make_sparse_data(n_targets=n_targets)
estimator = ElasticNet(alpha=0.01, fit_intercept=True, precompute=None)
# XXX: There is a bug when precompute is not None!
estimator.fit(X, y)
coef, intercept, dual_gap = (estimator.coef_,
estimator.intercept_,
estimator.dual_gap_)
for k in range(n_targets):
estimator.fit(X, y[:, k])
assert_array_almost_equal(coef[k, :], estimator.coef_)
assert_array_almost_equal(intercept[k], estimator.intercept_)
assert_array_almost_equal(dual_gap[k], estimator.dual_gap_)
def test_path_parameters():
X, y = make_sparse_data()
max_iter = 50
n_alphas = 10
clf = ElasticNetCV(n_alphas=n_alphas, eps=1e-3, max_iter=max_iter,
l1_ratio=0.5, fit_intercept=False)
ignore_warnings(clf.fit)(X, y) # new params
assert_almost_equal(0.5, clf.l1_ratio)
assert_equal(n_alphas, clf.n_alphas)
assert_equal(n_alphas, len(clf.alphas_))
sparse_mse_path = clf.mse_path_
ignore_warnings(clf.fit)(X.toarray(), y) # compare with dense data
assert_almost_equal(clf.mse_path_, sparse_mse_path)
def test_same_output_sparse_dense_lasso_and_enet_cv():
X, y = make_sparse_data(n_samples=40, n_features=10)
for normalize in [True, False]:
clfs = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = ElasticNetCV(max_iter=100, cv=5, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
clfs = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfs.fit)(X, y)
clfd = LassoCV(max_iter=100, cv=4, normalize=normalize)
ignore_warnings(clfd.fit)(X.toarray(), y)
assert_almost_equal(clfs.alpha_, clfd.alpha_, 7)
assert_almost_equal(clfs.intercept_, clfd.intercept_, 7)
assert_array_almost_equal(clfs.mse_path_, clfd.mse_path_)
assert_array_almost_equal(clfs.alphas_, clfd.alphas_)
| bsd-3-clause |
martinggww/lucasenlights | ETF/lucas/bin/2getQuantCode.py | 2 | 3085 | import sys, os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import my_config as config
import logging
from src.getDf import readCsvFiles, addFundPerf, readStatics, getFeatureDf, dropOff, dropOffTrade
from src.calStatics import calStatics
from src.getQuantDf import getQuantDf
from src.calQuantCode import calQuantDf
import numpy as np
import pandas as pd
import json
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__file__)
def debug(df):
print df.columns.values
def mergeDailyWeekly(d_quant, w_quant):
daily_size = d_quant.shape[0]
quant = pd.merge(d_quant, w_quant, on='DATE', how='outer')
    # Find the first row where the weekly value is not NaN
drop_index = 0
for drop_index, row in quant.iterrows():
if not pd.isnull(row['SPY_weekly']):
break
quant.drop(quant.index[0:drop_index], inplace=True)
quant = quant.reset_index(drop=True)
print quant.columns.names
temp_dict = {}
for index, row in quant.iterrows():
print index
print row
for name in w_quant.columns.values:
if pd.isnull(row[name]):
row[name] = temp_dict[name]
else:
temp_dict[name] = row[name]
quant.drop(quant.index[daily_size:], inplace=True)
quant = quant.reset_index(drop=True)
return quant
def trim(df):
index = 0
start_index = 0
end_index = df.shape[0]
for index, row in df.iterrows():
date = row['date']
if date >= config.START_DATE:
start_index = index
break
for index, row in df.iterrows():
date = row['date']
if date >= config.END_DATE:
end_index = index
break
df = df[start_index:end_index]
df = df.reset_index(drop=True)
return df
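# Illustrative sketch of the windowing logic above, using a throwaway frame
# and hypothetical date bounds instead of the values in my_config. For sorted
# ISO-formatted dates the index slice is equivalent to a simple boolean mask.
def _trim_example():
    df = pd.DataFrame({'date': ['2016-01-01', '2016-01-04', '2016-01-08']})
    start, end = '2016-01-02', '2016-01-08'
    out = df[(df.date >= start) & (df.date < end)].reset_index(drop=True)
    return out  # keeps only the 2016-01-04 row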
'''
Read price df, read statics data, and for each record calculate its quantitative code
'''
if __name__ == '__main__':
usage = "Usage: GetQuantCode, this program will calculate the quantitiave codes of trading data"
print usage
d, w, m = getFeatureDf()
d_stat, w_stat = readStatics()
d = dropOff(d, 'daily')
print "Usage: 8 daily Features: KD, KD_SLOPE, ROC, MFI_SLOPE, HIST_MOM, MFI, KD_RANK, MFI_RANK"
if sys.argv[1] == 'run':
d = trim(d)
w = trim(w)
debug(d)
d_quant = calQuantDf(d, d_stat, 'daily')
debug(d_quant)
w = dropOff(w, 'weekly')
debug(w)
print "Usage: 4 weekly features: KD, MFI, KD_RANK, MFI_RANK"
w_quant = calQuantDf(w, w_stat, 'weekly')
debug(w_quant)
if d.shape[0] != d_quant.shape[0] or w.shape[0] != w_quant.shape[0]:
logger.error("Wrong quant size")
exit(1)
quant_df = mergeDailyWeekly(d_quant, w_quant)
d = dropOffTrade(d, quant_df.iloc[0]['DATE'])
print "Save trade pickle to disk"
d.to_pickle(config.TRADE_DF_PICKLE)
print "Save trade quant pickle to disk"
quant_df.to_pickle(config.QUANT_DF_PICKLE)
usage = "Usage: GetQuantCode, save trade_quant to pickle, save quant_df to pickle"
print usage
| cc0-1.0 |
jstoxrocky/statsmodels | statsmodels/examples/ex_kernel_regression2.py | 34 | 1511 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 02 13:43:44 2013
Author: Josef Perktold
"""
from __future__ import print_function
import numpy as np
import numpy.testing as npt
import statsmodels.nonparametric.api as nparam
if __name__ == '__main__':
np.random.seed(500)
nobs = [250, 1000][0]
sig_fac = 1
x = np.random.uniform(-2, 2, size=nobs)
x.sort()
y_true = np.sin(x*5)/x + 2*x
y = y_true + sig_fac * (np.sqrt(np.abs(3+x))) * np.random.normal(size=nobs)
model = nparam.KernelReg(endog=[y],
exog=[x], reg_type='lc',
var_type='c', bw='cv_ls',
defaults=nparam.EstimatorSettings(efficient=True))
sm_bw = model.bw
sm_mean, sm_mfx = model.fit()
model1 = nparam.KernelReg(endog=[y],
exog=[x], reg_type='lc',
var_type='c', bw='cv_ls')
mean1, mfx1 = model1.fit()
model2 = nparam.KernelReg(endog=[y],
exog=[x], reg_type='ll',
var_type='c', bw='cv_ls')
mean2, mfx2 = model2.fit()
print(model.bw)
print(model1.bw)
print(model2.bw)
import matplotlib.pyplot as plt
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.plot(x, y, 'o', alpha=0.5)
ax.plot(x, y_true, lw=2, label='DGP mean')
ax.plot(x, sm_mean, lw=2, label='kernel mean')
ax.plot(x, mean2, lw=2, label='kernel mean')
ax.legend()
plt.show()
| bsd-3-clause |
petebachant/CFT-vectors | cft_vectors.py | 1 | 18584 | #!/usr/bin/env python
"""
This script generates a force and velocity vector diagram for a cross-flow
turbine.
"""
from __future__ import division, print_function
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import pandas as pd
from scipy.interpolate import interp1d
import seaborn as sns
from pxl.styleplot import set_sns
import os
# Define some colors (some from the Seaborn deep palette)
blue = sns.color_palette()[0]
green = sns.color_palette()[1]
dark_gray = (0.3, 0.3, 0.3)
red = sns.color_palette()[2]
purple = sns.color_palette()[3]
tan = sns.color_palette()[4]
light_blue = sns.color_palette()[5]
def load_foildata():
"""Loads NACA 0020 airfoil data at Re = 2.1 x 10^5."""
Re = 2.1e5
foil = "0020"
fname = "NACA {}_T1_Re{:.3f}_M0.00_N9.0.dat".format(foil, Re/1e6)
fpath = "data/{}".format(fname)
alpha, cl, cd = np.loadtxt(fpath, skiprows=14, unpack=True)
if alpha[0] != 0.0:
alpha = np.append([0.0], alpha[:-1])
cl = np.append([1e-12], cl[:-1])
cd = np.append(cd[0], cd[:-1])
# Mirror data about 0 degrees AoA since it's a symmetrical foil
alpha = np.append(-np.flipud(alpha), alpha)
cl = np.append(-np.flipud(cl), cl)
cd = np.append(np.flipud(cd), cd)
df = pd.DataFrame()
df["alpha_deg"] = alpha
df["cl"] = cl
df["cd"] = cd
return df
def lookup_foildata(alpha_deg):
"""Lookup foil characteristics at given angle of attack."""
alpha_deg = np.asarray(alpha_deg)
df = load_foildata()
df["alpha_rad"] = np.deg2rad(df.alpha_deg)
f_cl = interp1d(df.alpha_deg, df.cl, bounds_error=False)
f_cd = interp1d(df.alpha_deg, df.cd, bounds_error=False)
f_ct = interp1d(df.alpha_deg, df.cl*np.sin(df.alpha_rad) \
- df.cd*np.cos(df.alpha_rad), bounds_error=False)
cl, cd, ct = f_cl(alpha_deg), f_cd(alpha_deg), f_ct(alpha_deg)
return {"cl": cl, "cd": cd, "ct": ct}
def calc_cft_ctorque(tsr=2.0, chord=0.14, R=0.5):
"""Calculate the geometric torque coefficient for a CFT."""
U_infty = 1.0
omega = tsr*U_infty/R
theta_blade_deg = np.arange(0, 721)
theta_blade_rad = np.deg2rad(theta_blade_deg)
blade_vel_mag = omega*R
blade_vel_x = blade_vel_mag*np.cos(theta_blade_rad)
blade_vel_y = blade_vel_mag*np.sin(theta_blade_rad)
u = U_infty # No induction
rel_vel_mag = np.sqrt((blade_vel_x + u)**2 + blade_vel_y**2)
rel_vel_x = u + blade_vel_x
rel_vel_y = blade_vel_y
relvel_dot_bladevel = (blade_vel_x*rel_vel_x + blade_vel_y*rel_vel_y)
alpha_rad = np.arccos(relvel_dot_bladevel/(rel_vel_mag*blade_vel_mag))
alpha_rad[theta_blade_deg > 180] *= -1
alpha_deg = np.rad2deg(alpha_rad)
foil_coeffs = lookup_foildata(alpha_deg)
ctorque = foil_coeffs["ct"]*chord/(2*R)*rel_vel_mag**2/U_infty**2
cdx = -foil_coeffs["cd"]*np.sin(np.pi/2 - alpha_rad + theta_blade_rad)
clx = foil_coeffs["cl"]*np.cos(np.pi/2 - alpha_rad - theta_blade_rad)
df = pd.DataFrame()
df["theta"] = theta_blade_deg
df["alpha_deg"] = alpha_deg
df["rel_vel_mag"] = rel_vel_mag
df["ctorque"] = ctorque
df["cdrag"] = clx + cdx
return df
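# Illustrative sketch (same data-file assumption as above): the geometric
# torque coefficient can be summarised per revolution, e.g. by its mean value
# and the azimuthal angle at which it peaks.
def _ctorque_summary_example(tsr=2.0):
    df = calc_cft_ctorque(tsr=tsr)
    return df.ctorque.mean(), df.theta[df.ctorque.idxmax()]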
def mag(v):
"""
Return magnitude of 2-D vector (input as a tuple, list, or NumPy array).
"""
return np.sqrt(v[0]**2 + v[1]**2)
def rotate(v, rad):
"""Rotate a 2-D vector by rad radians."""
dc, ds = np.cos(rad), np.sin(rad)
x, y = v[0], v[1]
x, y = dc*x - ds*y, ds*x + dc*y
return np.array((x, y))
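# Tiny sanity sketch: rotating the unit x-vector by pi/2 gives the unit
# y-vector, up to floating point error.
def _rotate_example():
    v = rotate((1.0, 0.0), np.pi / 2)
    assert np.allclose(v, (0.0, 1.0))
    return v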
def gen_naca_points(naca="0020", c=100, npoints=100, tuples=True):
"""Generate points for a NACA foil."""
x = np.linspace(0, 1, npoints)*c
t = float(naca[2:])/100.0
y = 5.0*t*c*(0.2969*np.sqrt(x/c) - 0.1260*(x/c) - 0.3516*(x/c)**2 \
+ 0.2843*(x/c)**3 - 0.1015*(x/c)**4)
y = np.append(y, -y[::-1])
x = np.append(x, x[::-1])
if tuples:
return np.array([(x0, y0) for x0, y0 in zip(x, y)])
else:
return x, y
def test_gen_naca_points():
points = gen_naca_points()
x = []
y = []
for p in points:
x.append(p[0])
y.append(p[1])
fig, ax = plt.subplots()
ax.plot(x, y, "o")
ax.set_aspect(1)
plt.show()
def plot_radius(ax, theta_deg=0):
"""Plot radius at given azimuthal angle."""
r = 0.495
theta_rad = np.deg2rad(theta_deg)
x2, y2 = r*np.cos(theta_rad), r*np.sin(theta_rad)
ax.plot((0, x2), (0, y2), "gray", linewidth=2)
def plot_center(ax, length=0.07, linewidth=1.2):
"""Plot centermark at origin."""
ax.plot((0, 0), (-length/2, length/2), lw=linewidth, color="black")
ax.plot((-length/2, length/2), (0, 0), lw=linewidth, color="black")
def make_naca_path(c=0.3, theta_deg=0.0):
verts = gen_naca_points(c=c)
verts = np.array([rotate(v, -np.pi/2) for v in verts])
verts += (0.5, c/4)
theta_rad = np.deg2rad(theta_deg)
verts = np.array([rotate(v, theta_rad) for v in verts])
p = matplotlib.path.Path(verts, closed=True)
return p
def plot_foil(ax, c=0.3, theta_deg=0.0):
"""Plot the foil shape using a matplotlib patch."""
p = matplotlib.patches.PathPatch(make_naca_path(c, theta_deg),
facecolor="gray", linewidth=1,
edgecolor="gray")
ax.add_patch(p)
def plot_blade_path(ax, R=0.5):
"""Plot blade path as a dashed line."""
p = plt.Circle((0, 0), R, linestyle="dashed", edgecolor="black",
facecolor="none", linewidth=1)
ax.add_patch(p)
def plot_vectors(fig, ax, theta_deg=0.0, tsr=2.0, c=0.3, label=False):
"""Plot blade velocity, free stream velocity, relative velocity, lift, and
drag vectors.
"""
r = 0.5
u_infty = 0.26
theta_deg %= 360
theta_rad = np.deg2rad(theta_deg)
blade_xy = r*np.cos(theta_rad), r*np.sin(theta_rad)
head_width = 0.04
head_length = 0.11
linewidth = 1.5
# Function for plotting vector labels
def plot_label(text, x, y, dx, dy, text_width=0.09, text_height=0.03,
sign=-1, dist=1.0/3.0):
text_width *= plt.rcParams["font.size"]/12*6/fig.get_size_inches()[1]
text_height *= plt.rcParams["font.size"]/12*6/fig.get_size_inches()[1]
dvec = np.array((dx, dy))
perp_vec = rotate(dvec, np.pi/2)
perp_vec /= mag(perp_vec)
if theta_deg > 270:
diag = text_height
else:
diag = np.array((text_width, text_height))
# Projection of text diagonal vector onto normal vector
proj = np.dot(diag, perp_vec)
if sign != -1:
proj = 0 # Text is on right side of vector
if theta_deg > 180:
sign *= -1
dxlab, dylab = perp_vec*(np.abs(proj) + .01)*sign
xlab, ylab = x + dx*dist + dxlab, y + dy*dist + dylab
ax.text(xlab, ylab, text)
# Make blade velocity vector
x1, y1 = rotate((0.5, tsr*u_infty), np.deg2rad(theta_deg))
dx, dy = np.array(blade_xy) - np.array((x1, y1))
blade_vel = np.array((dx, dy))
ax.arrow(x1, y1, dx, dy, head_width=head_width, head_length=head_length,
length_includes_head=True, color=dark_gray, linewidth=linewidth)
if label:
plot_label(r"$-\omega r$", x1, y1, dx*0.25, dy*0.5)
# Make chord line vector
x1c, y1c = np.array((x1, y1)) - np.array((dx, dy))*0.5
x2c, y2c = np.array((x1, y1)) + np.array((dx, dy))*2
ax.plot([x1c, x2c], [y1c, y2c], marker=None, color="k", linestyle="-.",
zorder=1)
# Make free stream velocity vector
y1 += u_infty
ax.arrow(x1, y1, 0, -u_infty, head_width=head_width,
head_length=head_length, length_includes_head=True,
color=blue, linewidth=linewidth)
u_infty = np.array((0, -u_infty))
if label:
dy = -mag(u_infty)
plot_label(r"$U_\mathrm{in}$", x1, y1, 0, dy, text_width=0.1)
# Make relative velocity vector
dx, dy = np.array(blade_xy) - np.array((x1, y1))
rel_vel = u_infty + blade_vel
ax.plot((x1, x1 + dx), (y1, y1 + dy), lw=0)
ax.arrow(x1, y1, dx, dy, head_width=head_width, head_length=head_length,
length_includes_head=True, color=tan, linewidth=linewidth)
if label:
plot_label(r"$U_\mathrm{rel}$", x1, y1, dx, dy, sign=1,
text_width=0.11)
# Calculate angle between blade vel and rel vel
alpha_deg = np.rad2deg(np.arccos(np.dot(blade_vel/mag(blade_vel),
rel_vel/mag(rel_vel))))
if theta_deg > 180:
alpha_deg *= -1
# Make drag vector
drag_amplify = 3.0
data = lookup_foildata(alpha_deg)
drag = data["cd"]*mag(rel_vel)**2*drag_amplify
if drag < 0.4/drag_amplify:
hs = 0.5
else:
hs = 1
dx, dy = drag*np.array((dx, dy))/mag((dx, dy))
ax.arrow(blade_xy[0], blade_xy[1], dx, dy, head_width=head_width*hs,
head_length=head_length*hs, length_includes_head=True, color=red,
linewidth=linewidth)
if label:
plot_label(r"$F_d$", blade_xy[0], blade_xy[1], dx, dy, sign=-1,
dist=0.66)
# Make lift vector
lift_amplify = 1.5
lift = data["cl"]*mag(rel_vel)**2*lift_amplify
dx, dy = rotate((dx, dy), -np.pi/2)/mag((dx, dy))*lift
if np.abs(lift) < 0.4/lift_amplify:
hs = 0.5
else:
hs = 1
ax.plot((blade_xy[0], blade_xy[0] + dx), (blade_xy[1], blade_xy[1] + dy),
linewidth=0)
ax.arrow(blade_xy[0], blade_xy[1], dx, dy, head_width=head_width*hs,
head_length=head_length*hs, length_includes_head=True,
color=green, linewidth=linewidth)
if label:
plot_label(r"$F_l$", blade_xy[0], blade_xy[1], dx, dy, sign=-1,
text_width=0.12, text_height=0.02, dist=0.66)
# Label radius
if label:
plot_label("$r$", 0, 0, blade_xy[0], blade_xy[1], text_width=0.04,
text_height=0.04)
# Label angle of attack
if label:
ast = "simple,head_width={},tail_width={},head_length={}".format(
head_width*8, linewidth/16, head_length*8)
xy = blade_xy - rel_vel/mag(rel_vel)*0.2
ax.annotate(r"$\alpha$", xy=xy, xycoords="data",
xytext=(37.5, 22.5), textcoords="offset points",
arrowprops=dict(arrowstyle=ast,
ec="none",
connectionstyle="arc3,rad=0.1",
color="k"))
xy = blade_xy - blade_vel/mag(blade_vel)*0.2
ax.annotate("", xy=xy, xycoords="data",
xytext=(-15, -30), textcoords="offset points",
arrowprops=dict(arrowstyle=ast,
ec="none",
connectionstyle="arc3,rad=-0.1",
color="k"))
# Label azimuthal angle
if label:
xy = np.array(blade_xy)*0.6
ast = "simple,head_width={},tail_width={},head_length={}".format(
head_width*5.5, linewidth/22, head_length*5.5)
ax.annotate(r"$\theta$", xy=xy, xycoords="data",
xytext=(0.28, 0.12), textcoords="data",
arrowprops=dict(arrowstyle=ast,
ec="none",
connectionstyle="arc3,rad=0.1",
color="k"))
ax.annotate("", xy=(0.41, 0), xycoords="data",
xytext=(0.333, 0.12), textcoords="data",
arrowprops=dict(arrowstyle=ast,
ec="none",
connectionstyle="arc3,rad=-0.1",
color="k"))
# Label pitching moment
if label:
xy = np.array(blade_xy)*1.1 - blade_vel/mag(blade_vel) * c/4
ast = "simple,head_width={},tail_width={},head_length={}".format(
head_width*8, linewidth/16, head_length*8)
ax.annotate(r"", xy=xy, xycoords="data",
xytext=(25, -15), textcoords="offset points",
arrowprops=dict(arrowstyle=ast,
ec="none",
connectionstyle="arc3,rad=0.6",
color="k"))
plot_label(r"$M$", xy[0], xy[1], 0.1, 0.1, sign=-1, dist=0.66)
return {"u_infty": u_infty, "blade_vel": blade_vel, "rel_vel": rel_vel}
def plot_alpha(ax=None, tsr=2.0, theta=None, alpha_ss=None, **kwargs):
"""Plot angle of attack versus azimuthal angle."""
if theta is not None:
theta %= 360
if ax is None:
fig, ax = plt.subplots()
df = calc_cft_ctorque(tsr=tsr)
ax.plot(df.theta, df.alpha_deg, **kwargs)
ax.set_ylabel(r"$\alpha$ (degrees)")
ax.set_xlabel(r"$\theta$ (degrees)")
ax.set_xlim((0, 360))
ylim = np.round(df.alpha_deg.max() + 5)
ax.set_ylim((-ylim, ylim))
if theta is not None:
f = interp1d(df.theta, df.alpha_deg)
ax.plot(theta, f(theta), "ok")
if alpha_ss is not None:
ax.hlines((alpha_ss, -alpha_ss), 0, 360, linestyles="dashed")
def plot_rel_vel_mag(ax=None, tsr=2.0, theta=None, **kwargs):
"""Plot relative velocity magnitude versus azimuthal angle."""
if theta is not None:
theta %= 360
if ax is None:
fig, ax = plt.subplots()
df = calc_cft_ctorque(tsr=tsr)
ax.plot(df.theta, df.rel_vel_mag, **kwargs)
ax.set_ylabel(r"$|\vec{U}_\mathrm{rel}|$")
ax.set_xlabel(r"$\theta$ (degrees)")
ax.set_xlim((0, 360))
if theta is not None:
f = interp1d(df.theta, df.rel_vel_mag)
ax.plot(theta, f(theta), "ok")
def plot_alpha_relvel_all(tsrs=np.arange(1.5, 6.1, 1.0), save=False):
"""Plot angle of attack and relative velocity magnitude for a list of TSRs.
Figure will have two subplots in a single row.
"""
fig, (ax1, ax2) = plt.subplots(nrows=1, ncols=2, figsize=(7.5, 3.0))
cm = plt.cm.get_cmap("Reds")
for tsr in tsrs:
color = cm(tsr/np.max(tsrs))
plot_alpha(ax=ax1, tsr=tsr, label=r"$\lambda = {}$".format(tsr),
color=color)
plot_rel_vel_mag(ax=ax2, tsr=tsr, color=color)
[a.set_xticks(np.arange(0, 361, 60)) for a in (ax1, ax2)]
ax1.legend(loc=(0.17, 1.1), ncol=len(tsrs))
ax1.set_ylim((-45, 45))
ax1.set_yticks(np.arange(-45, 46, 15))
ax2.set_ylabel(r"$|\vec{U}_\mathrm{rel}|/U_\infty$")
fig.tight_layout()
if save:
fig.savefig("figures/alpha_deg_urel_geom.pdf", bbox_inches="tight")
def plot_ctorque(ax=None, tsr=2.0, theta=None, **kwargs):
"""Plot torque coefficient versus azimuthal angle."""
theta %= 360
if ax is None:
fig, ax = plt.subplots()
df = calc_cft_ctorque(tsr=tsr)
ax.plot(df.theta, df.ctorque, **kwargs)
ax.set_ylabel("Torque coeff.")
ax.set_xlabel(r"$\theta$ (degrees)")
ax.set_xlim((0, 360))
if theta is not None:
f = interp1d(df.theta, df.ctorque)
ax.plot(theta, f(theta), "ok")
def plot_diagram(fig=None, ax=None, theta_deg=0.0, tsr=2.0, label=False,
save=False, axis="on", full_view=True):
"""Plot full vector diagram."""
if ax is None:
fig, ax = plt.subplots(figsize=(6, 6))
plot_blade_path(ax)
if label:
# Create dashed line for x-axis
ax.plot((-0.5, 0.5), (0, 0), linestyle="dashed", color="k",
zorder=1)
plot_foil(ax, c=0.3, theta_deg=theta_deg)
plot_radius(ax, theta_deg)
plot_center(ax)
plot_vectors(fig, ax, theta_deg, tsr, label=label)
# Figure formatting
if full_view:
ax.set_xlim((-1, 1))
ax.set_ylim((-1, 1))
ax.set_aspect(1)
ax.set_xticks([])
ax.set_yticks([])
ax.axis(axis)
if save:
fig.savefig("figures/cft-vectors.pdf")
def plot_all(theta_deg=0.0, tsr=2.0, scale=1.0, full_view=True):
"""Create diagram and plots of kinematics in a single figure."""
fig = plt.figure(figsize=(7.5*scale, 4.75*scale))
# Draw vector diagram
ax1 = plt.subplot2grid((3, 3), (0, 0), colspan=2, rowspan=3)
plot_diagram(fig, ax1, theta_deg, tsr, axis="on", full_view=full_view)
# Plot angle of attack
ax2 = plt.subplot2grid((3, 3), (0, 2))
plot_alpha(ax2, tsr=tsr, theta=theta_deg, alpha_ss=18, color=light_blue)
# Plot relative velocity magnitude
ax3 = plt.subplot2grid((3, 3), (1, 2))
plot_rel_vel_mag(ax3, tsr=tsr, theta=theta_deg, color=tan)
# Plot torque coefficient
ax4 = plt.subplot2grid((3, 3), (2, 2))
plot_ctorque(ax4, tsr=tsr, theta=theta_deg, color=purple)
fig.tight_layout()
return fig
def make_frame(t):
"""Make a frame for a movie."""
sec_per_rev = 5.0
deg = t/sec_per_rev*360
return mplfig_to_npimage(plot_all(deg, scale=2.0))
def make_animation(filetype="mp4", fps=30):
"""Make animation video."""
if not os.path.isdir("videos"):
os.mkdir("videos")
animation = VideoClip(make_frame, duration=5.0)
if "mp4" in filetype.lower():
animation.write_videofile("videos/cft-animation.mp4", fps=fps)
elif "gif" in filetype.lower():
animation.write_gif("videos/cft-animation.gif", fps=fps)
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description="Create cross-flow turbine \
vector diagrams.")
parser.add_argument("create", choices=["figure", "diagram", "animation"],
help="Either create a static figure or animation")
parser.add_argument("--angle", type=float, default=60.0,
help="Angle (degrees) to create figure")
parser.add_argument("--show", action="store_true", default=False)
parser.add_argument("--save", "-s", action="store_true", default=False,
help="Save figure")
args = parser.parse_args()
if args.save:
if not os.path.isdir("figures"):
os.mkdir("figures")
if args.create == "diagram":
set_sns(font_scale=2)
plot_diagram(theta_deg=args.angle, label=True, axis="off",
save=args.save)
elif args.create == "figure":
set_sns()
plot_alpha_relvel_all(save=args.save)
elif args.create == "animation":
set_sns(font_scale=2)
from moviepy.editor import VideoClip
from moviepy.video.io.bindings import mplfig_to_npimage
make_animation()
if args.show:
plt.show()
| mit |
ilo10/scikit-learn | examples/applications/svm_gui.py | 287 | 11161 | """
==========
Libsvm GUI
==========
A simple graphical frontend for Libsvm mainly intended for didactic
purposes. You can create data points by point and click and visualize
the decision region induced by different kernels and parameter settings.
To create positive examples click the left mouse button; to create
negative examples click the right button.
If all examples are from the same class, it uses a one-class SVM.
"""
from __future__ import division, print_function
print(__doc__)
# Author: Peter Prettenhoer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import matplotlib
matplotlib.use('TkAgg')
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.backends.backend_tkagg import NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.contour import ContourSet
import Tkinter as Tk
import sys
import numpy as np
from sklearn import svm
from sklearn.datasets import dump_svmlight_file
from sklearn.externals.six.moves import xrange
y_min, y_max = -50, 50
x_min, x_max = -50, 50
class Model(object):
"""The Model which hold the data. It implements the
observable in the observer pattern and notifies the
registered observers on change event.
"""
def __init__(self):
self.observers = []
self.surface = None
self.data = []
self.cls = None
self.surface_type = 0
def changed(self, event):
"""Notify the observers. """
for observer in self.observers:
observer.update(event, self)
def add_observer(self, observer):
"""Register an observer. """
self.observers.append(observer)
def set_surface(self, surface):
self.surface = surface
def dump_svmlight_file(self, file):
data = np.array(self.data)
X = data[:, 0:2]
y = data[:, 2]
dump_svmlight_file(X, y, file)
class Controller(object):
def __init__(self, model):
self.model = model
self.kernel = Tk.IntVar()
self.surface_type = Tk.IntVar()
# Whether or not a model has been fitted
self.fitted = False
def fit(self):
print("fit the model")
train = np.array(self.model.data)
X = train[:, 0:2]
y = train[:, 2]
C = float(self.complexity.get())
gamma = float(self.gamma.get())
coef0 = float(self.coef0.get())
degree = int(self.degree.get())
kernel_map = {0: "linear", 1: "rbf", 2: "poly"}
if len(np.unique(y)) == 1:
clf = svm.OneClassSVM(kernel=kernel_map[self.kernel.get()],
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X)
else:
clf = svm.SVC(kernel=kernel_map[self.kernel.get()], C=C,
gamma=gamma, coef0=coef0, degree=degree)
clf.fit(X, y)
if hasattr(clf, 'score'):
print("Accuracy:", clf.score(X, y) * 100)
X1, X2, Z = self.decision_surface(clf)
self.model.clf = clf
self.model.set_surface((X1, X2, Z))
self.model.surface_type = self.surface_type.get()
self.fitted = True
self.model.changed("surface")
def decision_surface(self, cls):
delta = 1
x = np.arange(x_min, x_max + delta, delta)
y = np.arange(y_min, y_max + delta, delta)
X1, X2 = np.meshgrid(x, y)
Z = cls.decision_function(np.c_[X1.ravel(), X2.ravel()])
Z = Z.reshape(X1.shape)
return X1, X2, Z
def clear_data(self):
self.model.data = []
self.fitted = False
self.model.changed("clear")
def add_example(self, x, y, label):
self.model.data.append((x, y, label))
self.model.changed("example_added")
# update decision surface if already fitted.
self.refit()
def refit(self):
"""Refit the model if already fitted. """
if self.fitted:
self.fit()
class View(object):
"""Test docstring. """
def __init__(self, root, controller):
f = Figure()
ax = f.add_subplot(111)
ax.set_xticks([])
ax.set_yticks([])
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
canvas = FigureCanvasTkAgg(f, master=root)
canvas.show()
canvas.get_tk_widget().pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas._tkcanvas.pack(side=Tk.TOP, fill=Tk.BOTH, expand=1)
canvas.mpl_connect('button_press_event', self.onclick)
toolbar = NavigationToolbar2TkAgg(canvas, root)
toolbar.update()
self.controllbar = ControllBar(root, controller)
self.f = f
self.ax = ax
self.canvas = canvas
self.controller = controller
self.contours = []
self.c_labels = None
self.plot_kernels()
def plot_kernels(self):
self.ax.text(-50, -60, "Linear: $u^T v$")
self.ax.text(-20, -60, "RBF: $\exp (-\gamma \| u-v \|^2)$")
self.ax.text(10, -60, "Poly: $(\gamma \, u^T v + r)^d$")
def onclick(self, event):
if event.xdata and event.ydata:
if event.button == 1:
self.controller.add_example(event.xdata, event.ydata, 1)
elif event.button == 3:
self.controller.add_example(event.xdata, event.ydata, -1)
def update_example(self, model, idx):
x, y, l = model.data[idx]
if l == 1:
color = 'w'
elif l == -1:
color = 'k'
self.ax.plot([x], [y], "%so" % color, scalex=0.0, scaley=0.0)
def update(self, event, model):
if event == "examples_loaded":
for i in xrange(len(model.data)):
self.update_example(model, i)
if event == "example_added":
self.update_example(model, -1)
if event == "clear":
self.ax.clear()
self.ax.set_xticks([])
self.ax.set_yticks([])
self.contours = []
self.c_labels = None
self.plot_kernels()
if event == "surface":
self.remove_surface()
self.plot_support_vectors(model.clf.support_vectors_)
self.plot_decision_surface(model.surface, model.surface_type)
self.canvas.draw()
def remove_surface(self):
"""Remove old decision surface."""
if len(self.contours) > 0:
for contour in self.contours:
if isinstance(contour, ContourSet):
for lineset in contour.collections:
lineset.remove()
else:
contour.remove()
self.contours = []
def plot_support_vectors(self, support_vectors):
"""Plot the support vectors by placing circles over the
corresponding data points and adds the circle collection
to the contours list."""
cs = self.ax.scatter(support_vectors[:, 0], support_vectors[:, 1],
s=80, edgecolors="k", facecolors="none")
self.contours.append(cs)
def plot_decision_surface(self, surface, type):
X1, X2, Z = surface
if type == 0:
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed', 'solid', 'dashed']
colors = 'k'
self.contours.append(self.ax.contour(X1, X2, Z, levels,
colors=colors,
linestyles=linestyles))
elif type == 1:
self.contours.append(self.ax.contourf(X1, X2, Z, 10,
cmap=matplotlib.cm.bone,
origin='lower', alpha=0.85))
self.contours.append(self.ax.contour(X1, X2, Z, [0.0], colors='k',
linestyles=['solid']))
else:
raise ValueError("surface type unknown")
class ControllBar(object):
def __init__(self, root, controller):
fm = Tk.Frame(root)
kernel_group = Tk.Frame(fm)
Tk.Radiobutton(kernel_group, text="Linear", variable=controller.kernel,
value=0, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="RBF", variable=controller.kernel,
value=1, command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(kernel_group, text="Poly", variable=controller.kernel,
value=2, command=controller.refit).pack(anchor=Tk.W)
kernel_group.pack(side=Tk.LEFT)
valbox = Tk.Frame(fm)
controller.complexity = Tk.StringVar()
controller.complexity.set("1.0")
c = Tk.Frame(valbox)
Tk.Label(c, text="C:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(c, width=6, textvariable=controller.complexity).pack(
side=Tk.LEFT)
c.pack()
controller.gamma = Tk.StringVar()
controller.gamma.set("0.01")
g = Tk.Frame(valbox)
Tk.Label(g, text="gamma:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(g, width=6, textvariable=controller.gamma).pack(side=Tk.LEFT)
g.pack()
controller.degree = Tk.StringVar()
controller.degree.set("3")
d = Tk.Frame(valbox)
Tk.Label(d, text="degree:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(d, width=6, textvariable=controller.degree).pack(side=Tk.LEFT)
d.pack()
controller.coef0 = Tk.StringVar()
controller.coef0.set("0")
r = Tk.Frame(valbox)
Tk.Label(r, text="coef0:", anchor="e", width=7).pack(side=Tk.LEFT)
Tk.Entry(r, width=6, textvariable=controller.coef0).pack(side=Tk.LEFT)
r.pack()
valbox.pack(side=Tk.LEFT)
cmap_group = Tk.Frame(fm)
Tk.Radiobutton(cmap_group, text="Hyperplanes",
variable=controller.surface_type, value=0,
command=controller.refit).pack(anchor=Tk.W)
Tk.Radiobutton(cmap_group, text="Surface",
variable=controller.surface_type, value=1,
command=controller.refit).pack(anchor=Tk.W)
cmap_group.pack(side=Tk.LEFT)
train_button = Tk.Button(fm, text='Fit', width=5,
command=controller.fit)
train_button.pack()
fm.pack(side=Tk.LEFT)
Tk.Button(fm, text='Clear', width=5,
command=controller.clear_data).pack(side=Tk.LEFT)
def get_parser():
from optparse import OptionParser
op = OptionParser()
op.add_option("--output",
action="store", type="str", dest="output",
help="Path where to dump data.")
return op
def main(argv):
op = get_parser()
opts, args = op.parse_args(argv[1:])
root = Tk.Tk()
model = Model()
controller = Controller(model)
root.wm_title("Scikit-learn Libsvm GUI")
view = View(root, controller)
model.add_observer(view)
Tk.mainloop()
if opts.output:
model.dump_svmlight_file(opts.output)
if __name__ == "__main__":
main(sys.argv)
| bsd-3-clause |
Geosyntec/wqio | docs/conf.py | 2 | 10120 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# wqio documentation build configuration file, created by
# sphinx-quickstart on Sun May 22 14:36:00 2016.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
import sphinx
import matplotlib as mpl
mpl.use("agg")
import seaborn
clean_bkgd = {"axes.facecolor": "none", "figure.facecolor": "none"}
seaborn.set(style="ticks", rc=clean_bkgd)
numpydoc_show_class_members = False
autodoc_member_order = "bysource"
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
sys.path.insert(0, os.path.abspath("sphinxext"))
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.doctest",
"sphinx.ext.intersphinx",
"sphinx.ext.todo",
"sphinx.ext.mathjax",
"sphinx.ext.viewcode",
#'plot_generator',
#'plot_directive',
"numpydoc",
"ipython_directive",
"ipython_console_highlighting",
"sphinx_gallery.gen_gallery",
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = ".rst"
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# Include the example source for plots in API docs
plot_include_source = True
plot_formats = [("png", 90)]
plot_html_show_formats = False
plot_html_show_source_link = False
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = "index"
# General information about the project.
project = "wqio"
copyright = "2016, Paul Hobson (Geosyntec Consultants)"
author = "Paul Hobson (Geosyntec Consultants)"
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = "0.5.1"
# The full version, including alpha/beta/rc tags.
release = "0.5.1"
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This patterns also effect to html_static_path and html_extra_path
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = "sphinx"
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = "sphinx_rtd_theme"
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents.
# "<project> v<release> documentation" by default.
html_title = 'wqio v0.5.1'
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (relative to this directory) to use as a favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not None, a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
# The empty string is equivalent to '%b %d, %Y'.
# html_last_updated_fmt = None
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# 'ja' uses this config value.
# 'zh' user can custom change `jieba` dictionary path.
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = "wqiodoc"
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
# Latex figure (float) alignment
#'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(
master_doc,
"wqio.tex",
"wqio Documentation",
"Paul Hobson (Geosyntec Consultants)",
"manual",
)
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [(master_doc, "wqio", "wqio Documentation", [author], 1)]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(
master_doc,
"wqio",
"wqio Documentation",
author,
"wqio",
"One line description of project.",
"Miscellaneous",
)
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {"https://docs.python.org/": None}
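# A hedged sketch (added; not part of the original configuration) of how further
# intersphinx targets could be registered alongside the Python standard library;
# the numpy URL below is illustrative only and is left commented out:
# intersphinx_mapping = {
#     "https://docs.python.org/": None,
#     "https://docs.scipy.org/doc/numpy/": None,
# }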
| bsd-3-clause |
armanpazouki/chrono | src/demos/python/demo_crank_plot.py | 1 | 5655 | #------------------------------------------------------------------------------
# Name: pychrono example
# Purpose:
#
# Author: Alessandro Tasora
#
# Created: 1/01/2019
# Copyright: (c) ProjectChrono 2019
#------------------------------------------------------------------------------
import pychrono.core as chrono
import pychrono.irrlicht as chronoirr
import matplotlib.pyplot as plt
import numpy as np
print ("Example: create a slider crank and plot results");
# Change this path to the asset path if running from another working directory.
# It must point to the data folder, containing GUI assets (textures, fonts, meshes, etc.)
chrono.SetChronoDataPath("../../../data/")
# ---------------------------------------------------------------------
#
# Create the simulation system and add items
#
mysystem = chrono.ChSystemNSC()
# Some data shared in the following
crank_center = chrono.ChVectorD(-1,0.5,0)
crank_rad = 0.4
crank_thick = 0.1
rod_length = 1.5
# Create four rigid bodies: the truss, the crank, the rod, the piston.
# Create the floor truss
mfloor = chrono.ChBodyEasyBox(3, 1, 3, 1000)
mfloor.SetPos(chrono.ChVectorD(0,-0.5,0))
mfloor.SetBodyFixed(True)
mysystem.Add(mfloor)
# Create the flywheel crank
mcrank = chrono.ChBodyEasyCylinder(crank_rad, crank_thick, 1000)
mcrank.SetPos(crank_center + chrono.ChVectorD(0, 0, -0.1))
# Since ChBodyEasyCylinder creates a vertical (y up) cylinder, here rotate it:
mcrank.SetRot(chrono.Q_ROTATE_Y_TO_Z)
mysystem.Add(mcrank)
# Create a stylized rod
mrod = chrono.ChBodyEasyBox(rod_length, 0.1, 0.1, 1000)
mrod.SetPos(crank_center + chrono.ChVectorD(crank_rad+rod_length/2 , 0, 0))
mysystem.Add(mrod)
# Create a stylized piston
mpiston = chrono.ChBodyEasyCylinder(0.2, 0.3, 1000)
mpiston.SetPos(crank_center + chrono.ChVectorD(crank_rad+rod_length, 0, 0))
mpiston.SetRot(chrono.Q_ROTATE_Y_TO_X)
mysystem.Add(mpiston)
# Now create constraints and motors between the bodies.
# Create crank-truss joint: a motor that spins the crank flywheel
my_motor = chrono.ChLinkMotorRotationSpeed()
my_motor.Initialize(mcrank, # the first connected body
mfloor, # the second connected body
chrono.ChFrameD(crank_center)) # where to create the motor in abs.space
my_angularspeed = chrono.ChFunction_Const(chrono.CH_C_PI) # ang.speed: 180°/s
my_motor.SetMotorFunction(my_angularspeed)
mysystem.Add(my_motor)
# Create crank-rod joint
mjointA = chrono.ChLinkLockRevolute()
mjointA.Initialize(mrod,
mcrank,
chrono.ChCoordsysD( crank_center + chrono.ChVectorD(crank_rad,0,0) ))
mysystem.Add(mjointA)
# Create rod-piston joint
mjointB = chrono.ChLinkLockRevolute()
mjointB.Initialize(mpiston,
mrod,
chrono.ChCoordsysD( crank_center + chrono.ChVectorD(crank_rad+rod_length,0,0) ))
mysystem.Add(mjointB)
# Create piston-truss joint
mjointC = chrono.ChLinkLockPrismatic()
mjointC.Initialize(mpiston,
mfloor,
chrono.ChCoordsysD(
crank_center + chrono.ChVectorD(crank_rad+rod_length,0,0),
chrono.Q_ROTATE_Z_TO_X)
)
mysystem.Add(mjointC)
# ---------------------------------------------------------------------
#
# Create an Irrlicht application to visualize the system
#
myapplication = chronoirr.ChIrrApp(mysystem, 'PyChrono example', chronoirr.dimension2du(1024,768))
myapplication.AddTypicalSky()
myapplication.AddTypicalLogo(chrono.GetChronoDataPath() + 'logo_pychrono_alpha.png')
myapplication.AddTypicalCamera(chronoirr.vector3df(1,1,3), chronoirr.vector3df(0,1,0))
myapplication.AddTypicalLights()
# ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items
# in the system. These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes.
# If you need a finer control on which item really needs a visualization proxy in
# Irrlicht, just use application.AssetBind(myitem); on a per-item basis.
myapplication.AssetBindAll();
# ==IMPORTANT!== Use this function for 'converting' into Irrlicht meshes the assets
# that you added to the bodies into 3D shapes, they can be visualized by Irrlicht!
myapplication.AssetUpdateAll();
# ---------------------------------------------------------------------
#
# Run the simulation
#
# Initialize these lists to store values to plot.
array_time = []
array_angle = []
array_pos = []
array_speed = []
myapplication.SetTimestep(0.005)
# Run the interactive simulation loop
while(myapplication.GetDevice().run()):
# for plotting, append instantaneous values:
array_time.append(mysystem.GetChTime())
array_angle.append(my_motor.GetMotorRot())
array_pos.append(mpiston.GetPos().x)
array_speed.append(mpiston.GetPos_dt().x)
# here happens the visualization and step time integration
myapplication.BeginScene()
myapplication.DrawAll()
myapplication.DoStep()
myapplication.EndScene()
# stop simulation after 2 seconds
if mysystem.GetChTime() > 2:
myapplication.GetDevice().closeDevice()
# Use matplotlib to make two plots when simulation ended:
fig, (ax1, ax2) = plt.subplots(2, sharex = True)
ax1.plot(array_angle, array_pos)
ax1.set(ylabel='position [m]')
ax1.grid()
ax2.plot(array_angle, array_speed, 'r--')
ax2.set(ylabel='speed [m/s]',xlabel='angle [rad]')
ax2.grid()
# trick to plot \pi on x axis of plots instead of 1 2 3 4 etc.
plt.xticks(np.linspace(0, 2*np.pi, 5),['0',r'$\pi/2$',r'$\pi$',r'$3\pi/2$',r'$2\pi$'])
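# Added as an assumption (it may already be present in the full script): a final
# plt.show() is needed to actually display the two subplots when the demo is run
# as a standalone, non-interactive script.
plt.show()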
| bsd-3-clause |
aflaxman/scikit-learn | sklearn/feature_extraction/image.py | 21 | 18105 | """
The :mod:`sklearn.feature_extraction.image` submodule gathers utilities to
extract features from images.
"""
# Authors: Emmanuelle Gouillart <emmanuelle.gouillart@normalesup.org>
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Olivier Grisel
# Vlad Niculae
# License: BSD 3 clause
from itertools import product
import numbers
import numpy as np
from scipy import sparse
from numpy.lib.stride_tricks import as_strided
from ..utils import check_array, check_random_state
from ..base import BaseEstimator
__all__ = ['PatchExtractor',
'extract_patches_2d',
'grid_to_graph',
'img_to_graph',
'reconstruct_from_patches_2d']
###############################################################################
# From an image to a graph
def _make_edges_3d(n_x, n_y, n_z=1):
"""Returns a list of edges for a 3D image.
Parameters
    ----------
n_x : integer
The size of the grid in the x direction.
n_y : integer
The size of the grid in the y direction.
n_z : integer, optional
The size of the grid in the z direction, defaults to 1
"""
vertices = np.arange(n_x * n_y * n_z).reshape((n_x, n_y, n_z))
edges_deep = np.vstack((vertices[:, :, :-1].ravel(),
vertices[:, :, 1:].ravel()))
edges_right = np.vstack((vertices[:, :-1].ravel(),
vertices[:, 1:].ravel()))
edges_down = np.vstack((vertices[:-1].ravel(), vertices[1:].ravel()))
edges = np.hstack((edges_deep, edges_right, edges_down))
return edges
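# Illustrative example (added note; not part of the original module): for a 2 x 2
# grid, _make_edges_3d(2, 2) returns
#     array([[0, 2, 0, 1],
#            [1, 3, 2, 3]])
# i.e. the right-neighbour edges (0, 1), (2, 3) followed by the down-neighbour
# edges (0, 2), (1, 3) of the flattened grid.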
def _compute_gradient_3d(edges, img):
n_x, n_y, n_z = img.shape
gradient = np.abs(img[edges[0] // (n_y * n_z),
(edges[0] % (n_y * n_z)) // n_z,
(edges[0] % (n_y * n_z)) % n_z] -
img[edges[1] // (n_y * n_z),
(edges[1] % (n_y * n_z)) // n_z,
(edges[1] % (n_y * n_z)) % n_z])
return gradient
# XXX: Why mask the image after computing the weights?
def _mask_edges_weights(mask, edges, weights=None):
"""Apply a mask to edges (weighted or not)"""
inds = np.arange(mask.size)
inds = inds[mask.ravel()]
ind_mask = np.logical_and(np.in1d(edges[0], inds),
np.in1d(edges[1], inds))
edges = edges[:, ind_mask]
if weights is not None:
weights = weights[ind_mask]
if len(edges.ravel()):
maxval = edges.max()
else:
maxval = 0
order = np.searchsorted(np.unique(edges.ravel()), np.arange(maxval + 1))
edges = order[edges]
if weights is None:
return edges
else:
return edges, weights
def _to_graph(n_x, n_y, n_z, mask=None, img=None,
return_as=sparse.coo_matrix, dtype=None):
"""Auxiliary function for img_to_graph and grid_to_graph
"""
edges = _make_edges_3d(n_x, n_y, n_z)
if dtype is None:
if img is None:
dtype = np.int
else:
dtype = img.dtype
if img is not None:
img = np.atleast_3d(img)
weights = _compute_gradient_3d(edges, img)
if mask is not None:
edges, weights = _mask_edges_weights(mask, edges, weights)
diag = img.squeeze()[mask]
else:
diag = img.ravel()
n_voxels = diag.size
else:
if mask is not None:
mask = mask.astype(dtype=np.bool, copy=False)
mask = np.asarray(mask, dtype=np.bool)
edges = _mask_edges_weights(mask, edges)
n_voxels = np.sum(mask)
else:
n_voxels = n_x * n_y * n_z
weights = np.ones(edges.shape[1], dtype=dtype)
diag = np.ones(n_voxels, dtype=dtype)
diag_idx = np.arange(n_voxels)
i_idx = np.hstack((edges[0], edges[1]))
j_idx = np.hstack((edges[1], edges[0]))
graph = sparse.coo_matrix((np.hstack((weights, weights, diag)),
(np.hstack((i_idx, diag_idx)),
np.hstack((j_idx, diag_idx)))),
(n_voxels, n_voxels),
dtype=dtype)
if return_as is np.ndarray:
return graph.toarray()
return return_as(graph)
def img_to_graph(img, mask=None, return_as=sparse.coo_matrix, dtype=None):
"""Graph of the pixel-to-pixel gradient connections
Edges are weighted with the gradient values.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
img : ndarray, 2D or 3D
2D or 3D image
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : None or dtype, optional
The data of the returned sparse matrix. By default it is the
dtype of img
Notes
-----
For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
handled by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
img = np.atleast_3d(img)
n_x, n_y, n_z = img.shape
return _to_graph(n_x, n_y, n_z, mask, img, return_as, dtype)
def grid_to_graph(n_x, n_y, n_z=1, mask=None, return_as=sparse.coo_matrix,
dtype=np.int):
"""Graph of the pixel-to-pixel connections
Edges exist if 2 voxels are connected.
Parameters
----------
n_x : int
Dimension in x axis
n_y : int
Dimension in y axis
n_z : int, optional, default 1
Dimension in z axis
mask : ndarray of booleans, optional
An optional mask of the image, to consider only part of the
pixels.
return_as : np.ndarray or a sparse matrix class, optional
The class to use to build the returned adjacency matrix.
dtype : dtype, optional, default int
The data of the returned sparse matrix. By default it is int
Notes
-----
For scikit-learn versions 0.14.1 and prior, return_as=np.ndarray was
handled by returning a dense np.matrix instance. Going forward, np.ndarray
returns an np.ndarray, as expected.
For compatibility, user code relying on this method should wrap its
calls in ``np.asarray`` to avoid type issues.
"""
return _to_graph(n_x, n_y, n_z, mask=mask, return_as=return_as,
dtype=dtype)
###############################################################################
# From an image to a set of small image patches
def _compute_n_patches(i_h, i_w, p_h, p_w, max_patches=None):
"""Compute the number of patches that will be extracted in an image.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
i_h : int
The image height
i_w : int
        The image width
p_h : int
The height of a patch
p_w : int
The width of a patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
"""
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
all_patches = n_h * n_w
if max_patches:
if (isinstance(max_patches, (numbers.Integral))
and max_patches < all_patches):
return max_patches
elif (isinstance(max_patches, (numbers.Real))
and 0 < max_patches < 1):
return int(max_patches * all_patches)
else:
raise ValueError("Invalid value for max_patches: %r" % max_patches)
else:
return all_patches
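# Illustrative example (added note; not part of the original module): a 4 x 4 image
# admits 3 * 3 = 9 distinct 2 x 2 patches, so
#     _compute_n_patches(4, 4, 2, 2)                     # -> 9
#     _compute_n_patches(4, 4, 2, 2, max_patches=0.5)    # -> 4
# the float max_patches being interpreted as a proportion of the total.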
def extract_patches(arr, patch_shape=8, extraction_step=1):
"""Extracts patches of any n-dimensional array in place using strides.
Given an n-dimensional array it will return a 2n-dimensional array with
the first n dimensions indexing patch position and the last n indexing
the patch content. This operation is immediate (O(1)). A reshape
performed on the first n dimensions will cause numpy to copy data, leading
to a list of extracted patches.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
arr : ndarray
n-dimensional array of which patches are to be extracted
patch_shape : integer or tuple of length arr.ndim
Indicates the shape of the patches to be extracted. If an
integer is given, the shape will be a hypercube of
sidelength given by its value.
extraction_step : integer or tuple of length arr.ndim
Indicates step size at which extraction shall be performed.
If integer is given, then the step is uniform in all dimensions.
Returns
-------
patches : strided ndarray
2n-dimensional array indexing patches on first n dimensions and
containing patches on the last n dimensions. These dimensions
are fake, but this way no data is copied. A simple reshape invokes
a copying operation to obtain a list of patches:
result.reshape([-1] + list(patch_shape))
"""
arr_ndim = arr.ndim
if isinstance(patch_shape, numbers.Number):
patch_shape = tuple([patch_shape] * arr_ndim)
if isinstance(extraction_step, numbers.Number):
extraction_step = tuple([extraction_step] * arr_ndim)
patch_strides = arr.strides
slices = [slice(None, None, st) for st in extraction_step]
indexing_strides = arr[slices].strides
patch_indices_shape = ((np.array(arr.shape) - np.array(patch_shape)) //
np.array(extraction_step)) + 1
shape = tuple(list(patch_indices_shape) + list(patch_shape))
strides = tuple(list(indexing_strides) + list(patch_strides))
patches = as_strided(arr, shape=shape, strides=strides)
return patches
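# Illustrative example (added note; not part of the original module): extracting
# 2 x 2 patches from a 4 x 4 array with unit step yields a strided view of shape
# (3, 3, 2, 2); no data is copied until that view is reshaped:
#     arr = np.arange(16).reshape(4, 4)
#     extract_patches(arr, patch_shape=2, extraction_step=1).shape   # -> (3, 3, 2, 2)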
def extract_patches_2d(image, patch_size, max_patches=None, random_state=None):
"""Reshape a 2D image into a collection of patches
The resulting patches are allocated in a dedicated array.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
image : array, shape = (image_height, image_width) or
(image_height, image_width, n_channels)
The original image data. For color images, the last dimension specifies
the channel: a RGB image would have `n_channels=3`.
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches to extract. If max_patches is a float
between 0 and 1, it is taken to be a proportion of the total number
of patches.
random_state : int, RandomState instance or None, optional (default=None)
Pseudo number generator state used for random sampling to use if
`max_patches` is not None. If int, random_state is the seed used by
the random number generator; If RandomState instance, random_state is
the random number generator; If None, the random number generator is
the RandomState instance used by `np.random`.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the image, where `n_patches`
is either `max_patches` or the total number of patches that can be
extracted.
Examples
--------
>>> from sklearn.feature_extraction import image
>>> one_image = np.arange(16).reshape((4, 4))
>>> one_image
array([[ 0, 1, 2, 3],
[ 4, 5, 6, 7],
[ 8, 9, 10, 11],
[12, 13, 14, 15]])
>>> patches = image.extract_patches_2d(one_image, (2, 2))
>>> print(patches.shape)
(9, 2, 2)
>>> patches[0]
array([[0, 1],
[4, 5]])
>>> patches[1]
array([[1, 2],
[5, 6]])
>>> patches[8]
array([[10, 11],
[14, 15]])
"""
i_h, i_w = image.shape[:2]
p_h, p_w = patch_size
    if p_h > i_h:
        raise ValueError("Height of the patch should be less than or equal"
                         " to the height of the image.")
    if p_w > i_w:
        raise ValueError("Width of the patch should be less than or equal"
                         " to the width of the image.")
image = check_array(image, allow_nd=True)
image = image.reshape((i_h, i_w, -1))
n_colors = image.shape[-1]
extracted_patches = extract_patches(image,
patch_shape=(p_h, p_w, n_colors),
extraction_step=1)
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, max_patches)
if max_patches:
rng = check_random_state(random_state)
i_s = rng.randint(i_h - p_h + 1, size=n_patches)
j_s = rng.randint(i_w - p_w + 1, size=n_patches)
patches = extracted_patches[i_s, j_s, 0]
else:
patches = extracted_patches
patches = patches.reshape(-1, p_h, p_w, n_colors)
# remove the color dimension if useless
if patches.shape[-1] == 1:
return patches.reshape((n_patches, p_h, p_w))
else:
return patches
def reconstruct_from_patches_2d(patches, image_size):
"""Reconstruct the image from all of its patches.
Patches are assumed to overlap and the image is constructed by filling in
the patches from left to right, top to bottom, averaging the overlapping
regions.
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The complete set of patches. If the patches contain colour information,
channels are indexed along the last dimension: RGB patches would
have `n_channels=3`.
image_size : tuple of ints (image_height, image_width) or
(image_height, image_width, n_channels)
the size of the image that will be reconstructed
Returns
-------
image : array, shape = image_size
the reconstructed image
"""
i_h, i_w = image_size[:2]
p_h, p_w = patches.shape[1:3]
img = np.zeros(image_size)
# compute the dimensions of the patches array
n_h = i_h - p_h + 1
n_w = i_w - p_w + 1
for p, (i, j) in zip(patches, product(range(n_h), range(n_w))):
img[i:i + p_h, j:j + p_w] += p
for i in range(i_h):
for j in range(i_w):
# divide by the amount of overlap
# XXX: is this the most efficient way? memory-wise yes, cpu wise?
img[i, j] /= float(min(i + 1, p_h, i_h - i) *
min(j + 1, p_w, i_w - j))
return img
class PatchExtractor(BaseEstimator):
"""Extracts patches from a collection of images
Read more in the :ref:`User Guide <image_feature_extraction>`.
Parameters
----------
patch_size : tuple of ints (patch_height, patch_width)
the dimensions of one patch
max_patches : integer or float, optional default is None
The maximum number of patches per image to extract. If max_patches is a
float in (0, 1), it is taken to mean a proportion of the total number
of patches.
random_state : int, RandomState instance or None, optional (default=None)
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
"""
def __init__(self, patch_size=None, max_patches=None, random_state=None):
self.patch_size = patch_size
self.max_patches = max_patches
self.random_state = random_state
def fit(self, X, y=None):
"""Do nothing and return the estimator unchanged
This method is just there to implement the usual API and hence
work in pipelines.
"""
return self
def transform(self, X):
"""Transforms the image samples in X into a matrix of patch data.
Parameters
----------
X : array, shape = (n_samples, image_height, image_width) or
(n_samples, image_height, image_width, n_channels)
Array of images from which to extract patches. For color images,
the last dimension specifies the channel: a RGB image would have
`n_channels=3`.
Returns
-------
patches : array, shape = (n_patches, patch_height, patch_width) or
(n_patches, patch_height, patch_width, n_channels)
The collection of patches extracted from the images, where
`n_patches` is either `n_samples * max_patches` or the total
number of patches that can be extracted.
"""
self.random_state = check_random_state(self.random_state)
n_images, i_h, i_w = X.shape[:3]
X = np.reshape(X, (n_images, i_h, i_w, -1))
n_channels = X.shape[-1]
if self.patch_size is None:
patch_size = i_h // 10, i_w // 10
else:
patch_size = self.patch_size
# compute the dimensions of the patches array
p_h, p_w = patch_size
n_patches = _compute_n_patches(i_h, i_w, p_h, p_w, self.max_patches)
patches_shape = (n_images * n_patches,) + patch_size
if n_channels > 1:
patches_shape += (n_channels,)
# extract the patches
patches = np.empty(patches_shape)
for ii, image in enumerate(X):
patches[ii * n_patches:(ii + 1) * n_patches] = extract_patches_2d(
image, patch_size, self.max_patches, self.random_state)
return patches
| bsd-3-clause |
sevenian3/ChromaStarPy | LevelPopsGasServer.py | 1 | 55996 | # -*- coding: utf-8 -*-
"""
Created on Mon Apr 24 14:13:47 2017
@author: ishort
"""
import math
import Useful
import ToolBox
#import numpy
#JB#
#from matplotlib.pyplot import plot, title, show, scatter
#storage for fits (not all may be used)
uw = []
uwa = []
uwb = []
uwStage = []
uwbStage = []
uwu = []
uwl = []
uua=[]
uub=[]
"""
#a function to create a cubic function fit extrapolation
def cubicFit(x,y):
coeffs = numpy.polyfit(x,y,3)
#returns an array of coefficients for the cubic fit of the form
#Ax^3 + Bx^2 + Cx + D as [A,B,C,D]
return coeffs
#this will work for any number of data points!
def valueFromFit(fit,x):
#return the value y for a given fit, at point x
return (fit[0]*(x**3)+fit[1]*(x**2)+fit[2]*x+fit[3])
#holds the five temperatures at which we have partition function data
"""
masterTemp = [130, 500, 3000, 8000, 10000]
#JB#
#def levelPops(lam0In, logNStage, chiL, log10UwStage, gwL, numDeps, temp):
def levelPops(lam0In, logNStage, chiL, logUw, gwL, numDeps, temp):
""" Returns depth distribution of occupation numbers in lower level of b-b transition,
// Input parameters:
    // lam0In - line centre wavelength in nm
    // logNStage - log_e density of absorbers in relevant ion stage (cm^-3)
    // chiL - energy of lower atomic E-level of b-b transition in eV
    // logUw - temperature-dependent partition function values for the ion stage
    // gwL - statistical weight of the lower E-level
    // Also needs atmospheric structure information:
// numDeps
// temp structure """
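    #// Added reference note (not in the original source): the population computed
    #// below is the Boltzmann excitation distribution,
    #//   n_l / N_stage = (g_l / U(T)) * exp(-chi_l / (k*T)),
    #// i.e. ln(n_l) = ln(N_stage) + ln(g_l) - ln(U(T)) - chi_l/(k*T),
    #// which is the expression assigned to logNums[id] inside the depth loop.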
c = Useful.c()
logC = Useful.logC()
k = Useful.k()
logK = Useful.logK()
logH = Useful.logH()
logEe = Useful.logEe()
logMe = Useful.logMe()
ln10 = math.log(10.0)
logE = math.log10(math.e); #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
#//double logNl = logNlIn * ln10; // Convert to base e
    #// Partition functions passed in are vectors with temperature-dependent base 10 log Us
#// Convert to natural logs:
#double thisLogUw, Ttheta;
thisLogUw = 0.0 # //default initialization
#logUw = [ 0.0 for i in range(5) ]
logE10 = math.log(10.0)
#print("log10UwStage ", log10UwStage)
#for kk in range(len(logUw)):
# logUw[kk] = logE10*log10UwStage[kk] #// lburns new loop
logGwL = math.log(gwL)
#//System.out.println("chiL before: " + chiL);
#// If we need to subtract chiI from chiL, do so *before* converting to tiny numbers in ergs!
#////For testing with Ca II lines using gS3 internal line list only:
#//boolean ionized = true;
#//if (ionized) {
#// //System.out.println("ionized, doing chiL - chiI: " + ionized);
#// // chiL = chiL - chiI;
#// chiL = chiL - 6.113;
#// }
#// //
#//Log of line-center wavelength in cm
logLam0 = math.log(lam0In) #// * 1.0e-7);
#// energy of b-b transition
logTransE = logH + logC - logLam0 #//ergs
if (chiL <= 0.0):
chiL = 1.0e-49
logChiL = math.log(chiL) + Useful.logEv() #// Convert lower E-level from eV to ergs
logBoltzFacL = logChiL - Useful.logK() #// Pre-factor for exponent of excitation Boltzmann factor
boltzFacL = math.exp(logBoltzFacL)
    boltzFacGround = 0.0 / k #//I know - it's zero, but let's do it this way anyway
#// return a 1D numDeps array of logarithmic number densities
#// level population of lower level of bb transition (could be in either stage I or II!)
logNums = [ 0.0 for i in range(numDeps)]
#double num, logNum, expFac;
#JB#
#print("thisLogUw:",numpy.shape(logUw))
logUwFit = ToolBox.cubicFit(masterTemp,logUw)#u(T) fit
uw.append(logUwFit)
#JB#
for id in range(numDeps):
#//Determine temperature dependenet partition functions Uw:
#Ttheta = 5040.0 / temp[0][id]
#//NEW Determine temperature dependent partition functions Uw: lburns
thisTemp = temp[0][id]
"""
if (Ttheta >= 1.0):
thisLogUw = logUw[0]
if (Ttheta <= 0.5):
thisLogUw = logUw[1]
if (Ttheta > 0.5 and Ttheta < 1.0):
thisLogUw = ( logUw[1] * (Ttheta - 0.5)/(1.0 - 0.5) ) \
+ ( logUw[0] * (1.0 - Ttheta)/(1.0 - 0.5) )
"""
#JB#
thisLogUw = ToolBox.valueFromFit(logUwFit,thisTemp)#u(T) value extrapolated
#JB#
if (thisTemp >= 10000.0):
thisLogUw = logUw[4]
if (thisTemp <= 130.0):
thisLogUw = logUw[0]
"""
if (thisTemp > 130 and thisTemp <= 500):
thisLogUw = logUw[1] * (thisTemp - 130)/(500 - 130) \
+ logUw[0] * (500 - thisTemp)/(500 - 130)
if (thisTemp > 500 and thisTemp <= 3000):
thisLogUw = logUw[2] * (thisTemp - 500)/(3000 - 500) \
+ logUw[1] * (3000 - thisTemp)/(3000 - 500)
if (thisTemp > 3000 and thisTemp <= 8000):
thisLogUw = logUw[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUw[2] * (8000 - thisTemp)/(8000 - 3000)
if (thisTemp > 8000 and thisTemp < 10000):
thisLogUw = logUw[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUw[3] * (10000 - thisTemp)/(10000 - 8000)
"""
#print("logUw ", logUw, " thisLogUw ", thisLogUw)
#//System.out.println("LevPops: ionized branch taken, ionized = " + ionized);
#// Take stat weight of ground state as partition function:
logNums[id] = logNStage[id] - boltzFacL / temp[0][id] + logGwL - thisLogUw #// lower level of b-b transition
#print("LevelPopsServer.stagePops id ", id, " logNStage[id] ", logNStage[id], " boltzFacL ", boltzFacL, " temp[0][id] ", temp[0][id], " logGwL ", logGwL, " thisLogUw ", thisLogUw, " logNums[id] ", logNums[id]);
#// System.out.println("LevelPops: id, logNums[0][id], logNums[1][id], logNums[2][id], logNums[3][id]: " + id + " "
#// + Math.exp(logNums[0][id]) + " "
#// + Math.exp(logNums[1][id]) + " "
#// + Math.exp(logNums[2][id]) + " "
#// + Math.exp(logNums[3][id]));
#//System.out.println("LevelPops: id, logNums[0][id], logNums[1][id], logNums[2][id], logNums[3][id], logNums[4][id]: " + id + " "
#// + logE * (logNums[0][id]) + " "
#// + logE * (logNums[1][id]) + " "
#// + logE * (logNums[2][id]) + " "
# // + logE * (logNums[3][id]) + " "
#// + logE * (logNums[4][id]) );
#//System.out.println("LevelPops: id, logIonFracI, logIonFracII: " + id + " " + logE*logIonFracI + " " + logE*logIonFracII
#// + "logNum, logNumI, logNums[0][id], logNums[1][id] "
#// + logE*logNum + " " + logE*logNumI + " " + logE*logNums[0][id] + " " + logE*logNums[1][id]);
#//System.out.println("LevelPops: id, logIonFracI: " + id + " " + logE*logIonFracI
#// + "logNums[0][id], boltzFacL/temp[0][id], logNums[2][id]: "
#// + logNums[0][id] + " " + boltzFacL/temp[0][id] + " " + logNums[2][id]);
#//id loop
#stop
#print (uw)
return logNums
#//This version - ionization equilibrium *WITHOUT* molecules - logNum is TOTAL element population
#def stagePops2(logNum, Ne, chiIArr, log10UwAArr, \
# numMols, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
# numDeps, temp):
def stagePops(logNum, Ne, chiIArr, logUw, \
numDeps, temp):
#line 1: //species A data - ionization equilibrium of A
#line 2: //data for set of species "B" - molecular equlibrium for set {AB}
"""Ionization equilibrium routine WITHOUT molecule formation:
// Returns depth distribution of ionization stage populations
// Input parameters:
// logNum - array with depth-dependent total element number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
    // Also needs atmospheric structure information:
// numDeps
// temp structure
// rho structure
// Atomic element A is the one whose ionization fractions are being computed
//
"""
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated
#// var numMols = dissEArr.length;
    #// Partition functions passed in are vectors with temperature-dependent base 10 log Us
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
    #//We need one more stage in the size of the Saha factor array than the number of stages we're actually populating
thisLogUw = [ 0.0 for i in range(numStages+1) ]
for i in range(numStages+1):
thisLogUw[i] = 0.0
logE10 = math.log(10.0)
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
boltzFacI = [ 0.0 for i in range(numStages) ]
#print("numStages ", numStages, " Useful.logEv ", Useful.logEv())
for i in range(numStages):
#print("i ", i, " chiIArr ", chiIArr[i])
logChiI = math.log(chiIArr[i]) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI[i] = math.exp(logBoltzFacI)
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH())
#// return a 2D 5 x numDeps array of logarithmic number densities
#// Row 0: neutral stage ground state population
#// Row 1: singly ionized stage ground state population
#// Row 2: doubly ionized stage ground state population
#// Row 3: triply ionized stage ground state population
#// Row 4: quadruply ionized stage ground state population
#double[][] logNums = new double[numStages][numDeps];
logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ]
    #//We need one more stage in the size of the Saha factor array than the number of stages we're actually populating
    #// for index accounting purposes
#// For atomic ionization stages:
logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#//
logIonFrac = [ 0.0 for i in range(numStages) ]
#double expFac, logNe;
#// Now - molecular variables:
thisLogUwA = 0.0 #// element A
#thisLogQwAB = math.log(300.0)
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
logUwA = [ 0.0 for i in range(5) ]
#JB#
uua=[]
#uub=[]
#qwab=[]
for iStg in range(numStages):
currentUwArr=list(logUw[iStg])#u(T) determined values
UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit
uua.append(UwFit)
#print(logUw[iStg])
for id in range(numDeps):
        #//// reduce or enhance number density by over-all Rosseland opacity scale parameter
#//
#//Row 1 of Ne is log_e Ne in cm^-3
logNe = Ne[1][id]
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
#Ttheta = 5040.0 / thisTemp
#JB#
#use temps and partition values to create a function
#then use said function to extrapolate values for all points
thisLogUw[numStages] = 0.0
for iStg in range(numStages):
thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated
#JB#
#// NEW Determine temperature dependent partition functions Uw: lburns
if (thisTemp <= 130.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][0]
#for iMol in range(numMols):
# thisLogUwB[iMol] = logUwB[iMol][0]
if (thisTemp >= 10000.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][4]
#for iMol in range(numMols):
# thisLogUwB[iMol] = logUwB[iMol][4]
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
thisLogUwA = thisLogUw[0];
#//Ionization stage Saha factors:
for iStg in range(numStages):
#print("iStg ", iStg)
logSaha[iStg+1][iStg] = logSahaFac - logNe - (boltzFacI[iStg] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg+1] - thisLogUw[iStg]
saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg])
#//Compute log of denominator is ionization fraction, f_stage
denominator = 1.0 #//default initialization - leading term is always unity
#//ion stage contributions:
for jStg in range(1, numStages+1):
addend = 1.0 #//default initialization for product series
for iStg in range(jStg):
#//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg);
addend = addend * saha[iStg+1][iStg]
denominator = denominator + addend
#//
logDenominator = math.log(denominator)
logIonFrac[0] = -1.0 * logDenominator #// log ionization fraction in stage I
for jStg in range(1, numStages):
addend = 0.0 #//default initialization for product series
for iStg in range(jStg):
#//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg);
addend = addend + logSaha[iStg+1][iStg]
logIonFrac[jStg] = addend - logDenominator
for iStg in range(numStages):
logNums[iStg][id] = logNum[id] + logIonFrac[iStg]
#//id loop
return logNums;
#//end method stagePops
#end method levelPops
#def stagePops2(logNum, Ne, chiIArr, log10UwAArr, \
# numMols, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
# numDeps, temp):
def stagePops2(logNum, Ne, chiIArr, logUw, \
numMols, logNumB, dissEArr, logUwB, logQwABArr, logMuABArr, \
numDeps, temp):
#line 1: //species A data - ionization equilibrium of A
#line 2: //data for set of species "B" - molecular equlibrium for set {AB}
"""Ionization equilibrium routine that accounts for molecule formation:
// Returns depth distribution of ionization stage populations
// Input parameters:
// logNum - array with depth-dependent total element number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
// Also needs atsmopheric structure information:
// numDeps
// temp structure
// rho structure
// Atomic element A is the one whose ionization fractions are being computed
// Element B refers to array of other species with which A forms molecules AB """
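    #// Added reference note (not in the original source): besides the atomic Saha factors,
    #// each molecule AB contributes a dissociation-equilibrium (molecular Saha) term,
    #//   N_A*N_B/N_AB = (2*pi*mu_AB*k*T/h^2)^(3/2) * (U_A*U_B/Q_AB) * exp(-D_0/(k*T)),
    #// whose inverse (N_AB/N_A) is added to the denominator of the ionization fraction below.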
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated
#// var numMols = dissEArr.length;
    #// Partition functions passed in are vectors with temperature-dependent base 10 log Us
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
    #//We need one more stage in the size of the Saha factor array than the number of stages we're actually populating
thisLogUw = [ 0.0 for i in range(numStages+1) ]
for i in range(numStages+1):
thisLogUw[i] = 0.0
logE10 = math.log(10.0)
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
boltzFacI = [ 0.0 for i in range(numStages) ]
#print("numStages ", numStages, " Useful.logEv ", Useful.logEv())
for i in range(numStages):
#print("i ", i, " chiIArr ", chiIArr[i])
logChiI = math.log(chiIArr[i]) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI[i] = math.exp(logBoltzFacI)
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH())
#// return a 2D 5 x numDeps array of logarithmic number densities
#// Row 0: neutral stage ground state population
#// Row 1: singly ionized stage ground state population
#// Row 2: doubly ionized stage ground state population
#// Row 3: triply ionized stage ground state population
#// Row 4: quadruply ionized stage ground state population
#double[][] logNums = new double[numStages][numDeps];
logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ]
    #//We need one more stage in the size of the Saha factor array than the number of stages we're actually populating
    #// for index accounting purposes
#// For atomic ionization stages:
logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#//
logIonFrac = [ 0.0 for i in range(numStages) ]
#double expFac, logNe;
#// Now - molecular variables:
#//Treat at least one molecule - if there are really no molecules for an atomic species,
#//there will be one phantom molecule in the denominator of the ionization fraction
#//with an impossibly high dissociation energy
ifMols = True
if (numMols == 0):
ifMols = False
numMols = 1
#//This should be inherited, but let's make sure:
dissEArr[0] = 19.0 #//eV
#//Molecular partition functions - default initialization:
#double[] thisLogUwB = new double[numMols];
thisLogUwB = [ 0.0 for i in range(numMols) ]
for iMol in range(numMols):
thisLogUwB[iMol] = 0.0 #// variable for temp-dependent computed partn fn of array element B
thisLogUwA = 0.0 #// element A
thisLogQwAB = math.log(300.0)
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
logUwA = [ 0.0 for i in range(5) ]
if (numMols > 0):
for kk in range(len(logUwA)):
logUwA[kk] = logUw[0][kk]
#// lburns
#//}
#//// Molecular partition functions:
#//Molecular dissociation Boltzmann factors:
boltzFacIAB = [ 0.0 for i in range(numMols) ]
logMolSahaFac = [ 0.0 for i in range(numMols) ]
#//if (numMols > 0){
#double logDissE, logBoltzFacIAB;
for iMol in range(numMols):
logDissE = math.log(dissEArr[iMol]) + Useful.logEv()
logBoltzFacIAB = logDissE - Useful.logK()
boltzFacIAB[iMol] = math.exp(logBoltzFacIAB)
logMolSahaFac[iMol] = (3.0 / 2.0) * (log2pi + logMuABArr[iMol] + Useful.logK() - 2.0 * Useful.logH())
#//console.log("iMol " + iMol + " dissEArr[iMol] " + dissEArr[iMol] + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " logMuABArr " + logE*logMuABArr[iMol] + " logMolSahaFac " + logE*logMolSahaFac[iMol]);
#//}
#// For molecular species:
logSahaMol = [ 0.0 for i in range(numMols) ]
invSahaMol = [ 0.0 for i in range(numMols) ]
#JB#
uua=[]
uub=[]
qwab=[]
for iStg in range(numStages):
currentUwArr=list(logUw[iStg])#u(T) determined values
UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit
uua.append(UwFit)
#print(logUw[iStg])
for iMol in range(numMols):
currentUwBArr=list(logUwB[iMol])#u(T) determined values
UwBFit = ToolBox.cubicFit(masterTemp,currentUwBArr)#u(T) fit
uub.append(UwBFit)
for id in range(numDeps):
        #//// reduce or enhance number density by over-all Rosseland opacity scale parameter
#//
#//Row 1 of Ne is log_e Ne in cm^-3
logNe = Ne[1][id]
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
#Ttheta = 5040.0 / thisTemp
#JB#
#use temps and partition values to create a function
#then use said function to extrapolate values for all points
thisLogUw[numStages] = 0.0
for iStg in range(numStages):
thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated
for iMol in range(numMols):
thisLogUwB[iMol] = ToolBox.valueFromFit(uub[iMol],thisTemp)#u(T) value extrapolated
#JB#
#// NEW Determine temperature dependent partition functions Uw: lburns
if (thisTemp <= 130.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][0]
for iMol in range(numMols):
thisLogUwB[iMol] = logUwB[iMol][0]
if (thisTemp >= 10000.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][4]
for iMol in range(numMols):
thisLogUwB[iMol] = logUwB[iMol][4]
for iMol in range(numMols):
if (thisTemp < 3000.0):
thisLogQwAB = ( logQwABArr[iMol][1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \
+ ( logQwABArr[iMol][2] * (thisTemp - 500.0)/(3000.0 - 500.0) )
if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ):
thisLogQwAB = ( logQwABArr[iMol][2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \
+ ( logQwABArr[iMol][3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) )
if ( thisTemp > 8000.0 ):
thisLogQwAB = ( logQwABArr[iMol][3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \
+ ( logQwABArr[iMol][4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) )
#// iMol loop
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
thisLogUwA = thisLogUw[0];
#//Ionization stage Saha factors:
for iStg in range(numStages):
#print("iStg ", iStg)
logSaha[iStg+1][iStg] = logSahaFac - logNe - (boltzFacI[iStg] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg+1] - thisLogUw[iStg]
saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg])
#//Molecular Saha factors:
for iMol in range(numMols):
logSahaMol[iMol] = logMolSahaFac[iMol] - logNumB[iMol][id] - (boltzFacIAB[iMol] / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUwB[iMol] + thisLogUwA - thisLogQwAB
#//For denominator of ionization fraction, we need *inverse* molecular Saha factors (N_AB/NI):
logSahaMol[iMol] = -1.0 * logSahaMol[iMol]
invSahaMol[iMol] = math.exp(logSahaMol[iMol])
#//Compute log of denominator is ionization fraction, f_stage
denominator = 1.0 #//default initialization - leading term is always unity
#//ion stage contributions:
for jStg in range(1, numStages+1):
addend = 1.0 #//default initialization for product series
for iStg in range(jStg):
#//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg);
addend = addend * saha[iStg+1][iStg]
denominator = denominator + addend
#//molecular contribution
if (ifMols == True):
for iMol in range(numMols):
denominator = denominator + invSahaMol[iMol]
#//
logDenominator = math.log(denominator)
logIonFrac[0] = -1.0 * logDenominator #// log ionization fraction in stage I
for jStg in range(1, numStages):
addend = 0.0 #//default initialization for product series
for iStg in range(jStg):
#//console.log("jStg " + jStg + " saha[][] indices " + (iStg+1) + " " + iStg);
addend = addend + logSaha[iStg+1][iStg]
logIonFrac[jStg] = addend - logDenominator
for iStg in range(numStages):
logNums[iStg][id] = logNum[id] + logIonFrac[iStg]
#//id loop
return logNums;
#//end method stagePops
def stagePops3(logNum, Ne, chiIArr, logUw, numDeps, temp):
#Version for ChromaStarPyGas: logNum is now *neutral stage* population from Phil
# Bennett's GAS package
#line 1: //species A data - ionization equilibrium of A
#line 2: //data for set of species "B" - molecular equlibrium for set {AB}
"""Ionization equilibrium routine that accounts for molecule formation:
// Returns depth distribution of ionization stage populations
// Input parameters:
// logNum - array with depth-dependent neutral stage number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
    // Also needs atmospheric structure information:
// numDeps
// temp structure
// rho structure
// Atomic element A is the one whose ionization fractions are being computed
// Element B refers to array of other species with which A forms molecules AB """
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
numStages = len(chiIArr) #// + 1; //need one more stage above the highest stage to be populated
#// var numMols = dissEArr.length;
    #// Partition functions passed in are vectors with temperature-dependent base 10 log Us
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
    #//We need one more stage in the size of the Saha factor array than the number of stages we're actually populating
thisLogUw = [ 0.0 for i in range(numStages+1) ]
for i in range(numStages+1):
thisLogUw[i] = 0.0
logE10 = math.log(10.0)
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
boltzFacI = [ 0.0 for i in range(numStages) ]
#print("numStages ", numStages, " Useful.logEv ", Useful.logEv())
for i in range(numStages):
#print("i ", i, " chiIArr ", chiIArr[i])
logChiI = math.log(chiIArr[i]) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI[i] = math.exp(logBoltzFacI)
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH())
#// return a 2D 5 x numDeps array of logarithmic number densities
#// Row 0: neutral stage ground state population
#// Row 1: singly ionized stage ground state population
#// Row 2: doubly ionized stage ground state population
#// Row 3: triply ionized stage ground state population
#// Row 4: quadruply ionized stage ground state population
#double[][] logNums = new double[numStages][numDeps];
logNums = [ [ 0.0 for i in range(numDeps)] for j in range(numStages) ]
    #//We need one more stage in the size of the Saha factor array than the number of stages we're actually populating
    #// for index accounting purposes
#// For atomic ionization stages:
#logSaha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#saha = [ [ 0.0 for i in range(numStages+1)] for j in range(numStages+1) ]
#//
#logIonFrac = [ 0.0 for i in range(numStages) ]
#double expFac, logNe;
#JB#
uua=[]
uub=[]
qwab=[]
for iStg in range(numStages):
currentUwArr=list(logUw[iStg])#u(T) determined values
UwFit = ToolBox.cubicFit(masterTemp,currentUwArr)#u(T) fit
uua.append(UwFit)
#print(logUw[iStg])
for id in range(numDeps):
        #//// reduce or enhance number density by over-all Rosseland opacity scale parameter
#//
#//Row 1 of Ne is log_e Ne in cm^-3
logNe = Ne[1][id]
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
#Ttheta = 5040.0 / thisTemp
#JB#
#use temps and partition values to create a function
#then use said function to extrapolate values for all points
thisLogUw[numStages] = 0.0
for iStg in range(numStages):
thisLogUw[iStg] = ToolBox.valueFromFit(uua[iStg],thisTemp)#u(T) value extrapolated
#JB#
#// NEW Determine temperature dependent partition functions Uw: lburns
if (thisTemp <= 130.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][0]
if (thisTemp >= 10000.0):
for iStg in range(numStages):
thisLogUw[iStg] = logUw[iStg][4]
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
#thisLogUwA = thisLogUw[0];
#//Ionization stage Saha factors:
logNums[0][id] = logNum[id]
for iStg in range(1, numStages):
#print("iStg ", iStg)
thisLogSaha = logSahaFac - logNe - (boltzFacI[iStg-1] /temp[0][id]) + (3.0 * temp[1][id] / 2.0) + thisLogUw[iStg] - thisLogUw[iStg-1]
#saha[iStg+1][iStg] = math.exp(logSaha[iStg+1][iStg])
logNums[iStg][id] = logNums[iStg-1][id] + thisLogSaha
#//id loop
return logNums;
#//end method stagePops
#def sahaRHS(chiI, log10UwUArr, log10UwLArr, temp):
def sahaRHS(chiI, logUwU, logUwL, temp):
"""RHS of partial pressure formulation of Saha equation in standard form (N_U*P_e/N_L on LHS)
// Returns depth distribution of LHS: Phi(T) === N_U*P_e/N_L (David Gray notation)
// Input parameters:
// chiI - ground state ionization energy of lower stage
// log10UwUArr, log10UwLArr - array of temperature-dependent partition function for upper and lower ionization stage
    // Also needs atmospheric structure information:
// numDeps
// temp structure
//
// Atomic element "A" is the one whose ionization fractions are being computed
// Element "B" refers to array of other species with which A forms molecules "AB" """
ln10 = math.log(10.0)
logE = math.log10(math.e) #// for debug output
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
#// var numMols = dissEArr.length;
    #// Partition functions passed in are vectors with temperature-dependent base 10 log Us
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Default initializations:
    #//We need one more stage in the size of the Saha factor array than the number of stages we're actually populating
thisLogUwU = 0.0
thisLogUwL = 0.0
logE10 = math.log(10.0)
    #//We need one more stage in the size of the Saha factor array than the number of stages we're actually populating
#logUwU = [0.0 for i in range(5)]
#logUwL = [0.0 for i in range(5)]
for kk in range(len(logUwL)):
logUwU[kk] = logUwL[kk]
# logUwL[kk] = logE10*log10UwLArr[kk]
#//System.out.println("chiL before: " + chiL);
#// If we need to subtract chiI from chiL, do so *before* converting to tiny numbers in ergs!
#//atomic ionization stage Boltzmann factors:
#double logChiI, logBoltzFacI;
#double boltzFacI;
logChiI = math.log(chiI) + Useful.logEv()
logBoltzFacI = logChiI - Useful.logK()
boltzFacI = math.exp(logBoltzFacI)
#//Extra factor of k to get k^5/2 in the P_e formulation of Saha Eq.
logSahaFac = log2 + (3.0 / 2.0) * (log2pi + Useful.logMe() + Useful.logK() - 2.0 * Useful.logH()) + Useful.logK()
#//double[] logLHS = new double[numDeps];
#double logLHS;
#// For atomic ionization stages:
#double logSaha, saha, expFac;
#// for (int id = 0; id < numDeps; id++) {
#//
#//Determine temperature dependent partition functions Uw:
thisTemp = temp[0]
#Ttheta = 5040.0 / thisTemp
"""
if (Ttheta >= 1.0):
thisLogUwU = logUwU[0]
thisLogUwL = logUwL[0]
if (Ttheta <= 0.5):
thisLogUwU = logUwU[1]
thisLogUwL = logUwL[1]
if (Ttheta > 0.5 and Ttheta < 1.0):
thisLogUwU = ( logUwU[1] * (Ttheta - 0.5)/(1.0 - 0.5) )
+ ( logUwU[0] * (1.0 - Ttheta)/(1.0 - 0.5) )
thisLogUwL = ( logUwL[1] * (Ttheta - 0.5)/(1.0 - 0.5) )
+ ( logUwL[0] * (1.0 - Ttheta)/(1.0 - 0.5) )
"""
#JB#
currentUwUArr=list(logUwU)#u(T) determined values
UwUFit = ToolBox.cubicFit(masterTemp,currentUwUArr)#u(T) fit
thisLogUwU = ToolBox.valueFromFit(UwUFit,thisTemp)#u(T) value extrapolated
currentUwLArr=list(logUwL)#u(T) determined values
UwLFit = ToolBox.cubicFit(masterTemp,currentUwLArr)#u(T) fit
thisLogUwL = ToolBox.valueFromFit(UwLFit,thisTemp)#u(T) value extrapolated
#JB#
#will need to do this one in Main as it goes through its own loop of temp
#if thisTemp == superTemp[0][len(superTemp[0])]:
# uwu.append(UwUFit)
# uwl.append(UwLFit)
#
#JB#
if (thisTemp <= 130.0):
thisLogUwU = logUwU[0]
thisLogUwL = logUwL[0]
if (thisTemp >= 10000.0):
thisLogUwU = logUwU[4]
thisLogUwL = logUwL[4]
"""
if (thisTemp > 130 and thisTemp <= 500):
thisLogUwU = logUwU[1] * (thisTemp - 130)/(500 - 130) \
+ logUwU[0] * (500 - thisTemp)/(500 - 130)
thisLogUwL = logUwL[1] * (thisTemp - 130)/(500 - 130) \
+ logUwL[0] * (500 - thisTemp)/(500 - 130)
if (thisTemp > 500 and thisTemp <= 3000):
thisLogUwU = logUwU[2] * (thisTemp - 500)/(3000 - 500) \
+ logUwU[1] * (3000 - thisTemp)/(3000 - 500)
thisLogUwL = logUwL[2] * (thisTemp - 500)/(3000 - 500) \
+ logUwL[1] * (3000 - thisTemp)/(3000 - 500)
if (thisTemp > 3000 and thisTemp <= 8000):
thisLogUwU = logUwU[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUwU[2] * (8000 - thisTemp)/(8000 - 3000)
thisLogUwL = logUwL[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUwL[2] * (8000 - thisTemp)/(8000 - 3000)
if (thisTemp > 8000 and thisTemp < 10000):
thisLogUwU = logUwU[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUwU[3] * (10000 - thisTemp)/(10000 - 8000)
thisLogUwL = logUwL[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUwL[3] * (10000 - thisTemp)/(10000 - 8000)
if (thisTemp >= 10000):
thisLogUwU = logUwU[4]
thisLogUwL = logUwL[4]
"""
#//Ionization stage Saha factors:
#//Need T_kin^5/2 in the P_e formulation of Saha Eq.
logSaha = logSahaFac - (boltzFacI /temp[0]) + (5.0 * temp[1] / 2.0) + thisLogUwU - thisLogUwL
#// saha = Math.exp(logSaha);
#//logLHS[id] = logSaha;
logLHS = logSaha;
#// } //id loop
return logLHS;
#JB
#return [logLHS,[[UwUFit,thisLogUwU],[UwLFit,thisLogUwL]]]
#//
# } //end method sahaRHS
#def molPops(nmrtrLogNumB, nmrtrDissE, log10UwA, nmrtrLog10UwB, nmrtrLogQwAB, nmrtrLogMuAB, \
# numMolsB, logNumB, dissEArr, log10UwBArr, logQwABArr, logMuABArr, \
# logGroundRatio, numDeps, temp):
def molPops(nmrtrLogNumB, nmrtrDissE, logUwA, nmrtrLogUwB, nmrtrLogQwAB, nmrtrLogMuAB, \
numMolsB, logNumB, dissEArr, logUwB, logQwABArr, logMuABArr, \
logGroundRatio, numDeps, temp):
# line 1: //species A data - ionization equilibrium of A
# //data for set of species "B" - molecular equlibrium for set {AB}
"""Diatomic molecular equilibrium routine that accounts for molecule formation:
// Returns depth distribution of molecular population
// Input parameters:
// logNum - array with depth-dependent total element number densities (cm^-3)
// chiI1 - ground state ionization energy of neutral stage
// chiI2 - ground state ionization energy of singly ionized stage
    // Also needs atmospheric structure information:
// numDeps
// temp structure
// rho structure
//
// Atomic element "A" is the one kept on the LHS of the master fraction, whose ionization fractions are included
// in the denominator of the master fraction
    // Element "B" refers to the array of other species with which A forms molecules "AB" """
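    #Rough outline of the id loop below (a sketch, not an authoritative spec):
    #for each depth the routine forms inverse molecular Saha factors 1/S_AB for
    #the numerator molecule and for every molecule in the B list, then returns
    #  logMolFrac[id] = -ln(S_AB_numerator) - ln( exp(logGroundRatio[id]) + sum_B 1/S_AB ),
    #which appears to be the log fraction of element A bound into the numerator molecule AB.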
logE = math.log10(math.e) #// for debug output
#//System.out.println("molPops: nmrtrDissE " + nmrtrDissE + " log10UwA " + log10UwA[0] + " " + log10UwA[1] + " nmrtrLog10UwB " +
#// nmrtrLog10UwB[0] + " " + nmrtrLog10UwB[1] + " nmrtrLog10QwAB " + logE*nmrtrLogQwAB[2] + " nmrtrLogMuAB " + logE*nmrtrLogMuAB
#// + " numMolsB " + numMolsB + " dissEArr " + dissEArr[0] + " log10UwBArr " + log10UwBArr[0][0] + " " + log10UwBArr[0][1] + " log10QwABArr " +
#// logE*logQwABArr[0][2] + " logMuABArr " + logE*logMuABArr[0]);
#//System.out.println("Line: nmrtrLog10UwB[0] " + logE*nmrtrLog10UwB[0] + " nmrtrLog10UwB[1] " + logE*nmrtrLog10UwB[1]);
ln10 = math.log(10.0)
log2pi = math.log(2.0 * math.pi)
log2 = math.log(2.0)
logE10 = math.log(10.0)
#// Convert to natural logs:
#double Ttheta, thisTemp;
#//Treat at least one molecule - if there are really no molecules for an atomic species,
#//there will be one phantom molecule in the denominator of the ionization fraction
#//with an impossibly high dissociation energy
if (numMolsB == 0):
numMolsB = 1
#//This should be inherited, but let's make sure:
dissEArr[0] = 29.0 #//eV
#//var molPops = function(logNum, numeratorLogNumB, numeratorDissE, numeratorLog10UwA, numeratorLog10QwAB, numeratorLogMuAB, //species A data - ionization equilibrium of A
#//Molecular partition functions - default initialization:
thisLogUwB = [0.0 for i in range(numMolsB)]
for iMol in range(numMolsB):
thisLogUwB[iMol] = 0.0 #// variable for temp-dependent computed partn fn of array element B
thisLogUwA = 0.0 #// element A
nmrtrThisLogUwB = 0.0 #// element A
thisLogQwAB = math.log(300.0)
nmrtrThisLogQwAB = math.log(300.0)
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
#logUwA = [0.0 for i in range(5)]
#nmrtrLogUwB = [0.0 for i in range(5)]
#for kk in range(len(logUwA)):
#logUwA[kk] = logE10*log10UwA[kk]
#nmrtrLogUwB[kk] = logE10*nmrtrLog10UwB[kk]
#// lburns
#// Array of elements B for all molecular species AB:
#double[][] logUwB = new double[numMolsB][2];
#logUwB = [ [ 0.0 for i in range(5) ] for j in range(numMolsB) ]
#//if (numMolsB > 0){
#for iMol in range(numMolsB):
# for kk in range(5):
# logUwB[iMol][kk] = logE10*log10UwBArr[iMol][kk]
# // lburns new loop
#//}
#// Molecular partition functions:
#// double nmrtrLogQwAB = logE10*nmrtrLog10QwAB;
#// double[] logQwAB = new double[numMolsB];
#// //if (numMolsB > 0){
#// for (int iMol = 0; iMol < numMolsB; iMol++){
#// logQwAB[iMol] = logE10*log10QwABArr[iMol];
#// }
# //}
#//Molecular dissociation Boltzmann factors:
nmrtrBoltzFacIAB = 0.0
nmrtrLogMolSahaFac = 0.0
logDissE = math.log(nmrtrDissE) + Useful.logEv()
#//System.out.println("logDissE " + logE*logDissE)
logBoltzFacIAB = logDissE - Useful.logK()
#//System.out.println("logBoltzFacIAB " + logE*logBoltzFacIAB);
nmrtrBoltzFacIAB = math.exp(logBoltzFacIAB)
nmrtrLogMolSahaFac = (3.0 / 2.0) * (log2pi + nmrtrLogMuAB + Useful.logK() - 2.0 * Useful.logH())
#//System.out.println("nmrtrLogMolSahaFac " + logE*nmrtrLogMolSahaFac);
#//System.out.println("nmrtrDissE " + nmrtrDissE + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " nmrtrBoltzFacIAB " + nmrtrBoltzFacIAB + " nmrtrLogMuAB " + logE*nmrtrLogMuAB + " nmrtrLogMolSahaFac " + logE*nmrtrLogMolSahaFac);
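    #The factor above equals ln[(2*pi*mu_AB*k/h^2)^(3/2)], i.e. the translational
    #part of the molecular Saha prefactor; the remaining T^(3/2) term is added in
    #the id loop as (3.0 * temp[1][id] / 2.0), assuming temp[1] stores ln(T).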
boltzFacIAB = [0.0 for i in range(numMolsB)]
logMolSahaFac = [0.0 for i in range(numMolsB)]
#//if (numMolsB > 0){
for iMol in range(numMolsB):
logDissE = math.log(dissEArr[iMol]) + Useful.logEv()
logBoltzFacIAB = logDissE - Useful.logK()
boltzFacIAB[iMol] = math.exp(logBoltzFacIAB)
logMolSahaFac[iMol] = (3.0 / 2.0) * (log2pi + logMuABArr[iMol] + Useful.logK() - 2.0 * Useful.logH())
#//System.out.println("logMolSahaFac[iMol] " + logE*logMolSahaFac[iMol]);
#//System.out.println("iMol " + iMol + " dissEArr[iMol] " + dissEArr[iMol] + " logDissE " + logE*logDissE + " logBoltzFacIAB " + logE*logBoltzFacIAB + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " logMuABArr " + logE*logMuABArr[iMol] + " logMolSahaFac " + logE*logMolSahaFac[iMol]);
#//double[] logNums = new double[numDeps]
#//}
#// For molecular species:
#double nmrtrSaha, nmrtrLogSahaMol, nmrtrLogInvSahaMol; //, nmrtrInvSahaMol;
logMolFrac = [0.0 for i in range(numDeps)]
logSahaMol = [0.0 for i in range(numMolsB)]
invSahaMol = [0.0 for i in range(numMolsB)]
#JB#
currentUwAArr=list(logUwA)#u(T) determined values
UwAFit = ToolBox.cubicFit(masterTemp, currentUwAArr)#u(T) fit
nmrtrLogUwBArr=list(nmrtrLogUwB)#u(T) determined values
nmrtrLogUwBFit = ToolBox.cubicFit(masterTemp, nmrtrLogUwBArr)#u(T) fit
#uwa.append(UwAFit)
#uwb.append(nmrtrLogUwBFit)
uwbFits=[]
qwabFit = []
for iMol in range(numMolsB):
currentUwBArr=list(logUwB[iMol])
UwBFit = ToolBox.cubicFit(masterTemp, currentUwBArr)
uwbFits.append(UwBFit)
currentLogQwABArr=list(logQwABArr[iMol])#u(T) determined values
QwABFit = ToolBox.cubicFit(masterTemp, currentLogQwABArr)#u(T) fit
qwabFit.append(QwABFit)
#nmrtrQwABArr=list(nmrtrLogQwAB)#u(T) determined values
#nmrtrQwABFit = ToolBox.cubicFit(masterTemp, nmrtrQwABArr)#u(T) fit
#for Mols in range(numMolsB):
# currentLogUwBArr=list(logUwB[Mols])#u(T) determined values
# UwBFit=cubicFit(masterTemp,currentLogUwBArr)#u(T) fit
#JB#
#//
temps=[]
#valb=[]
#vala=[]
#valnb=[]
#valqab=[]
#valnmrtrqwb=[]
#// System.out.println("molPops: id nmrtrLogNumB logNumBArr[0] logGroundRatio");
for id in range(numDeps):
#//System.out.format("%03d, %21.15f, %21.15f, %21.15f, %n", id, logE*nmrtrLogNumB[id], logE*logNumB[0][id], logE*logGroundRatio[id]);
        #//// reduce or enhance number density by over-all Rosseland opacity scale parameter
        #//Determine temperature dependent partition functions Uw:
thisTemp = temp[0][id]
temps.append(thisTemp)
#Ttheta = 5040.0 / thisTemp
"""
if (Ttheta >= 1.0):
thisLogUwA = logUwA[0]
nmrtrThisLogUwB = nmrtrLogUwB[0]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][0]
if (Ttheta <= 0.5):
thisLogUwA = logUwA[1]
nmrtrThisLogUwB = nmrtrLogUwB[1]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][1]
if (Ttheta > 0.5 and Ttheta < 1.0):
thisLogUwA = ( logUwA[1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \
+ ( logUwA[0] * ((1.0 - Ttheta)/(1.0 - 0.5)) )
nmrtrThisLogUwB = ( nmrtrLogUwB[1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \
+ ( nmrtrLogUwB[0] * ((1.0 - Ttheta)/(1.0 - 0.5)) )
for iMol in range(numMolsB):
thisLogUwB[iMol] = ( logUwB[iMol][1] * ((Ttheta - 0.5)/(1.0 - 0.5)) ) \
+ ( logUwB[iMol][0] * ((1.0 - Ttheta)/(1.0 - 0.5)) )
"""
#JB#
thisLogUwA = float(ToolBox.valueFromFit(UwAFit,thisTemp))#u(T) value extrapolated
#vala.append(thisLogUwA)
nmrtrThisLogUwB = float(ToolBox.valueFromFit(nmrtrLogUwBFit,thisTemp))#u(T) value extrapolated
#valnb.append(nmrtrThisLogUwB)
#for iMol in range(numMolsB):
# thisLogUwB[iMol]=logUwB[iMol]
for iMol in range(numMolsB):
thisLogUwB[iMol] = ToolBox.valueFromFit(uwbFits[iMol],thisTemp)#u(T) value extrapolated
#valb.append(thisLogUwB[iMol])
#// NEW Determine temperature dependent partition functions Uw: lburns
thisTemp = temp[0][id]
if (thisTemp <= 130.0):
thisLogUwA = logUwA[0]
nmrtrThisLogUwB = nmrtrLogUwB[0]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][0]
if (thisTemp >= 10000.0):
thisLogUwA = logUwA[4]
nmrtrThisLogUwB = nmrtrLogUwB[4]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][4]
"""
if (thisTemp > 130 and thisTemp <= 500):
thisLogUwA = logUwA[1] * (thisTemp - 130)/(500 - 130) \
+ logUwA[0] * (500 - thisTemp)/(500 - 130)
nmrtrThisLogUwB = nmrtrLogUwB[1] * (thisTemp - 130)/(500 - 130) \
+ nmrtrLogUwB[0] * (500 - thisTemp)/(500 - 130)
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][1] * (thisTemp - 130)/(500 - 130) \
+ logUwB[iMol][0] * (500 - thisTemp)/(500 - 130)
if (thisTemp > 500 and thisTemp <= 3000):
thisLogUwA = logUwA[2] * (thisTemp - 500)/(3000 - 500) \
+ logUwA[1] * (3000 - thisTemp)/(3000 - 500)
nmrtrThisLogUwB = nmrtrLogUwB[2] * (thisTemp - 500)/(3000 - 500) \
+ nmrtrLogUwB[1] * (3000 - thisTemp)/(3000 - 500)
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][2] * (thisTemp - 500)/(3000 - 500) \
+ logUwB[iMol][1] * (3000 - thisTemp)/(3000 - 500)
if (thisTemp > 3000 and thisTemp <= 8000):
thisLogUwA = logUwA[3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUwA[2] * (8000 - thisTemp)/(8000 - 3000)
nmrtrThisLogUwB = nmrtrLogUwB[3] * (thisTemp - 3000)/(8000 - 3000) \
+ nmrtrLogUwB[2] * (8000 - thisTemp)/(8000 - 3000)
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][3] * (thisTemp - 3000)/(8000 - 3000) \
+ logUwB[iMol][2] * (8000 - thisTemp)/(8000 - 3000)
if (thisTemp > 8000 and thisTemp < 10000):
thisLogUwA = logUwA[4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUwA[3] * (10000 - thisTemp)/(10000 - 8000)
nmrtrThisLogUwB = nmrtrLogUwB[4] * (thisTemp - 8000)/(10000 - 8000) \
+ nmrtrLogUwB[3] * (10000 - thisTemp)/(10000 - 8000)
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][4] * (thisTemp - 8000)/(10000 - 8000) \
+ logUwB[iMol][3] * (10000 - thisTemp)/(10000 - 8000)
if (thisTemp >= 10000):
thisLogUwA = logUwA[4]
nmrtrThisLogUwB = nmrtrLogUwB[4]
for iMol in range(numMolsB):
thisLogUwB[iMol] = logUwB[iMol][4]
"""
#iMol loops for Q's
for iMol in range(numMolsB):
if (thisTemp < 3000.0):
thisLogQwAB = ( logQwABArr[iMol][1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \
+ ( logQwABArr[iMol][2] * (thisTemp - 500.0)/(3000.0 - 500.0) )
if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ):
thisLogQwAB = ( logQwABArr[iMol][2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \
+ ( logQwABArr[iMol][3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) )
if ( thisTemp > 8000.0 ):
thisLogQwAB = ( logQwABArr[iMol][3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \
+ ( logQwABArr[iMol][4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) )
if (thisTemp < 3000.0):
nmrtrThisLogQwAB = ( nmrtrLogQwAB[1] * (3000.0 - thisTemp)/(3000.0 - 500.0) ) \
+ ( nmrtrLogQwAB[2] * (thisTemp - 500.0)/(3000.0 - 500.0) )
if ( (thisTemp >= 3000.0) and (thisTemp <= 8000.0) ):
nmrtrThisLogQwAB = ( nmrtrLogQwAB[2] * (8000.0 - thisTemp)/(8000.0 - 3000.0) ) \
+ ( nmrtrLogQwAB[3] * (thisTemp - 3000.0)/(8000.0 - 3000.0) )
if ( thisTemp > 8000.0 ):
nmrtrThisLogQwAB = ( nmrtrLogQwAB[3] * (10000.0 - thisTemp)/(10000.0 - 8000.0) ) \
+ ( nmrtrLogQwAB[4] * (thisTemp - 8000.0)/(10000.0 - 8000.0) )
#//For clarity: neutral stage of atom whose ionization equilibrium is being computed is element A
#// for molecule formation:
# //Ionization stage Saha factors:
#//System.out.println("id " + id + " nmrtrLogNumB[id] " + logE*nmrtrLogNumB[id]);
# // if (id == 16){
# // System.out.println("id " + id + " nmrtrLogNumB[id] " + logE*nmrtrLogNumB[id] + " pp nmrtB " + (logE*(nmrtrLogNumB[id]+temp[1][id]+Useful.logK())) + " nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrLogQwAB " + logE*nmrtrThisLogQwAB);
# //System.out.println("nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrThisLogQwAB " + logE*nmrtrThisLogQwAB);
# // }
nmrtrLogSahaMol = nmrtrLogMolSahaFac - nmrtrLogNumB[id] - (nmrtrBoltzFacIAB / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + nmrtrThisLogUwB + thisLogUwA - nmrtrThisLogQwAB
nmrtrLogInvSahaMol = -1.0 * nmrtrLogSahaMol
#//System.out.println("nmrtrLogInvSahaMol " + logE*nmrtrLogInvSahaMol);
#//nmrtrInvSahaMol = Math.exp(nmrtrLogSahaMol);
#// if (id == 16){
#// System.out.println("nmrtrLogInvSahaMol " + logE*nmrtrLogInvSahaMol);
#// }
#// if (id == 16){
#// System.out.println("nmrtrBoltzFacIAB " + nmrtrBoltzFacIAB + " nmrtrThisLogUwB " + logE*nmrtrThisLogUwB + " thisLogUwA " + logE*thisLogUwA + " nmrtrThisLogQwAB " + nmrtrThisLogQwAB);
#// System.out.println("nmrtrLogSahaMol " + logE*nmrtrLogSahaMol); // + " nmrtrInvSahaMol " + nmrtrInvSahaMol);
#// }
#//Molecular Saha factors:
for iMol in range(numMolsB):
#//System.out.println("iMol " + iMol + " id " + id + " logNumB[iMol][id] " + logE*nmrtrLogNumB[id]);
#//System.out.println("iMol " + iMol + " thisLogUwB[iMol] " + logE*thisLogUwB[iMol] + " thisLogUwA " + logE*thisLogUwA + " thisLogQwAB " + logE*thisLogQwAB);
logSahaMol[iMol] = logMolSahaFac[iMol] - logNumB[iMol][id] - (boltzFacIAB[iMol] / temp[0][id]) + (3.0 * temp[1][id] / 2.0) + float(thisLogUwB[iMol]) + thisLogUwA - thisLogQwAB
#//For denominator of ionization fraction, we need *inverse* molecular Saha factors (N_AB/NI):
logSahaMol[iMol] = -1.0 * logSahaMol[iMol]
invSahaMol[iMol] = math.exp(logSahaMol[iMol])
#//TEST invSahaMol[iMol] = 1.0e-99; //test
#// if (id == 16){
#// System.out.println("iMol " + iMol + " boltzFacIAB[iMol] " + boltzFacIAB[iMol] + " thisLogUwB[iMol] " + logE*thisLogUwB[iMol] + " logQwAB[iMol] " + logE*thisLogQwAB + " logNumB[iMol][id] " + logE*logNumB[iMol][id] + " logMolSahaFac[iMol] " + logE*logMolSahaFac[iMol]);
#// System.out.println("iMol " + iMol + " logSahaMol " + logE*logSahaMol[iMol] + " invSahaMol[iMol] " + invSahaMol[iMol]);
#// }
            #//Compute log of denominator of ionization fraction, f_stage
# //default initialization
# // - ratio of total atomic particles in all ionization stages to number in ground state:
denominator = math.exp(logGroundRatio[id]) #//default initialization - ratio of total atomic particles in all ionization stages to number in ground state
#//molecular contribution
for iMol in range(numMolsB):
#// if (id == 16){
#// System.out.println("invSahaMol[iMol] " + invSahaMol[iMol] + " denominator " + denominator);
#// }
denominator = denominator + invSahaMol[iMol]
#//
logDenominator = math.log(denominator)
#//System.out.println("logGroundRatio[id] " + logE*logGroundRatio[id] + " logDenominator " + logE*logDenominator);
#// if (id == 16){
#// System.out.println("id " + id + " logGroundRatio " + logGroundRatio[id] + " logDenominator " + logDenominator);
#// }
#//if (id == 36){
#// System.out.println("logDenominator " + logE*logDenominator);
#// }
#//var logDenominator = Math.log( 1.0 + saha21 + (saha32 * saha21) + (saha43 * saha32 * saha21) + (saha54 * saha43 * saha32 * saha21) );
logMolFrac[id] = nmrtrLogInvSahaMol - logDenominator
#// if (id == 16){
#// System.out.println("id " + id + " logMolFrac[id] " + logE*logMolFrac[id]);
#// }
#//logNums[id] = logNum[id] + logMolFrac;
#} //id loop
#JB - check (never used)#
#print(uwa)
#print(uwb)
#title("logUwA")
"""
plot(temps,vala)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(UwAFit,t))
scatter(masterTemp,(tempT))
show()
#title("nmrtrlogUwB")
plot(temps,valnb)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(nmrtrLogUwBFit,t))
scatter(masterTemp,(tempT))
show()
#title("logUwB")
plot(temps,valb)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(UwBFit,t))
scatter(masterTemp,(tempT))
show()
#title("logQwAB")
plot(temps,valqab)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(QwABFit,t))
scatter(masterTemp,(tempT))
show()
#title("nmrtrlogQwAB")
plot(temps,valnmrtrqwb)
tempT=[]
for t in masterTemp:
tempT.append(valueFromFit(nmrtrQwABFit,t))
scatter(masterTemp,(tempT))
show()
"""
#JB#
return logMolFrac
#//end method molPops | mit |
sugartom/tensorflow-alien | tensorflow/examples/learn/mnist.py | 45 | 3999 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This showcases how simple it is to build image classification networks.
It follows description from this TensorFlow tutorial:
https://www.tensorflow.org/versions/master/tutorials/mnist/pros/index.html#deep-mnist-for-experts
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn import metrics
import tensorflow as tf
layers = tf.contrib.layers
learn = tf.contrib.learn
def max_pool_2x2(tensor_in):
return tf.nn.max_pool(
tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
def conv_model(feature, target, mode):
"""2-layer convolution model."""
# Convert the target to a one-hot tensor of shape (batch_size, 10) and
  # with an on-value of 1 for each one-hot vector of length 10.
target = tf.one_hot(tf.cast(target, tf.int32), 10, 1, 0)
  # Reshape feature to a 4d tensor with the 2nd and 3rd dimensions being image
  # width and height, and the final dimension being the number of color
  # channels (see the shape walkthrough after this function).
feature = tf.reshape(feature, [-1, 28, 28, 1])
# First conv layer will compute 32 features for each 5x5 patch
with tf.variable_scope('conv_layer1'):
h_conv1 = layers.convolution2d(
feature, 32, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool1 = max_pool_2x2(h_conv1)
# Second conv layer will compute 64 features for each 5x5 patch.
with tf.variable_scope('conv_layer2'):
h_conv2 = layers.convolution2d(
h_pool1, 64, kernel_size=[5, 5], activation_fn=tf.nn.relu)
h_pool2 = max_pool_2x2(h_conv2)
# reshape tensor into a batch of vectors
h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
# Densely connected layer with 1024 neurons.
h_fc1 = layers.dropout(
layers.fully_connected(
h_pool2_flat, 1024, activation_fn=tf.nn.relu),
keep_prob=0.5,
is_training=mode == tf.contrib.learn.ModeKeys.TRAIN)
# Compute logits (1 per class) and compute loss.
logits = layers.fully_connected(h_fc1, 10, activation_fn=None)
loss = tf.losses.softmax_cross_entropy(target, logits)
# Create a tensor for training op.
train_op = layers.optimize_loss(
loss,
tf.contrib.framework.get_global_step(),
optimizer='SGD',
learning_rate=0.001)
return tf.argmax(logits, 1), loss, train_op
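# Rough shape walkthrough of conv_model for a 28x28 MNIST image (SAME padding,
# 2x2 max pooling), which is where the 7 * 7 * 64 flatten size above comes from:
#   feature       (batch, 28, 28, 1)
#   conv_layer1   (batch, 28, 28, 32)  -> max_pool_2x2 -> (batch, 14, 14, 32)
#   conv_layer2   (batch, 14, 14, 64)  -> max_pool_2x2 -> (batch, 7, 7, 64)
#   flatten       (batch, 3136) -> fully_connected (batch, 1024) -> logits (batch, 10)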
def main(unused_args):
### Download and load MNIST dataset.
mnist = learn.datasets.load_dataset('mnist')
### Linear classifier.
feature_columns = learn.infer_real_valued_columns_from_input(
mnist.train.images)
classifier = learn.LinearClassifier(
feature_columns=feature_columns, n_classes=10)
classifier.fit(mnist.train.images,
mnist.train.labels.astype(np.int32),
batch_size=100,
steps=1000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
### Convolutional network
classifier = learn.Estimator(model_fn=conv_model)
classifier.fit(mnist.train.images,
mnist.train.labels,
batch_size=100,
steps=20000)
score = metrics.accuracy_score(mnist.test.labels,
list(classifier.predict(mnist.test.images)))
print('Accuracy: {0:f}'.format(score))
if __name__ == '__main__':
tf.app.run()
| apache-2.0 |
harrysocool/ear_recognition | ear_recognition/generate_files.py | 1 | 6623 | import csv
import os
import random
import numpy as np
import pandas as pd
import matlab_wrapper
from lib.utils.timer import Timer
from tools.ear_recog import get_gt, ROI_boxes
import scipy.io as sio
def listdir_no_hidden(path):
list1 = []
for f in sorted(os.listdir(path)):
if not f.startswith('.'):
p = os.path.abspath(path)
list1.append(os.path.join(p, f))
return list1
def write_list_to_csv(list1, path_out, header=False):
temp = pd.DataFrame(list1)
temp.to_csv(path_out, index=False, header=header)
def save_gt_roidb_csv(data_path, csv_path, image_index_output_path, gt_output_path, test_image_path, test_gt):
box_list = pd.read_csv(csv_path, header=0).get_values()
image_path_list = listdir_no_hidden(data_path)
assert len(box_list) == len(image_path_list), 'the length of box list must equal to image list'
new_list = []
new_list1 = []
for idx, entry in enumerate(image_path_list):
s1 = str(entry)
temp = box_list[idx]
        # change the x y coordinates to the correct [X1 Y1 X2 Y2] order
x1 = str(temp[-2])
y1 = str(temp[-4])
x2 = str(temp[-1])
y2 = str(temp[-3])
s2 = x1+' '+ y1+' '+x2+' '+y2
new_list.append(s1 + ' 1 ' + s2)
new_list1.append(s1)
# shuffle the idx of training set
shuffle_idx = range(len(image_path_list))
random.seed(641) # make it can be reproduce
random.shuffle(shuffle_idx)
train_idx = shuffle_idx[0:437]
test_idx = shuffle_idx[437:]
train_image_path = [new_list1[idx] for idx in train_idx]
train_gt = [new_list[idx] for idx in train_idx]
test_image_path_data = [new_list1[idx] for idx in test_idx]
test_gt_data = [new_list[idx] for idx in test_idx]
write_list_to_csv(train_gt, gt_output_path)
write_list_to_csv(train_image_path, image_index_output_path)
write_list_to_csv(test_gt_data, test_gt)
write_list_to_csv(test_image_path_data, test_image_path)
def initialize_matlab():
matlab = matlab_wrapper.MatlabSession()
# edge_detector OP_method
matlab.eval("cd('/home/harrysocool/Github/fast-rcnn/OP_methods/edges')")
matlab.eval("addpath(genpath('/home/harrysocool/Github/fast-rcnn/OP_methods/edges'))")
matlab.eval("toolboxCompile")
# # selective_search OP_method
# matlab.eval("cd('/home/harrysocool/Github/fast-rcnn/OP_methods/selective_search_ijcv_with_python')")
# matlab.eval("addpath(genpath('/home/harrysocool/Github/fast-rcnn/OP_methods/selective_search_ijcv_with_python'))")
return matlab
def time_analyse(matlab, cmd, image_filepath, par1, par2):
timer = Timer()
timer.tic()
obj_proposals = ROI_boxes(matlab, image_filepath, cmd, par1, par2)
timer.toc()
time = timer.total_time
box_numer = len(obj_proposals)
return time, box_numer, obj_proposals
def mean_IOU_ratio(image_index, dets):
ratio = np.empty(0,dtype=np.float64)
(x1, y1, x2, y2) = get_gt(image_index)
if dets.size > 4:
for box in dets:
X1 = box[0]
Y1 = box[1]
X2 = box[2]
Y2 = box[3]
if ((np.float32(x1)-X1)<=15 and (X2- np.float32(x2))<=15
and (np.float32(y1)-Y1)<=15 and (Y2-np.float32(y2))<=15):
ratio = np.append(ratio,1.0)
else:
SI = max(0, min(x2, X2) - max(x1, X1)) * \
max(0, min(y2, Y2) - max(y1, Y1))
SU = (x2 - x1) * (y2 - y1) + (X2 - X1) * (Y2 - Y1) - SI
ratio = np.append(ratio, SI/SU)
if ratio.size == 0:
big_ratio = 0
else:
big = np.where(ratio >= 0.1)[0].size
total = float(len(dets))
big_ratio = float(big/total)
return big_ratio
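def _iou_example():
    # Hypothetical worked example of the overlap measure used in mean_IOU_ratio
    # above; this helper is illustrative only and is not called anywhere.
    x1, y1, x2, y2 = 10, 10, 60, 60     # assumed ground-truth box
    X1, Y1, X2, Y2 = 20, 20, 70, 70     # assumed proposal box
    SI = max(0, min(x2, X2) - max(x1, X1)) * max(0, min(y2, Y2) - max(y1, Y1))
    SU = (x2 - x1) * (y2 - y1) + (X2 - X1) * (Y2 - Y1) - SI
    return float(SI) / SU  # 1600.0 / 3400 ~= 0.47, above the 0.1 cut-off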
if __name__ == '__main__':
datasets_path = '/home/harrysocool/Github/fast-rcnn/DatabaseEars'
csv_path = os.path.join(datasets_path, 'boundaries.csv')
image_path = os.path.join(datasets_path, 'DatabaseEars/')
gt_output_path = os.path.join(datasets_path, '../','ear_recognition/data_file/gt_roidb.csv')
image_index_output_path = os.path.join(datasets_path, '../', 'ear_recognition/data_file/image_index_list.csv')
mat_output_filename = os.path.join(datasets_path, '../','ear_recognition/data_file/all_boxes.mat')
test_gt_output_path = os.path.join(datasets_path, '../','ear_recognition/data_file/test_gt_roidb.csv')
test_image_index_output_path = os.path.join(datasets_path, '../', 'ear_recognition/data_file/test_image_index_list.csv')
# save_gt_roidb_csv(image_path, csv_path, image_index_output_path, gt_output_path, test_image_index_output_path,
# test_gt_output_path)
matlab = initialize_matlab()
timer = Timer()
list1 = pd.read_csv(test_image_index_output_path, header=None).values.flatten().tolist()
cmd = 'ss'
# ks = [50 100 150 200 300];
par2_list = [8]
# par2_list = [3]
time_csv_out_path = os.path.join(os.path.dirname(datasets_path), 'result', cmd + '_' + 'OPtune_result_1.csv')
if not os.path.exists(time_csv_out_path):
write_list_to_csv(par2_list, time_csv_out_path)
with open(time_csv_out_path, 'a') as csvfile:
writer = csv.writer(csvfile)
list2 = []
for par2 in [7]:
for par1 in [7]:
# par1 = float(par1)/100
# all_boxes = np.zeros((437,), dtype=np.object)
for index, image_path in enumerate(list1):
# if index>300:
# break
time, box_numer, obj_proposals = time_analyse(matlab, cmd, image_path, par1, par2)
ratio = mean_IOU_ratio(index + 1, obj_proposals)
# list2.append([time, box_numer])
# print('{} has processed in {:.3f} seconds with {} boxes'.format(len(list2), time, box_numer))
print('No. {} has processed with par {} {}, box {} IOU ratio {:.3f} in {:.2f} seconds'.format(index,
par1, par2,box_numer ,ratio, time))
writer.writerow([par1, par2,ratio,box_numer, time])
# all_boxes[index] = obj_proposals
# sio.savemat(mat_output_filename, {'all_boxes': all_boxes})
# write_list_to_csv(list2, time_csv_out_path)
# fnames_cell = "{" + ",".join("'{}'".format(x) for x in list1) + "}"
# command = "res = {}({}, '{}')".format('selective_search', fnames_cell, mat_output_filename)
# print(command)
# #
# matlab.eval(command) | mit |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/site-packages/pandas/tests/types/test_io.py | 7 | 4785 | # -*- coding: utf-8 -*-
import numpy as np
import pandas.lib as lib
import pandas.util.testing as tm
from pandas.compat import long, u
class TestParseSQL(tm.TestCase):
def test_convert_sql_column_floats(self):
arr = np.array([1.5, None, 3, 4.2], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_strings(self):
arr = np.array(['1.5', None, '3', '4.2'], dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array(['1.5', np.nan, '3', '4.2'], dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_unicode(self):
arr = np.array([u('1.5'), None, u('3'), u('4.2')],
dtype=object)
result = lib.convert_sql_column(arr)
expected = np.array([u('1.5'), np.nan, u('3'), u('4.2')],
dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_ints(self):
arr = np.array([1, 2, 3, 4], dtype='O')
arr2 = np.array([1, 2, 3, 4], dtype='i4').astype('O')
result = lib.convert_sql_column(arr)
result2 = lib.convert_sql_column(arr2)
expected = np.array([1, 2, 3, 4], dtype='i8')
self.assert_numpy_array_equal(result, expected)
self.assert_numpy_array_equal(result2, expected)
arr = np.array([1, 2, 3, None, 4], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_longs(self):
arr = np.array([long(1), long(2), long(3), long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, 4], dtype='i8')
self.assert_numpy_array_equal(result, expected)
arr = np.array([long(1), long(2), long(3), None, long(4)], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([1, 2, 3, np.nan, 4], dtype='f8')
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_bools(self):
arr = np.array([True, False, True, False], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([True, False, True, False], dtype=bool)
self.assert_numpy_array_equal(result, expected)
arr = np.array([True, False, None, False], dtype='O')
result = lib.convert_sql_column(arr)
expected = np.array([True, False, np.nan, False], dtype=object)
self.assert_numpy_array_equal(result, expected)
def test_convert_sql_column_decimals(self):
from decimal import Decimal
arr = np.array([Decimal('1.5'), None, Decimal('3'), Decimal('4.2')])
result = lib.convert_sql_column(arr)
expected = np.array([1.5, np.nan, 3, 4.2], dtype='f8')
self.assert_numpy_array_equal(result, expected)
def test_convert_downcast_int64(self):
from pandas.parser import na_values
arr = np.array([1, 2, 7, 8, 10], dtype=np.int64)
expected = np.array([1, 2, 7, 8, 10], dtype=np.int8)
# default argument
result = lib.downcast_int64(arr, na_values)
self.assert_numpy_array_equal(result, expected)
result = lib.downcast_int64(arr, na_values, use_unsigned=False)
self.assert_numpy_array_equal(result, expected)
expected = np.array([1, 2, 7, 8, 10], dtype=np.uint8)
result = lib.downcast_int64(arr, na_values, use_unsigned=True)
self.assert_numpy_array_equal(result, expected)
# still cast to int8 despite use_unsigned=True
# because of the negative number as an element
arr = np.array([1, 2, -7, 8, 10], dtype=np.int64)
expected = np.array([1, 2, -7, 8, 10], dtype=np.int8)
result = lib.downcast_int64(arr, na_values, use_unsigned=True)
self.assert_numpy_array_equal(result, expected)
arr = np.array([1, 2, 7, 8, 300], dtype=np.int64)
expected = np.array([1, 2, 7, 8, 300], dtype=np.int16)
result = lib.downcast_int64(arr, na_values)
self.assert_numpy_array_equal(result, expected)
int8_na = na_values[np.int8]
int64_na = na_values[np.int64]
arr = np.array([int64_na, 2, 3, 10, 15], dtype=np.int64)
expected = np.array([int8_na, 2, 3, 10, 15], dtype=np.int8)
result = lib.downcast_int64(arr, na_values)
self.assert_numpy_array_equal(result, expected)
if __name__ == '__main__':
import nose
nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
exit=False)
| gpl-3.0 |
SAVeselovskiy/KFU_Visual_Tracking | Tracking/detection.py | 1 | 10566 | __author__ = 'IVMIT KFU: Gataullin Ravil & Veselovkiy Sergei'
from copy import copy
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from time import time
import warnings
warnings.filterwarnings("ignore")
from sklearn.cross_validation import train_test_split
from structure import Position
class PatchVarianceClassifier:
def __init__(self, init_patch):
self.init_patch_variance = np.var(init_patch.content)
def classify(self, patch):
        # return 1 if the object is detected as positive
        # return 0 if the object is detected as negative
if np.var(patch.content) > 0.5 * self.init_patch_variance:
return 1
else:
return 0
def predict_patch(self, patch):
return np.var(patch.content) / self.init_patch_variance
def predict_position(self, position):
return np.var(position.calculate_patch().content) / self.init_patch_variance
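# Rough numeric sketch of the variance gate in PatchVarianceClassifier above
# (assumed numbers only): if np.var(init_patch.content) == 400.0, a candidate
# patch is rejected whenever its content variance falls below 0.5 * 400.0 == 200.0
# (e.g. a flat background patch at ~90.0) and is passed to the later, more
# expensive stages otherwise.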
class EnsembleClassifier:
def __init__(self, learning_component):
self.learning_component = learning_component
self.classifier = RandomForestClassifier(max_depth=3)
def classify(self, patch):
        # return 1 if the object is detected as positive
        # return 0 if the object is detected as negative
feature = patch.calculate_feature(self.learning_component.descriptor)
if self.classifier.predict_proba(feature)[0][self.positive_class_index] > 0.5:
return 1
else:
return 0
def predict_patch(self, patch):
feature = patch.calculate_feature(self.learning_component.descriptor)
return self.classifier.predict_proba(feature)[0][self.positive_class_index]
def predict_position(self, position):
feature = position.calculate_patch().calculate_feature(self.learning_component.descriptor)
return self.classifier.predict_proba(feature)[0][self.positive_class_index]
def relearn(self, test_size=0):
samples, weights, targets = self.learning_component.get_training_set(const_weight=True)
train_samples, test_samples, train_targets, test_targets = train_test_split(samples, targets, test_size=test_size, random_state=np.random.RandomState(0))
count_positives = 1.0*np.count_nonzero(train_targets)
count_negatives = 1.0*(len(train_targets) - count_positives)
positive_weight = count_negatives/len(train_targets)
negative_weight = count_positives/len(train_targets)
weights = np.array([positive_weight if target == 1 else negative_weight for target in train_targets])
self.classifier.fit(train_samples, train_targets, sample_weight=weights)
self.learning_component.new_samples_count = 0
if len(test_samples) > 0:
test_result = [self.classifier.predict(sample) for sample in test_samples]
true_positives = 0.0
count_test_positives = 1.0*np.count_nonzero(test_targets)
count_result_positives = 1.0*np.count_nonzero(test_result)
for i in xrange(len(test_targets)):
if test_targets[i] == test_result[i] and test_result[i] == 1:
true_positives += 1
            precision = true_positives / count_result_positives
            recall = true_positives / count_test_positives
            print "Precision:", precision
            print "Recall:", recall
if precision + recall != 0:
print "F-score:", 2 * precision * recall / (precision + recall)
else:
print "F-score:", 0
self.positive_class_index = 0
for elem in self.classifier.classes_:
if elem != 1.0:
self.positive_class_index += 1
else:
break
class NearestNeighborClassifier:
def __init__(self, learning_component, lmbd = 0.1, tetta = 0.6):
self.learning_component = learning_component
self.lmbd = lmbd
self.tetta = tetta
def classify(self, patch):
        # return 1 if the object is detected as positive
        # return 0 if the object is detected as negative
if self.learning_component.relative_similarity(patch) > self.tetta:
return 1
else:
return 0
def predict_patch(self, patch):
return self.learning_component.relative_similarity(patch)
def predict_position(self, position):
return self.learning_component.relative_similarity(position.calculate_patch())
def scanning_window(init_position, scales_step = 1.2, slip_step = 0.1, minimal_bounding_box_size = 20, min_step=1, max_step=20):
flag_inc = True
flag_dec = False
position = copy(init_position)
while min(position.width, position.height) >= minimal_bounding_box_size:
position.update(x=0,y=0)
step_width = min(max(min_step,int(slip_step * position.width)),max_step)
step_height = min(max(min_step,int(slip_step * position.height)),max_step)
while position.is_correct():
while position.is_correct():
yield position
position.update(x=position.x+step_width)
position.update(x=0, y=position.y+step_height)
# if position.is_correct():
# yield position
# is_end = False
# step_width = int(slip_step * position.width)
# step_height = int(slip_step * position.height)
# layer = 1
# xx = position.x
# yy = position.y
# while not is_end:
# is_end = True
# for start_point, vector in (([-1,-1],[1,0]),([1,-1],[0,1]),([1,1],[-1,0]),([-1,1],[0,-1])):
# position.update(x=xx + (start_point[0]*layer + vector[0])*step_width, y=yy+(start_point[1]*layer + vector[1])*step_height)
# while position.is_correct() and xx - layer*step_width <= position.x <= xx + layer*step_width and yy - layer*step_height <= position.y <= yy + layer*step_height:
# is_end = False
# yield position
# position.update(x=position.x+vector[0]*step_width, y=position.y+vector[1]*step_height)
# layer += 1
if flag_inc:
position.update(height=int(position.height * scales_step), width = int(position.width * scales_step))
            if position.height > position.buffer[0].shape[0] or position.width > position.buffer[0].shape[1]:
flag_inc = False
flag_dec = True
position = copy(init_position)
if flag_dec:
position.update(height=int(position.height / scales_step), width = int(position.width / scales_step))
def get_sliding_positions(init_position, scales_step = 1.2, slip_step = 0.1, minimal_bounding_box_size = 20, min_step=2, max_step=2):
sliding_positions = []
flag_inc = True
flag_dec = False
position = copy(init_position)
while min(position.width, position.height) >= minimal_bounding_box_size:
position.update(x=0,y=0)
step_width = min(max(min_step,int(slip_step * position.width)),max_step)
step_height = min(max(min_step,int(slip_step * position.height)),max_step)
while position.is_correct():
while position.is_correct():
sliding_positions.append(copy(position))
position.update(x=position.x+step_width)
position.update(x=0, y=position.y+step_height)
if flag_inc:
position.update(height=int(position.height * scales_step), width = int(position.width * scales_step))
            if position.height > position.buffer[0].shape[0] or position.width > position.buffer[0].shape[1]:
flag_inc = False
flag_dec = True
position = copy(init_position)
if flag_dec:
position.update(height=int(position.height / scales_step), width = int(position.width / scales_step))
return sliding_positions
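# Both routines above walk the same multi-scale grid: the slip step is
# clamp(slip_step * box_size, min_step, max_step) pixels, the box is grown by
# scales_step until it no longer fits inside the frame buffer, then the
# original box is shrunk by scales_step down to minimal_bounding_box_size.
# scanning_window yields positions lazily, while get_sliding_positions
# materialises the whole list once so Detector can reuse it across detect() calls.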
class Detector:
def __init__(self, init_position, learning_component, threshold_patch_variance=0.5, threshold_ensemble=0.5, threshold_nearest_neighbor=0.6):
self.learning_component = learning_component
self.patch_variance_classifier = PatchVarianceClassifier(learning_component.init_patch)
self.ensemble_classifier = EnsembleClassifier(learning_component)
self.nearest_neighbor_classifier = NearestNeighborClassifier(learning_component)
self.threshold_patch_variance = threshold_patch_variance
self.threshold_ensemble = threshold_ensemble
self.threshold_nearest_neighbor = threshold_nearest_neighbor
self.sliding_positions = get_sliding_positions(init_position, scales_step = 1.2, slip_step = 0.1, minimal_bounding_box_size = 50, min_step=2, max_step=10)
def cascaded_classifier(self, patch):
        # Three classification stages (the nearest-neighbour stage is commented out below)
        # return 1 if the object is detected as positive
        # return 0 if the object is detected as negative
if self.patch_variance_classifier.predict_patch(patch) < self.threshold_patch_variance:
return 0
        if self.ensemble_classifier.predict_patch(patch) < self.threshold_ensemble:
return 0
# elif self.nearest_neighbor_classifier.predict_patch(patch) < self.threshold_nearest_neighbor:
# return 0
return 1
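    # Sketch of the cascade above with assumed scores: a patch scoring 0.8 on
    # the variance stage and 0.6 on the ensemble stage clears both 0.5
    # thresholds and is accepted; a patch scoring 0.3 at either stage is
    # rejected immediately and the remaining stages are never evaluated.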
def detect(self, position, is_tracked):
if self.learning_component.new_samples_count > 10:
start = time()
self.ensemble_classifier.relearn()
print "Relearn:", time() - start
detected_windows = []
predict_times = []
for current_position in self.sliding_positions:
start = time()
proba = self.predict_position(current_position)
predict_times.append(time() - start)
if proba == 1:
detected_windows.append((current_position.get_window(), current_position.calculate_patch(), proba))
self.learning_component.add_new_positive(current_position.calculate_patch())
if is_tracked:
return detected_windows
else:
self.learning_component.add_new_negative(current_position.calculate_patch())
print "Analysed window count:", len(predict_times)
print "Max detection time:", np.max(predict_times)
print "Min detection time:", np.min(predict_times)
print "Mean detection time:", np.mean(predict_times)
return detected_windows
def predict_patch(self, patch):
return self.cascaded_classifier(patch)
def predict_position(self, position):
return self.cascaded_classifier(position.calculate_patch()) | mit |
yask123/scikit-learn | sklearn/linear_model/tests/test_least_angle.py | 98 | 20870 | from nose.tools import assert_equal
import numpy as np
from scipy import linalg
from sklearn.cross_validation import train_test_split
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_less
from sklearn.utils.testing import assert_greater
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_no_warnings, assert_warns
from sklearn.utils.testing import TempMemmap
from sklearn.utils import ConvergenceWarning
from sklearn import linear_model, datasets
from sklearn.linear_model.least_angle import _lars_path_residues
diabetes = datasets.load_diabetes()
X, y = diabetes.data, diabetes.target
# TODO: use another dataset that has multiple drops
def test_simple():
# Principle of Lars is to keep covariances tied and decreasing
# also test verbose output
from sklearn.externals.six.moves import cStringIO as StringIO
import sys
old_stdout = sys.stdout
try:
sys.stdout = StringIO()
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", verbose=10)
sys.stdout = old_stdout
for (i, coef_) in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
finally:
sys.stdout = old_stdout
def test_simple_precomputed():
# The same, with precomputed Gram matrix
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, Gram=G, method="lar")
for i, coef_ in enumerate(coef_path_.T):
res = y - np.dot(X, coef_)
cov = np.dot(X.T, res)
C = np.max(abs(cov))
eps = 1e-3
ocur = len(cov[C - eps < abs(cov)])
if i < X.shape[1]:
assert_true(ocur == i + 1)
else:
# no more than max_pred variables can go into the active set
assert_true(ocur == X.shape[1])
def test_all_precomputed():
# Test that lars_path with precomputed Gram and Xy gives the right answer
X, y = diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
for method in 'lar', 'lasso':
output = linear_model.lars_path(X, y, method=method)
output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)
for expected, got in zip(output, output_pre):
assert_array_almost_equal(expected, got)
def test_lars_lstsq():
# Test that Lars gives least square solution at the end
# of the path
X1 = 3 * diabetes.data # use un-normalized dataset
clf = linear_model.LassoLars(alpha=0.)
clf.fit(X1, y)
coef_lstsq = np.linalg.lstsq(X1, y)[0]
assert_array_almost_equal(clf.coef_, coef_lstsq)
def test_lasso_gives_lstsq_solution():
# Test that Lars Lasso gives least square solution at the end
# of the path
alphas_, active, coef_path_ = linear_model.lars_path(X, y, method="lasso")
coef_lstsq = np.linalg.lstsq(X, y)[0]
assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])
def test_collinearity():
# Check that lars_path is robust to collinearity in input
X = np.array([[3., 3., 1.],
[2., 2., 0.],
[1., 1., 0]])
y = np.array([1., 0., 0])
f = ignore_warnings
_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)
assert_true(not np.isnan(coef_path_).any())
residual = np.dot(X, coef_path_[:, -1]) - y
assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded
n_samples = 10
X = np.random.rand(n_samples, 5)
y = np.zeros(n_samples)
_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,
copy_Gram=False, alpha_min=0.,
method='lasso', verbose=0,
max_iter=500)
assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))
def test_no_path():
# Test that the ``return_path=False`` option returns the correct output
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar")
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_precomputed():
# Test that the ``return_path=False`` option with Gram remains correct
G = np.dot(diabetes.data.T, diabetes.data)
alphas_, active_, coef_path_ = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G)
alpha_, active, coef = linear_model.lars_path(
diabetes.data, diabetes.target, method="lar", Gram=G,
return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_no_path_all_precomputed():
# Test that the ``return_path=False`` option with Gram and Xy remains
# correct
X, y = 3 * diabetes.data, diabetes.target
G = np.dot(X.T, X)
Xy = np.dot(X.T, y)
alphas_, active_, coef_path_ = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9)
print("---")
alpha_, active, coef = linear_model.lars_path(
X, y, method="lasso", Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)
assert_array_almost_equal(coef, coef_path_[:, -1])
assert_true(alpha_ == alphas_[-1])
def test_singular_matrix():
# Test when input is a singular matrix
X1 = np.array([[1, 1.], [1., 1.]])
y1 = np.array([1, 1])
alphas, active, coef_path = linear_model.lars_path(X1, y1)
assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])
def test_rank_deficient_design():
# consistency test that checks that LARS Lasso is handling rank
# deficient input data (with n_features < rank) in the same way
# as coordinate descent Lasso
y = [5, 0, 5]
for X in ([[5, 0],
[0, 5],
[10, 10]],
[[10, 10, 0],
[1e-32, 0, 0],
[0, 0, 1]],
):
# To be able to use the coefs to compute the objective function,
# we need to turn off normalization
lars = linear_model.LassoLars(.1, normalize=False)
coef_lars_ = lars.fit(X, y).coef_
obj_lars = (1. / (2. * 3.)
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2
+ .1 * linalg.norm(coef_lars_, 1))
coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)
coef_cd_ = coord_descent.fit(X, y).coef_
obj_cd = ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2
+ .1 * linalg.norm(coef_cd_, 1))
assert_less(obj_lars, obj_cd * (1. + 1e-8))
def test_lasso_lars_vs_lasso_cd(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results.
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# similar test, with the classifiers
for alpha in np.linspace(1e-2, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)
clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,
normalize=False).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# same test, with normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_vs_lasso_cd_early_stopping(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when early stopping is used.
# (test : before, in the middle, and in the last part of the path)
alphas_min = [10, 0.9, 1e-4]
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
alphas_min = [10, 0.9, 1e-4]
# same test, with normalization
for alphas_min in alphas_min:
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
alpha_min=0.9)
lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,
tol=1e-8)
lasso_cd.alpha = alphas[-1]
lasso_cd.fit(X, y)
error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)
assert_less(error, 0.01)
def test_lasso_lars_path_length():
# Test that the path length of the LassoLars is right
lasso = linear_model.LassoLars()
lasso.fit(X, y)
lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])
lasso2.fit(X, y)
assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)
# Also check that the sequence of alphas is always decreasing
assert_true(np.all(np.diff(lasso.alphas_) < 0))
def test_lasso_lars_vs_lasso_cd_ill_conditioned():
# Test lasso lars on a very ill-conditioned design, and check that
# it does not blow up, and stays somewhat close to a solution given
# by the coordinate descent solver
# Also test that lasso_path (using lars_path output style) gives
# the same result as lars_path and previous lasso output style
# under these conditions.
rng = np.random.RandomState(42)
# Generate data
n, m = 70, 100
k = 5
X = rng.randn(n, m)
w = np.zeros((m, 1))
i = np.arange(0, m)
rng.shuffle(i)
supp = i[:k]
w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)
y = np.dot(X, w)
sigma = 0.2
y += sigma * rng.rand(*y.shape)
y = y.squeeze()
lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')
_, lasso_coef2, _ = linear_model.lasso_path(X, y,
alphas=lars_alphas,
tol=1e-6,
fit_intercept=False)
assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)
def test_lasso_lars_vs_lasso_cd_ill_conditioned2():
# Create an ill-conditioned situation in which the LARS has to go
# far in the path to converge, and check that LARS and coordinate
# descent give the same answers
# Note it used to be the case that Lars had to use the drop for good
# strategy for this but this is no longer the case with the
# equality_tolerance checks
X = [[1e20, 1e20, 0],
[-1e-32, 0, 0],
[1, 1, 1]]
y = [10, 10, 1]
alpha = .0001
def objective_function(coef):
return (1. / (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2
+ alpha * linalg.norm(coef, 1))
lars = linear_model.LassoLars(alpha=alpha, normalize=False)
assert_warns(ConvergenceWarning, lars.fit, X, y)
lars_coef_ = lars.coef_
lars_obj = objective_function(lars_coef_)
coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-10, normalize=False)
cd_coef_ = coord_descent.fit(X, y).coef_
cd_obj = objective_function(cd_coef_)
assert_less(lars_obj, cd_obj * (1. + 1e-8))
def test_lars_add_features():
# assure that at least some features get added if necessary
# test for 6d2b4c
# Hilbert matrix
n = 5
H = 1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])
clf = linear_model.Lars(fit_intercept=False).fit(
H, np.arange(n))
assert_true(np.all(np.isfinite(clf.coef_)))
def test_lars_n_nonzero_coefs(verbose=False):
lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)
lars.fit(X, y)
assert_equal(len(lars.coef_.nonzero()[0]), 6)
# The path should be of length 6 + 1 in a Lars going down to 6
# non-zero coefs
assert_equal(len(lars.alphas_), 7)
def test_multitarget():
# Assure that estimators receiving multidimensional y do the right thing
X = diabetes.data
Y = np.vstack([diabetes.target, diabetes.target ** 2]).T
n_targets = Y.shape[1]
for estimator in (linear_model.LassoLars(), linear_model.Lars()):
estimator.fit(X, Y)
Y_pred = estimator.predict(X)
Y_dec = estimator.decision_function(X)
assert_array_almost_equal(Y_pred, Y_dec)
alphas, active, coef, path = (estimator.alphas_, estimator.active_,
estimator.coef_, estimator.coef_path_)
for k in range(n_targets):
estimator.fit(X, Y[:, k])
y_pred = estimator.predict(X)
assert_array_almost_equal(alphas[k], estimator.alphas_)
assert_array_almost_equal(active[k], estimator.active_)
assert_array_almost_equal(coef[k], estimator.coef_)
assert_array_almost_equal(path[k], estimator.coef_path_)
assert_array_almost_equal(Y_pred[:, k], y_pred)
def test_lars_cv():
# Test the LassoLarsCV object by checking that the optimal alpha
# increases as the number of samples increases.
    # This property is not actually guaranteed in general and is just a
# property of the given dataset, with the given steps chosen.
old_alpha = 0
lars_cv = linear_model.LassoLarsCV()
for length in (400, 200, 100):
X = diabetes.data[:length]
y = diabetes.target[:length]
lars_cv.fit(X, y)
np.testing.assert_array_less(old_alpha, lars_cv.alpha_)
old_alpha = lars_cv.alpha_
def test_lasso_lars_ic():
# Test the LassoLarsIC object by checking that
# - some good features are selected.
# - alpha_bic > alpha_aic
# - n_nonzero_bic < n_nonzero_aic
lars_bic = linear_model.LassoLarsIC('bic')
lars_aic = linear_model.LassoLarsIC('aic')
rng = np.random.RandomState(42)
X = diabetes.data
y = diabetes.target
X = np.c_[X, rng.randn(X.shape[0], 4)] # add 4 bad features
lars_bic.fit(X, y)
lars_aic.fit(X, y)
nonzero_bic = np.where(lars_bic.coef_)[0]
nonzero_aic = np.where(lars_aic.coef_)[0]
assert_greater(lars_bic.alpha_, lars_aic.alpha_)
assert_less(len(nonzero_bic), len(nonzero_aic))
assert_less(np.max(nonzero_bic), diabetes.data.shape[1])
# test error on unknown IC
lars_broken = linear_model.LassoLarsIC('<unknown>')
assert_raises(ValueError, lars_broken.fit, X, y)
def test_no_warning_for_zero_mse():
# LassoLarsIC should not warn for log of zero MSE.
y = np.arange(10, dtype=float)
X = y.reshape(-1, 1)
lars = linear_model.LassoLarsIC(normalize=False)
assert_no_warnings(lars.fit, X, y)
assert_true(np.any(np.isinf(lars.criterion_)))
def test_lars_path_readonly_data():
# When using automated memory mapping on large input, the
# fold data is in read-only mode
# This is a non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/4597
splitted_data = train_test_split(X, y, random_state=42)
with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):
# The following should not fail despite copy=False
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)
def test_lars_path_positive_constraint():
# this is the main test for the positive parameter on the lars_path method
# the estimator classes just make use of this function
# we do the test on the diabetes dataset
# ensure that we get negative coefficients when positive=False
# and all positive when positive=True
# for method 'lar' (default) and lasso
for method in ['lar', 'lasso']:
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=False)
assert_true(coefs.min() < 0)
alpha, active, coefs = \
linear_model.lars_path(diabetes['data'], diabetes['target'],
return_path=True, method=method,
positive=True)
assert_true(coefs.min() >= 0)
# now we are going to test the positive option for all estimator classes
default_parameter = {'fit_intercept': False}
estimator_parameter_map = {'Lars': {'n_nonzero_coefs': 5},
'LassoLars': {'alpha': 0.1},
'LarsCV': {},
'LassoLarsCV': {},
'LassoLarsIC': {}}
def test_estimatorclasses_positive_constraint():
# testing the transmissibility for the positive option of all estimator
# classes in this same function here
for estname in estimator_parameter_map:
params = default_parameter.copy()
params.update(estimator_parameter_map[estname])
estimator = getattr(linear_model, estname)(positive=False, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(estimator.coef_.min() < 0)
estimator = getattr(linear_model, estname)(positive=True, **params)
estimator.fit(diabetes['data'], diabetes['target'])
assert_true(min(estimator.coef_) >= 0)
def test_lasso_lars_vs_lasso_cd_positive(verbose=False):
# Test that LassoLars and Lasso using coordinate descent give the
# same results when using the positive option
# This test is basically a copy of the above with additional positive
# option. However for the middle part, the comparison of coefficient values
# for a range of alphas, we had to make an adaptations. See below.
# not normalized data
X = 3 * diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)
for c, a in zip(lasso_path.T, alphas):
if a == 0:
continue
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
# The range of alphas chosen for coefficient comparison here is restricted
# as compared with the above test without the positive option. This is due
# to the circumstance that the Lars-Lasso algorithm does not converge to
# the least-squares-solution for small alphas, see 'Least Angle Regression'
# by Efron et al 2004. The coefficients are typically in congruence up to
# the smallest alpha reached by the Lars-Lasso algorithm and start to
# diverge thereafter. See
# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff
for alpha in np.linspace(6e-1, 1 - 1e-2, 20):
clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,
normalize=False, positive=True).fit(X, y)
clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,
normalize=False, positive=True).fit(X, y)
err = linalg.norm(clf1.coef_ - clf2.coef_)
assert_less(err, 1e-3)
# normalized data
X = diabetes.data
alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',
positive=True)
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,
tol=1e-8, positive=True)
for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0
lasso_cd.alpha = a
lasso_cd.fit(X, y)
error = linalg.norm(c - lasso_cd.coef_)
assert_less(error, 0.01)
| bsd-3-clause |
joshloyal/scikit-learn | benchmarks/bench_plot_lasso_path.py | 84 | 4005 | """Benchmarks of Lasso regularization path computation using Lars and CD
The input data is mostly low rank but has a fat infinite tail.
"""
from __future__ import print_function
from collections import defaultdict
import gc
import sys
from time import time
import numpy as np
from sklearn.linear_model import lars_path
from sklearn.linear_model import lasso_path
from sklearn.datasets.samples_generator import make_regression
def compute_bench(samples_range, features_range):
it = 0
results = defaultdict(lambda: [])
max_it = len(samples_range) * len(features_range)
for n_samples in samples_range:
for n_features in features_range:
it += 1
print('====================')
print('Iteration %03d of %03d' % (it, max_it))
print('====================')
dataset_kwargs = {
'n_samples': n_samples,
'n_features': n_features,
'n_informative': n_features / 10,
'effective_rank': min(n_samples, n_features) / 10,
#'effective_rank': None,
'bias': 0.0,
}
print("n_samples: %d" % n_samples)
print("n_features: %d" % n_features)
X, y = make_regression(**dataset_kwargs)
gc.collect()
print("benchmarking lars_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
G = np.dot(X.T, X) # precomputed Gram matrix
Xy = np.dot(X.T, y)
lars_path(X, y, Xy=Xy, Gram=G, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lars_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lars_path(X, y, method='lasso')
delta = time() - tstart
print("%0.3fs" % delta)
results['lars_path (without Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (with Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=True)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (with Gram)'].append(delta)
gc.collect()
print("benchmarking lasso_path (without Gram):", end='')
sys.stdout.flush()
tstart = time()
lasso_path(X, y, precompute=False)
delta = time() - tstart
print("%0.3fs" % delta)
results['lasso_path (without Gram)'].append(delta)
return results
if __name__ == '__main__':
from mpl_toolkits.mplot3d import axes3d # register the 3d projection
import matplotlib.pyplot as plt
    samples_range = np.linspace(10, 2000, 5).astype(int)
    features_range = np.linspace(10, 2000, 5).astype(int)
results = compute_bench(samples_range, features_range)
max_time = max(max(t) for t in results.values())
fig = plt.figure('scikit-learn Lasso path benchmark results')
i = 1
for c, (label, timings) in zip('bcry', sorted(results.items())):
ax = fig.add_subplot(2, 2, i, projection='3d')
X, Y = np.meshgrid(samples_range, features_range)
Z = np.asarray(timings).reshape(samples_range.shape[0],
features_range.shape[0])
# plot the actual surface
ax.plot_surface(X, Y, Z.T, cstride=1, rstride=1, color=c, alpha=0.8)
        # dummy point plot to stick the legend to since surface plots do not
        # support legends (yet?)
# ax.plot([1], [1], [1], color=c, label=label)
ax.set_xlabel('n_samples')
ax.set_ylabel('n_features')
ax.set_zlabel('Time (s)')
ax.set_zlim3d(0.0, max_time * 1.1)
ax.set_title(label)
# ax.legend()
i += 1
plt.show()
| bsd-3-clause |
manahl/arctic | tests/integration/store/test_pickle_store.py | 1 | 4373 | from datetime import datetime as dt, timedelta
import bson
import numpy as np
from mock import patch
from arctic._util import mongo_count
from arctic.arctic import Arctic
def test_save_read_bson(library):
blob = {'foo': dt(2015, 1, 1), 'bar': ['a', 'b', ['x', 'y', 'z']]}
library.write('BLOB', blob)
saved_blob = library.read('BLOB').data
assert blob == saved_blob
'''
Run test at your own discretion. Takes > 60 secs
def test_save_read_MASSIVE(library):
import pandas as pd
df = pd.DataFrame(data={'data': [1] * 150000000})
data = (df, df)
library.write('BLOB', data)
saved_blob = library.read('BLOB').data
assert(saved_blob[0].equals(df))
assert(saved_blob[1].equals(df))
'''
def test_save_read_big_encodable(library):
blob = {'foo': 'a' * 1024 * 1024 * 20}
library.write('BLOB', blob)
saved_blob = library.read('BLOB').data
assert blob == saved_blob
def test_save_read_bson_object(library):
blob = {'foo': dt(2015, 1, 1), 'object': Arctic}
library.write('BLOB', blob)
saved_blob = library.read('BLOB').data
assert blob == saved_blob
def test_get_info_bson_object(library):
blob = {'foo': dt(2015, 1, 1), 'object': Arctic}
library.write('BLOB', blob)
assert library.get_info('BLOB')['handler'] == 'PickleStore'
def test_bson_large_object(library):
blob = {'foo': dt(2015, 1, 1), 'object': Arctic,
'large_thing': np.random.rand(int(2.1 * 1024 * 1024)).tostring()}
assert len(blob['large_thing']) > 16 * 1024 * 1024
library.write('BLOB', blob)
saved_blob = library.read('BLOB').data
assert blob == saved_blob
def test_bson_leak_objects_delete(library):
blob = {'foo': dt(2015, 1, 1), 'object': Arctic}
library.write('BLOB', blob)
assert mongo_count(library._collection) == 1
assert mongo_count(library._collection.versions) == 1
library.delete('BLOB')
assert mongo_count(library._collection) == 0
assert mongo_count(library._collection.versions) == 0
def test_bson_leak_objects_prune_previous(library):
blob = {'foo': dt(2015, 1, 1), 'object': Arctic}
yesterday = dt.utcnow() - timedelta(days=1, seconds=1)
_id = bson.ObjectId.from_datetime(yesterday)
with patch("bson.ObjectId", return_value=_id):
library.write('BLOB', blob)
assert mongo_count(library._collection) == 1
assert mongo_count(library._collection.versions) == 1
_id = bson.ObjectId.from_datetime(dt.utcnow() - timedelta(minutes=130))
with patch("bson.ObjectId", return_value=_id):
library.write('BLOB', {}, prune_previous_version=False)
assert mongo_count(library._collection) == 1
assert mongo_count(library._collection.versions) == 2
    # This write should prune the oldest version in the chunk collection
library.write('BLOB', {})
assert mongo_count(library._collection) == 0
assert mongo_count(library._collection.versions) == 2
def test_prune_previous_doesnt_kill_other_objects(library):
blob = {'foo': dt(2015, 1, 1), 'object': Arctic}
yesterday = dt.utcnow() - timedelta(days=1, seconds=1)
_id = bson.ObjectId.from_datetime(yesterday)
with patch("bson.ObjectId", return_value=_id):
library.write('BLOB', blob, prune_previous_version=False)
assert mongo_count(library._collection) == 1
assert mongo_count(library._collection.versions) == 1
_id = bson.ObjectId.from_datetime(dt.utcnow() - timedelta(hours=10))
with patch("bson.ObjectId", return_value=_id):
library.write('BLOB', blob, prune_previous_version=False)
assert mongo_count(library._collection) == 1
assert mongo_count(library._collection.versions) == 2
    # This write should prune the oldest version in the chunk collection
library.write('BLOB', {})
assert mongo_count(library._collection) == 1
assert mongo_count(library._collection.versions) == 2
library._delete_version('BLOB', 2)
assert mongo_count(library._collection) == 0
assert mongo_count(library._collection.versions) == 1
def test_write_metadata(library):
blob = {'foo': dt(2015, 1, 1), 'object': Arctic}
library.write(symbol='symX', data=blob, metadata={'key1': 'value1'})
library.write_metadata(symbol='symX', metadata={'key2': 'value2'})
v = library.read('symX')
assert v.data == blob
assert v.metadata == {'key2': 'value2'}
| lgpl-2.1 |
fengzhyuan/scikit-learn | sklearn/neighbors/unsupervised.py | 106 | 4461 | """Unsupervised nearest neighbors learner"""
from .base import NeighborsBase
from .base import KNeighborsMixin
from .base import RadiusNeighborsMixin
from .base import UnsupervisedMixin
class NearestNeighbors(NeighborsBase, KNeighborsMixin,
RadiusNeighborsMixin, UnsupervisedMixin):
"""Unsupervised learner for implementing neighbor searches.
Read more in the :ref:`User Guide <unsupervised_neighbors>`.
Parameters
----------
n_neighbors : int, optional (default = 5)
Number of neighbors to use by default for :meth:`k_neighbors` queries.
radius : float, optional (default = 1.0)
        Range of parameter space to use by default for :meth:`radius_neighbors`
queries.
algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional
Algorithm used to compute the nearest neighbors:
- 'ball_tree' will use :class:`BallTree`
        - 'kd_tree' will use :class:`KDTree`
- 'brute' will use a brute-force search.
- 'auto' will attempt to decide the most appropriate algorithm
based on the values passed to :meth:`fit` method.
Note: fitting on sparse input will override the setting of
this parameter, using brute force.
leaf_size : int, optional (default = 30)
Leaf size passed to BallTree or KDTree. This can affect the
speed of the construction and query, as well as the memory
required to store the tree. The optimal value depends on the
nature of the problem.
    p : integer, optional (default = 2)
Parameter for the Minkowski metric from
sklearn.metrics.pairwise.pairwise_distances. When p = 1, this is
equivalent to using manhattan_distance (l1), and euclidean_distance
(l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used.
metric : string or callable, default 'minkowski'
metric to use for distance computation. Any metric from scikit-learn
or scipy.spatial.distance can be used.
If metric is a callable function, it is called on each
pair of instances (rows) and the resulting value recorded. The callable
should take two arrays as input and return one value indicating the
distance between them. This works for Scipy's metrics, but is less
efficient than passing the metric name as a string.
Distance matrices are not supported.
Valid values for metric are:
- from scikit-learn: ['cityblock', 'cosine', 'euclidean', 'l1', 'l2',
'manhattan']
- from scipy.spatial.distance: ['braycurtis', 'canberra', 'chebyshev',
'correlation', 'dice', 'hamming', 'jaccard', 'kulsinski',
'mahalanobis', 'matching', 'minkowski', 'rogerstanimoto',
'russellrao', 'seuclidean', 'sokalmichener', 'sokalsneath',
'sqeuclidean', 'yule']
See the documentation for scipy.spatial.distance for details on these
metrics.
    metric_params : dict, optional (default = None)
additional keyword arguments for the metric function.
Examples
--------
>>> import numpy as np
>>> from sklearn.neighbors import NearestNeighbors
>>> samples = [[0, 0, 2], [1, 0, 0], [0, 0, 1]]
>>> neigh = NearestNeighbors(2, 0.4)
>>> neigh.fit(samples) #doctest: +ELLIPSIS
NearestNeighbors(...)
>>> neigh.kneighbors([[0, 0, 1.3]], 2, return_distance=False)
... #doctest: +ELLIPSIS
array([[2, 0]]...)
>>> rng = neigh.radius_neighbors([0, 0, 1.3], 0.4, return_distance=False)
>>> np.asarray(rng[0][0])
array(2)
See also
--------
KNeighborsClassifier
RadiusNeighborsClassifier
KNeighborsRegressor
RadiusNeighborsRegressor
BallTree
Notes
-----
See :ref:`Nearest Neighbors <neighbors>` in the online documentation
for a discussion of the choice of ``algorithm`` and ``leaf_size``.
http://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm
"""
def __init__(self, n_neighbors=5, radius=1.0,
algorithm='auto', leaf_size=30, metric='minkowski',
p=2, metric_params=None, **kwargs):
self._init_params(n_neighbors=n_neighbors,
radius=radius,
algorithm=algorithm,
leaf_size=leaf_size, metric=metric, p=p,
metric_params=metric_params, **kwargs)
| bsd-3-clause |
kyoren/https-github.com-h2oai-h2o-3 | py2/h2o_gbm.py | 30 | 16328 |
import re, random, math
import h2o_args
import h2o_nodes
import h2o_cmd
from h2o_test import verboseprint, dump_json, check_sandbox_for_errors
def plotLists(xList, xLabel=None, eListTitle=None, eList=None, eLabel=None, fListTitle=None, fList=None, fLabel=None, server=False):
if h2o_args.python_username!='kevin':
return
# Force matplotlib to not use any Xwindows backend.
if server:
import matplotlib
matplotlib.use('Agg')
import pylab as plt
print "xList", xList
print "eList", eList
print "fList", fList
font = {'family' : 'normal',
'weight' : 'normal',
'size' : 26}
### plt.rc('font', **font)
plt.rcdefaults()
if eList:
if eListTitle:
plt.title(eListTitle)
plt.figure()
plt.plot (xList, eList)
plt.xlabel(xLabel)
plt.ylabel(eLabel)
plt.draw()
plt.savefig('eplot.jpg',format='jpg')
# Image.open('testplot.jpg').save('eplot.jpg','JPEG')
if fList:
if fListTitle:
plt.title(fListTitle)
plt.figure()
plt.plot (xList, fList)
plt.xlabel(xLabel)
plt.ylabel(fLabel)
plt.draw()
plt.savefig('fplot.jpg',format='jpg')
# Image.open('fplot.jpg').save('fplot.jpg','JPEG')
if eList or fList:
plt.show()
# pretty print a confusion matrix (cm), e.g. one returned by H2O
def pp_cm(jcm, header=None):
# header = jcm['header']
# hack col index header for now..where do we get it?
header = ['"%s"'%i for i in range(len(jcm[0]))]
# cm = ' '.join(header)
cm = '{0:<8}'.format('')
for h in header:
cm = '{0}|{1:<8}'.format(cm, h)
cm = '{0}|{1:<8}'.format(cm, 'error')
c = 0
for line in jcm:
lineSum = sum(line)
if c < 0 or c >= len(line):
raise Exception("Error in h2o_gbm.pp_cm. c: %s line: %s len(line): %s jcm: %s" % (c, line, len(line), dump_json(jcm)))
print "c:", c, "line:", line
errorSum = lineSum - line[c]
if (lineSum>0):
err = float(errorSum) / lineSum
else:
err = 0.0
fl = '{0:<8}'.format(header[c])
for num in line: fl = '{0}|{1:<8}'.format(fl, num)
fl = '{0}|{1:<8.2f}'.format(fl, err)
cm = "{0}\n{1}".format(cm, fl)
c += 1
return cm
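# Illustrative usage sketch (not part of the original module; the confusion
# matrix values below are made up purely to show the expected input shape):
# example_cm = [[50, 2],
#               [3, 45]]
# print pp_cm(example_cm, header=None)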
def pp_cm_summary(cm):
    # hack: cut and paste for now (should be in h2o_gbm.py?)
scoresList = cm
totalScores = 0
totalRight = 0
# individual scores can be all 0 if nothing for that output class
# due to sampling
classErrorPctList = []
predictedClassDict = {} # may be missing some? so need a dict?
for classIndex,s in enumerate(scoresList):
classSum = sum(s)
if classSum == 0 :
# why would the number of scores for a class be 0?
# in any case, tolerate. (it shows up in test.py on poker100)
print "classIndex:", classIndex, "classSum", classSum, "<- why 0?"
else:
            if classIndex >= len(s):
                print "Why is classIndex:", classIndex, "for s:", s
else:
# H2O should really give me this since it's in the browser, but it doesn't
classRightPct = ((s[classIndex] + 0.0)/classSum) * 100
totalRight += s[classIndex]
classErrorPct = 100 - classRightPct
classErrorPctList.append(classErrorPct)
### print "s:", s, "classIndex:", classIndex
print "class:", classIndex, "classSum", classSum, "classErrorPct:", "%4.2f" % classErrorPct
# gather info for prediction summary
for pIndex,p in enumerate(s):
if pIndex not in predictedClassDict:
predictedClassDict[pIndex] = p
else:
predictedClassDict[pIndex] += p
totalScores += classSum
print "Predicted summary:"
# FIX! Not sure why we weren't working with a list..hack with dict for now
for predictedClass,p in predictedClassDict.items():
print str(predictedClass)+":", p
# this should equal the num rows in the dataset if full scoring? (minus any NAs)
print "totalScores:", totalScores
print "totalRight:", totalRight
if totalScores != 0: pctRight = 100.0 * totalRight/totalScores
else: pctRight = 0.0
print "pctRight:", "%5.2f" % pctRight
pctWrong = 100 - pctRight
print "pctWrong:", "%5.2f" % pctWrong
return pctWrong
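# Illustrative usage sketch (same made-up 2x2 confusion matrix as above):
# pctWrong = pp_cm_summary([[50, 2], [3, 45]])
# # prints per-class error rates plus prediction totals and returns the overall
# # percentage of misclassified samples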
# I just copied the GLM version of this and changed it to GBM. Have to update to match GBM params and responses
def pickRandGbmParams(paramDict, params):
colX = 0
randomGroupSize = random.randint(1,len(paramDict))
for i in range(randomGroupSize):
randomKey = random.choice(paramDict.keys())
randomV = paramDict[randomKey]
randomValue = random.choice(randomV)
params[randomKey] = randomValue
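# Illustrative usage sketch (the parameter names and values below are made up and
# are not necessarily valid H2O GBM parameters):
# paramDict = {'ntrees': [10, 50], 'max_depth': [3, 5, 7]}
# params = {}
# pickRandGbmParams(paramDict, params)
# # params now holds a random subset of the keys, each mapped to one randomly
# # chosen value from its list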
# compare this glm to last one. since the files are concatenations,
# the results should be similar? 10% of first is allowed delta
def compareToFirstGbm(self, key, glm, firstglm):
# if isinstance(firstglm[key], list):
    # in case it's not a list already (err is a list)
verboseprint("compareToFirstGbm key:", key)
verboseprint("compareToFirstGbm glm[key]:", glm[key])
# key could be a list or not. if a list, don't want to create list of that list
# so use extend on an empty list. covers all cases?
if type(glm[key]) is list:
kList = glm[key]
firstkList = firstglm[key]
elif type(glm[key]) is dict:
raise Exception("compareToFirstGLm: Not expecting dict for " + key)
else:
kList = [glm[key]]
firstkList = [firstglm[key]]
for k, firstk in zip(kList, firstkList):
# delta must be a positive number ?
delta = .1 * abs(float(firstk))
msg = "Too large a delta (" + str(delta) + ") comparing current and first for: " + key
self.assertAlmostEqual(float(k), float(firstk), delta=delta, msg=msg)
self.assertGreaterEqual(abs(float(k)), 0.0, str(k) + " abs not >= 0.0 in current")
def goodXFromColumnInfo(y,
num_cols=None, missingValuesDict=None, constantValuesDict=None, enumSizeDict=None,
colTypeDict=None, colNameDict=None, keepPattern=None, key=None,
timeoutSecs=120, forRF=False, noPrint=False):
y = str(y)
# if we pass a key, means we want to get the info ourselves here
if key is not None:
(missingValuesDict, constantValuesDict, enumSizeDict, colTypeDict, colNameDict) = \
h2o_cmd.columnInfoFromInspect(key, exceptionOnMissingValues=False,
max_column_display=99999999, timeoutSecs=timeoutSecs)
num_cols = len(colNameDict)
# now remove any whose names don't match the required keepPattern
if keepPattern is not None:
keepX = re.compile(keepPattern)
else:
keepX = None
x = range(num_cols)
# need to walk over a copy, cause we change x
xOrig = x[:]
ignore_x = [] # for use by RF
for k in xOrig:
name = colNameDict[k]
# remove it if it has the same name as the y output
if str(k)== y: # if they pass the col index as y
if not noPrint:
print "Removing %d because name: %s matches output %s" % (k, str(k), y)
x.remove(k)
# rf doesn't want it in ignore list
# ignore_x.append(k)
elif name == y: # if they pass the name as y
if not noPrint:
print "Removing %d because name: %s matches output %s" % (k, name, y)
x.remove(k)
# rf doesn't want it in ignore list
# ignore_x.append(k)
elif keepX is not None and not keepX.match(name):
if not noPrint:
print "Removing %d because name: %s doesn't match desired keepPattern %s" % (k, name, keepPattern)
x.remove(k)
ignore_x.append(k)
# missing values reports as constant also. so do missing first.
# remove all cols with missing values
# could change it against num_rows for a ratio
elif k in missingValuesDict:
value = missingValuesDict[k]
if not noPrint:
print "Removing %d with name: %s because it has %d missing values" % (k, name, value)
x.remove(k)
ignore_x.append(k)
elif k in constantValuesDict:
value = constantValuesDict[k]
if not noPrint:
print "Removing %d with name: %s because it has constant value: %s " % (k, name, str(value))
x.remove(k)
ignore_x.append(k)
# this is extra pruning..
# remove all cols with enums, if not already removed
elif k in enumSizeDict:
value = enumSizeDict[k]
if not noPrint:
print "Removing %d %s because it has enums of size: %d" % (k, name, value)
x.remove(k)
ignore_x.append(k)
if not noPrint:
print "x has", len(x), "cols"
print "ignore_x has", len(ignore_x), "cols"
x = ",".join(map(str,x))
ignore_x = ",".join(map(str,ignore_x))
if not noPrint:
print "\nx:", x
print "\nignore_x:", ignore_x
if forRF:
return ignore_x
else:
return x
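# Illustrative usage sketch (the key name and keepPattern below are made up):
# x = goodXFromColumnInfo(y=54, key='cars_train.hex', keepPattern='sepal.*',
#                         timeoutSecs=60)
# # returns a comma-separated string of column indices, excluding the output column
# # and any columns with missing values, constant values or enums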
def showGBMGridResults(GBMResult, expectedErrorMax, classification=True):
# print "GBMResult:", dump_json(GBMResult)
jobs = GBMResult['jobs']
print "GBM jobs:", jobs
for jobnum, j in enumerate(jobs):
_distribution = j['_distribution']
model_key = j['destination_key']
job_key = j['job_key']
# inspect = h2o_cmd.runInspect(key=model_key)
# print "jobnum:", jobnum, dump_json(inspect)
gbmTrainView = h2o_cmd.runGBMView(model_key=model_key)
print "jobnum:", jobnum, dump_json(gbmTrainView)
if classification:
cms = gbmTrainView['gbm_model']['cms']
cm = cms[-1]['_arr'] # take the last one
print "GBM cms[-1]['_predErr']:", cms[-1]['_predErr']
print "GBM cms[-1]['_classErr']:", cms[-1]['_classErr']
pctWrongTrain = pp_cm_summary(cm);
if pctWrongTrain > expectedErrorMax:
raise Exception("Should have < %s error here. pctWrongTrain: %s" % (expectedErrorMax, pctWrongTrain))
errsLast = gbmTrainView['gbm_model']['errs'][-1]
print "\nTrain", jobnum, job_key, "\n==========\n", "pctWrongTrain:", pctWrongTrain, "errsLast:", errsLast
print "GBM 'errsLast'", errsLast
print pp_cm(cm)
        else:
            errsLast = gbmTrainView['gbm_model']['errs'][-1]
            print "\nTrain", jobnum, job_key, "\n==========\n", "errsLast:", errsLast
print "GBMTrainView errs:", gbmTrainView['gbm_model']['errs']
def simpleCheckGBMView(node=None, gbmv=None, noPrint=False, **kwargs):
if not node:
node = h2o_nodes.nodes[0]
if 'warnings' in gbmv:
warnings = gbmv['warnings']
# catch the 'Failed to converge" for now
for w in warnings:
if not noPrint: print "\nwarning:", w
if ('Failed' in w) or ('failed' in w):
raise Exception(w)
if 'cm' in gbmv:
cm = gbmv['cm'] # only one
else:
if 'gbm_model' in gbmv:
gbm_model = gbmv['gbm_model']
else:
raise Exception("no gbm_model in gbmv? %s" % dump_json(gbmv))
cms = gbm_model['cms']
print "number of cms:", len(cms)
print "FIX! need to add reporting of h2o's _perr per class error"
# FIX! what if regression. is rf only classification?
print "cms[-1]['_arr']:", cms[-1]['_arr']
print "cms[-1]['_predErr']:", cms[-1]['_predErr']
print "cms[-1]['_classErr']:", cms[-1]['_classErr']
## print "cms[-1]:", dump_json(cms[-1])
## for i,c in enumerate(cms):
## print "cm %s: %s" % (i, c['_arr'])
cm = cms[-1]['_arr'] # take the last one
scoresList = cm
used_trees = gbm_model['N']
errs = gbm_model['errs']
print "errs[0]:", errs[0]
print "errs[-1]:", errs[-1]
print "errs:", errs
# if we got the ntree for comparison. Not always there in kwargs though!
param_ntrees = kwargs.get('ntrees',None)
if (param_ntrees is not None and used_trees != param_ntrees):
raise Exception("used_trees should == param_ntree. used_trees: %s" % used_trees)
if (used_trees+1)!=len(cms) or (used_trees+1)!=len(errs):
raise Exception("len(cms): %s and len(errs): %s should be one more than N %s trees" % (len(cms), len(errs), used_trees))
totalScores = 0
totalRight = 0
# individual scores can be all 0 if nothing for that output class
# due to sampling
classErrorPctList = []
predictedClassDict = {} # may be missing some? so need a dict?
for classIndex,s in enumerate(scoresList):
classSum = sum(s)
if classSum == 0 :
            # why would the number of scores for a class be 0? does the GBM CM have entries
            # for non-existent classes in a range? In any case, tolerate. (it shows up in test.py on poker100)
if not noPrint: print "class:", classIndex, "classSum", classSum, "<- why 0?"
else:
# H2O should really give me this since it's in the browser, but it doesn't
classRightPct = ((s[classIndex] + 0.0)/classSum) * 100
totalRight += s[classIndex]
classErrorPct = round(100 - classRightPct, 2)
classErrorPctList.append(classErrorPct)
### print "s:", s, "classIndex:", classIndex
if not noPrint: print "class:", classIndex, "classSum", classSum, "classErrorPct:", "%4.2f" % classErrorPct
# gather info for prediction summary
for pIndex,p in enumerate(s):
if pIndex not in predictedClassDict:
predictedClassDict[pIndex] = p
else:
predictedClassDict[pIndex] += p
totalScores += classSum
#****************************
if not noPrint:
print "Predicted summary:"
# FIX! Not sure why we weren't working with a list..hack with dict for now
for predictedClass,p in predictedClassDict.items():
print str(predictedClass)+":", p
# this should equal the num rows in the dataset if full scoring? (minus any NAs)
print "totalScores:", totalScores
print "totalRight:", totalRight
if totalScores != 0:
pctRight = 100.0 * totalRight/totalScores
else:
pctRight = 0.0
pctWrong = 100 - pctRight
print "pctRight:", "%5.2f" % pctRight
print "pctWrong:", "%5.2f" % pctWrong
#****************************
# more testing for GBMView
# it's legal to get 0's for oobe error # if sample_rate = 1
sample_rate = kwargs.get('sample_rate', None)
validation = kwargs.get('validation', None)
if (sample_rate==1 and not validation):
pass
elif (totalScores<=0 or totalScores>5e9):
raise Exception("scores in GBMView seems wrong. scores:", scoresList)
varimp = gbm_model['varimp']
treeStats = gbm_model['treeStats']
if not treeStats:
raise Exception("treeStats not right?: %s" % dump_json(treeStats))
# print "json:", dump_json(gbmv)
data_key = gbm_model['_dataKey']
model_key = gbm_model['_key']
classification_error = pctWrong
if not noPrint:
if 'minLeaves' not in treeStats or not treeStats['minLeaves']:
raise Exception("treeStats seems to be missing minLeaves %s" % dump_json(treeStats))
print """
Leaves: {0} / {1} / {2}
Depth: {3} / {4} / {5}
Err: {6:0.2f} %
""".format(
treeStats['minLeaves'],
treeStats['meanLeaves'],
treeStats['maxLeaves'],
treeStats['minDepth'],
treeStats['meanDepth'],
treeStats['maxDepth'],
classification_error,
)
### modelInspect = node.inspect(model_key)
dataInspect = h2o_cmd.runInspect(key=data_key)
check_sandbox_for_errors()
return (round(classification_error,2), classErrorPctList, totalScores)
| apache-2.0 |
nigroup/pypet | pypet/tests/profiling/speed_analysis/storage_analysis/avg_runtima_as_function_of_length_plot_times.py | 2 | 3376 | __author__ = 'robert'
from pypet import Environment, Trajectory
from pypet.tests.testutils.ioutils import make_temp_dir, get_log_config
import os
import matplotlib.pyplot as plt
import numpy as np
import time
import numpy as np
import scipy.sparse as spsp
from pycallgraph import PyCallGraph, Config, GlobbingFilter
from pycallgraph.output import GraphvizOutput
from pycallgraph.color import Color
class CustomOutput(GraphvizOutput):
def node_color(self, node):
value = float(node.time.fraction)
return Color.hsv(value / 2 + .5, value, 0.9)
def edge_color(self, edge):
value = float(edge.time.fraction)
return Color.hsv(value / 2 + .5, value, 0.7)
def job(traj):
traj.f_ares('$set.$', 42, comment='A result')
def get_runtime(length):
filename = os.path.join('tmp', 'hdf5', 'many_runs.hdf5')
with Environment(filename = filename,
log_levels=20, report_progress=(0.0000002, 'progress', 50),
overwrite_file=True, purge_duplicate_comments=False,
log_stdout=False,
summary_tables=False, small_overview_tables=False) as env:
traj = env.v_traj
traj.par.f_apar('x', 0, 'parameter')
traj.f_explore({'x': range(length)})
max_run = 100
for idx in range(len(traj)):
if idx > max_run:
traj.f_get_run_information(idx, copy=False)['completed'] = 1
traj.f_store()
if not os.path.isdir('./tmp'):
os.mkdir('tmp')
graphviz = CustomOutput()
graphviz.output_file = './tmp/run_profile_storage_%d.png' % len(traj)
service_filter = GlobbingFilter(include=['*storageservice.*'])
config = Config(groups=True, verbose=True)
config.trace_filter = service_filter
print('RUN PROFILE')
with PyCallGraph(config=config, output=graphviz):
# start = time.time()
# env.f_run(job)
# end = time.time()
for irun in range(100):
traj._make_single_run(irun+len(traj)/2)
# Measure start time
traj._set_start()
traj.f_ares('$set.$', 42, comment='A result')
traj._set_finish()
traj._store_final(store_data=2)
traj._finalize_run()
print('STARTING_to_PLOT')
print('DONE RUN PROFILE')
# dicts = [traj.f_get_run_information(x) for x in range(min(len(traj), max_run))]
# total = end - start
# return total/float(min(len(traj), max_run)), total/float(min(len(traj), max_run)) * len(traj)
def main():
lengths = [1000, 1000000]
runtimes = [get_runtime(x) for x in lengths]
# avg_runtimes = [x[0] for x in runtimes]
# summed_runtime = [x[1] for x in runtimes]
# plt.subplot(2, 1, 1)
# plt.semilogx(list(reversed(lengths)), list(reversed(avg_runtimes)), linewidth=2)
# plt.xlabel('Runs')
# plt.ylabel('t[s]')
# plt.title('Average Runtime per single run')
# plt.grid()
# plt.subplot(2, 1, 2)
# plt.loglog(lengths, summed_runtime, linewidth=2)
# plt.grid()
# plt.xlabel('Runs')
# plt.ylabel('t[s]')
# plt.title('Total runtime of experiment')
# plt.savefig('avg_runtime_as_func_of_lenght_100')
# plt.show()
if __name__ == '__main__':
main() | bsd-3-clause |
JosmanPS/scikit-learn | sklearn/neighbors/tests/test_nearest_centroid.py | 305 | 4121 | """
Testing for the nearest centroid module.
"""
import numpy as np
from scipy import sparse as sp
from numpy.testing import assert_array_equal
from numpy.testing import assert_equal
from sklearn.neighbors import NearestCentroid
from sklearn import datasets
from sklearn.metrics.pairwise import pairwise_distances
# toy sample
X = [[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]]
X_csr = sp.csr_matrix(X) # Sparse matrix
y = [-1, -1, -1, 1, 1, 1]
T = [[-1, -1], [2, 2], [3, 2]]
T_csr = sp.csr_matrix(T)
true_result = [-1, 1, 1]
# also load the iris dataset
# and randomly permute it
iris = datasets.load_iris()
rng = np.random.RandomState(1)
perm = rng.permutation(iris.target.size)
iris.data = iris.data[perm]
iris.target = iris.target[perm]
def test_classification_toy():
# Check classification on a toy dataset, including sparse versions.
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T), true_result)
# Same test, but with a sparse matrix to fit and test.
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit with sparse, test with non-sparse
clf = NearestCentroid()
clf.fit(X_csr, y)
assert_array_equal(clf.predict(T), true_result)
# Fit with non-sparse, test with sparse
clf = NearestCentroid()
clf.fit(X, y)
assert_array_equal(clf.predict(T_csr), true_result)
# Fit and predict with non-CSR sparse matrices
clf = NearestCentroid()
clf.fit(X_csr.tocoo(), y)
assert_array_equal(clf.predict(T_csr.tolil()), true_result)
def test_precomputed():
clf = NearestCentroid(metric="precomputed")
clf.fit(X, y)
S = pairwise_distances(T, clf.centroids_)
assert_array_equal(clf.predict(S), true_result)
def test_iris():
# Check consistency on dataset iris.
for metric in ('euclidean', 'cosine'):
clf = NearestCentroid(metric=metric).fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.9, "Failed with score = " + str(score)
def test_iris_shrinkage():
# Check consistency on dataset iris, when using shrinkage.
for metric in ('euclidean', 'cosine'):
for shrink_threshold in [None, 0.1, 0.5]:
clf = NearestCentroid(metric=metric,
shrink_threshold=shrink_threshold)
clf = clf.fit(iris.data, iris.target)
score = np.mean(clf.predict(iris.data) == iris.target)
assert score > 0.8, "Failed with score = " + str(score)
def test_pickle():
import pickle
# classification
obj = NearestCentroid()
obj.fit(iris.data, iris.target)
score = obj.score(iris.data, iris.target)
s = pickle.dumps(obj)
obj2 = pickle.loads(s)
assert_equal(type(obj2), obj.__class__)
score2 = obj2.score(iris.data, iris.target)
assert_array_equal(score, score2,
"Failed to generate same score"
" after pickling (classification).")
def test_shrinkage_threshold_decoded_y():
clf = NearestCentroid(shrink_threshold=0.01)
y_ind = np.asarray(y)
y_ind[y_ind == -1] = 0
clf.fit(X, y_ind)
centroid_encoded = clf.centroids_
clf.fit(X, y)
assert_array_equal(centroid_encoded, clf.centroids_)
def test_predict_translated_data():
# Test that NearestCentroid gives same results on translated data
rng = np.random.RandomState(0)
X = rng.rand(50, 50)
y = rng.randint(0, 3, 50)
noise = rng.rand(50)
clf = NearestCentroid(shrink_threshold=0.1)
clf.fit(X, y)
y_init = clf.predict(X)
clf = NearestCentroid(shrink_threshold=0.1)
X_noise = X + noise
clf.fit(X_noise, y)
y_translate = clf.predict(X_noise)
assert_array_equal(y_init, y_translate)
def test_manhattan_metric():
# Test the manhattan metric.
clf = NearestCentroid(metric='manhattan')
clf.fit(X, y)
dense_centroid = clf.centroids_
clf.fit(X_csr, y)
assert_array_equal(clf.centroids_, dense_centroid)
assert_array_equal(dense_centroid, [[-1, -1], [1, 1]])
| bsd-3-clause |
tntnatbry/tensorflow | tensorflow/contrib/learn/python/learn/dataframe/transforms/in_memory_source.py | 82 | 6157 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Sources for numpy arrays and pandas DataFrames."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.dataframe import transform
from tensorflow.contrib.learn.python.learn.dataframe.queues import feeding_functions
class BaseInMemorySource(transform.TensorFlowTransform):
"""Abstract parent class for NumpySource and PandasSource."""
def __init__(self,
data,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="in_memory_data"):
super(BaseInMemorySource, self).__init__()
self._data = data
self._num_threads = 1 if num_threads is None else num_threads
self._batch_size = (32 if batch_size is None else batch_size)
self._enqueue_size = max(1, int(self._batch_size / self._num_threads)
) if enqueue_size is None else enqueue_size
self._queue_capacity = (self._batch_size * 10 if queue_capacity is None else
queue_capacity)
self._shuffle = shuffle
self._min_after_dequeue = (batch_size if min_after_dequeue is None else
min_after_dequeue)
self._seed = seed
self._data_name = data_name
@transform.parameter
def data(self):
return self._data
@transform.parameter
def num_threads(self):
return self._num_threads
@transform.parameter
def enqueue_size(self):
return self._enqueue_size
@transform.parameter
def batch_size(self):
return self._batch_size
@transform.parameter
def queue_capacity(self):
return self._queue_capacity
@transform.parameter
def shuffle(self):
return self._shuffle
@transform.parameter
def min_after_dequeue(self):
return self._min_after_dequeue
@transform.parameter
def seed(self):
return self._seed
@transform.parameter
def data_name(self):
return self._data_name
@property
def input_valency(self):
return 0
def _apply_transform(self, transform_input, **kwargs):
queue = feeding_functions.enqueue_data(self.data,
self.queue_capacity,
self.shuffle,
self.min_after_dequeue,
num_threads=self.num_threads,
seed=self.seed,
name=self.data_name,
enqueue_size=self.enqueue_size,
num_epochs=kwargs.get("num_epochs"))
dequeued = queue.dequeue_many(self.batch_size)
# TODO(jamieas): dequeue and dequeue_many will soon return a list regardless
# of the number of enqueued tensors. Remove the following once that change
# is in place.
if not isinstance(dequeued, (tuple, list)):
dequeued = (dequeued,)
# pylint: disable=not-callable
return self.return_type(*dequeued)
class NumpySource(BaseInMemorySource):
"""A zero-input Transform that produces a single column from a numpy array."""
@property
def name(self):
return "NumpySource"
@property
def _output_names(self):
return ("index", "value")
class OrderedDictNumpySource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a dict of numpy arrays."""
def __init__(self,
ordered_dict_of_arrays,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in ordered_dict_of_arrays.keys():
raise ValueError("Column name `index` is reserved.")
super(OrderedDictNumpySource, self).__init__(ordered_dict_of_arrays,
num_threads, enqueue_size,
batch_size, queue_capacity,
shuffle, min_after_dequeue,
seed, data_name)
@property
def name(self):
return "OrderedDictNumpySource"
@property
def _output_names(self):
return tuple(["index"] + list(self._data.keys()))
class PandasSource(BaseInMemorySource):
"""A zero-input Transform that produces Series from a DataFrame."""
def __init__(self,
dataframe,
num_threads=None,
enqueue_size=None,
batch_size=None,
queue_capacity=None,
shuffle=False,
min_after_dequeue=None,
seed=None,
data_name="pandas_data"):
if "index" in dataframe.columns:
raise ValueError("Column name `index` is reserved.")
super(PandasSource, self).__init__(dataframe, num_threads, enqueue_size,
batch_size, queue_capacity, shuffle,
min_after_dequeue, seed, data_name)
@property
def name(self):
return "PandasSource"
@property
def _output_names(self):
return tuple(["index"] + self._data.columns.tolist())
| apache-2.0 |
AlexanderFabisch/scikit-learn | sklearn/metrics/tests/test_score_objects.py | 17 | 14051 | import pickle
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
REGRESSION_SCORERS = ['r2', 'mean_absolute_error', 'mean_squared_error',
'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should a be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
estimator = dict([(name, sensible_regr)
for name in REGRESSION_SCORERS] +
[(name, sensible_clf)
for name in CLF_SCORERS] +
[(name, sensible_ml_clf)
for name in MULTILABEL_ONLY_SCORERS])
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
| bsd-3-clause |
zmlabe/IceVarFigs | Scripts/SeaIce/NSIDCseaice_quartiles.py | 1 | 7079 | """
Reads in current year's Arctic sea ice extent from Sea Ice Index 3 (NSIDC)
Website : ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/
Author : Zachary M. Labe
Date : 5 September 2016
"""
### Import modules
import numpy as np
import urllib.request
import urllib as UL
import datetime
import matplotlib.pyplot as plt
### Directory and time
directoryfigure = './Figures/'
now = datetime.datetime.now()
currentmn = str(now.month)
currentdy = str(now.day)
currentyr = str(now.year)
currenttime = currentmn + '_' + currentdy + '_' + currentyr
currentdoy = now.timetuple().tm_yday
### Load url
url = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \
'N_seaice_extent_daily_v3.0.csv'
### Read file
raw_data = UL.request.urlopen(url)
dataset = np.genfromtxt(raw_data, skip_header=2,delimiter=',',
usecols=[0,1,2,3,4])
print('\nCompleted: Read sea ice data!')
### Set missing data to nan
dataset[np.where(dataset==-9999)] = np.nan
### Variables
year = dataset[:,0]
month = dataset[:,1]
day = dataset[:,2]
ice = dataset[:,3]
missing = dataset[:,4]
### Call present year
yr2018 = np.where(year == 2018)[0]
ice18 = ice[yr2018]
### Ice Conversion
iceval = ice18 * 1e6
### Printing info
print('\n----- NSIDC Arctic Sea Ice -----')
print('Current Date =', now.strftime("%Y-%m-%d %H:%M"), '\n')
print('SIE Date = %s/%s/%s' % (int(month[-1]),int(day[-1]),int(year[-1])))
print('Current SIE = %s km^2 \n' % (iceval[-1]))
print('1-day change SIE = %s km^2' % (iceval[-1]-iceval[-2]))
print('7-day change SIE = %s km^2 \n' % (iceval[-1]-iceval[-8]))
###########################################################################
###########################################################################
###########################################################################
### Reads in 1981-2010 means
### Load url
url2 = 'ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/north/daily/data/' \
'N_seaice_extent_climatology_1981-2010_v3.0.csv'
### Read file
raw_data2 = UL.request.urlopen(url2)
dataset2 = np.genfromtxt(raw_data2, skip_header=2,delimiter=',',
usecols=[0,1,2,3,4,5,6,7])
### Create variables
doy = dataset2[:,0]
meanice = dataset2[:,1] * 1e6
std = dataset2[:,2]
### Quartiles
quartile10 = dataset2[:,3]
quartile25 = dataset2[:,4]
quartile50 = dataset2[:,5]
quartile75 = dataset2[:,6]
quartile90 = dataset2[:,7]
### Anomalies
currentanom = iceval[-1]-meanice[currentdoy-2]
### Printing info
print('Current anomaly = %s km^2 \n' % currentanom)
### Selected other years for comparisons
yr2007 = np.where(year == 2007)[0]
yr2012 = np.where(year == 2012)[0]
yr2016 = np.where(year == 2016)[0]
sie7 = ice[yr2007]
sie12 = ice[yr2012]
sie16 = ice[yr2016]
###########################################################################
###########################################################################
###########################################################################
### Create plot
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
plt.rc('savefig',facecolor='black')
plt.rc('axes',edgecolor='white')
plt.rc('xtick',color='white')
plt.rc('ytick',color='white')
plt.rc('axes',labelcolor='white')
plt.rc('axes',facecolor='black')
fig = plt.figure()
ax = plt.subplot(111)
xlabels = [r'Jan',r'Feb',r'Mar',r'Apr',r'May',r'Jun',r'Jul',
r'Aug',r'Sep',r'Oct',r'Nov',r'Dec',r'Jan']
plt.xticks(np.arange(0,361,30.4),xlabels,rotation=0)
ylabels = map(str,np.arange(2,19,2))
plt.yticks(np.arange(2,19,2),ylabels)
plt.ylim([2,18])
plt.xlim([0,360])
strmonth = xlabels[int(currentmn)-1]
asof = strmonth + ' ' + currentdy + ', ' + currentyr
### Adjust axes in time series plots
def adjust_spines(ax, spines):
for loc, spine in ax.spines.items():
if loc in spines:
spine.set_position(('outward', 5))
else:
spine.set_color('none')
if 'left' in spines:
ax.yaxis.set_ticks_position('left')
else:
ax.yaxis.set_ticks([])
if 'bottom' in spines:
ax.xaxis.set_ticks_position('bottom')
else:
ax.xaxis.set_ticks([])
ax.tick_params('both',length=5.5,width=2,which='major')
adjust_spines(ax, ['left','bottom'])
ax.spines['top'].set_color('none')
ax.spines['right'].set_color('none')
ax.spines['bottom'].set_linewidth(2)
ax.spines['left'].set_linewidth(2)
upper2std = (meanice/1e6)+(std*2)
lower2std = (meanice/1e6)-(std*2)
ax.grid(zorder=1,color='w',alpha=0.2)
plt.plot(ice18,linewidth=1.8,color='aqua',zorder=9,label=r'Current Year (2018)')
plt.plot(doy,upper2std,color='white',alpha=0.7,zorder=3,linewidth=0.1)
plt.plot(doy,lower2std,color='white',alpha=0.7,zorder=4,linewidth=0.1)
plt.plot(doy,quartile10,color='m',alpha=0.7,zorder=3,linewidth=0.4)
plt.plot(doy,quartile25,color='cornflowerblue',alpha=0.7,zorder=4,linewidth=0.4)
plt.plot(doy,quartile75,color='cornflowerblue',alpha=0.7,zorder=4,linewidth=0.4)
plt.plot(doy,quartile90,color='m',alpha=0.7,zorder=3,linewidth=0.4)
ax.fill_between(doy, lower2std, upper2std, facecolor='white', alpha=0.35,
label=r'$\pm$2 standard deviations',zorder=2)
plt.plot(doy,quartile50,color='gold',alpha=1,zorder=3,linewidth=2,
label=r'Median (1981-2010)')
ax.fill_between(doy, quartile90, quartile75, facecolor='m', alpha=0.55,
label=r'10-90th percentiles',zorder=2)
ax.fill_between(doy, quartile10, quartile25, facecolor='m', alpha=0.55,
zorder=2)
ax.fill_between(doy, quartile25, quartile50, facecolor='cornflowerblue', alpha=0.6,
zorder=2)
ax.fill_between(doy, quartile50, quartile75, facecolor='cornflowerblue', alpha=0.6,
label=r'25-75th percentiles',zorder=2)
plt.scatter(doy[currentdoy-3],ice[-1],s=10,color='aqua',zorder=9)
plt.ylabel(r'\textbf{Extent} [$\times$10$^{6}$ km$^2$]',fontsize=15,
color='darkgrey')
le = plt.legend(shadow=False,fontsize=6,loc='upper left',
bbox_to_anchor=(0.473, 1.011),fancybox=True,ncol=2)
for text in le.get_texts():
text.set_color('w')
plt.title(r'\textbf{ARCTIC SEA ICE}',
fontsize=21,color='darkgrey')
plt.text(doy[currentdoy]-5,ice[-1]-1.35,r'\textbf{2018}',
fontsize=13.5,rotation='horizontal',ha='left',color='aqua')
plt.text(0.5,3.1,r'\textbf{DATA:} National Snow \& Ice Data Center, Boulder CO',
fontsize=5.5,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0.5,2.6,r'\textbf{SOURCE:} ftp://sidads.colorado.edu/DATASETS/NOAA/G02135/',
fontsize=5.5,rotation='horizontal',ha='left',color='darkgrey')
plt.text(0.5,2.1,r'\textbf{GRAPHIC:} Zachary Labe (@ZLabe)',
fontsize=5.5,rotation='horizontal',ha='left',color='darkgrey')
fig.subplots_adjust(top=0.91)
### Save figure
plt.savefig(directoryfigure + 'nsidc_sie_quartiles_currentyear.png',dpi=300) | mit |
csyhuang/hn2016_falwa | hn2016_falwa/beta_version.py | 1 | 20465 | def input_jk_output_index(j,k,kmax):
return j*(kmax) + k
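# Illustrative note (not in the original source): the helper above flattens a
# (j, k) pair into a single row-major index on a jmax-by-kmax grid, e.g. with
# kmax = 5, input_jk_output_index(2, 3, 5) returns 2*5 + 3 = 13.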
def extrap1d(interpolator):
xs = interpolator.x
ys = interpolator.y
def pointwise(x):
if x < xs[0]:
return ys[0]+(x-xs[0])*(ys[1]-ys[0])/(xs[1]-xs[0])
elif x > xs[-1]:
return ys[-1]+(x-xs[-1])*(ys[-1]-ys[-2])/(xs[-1]-xs[-2])
else:
return interpolator(x)
    def ufunclike(xs):
        import numpy as np
        # evaluate pointwise (with linear extrapolation outside the data range)
        return np.array([pointwise(xi) for xi in np.asarray(xs)])
return ufunclike
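# Illustrative usage sketch (values are made up; assumes scipy and numpy are available):
# from scipy import interpolate
# f_interp = interpolate.interp1d([0., 1., 2.], [0., 2., 4.])
# f_extrap = extrap1d(f_interp)
# f_extrap([-1., 3.])   # linear extrapolation outside [0, 2] -> roughly [-2., 6.]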
def solve_uref_both_bc(tstamp, zmum, FAWA_cos, ylat, ephalf2, Delta_PT,
zm_PT, Input_B0, Input_B1, use_real_Data=True,
plot_all_ref_quan=False):
"""
Compute equivalent latitude and wave activity on a barotropic sphere.
Parameters
----------
tstamp : string
Time stamp of the snapshot of the field.
    zmum : ndarray
Zonal mean wind.
FAWA_cos : ndarray
        Zonal mean finite-amplitude wave activity (cosine-weighted).
ylat : sequence or array_like
1-d numpy array of latitude (in degree) with equal spacing in ascending order; dimension = nlat.
ephalf2 : ndarray
Epsilon in Nakamura and Solomon (2010).
Delta_PT : ndarray
\Delta \Theta in Nakamura and Solomon (2010); upper-boundary conditions.
zm_PT : ndarray
Zonal mean potential temperature.
Input_B0 : sequence or array_like
Zonal-mean surface wave activity for the lowest layer (k=0). Part of the lower-boundary condition.
Input_B1 : sequence or array_like
Zonal-mean surface wave activity for the second lowest layer (k=1). Part of the lower-boundary condition.
use_real_Data : boolean
        Whether to use input data to compute the reference states. By default True. If False, randomly generated arrays will be used.
plot_all_ref_quan : boolean
        Whether to plot the solved reference states using the matplotlib library. By default False. For debugging.
Returns
-------
u_MassCorr_regular_noslip : ndarray
2-d numpy array of mass correction \Delta u in NS10 with no-slip lower boundary conditions; dimension = (kmax,nlat).
u_Ref_regular_noslip : ndarray
2-d numpy array of zonal wind reference state u_ref in NS10 with no-slip lower boundary conditions; dimension = (kmax,nlat).
T_MassCorr_regular_noslip : ndarray
2-d numpy array of adjustment in reference temperature \Delta T in NS10 with no-slip lower boundary conditions; dimension = (kmax,nlat).
T_Ref_regular_noslip : ndarray
        2-d numpy array of the reference temperature T_ref in NS10 with no-slip lower boundary conditions; dimension = (kmax,nlat).
u_MassCorr_regular_adiab : ndarray
2-d numpy array of mass correction \Delta u in NS10 with adiabatic lower boundary conditions; dimension = (kmax,nlat).
u_Ref_regular_adiab : ndarray
2-d numpy array of zonal wind reference state u_ref in NS10 with adiabatic lower boundary conditions; dimension = (kmax,nlat).
T_MassCorr_regular_adiab : ndarray
2-d numpy array of adjustment in reference temperature \Delta T in NS10 with adiabatic lower boundary conditions; dimension = (kmax,nlat).
T_Ref_regular_adiab : ndarray
        2-d numpy array of the reference temperature T_ref in NS10 with adiabatic lower boundary conditions; dimension = (kmax,nlat).
"""
# zm_PT = zonal mean potential temperature
# Import necessary modules
from math import pi, exp
from scipy import interpolate
from scipy.sparse import csc_matrix
from scipy.sparse.linalg import spsolve
from copy import copy
import numpy as np
import itertools
if plot_all_ref_quan:
import matplotlib.pyplot as plt
# === Parameters (should be input externally. To be modified) ===
dz = 1000. # vertical z spacing (m)
aa = 6378000. # planetary radius
r0 = 287. # gas constant
hh = 7000. # scale height
cp = 1004. # specific heat
rkappa = r0/cp
om = 7.29e-5 # angular velocity of the earth
# === These changes with input variables' dimensions ===
nlat = FAWA_cos.shape[-1]
jmax1 = nlat//4
dm = 1./float(jmax1+1) # gaussian latitude spacing
    gl = np.array([(j+1)*dm for j in range(jmax1)]) # sine of latitude (mu) on the interior Gaussian grid
    gl_2 = np.array([j*dm for j in range(jmax1+2)]) # sine of latitude (mu) including the end points (equator and pole)
cosl = np.sqrt(1.-gl**2)
#cosl_2 = np.sqrt(1.-gl_2**2)
alat = np.arcsin(gl)*180./pi
alat_2 = np.arcsin(gl_2)*180./pi
dmdz = (dm/dz)
# **** Get from input these parameters ****
kmax = FAWA_cos.shape[0]
#height = np.array([i for i in range(kmax)]) # in [km]
# **** Initialize Coefficients ****
c_a = np.zeros((jmax1, kmax))
c_b = np.zeros((jmax1, kmax))
c_c = np.zeros((jmax1, kmax))
c_d = np.zeros((jmax1, kmax))
c_e = np.zeros((jmax1, kmax))
c_f = np.zeros((jmax1, kmax))
# --- Initialize interpolated variables ---
zmu1 = np.zeros((jmax1, kmax))
cx1 = np.zeros((jmax1, kmax))
cor1 = np.zeros((jmax1, kmax))
ephalf = np.zeros((jmax1, kmax))
Delta_PT1 = np.zeros((jmax1+2))
zm_PT1 = np.zeros((jmax1, kmax))
Input_B0_1 = np.zeros((jmax1+2))
Input_B1_1 = np.zeros((jmax1+2))
# --- Define Epsilon as a function of y and z ---
# **** Interpolate to gaussian latitude ****
if use_real_Data:
# print 'use_real_Data'
for vv1,vvm in zip([zmu1,cx1,zm_PT1] , [zmum,FAWA_cos,zm_PT]):
f_toGaussian = interpolate.interp1d(ylat[:],vvm[:,:].T,axis=0, kind='linear') #[jmax x kmax]
vv1[:,:] = f_toGaussian(alat[:])
#vv1[:,:] = vvm[:,:]
#vv1[-1,:] = vvm[:,-1]
# --- Interpolation of ephalf ---
f_ep_toGaussian = interpolate.interp1d(ylat[:],ephalf2[:,:].T,axis=0, kind='linear') #[jmax x kmax]
ephalf[:,:] = f_ep_toGaussian(alat[:])
# --- Interpolation of Delta_PT ---
#f_DT_toGaussian = extrap1d( interpolate.interp1d(ylat[:],Delta_PT[:], kind='linear') ) # This is txt in Noboru's code
f_DT_toGaussian = interpolate.interp1d(ylat[:],Delta_PT[:],
kind='linear',fill_value='extrapolate')
Delta_PT1[:] = f_DT_toGaussian(alat_2[:])
# --- Interpolation of Input_B0_1 ---
#f_B0_toGaussian = extrap1d( interpolate.interp1d(ylat[:],Input_B0[:], kind='linear') ) # This is txt in Noboru's code
f_B0_toGaussian = interpolate.interp1d(ylat[:],Input_B0[:],
kind='linear',fill_value='extrapolate') # This is txt in Noboru's code
Input_B0_1[:] = f_B0_toGaussian(alat_2[:])
# --- Interpolation of Input_B1_1 ---
# f_B1_toGaussian = extrap1d( interpolate.interp1d(ylat[:],Input_B1[:], kind='linear') ) # This is txt in Noboru's code
f_B1_toGaussian = interpolate.interp1d(ylat[:],Input_B1[:],
kind='linear',fill_value='extrapolate') # This is txt in Noboru's code
Input_B1_1[:] = f_B1_toGaussian(alat_2[:])
else:
# Use random matrix here just to test!
zmu1 = np.random.rand(jmax1, kmax)+np.ones((jmax1, kmax))*1.e-8
cx1 = np.random.rand(jmax1, kmax)+np.ones((jmax1, kmax))*1.e-8
#cor1 = np.random.rand(jmax1, kmax)+np.ones((jmax1, kmax))*1.e-8
# --- Added on Aug 1, 2016 ---
cor1 = 2.*om*gl[:,np.newaxis] * np.ones((jmax1, kmax))
#cor1[0] = cor1[1]*0.5
# OLD: qxx0 = -cx1*cosl[:,np.newaxis]/cor1 #qxx0 = np.empty((jmax1, kmax))
qxx0 = -cx1/cor1 # Input of LWA has cosine.
c_f[0,:] = qxx0[1,:] - 2*qxx0[0,:]
c_f[-1,:] = qxx0[-2,:] - 2*qxx0[-1,:]
c_f[1:-1,:] = qxx0[:-2,:] + qxx0[2:,:] - 2*qxx0[1:-1,:]
#c_f[:,0] = 0.0
# --- Aug 9: Lower Adiabatic boundary conditions ---
Input_dB0 = np.zeros((jmax1))
Input_dB1 = np.zeros((jmax1))
uz1 = np.zeros((jmax1))
# prefac = - r0 * cosl[1:-1]**2 * dz / (cor1[1:-1,-2]**2 * aa**2 * hh * dm**2) * exp(-rkappa*(kmax-2.)/7.)
# OLD: Input_dB0[:] = Input_B0_1[:-2]*cosl_2[:-2] + Input_B0_1[2:]*cosl_2[2:] - 2*Input_B0_1[1:-1]*cosl_2[1:-1]
Input_dB0[:] = Input_B0_1[:-2] + Input_B0_1[2:] - 2*Input_B0_1[1:-1]
# OLD: Input_dB1[:] = Input_B1_1[:-2]*cosl_2[:-2] + Input_B1_1[2:]*cosl_2[2:] - 2*Input_B1_1[1:-1]*cosl_2[1:-1]
Input_dB1[:] = Input_B1_1[:-2] + Input_B1_1[2:] - 2*Input_B1_1[1:-1]
# This is supposed to be correct but gave weird results.
uz1[:] = - r0 * cosl[:]**2 * Input_dB1[:] * 2*dz / (cor1[:,1]**2 * aa**2 * hh * dm**2) * exp(-rkappa*(1.)/7.) \
- r0 * cosl[:]**2 * Input_dB0[:] * 2*dz / (cor1[:,0]**2 * aa**2 * hh * dm**2) * exp(-rkappa*(0.)/7.)
# **** Upper Boundary Condition (Come back later) ****
uz2 = np.zeros((jmax1))
dDelta_PT1 = (Delta_PT1[2:]-Delta_PT1[:-2]) # Numerical trick: Replace uz2[1] with an extrapolated value
# Original correct one:
# uz2[1:-1] = - r0 * cosl[1:-1]**2 * exp(-rkappa*(kmax-2.)/7.) * dDelta_PT1 / (cor1[1:-1,-2]**2 * aa * hh * dmdz)
uz2[:] = - r0 * cosl[:]**2 * exp(-rkappa*(kmax-2.)/7.) * dDelta_PT1 / (cor1[:,-2]**2 * aa * hh * dmdz)
# **** Initialize the coefficients a,b,c,d,e,f ****
c_a[:,:] = 1.0
c_b[:,:] = 1.0
c_c[:,1:-1] = dmdz**2 *ephalf[:,1:-1]*exp(-dz/(2*hh)) # This one should be correct
c_d[:,1:-1] = dmdz**2 *ephalf[:,0:-2]*exp(dz/(2*hh)) # Check convention of ephalf
c_e[:,1:-1] = -(c_a[:,1:-1]+c_b[:,1:-1]+c_c[:,1:-1]+c_d[:,1:-1])
b = np.zeros((jmax1*kmax))
row_index=[]
col_index=[]
coeff = []
jrange = range(jmax1)
krange = range(1,kmax-1)
for j, k in itertools.product(jrange, krange):
# for j in range(jmax1):
# for k in range(1,kmax-1):
ind = input_jk_output_index(j,k,kmax)
b[ind] = c_f[j,k]
if (j<jmax1-1):
# A[ind,input_jk_output_index(j+1,k,kmax)] = c_a[j,k]
row_index.append(ind)
col_index.append(input_jk_output_index(j+1,k,kmax))
coeff.append(c_a[j,k])
if (j>0):
# A[ind,input_jk_output_index(j-1,k,kmax)] = c_b[j,k]
row_index.append(ind)
col_index.append(input_jk_output_index(j-1,k,kmax))
coeff.append(c_b[j,k])
# A[ind,input_jk_output_index(j,k+1,kmax)] = c_c[j,k]
row_index.append(ind)
col_index.append(input_jk_output_index(j,k+1,kmax))
coeff.append(c_c[j,k])
# A[ind,input_jk_output_index(j,k-1,kmax)] = c_d[j,k]
row_index.append(ind)
col_index.append(input_jk_output_index(j,k-1,kmax))
coeff.append(c_d[j,k])
# A[ind,input_jk_output_index(j,k,kmax)] = c_e[j,k]
row_index.append(ind)
col_index.append(input_jk_output_index(j,k,kmax))
coeff.append(c_e[j,k])
# ==== Upper boundary condition - thermal wind ====
# for j in range(1,jmax1-1):
for j in range(jmax1):
ind1 = input_jk_output_index(j,kmax-1,kmax)
b[ind1] = uz2[j] #- r0 * cosl[j]**2 * exp(-rkappa*(kmax-2.)/7.) * (Delta_PT1[j+1]-Delta_PT1[j-1])/ (cor1[j,-2]**2 * aa * hh * dmdz)
# A[ind1,ind1] = 1.0
row_index.append(ind1)
col_index.append(ind1)
coeff.append(1.0)
# A[ind1,input_jk_output_index(j,kmax-3,kmax)] = -1.0
row_index.append(ind1)
col_index.append(input_jk_output_index(j,kmax-3,kmax))
coeff.append(-1.0)
# Try sparse matrix
# print 'try sparse matrix'
# A = csc_matrix((coeff_noslip, (row_index, col_index)), shape=(jmax1*kmax,jmax1*kmax))
# print 'shape of A=',A.shape
# print 'Does it work?'
#
# csc_matrix((data, (row_ind, col_ind)), [shape=(M, N)])
# where data, row_ind and col_ind satisfy the relationship a[row_ind[k], col_ind[k]] = data[k].
# A[ind1,input_jk_output_index(j,kmax-3,kmax)] = -1.0
#uz2[1:-1] = - r0 * cosl[1:-1]**2 * exp(-rkappa*(kmax-2.)/7.) * (Delta_PT1[2:]-Delta_PT1[:-2]) / (cor1[1:-1,-2]**2 * aa * hh * dmdz)
# === Make a copy to deal with adiabatic boundary condition ===
# A: no-slip
# A_adiab: adiabatic boundary conditions
row_index_adiab = copy(row_index)
col_index_adiab = copy(col_index)
coeff_adiab = copy(coeff)
b_adiab = np.copy(b)
# print 'does it work till here?'
# A_adiab = np.copy(A)
# ==== Lower boundary condition - adiabatic (k=0) ====
for j in range(jmax1):
ind0 = input_jk_output_index(j,0,kmax)
b_adiab[ind0] = uz1[j]
# A_adiab[ind0,ind0] = -1.0 # k=0
row_index_adiab.append(ind0)
col_index_adiab.append(ind0)
coeff_adiab.append(-1.0)
# A_adiab[ind0,input_jk_output_index(j,2,kmax)] = 1.0 # k=2
row_index_adiab.append(ind0)
col_index_adiab.append(input_jk_output_index(j,2,kmax))
coeff_adiab.append(1.0)
A_adiab = csc_matrix((coeff_adiab, (row_index_adiab, col_index_adiab)), shape=(jmax1*kmax,jmax1*kmax))
# ==== Lower boundary condition - no-slip (k=0) ====
for j in range(jmax1):
ind = input_jk_output_index(j,0,kmax)
b[ind] = zmu1[j,0]*cosl[j]/cor1[j,0]
# A[ind,ind] = 1.0
row_index.append(ind)
col_index.append(ind)
coeff.append(1.0)
A = csc_matrix((coeff, (row_index, col_index)), shape=(jmax1*kmax,jmax1*kmax))
# print 'is it ok till here????'
# === Solving the linear system ===
u2_adiab = spsolve(A_adiab, b_adiab)
u2 = spsolve(A, b)
# === Mapping back to 2D matrix ===
u_adiab = np.zeros((jmax1+2,kmax))
u = np.zeros((jmax1+2,kmax))
for j in range(jmax1):
for k in range(kmax):
u_adiab[j+1,k] = u2_adiab[j*kmax + k]
u[j+1,k] = u2[j*kmax + k]
u_MassCorr_adiab = np.zeros_like(u_adiab)
u_MassCorr_noslip = np.zeros_like(u)
# u_MassCorr[1:-1,:] = u[1:-1,:] * cor1[1:-1,:] / cosl[1:-1,np.newaxis]
u_MassCorr_adiab[1:-1,:] = u_adiab[1:-1,:] * cor1 / cosl[:,np.newaxis]
u_MassCorr_noslip[1:-1,:] = u[1:-1,:] * cor1 / cosl[:,np.newaxis]
# --- Initialize T_MassCorr to be output ---
u_Ref_regular_adiab = np.zeros_like(zmum)
u_Ref_regular_noslip = np.zeros_like(zmum)
u_MassCorr_regular_adiab = np.zeros_like(zmum)
u_MassCorr_regular_noslip = np.zeros_like(zmum)
T_Ref_regular_adiab = np.zeros_like(zmum)
T_Ref_regular_noslip = np.zeros_like(zmum)
T_MassCorr_regular_adiab = np.zeros_like(zmum)
T_MassCorr_regular_noslip = np.zeros_like(zmum)
for u_MassCorr,u_MassCorr_regular,u_Ref_regular,T_MassCorr_regular,T_Ref_regular,BCstring in \
zip([u_MassCorr_adiab,u_MassCorr_noslip],\
[u_MassCorr_regular_adiab,u_MassCorr_regular_noslip],\
[u_Ref_regular_adiab,u_Ref_regular_noslip],\
[T_MassCorr_regular_adiab,T_MassCorr_regular_noslip],\
[T_Ref_regular_adiab,T_Ref_regular_noslip],\
['Adiabatic','Noslip']):
# ---- Back out temperature correction here -----
T_MassCorr = np.zeros_like(u_MassCorr)
for k in range(1,kmax-2):
for j in range(2,jmax1,2): # This is temperature not potential temperature!!! Need to check.
# print 'alat['+str(j)+']=',alat[j]
# T_MassCorr[j,k] = T_MassCorr[j-2,k] - (2.*om*gl[j])*aa*hh*dmdz / (r0 * cosl[j]) * (u_MassCorr[j,k+1]-u_MassCorr[j,k-1])
T_MassCorr[j,k] = T_MassCorr[j-2,k] - (2.*om*gl[j-1])*aa*hh*dmdz / (r0 * cosl[j-1]) * (u_MassCorr[j-1,k+1]-u_MassCorr[j-1,k-1])
# ---- First do interpolation (gl is regular grid) ----
# f_Todd = interpolate.interp1d(gl[:-1:2],T_MassCorr[1:-1:2,k]) #[jmax x kmax]
#f_Todd = interpolate.interp1d(gl_2[::2],T_MassCorr[::2,k]) #[jmax x kmax]
#f_Todd_ex = extrap1d(f_Todd)
f_Todd = interpolate.interp1d(gl_2[::2],T_MassCorr[::2,k],
kind='linear',fill_value='extrapolate')
T_MassCorr[:,k] = f_Todd(gl_2[:])
# T_MassCorr[:,k] = f_Todd_ex(gl_2[:]) # Get all the points interpolated
# ---- Then do domain average ----
T_MC_mean = np.mean(T_MassCorr[:,k])
T_MassCorr[:,k] -= T_MC_mean
# --- First, interpolate MassCorr back to regular grid first ---
f_u_MassCorr = interpolate.interp1d(alat_2,u_MassCorr,axis=0, kind='linear') #[jmax x kmax]
u_MassCorr_regular[:,-nlat//2:] = f_u_MassCorr(ylat[-nlat//2:]).T
f_T_MassCorr = interpolate.interp1d(alat_2,T_MassCorr,axis=0, kind='linear') #[jmax x kmax]
T_MassCorr_regular[:,-nlat//2:] = f_T_MassCorr(ylat[-nlat//2:]).T
u_Ref = zmum[:,-nlat//2:] - u_MassCorr_regular[:,-nlat//2:]
T_ref = zm_PT[:,-nlat//2:] * np.exp(-np.arange(kmax)/7. * rkappa)[:,np.newaxis] - T_MassCorr_regular[:,-nlat//2:]
u_Ref_regular[:,-nlat//2:] = u_Ref
T_Ref_regular[:,-nlat//2:] = T_ref
#
#plot_all_ref_quan = False
if plot_all_ref_quan:
# --- height coordinate ---
height = np.array([i for i in range(kmax)]) # in [km]
# --- Colorbar scale ---
contour_int = np.arange(-120,145,5)
dT_contour_int = np.arange(-120,81,5)
T_contour_int = np.arange(160,321,5)
# --- Start plotting figure ---
fig = plt.subplots(figsize=(12,12))
plt.subplot(221)
plt.contourf(ylat[-nlat//2:],height[:-2],u_MassCorr_regular[:-2,-nlat//2:],contour_int)
plt.colorbar()
c1=plt.contour(ylat[-nlat//2:],height[:-2],u_MassCorr_regular[:-2,-nlat//2:],contour_int[::2],colors='k')
plt.clabel(c1,c1.levels,inline=True, fmt='%d', fontsize=10)
plt.title('$\Delta$ u '+tstamp)
plt.ylabel('height (km)')
plt.subplot(222)
plt.contourf(ylat[-nlat//2:],height[:-2],u_Ref[:-2,:],contour_int)
plt.colorbar()
c2=plt.contour(ylat[-nlat//2:],height[:-2],u_Ref[:-2,:],contour_int[::2],colors='k')
plt.clabel(c2,c2.levels,inline=True, fmt='%d', fontsize=10)
plt.title('$u_{REF}$ ('+BCstring+' BC)')
plt.subplot(223)
plt.contourf(ylat[-nlat//2:],height[:-2],T_MassCorr_regular[:-2,-nlat//2:],dT_contour_int)
plt.colorbar()
c3=plt.contour(ylat[-nlat//2:],height[:-2],T_MassCorr_regular[:-2,-nlat//2:],dT_contour_int,colors='k')
plt.clabel(c3,c3.levels,inline=True, fmt='%d', fontsize=10)
plt.title('$\Delta$ T')
plt.ylabel('height (km)')
plt.subplot(224)
plt.contourf(ylat[-nlat//2:],height[:-2],T_ref[:-2,:],T_contour_int)
plt.colorbar()
c4=plt.contour(ylat[-nlat//2:],height[:-2],T_ref[:-2,:],T_contour_int[::2],colors='k')
plt.clabel(c4,c4.levels,inline=True, fmt='%d', fontsize=10)
plt.title('$T_{REF}$')
plt.ylabel('height (km)')
plt.tight_layout()
plt.show()
#plt.savefig('/home/csyhuang/Dropbox/Research-code/Sep12_test3_'+BCstring+'_'+tstamp+'.png')
plt.close()
# This is for only outputing Delta_u and Uref for no-slip and adiabatic boundary conditions.
return u_MassCorr_regular_noslip,u_Ref_regular_noslip,T_MassCorr_regular_noslip,T_Ref_regular_noslip, u_MassCorr_regular_adiab,u_Ref_regular_adiab,T_MassCorr_regular_adiab,T_Ref_regular_adiab
# --- A quick test of whether solve_uref_both_bc is working ---
if __name__ == "__main__":
import matplotlib.pyplot as plt
import numpy as np
nlat = 121
kmax = 49
jmax1 = nlat
    # The code below is just for testing purposes
tstamp = 'random'
ylat = np.linspace(-90,90,121,endpoint=True)
t1 = np.random.rand(nlat,kmax)+np.ones((nlat,kmax))*0.001
t2 = np.random.rand(nlat,kmax)+np.ones((nlat,kmax))*0.001
t3 = np.random.rand(nlat,kmax)+np.ones((nlat,kmax))*0.001
Delta_PT = np.random.rand(nlat)+np.ones((nlat))*0.001
zm_PT = np.random.rand(nlat,kmax)+np.ones((nlat,kmax))*0.001
Input_B0 = np.random.rand(nlat)+np.ones((nlat))*0.001
Input_B1 = np.random.rand(nlat)+np.ones((nlat))*0.001
eh = np.random.rand(jmax1, kmax)+np.ones((jmax1, kmax))*0.001
Delta_PT = np.sort(np.random.rand(jmax1))
xxx = solve_uref_both_bc(tstamp,t1,t2,ylat,t3,Delta_PT,zm_PT,Input_B0,Input_B1,use_real_Data=True)
print(xxx)
| mit |
robcarver17/pysystemtrade | systems/accounts/pandl_calculators/pandl_generic_costs.py | 1 | 3494 | import pandas as pd
from systems.accounts.pandl_calculators.pandl_calculation import pandlCalculation, apply_weighting
curve_types = ['gross', 'net', 'costs']
GROSS_CURVE = 'gross'
NET_CURVE = 'net'
COSTS_CURVE = 'costs'
class pandlCalculationWithGenericCosts(pandlCalculation):
def weight(self, weight: pd.Series):
weighted_capital = apply_weighting(weight, self.capital)
weighted_positions = apply_weighting(weight, self.positions)
return pandlCalculationWithGenericCosts(self.price,
positions = weighted_positions,
fx = self.fx,
capital = weighted_capital,
value_per_point = self.value_per_point,
roundpositions = self.roundpositions,
delayfill = self.delayfill)
def as_pd_series(self, percent = False, curve_type=NET_CURVE):
if curve_type==NET_CURVE:
if percent:
return self.net_percentage_pandl()
else:
return self.net_pandl_in_base_currency()
elif curve_type==GROSS_CURVE:
if percent:
return self.percentage_pandl()
else:
return self.pandl_in_base_currency()
elif curve_type==COSTS_CURVE:
if percent:
return self.costs_percentage_pandl()
else:
return self.costs_pandl_in_base_currency()
else:
raise Exception("Curve type %s not recognised! Must be one of %s" % (curve_type, curve_types))
def net_percentage_pandl(self) -> pd.Series:
gross = self.percentage_pandl()
costs = self.costs_percentage_pandl()
net = _add_gross_and_costs(gross, costs)
return net
def net_pandl_in_base_currency(self) -> pd.Series:
gross = self.pandl_in_base_currency()
costs = self.costs_pandl_in_base_currency()
net = _add_gross_and_costs(gross, costs)
return net
def net_pandl_in_instrument_currency(self) -> pd.Series:
gross = self.pandl_in_instrument_currency()
costs = self.costs_pandl_in_instrument_currency()
net = _add_gross_and_costs(gross, costs)
return net
def net_pandl_in_points(self) -> pd.Series:
gross = self.pandl_in_points()
costs = self.costs_pandl_in_points()
net = _add_gross_and_costs(gross, costs)
return net
def costs_percentage_pandl(self) -> pd.Series:
costs_in_base = self.costs_pandl_in_base_currency()
costs = self._percentage_pandl_given_pandl(costs_in_base)
return costs
def costs_pandl_in_base_currency(self) -> pd.Series:
costs_in_instr_ccy = self.costs_pandl_in_instrument_currency()
costs_in_base = self._base_pandl_given_currency_pandl(costs_in_instr_ccy)
return costs_in_base
def costs_pandl_in_instrument_currency(self) -> pd.Series:
costs_in_points = self.costs_pandl_in_points()
costs_in_instr_ccy = self._pandl_in_instrument_ccy_given_points_pandl(costs_in_points)
return costs_in_instr_ccy
def costs_pandl_in_points(self) -> pd.Series:
raise NotImplementedError
def _add_gross_and_costs(gross: pd.Series,
costs: pd.Series):
cumsum_costs = costs.cumsum()
cumsum_costs_aligned = cumsum_costs.reindex(gross.index, method="ffill")
costs_aligned = cumsum_costs_aligned.diff()
net = gross + costs_aligned
return net
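# Minimal illustrative sketch, not part of pysystemtrade itself: a concrete cost model
# only needs to supply costs_pandl_in_points(); the methods above then convert it to
# instrument/base currency or percentage terms and net it against the gross curve via
# _add_gross_and_costs (cumulative costs are re-aligned to the gross index and diffed).
# The flat per-trade cost used here is an assumption for illustration, not the
# library's actual cost model.
class pandlCalculationWithFlatPointCosts(pandlCalculationWithGenericCosts):
    def __init__(self, *args, cost_per_trade_in_points: float = 0.0, **kwargs):
        super().__init__(*args, **kwargs)
        self._cost_per_trade_in_points = cost_per_trade_in_points
    def costs_pandl_in_points(self) -> pd.Series:
        # charge the flat cost on every unit of position change; costs reduce p&l
        trades = self.positions.diff().abs().fillna(0.0)
        return -trades * self._cost_per_trade_in_points
# Hypothetical usage, assuming the same constructor arguments as shown in weight() above:
# pandl = pandlCalculationWithFlatPointCosts(price, positions=positions, capital=capital,
#                                            cost_per_trade_in_points=0.25)
# net_curve = pandl.as_pd_series(curve_type=NET_CURVE)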
| gpl-3.0 |
SophieIPP/ipp-macro-series-parser | ipp_macro_series_parser/demographie/parser.py | 1 | 3235 | # -*- coding: utf-8 -*-
# TAXIPP -- A French microsimulation model
# By: IPP <taxipp@ipp.eu>
#
# Copyright (C) 2012, 2013, 2014, 2015 IPP
# https://github.com/taxipp
#
# This file is part of TAXIPP.
#
# TAXIPP is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# TAXIPP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import logging
import os
import pandas
import pkg_resources
from ipp_macro_series_parser.config import Config
config_parser = Config(
config_files_directory = os.path.join(pkg_resources.get_distribution('ipp-macro-series-parser').location)
)
xls_directory = os.path.join(config_parser.get('data', 'demographie_directory'), 'xls')
log = logging.getLogger(__name__)
def create_demographie_data_frame():
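    """
    Parse the INSEE 'pyramide des âges' Excel files for the years 1999 to 2015
    (sheets for France and France métropolitaine) and return a long-format
    DataFrame with id columns 'year', 'champ', 'Âge révolu' and 'Année de naissance'
    plus the melted 'variable'/'value' columns.
    """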
data_frame = pandas.DataFrame()
for year in range(1999, 2015 + 1):
file_path = os.path.join(xls_directory, u'pyramide-des-ages-{}.xls'.format(year))
skiprows = 5 - (year == 1999)
parse_cols = "A:E"
slice_start = 0
slice_end = 101
sheetname = 'France'
if year <= 2010:
sheetnames = ['France', u'France métropolitaine']
elif year == 2011:
sheetnames = ['{} France'.format(year), u"{} métropole".format(year)]
else:
sheetnames = ['Pyramide {} France'.format(year), u'Pyramide {} métropole'.format(year)]
for sheetname in sheetnames:
try:
df = pandas.read_excel(
file_path,
# na_values = '-',
sheetname = sheetname,
skiprows = skiprows,
parse_cols = parse_cols).iloc[slice_start:slice_end]
df['year'] = year
if sheetname in ['France', u'France métropolitaine']:
df['champ'] = sheetname
else:
df['champ'] = u'France métropolitaine' if u'métropole' in sheetname else 'France'
# All column name on one line
remove_cr = dict(
                    (column, column.replace(u"\n", " ").replace("  ", " ")) for column in df.columns)
df.rename(columns = remove_cr, inplace = True)
# Femmes _> Nombre de femmes etc
df.rename(columns = dict(
Femmes = "Nombre de femmes",
Hommes = "Nombre d'hommes"), inplace = True)
data_frame = pandas.concat((data_frame, df))
del df
            except Exception as e:
                print(year)
                print(sheetname)
raise(e)
return pandas.melt(data_frame, id_vars = ['year', 'champ', u'Âge révolu', u'Année de naissance'])
| gpl-3.0 |
kambysese/mne-python | tutorials/epochs/plot_50_epochs_to_data_frame.py | 10 | 6955 | """
.. _tut-epochs-dataframe:
Exporting Epochs to Pandas DataFrames
=====================================
This tutorial shows how to export the data in :class:`~mne.Epochs` objects to a
:class:`Pandas DataFrame <pandas.DataFrame>`, and applies a typical Pandas
:doc:`split-apply-combine <pandas:user_guide/groupby>` workflow to examine the
latencies of the response maxima across epochs and conditions.
We'll use the :ref:`sample-dataset` dataset, but load a version of the raw file
that has already been filtered and downsampled, and has an average reference
applied to its EEG channels. As usual we'll start by importing the modules we
need and loading the data:
"""
import os
import seaborn as sns
import mne
sample_data_folder = mne.datasets.sample.data_path()
sample_data_raw_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw.fif')
raw = mne.io.read_raw_fif(sample_data_raw_file, verbose=False)
###############################################################################
# Next we'll load a list of events from file, map them to condition names with
# an event dictionary, set some signal rejection thresholds (cf.
# :ref:`tut-reject-epochs-section`), and segment the continuous data into
# epochs:
sample_data_events_file = os.path.join(sample_data_folder, 'MEG', 'sample',
'sample_audvis_filt-0-40_raw-eve.fif')
events = mne.read_events(sample_data_events_file)
event_dict = {'auditory/left': 1, 'auditory/right': 2, 'visual/left': 3,
'visual/right': 4}
reject_criteria = dict(mag=3000e-15, # 3000 fT
grad=3000e-13, # 3000 fT/cm
eeg=100e-6, # 100 µV
eog=200e-6) # 200 µV
tmin, tmax = (-0.2, 0.5) # epoch from 200 ms before event to 500 ms after it
baseline = (None, 0) # baseline period from start of epoch to time=0
epochs = mne.Epochs(raw, events, event_dict, tmin, tmax, proj=True,
baseline=baseline, reject=reject_criteria, preload=True)
del raw
###############################################################################
# Converting an ``Epochs`` object to a ``DataFrame``
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#
# Once we have our :class:`~mne.Epochs` object, converting it to a
# :class:`~pandas.DataFrame` is simple: just call :meth:`epochs.to_data_frame()
# <mne.Epochs.to_data_frame>`. Each channel's data will be a column of the new
# :class:`~pandas.DataFrame`, alongside three additional columns of event name,
# epoch number, and sample time. Here we'll just show the first few rows and
# columns:
df = epochs.to_data_frame()
df.iloc[:5, :10]
###############################################################################
# Scaling time and channel values
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# By default, time values are converted from seconds to milliseconds and
# then rounded to the nearest integer; if you don't want this, you can pass
# ``time_format=None`` to keep time as a :class:`float` value in seconds, or
# convert it to a :class:`~pandas.Timedelta` value via
# ``time_format='timedelta'``.
#
# Note also that, by default, channel measurement values are scaled so that EEG
# data are converted to µV, magnetometer data are converted to fT, and
# gradiometer data are converted to fT/cm. These scalings can be customized
# through the ``scalings`` parameter, or suppressed by passing
# ``scalings=dict(eeg=1, mag=1, grad=1)``.
df = epochs.to_data_frame(time_format=None,
scalings=dict(eeg=1, mag=1, grad=1))
df.iloc[:5, :10]
###############################################################################
# Notice that the time values are no longer integers, and the channel values
# have changed by several orders of magnitude compared to the earlier
# DataFrame.
#
#
# Setting the ``index``
# ~~~~~~~~~~~~~~~~~~~~~
#
# It is also possible to move one or more of the indicator columns (event name,
# epoch number, and sample time) into the :ref:`index <pandas:indexing>`, by
# passing a string or list of strings as the ``index`` parameter. We'll also
# demonstrate here the effect of ``time_format='timedelta'``, yielding
# :class:`~pandas.Timedelta` values in the "time" column.
df = epochs.to_data_frame(index=['condition', 'epoch'],
time_format='timedelta')
df.iloc[:5, :10]
###############################################################################
# Wide- versus long-format DataFrames
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Another parameter, ``long_format``, determines whether each channel's data is
# in a separate column of the :class:`~pandas.DataFrame`
# (``long_format=False``), or whether the measured values are pivoted into a
# single ``'value'`` column with an extra indicator column for the channel name
# (``long_format=True``). Passing ``long_format=True`` will also create an
# extra column ``ch_type`` indicating the channel type.
long_df = epochs.to_data_frame(time_format=None, index='condition',
long_format=True)
long_df.head()
###############################################################################
# Generating the :class:`~pandas.DataFrame` in long format can be helpful when
# using other Python modules for subsequent analysis or plotting. For example,
# here we'll take data from the "auditory/left" condition, pick a couple MEG
# channels, and use :func:`seaborn.lineplot` to automatically plot the mean and
# confidence band for each channel, with confidence computed across the epochs
# in the chosen condition:
channels = ['MEG 1332', 'MEG 1342']
data = long_df.loc['auditory/left'].query('channel in @channels')
# convert channel column (CategoryDtype → string; for a nicer-looking legend)
data['channel'] = data['channel'].astype(str)
sns.lineplot(x='time', y='value', hue='channel', data=data)
###############################################################################
# We can also now use all the power of Pandas for grouping and transforming our
# data. Here, we find the latency of peak activation of 2 gradiometers (one
# near auditory cortex and one near visual cortex), and plot the distribution
# of the timing of the peak in each channel as a :func:`~seaborn.violinplot`:
# sphinx_gallery_thumbnail_number = 2
df = epochs.to_data_frame(time_format=None)
peak_latency = (df.filter(regex=r'condition|epoch|MEG 1332|MEG 2123')
.groupby(['condition', 'epoch'])
.aggregate(lambda x: df['time'].iloc[x.idxmax()])
.reset_index()
.melt(id_vars=['condition', 'epoch'],
var_name='channel',
value_name='latency of peak')
)
ax = sns.violinplot(x='channel', y='latency of peak', hue='condition',
data=peak_latency, palette='deep', saturation=1)
| bsd-3-clause |
icdishb/scikit-learn | sklearn/tests/test_naive_bayes.py | 142 | 17496 | import pickle
from io import BytesIO
import numpy as np
import scipy.sparse
from sklearn.datasets import load_digits, load_iris
from sklearn.cross_validation import cross_val_score, train_test_split
from sklearn.externals.six.moves import zip
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_greater
from sklearn.naive_bayes import GaussianNB, BernoulliNB, MultinomialNB
# Data is just 6 separable points in the plane
X = np.array([[-2, -1], [-1, -1], [-1, -2], [1, 1], [1, 2], [2, 1]])
y = np.array([1, 1, 1, 2, 2, 2])
# A bit more random tests
rng = np.random.RandomState(0)
X1 = rng.normal(size=(10, 3))
y1 = (rng.normal(size=(10)) > 0).astype(np.int)
# Data is 6 random integer points in a 100 dimensional space classified to
# three classes.
X2 = rng.randint(5, size=(6, 100))
y2 = np.array([1, 1, 2, 2, 3, 3])
def test_gnb():
# Gaussian Naive Bayes classification.
# This checks that GaussianNB implements fit and predict and returns
# correct values for a simple toy dataset.
clf = GaussianNB()
y_pred = clf.fit(X, y).predict(X)
assert_array_equal(y_pred, y)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Test whether label mismatch between target y and classes raises
# an Error
# FIXME Remove this test once the more general partial_fit tests are merged
assert_raises(ValueError, GaussianNB().partial_fit, X, y, classes=[0, 1])
def test_gnb_prior():
# Test whether class priors are properly set.
clf = GaussianNB().fit(X, y)
assert_array_almost_equal(np.array([3, 3]) / 6.0,
clf.class_prior_, 8)
clf.fit(X1, y1)
# Check that the class priors sum to 1
assert_array_almost_equal(clf.class_prior_.sum(), 1)
def test_gnb_sample_weight():
"""Test whether sample weights are properly used in GNB. """
# Sample weights all being 1 should not change results
sw = np.ones(6)
clf = GaussianNB().fit(X, y)
clf_sw = GaussianNB().fit(X, y, sw)
assert_array_almost_equal(clf.theta_, clf_sw.theta_)
assert_array_almost_equal(clf.sigma_, clf_sw.sigma_)
# Fitting twice with half sample-weights should result
# in same result as fitting once with full weights
sw = rng.rand(y.shape[0])
clf1 = GaussianNB().fit(X, y, sample_weight=sw)
clf2 = GaussianNB().partial_fit(X, y, classes=[1, 2], sample_weight=sw / 2)
clf2.partial_fit(X, y, sample_weight=sw / 2)
assert_array_almost_equal(clf1.theta_, clf2.theta_)
assert_array_almost_equal(clf1.sigma_, clf2.sigma_)
# Check that duplicate entries and correspondingly increased sample
# weights yield the same result
ind = rng.randint(0, X.shape[0], 20)
sample_weight = np.bincount(ind, minlength=X.shape[0])
clf_dupl = GaussianNB().fit(X[ind], y[ind])
clf_sw = GaussianNB().fit(X, y, sample_weight)
assert_array_almost_equal(clf_dupl.theta_, clf_sw.theta_)
assert_array_almost_equal(clf_dupl.sigma_, clf_sw.sigma_)
def test_discrete_prior():
# Test whether class priors are properly set.
for cls in [BernoulliNB, MultinomialNB]:
clf = cls().fit(X2, y2)
assert_array_almost_equal(np.log(np.array([2, 2, 2]) / 6.0),
clf.class_log_prior_, 8)
def test_mnnb():
# Test Multinomial Naive Bayes classification.
# This checks that MultinomialNB implements fit and predict and returns
# correct values for a simple toy dataset.
for X in [X2, scipy.sparse.csr_matrix(X2)]:
# Check the ability to predict the learning set.
clf = MultinomialNB()
assert_raises(ValueError, clf.fit, -X, y2)
y_pred = clf.fit(X, y2).predict(X)
assert_array_equal(y_pred, y2)
# Verify that np.log(clf.predict_proba(X)) gives the same results as
# clf.predict_log_proba(X)
y_pred_proba = clf.predict_proba(X)
y_pred_log_proba = clf.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba), y_pred_log_proba, 8)
# Check that incremental fitting yields the same results
clf2 = MultinomialNB()
clf2.partial_fit(X[:2], y2[:2], classes=np.unique(y2))
clf2.partial_fit(X[2:5], y2[2:5])
clf2.partial_fit(X[5:], y2[5:])
y_pred2 = clf2.predict(X)
assert_array_equal(y_pred2, y2)
y_pred_proba2 = clf2.predict_proba(X)
y_pred_log_proba2 = clf2.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba2), y_pred_log_proba2, 8)
assert_array_almost_equal(y_pred_proba2, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba2, y_pred_log_proba)
# Partial fit on the whole data at once should be the same as fit too
clf3 = MultinomialNB()
clf3.partial_fit(X, y2, classes=np.unique(y2))
y_pred3 = clf3.predict(X)
assert_array_equal(y_pred3, y2)
y_pred_proba3 = clf3.predict_proba(X)
y_pred_log_proba3 = clf3.predict_log_proba(X)
assert_array_almost_equal(np.log(y_pred_proba3), y_pred_log_proba3, 8)
assert_array_almost_equal(y_pred_proba3, y_pred_proba)
assert_array_almost_equal(y_pred_log_proba3, y_pred_log_proba)
def check_partial_fit(cls):
clf1 = cls()
clf1.fit([[0, 1], [1, 0]], [0, 1])
clf2 = cls()
clf2.partial_fit([[0, 1], [1, 0]], [0, 1], classes=[0, 1])
assert_array_equal(clf1.class_count_, clf2.class_count_)
assert_array_equal(clf1.feature_count_, clf2.feature_count_)
clf3 = cls()
clf3.partial_fit([[0, 1]], [0], classes=[0, 1])
clf3.partial_fit([[1, 0]], [1])
assert_array_equal(clf1.class_count_, clf3.class_count_)
assert_array_equal(clf1.feature_count_, clf3.feature_count_)
def test_discretenb_partial_fit():
for cls in [MultinomialNB, BernoulliNB]:
yield check_partial_fit, cls
def test_gnb_partial_fit():
clf = GaussianNB().fit(X, y)
clf_pf = GaussianNB().partial_fit(X, y, np.unique(y))
assert_array_almost_equal(clf.theta_, clf_pf.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf.class_prior_)
clf_pf2 = GaussianNB().partial_fit(X[0::2, :], y[0::2], np.unique(y))
clf_pf2.partial_fit(X[1::2], y[1::2])
assert_array_almost_equal(clf.theta_, clf_pf2.theta_)
assert_array_almost_equal(clf.sigma_, clf_pf2.sigma_)
assert_array_almost_equal(clf.class_prior_, clf_pf2.class_prior_)
def test_discretenb_pickle():
# Test picklability of discrete naive Bayes classifiers
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
clf = cls().fit(X2, y2)
y_pred = clf.predict(X2)
store = BytesIO()
pickle.dump(clf, store)
clf = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf.predict(X2))
if cls is not GaussianNB:
# TODO re-enable me when partial_fit is implemented for GaussianNB
# Test pickling of estimator trained with partial_fit
clf2 = cls().partial_fit(X2[:3], y2[:3], classes=np.unique(y2))
clf2.partial_fit(X2[3:], y2[3:])
store = BytesIO()
pickle.dump(clf2, store)
clf2 = pickle.load(BytesIO(store.getvalue()))
assert_array_equal(y_pred, clf2.predict(X2))
def test_input_check_fit():
# Test input checks for the fit method
for cls in [BernoulliNB, MultinomialNB, GaussianNB]:
# check shape consistency for number of samples at fit time
assert_raises(ValueError, cls().fit, X2, y2[:-1])
# check shape consistency for number of input features at predict time
clf = cls().fit(X2, y2)
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_input_check_partial_fit():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency
assert_raises(ValueError, cls().partial_fit, X2, y2[:-1],
classes=np.unique(y2))
# classes is required for first call to partial fit
assert_raises(ValueError, cls().partial_fit, X2, y2)
# check consistency of consecutive classes values
clf = cls()
clf.partial_fit(X2, y2, classes=np.unique(y2))
assert_raises(ValueError, clf.partial_fit, X2, y2,
classes=np.arange(42))
# check consistency of input shape for partial_fit
assert_raises(ValueError, clf.partial_fit, X2[:, :-1], y2)
# check consistency of input shape for predict
assert_raises(ValueError, clf.predict, X2[:, :-1])
def test_discretenb_predict_proba():
# Test discrete NB classes' probability scores
# The 100s below distinguish Bernoulli from multinomial.
# FIXME: write a test to show this.
X_bernoulli = [[1, 100, 0], [0, 1, 0], [0, 100, 1]]
X_multinomial = [[0, 1], [1, 3], [4, 0]]
# test binary case (1-d output)
y = [0, 0, 2] # 2 is regression test for binary case, 02e673
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict(X[-1]), 2)
assert_equal(clf.predict_proba(X[0]).shape, (1, 2))
assert_array_almost_equal(clf.predict_proba(X[:2]).sum(axis=1),
np.array([1., 1.]), 6)
# test multiclass case (2-d output, must sum to one)
y = [0, 1, 2]
for cls, X in zip([BernoulliNB, MultinomialNB],
[X_bernoulli, X_multinomial]):
clf = cls().fit(X, y)
assert_equal(clf.predict_proba(X[0]).shape, (1, 3))
assert_equal(clf.predict_proba(X[:2]).shape, (2, 3))
assert_almost_equal(np.sum(clf.predict_proba(X[1])), 1)
assert_almost_equal(np.sum(clf.predict_proba(X[-1])), 1)
assert_almost_equal(np.sum(np.exp(clf.class_log_prior_)), 1)
assert_almost_equal(np.sum(np.exp(clf.intercept_)), 1)
def test_discretenb_uniform_prior():
# Test whether discrete NB classes fit a uniform prior
# when fit_prior=False and class_prior=None
for cls in [BernoulliNB, MultinomialNB]:
clf = cls()
clf.set_params(fit_prior=False)
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
def test_discretenb_provide_prior():
# Test whether discrete NB classes use provided prior
for cls in [BernoulliNB, MultinomialNB]:
clf = cls(class_prior=[0.5, 0.5])
clf.fit([[0], [0], [1]], [0, 0, 1])
prior = np.exp(clf.class_log_prior_)
assert_array_equal(prior, np.array([.5, .5]))
# Inconsistent number of classes with prior
assert_raises(ValueError, clf.fit, [[0], [1], [2]], [0, 1, 2])
assert_raises(ValueError, clf.partial_fit, [[0], [1]], [0, 1],
classes=[0, 1, 1])
def test_discretenb_provide_prior_with_partial_fit():
# Test whether discrete NB classes use provided prior
# when using partial_fit
iris = load_iris()
iris_data1, iris_data2, iris_target1, iris_target2 = train_test_split(
iris.data, iris.target, test_size=0.4, random_state=415)
for cls in [BernoulliNB, MultinomialNB]:
for prior in [None, [0.3, 0.3, 0.4]]:
clf_full = cls(class_prior=prior)
clf_full.fit(iris.data, iris.target)
clf_partial = cls(class_prior=prior)
clf_partial.partial_fit(iris_data1, iris_target1,
classes=[0, 1, 2])
clf_partial.partial_fit(iris_data2, iris_target2)
assert_array_almost_equal(clf_full.class_log_prior_,
clf_partial.class_log_prior_)
def test_sample_weight_multiclass():
for cls in [BernoulliNB, MultinomialNB]:
# check shape consistency for number of samples at fit time
yield check_sample_weight_multiclass, cls
def check_sample_weight_multiclass(cls):
X = [
[0, 0, 1],
[0, 1, 1],
[0, 1, 1],
[1, 0, 0],
]
y = [0, 0, 1, 2]
sample_weight = np.array([1, 1, 2, 2], dtype=np.float)
sample_weight /= sample_weight.sum()
clf = cls().fit(X, y, sample_weight=sample_weight)
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
# Check sample weight using the partial_fit method
clf = cls()
clf.partial_fit(X[:2], y[:2], classes=[0, 1, 2],
sample_weight=sample_weight[:2])
clf.partial_fit(X[2:3], y[2:3], sample_weight=sample_weight[2:3])
clf.partial_fit(X[3:], y[3:], sample_weight=sample_weight[3:])
assert_array_equal(clf.predict(X), [0, 1, 1, 2])
def test_sample_weight_mnb():
clf = MultinomialNB()
clf.fit([[1, 2], [1, 2], [1, 0]],
[0, 0, 1],
sample_weight=[1, 1, 4])
assert_array_equal(clf.predict([1, 0]), [1])
positive_prior = np.exp(clf.intercept_[0])
assert_array_almost_equal([1 - positive_prior, positive_prior],
[1 / 3., 2 / 3.])
def test_coef_intercept_shape():
# coef_ and intercept_ should have shapes as in other linear models.
# Non-regression test for issue #2127.
X = [[1, 0, 0], [1, 1, 1]]
y = [1, 2] # binary classification
for clf in [MultinomialNB(), BernoulliNB()]:
clf.fit(X, y)
assert_equal(clf.coef_.shape, (1, 3))
assert_equal(clf.intercept_.shape, (1,))
def test_check_accuracy_on_digits():
# Non regression test to make sure that any further refactoring / optim
# of the NB models do not harm the performance on a slightly non-linearly
# separable dataset
digits = load_digits()
X, y = digits.data, digits.target
binary_3v8 = np.logical_or(digits.target == 3, digits.target == 8)
X_3v8, y_3v8 = X[binary_3v8], y[binary_3v8]
# Multinomial NB
scores = cross_val_score(MultinomialNB(alpha=10), X, y, cv=10)
assert_greater(scores.mean(), 0.86)
scores = cross_val_score(MultinomialNB(alpha=10), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.94)
# Bernoulli NB
scores = cross_val_score(BernoulliNB(alpha=10), X > 4, y, cv=10)
assert_greater(scores.mean(), 0.83)
scores = cross_val_score(BernoulliNB(alpha=10), X_3v8 > 4, y_3v8, cv=10)
assert_greater(scores.mean(), 0.92)
# Gaussian NB
scores = cross_val_score(GaussianNB(), X, y, cv=10)
assert_greater(scores.mean(), 0.77)
scores = cross_val_score(GaussianNB(), X_3v8, y_3v8, cv=10)
assert_greater(scores.mean(), 0.86)
def test_feature_log_prob_bnb():
# Test for issue #4268.
# Tests that the feature log prob value computed by BernoulliNB when
# alpha=1.0 is equal to the expression given in Manning, Raghavan,
# and Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
X = np.array([[0, 0, 0], [1, 1, 0], [0, 1, 0], [1, 0, 1], [0, 1, 0]])
Y = np.array([0, 0, 1, 2, 2])
# Fit Bernoulli NB w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Manually form the (log) numerator and denominator that
# constitute P(feature presence | class)
num = np.log(clf.feature_count_ + 1.0)
denom = np.tile(np.log(clf.class_count_ + 2.0), (X.shape[1], 1)).T
# Check manual estimate matches
assert_array_equal(clf.feature_log_prob_, (num - denom))
def test_bnb():
# Tests that BernoulliNB when alpha=1.0 gives the same values as
# those given for the toy example in Manning, Raghavan, and
# Schuetze's "Introduction to Information Retrieval" book:
# http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html
# Training data points are:
# Chinese Beijing Chinese (class: China)
# Chinese Chinese Shanghai (class: China)
# Chinese Macao (class: China)
# Tokyo Japan Chinese (class: Japan)
# Features are Beijing, Chinese, Japan, Macao, Shanghai, and Tokyo
X = np.array([[1, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 0, 1]])
# Classes are China (0), Japan (1)
Y = np.array([0, 0, 0, 1])
# Fit BernoulliBN w/ alpha = 1.0
clf = BernoulliNB(alpha=1.0)
clf.fit(X, Y)
# Check the class prior is correct
class_prior = np.array([0.75, 0.25])
assert_array_almost_equal(np.exp(clf.class_log_prior_), class_prior)
# Check the feature probabilities are correct
feature_prob = np.array([[0.4, 0.8, 0.2, 0.4, 0.4, 0.2],
[1/3.0, 2/3.0, 2/3.0, 1/3.0, 1/3.0, 2/3.0]])
assert_array_almost_equal(np.exp(clf.feature_log_prob_), feature_prob)
# Testing data point is:
# Chinese Chinese Chinese Tokyo Japan
X_test = np.array([0, 1, 1, 0, 0, 1])
# Check the predictive probabilities are correct
unnorm_predict_proba = np.array([[0.005183999999999999,
0.02194787379972565]])
predict_proba = unnorm_predict_proba / np.sum(unnorm_predict_proba)
assert_array_almost_equal(clf.predict_proba(X_test), predict_proba)
| bsd-3-clause |
MD2Korg/CerebralCortex | jupyter_demo/demo_algorithm/gps_clustering.py | 1 | 4106 | # Copyright (c) 2019, MD2K Center of Excellence
# - Nasir Ali <nasir.ali08@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
import pandas as pd
from geopy.distance import great_circle
from pyspark.sql.functions import pandas_udf, PandasUDFType
from shapely.geometry.multipoint import MultiPoint
from sklearn.cluster import DBSCAN
from pyspark.sql.types import StructField, StructType, StringType, FloatType
EPSILON_CONSTANT = 1000
LATITUDE = 0
LONGITUDE = 1
ACCURACY = -1
GPS_ACCURACY_THRESHOLD = 41.0
KM_PER_RADIAN = 6371.0088
GEO_FENCE_DISTANCE = 2
MINIMUM_POINTS_IN_CLUSTER = 500
def get_centermost_point(cluster: object) -> object:
"""
:param cluster:
:return:
:rtype: object
"""
centroid = (
MultiPoint(cluster).centroid.x, MultiPoint(cluster).centroid.y)
centermost_point = min(cluster, key=lambda point: great_circle(point,
centroid).m)
return tuple(centermost_point)
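# For example, for a toy cluster [(0.0, 0.0), (0.0, 1.0), (1.0, 0.0)] the centroid is
# roughly (0.33, 0.33) and (0.0, 0.0) is returned, since it has the smallest
# great-circle distance to that centroid.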
schema = StructType([
StructField("user", StringType()),
StructField("latitude", FloatType()),
StructField("longitude", FloatType())
])
@pandas_udf(schema, PandasUDFType.GROUPED_MAP)
def gps_clusters(data: object) -> object:
"""
Computes the clusters
:rtype: object
:param list data: list of interpolated gps data
:param float geo_fence_distance: Maximum distance between points in a
cluster
:param int min_points_in_cluster: Minimum number of points in a cluster
:return: list of cluster-centroids coordinates
"""
geo_fence_distance = GEO_FENCE_DISTANCE
min_points_in_cluster = MINIMUM_POINTS_IN_CLUSTER
data = data[data.accuracy < GPS_ACCURACY_THRESHOLD]
id = data.user.iloc[0]
dataframe = pd.DataFrame(
{'latitude': data.latitude, 'longitude': data.longitude})
    coords = dataframe[['latitude', 'longitude']].values
epsilon = geo_fence_distance / (
EPSILON_CONSTANT * KM_PER_RADIAN)
db = DBSCAN(eps=epsilon, min_samples=min_points_in_cluster,
algorithm='ball_tree', metric='haversine').fit(
np.radians(coords))
cluster_labels = db.labels_
num_clusters = len(set(cluster_labels))
clusters = pd.Series(
[coords[cluster_labels == n] for n in range(-1, num_clusters)])
clusters = clusters.apply(lambda y: np.nan if len(y) == 0 else y)
clusters.dropna(how='any', inplace=True)
centermost_points = clusters.map(get_centermost_point)
centermost_points = np.array(centermost_points)
all_centroid = []
for cols in centermost_points:
cols = np.array(cols)
cols.flatten()
cs = ([id, cols[LATITUDE], cols[LONGITUDE]])
all_centroid.append(cs)
df = pd.DataFrame(all_centroid, columns=['user', 'latitude', 'longitude'])
return df
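# Illustrative usage sketch (assumes a Spark DataFrame `gps_df` with 'user', 'latitude',
# 'longitude' and 'accuracy' columns, e.g. loaded through CerebralCortex): the
# GROUPED_MAP pandas UDF is applied per user with groupBy().apply(), returning one row
# per detected cluster centroid.
# centroids = gps_df.groupBy("user").apply(gps_clusters)
# centroids.show()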
| bsd-2-clause |